patches for DPDK stable branches
* [PATCH 19.11 1/6] net/mlx5: fix VXLAN-GPE next protocol translation
@ 2021-12-02 16:00 Gregory Etelson
  2021-12-02 16:00 ` [PATCH 19.11 2/6] net/mlx5: fix RSS expansion scheme for GRE header Gregory Etelson
  2021-12-02 16:00 ` [PATCH 19.11 3/6] net/mlx5: fix GENEVE protocol type translation Gregory Etelson
  0 siblings, 2 replies; 3+ messages in thread
From: Gregory Etelson @ 2021-12-02 16:00 UTC (permalink / raw)
  To: stable, getelson
  Cc: Viacheslav Ovsiienko, Matan Azrad, Shahaf Shuler, Raslan Darawsheh

[ upstream commit 861fa3796f75748ccc4a6dae55e5a7e34c97dea4 ]

VXLAN-GPE extends VXLAN protocol and provides the next protocol
field specifying the first inner header type.

An application can assign an explicit value to the
VXLAN-GPE::next_protocol field or leave it at the default. In the
latter case, the rdma-core library cannot correctly recognize the
matcher built by the PMD, and the resulting hardware configuration
misses the inner headers match.

This patch forces the VXLAN-GPE::next_protocol assignment if the
application did not explicitly set it to a non-default value.
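
For illustration, a minimal sketch of a rule that hits this case; the
pattern below is hypothetical and not part of the patch. The
application leaves VXLAN-GPE::protocol at zero, so the PMD now derives
it from the inner Ethernet item (RTE_VXLAN_GPE_TYPE_ETH):

    /* next_protocol (.protocol) intentionally left 0 (default). */
    struct rte_flow_item_vxlan_gpe vxlan_gpe_spec = {
        .vni = { 0x00, 0x00, 0x2a },
    };
    struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_UDP },
        { .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
          .spec = &vxlan_gpe_spec },
        { .type = RTE_FLOW_ITEM_TYPE_ETH },  /* inner L2 */
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };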

Fixes: 90456726eb80 ("net/mlx5: fix VXLAN-GPE item translation")
Cc: stable@dpdk.org

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow_dv.c | 76 ++++++++++++++++++---------------
 1 file changed, 42 insertions(+), 34 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index f8ca36b1c6..f124f42c9c 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -6056,46 +6056,40 @@ flow_dv_translate_item_vxlan(void *matcher, void *key,
 
 static void
 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
-				 const struct rte_flow_item *item, int inner)
+				 const struct rte_flow_item *item,
+				 const uint64_t pattern_flags)
 {
+	static const struct rte_flow_item_vxlan_gpe dummy_vxlan_gpe_hdr = {0, };
 	const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
 	const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
-	void *headers_m;
-	void *headers_v;
+	/* The item was validated to be on the outer side */
+	void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+	void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
 	void *misc_m =
 		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
 	void *misc_v =
 		MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
-	char *vni_m;
-	char *vni_v;
-	uint16_t dport;
-	int size;
-	int i;
+	char *vni_m =
+		MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
+	char *vni_v =
+		MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
+	int i, size = sizeof(vxlan_m->vni);
 	uint8_t flags_m = 0xff;
 	uint8_t flags_v = 0xc;
+	uint8_t m_protocol, v_protocol;
 
-	if (inner) {
-		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
-					 inner_headers);
-		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
-	} else {
-		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
-					 outer_headers);
-		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
-	}
-	dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
-		MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
 	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
-		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+			 MLX5_UDP_PORT_VXLAN_GPE);
+	}
+	if (!vxlan_v) {
+		vxlan_v = &dummy_vxlan_gpe_hdr;
+		vxlan_m = &dummy_vxlan_gpe_hdr;
+	} else {
+		if (!vxlan_m)
+			vxlan_m = &rte_flow_item_vxlan_gpe_mask;
 	}
-	if (!vxlan_v)
-		return;
-	if (!vxlan_m)
-		vxlan_m = &rte_flow_item_vxlan_gpe_mask;
-	size = sizeof(vxlan_m->vni);
-	vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
-	vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
 	memcpy(vni_m, vxlan_m->vni, size);
 	for (i = 0; i < size; ++i)
 		vni_v[i] = vni_m[i] & vxlan_v->vni[i];
@@ -6105,10 +6099,22 @@ flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
 	}
 	MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
 	MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
-	MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
-		 vxlan_m->protocol);
-	MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
-		 vxlan_v->protocol);
+	m_protocol = vxlan_m->protocol;
+	v_protocol = vxlan_v->protocol;
+	if (!m_protocol) {
+		m_protocol = 0xff;
+		/* Force next protocol to ensure next headers parsing. */
+		if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
+			v_protocol = RTE_VXLAN_GPE_TYPE_ETH;
+		else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+			v_protocol = RTE_VXLAN_GPE_TYPE_IPV4;
+		else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+			v_protocol = RTE_VXLAN_GPE_TYPE_IPV6;
+	}
+	MLX5_SET(fte_match_set_misc3, misc_m,
+		 outer_vxlan_gpe_next_protocol, m_protocol);
+	MLX5_SET(fte_match_set_misc3, misc_v,
+		 outer_vxlan_gpe_next_protocol, m_protocol & v_protocol);
 }
 
 /**
@@ -7237,6 +7243,7 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 	struct rte_vlan_hdr vlan = { 0 };
 	uint32_t table;
 	int ret = 0;
+	const struct rte_flow_item *tunnel_item = NULL;
 
 	mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
 					   MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
@@ -7793,12 +7800,10 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 			last_item = MLX5_FLOW_LAYER_VXLAN;
 			break;
 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
-			flow_dv_translate_item_vxlan_gpe(match_mask,
-							 match_value, items,
-							 tunnel);
 			matcher.priority = flow->rss.level >= 2 ?
 				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
 			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
+			tunnel_item = items;
 			break;
 		case RTE_FLOW_ITEM_TYPE_GENEVE:
 			flow_dv_translate_item_geneve(match_mask, match_value,
@@ -7868,6 +7873,9 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 						   match_value, NULL))
 			return -rte_errno;
 	}
+	if (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE)
+		flow_dv_translate_item_vxlan_gpe(match_mask, match_value,
+						 tunnel_item, item_flags);
 	assert(!flow_dv_check_valid_spec(matcher.mask.buf,
 					 dev_flow->dv.value.buf));
 	/*
-- 
2.34.0



* [PATCH 19.11 2/6] net/mlx5: fix RSS expansion scheme for GRE header
  2021-12-02 16:00 [PATCH 19.11 1/6] net/mlx5: fix VXLAN-GPE next protocol translation Gregory Etelson
@ 2021-12-02 16:00 ` Gregory Etelson
  2021-12-02 16:00 ` [PATCH 19.11 3/6] net/mlx5: fix GENEVE protocol type translation Gregory Etelson
  1 sibling, 0 replies; 3+ messages in thread
From: Gregory Etelson @ 2021-12-02 16:00 UTC (permalink / raw)
  To: stable, getelson
  Cc: Viacheslav Ovsiienko, Matan Azrad, Shahaf Shuler, Yongseok Koh,
	Nelio Laranjeiro

[ upstream commit a21d616b99ffc5810a4b5333e5efdf1ddff21405 ]

RFC 2784 allows any valid Ethernet type in the GRE protocol type
field.

Add Ethernet to the GRE RSS expansion.
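
As an illustration (hypothetical, not part of the patch): when a rule
like the one below carries an RSS action with level 2 (inner), the
expansion can now also produce the inner Ethernet (TEB) variant
eth / ipv4 / gre / eth:

    struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_GRE },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };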

Fixes: f4b901a46aec ("net/mlx5: add flow GRE item")
Cc: stable@dpdk.org

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 0805bdb8c9..eff0057a60 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -180,7 +180,8 @@ static const struct rte_flow_expand_node mlx5_support_expansion[] = {
 		.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
 	},
 	[MLX5_EXPANSION_GRE] = {
-		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+		.next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
+						 MLX5_EXPANSION_IPV4,
 						 MLX5_EXPANSION_IPV6,
 						 MLX5_EXPANSION_GRE_KEY,
 						 MLX5_EXPANSION_MPLS),
-- 
2.34.0



* [PATCH 19.11 3/6] net/mlx5: fix GENEVE protocol type translation
  2021-12-02 16:00 [PATCH 19.11 1/6] net/mlx5: fix VXLAN-GPE next protocol translation Gregory Etelson
  2021-12-02 16:00 ` [PATCH 19.11 2/6] net/mlx5: fix RSS expansion scheme for GRE header Gregory Etelson
@ 2021-12-02 16:00 ` Gregory Etelson
  1 sibling, 0 replies; 3+ messages in thread
From: Gregory Etelson @ 2021-12-02 16:00 UTC (permalink / raw)
  To: stable, getelson
  Cc: Viacheslav Ovsiienko, Matan Azrad, Shahaf Shuler, Moti Haimovsky

[ upstream commit 690391dd0e8bc7a8d02a3aba844ffc3dffe7aecd ]

When an application creates several flows to match on a GENEVE tunnel
without explicitly specifying the GENEVE protocol type value in the
flow rules, the PMD translates that to a zero mask.
rdma-core cannot distinguish between different inner flow types and
produces identical matchers for each zero mask.

This patch extracts the inner header type from the flow rule and
forces it into the GENEVE protocol type if the application did not
explicitly specify a protocol type value.
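
A minimal sketch of such a rule (hypothetical, not part of the patch):
the GENEVE protocol field is left at zero, so the PMD now forces it
from the inner IPv4 item (RTE_ETHER_TYPE_IPV4):

    /* protocol intentionally left 0 (default). */
    struct rte_flow_item_geneve geneve_spec = {
        .vni = { 0x00, 0x00, 0x2a },
    };
    struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_UDP },
        { .type = RTE_FLOW_ITEM_TYPE_GENEVE,
          .spec = &geneve_spec },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },  /* inner L3 */
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };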

Fixes: e59a5dbcfd07 ("net/mlx5: add flow match on GENEVE item")
Cc: stable@dpdk.org

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow_dv.c | 78 ++++++++++++++++++++-------------
 1 file changed, 47 insertions(+), 31 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index f124f42c9c..8dec8d9ff5 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -75,6 +75,20 @@ static int
 flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
 			     struct mlx5_flow_tbl_resource *tbl);
 
+static inline uint16_t
+mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
+{
+	if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
+		return RTE_ETHER_TYPE_TEB;
+	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+		return RTE_ETHER_TYPE_IPV4;
+	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+		return RTE_ETHER_TYPE_IPV6;
+	else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
+		return RTE_ETHER_TYPE_MPLS;
+	return 0;
+}
+
 /**
  * Initialize flow attributes structure according to flow items' types.
  *
@@ -6132,49 +6146,39 @@ flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
 
 static void
 flow_dv_translate_item_geneve(void *matcher, void *key,
-			      const struct rte_flow_item *item, int inner)
+			      const struct rte_flow_item *item,
+			      uint64_t pattern_flags)
 {
+	static const struct rte_flow_item_geneve empty_geneve = {0,};
 	const struct rte_flow_item_geneve *geneve_m = item->mask;
 	const struct rte_flow_item_geneve *geneve_v = item->spec;
-	void *headers_m;
-	void *headers_v;
+	/* GENEVE flow item validation allows single tunnel item */
+	void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+	void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
-	uint16_t dport;
 	uint16_t gbhdr_m;
 	uint16_t gbhdr_v;
-	char *vni_m;
-	char *vni_v;
-	size_t size, i;
+	char *vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
+	char *vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
+	size_t size = sizeof(geneve_m->vni), i;
+	uint16_t protocol_m, protocol_v;
 
-	if (inner) {
-		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
-					 inner_headers);
-		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
-	} else {
-		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
-					 outer_headers);
-		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
-	}
-	dport = MLX5_UDP_PORT_GENEVE;
 	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
-		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+			 MLX5_UDP_PORT_GENEVE);
+	}
+	if (!geneve_v) {
+		geneve_v = &empty_geneve;
+		geneve_m = &empty_geneve;
+	} else {
+		if (!geneve_m)
+			geneve_m = &rte_flow_item_geneve_mask;
 	}
-	if (!geneve_v)
-		return;
-	if (!geneve_m)
-		geneve_m = &rte_flow_item_geneve_mask;
-	size = sizeof(geneve_m->vni);
-	vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
-	vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
 	memcpy(vni_m, geneve_m->vni, size);
 	for (i = 0; i < size; ++i)
 		vni_v[i] = vni_m[i] & geneve_v->vni[i];
-	MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
-		 rte_be_to_cpu_16(geneve_m->protocol));
-	MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
-		 rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
 	gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
 	gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
 	MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
@@ -6186,6 +6190,16 @@ flow_dv_translate_item_geneve(void *matcher, void *key,
 	MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
 		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
 		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
+	protocol_m = rte_be_to_cpu_16(geneve_m->protocol);
+	protocol_v = rte_be_to_cpu_16(geneve_v->protocol);
+	if (!protocol_m) {
+		/* Force next protocol to prevent matchers duplication */
+		protocol_m = 0xFFFF;
+		protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
+	}
+	MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, protocol_m);
+	MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
+		 protocol_m & protocol_v);
 }
 
 /**
@@ -7806,11 +7820,10 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 			tunnel_item = items;
 			break;
 		case RTE_FLOW_ITEM_TYPE_GENEVE:
-			flow_dv_translate_item_geneve(match_mask, match_value,
-						      items, tunnel);
 			matcher.priority = flow->rss.level >= 2 ?
 				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
 			last_item = MLX5_FLOW_LAYER_GENEVE;
+			tunnel_item = items;
 			break;
 		case RTE_FLOW_ITEM_TYPE_MPLS:
 			flow_dv_translate_item_mpls(match_mask, match_value,
@@ -7876,6 +7889,9 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 	if (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE)
 		flow_dv_translate_item_vxlan_gpe(match_mask, match_value,
 						 tunnel_item, item_flags);
+	else if (item_flags & MLX5_FLOW_LAYER_GENEVE)
+		flow_dv_translate_item_geneve(match_mask, match_value,
+					      tunnel_item, item_flags);
 	assert(!flow_dv_check_valid_spec(matcher.mask.buf,
 					 dev_flow->dv.value.buf));
 	/*
-- 
2.34.0

