patches for DPDK stable branches
* [PATCH 19.11 1/6] net/mlx5: fix VXLAN-GPE next protocol translation
@ 2021-12-02 16:00 Gregory Etelson
From: Gregory Etelson @ 2021-12-02 16:00 UTC
  To: stable, getelson
  Cc: Viacheslav Ovsiienko, Matan Azrad, Shahaf Shuler, Raslan Darawsheh

[ upstream commit 861fa3796f75748ccc4a6dae55e5a7e34c97dea4 ]

VXLAN-GPE extends the VXLAN protocol and provides a next protocol
field specifying the first inner header type.

The application can assign an explicit value to the
VXLAN-GPE::next_protocol field or leave it at the default. In the
latter case, the rdma-core library cannot correctly recognize the
matcher built by the PMD, and the resulting hardware configuration
misses the match on the inner headers.

The patch forces the VXLAN-GPE::next_protocol assignment if the
application did not explicitly set it to a non-default value.
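
For illustration only (not part of the change), a minimal flow pattern
that exercises this path could look like the sketch below; the VXLAN-GPE
item carries no spec, so next_protocol stays at its default and the PMD
now derives it from the inner Ethernet layer that follows:

	/* Illustrative pattern: outer ETH/IPv4/UDP, a VXLAN-GPE item with
	 * no spec/mask (next_protocol left at default), then an inner ETH
	 * item. The PMD forces next_protocol = RTE_VXLAN_GPE_TYPE_ETH here.
	 */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE },
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};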

Fixes: 90456726eb80 ("net/mlx5: fix VXLAN-GPE item translation")
Cc: stable@dpdk.org

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow_dv.c | 76 ++++++++++++++++++---------------
 1 file changed, 42 insertions(+), 34 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index f8ca36b1c6..f124f42c9c 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -6056,46 +6056,40 @@ flow_dv_translate_item_vxlan(void *matcher, void *key,
 
 static void
 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
-				 const struct rte_flow_item *item, int inner)
+				 const struct rte_flow_item *item,
+				 const uint64_t pattern_flags)
 {
+	static const struct rte_flow_item_vxlan_gpe dummy_vxlan_gpe_hdr = {0, };
 	const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
 	const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
-	void *headers_m;
-	void *headers_v;
+	/* The item was validated to be on the outer side */
+	void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+	void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
 	void *misc_m =
 		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
 	void *misc_v =
 		MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
-	char *vni_m;
-	char *vni_v;
-	uint16_t dport;
-	int size;
-	int i;
+	char *vni_m =
+		MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
+	char *vni_v =
+		MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
+	int i, size = sizeof(vxlan_m->vni);
 	uint8_t flags_m = 0xff;
 	uint8_t flags_v = 0xc;
+	uint8_t m_protocol, v_protocol;
 
-	if (inner) {
-		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
-					 inner_headers);
-		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
-	} else {
-		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
-					 outer_headers);
-		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
-	}
-	dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
-		MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
 	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
-		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+			 MLX5_UDP_PORT_VXLAN_GPE);
+	}
+	if (!vxlan_v) {
+		vxlan_v = &dummy_vxlan_gpe_hdr;
+		vxlan_m = &dummy_vxlan_gpe_hdr;
+	} else {
+		if (!vxlan_m)
+			vxlan_m = &rte_flow_item_vxlan_gpe_mask;
 	}
-	if (!vxlan_v)
-		return;
-	if (!vxlan_m)
-		vxlan_m = &rte_flow_item_vxlan_gpe_mask;
-	size = sizeof(vxlan_m->vni);
-	vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
-	vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
 	memcpy(vni_m, vxlan_m->vni, size);
 	for (i = 0; i < size; ++i)
 		vni_v[i] = vni_m[i] & vxlan_v->vni[i];
@@ -6105,10 +6099,22 @@ flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
 	}
 	MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
 	MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
-	MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
-		 vxlan_m->protocol);
-	MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
-		 vxlan_v->protocol);
+	m_protocol = vxlan_m->protocol;
+	v_protocol = vxlan_v->protocol;
+	if (!m_protocol) {
+		m_protocol = 0xff;
+		/* Force next protocol to ensure next headers parsing. */
+		if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
+			v_protocol = RTE_VXLAN_GPE_TYPE_ETH;
+		else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+			v_protocol = RTE_VXLAN_GPE_TYPE_IPV4;
+		else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+			v_protocol = RTE_VXLAN_GPE_TYPE_IPV6;
+	}
+	MLX5_SET(fte_match_set_misc3, misc_m,
+		 outer_vxlan_gpe_next_protocol, m_protocol);
+	MLX5_SET(fte_match_set_misc3, misc_v,
+		 outer_vxlan_gpe_next_protocol, m_protocol & v_protocol);
 }
 
 /**
@@ -7237,6 +7243,7 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 	struct rte_vlan_hdr vlan = { 0 };
 	uint32_t table;
 	int ret = 0;
+	const struct rte_flow_item *tunnel_item = NULL;
 
 	mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
 					   MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
@@ -7793,12 +7800,10 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 			last_item = MLX5_FLOW_LAYER_VXLAN;
 			break;
 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
-			flow_dv_translate_item_vxlan_gpe(match_mask,
-							 match_value, items,
-							 tunnel);
 			matcher.priority = flow->rss.level >= 2 ?
 				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
 			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
+			tunnel_item = items;
 			break;
 		case RTE_FLOW_ITEM_TYPE_GENEVE:
 			flow_dv_translate_item_geneve(match_mask, match_value,
@@ -7868,6 +7873,9 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 						   match_value, NULL))
 			return -rte_errno;
 	}
+	if (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE)
+		flow_dv_translate_item_vxlan_gpe(match_mask, match_value,
+						 tunnel_item, item_flags);
 	assert(!flow_dv_check_valid_spec(matcher.mask.buf,
 					 dev_flow->dv.value.buf));
 	/*
-- 
2.34.0

