DPDK patches and discussions
* [dpdk-dev] [PATCH] net/mlx5: support the new VLAN matching fields
@ 2020-10-25 16:03 Matan Azrad
  2020-10-26 23:19 ` Raslan Darawsheh
  0 siblings, 1 reply; 2+ messages in thread
From: Matan Azrad @ 2020-10-25 16:03 UTC
  To: Viacheslav Ovsiienko; +Cc: dev

The fields ``has_vlan`` and ``has_more_vlan`` were added in rte_flow by
patch [1].

Using these fields, an application can cover each of the VLAN matching
options with a single flow: any, VLAN-tagged only, or untagged only.

Add support for these fields.
In addition, add support for QinQ packet matching.
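
For illustration, a minimal sketch of how an application might set these
fields with the generic rte_flow API (assuming the item definitions added
by [1]; flow attributes, actions and error handling are omitted):

    #include <rte_flow.h>

    /* Match multi-tagged (QinQ) packets only: require a VLAN tag in the
     * ETH item and at least one more VLAN tag in the VLAN item.
     */
    struct rte_flow_item_eth eth_spec = { .has_vlan = 1 };
    struct rte_flow_item_eth eth_mask = { .has_vlan = 1 };
    struct rte_flow_item_vlan vlan_spec = { .has_more_vlan = 1 };
    struct rte_flow_item_vlan vlan_mask = { .has_more_vlan = 1 };
    struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH,
          .spec = &eth_spec, .mask = &eth_mask },
        { .type = RTE_FLOW_ITEM_TYPE_VLAN,
          .spec = &vlan_spec, .mask = &vlan_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };

Matching untagged packets only would instead keep ``has_vlan`` set in the
mask and cleared in the spec, with no VLAN item.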

VLAN/QinQ limitations are listed in the driver documentation.

[1] https://patches.dpdk.org/patch/80965/

Signed-off-by: Matan Azrad <matan@nvidia.com>
Acked-by: Dekel Peled <dekelp@nvidia.com>
Acked-by: Ori Kam <orika@nvidia.com>
---
 doc/guides/nics/mlx5.rst               |  24 +++--
 doc/guides/rel_notes/release_20_11.rst |   3 +
 drivers/net/mlx5/mlx5_flow.c           |   5 +-
 drivers/net/mlx5/mlx5_flow.h           |   2 +-
 drivers/net/mlx5/mlx5_flow_dv.c        | 176 +++++++++++++++++----------------
 drivers/net/mlx5/mlx5_flow_verbs.c     |   2 +-
 6 files changed, 117 insertions(+), 95 deletions(-)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 66524f1..79cb436 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -122,23 +122,29 @@ Limitations
 
   Will match any ipv4 packet (VLAN included).
 
-- When using DV flow engine (``dv_flow_en`` = 1), flow pattern without VLAN item
-  will match untagged packets only.
+- When using Verbs flow engine (``dv_flow_en`` = 0), multi-tagged (QinQ) match is not supported.
+
+- When using DV flow engine (``dv_flow_en`` = 1), flow pattern with any VLAN specification will match only single-tagged packets unless the ETH item ``type`` field is 0x88A8 or the VLAN item ``has_more_vlan`` field is 1.
   The flow rule::
 
         flow create 0 ingress pattern eth / ipv4 / end ...
 
-  Will match untagged packets only.
-  The flow rule::
+  Will match any ipv4 packet.
+  The flow rules::
 
-        flow create 0 ingress pattern eth / vlan / ipv4 / end ...
+        flow create 0 ingress pattern eth / vlan / end ...
+        flow create 0 ingress pattern eth has_vlan is 1 / end ...
+        flow create 0 ingress pattern eth type is 0x8100 / end ...
 
-  Will match tagged packets only, with any VLAN ID value.
-  The flow rule::
+  Will match single-tagged packets only, with any VLAN ID value.
+  The flow rules::
 
-        flow create 0 ingress pattern eth / vlan vid is 3 / ipv4 / end ...
+        flow create 0 ingress pattern eth type is 0x88A8 / end ...
+        flow create 0 ingress pattern eth / vlan has_more_vlan is 1 / end ...
+
+  Will match multi-tagged packets only, with any VLAN ID value.
 
-  Will only match tagged packets with VLAN ID 3.
+- A flow pattern with 2 sequential VLAN items is not supported.
 
 - VLAN pop offload command:
 
diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst
index dd59fe8..56b3198 100644
--- a/doc/guides/rel_notes/release_20_11.rst
+++ b/doc/guides/rel_notes/release_20_11.rst
@@ -353,6 +353,9 @@ New Features
   * Updated the supported timeout for Age action to the maximal value supported
     by rte_flow API.
   * Added support of Age action query.
+  * Added support for QinQ packet matching.
+  * Added support for the new VLAN fields ``has_vlan`` in the eth item and
+    ``has_more_vlan`` in the vlan item.
 
 Removed Items
 -------------
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 949b9ce..9bc1465 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1794,6 +1794,8 @@ struct mlx5_flow_tunnel_info {
  *   Item specification.
  * @param[in] item_flags
  *   Bit-fields that holds the items detected until now.
+ * @param[in] ext_vlan_sup
+ *   Whether extended VLAN features are supported or not.
  * @param[out] error
  *   Pointer to error structure.
  *
@@ -1914,7 +1916,7 @@ struct mlx5_flow_tunnel_info {
  */
 int
 mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
-			    uint64_t item_flags,
+			    uint64_t item_flags, bool ext_vlan_sup,
 			    struct rte_flow_error *error)
 {
 	const struct rte_flow_item_eth *mask = item->mask;
@@ -1922,6 +1924,7 @@ struct mlx5_flow_tunnel_info {
 		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 		.type = RTE_BE16(0xffff),
+		.has_vlan = ext_vlan_sup ? 1 : 0,
 	};
 	int ret;
 	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 20beb96..8b5a93f 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1306,7 +1306,7 @@ int mlx5_flow_item_acceptable(const struct rte_flow_item *item,
 			      bool range_accepted,
 			      struct rte_flow_error *error);
 int mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
-				uint64_t item_flags,
+				uint64_t item_flags, bool ext_vlan_sup,
 				struct rte_flow_error *error);
 int mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
 				uint64_t item_flags,
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 504d842..7bfab7d 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -1676,6 +1676,7 @@ struct field_modify_info modify_tcp[] = {
 	const struct rte_flow_item_vlan nic_mask = {
 		.tci = RTE_BE16(UINT16_MAX),
 		.inner_type = RTE_BE16(UINT16_MAX),
+		.has_more_vlan = 1,
 	};
 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 	int ret;
@@ -5358,7 +5359,7 @@ struct field_modify_info modify_tcp[] = {
 			break;
 		case RTE_FLOW_ITEM_TYPE_ETH:
 			ret = mlx5_flow_validate_item_eth(items, item_flags,
-							  error);
+							  true, error);
 			if (ret < 0)
 				return ret;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
@@ -6360,9 +6361,10 @@ struct field_modify_info modify_tcp[] = {
 		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 		.type = RTE_BE16(0xffff),
+		.has_vlan = 0,
 	};
-	void *headers_m;
-	void *headers_v;
+	void *hdrs_m;
+	void *hdrs_v;
 	char *l24_v;
 	unsigned int i;
 
@@ -6371,38 +6373,26 @@ struct field_modify_info modify_tcp[] = {
 	if (!eth_m)
 		eth_m = &nic_mask;
 	if (inner) {
-		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
 					 inner_headers);
-		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
 	} else {
-		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
 					 outer_headers);
-		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
 	}
-	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
+	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
 	       &eth_m->dst, sizeof(eth_m->dst));
 	/* The value must be in the range of the mask. */
-	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
+	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
 	for (i = 0; i < sizeof(eth_m->dst); ++i)
 		l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
-	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
+	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
 	       &eth_m->src, sizeof(eth_m->src));
-	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
+	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
 	/* The value must be in the range of the mask. */
 	for (i = 0; i < sizeof(eth_m->dst); ++i)
 		l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
-	if (eth_v->type) {
-		/* When ethertype is present set mask for tagged VLAN. */
-		MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
-		/* Set value for tagged VLAN if ethertype is 802.1Q. */
-		if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
-		    eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
-			MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag,
-				 1);
-			/* Return here to avoid setting match on ethertype. */
-			return;
-		}
-	}
 	/*
 	 * HW supports match on one Ethertype, the Ethertype following the last
 	 * VLAN tag of the packet (see PRM).
@@ -6411,19 +6401,42 @@ struct field_modify_info modify_tcp[] = {
 	 * ethertype, and use ip_version field instead.
 	 * eCPRI over Ether layer will use type value 0xAEFE.
 	 */
-	if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
-	    eth_m->type == 0xFFFF) {
-		flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
-	} else if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
-		   eth_m->type == 0xFFFF) {
-		flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
-	} else {
-		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
-			 rte_be_to_cpu_16(eth_m->type));
-		l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
-				     ethertype);
-		*(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
+	if (eth_m->type == 0xFFFF) {
+		/* Set cvlan_tag mask for any single/multi/un-tagged case. */
+		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
+		switch (eth_v->type) {
+		case RTE_BE16(RTE_ETHER_TYPE_VLAN):
+			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
+			return;
+		case RTE_BE16(RTE_ETHER_TYPE_QINQ):
+			MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
+			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
+			return;
+		case RTE_BE16(RTE_ETHER_TYPE_IPV4):
+			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
+			return;
+		case RTE_BE16(RTE_ETHER_TYPE_IPV6):
+			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
+			return;
+		default:
+			break;
+		}
+	}
+	if (eth_m->has_vlan) {
+		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
+		if (eth_v->has_vlan) {
+			/*
+			 * Here, when the has_more_vlan field in the VLAN item
+			 * is also not set, only single-tagged packets match.
+			 */
+			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
+			return;
+		}
 	}
+	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
+		 rte_be_to_cpu_16(eth_m->type));
+	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
+	*(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
 }
 
 /**
@@ -6448,19 +6461,19 @@ struct field_modify_info modify_tcp[] = {
 {
 	const struct rte_flow_item_vlan *vlan_m = item->mask;
 	const struct rte_flow_item_vlan *vlan_v = item->spec;
-	void *headers_m;
-	void *headers_v;
+	void *hdrs_m;
+	void *hdrs_v;
 	uint16_t tci_m;
 	uint16_t tci_v;
 
 	if (inner) {
-		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
 					 inner_headers);
-		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
 	} else {
-		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
 					 outer_headers);
-		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
 		/*
 		 * This is workaround, masks are not supported,
 		 * and pre-validated.
@@ -6473,37 +6486,54 @@ struct field_modify_info modify_tcp[] = {
 	 * When VLAN item exists in flow, mark packet as tagged,
 	 * even if TCI is not specified.
 	 */
-	MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
-	MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
+	if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
+		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
+		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
+	}
 	if (!vlan_v)
 		return;
 	if (!vlan_m)
 		vlan_m = &rte_flow_item_vlan_mask;
 	tci_m = rte_be_to_cpu_16(vlan_m->tci);
 	tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
-	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
-	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
-	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
-	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
-	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
-	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
+	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
+	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
+	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
+	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
+	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
+	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
 	/*
 	 * HW is optimized for IPv4/IPv6. In such cases, avoid setting
 	 * ethertype, and use ip_version field instead.
 	 */
-	if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
-	    vlan_m->inner_type == 0xFFFF) {
-		flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
-	} else if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
-		   vlan_m->inner_type == 0xFFFF) {
-		flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
-	} else {
-		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
-			 rte_be_to_cpu_16(vlan_m->inner_type));
-		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
-			 rte_be_to_cpu_16(vlan_m->inner_type &
-					  vlan_v->inner_type));
+	if (vlan_m->inner_type == 0xFFFF) {
+		switch (vlan_v->inner_type) {
+		case RTE_BE16(RTE_ETHER_TYPE_VLAN):
+			MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
+			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
+			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
+			return;
+		case RTE_BE16(RTE_ETHER_TYPE_IPV4):
+			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
+			return;
+		case RTE_BE16(RTE_ETHER_TYPE_IPV6):
+			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
+			return;
+		default:
+			break;
+		}
 	}
+	if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
+		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
+		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
+		/* Only one vlan_tag bit can be set. */
+		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
+		return;
+	}
+	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
+		 rte_be_to_cpu_16(vlan_m->inner_type));
+	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
+		 rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
 }
 
 /**
@@ -6515,8 +6545,6 @@ struct field_modify_info modify_tcp[] = {
  *   Flow matcher value.
  * @param[in] item
  *   Flow pattern to translate.
- * @param[in] item_flags
- *   Bit-fields that holds the items detected until now.
  * @param[in] inner
  *   Item is inner pattern.
  * @param[in] group
@@ -6525,7 +6553,6 @@ struct field_modify_info modify_tcp[] = {
 static void
 flow_dv_translate_item_ipv4(void *matcher, void *key,
 			    const struct rte_flow_item *item,
-			    const uint64_t item_flags,
 			    int inner, uint32_t group)
 {
 	const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
@@ -6555,13 +6582,6 @@ struct field_modify_info modify_tcp[] = {
 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
 	}
 	flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
-	/*
-	 * On outer header (which must contains L2), or inner header with L2,
-	 * set cvlan_tag mask bit to mark this packet as untagged.
-	 * This should be done even if item->spec is empty.
-	 */
-	if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
-		MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
 	if (!ipv4_v)
 		return;
 	if (!ipv4_m)
@@ -6608,8 +6628,6 @@ struct field_modify_info modify_tcp[] = {
  *   Flow matcher value.
  * @param[in] item
  *   Flow pattern to translate.
- * @param[in] item_flags
- *   Bit-fields that holds the items detected until now.
  * @param[in] inner
  *   Item is inner pattern.
  * @param[in] group
@@ -6618,7 +6636,6 @@ struct field_modify_info modify_tcp[] = {
 static void
 flow_dv_translate_item_ipv6(void *matcher, void *key,
 			    const struct rte_flow_item *item,
-			    const uint64_t item_flags,
 			    int inner, uint32_t group)
 {
 	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
@@ -6657,13 +6674,6 @@ struct field_modify_info modify_tcp[] = {
 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
 	}
 	flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
-	/*
-	 * On outer header (which must contains L2), or inner header with L2,
-	 * set cvlan_tag mask bit to mark this packet as untagged.
-	 * This should be done even if item->spec is empty.
-	 */
-	if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
-		MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
 	if (!ipv6_v)
 		return;
 	if (!ipv6_m)
@@ -9915,7 +9925,7 @@ struct field_modify_info modify_tcp[] = {
 			mlx5_flow_tunnel_ip_check(items, next_protocol,
 						  &item_flags, &tunnel);
 			flow_dv_translate_item_ipv4(match_mask, match_value,
-						    items, item_flags, tunnel,
+						    items, tunnel,
 						    dev_flow->dv.group);
 			matcher.priority = MLX5_PRIORITY_MAP_L3;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
@@ -9938,7 +9948,7 @@ struct field_modify_info modify_tcp[] = {
 			mlx5_flow_tunnel_ip_check(items, next_protocol,
 						  &item_flags, &tunnel);
 			flow_dv_translate_item_ipv6(match_mask, match_value,
-						    items, item_flags, tunnel,
+						    items, tunnel,
 						    dev_flow->dv.group);
 			matcher.priority = MLX5_PRIORITY_MAP_L3;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 9cc4410..f0e1bca 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1263,7 +1263,7 @@
 			break;
 		case RTE_FLOW_ITEM_TYPE_ETH:
 			ret = mlx5_flow_validate_item_eth(items, item_flags,
-							  error);
+							  false, error);
 			if (ret < 0)
 				return ret;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
-- 
1.8.3.1



* Re: [dpdk-dev] [PATCH] net/mlx5: support the new VLAN matching fields
  2020-10-25 16:03 [dpdk-dev] [PATCH] net/mlx5: support the new VLAN matching fields Matan Azrad
@ 2020-10-26 23:19 ` Raslan Darawsheh
  0 siblings, 0 replies; 2+ messages in thread
From: Raslan Darawsheh @ 2020-10-26 23:19 UTC
  To: Matan Azrad, Slava Ovsiienko; +Cc: dev

Hi,
> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Matan Azrad
> Sent: Sunday, October 25, 2020 6:04 PM
> To: Slava Ovsiienko <viacheslavo@nvidia.com>
> Cc: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH] net/mlx5: support the new VLAN matching
> fields
> 
> The fields ``has_vlan`` and ``has_more_vlan`` were added in rte_flow by
> patch [1].
> 
> Using these fields, an application can cover each of the VLAN matching
> options with a single flow: any, VLAN-tagged only, or untagged only.
> 
> Add support for these fields.
> In addition, add support for QinQ packet matching.
> 
> VLAN/QinQ limitations are listed in the driver documentation.
> 
> [1] https://patches.dpdk.org/patch/80965/
> 
> Signed-off-by: Matan Azrad <matan@nvidia.com>
> Acked-by: Dekel Peled <dekelp@nvidia.com>
> Acked-by: Ori Kam <orika@nvidia.com>
> ---
> [...]

Patch applied to next-net-mlx,

Kindest regards,
Raslan Darawsheh


