DPDK patches and discussions
From: Dekel Peled <dekelp@mellanox.com>
To: yskoh@mellanox.com, shahafs@mellanox.com
Cc: dev@dpdk.org, orika@mellanox.com, dekelp@mellanox.com
Subject: [dpdk-dev] [PATCH v3 2/3] net/mlx5: fix MPLS item validation
Date: Thu, 15 Nov 2018 17:17:13 +0200	[thread overview]
Message-ID: <1542295034-16098-3-git-send-email-dekelp@mellanox.com> (raw)
In-Reply-To: <1542102705-55243-1-git-send-email-dekelp@mellanox.com>

Update the mlx5_flow_validate_item_mpls() function to allow MPLS
over IP, UDP, and GRE.
Modify the flow_dv_validate() function to use the new logic
introduced in the previous patch of this series: set the new
variable last_item after each item validation, and update
item_flags at the end of each item iteration.
The new variable last_item is passed to
mlx5_flow_validate_item_mpls() and used to validate the MPLS item.
The same change is implemented in flow_verbs_validate().
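
For illustration, the item validation loop in flow_dv_validate() and
flow_verbs_validate() now follows the pattern below (a simplified
sketch of the code in this patch, not the literal driver code):

	uint64_t item_flags = 0;
	uint64_t last_item = 0;

	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			/* last_item tells the MPLS validator which layer
			 * immediately precedes it (outer L3, UDP or GRE).
			 */
			ret = mlx5_flow_validate_item_mpls(dev, items,
							   item_flags,
							   last_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		/* ... other item types handled the same way ... */
		}
		/* Accumulate the flag only after the item is validated. */
		item_flags |= last_item;
	}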

Also update the mlx5_flow_validate_item_mpls() function to verify
that the device configuration has mpls_en set to 1.
This check was added earlier this year, but was unintentionally
removed in recent versions.
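
Inside mlx5_flow_validate_item_mpls(), the two added checks boil
down to the following (simplified sketch; the second error string
is abbreviated here):

	struct priv *priv = dev->data->dev_private;

	/* Restored check: reject MPLS when firmware support is off. */
	if (!priv->config.mpls_en)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "MPLS not supported or"
					  " disabled in firmware"
					  " configuration.");
	/* MPLS over IP, UDP and GRE is allowed. */
	if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 |
			    MLX5_FLOW_LAYER_OUTER_L4_UDP |
			    MLX5_FLOW_LAYER_GRE)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "MPLS only supported over"
					  " IP, UDP and GRE");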

Fixes: 84c406e74524 ("net/mlx5: add flow translate function")
Cc: orika@mellanox.com

Signed-off-by: Dekel Peled <dekelp@mellanox.com>
---
 drivers/net/mlx5/mlx5_flow.c       | 23 +++++++++++++++-----
 drivers/net/mlx5/mlx5_flow.h       |  5 +++--
 drivers/net/mlx5/mlx5_flow_dv.c    | 42 +++++++++++++++++++-----------------
 drivers/net/mlx5/mlx5_flow_verbs.c | 44 ++++++++++++++++++++------------------
 4 files changed, 66 insertions(+), 48 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 5ad3a11..ea90ea5 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1593,12 +1593,14 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 /**
  * Validate MPLS item.
  *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
  * @param[in] item
  *   Item specification.
  * @param[in] item_flags
  *   Bit-fields that holds the items detected until now.
- * @param[in] target_protocol
- *   The next protocol in the previous item.
+ * @param[in] prev_layer
+ *   The protocol layer indicated in previous item.
  * @param[out] error
  *   Pointer to error structure.
  *
@@ -1606,16 +1608,27 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_flow_validate_item_mpls(const struct rte_flow_item *item __rte_unused,
+mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
+			     const struct rte_flow_item *item __rte_unused,
 			     uint64_t item_flags __rte_unused,
-			     uint8_t target_protocol __rte_unused,
+			     uint64_t prev_layer __rte_unused,
 			     struct rte_flow_error *error)
 {
 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
 	const struct rte_flow_item_mpls *mask = item->mask;
+	struct priv *priv = dev->data->dev_private;
 	int ret;
 
-	if (target_protocol != 0xff && target_protocol != IPPROTO_MPLS)
+	if (!priv->config.mpls_en)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "MPLS not supported or"
+					  " disabled in firmware"
+					  " configuration.");
+	/* MPLS over IP, UDP, GRE is allowed */
+	if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 |
+			    MLX5_FLOW_LAYER_OUTER_L4_UDP |
+			    MLX5_FLOW_LAYER_GRE)))
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "protocol filtering not compatible"
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 3022090..11defa0 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -384,9 +384,10 @@ int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
 int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
 				 uint64_t item_flags,
 				 struct rte_flow_error *error);
-int mlx5_flow_validate_item_mpls(const struct rte_flow_item *item,
+int mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev,
+				 const struct rte_flow_item *item,
 				 uint64_t item_flags,
-				 uint8_t target_protocol,
+				 uint64_t prev_layer,
 				 struct rte_flow_error *error);
 int mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
 				uint64_t item_flags,
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index e8cd253..414a6c6 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -775,6 +775,7 @@
 	int ret;
 	uint64_t action_flags = 0;
 	uint64_t item_flags = 0;
+	uint64_t last_item = 0;
 	int tunnel = 0;
 	uint8_t next_protocol = 0xff;
 	int actions_n = 0;
@@ -794,24 +795,24 @@
 							  error);
 			if (ret < 0)
 				return ret;
-			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
-					       MLX5_FLOW_LAYER_OUTER_L2;
+			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+					     MLX5_FLOW_LAYER_OUTER_L2;
 			break;
 		case RTE_FLOW_ITEM_TYPE_VLAN:
 			ret = mlx5_flow_validate_item_vlan(items, item_flags,
 							   error);
 			if (ret < 0)
 				return ret;
-			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
-					       MLX5_FLOW_LAYER_OUTER_VLAN;
+			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+					     MLX5_FLOW_LAYER_OUTER_VLAN;
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
 			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
 							   error);
 			if (ret < 0)
 				return ret;
-			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
-					       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
 			if (items->mask != NULL &&
 			    ((const struct rte_flow_item_ipv4 *)
 			     items->mask)->hdr.next_proto_id) {
@@ -831,8 +832,8 @@
 							   error);
 			if (ret < 0)
 				return ret;
-			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
-					       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
 			if (items->mask != NULL &&
 			    ((const struct rte_flow_item_ipv6 *)
 			     items->mask)->hdr.proto) {
@@ -855,8 +856,8 @@
 						 error);
 			if (ret < 0)
 				return ret;
-			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
-					       MLX5_FLOW_LAYER_OUTER_L4_TCP;
+			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
 			break;
 		case RTE_FLOW_ITEM_TYPE_UDP:
 			ret = mlx5_flow_validate_item_udp(items, item_flags,
@@ -864,8 +865,8 @@
 							  error);
 			if (ret < 0)
 				return ret;
-			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
-					       MLX5_FLOW_LAYER_OUTER_L4_UDP;
+			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
 			break;
 		case RTE_FLOW_ITEM_TYPE_GRE:
 		case RTE_FLOW_ITEM_TYPE_NVGRE:
@@ -873,14 +874,14 @@
 							  next_protocol, error);
 			if (ret < 0)
 				return ret;
-			item_flags |= MLX5_FLOW_LAYER_GRE;
+			last_item = MLX5_FLOW_LAYER_GRE;
 			break;
 		case RTE_FLOW_ITEM_TYPE_VXLAN:
 			ret = mlx5_flow_validate_item_vxlan(items, item_flags,
 							    error);
 			if (ret < 0)
 				return ret;
-			item_flags |= MLX5_FLOW_LAYER_VXLAN;
+			last_item = MLX5_FLOW_LAYER_VXLAN;
 			break;
 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
 			ret = mlx5_flow_validate_item_vxlan_gpe(items,
@@ -888,28 +889,29 @@
 								error);
 			if (ret < 0)
 				return ret;
-			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
+			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
 			break;
 		case RTE_FLOW_ITEM_TYPE_MPLS:
-			ret = mlx5_flow_validate_item_mpls(items, item_flags,
-							   next_protocol,
-							   error);
+			ret = mlx5_flow_validate_item_mpls(dev, items,
+							   item_flags,
+							   last_item, error);
 			if (ret < 0)
 				return ret;
-			item_flags |= MLX5_FLOW_LAYER_MPLS;
+			last_item = MLX5_FLOW_LAYER_MPLS;
 			break;
 		case RTE_FLOW_ITEM_TYPE_META:
 			ret = flow_dv_validate_item_meta(dev, items, attr,
 							 error);
 			if (ret < 0)
 				return ret;
-			item_flags |= MLX5_FLOW_ITEM_METADATA;
+			last_item = MLX5_FLOW_ITEM_METADATA;
 			break;
 		default:
 			return rte_flow_error_set(error, ENOTSUP,
 						  RTE_FLOW_ERROR_TYPE_ITEM,
 						  NULL, "item not supported");
 		}
+		item_flags |= last_item;
 	}
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
 		if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index d6d95db..0925d00 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1017,6 +1017,7 @@
 	int ret;
 	uint64_t action_flags = 0;
 	uint64_t item_flags = 0;
+	uint64_t last_item = 0;
 	uint8_t next_protocol = 0xff;
 
 	if (items == NULL)
@@ -1036,26 +1037,26 @@
 							  error);
 			if (ret < 0)
 				return ret;
-			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
-					       MLX5_FLOW_LAYER_OUTER_L2;
+			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+					     MLX5_FLOW_LAYER_OUTER_L2;
 			break;
 		case RTE_FLOW_ITEM_TYPE_VLAN:
 			ret = mlx5_flow_validate_item_vlan(items, item_flags,
 							   error);
 			if (ret < 0)
 				return ret;
-			item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
-						MLX5_FLOW_LAYER_INNER_VLAN) :
-					       (MLX5_FLOW_LAYER_OUTER_L2 |
-						MLX5_FLOW_LAYER_OUTER_VLAN);
+			last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
+					      MLX5_FLOW_LAYER_INNER_VLAN) :
+					     (MLX5_FLOW_LAYER_OUTER_L2 |
+					      MLX5_FLOW_LAYER_OUTER_VLAN);
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
 			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
 							   error);
 			if (ret < 0)
 				return ret;
-			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
-					       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
 			if (items->mask != NULL &&
 			    ((const struct rte_flow_item_ipv4 *)
 			     items->mask)->hdr.next_proto_id) {
@@ -1075,8 +1076,8 @@
 							   error);
 			if (ret < 0)
 				return ret;
-			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
-					       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
 			if (items->mask != NULL &&
 			    ((const struct rte_flow_item_ipv6 *)
 			     items->mask)->hdr.proto) {
@@ -1097,8 +1098,8 @@
 							  error);
 			if (ret < 0)
 				return ret;
-			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
-					       MLX5_FLOW_LAYER_OUTER_L4_UDP;
+			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
 			break;
 		case RTE_FLOW_ITEM_TYPE_TCP:
 			ret = mlx5_flow_validate_item_tcp
@@ -1108,15 +1109,15 @@
 						 error);
 			if (ret < 0)
 				return ret;
-			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
-					       MLX5_FLOW_LAYER_OUTER_L4_TCP;
+			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
 			break;
 		case RTE_FLOW_ITEM_TYPE_VXLAN:
 			ret = mlx5_flow_validate_item_vxlan(items, item_flags,
 							    error);
 			if (ret < 0)
 				return ret;
-			item_flags |= MLX5_FLOW_LAYER_VXLAN;
+			last_item = MLX5_FLOW_LAYER_VXLAN;
 			break;
 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
 			ret = mlx5_flow_validate_item_vxlan_gpe(items,
@@ -1124,28 +1125,29 @@
 								dev, error);
 			if (ret < 0)
 				return ret;
-			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
+			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
 			break;
 		case RTE_FLOW_ITEM_TYPE_GRE:
 			ret = mlx5_flow_validate_item_gre(items, item_flags,
 							  next_protocol, error);
 			if (ret < 0)
 				return ret;
-			item_flags |= MLX5_FLOW_LAYER_GRE;
+			last_item = MLX5_FLOW_LAYER_GRE;
 			break;
 		case RTE_FLOW_ITEM_TYPE_MPLS:
-			ret = mlx5_flow_validate_item_mpls(items, item_flags,
-							   next_protocol,
-							   error);
+			ret = mlx5_flow_validate_item_mpls(dev, items,
+							   item_flags,
+							   last_item, error);
 			if (ret < 0)
 				return ret;
-			item_flags |= MLX5_FLOW_LAYER_MPLS;
+			last_item = MLX5_FLOW_LAYER_MPLS;
 			break;
 		default:
 			return rte_flow_error_set(error, ENOTSUP,
 						  RTE_FLOW_ERROR_TYPE_ITEM,
 						  NULL, "item not supported");
 		}
+		item_flags |= last_item;
 	}
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
 		switch (actions->type) {
-- 
1.8.3.1

Thread overview: 10+ messages
2018-11-12  9:20 [dpdk-dev] [PATCH] " Dekel Peled
2018-11-12  9:52 ` Ori Kam
2018-11-13  9:51 ` [dpdk-dev] [PATCH v2 0/2] net/mlx5: update MPLS item support Dekel Peled
2018-11-15 15:17   ` [dpdk-dev] [PATCH v3 0/3] " Dekel Peled
2018-11-15 20:44     ` Shahaf Shuler
2018-11-15 15:17   ` [dpdk-dev] [PATCH v3 1/3] net/mlx5: add MPLS to Direct Verbs flow engine Dekel Peled
2018-11-15 15:17   ` Dekel Peled [this message]
2018-11-15 15:17   ` [dpdk-dev] [PATCH v3 3/3] net/mlx5: fix tunnel ptype of MPLS in UDP Dekel Peled
2018-11-13  9:51 ` [dpdk-dev] [PATCH v2 1/2] net/mlx5: add MPLS to Direct Verbs flow engine Dekel Peled
2018-11-13  9:51 ` [dpdk-dev] [PATCH v2 2/2] net/mlx5: fix MPLS item validation Dekel Peled
