patches for DPDK stable branches
* [dpdk-stable] [PATCH 18.11] net/mlx5: improve flow item IP validation
@ 2019-12-17  7:27 Xiaoyu Min
  2019-12-17 14:09 ` Kevin Traynor
  0 siblings, 1 reply; 2+ messages in thread
From: Xiaoyu Min @ 2019-12-17  7:27 UTC (permalink / raw)
  To: stable, Matan Azrad, Shahaf Shuler, Viacheslav Ovsiienko; +Cc: Ori Kam

[ upstream commit fba3213015891c7ec9634e66bec5eb8eaed0d70a ]

Currently the PMD does not check whether the user-specified Ethernet
type conflicts with the IPv4/IPv6 item that follows it, which leads
the HW to refuse to create the rule, for example:

  ... pattern eth type is 0x86dd / ipv4 / end ...

Here the Ethernet type is IPv6 while an IPv4 item follows; this should
fail validation and report a detailed error.

Fixes: 23c1d42c7138 ("net/mlx5: split flow validation to dedicated function")

Signed-off-by: Xiaoyu Min <jackmin@mellanox.com>
Acked-by: Ori Kam <orika@mellanox.com>
---
 drivers/net/mlx5/mlx5_flow.c       | 22 ++++++++++++++++++++++
 drivers/net/mlx5/mlx5_flow.h       |  8 ++++++++
 drivers/net/mlx5/mlx5_flow_dv.c    | 27 +++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_flow_tcf.c   |  8 ++++----
 drivers/net/mlx5/mlx5_flow_verbs.c | 27 +++++++++++++++++++++++++++
 5 files changed, 88 insertions(+), 4 deletions(-)
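
Note (not part of the applied diff): below is a minimal, self-contained
C sketch of the consistency check this patch adds to
mlx5_flow_validate_item_ipv4(); the IPv6 variant is symmetric.  The
helper name ipv4_may_follow() and the local ETHER_TYPE_IPv4 define are
illustrative assumptions only; the real code uses the MLX5_FLOW_LAYER_*
bits and rte_flow_error_set() exactly as shown in the diff.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative define; DPDK 18.11 provides ETHER_TYPE_IPv4 in rte_ether.h. */
#define ETHER_TYPE_IPv4 0x0800

/*
 * Return false when the preceding item is an L2/VLAN layer whose
 * (masked) ether type is set and is not IPv4, i.e. the case the patch
 * now rejects at validation time.  ether_type == 0 means the eth/vlan
 * item did not constrain the type, so any L3 item may follow.
 */
static bool
ipv4_may_follow(uint64_t last_item, uint64_t l2_vlan_mask,
		uint16_t ether_type)
{
	if ((last_item & l2_vlan_mask) && ether_type &&
	    ether_type != ETHER_TYPE_IPv4)
		return false; /* e.g. eth type is 0x86dd / ipv4 / end */
	return true;
}

With this check, a conflicting pattern such as the one in the commit
message fails validation with a descriptive rte_flow error instead of
being rejected later by the HW.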

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index cf9cdcfe3a..f699985932 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1170,6 +1170,8 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
 int
 mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
 			     uint64_t item_flags,
+			     uint64_t last_item,
+			     uint16_t ether_type,
 			     struct rte_flow_error *error)
 {
 	const struct rte_flow_item_ipv4 *mask = item->mask;
@@ -1187,7 +1189,16 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
 				      MLX5_FLOW_LAYER_OUTER_L4;
 	int ret;
+	const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
+				  MLX5_FLOW_LAYER_OUTER_VLAN |
+				  MLX5_FLOW_LAYER_INNER_VLAN);
 
+	if ((last_item & l2_vlan) && ether_type &&
+	    ether_type != ETHER_TYPE_IPv4)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "IPv4 cannot follow L2/VLAN layer "
+					  "which ether type is not IPv4");
 	if (item_flags & l3m)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -1233,6 +1244,8 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
 int
 mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
 			     uint64_t item_flags,
+			     uint64_t last_item,
+			     uint16_t ether_type,
 			     struct rte_flow_error *error)
 {
 	const struct rte_flow_item_ipv6 *mask = item->mask;
@@ -1255,7 +1268,16 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
 				      MLX5_FLOW_LAYER_OUTER_L4;
 	int ret;
+	const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
+				  MLX5_FLOW_LAYER_OUTER_VLAN |
+				  MLX5_FLOW_LAYER_INNER_VLAN);
 
+	if ((last_item & l2_vlan) && ether_type &&
+	    ether_type != ETHER_TYPE_IPv6)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "IPv6 cannot follow L2/VLAN layer "
+					  "which ether type is not IPv6");
 	if (item_flags & l3m)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index e1424c789b..4edc718a38 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -69,6 +69,10 @@
 	(MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
 	 MLX5_FLOW_LAYER_INNER_L4)
 
+/* Layer Masks. */
+#define MLX5_FLOW_LAYER_L2 \
+	(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_INNER_L2)
+
 /* Actions */
 #define MLX5_FLOW_ACTION_DROP (1u << 0)
 #define MLX5_FLOW_ACTION_QUEUE (1u << 1)
@@ -382,9 +386,13 @@ int mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
 				struct rte_flow_error *error);
 int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
 				 uint64_t item_flags,
+				 uint64_t last_item,
+				 uint16_t ether_type,
 				 struct rte_flow_error *error);
 int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
 				 uint64_t item_flags,
+				 uint64_t last_item,
+				 uint16_t ether_type,
 				 struct rte_flow_error *error);
 int mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev,
 				 const struct rte_flow_item *item,
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 3c61083d7d..b854e66dcb 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -783,6 +783,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 	uint64_t item_flags = 0;
 	uint64_t last_item = 0;
 	uint8_t next_protocol = 0xff;
+	uint16_t ether_type = 0;
 	int actions_n = 0;
 
 	if (items == NULL)
@@ -802,6 +803,17 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 				return ret;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
 					     MLX5_FLOW_LAYER_OUTER_L2;
+			if (items->mask != NULL && items->spec != NULL) {
+				ether_type =
+					((const struct rte_flow_item_eth *)
+					 items->spec)->type;
+				ether_type &=
+					((const struct rte_flow_item_eth *)
+					 items->mask)->type;
+				ether_type = rte_be_to_cpu_16(ether_type);
+			} else {
+				ether_type = 0;
+			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_VLAN:
 			ret = mlx5_flow_validate_item_vlan(items, item_flags,
@@ -810,9 +822,22 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 				return ret;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
 					     MLX5_FLOW_LAYER_OUTER_VLAN;
+			if (items->mask != NULL && items->spec != NULL) {
+				ether_type =
+					((const struct rte_flow_item_vlan *)
+					 items->spec)->inner_type;
+				ether_type &=
+					((const struct rte_flow_item_vlan *)
+					 items->mask)->inner_type;
+				ether_type = rte_be_to_cpu_16(ether_type);
+			} else {
+				ether_type = 0;
+			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
 			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
+							   last_item,
+							   ether_type,
 							   error);
 			if (ret < 0)
 				return ret;
@@ -834,6 +859,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
 			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
+							   last_item,
+							   ether_type,
 							   error);
 			if (ret < 0)
 				return ret;
diff --git a/drivers/net/mlx5/mlx5_flow_tcf.c b/drivers/net/mlx5/mlx5_flow_tcf.c
index 92f984f96e..e9c4fe9d75 100644
--- a/drivers/net/mlx5/mlx5_flow_tcf.c
+++ b/drivers/net/mlx5/mlx5_flow_tcf.c
@@ -1529,7 +1529,7 @@ flow_tcf_validate_vxlan_encap(const struct rte_flow_action *action,
 		break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
 			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
-							   error);
+							   0, 0, error);
 			if (ret < 0)
 				return ret;
 			ret = flow_tcf_validate_vxlan_encap_ipv4(items, error);
@@ -1539,7 +1539,7 @@ flow_tcf_validate_vxlan_encap(const struct rte_flow_action *action,
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
 			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
-							   error);
+							   0, 0, error);
 			if (ret < 0)
 				return ret;
 			ret = flow_tcf_validate_vxlan_encap_ipv6(items, error);
@@ -2059,7 +2059,7 @@ flow_tcf_validate(struct rte_eth_dev *dev,
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
 			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
-							   error);
+							   0, 0, error);
 			if (ret < 0)
 				return ret;
 			item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
@@ -2119,7 +2119,7 @@ flow_tcf_validate(struct rte_eth_dev *dev,
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
 			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
-							   error);
+							   0, 0, error);
 			if (ret < 0)
 				return ret;
 			item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 1fdbca3d22..1a8a9e63ca 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1020,6 +1020,7 @@ flow_verbs_validate(struct rte_eth_dev *dev,
 	uint64_t item_flags = 0;
 	uint64_t last_item = 0;
 	uint8_t next_protocol = 0xff;
+	uint16_t ether_type = 0;
 
 	if (items == NULL)
 		return -1;
@@ -1040,6 +1041,17 @@ flow_verbs_validate(struct rte_eth_dev *dev,
 				return ret;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
 					     MLX5_FLOW_LAYER_OUTER_L2;
+			if (items->mask != NULL && items->spec != NULL) {
+				ether_type =
+					((const struct rte_flow_item_eth *)
+					 items->spec)->type;
+				ether_type &=
+					((const struct rte_flow_item_eth *)
+					 items->mask)->type;
+				ether_type = rte_be_to_cpu_16(ether_type);
+			} else {
+				ether_type = 0;
+			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_VLAN:
 			ret = mlx5_flow_validate_item_vlan(items, item_flags,
@@ -1050,9 +1062,22 @@ flow_verbs_validate(struct rte_eth_dev *dev,
 					      MLX5_FLOW_LAYER_INNER_VLAN) :
 					     (MLX5_FLOW_LAYER_OUTER_L2 |
 					      MLX5_FLOW_LAYER_OUTER_VLAN);
+			if (items->mask != NULL && items->spec != NULL) {
+				ether_type =
+					((const struct rte_flow_item_vlan *)
+					 items->spec)->inner_type;
+				ether_type &=
+					((const struct rte_flow_item_vlan *)
+					 items->mask)->inner_type;
+				ether_type = rte_be_to_cpu_16(ether_type);
+			} else {
+				ether_type = 0;
+			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
 			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
+							   last_item,
+							   ether_type,
 							   error);
 			if (ret < 0)
 				return ret;
@@ -1074,6 +1099,8 @@ flow_verbs_validate(struct rte_eth_dev *dev,
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
 			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
+							   last_item,
+							   ether_type,
 							   error);
 			if (ret < 0)
 				return ret;
-- 
2.24.0



* Re: [dpdk-stable] [PATCH 18.11] net/mlx5: improve flow item IP validation
  2019-12-17  7:27 [dpdk-stable] [PATCH 18.11] net/mlx5: improve flow item IP validation Xiaoyu Min
@ 2019-12-17 14:09 ` Kevin Traynor
  0 siblings, 0 replies; 2+ messages in thread
From: Kevin Traynor @ 2019-12-17 14:09 UTC (permalink / raw)
  To: Xiaoyu Min, stable, Matan Azrad, Shahaf Shuler, Viacheslav Ovsiienko
  Cc: Ori Kam

On 17/12/2019 07:27, Xiaoyu Min wrote:
> [ upstream commit fba3213015891c7ec9634e66bec5eb8eaed0d70a ]
> 
> Currently the PMD does not check whether the user-specified Ethernet
> type conflicts with the IPv4/IPv6 item that follows it, which leads
> the HW to refuse to create the rule, for example:
> 
>   ... pattern eth type is 0x86dd / ipv4 / end ...
> 
> Here the Ethernet type is IPv6 while an IPv4 item follows; this should
> fail validation and report a detailed error.
> 
> Fixes: 23c1d42c7138 ("net/mlx5: split flow validation to dedicated function")
> 
> Signed-off-by: Xiaoyu Min <jackmin@mellanox.com>
> Acked-by: Ori Kam <orika@mellanox.com>
> ---

Thanks, applied to 18.11 branch.


