DPDK patches and discussions
 help / color / mirror / Atom feed
* [dpdk-dev] [PATCH 1/2] net/mlx5: fix bit width of flow items
@ 2018-10-25  8:53 Shahaf Shuler
  2018-10-25  8:53 ` [dpdk-dev] [PATCH 2/2] net/mlx5: fix detection and error for multiple item layers Shahaf Shuler
  2018-10-26 23:01 ` [dpdk-dev] [PATCH 1/2] net/mlx5: fix bit width of flow items Yongseok Koh
  0 siblings, 2 replies; 5+ messages in thread
From: Shahaf Shuler @ 2018-10-25  8:53 UTC (permalink / raw)
  To: yskoh; +Cc: dev

Apply the changes from commit c744f6b1b969 ("net/mlx5: fix bit width of
item and action flags") in some places that were overlooked.

Fixes: c744f6b1b969 ("net/mlx5: fix bit width of item and action flags")
Fixes: 23c1d42c7138 ("net/mlx5: split flow validation to dedicated function")

Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
---
 drivers/net/mlx5/mlx5_flow.c | 10 +++++-----
 drivers/net/mlx5/mlx5_flow.h |  4 ++--
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 280af0abce..87189a3405 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -275,7 +275,7 @@ static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
 
 /* Tunnel information. */
 struct mlx5_flow_tunnel_info {
-	uint32_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
+	uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
 	uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
 };
 
@@ -1079,7 +1079,7 @@ mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
  */
 int
 mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
-			     int64_t item_flags,
+			     uint64_t item_flags,
 			     struct rte_flow_error *error)
 {
 	const struct rte_flow_item_vlan *spec = item->spec;
@@ -1091,11 +1091,11 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
 	uint16_t vlan_tag = 0;
 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 	int ret;
-	const uint32_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
+	const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
 					MLX5_FLOW_LAYER_INNER_L4) :
 				       (MLX5_FLOW_LAYER_OUTER_L3 |
 					MLX5_FLOW_LAYER_OUTER_L4);
-	const uint32_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+	const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
 					MLX5_FLOW_LAYER_OUTER_VLAN;
 
 	if (item_flags & vlanm)
@@ -1145,7 +1145,7 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
  */
 int
 mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
-			     int64_t item_flags,
+			     uint64_t item_flags,
 			     struct rte_flow_error *error)
 {
 	const struct rte_flow_item_ipv4 *mask = item->mask;
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 61299d66b3..c24d26ed45 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -336,7 +336,7 @@ int mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
 				uint8_t target_protocol,
 				struct rte_flow_error *error);
 int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
-				 int64_t item_flags,
+				 uint64_t item_flags,
 				 struct rte_flow_error *error);
 int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
 				 uint64_t item_flags,
@@ -355,7 +355,7 @@ int mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
 				uint8_t target_protocol,
 				struct rte_flow_error *error);
 int mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
-				 int64_t item_flags,
+				 uint64_t item_flags,
 				 struct rte_flow_error *error);
 int mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
 				  uint64_t item_flags,
-- 
2.12.0

^ permalink raw reply	[flat|nested] 5+ messages in thread

* [dpdk-dev] [PATCH 2/2] net/mlx5: fix detection and error for multiple item layers
  2018-10-25  8:53 [dpdk-dev] [PATCH 1/2] net/mlx5: fix bit width of flow items Shahaf Shuler
@ 2018-10-25  8:53 ` Shahaf Shuler
  2018-10-26 23:01   ` Yongseok Koh
  2018-10-26 23:01 ` [dpdk-dev] [PATCH 1/2] net/mlx5: fix bit width of flow items Yongseok Koh
  1 sibling, 1 reply; 5+ messages in thread
From: Shahaf Shuler @ 2018-10-25  8:53 UTC (permalink / raw)
  To: yskoh; +Cc: dev, orika

1. The check for the Eth item was wrong, causing an error with
flow rules like:

flow create 0 ingress pattern eth / vlan vid is 13 / ipv4 / gre / eth /
vlan vid is 15 / end actions drop / end

2. Align all error messages.

3. Align the multiple-item-layers checks.

Fixes: 23c1d42c7138 ("net/mlx5: split flow validation to dedicated function")
Cc: orika@mellanox.com

Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
---
 drivers/net/mlx5/mlx5_flow.c | 69 +++++++++++++++++++++++++-------------------
 1 file changed, 39 insertions(+), 30 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 87189a3405..2dd481f81a 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1046,15 +1046,13 @@ mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
 	};
 	int ret;
 	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+	const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2	:
+				       MLX5_FLOW_LAYER_OUTER_L2;
 
-	if (item_flags & MLX5_FLOW_LAYER_OUTER_L2)
+	if (item_flags & ethm)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
-					  "3 levels of l2 are not supported");
-	if ((item_flags & MLX5_FLOW_LAYER_INNER_L2) && !tunnel)
-		return rte_flow_error_set(error, ENOTSUP,
-					  RTE_FLOW_ERROR_TYPE_ITEM, item,
-					  "2 L2 without tunnel are not supported");
+					  "multiple L2 layers not supported");
 	if (!mask)
 		mask = &rte_flow_item_eth_mask;
 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
@@ -1101,7 +1099,7 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
 	if (item_flags & vlanm)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
-					  "VLAN layer already configured");
+					  "multiple VLAN layers not supported");
 	else if ((item_flags & l34m) != 0)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -1158,15 +1156,17 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
 		},
 	};
 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+				      MLX5_FLOW_LAYER_OUTER_L3;
+	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+				      MLX5_FLOW_LAYER_OUTER_L4;
 	int ret;
 
-	if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
-				   MLX5_FLOW_LAYER_OUTER_L3))
+	if (item_flags & l3m)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "multiple L3 layers not supported");
-	else if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
-					MLX5_FLOW_LAYER_OUTER_L4))
+	else if (item_flags & l4m)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "L3 cannot follow an L4 layer.");
@@ -1214,15 +1214,17 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
 		},
 	};
 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+				      MLX5_FLOW_LAYER_OUTER_L3;
+	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+				      MLX5_FLOW_LAYER_OUTER_L4;
 	int ret;
 
-	if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
-				   MLX5_FLOW_LAYER_OUTER_L3))
+	if (item_flags & l3m)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "multiple L3 layers not supported");
-	else if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
-					MLX5_FLOW_LAYER_OUTER_L4))
+	else if (item_flags & l4m)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "L3 cannot follow an L4 layer.");
@@ -1273,6 +1275,10 @@ mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
 {
 	const struct rte_flow_item_udp *mask = item->mask;
 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+				      MLX5_FLOW_LAYER_OUTER_L3;
+	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+				      MLX5_FLOW_LAYER_OUTER_L4;
 	int ret;
 
 	if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
@@ -1280,16 +1286,14 @@ mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "protocol filtering not compatible"
 					  " with UDP layer");
-	if (!(item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
-				     MLX5_FLOW_LAYER_OUTER_L3)))
+	if (!(item_flags & l3m))
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "L3 is mandatory to filter on L4");
-	if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
-				   MLX5_FLOW_LAYER_OUTER_L4))
+	if (item_flags & l4m)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
-					  "L4 layer is already present");
+					  "multiple L4 layers not supported");
 	if (!mask)
 		mask = &rte_flow_item_udp_mask;
 	ret = mlx5_flow_item_acceptable
@@ -1325,6 +1329,10 @@ mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
 {
 	const struct rte_flow_item_tcp *mask = item->mask;
 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+				      MLX5_FLOW_LAYER_OUTER_L3;
+	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+				      MLX5_FLOW_LAYER_OUTER_L4;
 	int ret;
 
 	assert(flow_mask);
@@ -1333,16 +1341,14 @@ mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "protocol filtering not compatible"
 					  " with TCP layer");
-	if (!(item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
-				     MLX5_FLOW_LAYER_OUTER_L3)))
+	if (!(item_flags & l3m))
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "L3 is mandatory to filter on L4");
-	if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
-				   MLX5_FLOW_LAYER_OUTER_L4))
+	if (item_flags & l4m)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
-					  "L4 layer is already present");
+					  "multiple L4 layers not supported");
 	if (!mask)
 		mask = &rte_flow_item_tcp_mask;
 	ret = mlx5_flow_item_acceptable
@@ -1387,7 +1393,8 @@ mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
-					  "a tunnel is already present");
+					  "multiple tunnel layers not"
+					  " supported");
 	/*
 	 * Verify only UDPv4 is present as defined in
 	 * https://tools.ietf.org/html/rfc7348
@@ -1473,7 +1480,8 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
-					  "a tunnel is already present");
+					  "multiple tunnel layers not"
+					  " supported");
 	/*
 	 * Verify only UDPv4 is present as defined in
 	 * https://tools.ietf.org/html/rfc7348
@@ -1556,7 +1564,8 @@ mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
-					  "a tunnel is already present");
+					  "multiple tunnel layers not"
+					  " supported");
 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -1613,8 +1622,8 @@ mlx5_flow_validate_item_mpls(const struct rte_flow_item *item __rte_unused,
 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
-					  "a tunnel is already"
-					  " present");
+					  "multiple tunnel layers not"
+					  " supported");
 	if (!mask)
 		mask = &rte_flow_item_mpls_mask;
 	ret = mlx5_flow_item_acceptable
-- 
2.12.0

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [dpdk-dev] [PATCH 1/2] net/mlx5: fix bit width of flow items
  2018-10-25  8:53 [dpdk-dev] [PATCH 1/2] net/mlx5: fix bit width of flow items Shahaf Shuler
  2018-10-25  8:53 ` [dpdk-dev] [PATCH 2/2] net/mlx5: fix detection and error for multiple item layers Shahaf Shuler
@ 2018-10-26 23:01 ` Yongseok Koh
  2018-10-28 12:53   ` Shahaf Shuler
  1 sibling, 1 reply; 5+ messages in thread
From: Yongseok Koh @ 2018-10-26 23:01 UTC (permalink / raw)
  To: Shahaf Shuler; +Cc: dev


> On Oct 25, 2018, at 1:53 AM, Shahaf Shuler <shahafs@mellanox.com> wrote:
> 
> Apply the changes from commit c744f6b1b969 ("net/mlx5: fix bit width of
> item and action flags") in some places that were overlooked.
> 
> Fixes: c744f6b1b969 ("net/mlx5: fix bit width of item and action flags")
> Fixes: 23c1d42c7138 ("net/mlx5: split flow validation to dedicated function")
> 
> Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
> ---
Acked-by: Yongseok Koh <yskoh@mellanox.com>
 
Thanks

> drivers/net/mlx5/mlx5_flow.c | 10 +++++-----
> drivers/net/mlx5/mlx5_flow.h |  4 ++--
> 2 files changed, 7 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
> index 280af0abce..87189a3405 100644
> --- a/drivers/net/mlx5/mlx5_flow.c
> +++ b/drivers/net/mlx5/mlx5_flow.c
> @@ -275,7 +275,7 @@ static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
> 
> /* Tunnel information. */
> struct mlx5_flow_tunnel_info {
> -	uint32_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
> +	uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
> 	uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
> };
> 
> @@ -1079,7 +1079,7 @@ mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
>  */
> int
> mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
> -			     int64_t item_flags,
> +			     uint64_t item_flags,
> 			     struct rte_flow_error *error)
> {
> 	const struct rte_flow_item_vlan *spec = item->spec;
> @@ -1091,11 +1091,11 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
> 	uint16_t vlan_tag = 0;
> 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
> 	int ret;
> -	const uint32_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
> +	const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
> 					MLX5_FLOW_LAYER_INNER_L4) :
> 				       (MLX5_FLOW_LAYER_OUTER_L3 |
> 					MLX5_FLOW_LAYER_OUTER_L4);
> -	const uint32_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
> +	const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
> 					MLX5_FLOW_LAYER_OUTER_VLAN;
> 
> 	if (item_flags & vlanm)
> @@ -1145,7 +1145,7 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
>  */
> int
> mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
> -			     int64_t item_flags,
> +			     uint64_t item_flags,
> 			     struct rte_flow_error *error)
> {
> 	const struct rte_flow_item_ipv4 *mask = item->mask;
> diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
> index 61299d66b3..c24d26ed45 100644
> --- a/drivers/net/mlx5/mlx5_flow.h
> +++ b/drivers/net/mlx5/mlx5_flow.h
> @@ -336,7 +336,7 @@ int mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
> 				uint8_t target_protocol,
> 				struct rte_flow_error *error);
> int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
> -				 int64_t item_flags,
> +				 uint64_t item_flags,
> 				 struct rte_flow_error *error);
> int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
> 				 uint64_t item_flags,
> @@ -355,7 +355,7 @@ int mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
> 				uint8_t target_protocol,
> 				struct rte_flow_error *error);
> int mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
> -				 int64_t item_flags,
> +				 uint64_t item_flags,
> 				 struct rte_flow_error *error);
> int mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
> 				  uint64_t item_flags,
> -- 
> 2.12.0
> 

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [dpdk-dev] [PATCH 2/2] net/mlx5: fix detection and error for multiple item layers
  2018-10-25  8:53 ` [dpdk-dev] [PATCH 2/2] net/mlx5: fix detection and error for multiple item layers Shahaf Shuler
@ 2018-10-26 23:01   ` Yongseok Koh
  0 siblings, 0 replies; 5+ messages in thread
From: Yongseok Koh @ 2018-10-26 23:01 UTC (permalink / raw)
  To: Shahaf Shuler; +Cc: dev, Ori Kam


> On Oct 25, 2018, at 1:53 AM, Shahaf Shuler <shahafs@mellanox.com> wrote:
> 
> 1. The check for the Eth item was wrong. causing an error with
> flow rules like:
> 
> flow create 0 ingress pattern eth / vlan vid is 13 / ipv4 / gre / eth /
> vlan vid is 15 / end actions drop / end
> 
> 2. align all error messages.
> 
> 3. align multiple item layers check.
> 
> Fixes: 23c1d42c7138 ("net/mlx5: split flow validation to dedicated function")
> Cc: orika@mellanox.com
> 
> Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
> ---
Acked-by: Yongseok Koh <yskoh@mellanox.com>
 
Thanks

> drivers/net/mlx5/mlx5_flow.c | 69 +++++++++++++++++++++++++-------------------
> 1 file changed, 39 insertions(+), 30 deletions(-)
> 
> diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
> index 87189a3405..2dd481f81a 100644
> --- a/drivers/net/mlx5/mlx5_flow.c
> +++ b/drivers/net/mlx5/mlx5_flow.c
> @@ -1046,15 +1046,13 @@ mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
> 	};
> 	int ret;
> 	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
> +	const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2	:
> +				       MLX5_FLOW_LAYER_OUTER_L2;
> 
> -	if (item_flags & MLX5_FLOW_LAYER_OUTER_L2)
> +	if (item_flags & ethm)
> 		return rte_flow_error_set(error, ENOTSUP,
> 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
> -					  "3 levels of l2 are not supported");
> -	if ((item_flags & MLX5_FLOW_LAYER_INNER_L2) && !tunnel)
> -		return rte_flow_error_set(error, ENOTSUP,
> -					  RTE_FLOW_ERROR_TYPE_ITEM, item,
> -					  "2 L2 without tunnel are not supported");
> +					  "multiple L2 layers not supported");
> 	if (!mask)
> 		mask = &rte_flow_item_eth_mask;
> 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
> @@ -1101,7 +1099,7 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
> 	if (item_flags & vlanm)
> 		return rte_flow_error_set(error, EINVAL,
> 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
> -					  "VLAN layer already configured");
> +					  "multiple VLAN layers not supported");
> 	else if ((item_flags & l34m) != 0)
> 		return rte_flow_error_set(error, EINVAL,
> 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
> @@ -1158,15 +1156,17 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
> 		},
> 	};
> 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
> +	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
> +				      MLX5_FLOW_LAYER_OUTER_L3;
> +	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
> +				      MLX5_FLOW_LAYER_OUTER_L4;
> 	int ret;
> 
> -	if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
> -				   MLX5_FLOW_LAYER_OUTER_L3))
> +	if (item_flags & l3m)
> 		return rte_flow_error_set(error, ENOTSUP,
> 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
> 					  "multiple L3 layers not supported");
> -	else if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
> -					MLX5_FLOW_LAYER_OUTER_L4))
> +	else if (item_flags & l4m)
> 		return rte_flow_error_set(error, EINVAL,
> 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
> 					  "L3 cannot follow an L4 layer.");
> @@ -1214,15 +1214,17 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
> 		},
> 	};
> 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
> +	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
> +				      MLX5_FLOW_LAYER_OUTER_L3;
> +	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
> +				      MLX5_FLOW_LAYER_OUTER_L4;
> 	int ret;
> 
> -	if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
> -				   MLX5_FLOW_LAYER_OUTER_L3))
> +	if (item_flags & l3m)
> 		return rte_flow_error_set(error, ENOTSUP,
> 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
> 					  "multiple L3 layers not supported");
> -	else if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
> -					MLX5_FLOW_LAYER_OUTER_L4))
> +	else if (item_flags & l4m)
> 		return rte_flow_error_set(error, EINVAL,
> 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
> 					  "L3 cannot follow an L4 layer.");
> @@ -1273,6 +1275,10 @@ mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
> {
> 	const struct rte_flow_item_udp *mask = item->mask;
> 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
> +	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
> +				      MLX5_FLOW_LAYER_OUTER_L3;
> +	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
> +				      MLX5_FLOW_LAYER_OUTER_L4;
> 	int ret;
> 
> 	if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
> @@ -1280,16 +1286,14 @@ mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
> 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
> 					  "protocol filtering not compatible"
> 					  " with UDP layer");
> -	if (!(item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
> -				     MLX5_FLOW_LAYER_OUTER_L3)))
> +	if (!(item_flags & l3m))
> 		return rte_flow_error_set(error, EINVAL,
> 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
> 					  "L3 is mandatory to filter on L4");
> -	if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
> -				   MLX5_FLOW_LAYER_OUTER_L4))
> +	if (item_flags & l4m)
> 		return rte_flow_error_set(error, EINVAL,
> 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
> -					  "L4 layer is already present");
> +					  "multiple L4 layers not supported");
> 	if (!mask)
> 		mask = &rte_flow_item_udp_mask;
> 	ret = mlx5_flow_item_acceptable
> @@ -1325,6 +1329,10 @@ mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
> {
> 	const struct rte_flow_item_tcp *mask = item->mask;
> 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
> +	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
> +				      MLX5_FLOW_LAYER_OUTER_L3;
> +	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
> +				      MLX5_FLOW_LAYER_OUTER_L4;
> 	int ret;
> 
> 	assert(flow_mask);
> @@ -1333,16 +1341,14 @@ mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
> 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
> 					  "protocol filtering not compatible"
> 					  " with TCP layer");
> -	if (!(item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
> -				     MLX5_FLOW_LAYER_OUTER_L3)))
> +	if (!(item_flags & l3m))
> 		return rte_flow_error_set(error, EINVAL,
> 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
> 					  "L3 is mandatory to filter on L4");
> -	if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
> -				   MLX5_FLOW_LAYER_OUTER_L4))
> +	if (item_flags & l4m)
> 		return rte_flow_error_set(error, EINVAL,
> 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
> -					  "L4 layer is already present");
> +					  "multiple L4 layers not supported");
> 	if (!mask)
> 		mask = &rte_flow_item_tcp_mask;
> 	ret = mlx5_flow_item_acceptable
> @@ -1387,7 +1393,8 @@ mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
> 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
> 		return rte_flow_error_set(error, ENOTSUP,
> 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
> -					  "a tunnel is already present");
> +					  "multiple tunnel layers not"
> +					  " supported");
> 	/*
> 	 * Verify only UDPv4 is present as defined in
> 	 * https://tools.ietf.org/html/rfc7348
> @@ -1473,7 +1480,8 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
> 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
> 		return rte_flow_error_set(error, ENOTSUP,
> 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
> -					  "a tunnel is already present");
> +					  "multiple tunnel layers not"
> +					  " supported");
> 	/*
> 	 * Verify only UDPv4 is present as defined in
> 	 * https://tools.ietf.org/html/rfc7348
> @@ -1556,7 +1564,8 @@ mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
> 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
> 		return rte_flow_error_set(error, ENOTSUP,
> 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
> -					  "a tunnel is already present");
> +					  "multiple tunnel layers not"
> +					  " supported");
> 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
> 		return rte_flow_error_set(error, ENOTSUP,
> 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
> @@ -1613,8 +1622,8 @@ mlx5_flow_validate_item_mpls(const struct rte_flow_item *item __rte_unused,
> 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
> 		return rte_flow_error_set(error, ENOTSUP,
> 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
> -					  "a tunnel is already"
> -					  " present");
> +					  "multiple tunnel layers not"
> +					  " supported");
> 	if (!mask)
> 		mask = &rte_flow_item_mpls_mask;
> 	ret = mlx5_flow_item_acceptable
> -- 
> 2.12.0
> 

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [dpdk-dev] [PATCH 1/2] net/mlx5: fix bit width of flow items
  2018-10-26 23:01 ` [dpdk-dev] [PATCH 1/2] net/mlx5: fix bit width of flow items Yongseok Koh
@ 2018-10-28 12:53   ` Shahaf Shuler
  0 siblings, 0 replies; 5+ messages in thread
From: Shahaf Shuler @ 2018-10-28 12:53 UTC (permalink / raw)
  To: Yongseok Koh; +Cc: dev

Saturday, October 27, 2018 2:01 AM, Yongseok Koh:
> Subject: Re: [PATCH 1/2] net/mlx5: fix bit width of flow items
> 
> 
> > On Oct 25, 2018, at 1:53 AM, Shahaf Shuler <shahafs@mellanox.com>
> wrote:
> >
> > Apply the changes from commit c744f6b1b969 ("net/mlx5: fix bit width
> > of item and action flags") in some places that were overlooked.
> >
> > Fixes: c744f6b1b969 ("net/mlx5: fix bit width of item and action
> > flags")
> > Fixes: 23c1d42c7138 ("net/mlx5: split flow validation to dedicated
> > function")
> >
> > Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
> > ---
> Acked-by: Yongseok Koh <yskoh@mellanox.com>

Series applied to next-net-mlx, thanks. 

> 
> Thanks
> 
> > drivers/net/mlx5/mlx5_flow.c | 10 +++++-----
> > drivers/net/mlx5/mlx5_flow.h |  4 ++--
> > 2 files changed, 7 insertions(+), 7 deletions(-)
> >
> > diff --git a/drivers/net/mlx5/mlx5_flow.c
> > b/drivers/net/mlx5/mlx5_flow.c index 280af0abce..87189a3405 100644
> > --- a/drivers/net/mlx5/mlx5_flow.c
> > +++ b/drivers/net/mlx5/mlx5_flow.c
> > @@ -275,7 +275,7 @@ static const uint32_t
> > priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
> >
> > /* Tunnel information. */
> > struct mlx5_flow_tunnel_info {
> > -	uint32_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
> > +	uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
> > 	uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */ };
> >
> > @@ -1079,7 +1079,7 @@ mlx5_flow_validate_item_eth(const struct
> > rte_flow_item *item,  */ int mlx5_flow_validate_item_vlan(const struct
> > rte_flow_item *item,
> > -			     int64_t item_flags,
> > +			     uint64_t item_flags,
> > 			     struct rte_flow_error *error)
> > {
> > 	const struct rte_flow_item_vlan *spec = item->spec; @@ -1091,11
> > +1091,11 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item
> *item,
> > 	uint16_t vlan_tag = 0;
> > 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
> > 	int ret;
> > -	const uint32_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
> > +	const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
> > 					MLX5_FLOW_LAYER_INNER_L4) :
> > 				       (MLX5_FLOW_LAYER_OUTER_L3 |
> > 					MLX5_FLOW_LAYER_OUTER_L4);
> > -	const uint32_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
> > +	const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
> > 					MLX5_FLOW_LAYER_OUTER_VLAN;
> >
> > 	if (item_flags & vlanm)
> > @@ -1145,7 +1145,7 @@ mlx5_flow_validate_item_vlan(const struct
> > rte_flow_item *item,  */ int mlx5_flow_validate_item_ipv4(const struct
> > rte_flow_item *item,
> > -			     int64_t item_flags,
> > +			     uint64_t item_flags,
> > 			     struct rte_flow_error *error)
> > {
> > 	const struct rte_flow_item_ipv4 *mask = item->mask; diff --git
> > a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index
> > 61299d66b3..c24d26ed45 100644
> > --- a/drivers/net/mlx5/mlx5_flow.h
> > +++ b/drivers/net/mlx5/mlx5_flow.h
> > @@ -336,7 +336,7 @@ int mlx5_flow_validate_item_gre(const struct
> rte_flow_item *item,
> > 				uint8_t target_protocol,
> > 				struct rte_flow_error *error);
> > int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
> > -				 int64_t item_flags,
> > +				 uint64_t item_flags,
> > 				 struct rte_flow_error *error);
> > int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
> > 				 uint64_t item_flags,
> > @@ -355,7 +355,7 @@ int mlx5_flow_validate_item_udp(const struct
> rte_flow_item *item,
> > 				uint8_t target_protocol,
> > 				struct rte_flow_error *error);
> > int mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
> > -				 int64_t item_flags,
> > +				 uint64_t item_flags,
> > 				 struct rte_flow_error *error);
> > int mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
> > 				  uint64_t item_flags,
> > --
> > 2.12.0
> >

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2018-10-28 12:53 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-10-25  8:53 [dpdk-dev] [PATCH 1/2] net/mlx5: fix bit width of flow items Shahaf Shuler
2018-10-25  8:53 ` [dpdk-dev] [PATCH 2/2] net/mlx5: fix detection and error for multiple item layers Shahaf Shuler
2018-10-26 23:01   ` Yongseok Koh
2018-10-26 23:01 ` [dpdk-dev] [PATCH 1/2] net/mlx5: fix bit width of flow items Yongseok Koh
2018-10-28 12:53   ` Shahaf Shuler

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).