From: Yongseok Koh <yskoh@mellanox.com>
To: Shahaf Shuler <shahafs@mellanox.com>
Cc: dev <dev@dpdk.org>, Ori Kam <orika@mellanox.com>
Subject: Re: [dpdk-dev] [PATCH 2/2] net/mlx5: fix detection and error for multiple item layers
Date: Fri, 26 Oct 2018 23:01:37 +0000 [thread overview]
Message-ID: <8AB65797-D441-4FCC-B95E-2C190B196929@mellanox.com> (raw)
In-Reply-To: <20181025085351.15738-2-shahafs@mellanox.com>
> On Oct 25, 2018, at 1:53 AM, Shahaf Shuler <shahafs@mellanox.com> wrote:
>
> 1. The check for the Eth item was wrong, causing an error with
> flow rules like:
>
> flow create 0 ingress pattern eth / vlan vid is 13 / ipv4 / gre / eth /
> vlan vid is 15 / end actions drop / end
>
> 2. Align all error messages.
>
> 3. Align the multiple-item-layers checks.
>
> Fixes: 23c1d42c7138 ("net/mlx5: split flow validation to dedicated function")
> Cc: orika@mellanox.com
>
> Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
> ---
Acked-by: Yongseok Koh <yskoh@mellanox.com>
Thanks,
> drivers/net/mlx5/mlx5_flow.c | 69 +++++++++++++++++++++++++-------------------
> 1 file changed, 39 insertions(+), 30 deletions(-)
>
> diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
> index 87189a3405..2dd481f81a 100644
> --- a/drivers/net/mlx5/mlx5_flow.c
> +++ b/drivers/net/mlx5/mlx5_flow.c
> @@ -1046,15 +1046,13 @@ mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
> };
> int ret;
> int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
> + const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
> + MLX5_FLOW_LAYER_OUTER_L2;
>
> - if (item_flags & MLX5_FLOW_LAYER_OUTER_L2)
> + if (item_flags & ethm)
> return rte_flow_error_set(error, ENOTSUP,
> RTE_FLOW_ERROR_TYPE_ITEM, item,
> - "3 levels of l2 are not supported");
> - if ((item_flags & MLX5_FLOW_LAYER_INNER_L2) && !tunnel)
> - return rte_flow_error_set(error, ENOTSUP,
> - RTE_FLOW_ERROR_TYPE_ITEM, item,
> - "2 L2 without tunnel are not supported");
> + "multiple L2 layers not supported");
> if (!mask)
> mask = &rte_flow_item_eth_mask;
> ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
> @@ -1101,7 +1099,7 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
> if (item_flags & vlanm)
> return rte_flow_error_set(error, EINVAL,
> RTE_FLOW_ERROR_TYPE_ITEM, item,
> - "VLAN layer already configured");
> + "multiple VLAN layers not supported");
> else if ((item_flags & l34m) != 0)
> return rte_flow_error_set(error, EINVAL,
> RTE_FLOW_ERROR_TYPE_ITEM, item,
> @@ -1158,15 +1156,17 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
> },
> };
> const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
> + const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
> + MLX5_FLOW_LAYER_OUTER_L3;
> + const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
> + MLX5_FLOW_LAYER_OUTER_L4;
> int ret;
>
> - if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
> - MLX5_FLOW_LAYER_OUTER_L3))
> + if (item_flags & l3m)
> return rte_flow_error_set(error, ENOTSUP,
> RTE_FLOW_ERROR_TYPE_ITEM, item,
> "multiple L3 layers not supported");
> - else if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
> - MLX5_FLOW_LAYER_OUTER_L4))
> + else if (item_flags & l4m)
> return rte_flow_error_set(error, EINVAL,
> RTE_FLOW_ERROR_TYPE_ITEM, item,
> "L3 cannot follow an L4 layer.");
> @@ -1214,15 +1214,17 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
> },
> };
> const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
> + const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
> + MLX5_FLOW_LAYER_OUTER_L3;
> + const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
> + MLX5_FLOW_LAYER_OUTER_L4;
> int ret;
>
> - if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
> - MLX5_FLOW_LAYER_OUTER_L3))
> + if (item_flags & l3m)
> return rte_flow_error_set(error, ENOTSUP,
> RTE_FLOW_ERROR_TYPE_ITEM, item,
> "multiple L3 layers not supported");
> - else if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
> - MLX5_FLOW_LAYER_OUTER_L4))
> + else if (item_flags & l4m)
> return rte_flow_error_set(error, EINVAL,
> RTE_FLOW_ERROR_TYPE_ITEM, item,
> "L3 cannot follow an L4 layer.");
> @@ -1273,6 +1275,10 @@ mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
> {
> const struct rte_flow_item_udp *mask = item->mask;
> const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
> + const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
> + MLX5_FLOW_LAYER_OUTER_L3;
> + const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
> + MLX5_FLOW_LAYER_OUTER_L4;
> int ret;
>
> if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
> @@ -1280,16 +1286,14 @@ mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
> RTE_FLOW_ERROR_TYPE_ITEM, item,
> "protocol filtering not compatible"
> " with UDP layer");
> - if (!(item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
> - MLX5_FLOW_LAYER_OUTER_L3)))
> + if (!(item_flags & l3m))
> return rte_flow_error_set(error, EINVAL,
> RTE_FLOW_ERROR_TYPE_ITEM, item,
> "L3 is mandatory to filter on L4");
> - if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
> - MLX5_FLOW_LAYER_OUTER_L4))
> + if (item_flags & l4m)
> return rte_flow_error_set(error, EINVAL,
> RTE_FLOW_ERROR_TYPE_ITEM, item,
> - "L4 layer is already present");
> + "multiple L4 layers not supported");
> if (!mask)
> mask = &rte_flow_item_udp_mask;
> ret = mlx5_flow_item_acceptable
> @@ -1325,6 +1329,10 @@ mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
> {
> const struct rte_flow_item_tcp *mask = item->mask;
> const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
> + const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
> + MLX5_FLOW_LAYER_OUTER_L3;
> + const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
> + MLX5_FLOW_LAYER_OUTER_L4;
> int ret;
>
> assert(flow_mask);
> @@ -1333,16 +1341,14 @@ mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
> RTE_FLOW_ERROR_TYPE_ITEM, item,
> "protocol filtering not compatible"
> " with TCP layer");
> - if (!(item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
> - MLX5_FLOW_LAYER_OUTER_L3)))
> + if (!(item_flags & l3m))
> return rte_flow_error_set(error, EINVAL,
> RTE_FLOW_ERROR_TYPE_ITEM, item,
> "L3 is mandatory to filter on L4");
> - if (item_flags & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
> - MLX5_FLOW_LAYER_OUTER_L4))
> + if (item_flags & l4m)
> return rte_flow_error_set(error, EINVAL,
> RTE_FLOW_ERROR_TYPE_ITEM, item,
> - "L4 layer is already present");
> + "multiple L4 layers not supported");
> if (!mask)
> mask = &rte_flow_item_tcp_mask;
> ret = mlx5_flow_item_acceptable
> @@ -1387,7 +1393,8 @@ mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
> if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
> return rte_flow_error_set(error, ENOTSUP,
> RTE_FLOW_ERROR_TYPE_ITEM, item,
> - "a tunnel is already present");
> + "multiple tunnel layers not"
> + " supported");
> /*
> * Verify only UDPv4 is present as defined in
> * https://tools.ietf.org/html/rfc7348
> @@ -1473,7 +1480,8 @@ mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
> if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
> return rte_flow_error_set(error, ENOTSUP,
> RTE_FLOW_ERROR_TYPE_ITEM, item,
> - "a tunnel is already present");
> + "multiple tunnel layers not"
> + " supported");
> /*
> * Verify only UDPv4 is present as defined in
> * https://tools.ietf.org/html/rfc7348
> @@ -1556,7 +1564,8 @@ mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
> if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
> return rte_flow_error_set(error, ENOTSUP,
> RTE_FLOW_ERROR_TYPE_ITEM, item,
> - "a tunnel is already present");
> + "multiple tunnel layers not"
> + " supported");
> if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
> return rte_flow_error_set(error, ENOTSUP,
> RTE_FLOW_ERROR_TYPE_ITEM, item,
> @@ -1613,8 +1622,8 @@ mlx5_flow_validate_item_mpls(const struct rte_flow_item *item __rte_unused,
> if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
> return rte_flow_error_set(error, ENOTSUP,
> RTE_FLOW_ERROR_TYPE_ITEM, item,
> - "a tunnel is already"
> - " present");
> + "multiple tunnel layers not"
> + " supported");
> if (!mask)
> mask = &rte_flow_item_mpls_mask;
> ret = mlx5_flow_item_acceptable
> --
> 2.12.0
>
next prev parent reply other threads:[~2018-10-26 23:01 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-10-25 8:53 [dpdk-dev] [PATCH 1/2] net/mlx5: fix bit width of flow items Shahaf Shuler
2018-10-25 8:53 ` [dpdk-dev] [PATCH 2/2] net/mlx5: fix detection and error for multiple item layers Shahaf Shuler
2018-10-26 23:01 ` Yongseok Koh [this message]
2018-10-26 23:01 ` [dpdk-dev] [PATCH 1/2] net/mlx5: fix bit width of flow items Yongseok Koh
2018-10-28 12:53 ` Shahaf Shuler
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=8AB65797-D441-4FCC-B95E-2C190B196929@mellanox.com \
--to=yskoh@mellanox.com \
--cc=dev@dpdk.org \
--cc=orika@mellanox.com \
--cc=shahafs@mellanox.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).