From: Gregory Etelson <getelson@nvidia.com>
To: <dev@dpdk.org>
Cc: <getelson@nvidia.com>, <mkashani@nvidia.com>,
<rasland@nvidia.com>, "Ori Kam" <orika@nvidia.com>,
Dariusz Sosnowski <dsosnowski@nvidia.com>,
"Viacheslav Ovsiienko" <viacheslavo@nvidia.com>,
Suanming Mou <suanmingm@nvidia.com>,
Matan Azrad <matan@nvidia.com>, Yongseok Koh <yskoh@mellanox.com>
Subject: [PATCH 2/2] net/mlx5: fix IP-in-IP tunnels recognition
Date: Thu, 29 Feb 2024 18:05:04 +0200
Message-ID: <20240229160505.630586-3-getelson@nvidia.com>
In-Reply-To: <20240229160505.630586-1-getelson@nvidia.com>

The patch fixes IP-in-IP tunnel recognition for the following patterns:

  / [ipv4|ipv6] proto is [ipv4|ipv6] / end

  / [ipv4|ipv6] / [ipv4|ipv6] /

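As an illustration only (not part of the patch), the first pattern form
corresponds to an rte_flow rule of roughly the following shape; the port
number, attributes and the drop action are arbitrary placeholders chosen
for the sketch:

    /* Sketch: match an outer IPv4 header whose next protocol is IPv4,
     * i.e. an IP-in-IP tunnel declared through the proto field only. */
    #include <netinet/in.h>
    #include <rte_flow.h>

    static struct rte_flow *
    create_ipip_rule(uint16_t port_id, struct rte_flow_error *error)
    {
            const struct rte_flow_attr attr = { .ingress = 1 };
            const struct rte_flow_item_ipv4 l3_spec = {
                    .hdr.next_proto_id = IPPROTO_IPIP,
            };
            const struct rte_flow_item_ipv4 l3_mask = {
                    .hdr.next_proto_id = 0xff,
            };
            const struct rte_flow_item pattern[] = {
                    /* "/ ipv4 proto is ipv4" */
                    {
                            .type = RTE_FLOW_ITEM_TYPE_IPV4,
                            .spec = &l3_spec,
                            .mask = &l3_mask,
                    },
                    /* "/ end" */
                    { .type = RTE_FLOW_ITEM_TYPE_END },
            };
            const struct rte_flow_action actions[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_DROP },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };

            return rte_flow_create(port_id, &attr, pattern, actions, error);
    }

The second pattern form stacks two IP items without an explicit
next-protocol match; with this patch the driver recognizes it as an L3
tunnel from the item sequence itself.
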
Fixes: 3d69434113d1 ("net/mlx5: add Direct Verbs validation function")
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Ori Kam <orika@nvidia.com>
---
drivers/net/mlx5/mlx5_flow_dv.c | 104 ++++++++++++++++++++++++--------
1 file changed, 80 insertions(+), 24 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index fe0a06f364..92a5b7b503 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -275,21 +275,41 @@ struct field_modify_info modify_tcp[] = {
{0, 0, 0},
};
-static void
+enum mlx5_l3_tunnel_detection {
+ l3_tunnel_none,
+ l3_tunnel_outer,
+ l3_tunnel_inner
+};
+
+static enum mlx5_l3_tunnel_detection
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
- uint8_t next_protocol, uint64_t *item_flags,
- int *tunnel)
+ uint8_t next_protocol, uint64_t item_flags,
+ uint64_t *l3_tunnel_flag)
{
+ enum mlx5_l3_tunnel_detection td = l3_tunnel_none;
+
MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
item->type == RTE_FLOW_ITEM_TYPE_IPV6);
- if (next_protocol == IPPROTO_IPIP) {
- *item_flags |= MLX5_FLOW_LAYER_IPIP;
- *tunnel = 1;
- }
- if (next_protocol == IPPROTO_IPV6) {
- *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
- *tunnel = 1;
+ if ((item_flags & MLX5_FLOW_LAYER_OUTER_L3) == 0) {
+ switch (next_protocol) {
+ case IPPROTO_IPIP:
+ td = l3_tunnel_outer;
+ *l3_tunnel_flag = MLX5_FLOW_LAYER_IPIP;
+ break;
+ case IPPROTO_IPV6:
+ td = l3_tunnel_outer;
+ *l3_tunnel_flag = MLX5_FLOW_LAYER_IPV6_ENCAP;
+ break;
+ default:
+ break;
+ }
+ } else {
+ td = l3_tunnel_inner;
+ *l3_tunnel_flag = item->type == RTE_FLOW_ITEM_TYPE_IPV4 ?
+ MLX5_FLOW_LAYER_IPIP :
+ MLX5_FLOW_LAYER_IPV6_ENCAP;
}
+ return td;
}
static inline struct mlx5_hlist *
@@ -7718,6 +7738,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
return ret;
is_root = (uint64_t)ret;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ enum mlx5_l3_tunnel_detection l3_tunnel_detection;
+ uint64_t l3_tunnel_flag;
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int type = items->type;
@@ -7795,8 +7817,16 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
vlan_m = items->mask;
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
- mlx5_flow_tunnel_ip_check(items, next_protocol,
- &item_flags, &tunnel);
+ next_protocol = mlx5_flow_l3_next_protocol
+ (items, (enum MLX5_SET_MATCHER)-1);
+ l3_tunnel_detection =
+ mlx5_flow_tunnel_ip_check(items, next_protocol,
+ item_flags,
+ &l3_tunnel_flag);
+ if (l3_tunnel_detection == l3_tunnel_inner) {
+ item_flags |= l3_tunnel_flag;
+ tunnel = 1;
+ }
ret = flow_dv_validate_item_ipv4(dev, items, item_flags,
last_item, ether_type,
error);
@@ -7804,12 +7834,20 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
MLX5_FLOW_LAYER_OUTER_L3_IPV4;
- next_protocol = mlx5_flow_l3_next_protocol
- (items, (enum MLX5_SET_MATCHER)-1);
+ if (l3_tunnel_detection == l3_tunnel_outer)
+ item_flags |= l3_tunnel_flag;
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
- mlx5_flow_tunnel_ip_check(items, next_protocol,
- &item_flags, &tunnel);
+ next_protocol = mlx5_flow_l3_next_protocol
+ (items, (enum MLX5_SET_MATCHER)-1);
+ l3_tunnel_detection =
+ mlx5_flow_tunnel_ip_check(items, next_protocol,
+ item_flags,
+ &l3_tunnel_flag);
+ if (l3_tunnel_detection == l3_tunnel_inner) {
+ item_flags |= l3_tunnel_flag;
+ tunnel = 1;
+ }
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
last_item,
ether_type,
@@ -7819,8 +7857,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
MLX5_FLOW_LAYER_OUTER_L3_IPV6;
- next_protocol = mlx5_flow_l3_next_protocol
- (items, (enum MLX5_SET_MATCHER)-1);
+ if (l3_tunnel_detection == l3_tunnel_outer)
+ item_flags |= l3_tunnel_flag;
break;
case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
ret = flow_dv_validate_item_ipv6_frag_ext(items,
@@ -13945,6 +13983,8 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
int tunnel = !!(wks->item_flags & MLX5_FLOW_LAYER_TUNNEL);
int item_type = items->type;
uint64_t last_item = wks->last_item;
+ enum mlx5_l3_tunnel_detection l3_tunnel_detection;
+ uint64_t l3_tunnel_flag;
int ret;
switch (item_type) {
@@ -13988,24 +14028,40 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
MLX5_FLOW_LAYER_OUTER_VLAN);
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
- mlx5_flow_tunnel_ip_check(items, next_protocol,
- &wks->item_flags, &tunnel);
+ next_protocol = mlx5_flow_l3_next_protocol(items, key_type);
+ l3_tunnel_detection =
+ mlx5_flow_tunnel_ip_check(items, next_protocol,
+ wks->item_flags,
+ &l3_tunnel_flag);
+ if (l3_tunnel_detection == l3_tunnel_inner) {
+ wks->item_flags |= l3_tunnel_flag;
+ tunnel = 1;
+ }
flow_dv_translate_item_ipv4(key, items, tunnel,
wks->group, key_type);
wks->priority = MLX5_PRIORITY_MAP_L3;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
MLX5_FLOW_LAYER_OUTER_L3_IPV4;
- next_protocol = mlx5_flow_l3_next_protocol(items, key_type);
+ if (l3_tunnel_detection == l3_tunnel_outer)
+ wks->item_flags |= l3_tunnel_flag;
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
- mlx5_flow_tunnel_ip_check(items, next_protocol,
- &wks->item_flags, &tunnel);
+ next_protocol = mlx5_flow_l3_next_protocol(items, key_type);
+ l3_tunnel_detection =
+ mlx5_flow_tunnel_ip_check(items, next_protocol,
+ wks->item_flags,
+ &l3_tunnel_flag);
+ if (l3_tunnel_detection == l3_tunnel_inner) {
+ wks->item_flags |= l3_tunnel_flag;
+ tunnel = 1;
+ }
flow_dv_translate_item_ipv6(key, items, tunnel,
wks->group, key_type);
wks->priority = MLX5_PRIORITY_MAP_L3;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
MLX5_FLOW_LAYER_OUTER_L3_IPV6;
- next_protocol = mlx5_flow_l3_next_protocol(items, key_type);
+ if (l3_tunnel_detection == l3_tunnel_outer)
+ wks->item_flags |= l3_tunnel_flag;
break;
case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
flow_dv_translate_item_ipv6_frag_ext
--
2.39.2
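
For readers following the diff, the rule implemented by the new
mlx5_flow_tunnel_ip_check() helper can be restated in a standalone
sketch (the flag values below are stand-ins, not the driver's
MLX5_FLOW_LAYER_* bits):

    #include <netinet/in.h>
    #include <stdint.h>

    enum l3_tunnel_detection { L3_TUNNEL_NONE, L3_TUNNEL_OUTER, L3_TUNNEL_INNER };

    /* Stand-in layer flags used only for this sketch. */
    #define LAYER_OUTER_L3   (1ull << 0)
    #define LAYER_IPIP       (1ull << 1)
    #define LAYER_IPV6_ENCAP (1ull << 2)

    static enum l3_tunnel_detection
    detect_l3_tunnel(int item_is_ipv4, uint8_t next_protocol,
                     uint64_t item_flags, uint64_t *tunnel_flag)
    {
            if (!(item_flags & LAYER_OUTER_L3)) {
                    /* First L3 header: it opens a tunnel only if it
                     * explicitly carries another IP header. */
                    if (next_protocol == IPPROTO_IPIP) {
                            *tunnel_flag = LAYER_IPIP;
                            return L3_TUNNEL_OUTER;
                    }
                    if (next_protocol == IPPROTO_IPV6) {
                            *tunnel_flag = LAYER_IPV6_ENCAP;
                            return L3_TUNNEL_OUTER;
                    }
                    return L3_TUNNEL_NONE;
            }
            /* An outer L3 header was already matched: this IP item is
             * the inner header, so the flow is an L3 tunnel regardless
             * of the next-protocol value. */
            *tunnel_flag = item_is_ipv4 ? LAYER_IPIP : LAYER_IPV6_ENCAP;
            return L3_TUNNEL_INNER;
    }

As in the patch, callers apply the returned flag before item
validation/translation when the detection is "inner" (so the IP item is
handled as the inner header), and only after it when the detection is
"outer" (so the item itself is still validated as the outer L3 header).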