From: Gregory Etelson <getelson@nvidia.com>
To: <dev@dpdk.org>
Cc: <getelson@nvidia.com>, <mkashani@nvidia.com>,
<rasland@nvidia.com>, <stable@dpdk.org>,
Ori Kam <orika@nvidia.com>,
Dariusz Sosnowski <dsosnowski@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
Suanming Mou <suanmingm@nvidia.com>,
Matan Azrad <matan@nvidia.com>,
"Yongseok Koh" <yskoh@mellanox.com>,
Shahaf Shuler <shahafs@nvidia.com>
Subject: [PATCH 1/2] net/mlx5: remove code duplication
Date: Thu, 29 Feb 2024 18:05:03 +0200
Message-ID: <20240229160505.630586-2-getelson@nvidia.com>
In-Reply-To: <20240229160505.630586-1-getelson@nvidia.com>

Remove code duplication in DV L3 items validation and translation.

The IPv4, IPv6 and IPv6 fragment extension items all derived the
next-protocol value with the same spec/mask logic. Move that logic
into a single helper, mlx5_flow_l3_next_protocol(), and call it from
both the flow_dv_validate() and flow_dv_translate_items() paths.
The validation path has no matcher key type, so it passes an
out-of-range key_type and only the combined spec & mask branch or the
inner-layer reset applies there.

Fixes: 3193c2494eea ("net/mlx5: fix L4 protocol validation")
Cc: stable@dpdk.org

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Ori Kam <orika@nvidia.com>
---
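
Note (not part of the patch): below is a minimal standalone sketch of
the spec/mask combination rule the new helper centralizes. When both
spec and mask are present and the spec protocol is non-zero, the
matched value is spec & mask; otherwise the value resets to 0xff
("any") for the inner layer. The single-sided MLX5_SET_MATCHER_HS_M /
MLX5_SET_MATCHER_HS_V branches are omitted here, and the struct and
function names are illustrative stand-ins for the rte_flow item
layout, not DPDK API:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the IPv4 variant of an rte_flow item. */
struct l3_hdr { uint8_t next_proto_id; };
struct l3_item {
	const struct l3_hdr *spec;
	const struct l3_hdr *mask;
};

static uint8_t
l3_next_protocol(const struct l3_item *it)
{
	if (it->spec != NULL && it->mask != NULL &&
	    it->spec->next_proto_id != 0)
		/* Match only the masked bits of the protocol field. */
		return it->spec->next_proto_id & it->mask->next_proto_id;
	/* No usable spec/mask pair: reset for the inner layer. */
	return 0xff;
}

int main(void)
{
	struct l3_hdr spec = { .next_proto_id = 4 };   /* IPPROTO_IPIP */
	struct l3_hdr mask = { .next_proto_id = 0xff };
	struct l3_item it = { .spec = &spec, .mask = &mask };

	printf("both present: 0x%02x\n", l3_next_protocol(&it)); /* 0x04 */
	it.spec = NULL;
	printf("spec missing: 0x%02x\n", l3_next_protocol(&it)); /* 0xff */
	return 0;
}
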
drivers/net/mlx5/mlx5_flow_dv.c | 151 +++++++++-----------------------
 1 file changed, 43 insertions(+), 108 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 18f09b22be..fe0a06f364 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -7488,6 +7488,40 @@ flow_dv_validate_item_flex(struct rte_eth_dev *dev,
return 0;
}
+static __rte_always_inline uint8_t
+mlx5_flow_l3_next_protocol(const struct rte_flow_item *l3_item,
+ enum MLX5_SET_MATCHER key_type)
+{
+#define MLX5_L3_NEXT_PROTOCOL(i, ms) \
+ ((i)->type == RTE_FLOW_ITEM_TYPE_IPV4 ? \
+ ((const struct rte_flow_item_ipv4 *)(i)->ms)->hdr.next_proto_id : \
+ (i)->type == RTE_FLOW_ITEM_TYPE_IPV6 ? \
+ ((const struct rte_flow_item_ipv6 *)(i)->ms)->hdr.proto : \
+ (i)->type == RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT ? \
+ ((const struct rte_flow_item_ipv6_frag_ext *)(i)->ms)->hdr.next_header :\
+ 0xff)
+
+ uint8_t next_protocol;
+
+ if (l3_item->mask != NULL && l3_item->spec != NULL) {
+ next_protocol = MLX5_L3_NEXT_PROTOCOL(l3_item, spec);
+ if (next_protocol)
+ next_protocol &= MLX5_L3_NEXT_PROTOCOL(l3_item, mask);
+ else
+ next_protocol = 0xff;
+ } else if (key_type == MLX5_SET_MATCHER_HS_M && l3_item->mask != NULL) {
+ next_protocol = MLX5_L3_NEXT_PROTOCOL(l3_item, mask);
+ } else if (key_type == MLX5_SET_MATCHER_HS_V && l3_item->spec != NULL) {
+ next_protocol = MLX5_L3_NEXT_PROTOCOL(l3_item, spec);
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
+ return next_protocol;
+
+#undef MLX5_L3_NEXT_PROTOCOL
+}
+
/**
* Validate IB BTH item.
*
@@ -7770,19 +7804,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
MLX5_FLOW_LAYER_OUTER_L3_IPV4;
- if (items->mask != NULL &&
- ((const struct rte_flow_item_ipv4 *)
- items->mask)->hdr.next_proto_id) {
- next_protocol =
- ((const struct rte_flow_item_ipv4 *)
- (items->spec))->hdr.next_proto_id;
- next_protocol &=
- ((const struct rte_flow_item_ipv4 *)
- (items->mask))->hdr.next_proto_id;
- } else {
- /* Reset for inner layer. */
- next_protocol = 0xff;
- }
+ next_protocol = mlx5_flow_l3_next_protocol
+ (items, (enum MLX5_SET_MATCHER)-1);
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
mlx5_flow_tunnel_ip_check(items, next_protocol,
@@ -7796,22 +7819,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
MLX5_FLOW_LAYER_OUTER_L3_IPV6;
- if (items->mask != NULL &&
- ((const struct rte_flow_item_ipv6 *)
- items->mask)->hdr.proto) {
- item_ipv6_proto =
- ((const struct rte_flow_item_ipv6 *)
- items->spec)->hdr.proto;
- next_protocol =
- ((const struct rte_flow_item_ipv6 *)
- items->spec)->hdr.proto;
- next_protocol &=
- ((const struct rte_flow_item_ipv6 *)
- items->mask)->hdr.proto;
- } else {
- /* Reset for inner layer. */
- next_protocol = 0xff;
- }
+ next_protocol = mlx5_flow_l3_next_protocol
+ (items, (enum MLX5_SET_MATCHER)-1);
break;
case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
ret = flow_dv_validate_item_ipv6_frag_ext(items,
@@ -7822,19 +7831,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
last_item = tunnel ?
MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
- if (items->mask != NULL &&
- ((const struct rte_flow_item_ipv6_frag_ext *)
- items->mask)->hdr.next_header) {
- next_protocol =
- ((const struct rte_flow_item_ipv6_frag_ext *)
- items->spec)->hdr.next_header;
- next_protocol &=
- ((const struct rte_flow_item_ipv6_frag_ext *)
- items->mask)->hdr.next_header;
- } else {
- /* Reset for inner layer. */
- next_protocol = 0xff;
- }
+ next_protocol = mlx5_flow_l3_next_protocol
+ (items, (enum MLX5_SET_MATCHER)-1);
break;
case RTE_FLOW_ITEM_TYPE_TCP:
ret = mlx5_flow_validate_item_tcp
@@ -13997,28 +13995,7 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
wks->priority = MLX5_PRIORITY_MAP_L3;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
MLX5_FLOW_LAYER_OUTER_L3_IPV4;
- if (items->mask != NULL &&
- items->spec != NULL &&
- ((const struct rte_flow_item_ipv4 *)
- items->mask)->hdr.next_proto_id) {
- next_protocol =
- ((const struct rte_flow_item_ipv4 *)
- (items->spec))->hdr.next_proto_id;
- next_protocol &=
- ((const struct rte_flow_item_ipv4 *)
- (items->mask))->hdr.next_proto_id;
- } else if (key_type == MLX5_SET_MATCHER_HS_M &&
- items->mask != NULL) {
- next_protocol = ((const struct rte_flow_item_ipv4 *)
- (items->mask))->hdr.next_proto_id;
- } else if (key_type == MLX5_SET_MATCHER_HS_V &&
- items->spec != NULL) {
- next_protocol = ((const struct rte_flow_item_ipv4 *)
- (items->spec))->hdr.next_proto_id;
- } else {
- /* Reset for inner layer. */
- next_protocol = 0xff;
- }
+ next_protocol = mlx5_flow_l3_next_protocol(items, key_type);
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
mlx5_flow_tunnel_ip_check(items, next_protocol,
@@ -14028,56 +14005,14 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
wks->priority = MLX5_PRIORITY_MAP_L3;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
MLX5_FLOW_LAYER_OUTER_L3_IPV6;
- if (items->mask != NULL &&
- items->spec != NULL &&
- ((const struct rte_flow_item_ipv6 *)
- items->mask)->hdr.proto) {
- next_protocol =
- ((const struct rte_flow_item_ipv6 *)
- items->spec)->hdr.proto;
- next_protocol &=
- ((const struct rte_flow_item_ipv6 *)
- items->mask)->hdr.proto;
- } else if (key_type == MLX5_SET_MATCHER_HS_M &&
- items->mask != NULL) {
- next_protocol = ((const struct rte_flow_item_ipv6 *)
- (items->mask))->hdr.proto;
- } else if (key_type == MLX5_SET_MATCHER_HS_V &&
- items->spec != NULL) {
- next_protocol = ((const struct rte_flow_item_ipv6 *)
- (items->spec))->hdr.proto;
- } else {
- /* Reset for inner layer. */
- next_protocol = 0xff;
- }
+ next_protocol = mlx5_flow_l3_next_protocol(items, key_type);
break;
case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
flow_dv_translate_item_ipv6_frag_ext
(key, items, tunnel, key_type);
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
- if (items->mask != NULL &&
- items->spec != NULL &&
- ((const struct rte_flow_item_ipv6_frag_ext *)
- items->mask)->hdr.next_header) {
- next_protocol =
- ((const struct rte_flow_item_ipv6_frag_ext *)
- items->spec)->hdr.next_header;
- next_protocol &=
- ((const struct rte_flow_item_ipv6_frag_ext *)
- items->mask)->hdr.next_header;
- } else if (key_type == MLX5_SET_MATCHER_HS_M &&
- items->mask != NULL) {
- next_protocol = ((const struct rte_flow_item_ipv6_frag_ext *)
- (items->mask))->hdr.next_header;
- } else if (key_type == MLX5_SET_MATCHER_HS_V &&
- items->spec != NULL) {
- next_protocol = ((const struct rte_flow_item_ipv6_frag_ext *)
- (items->spec))->hdr.next_header;
- } else {
- /* Reset for inner layer. */
- next_protocol = 0xff;
- }
+ next_protocol = mlx5_flow_l3_next_protocol(items, key_type);
break;
case RTE_FLOW_ITEM_TYPE_TCP:
flow_dv_translate_item_tcp(key, items, tunnel, key_type);
--
2.39.2