From: Gregory Etelson <getelson@nvidia.com>
To: <stable@dpdk.org>, <getelson@nvidia.com>
Cc: Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
Matan Azrad <matan@nvidia.com>,
Shahaf Shuler <shahafs@nvidia.com>,
Moti Haimovsky <motih@mellanox.com>
Subject: [PATCH 19.11 3/6] net/mlx5: fix GENEVE protocol type translation
Date: Thu, 2 Dec 2021 18:00:16 +0200
Message-ID: <20211202160019.26453-3-getelson@nvidia.com>
In-Reply-To: <20211202160019.26453-1-getelson@nvidia.com>
[ upstream commit 690391dd0e8bc7a8d02a3aba844ffc3dffe7aecd ]
When an application creates several flows to match on a GENEVE tunnel
without explicitly specifying the GENEVE protocol type value in the
flow rules, the PMD translates that to a zero mask.
RDMA-CORE cannot distinguish between the different inner flow types
and produces identical matchers for each zero mask.

The patch extracts the inner header type from the flow rule and
forces it into the GENEVE protocol type when the application did not
explicitly specify a protocol type value.
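
For illustration, a minimal sketch of two rules that trigger the
problem; the patterns below are hypothetical, not taken from the
patch. Before this fix, both translated to a zero GENEVE protocol
mask and therefore to identical RDMA-CORE matchers:

	#include <rte_flow.h>

	/* Two patterns that differ only in the inner header; neither
	 * sets the GENEVE "protocol" field in spec or mask.
	 */
	struct rte_flow_item pattern_inner_eth[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_GENEVE }, /* protocol unset */
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },    /* inner L2 */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_item pattern_inner_ipv4[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_GENEVE }, /* protocol unset */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },   /* inner L3 */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};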
Fixes: e59a5dbcfd07 ("net/mlx5: add flow match on GENEVE item")
Cc: stable@dpdk.org
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
drivers/net/mlx5/mlx5_flow_dv.c | 78 ++++++++++++++++++++-------------
1 file changed, 47 insertions(+), 31 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index f124f42c9c..8dec8d9ff5 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -75,6 +75,20 @@ static int
flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
struct mlx5_flow_tbl_resource *tbl);
+static inline uint16_t
+mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
+{
+ if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
+ return RTE_ETHER_TYPE_TEB;
+ else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+ return RTE_ETHER_TYPE_IPV4;
+ else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+ return RTE_ETHER_TYPE_IPV6;
+ else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
+ return RTE_ETHER_TYPE_MPLS;
+ return 0;
+}
+
/**
* Initialize flow attributes structure according to flow items' types.
*
@@ -6132,49 +6146,39 @@ flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
static void
flow_dv_translate_item_geneve(void *matcher, void *key,
- const struct rte_flow_item *item, int inner)
+ const struct rte_flow_item *item,
+ uint64_t pattern_flags)
{
+ static const struct rte_flow_item_geneve empty_geneve = {0,};
const struct rte_flow_item_geneve *geneve_m = item->mask;
const struct rte_flow_item_geneve *geneve_v = item->spec;
- void *headers_m;
- void *headers_v;
+ /* GENEVE flow item validation allows single tunnel item */
+ void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
- uint16_t dport;
uint16_t gbhdr_m;
uint16_t gbhdr_v;
- char *vni_m;
- char *vni_v;
- size_t size, i;
+ char *vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
+ char *vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
+ size_t size = sizeof(geneve_m->vni), i;
+ uint16_t protocol_m, protocol_v;
- if (inner) {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
- inner_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
- } else {
- headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
- outer_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
- }
- dport = MLX5_UDP_PORT_GENEVE;
if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+ MLX5_UDP_PORT_GENEVE);
+ }
+ if (!geneve_v) {
+ geneve_v = &empty_geneve;
+ geneve_m = &empty_geneve;
+ } else {
+ if (!geneve_m)
+ geneve_m = &rte_flow_item_geneve_mask;
}
- if (!geneve_v)
- return;
- if (!geneve_m)
- geneve_m = &rte_flow_item_geneve_mask;
- size = sizeof(geneve_m->vni);
- vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
- vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
memcpy(vni_m, geneve_m->vni, size);
for (i = 0; i < size; ++i)
vni_v[i] = vni_m[i] & geneve_v->vni[i];
- MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
- rte_be_to_cpu_16(geneve_m->protocol));
- MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
- rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
@@ -6186,6 +6190,16 @@ flow_dv_translate_item_geneve(void *matcher, void *key,
MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
+ protocol_m = rte_be_to_cpu_16(geneve_m->protocol);
+ protocol_v = rte_be_to_cpu_16(geneve_v->protocol);
+ if (!protocol_m) {
+ /* Force next protocol to prevent matchers duplication */
+ protocol_m = 0xFFFF;
+ protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
+ }
+ MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, protocol_m);
+ MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
+ protocol_m & protocol_v);
}
/**
@@ -7806,11 +7820,10 @@ __flow_dv_translate(struct rte_eth_dev *dev,
tunnel_item = items;
break;
case RTE_FLOW_ITEM_TYPE_GENEVE:
- flow_dv_translate_item_geneve(match_mask, match_value,
- items, tunnel);
matcher.priority = flow->rss.level >= 2 ?
MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_GENEVE;
+ tunnel_item = items;
break;
case RTE_FLOW_ITEM_TYPE_MPLS:
flow_dv_translate_item_mpls(match_mask, match_value,
@@ -7876,6 +7889,9 @@ __flow_dv_translate(struct rte_eth_dev *dev,
if (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE)
flow_dv_translate_item_vxlan_gpe(match_mask, match_value,
tunnel_item, item_flags);
+ else if (item_flags & MLX5_FLOW_LAYER_GENEVE)
+ flow_dv_translate_item_geneve(match_mask, match_value,
+ tunnel_item, item_flags);
assert(!flow_dv_check_valid_spec(matcher.mask.buf,
dev_flow->dv.value.buf));
/*
--
2.34.0
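
With the patch applied, a GENEVE item that leaves "protocol" unset no
longer yields a zero protocol mask: the translation forces the mask to
0xFFFF and derives the value from the inner headers. A hedged sketch
of the effect (mlx5_translate_tunnel_etypes() is static to
mlx5_flow_dv.c, so this is illustrative only, not application-callable
code):

	/* Pattern flags as __flow_dv_translate() would accumulate them
	 * for the two example patterns shown earlier.
	 */
	uint64_t flags_inner_eth = MLX5_FLOW_LAYER_GENEVE |
				   MLX5_FLOW_LAYER_INNER_L2;
	uint64_t flags_inner_ip  = MLX5_FLOW_LAYER_GENEVE |
				   MLX5_FLOW_LAYER_INNER_L3_IPV4;

	/* The helper maps the inner layer to an ethertype:
	 * inner L2  -> RTE_ETHER_TYPE_TEB  (0x6558)
	 * inner IP4 -> RTE_ETHER_TYPE_IPV4 (0x0800)
	 * With the mask forced to 0xFFFF, the two matchers passed to
	 * RDMA-CORE now differ.
	 */
	uint16_t etype_eth = mlx5_translate_tunnel_etypes(flags_inner_eth);
	uint16_t etype_ip = mlx5_translate_tunnel_etypes(flags_inner_ip);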