* [PATCH] net/mlx5/hws: add support for NVGRE matching
@ 2024-05-20 6:25 Dong Zhou
From: Dong Zhou @ 2024-05-20 6:25 UTC
To: valex, Dariusz Sosnowski, Viacheslav Ovsiienko, Ori Kam,
Suanming Mou, Matan Azrad
Cc: dev, thomas, rasland
Add HWS support for matching on all fields of the
RTE_FLOW_ITEM_TYPE_NVGRE item.
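As an illustrative sketch only (the values are hypothetical and not part
of this patch), a pattern matching every NVGRE field can be expressed
through the generic rte_flow API:

	#include <rte_byteorder.h>
	#include <rte_flow.h>

	/* Match NVGRE with K=1 (0x2000 under mask 0xb000), TEB payload
	 * (0x6558), VSID 100 and flow_id 1. */
	struct rte_flow_item_nvgre nvgre_spec = {
		.c_k_s_rsvd0_ver = RTE_BE16(0x2000),
		.protocol = RTE_BE16(0x6558),
		.tni = { 0x00, 0x00, 0x64 },
		.flow_id = 0x01,
	};
	struct rte_flow_item_nvgre nvgre_mask = {
		.c_k_s_rsvd0_ver = RTE_BE16(0xb000),
		.protocol = RTE_BE16(0xffff),
		.tni = { 0xff, 0xff, 0xff },
		.flow_id = 0xff,
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_NVGRE,
		  .spec = &nvgre_spec, .mask = &nvgre_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};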
Signed-off-by: Dong Zhou <dongzhou@nvidia.com>
Acked-by: Alex Vesker <valex@nvidia.com>
---
doc/guides/nics/mlx5.rst | 10 +++
drivers/net/mlx5/hws/mlx5dr_definer.c | 87 +++++++++++++++++++++++++++
drivers/net/mlx5/hws/mlx5dr_definer.h | 3 +
drivers/net/mlx5/mlx5_flow_hw.c | 1 +
4 files changed, 101 insertions(+)
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 9b2fe07fd3..06f5cb6454 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -875,6 +875,16 @@ Limitations
Matching on checksum and sequence needs MLNX_OFED 5.6+.
+- Matching on NVGRE header:
+
+  - c_k_s_rsvd0_ver
+  - protocol
+  - tni
+  - flow_id
+
+  In SW steering (``dv_flow_en`` = 1), only the ``tni`` field is supported.
+  In HW steering (``dv_flow_en`` = 2), all fields are supported.
+
- The NIC egress flow rules on representor port are not supported.
- A driver limitation for ``RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR`` action
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 35a2ed2048..42fb4c32c6 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -15,6 +15,9 @@
#define UDP_GENEVE_PORT 6081
#define UDP_ROCEV2_PORT 4791
#define DR_FLOW_LAYER_TUNNEL_NO_MPLS (MLX5_FLOW_LAYER_TUNNEL & ~MLX5_FLOW_LAYER_MPLS)
+#define NVGRE_PORT 0x6558
+#define NVGRE_C_RSVD0_VER 0x2000
+#define NVGRE_C_RSVD0_VER_MASK 0xB000

#define STE_NO_VLAN 0x0
#define STE_SVLAN 0x1
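The two new NVGRE flag-word constants above track RFC 7637: NVGRE is GRE
with the key-present (K) bit set and the checksum (C) and sequence (S)
bits clear, while the version bits are left unmatched. A self-contained
check of that reading (illustrative only, not driver code):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		/* GRE flag word: C(bit 15), R(14), K(13), S(12), ..., Ver(2:0). */
		const uint16_t c_bit = 1u << 15;
		const uint16_t k_bit = 1u << 13;
		const uint16_t s_bit = 1u << 12;

		assert(0x2000 == k_bit);                   /* NVGRE_C_RSVD0_VER */
		assert(0xB000 == (c_bit | k_bit | s_bit)); /* ..._VER_MASK */
		return 0;
	}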
@@ -220,6 +223,12 @@ struct mlx5dr_definer_conv_data {
	X(SET_BE32, gre_opt_key, v->key.key, rte_flow_item_gre_opt) \
	X(SET_BE32, gre_opt_seq, v->sequence.sequence, rte_flow_item_gre_opt) \
	X(SET_BE16, gre_opt_checksum, v->checksum_rsvd.checksum, rte_flow_item_gre_opt) \
+	X(SET, nvgre_def_c_rsvd0_ver, NVGRE_C_RSVD0_VER, rte_flow_item_nvgre) \
+	X(SET, nvgre_def_c_rsvd0_ver_mask, NVGRE_C_RSVD0_VER_MASK, rte_flow_item_nvgre) \
+	X(SET, nvgre_def_protocol, NVGRE_PORT, rte_flow_item_nvgre) \
+	X(SET_BE16, nvgre_c_rsvd0_ver, v->c_k_s_rsvd0_ver, rte_flow_item_nvgre) \
+	X(SET_BE16, nvgre_protocol, v->protocol, rte_flow_item_nvgre) \
+	X(SET_BE32P, nvgre_dw1, &v->tni[0], rte_flow_item_nvgre) \
	X(SET, meter_color, rte_col_2_mlx5_col(v->color), rte_flow_item_meter_color) \
	X(SET_BE32, ipsec_spi, v->hdr.spi, rte_flow_item_esp) \
	X(SET_BE32, ipsec_sequence_number, v->hdr.seq, rte_flow_item_esp) \
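For readers new to this file: the X-macro entries above are expanded
elsewhere in mlx5dr_definer.c to generate the per-field tag setters
(mlx5dr_definer_nvgre_protocol_set and friends) used by the conversion
function below. A generic, self-contained sketch of the technique; it
does not reproduce the driver's actual macros:

	#include <stdio.h>

	/* One list of fields, expanded twice: once into an enum, once
	 * into setter functions with matching names. */
	#define FIELD_LIST(X) \
		X(nvgre_protocol) \
		X(nvgre_dw1)

	#define AS_ENUM(name) FNAME_##name,
	enum fname { FIELD_LIST(AS_ENUM) FNAME_MAX };

	#define AS_SETTER(name) \
		static void name##_set(void) { puts(#name "_set"); }
	FIELD_LIST(AS_SETTER)

	int main(void)
	{
		nvgre_protocol_set(); /* generated by FIELD_LIST(AS_SETTER) */
		nvgre_dw1_set();
		return 0;
	}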
@@ -2012,6 +2021,80 @@ mlx5dr_definer_conv_item_gre_key(struct mlx5dr_definer_conv_data *cd,
	return 0;
}

+static int
+mlx5dr_definer_conv_item_nvgre(struct mlx5dr_definer_conv_data *cd,
+			       struct rte_flow_item *item,
+			       int item_idx)
+{
+	const struct rte_flow_item_nvgre *m = item->mask;
+	struct mlx5dr_definer_fc *fc;
+	bool inner = cd->tunnel;
+
+	if (inner) {
+		DR_LOG(ERR, "Inner nvgre item not supported");
+		rte_errno = ENOTSUP;
+		return rte_errno;
+	}
+
+	if (!cd->relaxed) {
+		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
+		if (!fc->tag_set) {
+			fc->item_idx = item_idx;
+			fc->tag_mask_set = &mlx5dr_definer_ones_set;
+			fc->tag_set = &mlx5dr_definer_ipv4_protocol_gre_set;
+			DR_CALC_SET(fc, eth_l3, protocol_next_header, inner);
+		}
+
+		fc = &cd->fc[MLX5DR_DEFINER_FNAME_NVGRE_C_K_S];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_nvgre_def_c_rsvd0_ver_set;
+		fc->tag_mask_set = &mlx5dr_definer_nvgre_def_c_rsvd0_ver_mask_set;
+		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
+		fc->bit_mask = __mlx5_mask(header_gre, c_rsvd0_ver);
+		fc->bit_off = __mlx5_dw_bit_off(header_gre, c_rsvd0_ver);
+
+		fc = &cd->fc[MLX5DR_DEFINER_FNAME_NVGRE_PROTOCOL];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_nvgre_def_protocol_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
+		fc->byte_off += MLX5_BYTE_OFF(header_gre, gre_protocol);
+		fc->bit_mask = __mlx5_mask(header_gre, gre_protocol);
+		fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_protocol);
+	}
+
+	if (!m)
+		return 0;
+
+	if (m->c_k_s_rsvd0_ver) {
+		fc = &cd->fc[MLX5DR_DEFINER_FNAME_NVGRE_C_K_S];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_nvgre_c_rsvd0_ver_set;
+		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
+		fc->bit_mask = __mlx5_mask(header_gre, c_rsvd0_ver);
+		fc->bit_off = __mlx5_dw_bit_off(header_gre, c_rsvd0_ver);
+	}
+
+	if (m->protocol) {
+		fc = &cd->fc[MLX5DR_DEFINER_FNAME_NVGRE_PROTOCOL];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_nvgre_protocol_set;
+		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
+		fc->byte_off += MLX5_BYTE_OFF(header_gre, gre_protocol);
+		fc->bit_mask = __mlx5_mask(header_gre, gre_protocol);
+		fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_protocol);
+	}
+
+	if (!is_mem_zero(m->tni, 4)) {
+		fc = &cd->fc[MLX5DR_DEFINER_FNAME_NVGRE_DW1];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_nvgre_dw1_set;
+		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_2);
+	}
+
+	return 0;
+}
+
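A note on the DW1 handler above: mlx5dr_definer_nvgre_dw1_set is a
SET_BE32P copy of four bytes starting at tni, i.e. the on-wire NVGRE
key word, a 24-bit TNI followed by the 8-bit flow_id; this is also why
the is_mem_zero() mask check spans 4 bytes rather than 3. A hypothetical
helper (not part of the patch) showing that packing:

	#include <stdint.h>

	/* NVGRE key dword layout: TNI(24 bits) | flow_id(8 bits). */
	static inline uint32_t nvgre_key_dw(const uint8_t tni[3], uint8_t flow_id)
	{
		return ((uint32_t)tni[0] << 24) | ((uint32_t)tni[1] << 16) |
		       ((uint32_t)tni[2] << 8) | (uint32_t)flow_id;
	}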
static int
mlx5dr_definer_conv_item_ptype(struct mlx5dr_definer_conv_data *cd,
			       struct rte_flow_item *item,
@@ -3195,6 +3278,10 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
+		case RTE_FLOW_ITEM_TYPE_NVGRE:
+			ret = mlx5dr_definer_conv_item_nvgre(&cd, items, i);
+			item_flags |= MLX5_FLOW_LAYER_NVGRE;
+			break;
		default:
			DR_LOG(ERR, "Unsupported item type %d", items->type);
			goto not_supp;
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.h b/drivers/net/mlx5/hws/mlx5dr_definer.h
index ca530ebf30..3204bb825f 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.h
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.h
@@ -151,6 +151,9 @@ enum mlx5dr_definer_fname {
	MLX5DR_DEFINER_FNAME_GRE_OPT_KEY,
	MLX5DR_DEFINER_FNAME_GRE_OPT_SEQ,
	MLX5DR_DEFINER_FNAME_GRE_OPT_CHECKSUM,
+	MLX5DR_DEFINER_FNAME_NVGRE_C_K_S,
+	MLX5DR_DEFINER_FNAME_NVGRE_PROTOCOL,
+	MLX5DR_DEFINER_FNAME_NVGRE_DW1,
	MLX5DR_DEFINER_FNAME_INTEGRITY_O,
	MLX5DR_DEFINER_FNAME_INTEGRITY_I,
	MLX5DR_DEFINER_FNAME_ICMP_DW1,
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 825f258065..b864808820 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -7652,6 +7652,7 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
		case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
+		case RTE_FLOW_ITEM_TYPE_NVGRE:
		case RTE_FLOW_ITEM_TYPE_ICMP:
		case RTE_FLOW_ITEM_TYPE_ICMP6:
		case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST:
--
2.27.0
* Re: [PATCH] net/mlx5/hws: add support for NVGRE matching
From: Raslan Darawsheh @ 2024-05-27 11:36 UTC
To: Bill Zhou, Alex Vesker, Dariusz Sosnowski, Slava Ovsiienko,
Ori Kam, Suanming Mou, Matan Azrad
Cc: dev, NBU-Contact-Thomas Monjalon (EXTERNAL)
Hi,
From: Bill Zhou <dongzhou@nvidia.com>
Sent: Monday, May 20, 2024 9:25 AM
To: Alex Vesker; Dariusz Sosnowski; Slava Ovsiienko; Ori Kam; Suanming Mou; Matan Azrad
Cc: dev@dpdk.org; NBU-Contact-Thomas Monjalon (EXTERNAL); Raslan Darawsheh
Subject: [PATCH] net/mlx5/hws: add support for NVGRE matching
Add HWS support for matching on all fields of the
RTE_FLOW_ITEM_TYPE_NVGRE item.
Signed-off-by: Dong Zhou <dongzhou@nvidia.com>
Acked-by: Alex Vesker <valex@nvidia.com>
Patch applied to next-net-mlx,
Kindest regards
Raslan Darawsheh