From: Dong Zhou <dongzhou@nvidia.com>
To: <orika@nvidia.com>, <viacheslavo@nvidia.com>,
<thomas@monjalon.net>, "Matan Azrad" <matan@nvidia.com>
Cc: <dev@dpdk.org>, <rasland@nvidia.com>
Subject: [PATCH v1 3/3] net/mlx5/hws: add support for infiniband BTH match
Date: Thu, 11 May 2023 10:55:04 +0300 [thread overview]
Message-ID: <20230511075504.664871-4-dongzhou@nvidia.com> (raw)
In-Reply-To: <20230511075504.664871-1-dongzhou@nvidia.com>
This patch adds support for matching the opcode and dst_qp fields
of the InfiniBand BTH. Currently, only RoCEv2 packets are supported;
the BTH match item defaults to matching RoCEv2 packets.
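As an illustration, here is a minimal sketch (not part of this patch)
of how an application could request such a match through rte_flow,
assuming the IB_BTH item and header layout introduced in patch 1/3 of
this series; the opcode and QPN values below are arbitrary examples,
and <rte_flow.h> is assumed to be included:

    struct rte_flow_item_ib_bth bth_spec = {
        .hdr = {
            .opcode = 0x0a,                 /* example opcode */
            .dst_qp = { 0x00, 0x12, 0x34 }, /* 24-bit QPN, example */
        },
    };
    struct rte_flow_item_ib_bth bth_mask = {
        .hdr = {
            .opcode = 0xff,                 /* match opcode exactly */
            .dst_qp = { 0xff, 0xff, 0xff }, /* match full QPN */
        },
    };
    struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_UDP },
        { .type = RTE_FLOW_ITEM_TYPE_IB_BTH,
          .spec = &bth_spec, .mask = &bth_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };

In non-relaxed mode the conversion below implicitly adds the UDP
protocol and destination port 4791 (RoCEv2) match, so the pattern
does not need to pin the UDP destination port itself.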
Signed-off-by: Dong Zhou <dongzhou@nvidia.com>
---
drivers/net/mlx5/hws/mlx5dr_definer.c | 76 ++++++++++++++++++++++++++-
drivers/net/mlx5/hws/mlx5dr_definer.h | 2 +
drivers/net/mlx5/mlx5_flow_hw.c | 1 +
3 files changed, 78 insertions(+), 1 deletion(-)
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index f92d3e8e1f..1a427c9b64 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -10,6 +10,7 @@
#define ETH_TYPE_IPV6_VXLAN 0x86DD
#define ETH_VXLAN_DEFAULT_PORT 4789
#define IP_UDP_PORT_MPLS 6635
+#define UDP_ROCEV2_PORT 4791
#define DR_FLOW_LAYER_TUNNEL_NO_MPLS (MLX5_FLOW_LAYER_TUNNEL & ~MLX5_FLOW_LAYER_MPLS)
#define STE_NO_VLAN 0x0
@@ -171,7 +172,9 @@ struct mlx5dr_definer_conv_data {
X(SET_BE16, gre_opt_checksum, v->checksum_rsvd.checksum, rte_flow_item_gre_opt) \
X(SET, meter_color, rte_col_2_mlx5_col(v->color), rte_flow_item_meter_color) \
X(SET_BE32, ipsec_spi, v->hdr.spi, rte_flow_item_esp) \
- X(SET_BE32, ipsec_sequence_number, v->hdr.seq, rte_flow_item_esp)
+ X(SET_BE32, ipsec_sequence_number, v->hdr.seq, rte_flow_item_esp) \
+ X(SET, ib_l4_udp_port, UDP_ROCEV2_PORT, rte_flow_item_ib_bth) \
+ X(SET, ib_l4_opcode, v->hdr.opcode, rte_flow_item_ib_bth)
/* Item set function format */
#define X(set_type, func_name, value, item_type) \
@@ -583,6 +586,16 @@ mlx5dr_definer_mpls_label_set(struct mlx5dr_definer_fc *fc,
memcpy(tag + fc->byte_off + sizeof(v->label_tc_s), &v->ttl, sizeof(v->ttl));
}
+static void
+mlx5dr_definer_ib_l4_qp_set(struct mlx5dr_definer_fc *fc,
+ const void *item_spec,
+ uint8_t *tag)
+{
+ const struct rte_flow_item_ib_bth *v = item_spec;
+
+ memcpy(tag + fc->byte_off, &v->hdr.dst_qp, sizeof(v->hdr.dst_qp));
+}
+
static int
mlx5dr_definer_conv_item_eth(struct mlx5dr_definer_conv_data *cd,
struct rte_flow_item *item,
@@ -2041,6 +2054,63 @@ mlx5dr_definer_conv_item_flex_parser(struct mlx5dr_definer_conv_data *cd,
return 0;
}
+static int
+mlx5dr_definer_conv_item_ib_l4(struct mlx5dr_definer_conv_data *cd,
+ struct rte_flow_item *item,
+ int item_idx)
+{
+ const struct rte_flow_item_ib_bth *m = item->mask;
+ struct mlx5dr_definer_fc *fc;
+ bool inner = cd->tunnel;
+
+ /* In order to match on RoCEv2 (IB layer 4), we must also match
+ * on ip_protocol and l4_dport.
+ */
+ if (!cd->relaxed) {
+ fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
+ if (!fc->tag_set) {
+ fc->item_idx = item_idx;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ fc->tag_set = &mlx5dr_definer_udp_protocol_set;
+ DR_CALC_SET(fc, eth_l2, l4_type_bwc, inner);
+ }
+
+ fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, inner)];
+ if (!fc->tag_set) {
+ fc->item_idx = item_idx;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ fc->tag_set = &mlx5dr_definer_ib_l4_udp_port_set;
+ DR_CALC_SET(fc, eth_l4, destination_port, inner);
+ }
+ }
+
+ if (!m)
+ return 0;
+
+ if (m->hdr.se || m->hdr.m || m->hdr.padcnt || m->hdr.tver ||
+ m->hdr.pkey || m->hdr.f || m->hdr.b || m->hdr.rsvd0 ||
+ m->hdr.a || m->hdr.rsvd1 || !is_mem_zero(m->hdr.psn, 3)) {
+ rte_errno = ENOTSUP;
+ return rte_errno;
+ }
+
+ if (m->hdr.opcode) {
+ fc = &cd->fc[MLX5DR_DEFINER_FNAME_IB_L4_OPCODE];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ib_l4_opcode_set;
+ DR_CALC_SET_HDR(fc, ib_l4, opcode);
+ }
+
+ if (!is_mem_zero(m->hdr.dst_qp, 3)) {
+ fc = &cd->fc[MLX5DR_DEFINER_FNAME_IB_L4_QPN];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ib_l4_qp_set;
+ DR_CALC_SET_HDR(fc, ib_l4, qp);
+ }
+
+ return 0;
+}
+
static int
mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
struct mlx5dr_match_template *mt,
@@ -2182,6 +2252,10 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
item_flags |= MLX5_FLOW_LAYER_MPLS;
cd.mpls_idx++;
break;
+ case RTE_FLOW_ITEM_TYPE_IB_BTH:
+ ret = mlx5dr_definer_conv_item_ib_l4(&cd, items, i);
+ item_flags |= MLX5_FLOW_ITEM_IB_BTH;
+ break;
default:
DR_LOG(ERR, "Unsupported item type %d", items->type);
rte_errno = ENOTSUP;
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.h b/drivers/net/mlx5/hws/mlx5dr_definer.h
index 90ec4ce845..6b645f4cf0 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.h
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.h
@@ -134,6 +134,8 @@ enum mlx5dr_definer_fname {
MLX5DR_DEFINER_FNAME_OKS2_MPLS2_I,
MLX5DR_DEFINER_FNAME_OKS2_MPLS3_I,
MLX5DR_DEFINER_FNAME_OKS2_MPLS4_I,
+ MLX5DR_DEFINER_FNAME_IB_L4_OPCODE,
+ MLX5DR_DEFINER_FNAME_IB_L4_QPN,
MLX5DR_DEFINER_FNAME_MAX,
};
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 7e0ee8d883..9381646267 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -4969,6 +4969,7 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
case RTE_FLOW_ITEM_TYPE_ESP:
case RTE_FLOW_ITEM_TYPE_FLEX:
+ case RTE_FLOW_ITEM_TYPE_IB_BTH:
break;
case RTE_FLOW_ITEM_TYPE_INTEGRITY:
/*
--
2.27.0