From: Michael Baum <michaelba@nvidia.com>
To: <dev@dpdk.org>
Cc: Matan Azrad <matan@nvidia.com>,
Raslan Darawsheh <rasland@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
Erez Shitrit <erezsh@nvidia.com>
Subject: [PATCH v2 1/2] net/mlx5/hws: support matching on MPLSoUDP
Date: Wed, 22 Feb 2023 23:23:09 +0200
Message-ID: <20230222212310.3295762-2-michaelba@nvidia.com>
In-Reply-To: <20230222212310.3295762-1-michaelba@nvidia.com>
From: Erez Shitrit <erezsh@nvidia.com>
Add support for matching on MPLS labels when they are encapsulated in UDP (MPLSoUDP).
Up to 5 MPLS labels can be matched, with or without the MPLS label value.
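For reference, a minimal sketch of how an application could express such a match
through the rte_flow API (illustrative only, not part of this patch; the label
value, the mask bytes and the omission of the HWS queue/template setup are
assumptions):

    #include <rte_flow.h>

    /* Match MPLSoUDP: place the MPLS item after the UDP item. With this
     * patch the definer sets UDP destination port 6635 for the MPLS item
     * when the UDP item leaves it unset, so it need not be given here.
     * The label value 16 below is arbitrary.
     */
    static const struct rte_flow_item_mpls mpls_spec = {
    	.label_tc_s = { 0x00, 0x01, 0x00 },	/* label 16, TC 0, S 0 */
    };
    static const struct rte_flow_item_mpls mpls_mask = {
    	.label_tc_s = { 0xff, 0xff, 0xf0 },	/* match the 20-bit label only */
    };
    static const struct rte_flow_item pattern[] = {
    	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
    	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
    	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
    	{ .type = RTE_FLOW_ITEM_TYPE_MPLS,
    	  .spec = &mpls_spec, .mask = &mpls_mask },
    	{ .type = RTE_FLOW_ITEM_TYPE_END },
    };

Several MPLS items may be stacked in the pattern (up to 5) to match multiple labels.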
Signed-off-by: Erez Shitrit <erezsh@nvidia.com>
---
drivers/net/mlx5/hws/mlx5dr_definer.c | 183 +++++++++++++++++++++++++-
drivers/net/mlx5/hws/mlx5dr_definer.h | 32 ++++-
2 files changed, 212 insertions(+), 3 deletions(-)
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 6374f9df33..4bfc2caed0 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -9,6 +9,8 @@
#define ETH_TYPE_IPV4_VXLAN 0x0800
#define ETH_TYPE_IPV6_VXLAN 0x86DD
#define ETH_VXLAN_DEFAULT_PORT 4789
+#define IP_UDP_PORT_MPLS 6635
+#define DR_FLOW_LAYER_TUNNEL_NO_MPLS (MLX5_FLOW_LAYER_TUNNEL & ~MLX5_FLOW_LAYER_MPLS)
#define STE_NO_VLAN 0x0
#define STE_SVLAN 0x1
@@ -104,6 +106,8 @@ struct mlx5dr_definer_conv_data {
struct mlx5dr_definer_fc *fc;
uint8_t relaxed;
uint8_t tunnel;
+ uint8_t mpls_idx;
+ enum rte_flow_item_type last_item;
};
/* Xmacro used to create generic item setter from items */
@@ -154,6 +158,7 @@ struct mlx5dr_definer_conv_data {
X(SET, gtp_ext_hdr_qfi, v->hdr.qfi, rte_flow_item_gtp_psc) \
X(SET, vxlan_flags, v->flags, rte_flow_item_vxlan) \
X(SET, vxlan_udp_port, ETH_VXLAN_DEFAULT_PORT, rte_flow_item_vxlan) \
+ X(SET, mpls_udp_port, IP_UDP_PORT_MPLS, rte_flow_item_mpls) \
X(SET, source_qp, v->queue, mlx5_rte_flow_item_sq) \
X(SET, tag, v->data, rte_flow_item_tag) \
X(SET, metadata, v->data, rte_flow_item_meta) \
@@ -457,6 +462,89 @@ mlx5dr_definer_vport_set(struct mlx5dr_definer_fc *fc,
DR_SET(tag, regc_value, fc->byte_off, fc->bit_off, fc->bit_mask);
}
+static struct mlx5dr_definer_fc *
+mlx5dr_definer_get_mpls_fc(struct mlx5dr_definer_conv_data *cd, bool inner)
+{
+ uint8_t mpls_idx = cd->mpls_idx;
+ struct mlx5dr_definer_fc *fc;
+
+ switch (mpls_idx) {
+ case 0:
+ fc = &cd->fc[DR_CALC_FNAME(MPLS0, inner)];
+ DR_CALC_SET_HDR(fc, mpls_inner, mpls0_label);
+ break;
+ case 1:
+ fc = &cd->fc[DR_CALC_FNAME(MPLS1, inner)];
+ DR_CALC_SET_HDR(fc, mpls_inner, mpls1_label);
+ break;
+ case 2:
+ fc = &cd->fc[DR_CALC_FNAME(MPLS2, inner)];
+ DR_CALC_SET_HDR(fc, mpls_inner, mpls2_label);
+ break;
+ case 3:
+ fc = &cd->fc[DR_CALC_FNAME(MPLS3, inner)];
+ DR_CALC_SET_HDR(fc, mpls_inner, mpls3_label);
+ break;
+ case 4:
+ fc = &cd->fc[DR_CALC_FNAME(MPLS4, inner)];
+ DR_CALC_SET_HDR(fc, mpls_inner, mpls4_label);
+ break;
+ default:
+ rte_errno = ENOTSUP;
+ DR_LOG(ERR, "MPLS index %d is not supported\n", mpls_idx);
+ return NULL;
+ }
+
+ return fc;
+}
+
+static struct mlx5dr_definer_fc *
+mlx5dr_definer_get_mpls_oks_fc(struct mlx5dr_definer_conv_data *cd, bool inner)
+{
+ uint8_t mpls_idx = cd->mpls_idx;
+ struct mlx5dr_definer_fc *fc;
+
+ switch (mpls_idx) {
+ case 0:
+ fc = &cd->fc[DR_CALC_FNAME(OKS2_MPLS0, inner)];
+ DR_CALC_SET_HDR(fc, oks2, second_mpls0_qualifier);
+ break;
+ case 1:
+ fc = &cd->fc[DR_CALC_FNAME(OKS2_MPLS1, inner)];
+ DR_CALC_SET_HDR(fc, oks2, second_mpls1_qualifier);
+ break;
+ case 2:
+ fc = &cd->fc[DR_CALC_FNAME(OKS2_MPLS2, inner)];
+ DR_CALC_SET_HDR(fc, oks2, second_mpls2_qualifier);
+ break;
+ case 3:
+ fc = &cd->fc[DR_CALC_FNAME(OKS2_MPLS3, inner)];
+ DR_CALC_SET_HDR(fc, oks2, second_mpls3_qualifier);
+ break;
+ case 4:
+ fc = &cd->fc[DR_CALC_FNAME(OKS2_MPLS4, inner)];
+ DR_CALC_SET_HDR(fc, oks2, second_mpls4_qualifier);
+ break;
+ default:
+ rte_errno = ENOTSUP;
+ DR_LOG(ERR, "MPLS index %d is not supported\n", mpls_idx);
+ return NULL;
+ }
+
+ return fc;
+}
+
+static void
+mlx5dr_definer_mpls_label_set(struct mlx5dr_definer_fc *fc,
+ const void *item_spec,
+ uint8_t *tag)
+{
+ const struct rte_flow_item_mpls *v = item_spec;
+
+ memcpy(tag + fc->byte_off, v->label_tc_s, sizeof(v->label_tc_s));
+ memcpy(tag + fc->byte_off + sizeof(v->label_tc_s), &v->ttl, sizeof(v->ttl));
+}
+
static int
mlx5dr_definer_conv_item_eth(struct mlx5dr_definer_conv_data *cd,
struct rte_flow_item *item,
@@ -1157,6 +1245,74 @@ mlx5dr_definer_conv_item_vxlan(struct mlx5dr_definer_conv_data *cd,
return 0;
}
+static int
+mlx5dr_definer_conv_item_mpls(struct mlx5dr_definer_conv_data *cd,
+ struct rte_flow_item *item,
+ int item_idx)
+{
+ const struct rte_flow_item_mpls *m = item->mask;
+ struct mlx5dr_definer_fc *fc;
+ bool inner = cd->tunnel;
+
+ if (inner) {
+ DR_LOG(ERR, "Inner MPLS item not supported");
+ rte_errno = ENOTSUP;
+ return rte_errno;
+ }
+
+ if (cd->relaxed) {
+ DR_LOG(ERR, "Relaxed mode is not supported");
+ rte_errno = ENOTSUP;
+ return rte_errno;
+ }
+
+ /* Currently support only MPLSoUDP */
+ if (cd->last_item != RTE_FLOW_ITEM_TYPE_UDP &&
+ cd->last_item != RTE_FLOW_ITEM_TYPE_MPLS) {
+ DR_LOG(ERR, "MPLS supported only after UDP");
+ rte_errno = ENOTSUP;
+ return rte_errno;
+ }
+
+ /* In order to match on MPLS we must match on ip_protocol and l4_dport. */
+ fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, false)];
+ if (!fc->tag_set) {
+ fc->item_idx = item_idx;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ fc->tag_set = &mlx5dr_definer_udp_protocol_set;
+ DR_CALC_SET(fc, eth_l2, l4_type_bwc, false);
+ }
+
+ fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, false)];
+ if (!fc->tag_set) {
+ fc->item_idx = item_idx;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ fc->tag_set = &mlx5dr_definer_mpls_udp_port_set;
+ DR_CALC_SET(fc, eth_l4, destination_port, false);
+ }
+
+ if (m && (!is_mem_zero(m->label_tc_s, 3) || m->ttl)) {
+ /* According to HW MPLSoUDP is handled as inner */
+ fc = mlx5dr_definer_get_mpls_fc(cd, true);
+ if (!fc)
+ return rte_errno;
+
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_mpls_label_set;
+ } else { /* Mask relevant oks2 bit, indicates MPLS label exists.
+ * According to HW MPLSoUDP is handled as inner
+ */
+ fc = mlx5dr_definer_get_mpls_oks_fc(cd, true);
+ if (!fc)
+ return rte_errno;
+
+ fc->item_idx = item_idx;
+ fc->tag_set = mlx5dr_definer_ones_set;
+ }
+
+ return 0;
+}
+
static struct mlx5dr_definer_fc *
mlx5dr_definer_get_register_fc(struct mlx5dr_definer_conv_data *cd, int reg)
{
@@ -1782,6 +1938,24 @@ mlx5dr_definer_conv_item_esp(struct mlx5dr_definer_conv_data *cd,
return 0;
}
+static void mlx5dr_definer_set_conv_tunnel(enum rte_flow_item_type cur_type,
+ uint64_t item_flags,
+ struct mlx5dr_definer_conv_data *cd)
+{
+ /* Already a tunnel, nothing to change */
+ if (cd->tunnel)
+ return;
+
+ /* we can have more than one MPLS label at each level (inner/outer), so
+ * consider tunnel only when it is already under tunnel or if we moved to the
+ * second MPLS level.
+ */
+ if (cur_type != RTE_FLOW_ITEM_TYPE_MPLS)
+ cd->tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ else
+ cd->tunnel = !!(item_flags & DR_FLOW_LAYER_TUNNEL_NO_MPLS);
+}
+
static int
mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
struct mlx5dr_match_template *mt,
@@ -1799,7 +1973,7 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
/* Collect all RTE fields to the field array and set header layout */
for (i = 0; items->type != RTE_FLOW_ITEM_TYPE_END; i++, items++) {
- cd.tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ mlx5dr_definer_set_conv_tunnel(items->type, item_flags, &cd);
ret = mlx5dr_definer_check_item_range_supp(items);
if (ret)
@@ -1913,12 +2087,19 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
ret = mlx5dr_definer_conv_item_esp(&cd, items, i);
item_flags |= MLX5_FLOW_ITEM_ESP;
break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ ret = mlx5dr_definer_conv_item_mpls(&cd, items, i);
+ item_flags |= MLX5_FLOW_LAYER_MPLS;
+ cd.mpls_idx++;
+ break;
default:
DR_LOG(ERR, "Unsupported item type %d", items->type);
rte_errno = ENOTSUP;
return rte_errno;
}
+ cd.last_item = items->type;
+
if (ret) {
DR_LOG(ERR, "Failed processing item type: %d", items->type);
return ret;
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.h b/drivers/net/mlx5/hws/mlx5dr_definer.h
index 4bf7bd8df3..0cd83db756 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.h
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.h
@@ -114,6 +114,26 @@ enum mlx5dr_definer_fname {
MLX5DR_DEFINER_FNAME_ICMP_DW2,
MLX5DR_DEFINER_FNAME_ESP_SPI,
MLX5DR_DEFINER_FNAME_ESP_SEQUENCE_NUMBER,
+ MLX5DR_DEFINER_FNAME_MPLS0_O,
+ MLX5DR_DEFINER_FNAME_MPLS1_O,
+ MLX5DR_DEFINER_FNAME_MPLS2_O,
+ MLX5DR_DEFINER_FNAME_MPLS3_O,
+ MLX5DR_DEFINER_FNAME_MPLS4_O,
+ MLX5DR_DEFINER_FNAME_MPLS0_I,
+ MLX5DR_DEFINER_FNAME_MPLS1_I,
+ MLX5DR_DEFINER_FNAME_MPLS2_I,
+ MLX5DR_DEFINER_FNAME_MPLS3_I,
+ MLX5DR_DEFINER_FNAME_MPLS4_I,
+ MLX5DR_DEFINER_FNAME_OKS2_MPLS0_O,
+ MLX5DR_DEFINER_FNAME_OKS2_MPLS1_O,
+ MLX5DR_DEFINER_FNAME_OKS2_MPLS2_O,
+ MLX5DR_DEFINER_FNAME_OKS2_MPLS3_O,
+ MLX5DR_DEFINER_FNAME_OKS2_MPLS4_O,
+ MLX5DR_DEFINER_FNAME_OKS2_MPLS0_I,
+ MLX5DR_DEFINER_FNAME_OKS2_MPLS1_I,
+ MLX5DR_DEFINER_FNAME_OKS2_MPLS2_I,
+ MLX5DR_DEFINER_FNAME_OKS2_MPLS3_I,
+ MLX5DR_DEFINER_FNAME_OKS2_MPLS4_I,
MLX5DR_DEFINER_FNAME_MAX,
};
@@ -434,6 +454,14 @@ struct mlx5_ifc_definer_hl_registers_bits {
u8 register_c_1[0x20];
};
+struct mlx5_ifc_definer_hl_mpls_bits {
+ u8 mpls0_label[0x20];
+ u8 mpls1_label[0x20];
+ u8 mpls2_label[0x20];
+ u8 mpls3_label[0x20];
+ u8 mpls4_label[0x20];
+};
+
struct mlx5_ifc_definer_hl_bits {
struct mlx5_ifc_definer_hl_eth_l2_bits eth_l2_outer;
struct mlx5_ifc_definer_hl_eth_l2_bits eth_l2_inner;
@@ -462,8 +490,8 @@ struct mlx5_ifc_definer_hl_bits {
u8 unsupported_udp_misc_inner[0x20];
struct mlx5_ifc_definer_tcp_icmp_header_bits tcp_icmp;
struct mlx5_ifc_definer_hl_tunnel_header_bits tunnel_header;
- u8 unsupported_mpls_outer[0xa0];
- u8 unsupported_mpls_inner[0xa0];
+ struct mlx5_ifc_definer_hl_mpls_bits mpls_outer;
+ struct mlx5_ifc_definer_hl_mpls_bits mpls_inner;
u8 unsupported_config_headers_outer[0x80];
u8 unsupported_config_headers_inner[0x80];
u8 unsupported_random_number[0x20];
--
2.25.1