From: Alexander Kozyrev <akozyrev@nvidia.com>
To: <dev@dpdk.org>
Cc: <orika@nvidia.com>, <matan@nvidia.com>, <michaelba@nvidia.com>,
<valex@nvidia.com>, <suanmingm@nvidia.com>,
<viacheslavo@nvidia.com>
Subject: [PATCH 1/5] net/mlx5: add support for ptype match in hardware steering
Date: Mon, 9 Oct 2023 19:36:13 +0300 [thread overview]
Message-ID: <20231009163617.3999365-2-akozyrev@nvidia.com> (raw)
In-Reply-To: <20231009163617.3999365-1-akozyrev@nvidia.com>
The packet type matching provides a quick way of finding out
the L2/L3/L4 protocols in a given packet. That helps with
optimized flow rule matching, eliminating the need to
stack all the packet headers in the matching criteria.
Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
---
drivers/net/mlx5/hws/mlx5dr_definer.c | 170 ++++++++++++++++++++++++++
drivers/net/mlx5/hws/mlx5dr_definer.h | 8 ++
drivers/net/mlx5/mlx5_flow.h | 3 +
drivers/net/mlx5/mlx5_flow_hw.c | 1 +
4 files changed, 182 insertions(+)
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 88f22e7f70..e3f4a3c0a8 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -16,11 +16,14 @@
#define STE_NO_VLAN 0x0
#define STE_SVLAN 0x1
#define STE_CVLAN 0x2
+#define STE_NO_L3 0x0
#define STE_IPV4 0x1
#define STE_IPV6 0x2
+#define STE_NO_L4 0x0
#define STE_TCP 0x1
#define STE_UDP 0x2
#define STE_ICMP 0x3
+#define STE_ESP 0x3
#define MLX5DR_DEFINER_QUOTA_BLOCK 0
#define MLX5DR_DEFINER_QUOTA_PASS 2
@@ -276,6 +279,88 @@ mlx5dr_definer_conntrack_tag(struct mlx5dr_definer_fc *fc,
DR_SET(tag, reg_value, fc->byte_off, fc->bit_off, fc->bit_mask);
}
+static void
+mlx5dr_definer_ptype_l2_set(struct mlx5dr_definer_fc *fc,
+ const void *item_spec,
+ uint8_t *tag)
+{
+ bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L2_I);
+ const struct rte_flow_item_ptype *v = item_spec;
+ uint32_t packet_type = v->packet_type &
+ (inner ? RTE_PTYPE_INNER_L2_MASK : RTE_PTYPE_L2_MASK);
+ uint8_t l2_type = STE_NO_VLAN;
+
+ if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER : RTE_PTYPE_L2_ETHER))
+ l2_type = STE_NO_VLAN;
+ else if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER_VLAN))
+ l2_type = STE_CVLAN;
+ else if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER_QINQ : RTE_PTYPE_L2_ETHER_QINQ))
+ l2_type = STE_SVLAN;
+
+ DR_SET(tag, l2_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+mlx5dr_definer_ptype_l3_set(struct mlx5dr_definer_fc *fc,
+ const void *item_spec,
+ uint8_t *tag)
+{
+ bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L3_I);
+ const struct rte_flow_item_ptype *v = item_spec;
+ uint32_t packet_type = v->packet_type &
+ (inner ? RTE_PTYPE_INNER_L3_MASK : RTE_PTYPE_L3_MASK);
+ uint8_t l3_type = STE_NO_L3;
+
+ if (packet_type == (inner ? RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_L3_IPV4))
+ l3_type = STE_IPV4;
+ else if (packet_type == (inner ? RTE_PTYPE_INNER_L3_IPV6 : RTE_PTYPE_L3_IPV6))
+ l3_type = STE_IPV6;
+
+ DR_SET(tag, l3_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+mlx5dr_definer_ptype_l4_set(struct mlx5dr_definer_fc *fc,
+ const void *item_spec,
+ uint8_t *tag)
+{
+ bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L4_I);
+ const struct rte_flow_item_ptype *v = item_spec;
+ uint32_t packet_type = v->packet_type &
+ (inner ? RTE_PTYPE_INNER_L4_MASK : RTE_PTYPE_L4_MASK);
+ uint8_t l4_type = STE_NO_L4;
+
+ if (packet_type == (inner ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP))
+ l4_type = STE_TCP;
+ else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP))
+ l4_type = STE_UDP;
+ else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_ESP : RTE_PTYPE_L4_ESP))
+ l4_type = STE_ESP;
+
+ DR_SET(tag, l4_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+mlx5dr_definer_ptype_l4_ext_set(struct mlx5dr_definer_fc *fc,
+ const void *item_spec,
+ uint8_t *tag)
+{
+ bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L4_EXT_I);
+ const struct rte_flow_item_ptype *v = item_spec;
+ uint32_t packet_type = v->packet_type &
+ (inner ? RTE_PTYPE_INNER_L4_MASK : RTE_PTYPE_L4_MASK);
+ uint8_t l4_type = STE_NO_L4;
+
+ if (packet_type == (inner ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP))
+ l4_type = STE_TCP;
+ else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP))
+ l4_type = STE_UDP;
+ else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_ICMP : RTE_PTYPE_L4_ICMP))
+ l4_type = STE_ICMP;
+
+ DR_SET(tag, l4_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
static void
mlx5dr_definer_integrity_set(struct mlx5dr_definer_fc *fc,
const void *item_spec,
@@ -1692,6 +1777,87 @@ mlx5dr_definer_conv_item_gre_key(struct mlx5dr_definer_conv_data *cd,
return 0;
}
+static int
+mlx5dr_definer_conv_item_ptype(struct mlx5dr_definer_conv_data *cd,
+ struct rte_flow_item *item,
+ int item_idx)
+{
+ const struct rte_flow_item_ptype *m = item->mask;
+ struct mlx5dr_definer_fc *fc;
+
+ if (!m)
+ return 0;
+
+ if (!(m->packet_type &
+ (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK |
+ RTE_PTYPE_INNER_L2_MASK | RTE_PTYPE_INNER_L3_MASK | RTE_PTYPE_INNER_L4_MASK))) {
+ rte_errno = ENOTSUP;
+ return rte_errno;
+ }
+
+ if (m->packet_type & RTE_PTYPE_L2_MASK) {
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_L2, false)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_l2_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, false);
+ }
+
+ if (m->packet_type & RTE_PTYPE_INNER_L2_MASK) {
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_L2, true)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_l2_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, true);
+ }
+
+ if (m->packet_type & RTE_PTYPE_L3_MASK) {
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_L3, false)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_l3_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, l3_type, false);
+ }
+
+ if (m->packet_type & RTE_PTYPE_INNER_L3_MASK) {
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_L3, true)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_l3_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, l3_type, true);
+ }
+
+ if (m->packet_type & RTE_PTYPE_L4_MASK) {
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4, false)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_l4_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, l4_type_bwc, false);
+
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4_EXT, false)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_l4_ext_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, l4_type, false);
+ }
+
+ if (m->packet_type & RTE_PTYPE_INNER_L4_MASK) {
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4, true)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_l4_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, l4_type_bwc, true);
+
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4_EXT, true)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_l4_ext_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, l4_type, true);
+ }
+
+ return 0;
+}
+
static int
mlx5dr_definer_conv_item_integrity(struct mlx5dr_definer_conv_data *cd,
struct rte_flow_item *item,
@@ -2308,6 +2474,10 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
ret = mlx5dr_definer_conv_item_ib_l4(&cd, items, i);
item_flags |= MLX5_FLOW_ITEM_IB_BTH;
break;
+ case RTE_FLOW_ITEM_TYPE_PTYPE:
+ ret = mlx5dr_definer_conv_item_ptype(&cd, items, i);
+ item_flags |= MLX5_FLOW_ITEM_PTYPE;
+ break;
default:
DR_LOG(ERR, "Unsupported item type %d", items->type);
rte_errno = ENOTSUP;
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.h b/drivers/net/mlx5/hws/mlx5dr_definer.h
index 6b645f4cf0..6b02161e02 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.h
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.h
@@ -136,6 +136,14 @@ enum mlx5dr_definer_fname {
MLX5DR_DEFINER_FNAME_OKS2_MPLS4_I,
MLX5DR_DEFINER_FNAME_IB_L4_OPCODE,
MLX5DR_DEFINER_FNAME_IB_L4_QPN,
+ MLX5DR_DEFINER_FNAME_PTYPE_L2_O,
+ MLX5DR_DEFINER_FNAME_PTYPE_L2_I,
+ MLX5DR_DEFINER_FNAME_PTYPE_L3_O,
+ MLX5DR_DEFINER_FNAME_PTYPE_L3_I,
+ MLX5DR_DEFINER_FNAME_PTYPE_L4_O,
+ MLX5DR_DEFINER_FNAME_PTYPE_L4_I,
+ MLX5DR_DEFINER_FNAME_PTYPE_L4_EXT_O,
+ MLX5DR_DEFINER_FNAME_PTYPE_L4_EXT_I,
MLX5DR_DEFINER_FNAME_MAX,
};
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 6beac3902c..c670bf72bc 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -233,6 +233,9 @@ enum mlx5_feature_name {
/* IB BTH ITEM. */
#define MLX5_FLOW_ITEM_IB_BTH (1ull << 51)
+/* PTYPE ITEM */
+#define MLX5_FLOW_ITEM_PTYPE (1ull << 52)
+
/* Outer Masks. */
#define MLX5_FLOW_LAYER_OUTER_L3 \
(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index b7853d3379..587d55148e 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -5392,6 +5392,7 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_ESP:
case RTE_FLOW_ITEM_TYPE_FLEX:
case RTE_FLOW_ITEM_TYPE_IB_BTH:
+ case RTE_FLOW_ITEM_TYPE_PTYPE:
break;
case RTE_FLOW_ITEM_TYPE_INTEGRITY:
/*
--
2.18.2
next prev parent reply other threads:[~2023-10-09 16:36 UTC|newest]
Thread overview: 14+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-10-09 16:36 [PATCH 0/5] ptype matching support in mlx5 Alexander Kozyrev
2023-10-09 16:36 ` Alexander Kozyrev [this message]
2023-10-09 16:36 ` [PATCH 2/5] net/mlx5/hws: add support for fragmented ptype match Alexander Kozyrev
2023-10-09 16:36 ` [PATCH 3/5] doc: add PMD ptype item limitations Alexander Kozyrev
2023-10-09 16:36 ` [PATCH 4/5] net/mlx5/hws: remove csum check from L3 ok check Alexander Kozyrev
2023-10-09 16:36 ` [PATCH 5/5] net/mlx5/hws: fix integrity bits level Alexander Kozyrev
2023-10-23 21:07 ` [PATCH v2 0/7] ptype matching support in mlx5 Alexander Kozyrev
2023-10-23 21:07 ` [PATCH v2 1/7] ethdev: fix ESP packet type description Alexander Kozyrev
2023-10-23 21:07 ` [PATCH v2 2/7] net/mlx5: add support for ptype match in hardware steering Alexander Kozyrev
2023-10-23 21:07 ` [PATCH v2 3/7] net/mlx5/hws: add support for fragmented ptype match Alexander Kozyrev
2023-10-23 21:07 ` [PATCH v2 4/7] doc: add PMD ptype item limitations Alexander Kozyrev
2023-10-23 21:07 ` [PATCH v2 5/7] doc: add packet type matching item to release notes Alexander Kozyrev
2023-10-23 21:07 ` [PATCH v2 6/7] net/mlx5/hws: remove csum check from L3 ok check Alexander Kozyrev
2023-10-23 21:07 ` [PATCH v2 7/7] net/mlx5/hws: fix integrity bits level Alexander Kozyrev
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20231009163617.3999365-2-akozyrev@nvidia.com \
--to=akozyrev@nvidia.com \
--cc=dev@dpdk.org \
--cc=matan@nvidia.com \
--cc=michaelba@nvidia.com \
--cc=orika@nvidia.com \
--cc=suanmingm@nvidia.com \
--cc=valex@nvidia.com \
--cc=viacheslavo@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).