From: Alexander Kozyrev <akozyrev@nvidia.com>
To: <dev@dpdk.org>
Cc: <orika@nvidia.com>, <matan@nvidia.com>, <michaelba@nvidia.com>,
	<valex@nvidia.com>, <suanmingm@nvidia.com>,
	<viacheslavo@nvidia.com>
Subject: [PATCH v2 2/7] net/mlx5: add support for ptype match in hardware steering
Date: Tue, 24 Oct 2023 00:07:02 +0300	[thread overview]
Message-ID: <20231023210707.1344241-3-akozyrev@nvidia.com> (raw)
In-Reply-To: <20231023210707.1344241-1-akozyrev@nvidia.com>

Packet type matching provides a quick way of identifying the
L2/L3/L4 protocols of a given packet. It allows for optimized
flow rule matching, eliminating the need to stack all the packet
headers in the matching criteria.
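
For illustration only (not part of the patch), here is a minimal
sketch of how an application could express such a rule, assuming the
RTE_FLOW_ITEM_TYPE_PTYPE item added to ethdev in this release; the
template-table setup required by hardware steering is omitted:

	#include <rte_flow.h>
	#include <rte_mbuf_ptype.h>

	/* Match TCP-over-IPv4 by packet type alone, no header stacking. */
	static const struct rte_flow_item_ptype ptype_spec = {
		.packet_type = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
	};
	static const struct rte_flow_item_ptype ptype_mask = {
		/* Full per-group masks select exact L3 and L4 types. */
		.packet_type = RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK,
	};
	static const struct rte_flow_item pattern[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_PTYPE,
			.spec = &ptype_spec,
			.mask = &ptype_mask,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};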

Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
---
 drivers/net/mlx5/hws/mlx5dr_definer.c | 161 ++++++++++++++++++++++++++
 drivers/net/mlx5/hws/mlx5dr_definer.h |   7 ++
 drivers/net/mlx5/mlx5_flow.h          |   3 +
 drivers/net/mlx5/mlx5_flow_hw.c       |   1 +
 4 files changed, 172 insertions(+)

diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 95b5d4b70e..8d846984e7 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -16,11 +16,15 @@
 #define STE_NO_VLAN	0x0
 #define STE_SVLAN	0x1
 #define STE_CVLAN	0x2
+#define STE_NO_L3	0x0
 #define STE_IPV4	0x1
 #define STE_IPV6	0x2
+#define STE_NO_L4	0x0
 #define STE_TCP		0x1
 #define STE_UDP		0x2
 #define STE_ICMP	0x3
+#define STE_NO_TUN	0x0
+#define STE_ESP		0x3
 
 #define MLX5DR_DEFINER_QUOTA_BLOCK 0
 #define MLX5DR_DEFINER_QUOTA_PASS  2
@@ -277,6 +281,82 @@ mlx5dr_definer_conntrack_tag(struct mlx5dr_definer_fc *fc,
 	DR_SET(tag, reg_value, fc->byte_off, fc->bit_off, fc->bit_mask);
 }
 
+static void
+mlx5dr_definer_ptype_l2_set(struct mlx5dr_definer_fc *fc,
+			    const void *item_spec,
+			    uint8_t *tag)
+{
+	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L2_I);
+	const struct rte_flow_item_ptype *v = item_spec;
+	uint32_t packet_type = v->packet_type &
+		(inner ? RTE_PTYPE_INNER_L2_MASK : RTE_PTYPE_L2_MASK);
+	uint8_t l2_type = STE_NO_VLAN;
+
+	if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER : RTE_PTYPE_L2_ETHER))
+		l2_type = STE_NO_VLAN;
+	else if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER_VLAN))
+		l2_type = STE_CVLAN;
+	else if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER_QINQ : RTE_PTYPE_L2_ETHER_QINQ))
+		l2_type = STE_SVLAN;
+
+	DR_SET(tag, l2_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+mlx5dr_definer_ptype_l3_set(struct mlx5dr_definer_fc *fc,
+			    const void *item_spec,
+			    uint8_t *tag)
+{
+	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L3_I);
+	const struct rte_flow_item_ptype *v = item_spec;
+	uint32_t packet_type = v->packet_type &
+		(inner ? RTE_PTYPE_INNER_L3_MASK : RTE_PTYPE_L3_MASK);
+	uint8_t l3_type = STE_NO_L3;
+
+	if (packet_type == (inner ? RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_L3_IPV4))
+		l3_type = STE_IPV4;
+	else if (packet_type == (inner ? RTE_PTYPE_INNER_L3_IPV6 : RTE_PTYPE_L3_IPV6))
+		l3_type = STE_IPV6;
+
+	DR_SET(tag, l3_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+mlx5dr_definer_ptype_l4_set(struct mlx5dr_definer_fc *fc,
+			    const void *item_spec,
+			    uint8_t *tag)
+{
+	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L4_I);
+	const struct rte_flow_item_ptype *v = item_spec;
+	uint32_t packet_type = v->packet_type &
+		(inner ? RTE_PTYPE_INNER_L4_MASK : RTE_PTYPE_L4_MASK);
+	uint8_t l4_type = STE_NO_L4;
+
+	if (packet_type == (inner ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP))
+		l4_type = STE_TCP;
+	else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP))
+		l4_type = STE_UDP;
+	else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_ICMP : RTE_PTYPE_L4_ICMP))
+		l4_type = STE_ICMP;
+
+	DR_SET(tag, l4_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+mlx5dr_definer_ptype_tunnel_set(struct mlx5dr_definer_fc *fc,
+				const void *item_spec,
+				uint8_t *tag)
+{
+	const struct rte_flow_item_ptype *v = item_spec;
+	uint32_t packet_type = v->packet_type & RTE_PTYPE_TUNNEL_MASK;
+	uint8_t tun_type = STE_NO_TUN;
+
+	if (packet_type == RTE_PTYPE_TUNNEL_ESP)
+		tun_type = STE_ESP;
+
+	DR_SET(tag, tun_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
 static void
 mlx5dr_definer_integrity_set(struct mlx5dr_definer_fc *fc,
 			     const void *item_spec,
@@ -1709,6 +1789,83 @@ mlx5dr_definer_conv_item_gre_key(struct mlx5dr_definer_conv_data *cd,
 	return 0;
 }
 
+static int
+mlx5dr_definer_conv_item_ptype(struct mlx5dr_definer_conv_data *cd,
+			       struct rte_flow_item *item,
+			       int item_idx)
+{
+	const struct rte_flow_item_ptype *m = item->mask;
+	struct mlx5dr_definer_fc *fc;
+
+	if (!m)
+		return 0;
+
+	if (!(m->packet_type &
+	      (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK | RTE_PTYPE_TUNNEL_MASK |
+	       RTE_PTYPE_INNER_L2_MASK | RTE_PTYPE_INNER_L3_MASK | RTE_PTYPE_INNER_L4_MASK))) {
+		rte_errno = ENOTSUP;
+		return rte_errno;
+	}
+
+	if (m->packet_type & RTE_PTYPE_L2_MASK) {
+		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L2, false)];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ptype_l2_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, false);
+	}
+
+	if (m->packet_type & RTE_PTYPE_INNER_L2_MASK) {
+		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L2, true)];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ptype_l2_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, true);
+	}
+
+	if (m->packet_type & RTE_PTYPE_L3_MASK) {
+		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L3, false)];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ptype_l3_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		DR_CALC_SET(fc, eth_l2, l3_type, false);
+	}
+
+	if (m->packet_type & RTE_PTYPE_INNER_L3_MASK) {
+		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L3, true)];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ptype_l3_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		DR_CALC_SET(fc, eth_l2, l3_type, true);
+	}
+
+	if (m->packet_type & RTE_PTYPE_L4_MASK) {
+		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4, false)];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ptype_l4_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		DR_CALC_SET(fc, eth_l2, l4_type, false);
+	}
+
+	if (m->packet_type & RTE_PTYPE_INNER_L4_MASK) {
+		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4, true)];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ptype_l4_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		DR_CALC_SET(fc, eth_l2, l4_type, true);
+	}
+
+	if (m->packet_type & RTE_PTYPE_TUNNEL_MASK) {
+		fc = &cd->fc[MLX5DR_DEFINER_FNAME_PTYPE_TUNNEL];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ptype_tunnel_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		DR_CALC_SET(fc, eth_l2, l4_type_bwc, false);
+	}
+
+	return 0;
+}
+
 static int
 mlx5dr_definer_conv_item_integrity(struct mlx5dr_definer_conv_data *cd,
 				   struct rte_flow_item *item,
@@ -2332,6 +2489,10 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 			ret = mlx5dr_definer_conv_item_ib_l4(&cd, items, i);
 			item_flags |= MLX5_FLOW_ITEM_IB_BTH;
 			break;
+		case RTE_FLOW_ITEM_TYPE_PTYPE:
+			ret = mlx5dr_definer_conv_item_ptype(&cd, items, i);
+			item_flags |= MLX5_FLOW_ITEM_PTYPE;
+			break;
 		default:
 			DR_LOG(ERR, "Unsupported item type %d", items->type);
 			rte_errno = ENOTSUP;
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.h b/drivers/net/mlx5/hws/mlx5dr_definer.h
index f5a541bc17..ea07f55d52 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.h
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.h
@@ -141,6 +141,13 @@ enum mlx5dr_definer_fname {
 	MLX5DR_DEFINER_FNAME_IB_L4_OPCODE,
 	MLX5DR_DEFINER_FNAME_IB_L4_QPN,
 	MLX5DR_DEFINER_FNAME_IB_L4_A,
+	MLX5DR_DEFINER_FNAME_PTYPE_L2_O,
+	MLX5DR_DEFINER_FNAME_PTYPE_L2_I,
+	MLX5DR_DEFINER_FNAME_PTYPE_L3_O,
+	MLX5DR_DEFINER_FNAME_PTYPE_L3_I,
+	MLX5DR_DEFINER_FNAME_PTYPE_L4_O,
+	MLX5DR_DEFINER_FNAME_PTYPE_L4_I,
+	MLX5DR_DEFINER_FNAME_PTYPE_TUNNEL,
 	MLX5DR_DEFINER_FNAME_MAX,
 };
 
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 903ff66d72..98b267245c 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -233,6 +233,9 @@ enum mlx5_feature_name {
 /* IB BTH ITEM. */
 #define MLX5_FLOW_ITEM_IB_BTH (1ull << 51)
 
+/* PTYPE ITEM */
+#define MLX5_FLOW_ITEM_PTYPE (1ull << 52)
+
 /* NSH ITEM */
 #define MLX5_FLOW_ITEM_NSH (1ull << 53)
 
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 6fcf654e4a..34b3c9e6ad 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -5382,6 +5382,7 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
 		case RTE_FLOW_ITEM_TYPE_ESP:
 		case RTE_FLOW_ITEM_TYPE_FLEX:
 		case RTE_FLOW_ITEM_TYPE_IB_BTH:
+		case RTE_FLOW_ITEM_TYPE_PTYPE:
 			break;
 		case RTE_FLOW_ITEM_TYPE_INTEGRITY:
 			/*
-- 
2.18.2
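
A note on the converter semantics above, with a hedged sketch (again
not part of the patch): mlx5dr_definer_conv_item_ptype() treats a NULL
mask as a no-op, rejects a mask that touches none of the supported
L2/L3/L4/tunnel groups with ENOTSUP, and creates one definer field per
masked group. For example, to match only the tunnel type:

	/* Sketch only: match ESP-tunneled traffic regardless of other
	 * layers. Only the tunnel mask bits are set, so only the
	 * MLX5DR_DEFINER_FNAME_PTYPE_TUNNEL field is created, and the
	 * spec maps to STE_ESP in the l4_type_bwc header field. */
	static const struct rte_flow_item_ptype tun_spec = {
		.packet_type = RTE_PTYPE_TUNNEL_ESP,
	};
	static const struct rte_flow_item_ptype tun_mask = {
		.packet_type = RTE_PTYPE_TUNNEL_MASK,
	};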


Thread overview: 14+ messages
2023-10-09 16:36 [PATCH 0/5] ptype matching support in mlx5 Alexander Kozyrev
2023-10-09 16:36 ` [PATCH 1/5] net/mlx5: add support for ptype match in hardware steering Alexander Kozyrev
2023-10-09 16:36 ` [PATCH 2/5] net/mlx5/hws: add support for fragmented ptype match Alexander Kozyrev
2023-10-09 16:36 ` [PATCH 3/5] doc: add PMD ptype item limitations Alexander Kozyrev
2023-10-09 16:36 ` [PATCH 4/5] net/mlx5/hws: remove csum check from L3 ok check Alexander Kozyrev
2023-10-09 16:36 ` [PATCH 5/5] net/mlx5/hws: fix integrity bits level Alexander Kozyrev
2023-10-23 21:07 ` [PATCH v2 0/7] ptype matching support in mlx5 Alexander Kozyrev
2023-10-23 21:07   ` [PATCH v2 1/7] ethdev: fix ESP packet type description Alexander Kozyrev
2023-10-23 21:07   ` [PATCH v2 2/7] net/mlx5: add support for ptype match in hardware steering Alexander Kozyrev (this message)
2023-10-23 21:07   ` [PATCH v2 3/7] net/mlx5/hws: add support for fragmented ptype match Alexander Kozyrev
2023-10-23 21:07   ` [PATCH v2 4/7] doc: add PMD ptype item limitations Alexander Kozyrev
2023-10-23 21:07   ` [PATCH v2 5/7] doc: add packet type matching item to release notes Alexander Kozyrev
2023-10-23 21:07   ` [PATCH v2 6/7] net/mlx5/hws: remove csum check from L3 ok check Alexander Kozyrev
2023-10-23 21:07   ` [PATCH v2 7/7] net/mlx5/hws: fix integrity bits level Alexander Kozyrev
