* [PATCH v3 1/7] ethdev: fix ESP packet type description
2023-10-24 17:51 ` [PATCH v3 0/7] ptype matching support in mlx5 Alexander Kozyrev
@ 2023-10-24 17:51 ` Alexander Kozyrev
2023-10-24 17:51 ` [PATCH v3 2/7] net/mlx5: add support for ptype match in hardware steering Alexander Kozyrev
` (6 subsequent siblings)
7 siblings, 0 replies; 27+ messages in thread
From: Alexander Kozyrev @ 2023-10-24 17:51 UTC (permalink / raw)
To: dev; +Cc: orika, matan, michaelba, valex, suanmingm, viacheslavo
The correct protocol number for ESP (IP Encapsulating Security Payload)
packet type is 50. 51 is IPSec AH (Authentication Header).
Fixes: 1e84afd3906b ("mbuf: add security crypto flags and fields")
Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
---
lib/mbuf/rte_mbuf_ptype.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/lib/mbuf/rte_mbuf_ptype.h b/lib/mbuf/rte_mbuf_ptype.h
index 17a2dd3576..f2276e2909 100644
--- a/lib/mbuf/rte_mbuf_ptype.h
+++ b/lib/mbuf/rte_mbuf_ptype.h
@@ -419,10 +419,10 @@ extern "C" {
*
* Packet format:
* <'ether type'=0x0800
- * | 'version'=4, 'protocol'=51>
+ * | 'version'=4, 'protocol'=50>
* or,
* <'ether type'=0x86DD
- * | 'version'=6, 'next header'=51>
+ * | 'version'=6, 'next header'=50>
*/
#define RTE_PTYPE_TUNNEL_ESP 0x00009000
/**
--
2.18.2
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v3 2/7] net/mlx5: add support for ptype match in hardware steering
2023-10-24 17:51 ` [PATCH v3 0/7] ptype matching support in mlx5 Alexander Kozyrev
2023-10-24 17:51 ` [PATCH v3 1/7] ethdev: fix ESP packet type description Alexander Kozyrev
@ 2023-10-24 17:51 ` Alexander Kozyrev
2023-10-24 17:51 ` [PATCH v3 3/7] net/mlx5/hws: add support for fragmented ptype match Alexander Kozyrev
` (5 subsequent siblings)
7 siblings, 0 replies; 27+ messages in thread
From: Alexander Kozyrev @ 2023-10-24 17:51 UTC (permalink / raw)
To: dev; +Cc: orika, matan, michaelba, valex, suanmingm, viacheslavo
The packet type matching provides a quick way of finding out
the L2/L3/L4 protocols in a given packet. That helps with
optimized flow rule matching, eliminating the need to stack
all the packet headers in the matching criteria.
Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
---
drivers/net/mlx5/hws/mlx5dr_definer.c | 161 ++++++++++++++++++++++++++
drivers/net/mlx5/hws/mlx5dr_definer.h | 7 ++
drivers/net/mlx5/mlx5_flow.h | 3 +
drivers/net/mlx5/mlx5_flow_hw.c | 1 +
4 files changed, 172 insertions(+)
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 95b5d4b70e..8d846984e7 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -16,11 +16,15 @@
#define STE_NO_VLAN 0x0
#define STE_SVLAN 0x1
#define STE_CVLAN 0x2
+#define STE_NO_L3 0x0
#define STE_IPV4 0x1
#define STE_IPV6 0x2
+#define STE_NO_L4 0x0
#define STE_TCP 0x1
#define STE_UDP 0x2
#define STE_ICMP 0x3
+#define STE_NO_TUN 0x0
+#define STE_ESP 0x3
#define MLX5DR_DEFINER_QUOTA_BLOCK 0
#define MLX5DR_DEFINER_QUOTA_PASS 2
@@ -277,6 +281,82 @@ mlx5dr_definer_conntrack_tag(struct mlx5dr_definer_fc *fc,
DR_SET(tag, reg_value, fc->byte_off, fc->bit_off, fc->bit_mask);
}
+static void
+mlx5dr_definer_ptype_l2_set(struct mlx5dr_definer_fc *fc,
+ const void *item_spec,
+ uint8_t *tag)
+{
+ bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L2_I);
+ const struct rte_flow_item_ptype *v = item_spec;
+ uint32_t packet_type = v->packet_type &
+ (inner ? RTE_PTYPE_INNER_L2_MASK : RTE_PTYPE_L2_MASK);
+ uint8_t l2_type = STE_NO_VLAN;
+
+ if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER : RTE_PTYPE_L2_ETHER))
+ l2_type = STE_NO_VLAN;
+ else if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER_VLAN))
+ l2_type = STE_CVLAN;
+ else if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER_QINQ : RTE_PTYPE_L2_ETHER_QINQ))
+ l2_type = STE_SVLAN;
+
+ DR_SET(tag, l2_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+mlx5dr_definer_ptype_l3_set(struct mlx5dr_definer_fc *fc,
+ const void *item_spec,
+ uint8_t *tag)
+{
+ bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L3_I);
+ const struct rte_flow_item_ptype *v = item_spec;
+ uint32_t packet_type = v->packet_type &
+ (inner ? RTE_PTYPE_INNER_L3_MASK : RTE_PTYPE_L3_MASK);
+ uint8_t l3_type = STE_NO_L3;
+
+ if (packet_type == (inner ? RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_L3_IPV4))
+ l3_type = STE_IPV4;
+ else if (packet_type == (inner ? RTE_PTYPE_INNER_L3_IPV6 : RTE_PTYPE_L3_IPV6))
+ l3_type = STE_IPV6;
+
+ DR_SET(tag, l3_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+mlx5dr_definer_ptype_l4_set(struct mlx5dr_definer_fc *fc,
+ const void *item_spec,
+ uint8_t *tag)
+{
+ bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L4_I);
+ const struct rte_flow_item_ptype *v = item_spec;
+ uint32_t packet_type = v->packet_type &
+ (inner ? RTE_PTYPE_INNER_L4_MASK : RTE_PTYPE_L4_MASK);
+ uint8_t l4_type = STE_NO_L4;
+
+ if (packet_type == (inner ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP))
+ l4_type = STE_TCP;
+ else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP))
+ l4_type = STE_UDP;
+ else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_ICMP : RTE_PTYPE_L4_ICMP))
+ l4_type = STE_ICMP;
+
+ DR_SET(tag, l4_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+mlx5dr_definer_ptype_tunnel_set(struct mlx5dr_definer_fc *fc,
+ const void *item_spec,
+ uint8_t *tag)
+{
+ const struct rte_flow_item_ptype *v = item_spec;
+ uint32_t packet_type = v->packet_type & RTE_PTYPE_TUNNEL_MASK;
+ uint8_t tun_type = STE_NO_TUN;
+
+ if (packet_type == RTE_PTYPE_TUNNEL_ESP)
+ tun_type = STE_ESP;
+
+ DR_SET(tag, tun_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
static void
mlx5dr_definer_integrity_set(struct mlx5dr_definer_fc *fc,
const void *item_spec,
@@ -1709,6 +1789,83 @@ mlx5dr_definer_conv_item_gre_key(struct mlx5dr_definer_conv_data *cd,
return 0;
}
+static int
+mlx5dr_definer_conv_item_ptype(struct mlx5dr_definer_conv_data *cd,
+ struct rte_flow_item *item,
+ int item_idx)
+{
+ const struct rte_flow_item_ptype *m = item->mask;
+ struct mlx5dr_definer_fc *fc;
+
+ if (!m)
+ return 0;
+
+ if (!(m->packet_type &
+ (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK | RTE_PTYPE_TUNNEL_MASK |
+ RTE_PTYPE_INNER_L2_MASK | RTE_PTYPE_INNER_L3_MASK | RTE_PTYPE_INNER_L4_MASK))) {
+ rte_errno = ENOTSUP;
+ return rte_errno;
+ }
+
+ if (m->packet_type & RTE_PTYPE_L2_MASK) {
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_L2, false)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_l2_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, false);
+ }
+
+ if (m->packet_type & RTE_PTYPE_INNER_L2_MASK) {
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_L2, true)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_l2_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, true);
+ }
+
+ if (m->packet_type & RTE_PTYPE_L3_MASK) {
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_L3, false)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_l3_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, l3_type, false);
+ }
+
+ if (m->packet_type & RTE_PTYPE_INNER_L3_MASK) {
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_L3, true)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_l3_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, l3_type, true);
+ }
+
+ if (m->packet_type & RTE_PTYPE_L4_MASK) {
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4, false)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_l4_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, l4_type, false);
+ }
+
+ if (m->packet_type & RTE_PTYPE_INNER_L4_MASK) {
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4, true)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_l4_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, l4_type, true);
+ }
+
+ if (m->packet_type & RTE_PTYPE_TUNNEL_MASK) {
+ fc = &cd->fc[MLX5DR_DEFINER_FNAME_PTYPE_TUNNEL];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_tunnel_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, l4_type_bwc, false);
+ }
+
+ return 0;
+}
+
static int
mlx5dr_definer_conv_item_integrity(struct mlx5dr_definer_conv_data *cd,
struct rte_flow_item *item,
@@ -2332,6 +2489,10 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
ret = mlx5dr_definer_conv_item_ib_l4(&cd, items, i);
item_flags |= MLX5_FLOW_ITEM_IB_BTH;
break;
+ case RTE_FLOW_ITEM_TYPE_PTYPE:
+ ret = mlx5dr_definer_conv_item_ptype(&cd, items, i);
+ item_flags |= MLX5_FLOW_ITEM_PTYPE;
+ break;
default:
DR_LOG(ERR, "Unsupported item type %d", items->type);
rte_errno = ENOTSUP;
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.h b/drivers/net/mlx5/hws/mlx5dr_definer.h
index f5a541bc17..ea07f55d52 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.h
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.h
@@ -141,6 +141,13 @@ enum mlx5dr_definer_fname {
MLX5DR_DEFINER_FNAME_IB_L4_OPCODE,
MLX5DR_DEFINER_FNAME_IB_L4_QPN,
MLX5DR_DEFINER_FNAME_IB_L4_A,
+ MLX5DR_DEFINER_FNAME_PTYPE_L2_O,
+ MLX5DR_DEFINER_FNAME_PTYPE_L2_I,
+ MLX5DR_DEFINER_FNAME_PTYPE_L3_O,
+ MLX5DR_DEFINER_FNAME_PTYPE_L3_I,
+ MLX5DR_DEFINER_FNAME_PTYPE_L4_O,
+ MLX5DR_DEFINER_FNAME_PTYPE_L4_I,
+ MLX5DR_DEFINER_FNAME_PTYPE_TUNNEL,
MLX5DR_DEFINER_FNAME_MAX,
};
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 903ff66d72..98b267245c 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -233,6 +233,9 @@ enum mlx5_feature_name {
/* IB BTH ITEM. */
#define MLX5_FLOW_ITEM_IB_BTH (1ull << 51)
+/* PTYPE ITEM */
+#define MLX5_FLOW_ITEM_PTYPE (1ull << 52)
+
/* NSH ITEM */
#define MLX5_FLOW_ITEM_NSH (1ull << 53)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 6fcf654e4a..34b3c9e6ad 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -5382,6 +5382,7 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_ESP:
case RTE_FLOW_ITEM_TYPE_FLEX:
case RTE_FLOW_ITEM_TYPE_IB_BTH:
+ case RTE_FLOW_ITEM_TYPE_PTYPE:
break;
case RTE_FLOW_ITEM_TYPE_INTEGRITY:
/*
--
2.18.2
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v3 3/7] net/mlx5/hws: add support for fragmented ptype match
2023-10-24 17:51 ` [PATCH v3 0/7] ptype matching support in mlx5 Alexander Kozyrev
2023-10-24 17:51 ` [PATCH v3 1/7] ethdev: fix ESP packet type description Alexander Kozyrev
2023-10-24 17:51 ` [PATCH v3 2/7] net/mlx5: add support for ptype match in hardware steering Alexander Kozyrev
@ 2023-10-24 17:51 ` Alexander Kozyrev
2023-10-24 17:51 ` [PATCH v3 4/7] doc: add PMD ptype item limitations Alexander Kozyrev
` (4 subsequent siblings)
7 siblings, 0 replies; 27+ messages in thread
From: Alexander Kozyrev @ 2023-10-24 17:51 UTC (permalink / raw)
To: dev; +Cc: orika, matan, michaelba, valex, suanmingm, viacheslavo
Expand packet type matching with support for the
fragmented IP (Internet Protocol) packet type.
Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
---
drivers/net/mlx5/hws/mlx5dr_definer.c | 54 ++++++++++++++++++++++-----
drivers/net/mlx5/hws/mlx5dr_definer.h | 2 +
2 files changed, 46 insertions(+), 10 deletions(-)
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 8d846984e7..0e1035c6bd 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -357,6 +357,19 @@ mlx5dr_definer_ptype_tunnel_set(struct mlx5dr_definer_fc *fc,
DR_SET(tag, tun_type, fc->byte_off, fc->bit_off, fc->bit_mask);
}
+static void
+mlx5dr_definer_ptype_frag_set(struct mlx5dr_definer_fc *fc,
+ const void *item_spec,
+ uint8_t *tag)
+{
+ bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_FRAG_I);
+ const struct rte_flow_item_ptype *v = item_spec;
+ uint32_t packet_type = v->packet_type &
+ (inner ? RTE_PTYPE_INNER_L4_FRAG : RTE_PTYPE_L4_FRAG);
+
+ DR_SET(tag, !!packet_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
static void
mlx5dr_definer_integrity_set(struct mlx5dr_definer_fc *fc,
const void *item_spec,
@@ -1840,19 +1853,40 @@ mlx5dr_definer_conv_item_ptype(struct mlx5dr_definer_conv_data *cd,
}
if (m->packet_type & RTE_PTYPE_L4_MASK) {
- fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4, false)];
- fc->item_idx = item_idx;
- fc->tag_set = &mlx5dr_definer_ptype_l4_set;
- fc->tag_mask_set = &mlx5dr_definer_ones_set;
- DR_CALC_SET(fc, eth_l2, l4_type, false);
+ /*
+ * Fragmented IP (Internet Protocol) packet type.
+ * Cannot be combined with Layer 4 Types (TCP/UDP).
+ * The exact value must be specified in the mask.
+ */
+ if (m->packet_type == RTE_PTYPE_L4_FRAG) {
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_FRAG, false)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_frag_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, ip_fragmented, false);
+ } else {
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4, false)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_l4_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, l4_type, false);
+ }
}
if (m->packet_type & RTE_PTYPE_INNER_L4_MASK) {
- fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4, true)];
- fc->item_idx = item_idx;
- fc->tag_set = &mlx5dr_definer_ptype_l4_set;
- fc->tag_mask_set = &mlx5dr_definer_ones_set;
- DR_CALC_SET(fc, eth_l2, l4_type, true);
+ if (m->packet_type == RTE_PTYPE_INNER_L4_FRAG) {
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_FRAG, true)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_frag_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, ip_fragmented, true);
+ } else {
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4, true)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_l4_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, l4_type, true);
+ }
}
if (m->packet_type & RTE_PTYPE_TUNNEL_MASK) {
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.h b/drivers/net/mlx5/hws/mlx5dr_definer.h
index ea07f55d52..791154a7dc 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.h
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.h
@@ -148,6 +148,8 @@ enum mlx5dr_definer_fname {
MLX5DR_DEFINER_FNAME_PTYPE_L4_O,
MLX5DR_DEFINER_FNAME_PTYPE_L4_I,
MLX5DR_DEFINER_FNAME_PTYPE_TUNNEL,
+ MLX5DR_DEFINER_FNAME_PTYPE_FRAG_O,
+ MLX5DR_DEFINER_FNAME_PTYPE_FRAG_I,
MLX5DR_DEFINER_FNAME_MAX,
};
--
2.18.2
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v3 4/7] doc: add PMD ptype item limitations
2023-10-24 17:51 ` [PATCH v3 0/7] ptype matching support in mlx5 Alexander Kozyrev
` (2 preceding siblings ...)
2023-10-24 17:51 ` [PATCH v3 3/7] net/mlx5/hws: add support for fragmented ptype match Alexander Kozyrev
@ 2023-10-24 17:51 ` Alexander Kozyrev
2023-10-24 17:51 ` [PATCH v3 5/7] doc: add packet type matching item to release notes Alexander Kozyrev
` (3 subsequent siblings)
7 siblings, 0 replies; 27+ messages in thread
From: Alexander Kozyrev @ 2023-10-24 17:51 UTC (permalink / raw)
To: dev; +Cc: orika, matan, michaelba, valex, suanmingm, viacheslavo
From: Michael Baum <michaelba@nvidia.com>
Add limitations for ptype item support in "mlx5.rst" file.
Signed-off-by: Michael Baum <michaelba@nvidia.com>
---
doc/guides/nics/features/mlx5.ini | 1 +
doc/guides/nics/mlx5.rst | 15 +++++++++++++++
2 files changed, 16 insertions(+)
diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini
index fc67415c6c..e3927ab4df 100644
--- a/doc/guides/nics/features/mlx5.ini
+++ b/doc/guides/nics/features/mlx5.ini
@@ -86,6 +86,7 @@ nsh = Y
nvgre = Y
port_id = Y
port_representor = Y
+ptype = Y
quota = Y
tag = Y
tcp = Y
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 7086f3d1d4..c9e74948cc 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -646,6 +646,21 @@ Limitations
- When using HWS flow engine (``dv_flow_en`` = 2),
only meter mark action is supported.
+- Ptype:
+
+ - Only supports HW steering (``dv_flow_en=2``).
+ - The supported values are:
+ L2: ``RTE_PTYPE_L2_ETHER``, ``RTE_PTYPE_L2_ETHER_VLAN``, ``RTE_PTYPE_L2_ETHER_QINQ``
+ L3: ``RTE_PTYPE_L3_IPV4``, ``RTE_PTYPE_L3_IPV6``
+ L4: ``RTE_PTYPE_L4_TCP``, ``RTE_PTYPE_L4_UDP``, ``RTE_PTYPE_L4_ICMP``
+ and their ``RTE_PTYPE_INNER_XXX`` counterparts as well as ``RTE_PTYPE_TUNNEL_ESP``.
+ Any other values are not supported. Using them as a value will cause unexpected behavior.
+ - Matching on both outer and inner IP fragmented is supported using ``RTE_PTYPE_L4_FRAG`` and
+ ``RTE_PTYPE_INNER_L4_FRAG`` values. They are not part of L4 types, so they should be provided
+ explicitly as a mask value during pattern template creation. Providing ``RTE_PTYPE_L4_MASK``
+ during pattern template creation and ``RTE_PTYPE_L4_FRAG`` during flow rule creation
+ will cause unexpected behavior.
+
- Integrity:
- Integrity offload is enabled starting from **ConnectX-6 Dx**.
--
2.18.2
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v3 5/7] doc: add packet type matching item to release notes
2023-10-24 17:51 ` [PATCH v3 0/7] ptype matching support in mlx5 Alexander Kozyrev
` (3 preceding siblings ...)
2023-10-24 17:51 ` [PATCH v3 4/7] doc: add PMD ptype item limitations Alexander Kozyrev
@ 2023-10-24 17:51 ` Alexander Kozyrev
2023-10-24 17:51 ` [PATCH v3 6/7] net/mlx5/hws: remove csum check from L3 ok check Alexander Kozyrev
` (2 subsequent siblings)
7 siblings, 0 replies; 27+ messages in thread
From: Alexander Kozyrev @ 2023-10-24 17:51 UTC (permalink / raw)
To: dev; +Cc: orika, matan, michaelba, valex, suanmingm, viacheslavo
Document the new RTE_FLOW_ITEM_TYPE_PTYPE item in the release notes.
Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
---
doc/guides/rel_notes/release_23_11.rst | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 0a6fc76a9d..548e38cde4 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -122,6 +122,10 @@ New Features
a group's miss actions, which are the actions to be performed on packets
that didn't match any of the flow rules in the group.
+* **Added ptype matching criteria.**
+ Added ``RTE_FLOW_ITEM_TYPE_PTYPE`` to allow matching on L2/L3/L4
+ and tunnel information as defined in mbuf packet type.
+
* **Updated Intel cpfl driver.**
* Added support for port representor.
--
2.18.2
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v3 6/7] net/mlx5/hws: remove csum check from L3 ok check
2023-10-24 17:51 ` [PATCH v3 0/7] ptype matching support in mlx5 Alexander Kozyrev
` (4 preceding siblings ...)
2023-10-24 17:51 ` [PATCH v3 5/7] doc: add packet type matching item to release notes Alexander Kozyrev
@ 2023-10-24 17:51 ` Alexander Kozyrev
2023-10-24 17:51 ` [PATCH v3 7/7] net/mlx5/hws: fix integrity bits level Alexander Kozyrev
2023-10-25 20:51 ` [PATCH v4 0/4] ptype matching support in mlx5 Alexander Kozyrev
7 siblings, 0 replies; 27+ messages in thread
From: Alexander Kozyrev @ 2023-10-24 17:51 UTC (permalink / raw)
To: dev; +Cc: orika, matan, michaelba, valex, suanmingm, viacheslavo
From: Michael Baum <michaelba@nvidia.com>
This patch changes the integrity item behavior for HW steering.
Old behavior: the "ipv4_csum_ok" field checks only the IPv4 checksum, while
"l3_ok" checks that everything is ok, including the IPv4 checksum.
New behavior: the "l3_ok" field checks that everything is ok, excluding the
IPv4 checksum.
This change enables matching "l3_ok" on IPv6 packets, since for IPv6
packets "ipv4_csum_ok" always misses.
For SW steering, the old behavior is kept, the same as for the L4 ok check.
Signed-off-by: Michael Baum <michaelba@nvidia.com>
---
doc/guides/nics/mlx5.rst | 11 ++++++++---
drivers/net/mlx5/hws/mlx5dr_definer.c | 6 ++----
2 files changed, 10 insertions(+), 7 deletions(-)
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index c9e74948cc..8d7e0aad7e 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -663,12 +663,13 @@ Limitations
- Integrity:
- - Integrity offload is enabled starting from **ConnectX-6 Dx**.
- Verification bits provided by the hardware are ``l3_ok``, ``ipv4_csum_ok``, ``l4_ok``, ``l4_csum_ok``.
- ``level`` value 0 references outer headers.
- Negative integrity item verification is not supported.
- - Multiple integrity items not supported in a single flow rule.
- - Flow rule items supplied by application must explicitly specify network headers referred by integrity item.
+ - With SW steering (``dv_flow_en=1``)
+ - Integrity offload is enabled starting from **ConnectX-6 Dx**.
+ - Multiple integrity items not supported in a single flow rule.
+ - Flow rule items supplied by application must explicitly specify network headers referred by integrity item.
For example, if integrity item mask sets ``l4_ok`` or ``l4_csum_ok`` bits, reference to L4 network header,
TCP or UDP, must be in the rule pattern as well::
@@ -676,6 +677,10 @@ Limitations
flow create 0 ingress pattern integrity level is 0 value mask l4_ok value spec l4_ok / eth / ipv4 proto is udp / end …
+ - With HW steering (``dv_flow_en=2``)
+ - The ``l3_ok`` field represents all L3 checks, but nothing about whether IPv4 checksum ok.
+ - The ``l4_ok`` field represents all L4 checks including L4 checksum ok.
+
- Connection tracking:
- Cannot co-exist with ASO meter, ASO age action in a single flow rule.
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 0e1035c6bd..c752896ca7 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -380,10 +380,8 @@ mlx5dr_definer_integrity_set(struct mlx5dr_definer_fc *fc,
uint32_t ok1_bits = 0;
if (v->l3_ok)
- ok1_bits |= inner ? BIT(MLX5DR_DEFINER_OKS1_SECOND_L3_OK) |
- BIT(MLX5DR_DEFINER_OKS1_SECOND_IPV4_CSUM_OK) :
- BIT(MLX5DR_DEFINER_OKS1_FIRST_L3_OK) |
- BIT(MLX5DR_DEFINER_OKS1_FIRST_IPV4_CSUM_OK);
+ ok1_bits |= inner ? BIT(MLX5DR_DEFINER_OKS1_SECOND_L3_OK) :
+ BIT(MLX5DR_DEFINER_OKS1_FIRST_L3_OK);
if (v->ipv4_csum_ok)
ok1_bits |= inner ? BIT(MLX5DR_DEFINER_OKS1_SECOND_IPV4_CSUM_OK) :
--
2.18.2
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v3 7/7] net/mlx5/hws: fix integrity bits level
2023-10-24 17:51 ` [PATCH v3 0/7] ptype matching support in mlx5 Alexander Kozyrev
` (5 preceding siblings ...)
2023-10-24 17:51 ` [PATCH v3 6/7] net/mlx5/hws: remove csum check from L3 ok check Alexander Kozyrev
@ 2023-10-24 17:51 ` Alexander Kozyrev
2023-10-25 20:51 ` [PATCH v4 0/4] ptype matching support in mlx5 Alexander Kozyrev
7 siblings, 0 replies; 27+ messages in thread
From: Alexander Kozyrev @ 2023-10-24 17:51 UTC (permalink / raw)
To: dev; +Cc: orika, matan, michaelba, valex, suanmingm, viacheslavo
The level field in the integrity item is not taken into account
in the current implementation of hardware steering.
Use this value instead of trying to find out the encapsulation
level according to the protocol items involved.
Fixes: c55c2bf35333 ("net/mlx5/hws: add definer layer")
Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
---
drivers/net/mlx5/hws/mlx5dr_definer.c | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index c752896ca7..f1f9235956 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -1905,7 +1905,6 @@ mlx5dr_definer_conv_item_integrity(struct mlx5dr_definer_conv_data *cd,
{
const struct rte_flow_item_integrity *m = item->mask;
struct mlx5dr_definer_fc *fc;
- bool inner = cd->tunnel;
if (!m)
return 0;
@@ -1916,7 +1915,7 @@ mlx5dr_definer_conv_item_integrity(struct mlx5dr_definer_conv_data *cd,
}
if (m->l3_ok || m->ipv4_csum_ok || m->l4_ok || m->l4_csum_ok) {
- fc = &cd->fc[DR_CALC_FNAME(INTEGRITY, inner)];
+ fc = &cd->fc[DR_CALC_FNAME(INTEGRITY, m->level)];
fc->item_idx = item_idx;
fc->tag_set = &mlx5dr_definer_integrity_set;
DR_CALC_SET_HDR(fc, oks1, oks1_bits);
@@ -2471,8 +2470,7 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
break;
case RTE_FLOW_ITEM_TYPE_INTEGRITY:
ret = mlx5dr_definer_conv_item_integrity(&cd, items, i);
- item_flags |= cd.tunnel ? MLX5_FLOW_ITEM_INNER_INTEGRITY :
- MLX5_FLOW_ITEM_OUTER_INTEGRITY;
+ item_flags |= MLX5_FLOW_ITEM_INTEGRITY;
break;
case RTE_FLOW_ITEM_TYPE_CONNTRACK:
ret = mlx5dr_definer_conv_item_conntrack(&cd, items, i);
--
2.18.2
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v4 0/4] ptype matching support in mlx5
2023-10-24 17:51 ` [PATCH v3 0/7] ptype matching support in mlx5 Alexander Kozyrev
` (6 preceding siblings ...)
2023-10-24 17:51 ` [PATCH v3 7/7] net/mlx5/hws: fix integrity bits level Alexander Kozyrev
@ 2023-10-25 20:51 ` Alexander Kozyrev
2023-10-25 20:51 ` [PATCH v4 1/4] net/mlx5: add support for ptype match in hardware steering Alexander Kozyrev
` (4 more replies)
7 siblings, 5 replies; 27+ messages in thread
From: Alexander Kozyrev @ 2023-10-25 20:51 UTC (permalink / raw)
To: dev; +Cc: orika, matan, michaelba, valex, suanmingm, viacheslavo, erezsh
Add support for RTE_FLOW_ITEM_TYPE_PTYPE in mlx5 PMD.
Alexander Kozyrev (3):
net/mlx5: add support for ptype match in hardware steering
net/mlx5/hws: add support for fragmented ptype match
doc: add packet type matching item to release notes
Michael Baum (1):
doc: add PMD ptype item limitations
doc/guides/nics/features/mlx5.ini | 1 +
doc/guides/nics/mlx5.rst | 15 ++
doc/guides/rel_notes/release_23_11.rst | 5 +
drivers/net/mlx5/hws/mlx5dr_definer.c | 195 +++++++++++++++++++++++++
drivers/net/mlx5/hws/mlx5dr_definer.h | 9 ++
drivers/net/mlx5/mlx5_flow.h | 3 +
drivers/net/mlx5/mlx5_flow_hw.c | 1 +
7 files changed, 229 insertions(+)
--
2.18.2
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v4 1/4] net/mlx5: add support for ptype match in hardware steering
2023-10-25 20:51 ` [PATCH v4 0/4] ptype matching support in mlx5 Alexander Kozyrev
@ 2023-10-25 20:51 ` Alexander Kozyrev
2023-10-29 13:10 ` Ori Kam
2023-10-25 20:51 ` [PATCH v4 2/4] net/mlx5/hws: add support for fragmented ptype match Alexander Kozyrev
` (3 subsequent siblings)
4 siblings, 1 reply; 27+ messages in thread
From: Alexander Kozyrev @ 2023-10-25 20:51 UTC (permalink / raw)
To: dev; +Cc: orika, matan, michaelba, valex, suanmingm, viacheslavo, erezsh
The packet type matching provides a quick way of finding out
the L2/L3/L4 protocols in a given packet. That helps with
optimized flow rule matching, eliminating the need to stack
all the packet headers in the matching criteria.
Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
---
drivers/net/mlx5/hws/mlx5dr_definer.c | 161 ++++++++++++++++++++++++++
drivers/net/mlx5/hws/mlx5dr_definer.h | 7 ++
drivers/net/mlx5/mlx5_flow.h | 3 +
drivers/net/mlx5/mlx5_flow_hw.c | 1 +
4 files changed, 172 insertions(+)
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 95b5d4b70e..8d846984e7 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -16,11 +16,15 @@
#define STE_NO_VLAN 0x0
#define STE_SVLAN 0x1
#define STE_CVLAN 0x2
+#define STE_NO_L3 0x0
#define STE_IPV4 0x1
#define STE_IPV6 0x2
+#define STE_NO_L4 0x0
#define STE_TCP 0x1
#define STE_UDP 0x2
#define STE_ICMP 0x3
+#define STE_NO_TUN 0x0
+#define STE_ESP 0x3
#define MLX5DR_DEFINER_QUOTA_BLOCK 0
#define MLX5DR_DEFINER_QUOTA_PASS 2
@@ -277,6 +281,82 @@ mlx5dr_definer_conntrack_tag(struct mlx5dr_definer_fc *fc,
DR_SET(tag, reg_value, fc->byte_off, fc->bit_off, fc->bit_mask);
}
+static void
+mlx5dr_definer_ptype_l2_set(struct mlx5dr_definer_fc *fc,
+ const void *item_spec,
+ uint8_t *tag)
+{
+ bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L2_I);
+ const struct rte_flow_item_ptype *v = item_spec;
+ uint32_t packet_type = v->packet_type &
+ (inner ? RTE_PTYPE_INNER_L2_MASK : RTE_PTYPE_L2_MASK);
+ uint8_t l2_type = STE_NO_VLAN;
+
+ if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER : RTE_PTYPE_L2_ETHER))
+ l2_type = STE_NO_VLAN;
+ else if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER_VLAN))
+ l2_type = STE_CVLAN;
+ else if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER_QINQ : RTE_PTYPE_L2_ETHER_QINQ))
+ l2_type = STE_SVLAN;
+
+ DR_SET(tag, l2_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+mlx5dr_definer_ptype_l3_set(struct mlx5dr_definer_fc *fc,
+ const void *item_spec,
+ uint8_t *tag)
+{
+ bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L3_I);
+ const struct rte_flow_item_ptype *v = item_spec;
+ uint32_t packet_type = v->packet_type &
+ (inner ? RTE_PTYPE_INNER_L3_MASK : RTE_PTYPE_L3_MASK);
+ uint8_t l3_type = STE_NO_L3;
+
+ if (packet_type == (inner ? RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_L3_IPV4))
+ l3_type = STE_IPV4;
+ else if (packet_type == (inner ? RTE_PTYPE_INNER_L3_IPV6 : RTE_PTYPE_L3_IPV6))
+ l3_type = STE_IPV6;
+
+ DR_SET(tag, l3_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+mlx5dr_definer_ptype_l4_set(struct mlx5dr_definer_fc *fc,
+ const void *item_spec,
+ uint8_t *tag)
+{
+ bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L4_I);
+ const struct rte_flow_item_ptype *v = item_spec;
+ uint32_t packet_type = v->packet_type &
+ (inner ? RTE_PTYPE_INNER_L4_MASK : RTE_PTYPE_L4_MASK);
+ uint8_t l4_type = STE_NO_L4;
+
+ if (packet_type == (inner ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP))
+ l4_type = STE_TCP;
+ else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP))
+ l4_type = STE_UDP;
+ else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_ICMP : RTE_PTYPE_L4_ICMP))
+ l4_type = STE_ICMP;
+
+ DR_SET(tag, l4_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+mlx5dr_definer_ptype_tunnel_set(struct mlx5dr_definer_fc *fc,
+ const void *item_spec,
+ uint8_t *tag)
+{
+ const struct rte_flow_item_ptype *v = item_spec;
+ uint32_t packet_type = v->packet_type & RTE_PTYPE_TUNNEL_MASK;
+ uint8_t tun_type = STE_NO_TUN;
+
+ if (packet_type == RTE_PTYPE_TUNNEL_ESP)
+ tun_type = STE_ESP;
+
+ DR_SET(tag, tun_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
static void
mlx5dr_definer_integrity_set(struct mlx5dr_definer_fc *fc,
const void *item_spec,
@@ -1709,6 +1789,83 @@ mlx5dr_definer_conv_item_gre_key(struct mlx5dr_definer_conv_data *cd,
return 0;
}
+static int
+mlx5dr_definer_conv_item_ptype(struct mlx5dr_definer_conv_data *cd,
+ struct rte_flow_item *item,
+ int item_idx)
+{
+ const struct rte_flow_item_ptype *m = item->mask;
+ struct mlx5dr_definer_fc *fc;
+
+ if (!m)
+ return 0;
+
+ if (!(m->packet_type &
+ (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK | RTE_PTYPE_TUNNEL_MASK |
+ RTE_PTYPE_INNER_L2_MASK | RTE_PTYPE_INNER_L3_MASK | RTE_PTYPE_INNER_L4_MASK))) {
+ rte_errno = ENOTSUP;
+ return rte_errno;
+ }
+
+ if (m->packet_type & RTE_PTYPE_L2_MASK) {
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_L2, false)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_l2_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, false);
+ }
+
+ if (m->packet_type & RTE_PTYPE_INNER_L2_MASK) {
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_L2, true)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_l2_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, true);
+ }
+
+ if (m->packet_type & RTE_PTYPE_L3_MASK) {
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_L3, false)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_l3_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, l3_type, false);
+ }
+
+ if (m->packet_type & RTE_PTYPE_INNER_L3_MASK) {
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_L3, true)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_l3_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, l3_type, true);
+ }
+
+ if (m->packet_type & RTE_PTYPE_L4_MASK) {
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4, false)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_l4_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, l4_type, false);
+ }
+
+ if (m->packet_type & RTE_PTYPE_INNER_L4_MASK) {
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4, true)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_l4_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, l4_type, true);
+ }
+
+ if (m->packet_type & RTE_PTYPE_TUNNEL_MASK) {
+ fc = &cd->fc[MLX5DR_DEFINER_FNAME_PTYPE_TUNNEL];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_tunnel_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, l4_type_bwc, false);
+ }
+
+ return 0;
+}
+
static int
mlx5dr_definer_conv_item_integrity(struct mlx5dr_definer_conv_data *cd,
struct rte_flow_item *item,
@@ -2332,6 +2489,10 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
ret = mlx5dr_definer_conv_item_ib_l4(&cd, items, i);
item_flags |= MLX5_FLOW_ITEM_IB_BTH;
break;
+ case RTE_FLOW_ITEM_TYPE_PTYPE:
+ ret = mlx5dr_definer_conv_item_ptype(&cd, items, i);
+ item_flags |= MLX5_FLOW_ITEM_PTYPE;
+ break;
default:
DR_LOG(ERR, "Unsupported item type %d", items->type);
rte_errno = ENOTSUP;
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.h b/drivers/net/mlx5/hws/mlx5dr_definer.h
index f5a541bc17..ea07f55d52 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.h
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.h
@@ -141,6 +141,13 @@ enum mlx5dr_definer_fname {
MLX5DR_DEFINER_FNAME_IB_L4_OPCODE,
MLX5DR_DEFINER_FNAME_IB_L4_QPN,
MLX5DR_DEFINER_FNAME_IB_L4_A,
+ MLX5DR_DEFINER_FNAME_PTYPE_L2_O,
+ MLX5DR_DEFINER_FNAME_PTYPE_L2_I,
+ MLX5DR_DEFINER_FNAME_PTYPE_L3_O,
+ MLX5DR_DEFINER_FNAME_PTYPE_L3_I,
+ MLX5DR_DEFINER_FNAME_PTYPE_L4_O,
+ MLX5DR_DEFINER_FNAME_PTYPE_L4_I,
+ MLX5DR_DEFINER_FNAME_PTYPE_TUNNEL,
MLX5DR_DEFINER_FNAME_MAX,
};
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 903ff66d72..98b267245c 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -233,6 +233,9 @@ enum mlx5_feature_name {
/* IB BTH ITEM. */
#define MLX5_FLOW_ITEM_IB_BTH (1ull << 51)
+/* PTYPE ITEM */
+#define MLX5_FLOW_ITEM_PTYPE (1ull << 52)
+
/* NSH ITEM */
#define MLX5_FLOW_ITEM_NSH (1ull << 53)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 89b6f546ae..7be7cdbbc1 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -5382,6 +5382,7 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_ESP:
case RTE_FLOW_ITEM_TYPE_FLEX:
case RTE_FLOW_ITEM_TYPE_IB_BTH:
+ case RTE_FLOW_ITEM_TYPE_PTYPE:
break;
case RTE_FLOW_ITEM_TYPE_INTEGRITY:
/*
--
2.18.2
^ permalink raw reply [flat|nested] 27+ messages in thread
* RE: [PATCH v4 1/4] net/mlx5: add support for ptype match in hardware steering
2023-10-25 20:51 ` [PATCH v4 1/4] net/mlx5: add support for ptype match in hardware steering Alexander Kozyrev
@ 2023-10-29 13:10 ` Ori Kam
0 siblings, 0 replies; 27+ messages in thread
From: Ori Kam @ 2023-10-29 13:10 UTC (permalink / raw)
To: Alexander Kozyrev, dev
Cc: Matan Azrad, Michael Baum, Alex Vesker, Suanming Mou,
Slava Ovsiienko, Erez Shitrit
Hi
> -----Original Message-----
> From: Alexander Kozyrev <akozyrev@nvidia.com>
> Sent: Wednesday, October 25, 2023 11:51 PM
> Subject: [PATCH v4 1/4] net/mlx5: add support for ptype match in hardware
> steering
>
> The packet type matching provides quick way of finding out
> L2/L3/L4 protocols in a given packet. That helps with
> optimized flow rules matching, eliminating the need of
> stacking all the packet headers in the matching criteria.
>
> Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
> ---
Acked-by: Ori Kam <orika@nvidia.com>
Best,
Ori
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v4 2/4] net/mlx5/hws: add support for fragmented ptype match
2023-10-25 20:51 ` [PATCH v4 0/4] ptype matching support in mlx5 Alexander Kozyrev
2023-10-25 20:51 ` [PATCH v4 1/4] net/mlx5: add support for ptype match in hardware steering Alexander Kozyrev
@ 2023-10-25 20:51 ` Alexander Kozyrev
2023-10-29 13:12 ` Ori Kam
2023-10-25 20:51 ` [PATCH v4 3/4] doc: add PMD ptype item limitations Alexander Kozyrev
` (2 subsequent siblings)
4 siblings, 1 reply; 27+ messages in thread
From: Alexander Kozyrev @ 2023-10-25 20:51 UTC (permalink / raw)
To: dev; +Cc: orika, matan, michaelba, valex, suanmingm, viacheslavo, erezsh
Expand packet type matching with support of the
Fragmented IP (Internet Protocol) packet type.
Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
---
drivers/net/mlx5/hws/mlx5dr_definer.c | 54 ++++++++++++++++++++++-----
drivers/net/mlx5/hws/mlx5dr_definer.h | 2 +
2 files changed, 46 insertions(+), 10 deletions(-)
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 8d846984e7..0e1035c6bd 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -357,6 +357,19 @@ mlx5dr_definer_ptype_tunnel_set(struct mlx5dr_definer_fc *fc,
DR_SET(tag, tun_type, fc->byte_off, fc->bit_off, fc->bit_mask);
}
+static void
+mlx5dr_definer_ptype_frag_set(struct mlx5dr_definer_fc *fc,
+ const void *item_spec,
+ uint8_t *tag)
+{
+ bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_FRAG_I);
+ const struct rte_flow_item_ptype *v = item_spec;
+ uint32_t packet_type = v->packet_type &
+ (inner ? RTE_PTYPE_INNER_L4_FRAG : RTE_PTYPE_L4_FRAG);
+
+ DR_SET(tag, !!packet_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
static void
mlx5dr_definer_integrity_set(struct mlx5dr_definer_fc *fc,
const void *item_spec,
@@ -1840,19 +1853,40 @@ mlx5dr_definer_conv_item_ptype(struct mlx5dr_definer_conv_data *cd,
}
if (m->packet_type & RTE_PTYPE_L4_MASK) {
- fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4, false)];
- fc->item_idx = item_idx;
- fc->tag_set = &mlx5dr_definer_ptype_l4_set;
- fc->tag_mask_set = &mlx5dr_definer_ones_set;
- DR_CALC_SET(fc, eth_l2, l4_type, false);
+ /*
+ * Fragmented IP (Internet Protocol) packet type.
+ * Cannot be combined with Layer 4 Types (TCP/UDP).
+ * The exact value must be specified in the mask.
+ */
+ if (m->packet_type == RTE_PTYPE_L4_FRAG) {
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_FRAG, false)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_frag_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, ip_fragmented, false);
+ } else {
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4, false)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_l4_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, l4_type, false);
+ }
}
if (m->packet_type & RTE_PTYPE_INNER_L4_MASK) {
- fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4, true)];
- fc->item_idx = item_idx;
- fc->tag_set = &mlx5dr_definer_ptype_l4_set;
- fc->tag_mask_set = &mlx5dr_definer_ones_set;
- DR_CALC_SET(fc, eth_l2, l4_type, true);
+ if (m->packet_type == RTE_PTYPE_INNER_L4_FRAG) {
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_FRAG, true)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_frag_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, ip_fragmented, true);
+ } else {
+ fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4, true)];
+ fc->item_idx = item_idx;
+ fc->tag_set = &mlx5dr_definer_ptype_l4_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ DR_CALC_SET(fc, eth_l2, l4_type, true);
+ }
}
if (m->packet_type & RTE_PTYPE_TUNNEL_MASK) {
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.h b/drivers/net/mlx5/hws/mlx5dr_definer.h
index ea07f55d52..791154a7dc 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.h
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.h
@@ -148,6 +148,8 @@ enum mlx5dr_definer_fname {
MLX5DR_DEFINER_FNAME_PTYPE_L4_O,
MLX5DR_DEFINER_FNAME_PTYPE_L4_I,
MLX5DR_DEFINER_FNAME_PTYPE_TUNNEL,
+ MLX5DR_DEFINER_FNAME_PTYPE_FRAG_O,
+ MLX5DR_DEFINER_FNAME_PTYPE_FRAG_I,
MLX5DR_DEFINER_FNAME_MAX,
};
--
2.18.2
^ permalink raw reply [flat|nested] 27+ messages in thread
* RE: [PATCH v4 2/4] net/mlx5/hws: add support for fragmented ptype match
2023-10-25 20:51 ` [PATCH v4 2/4] net/mlx5/hws: add support for fragmented ptype match Alexander Kozyrev
@ 2023-10-29 13:12 ` Ori Kam
0 siblings, 0 replies; 27+ messages in thread
From: Ori Kam @ 2023-10-29 13:12 UTC (permalink / raw)
To: Alexander Kozyrev, dev
Cc: Matan Azrad, Michael Baum, Alex Vesker, Suanming Mou,
Slava Ovsiienko, Erez Shitrit
Hi
> -----Original Message-----
> From: Alexander Kozyrev <akozyrev@nvidia.com>
> Sent: Wednesday, October 25, 2023 11:51 PM
>
> Expand packet type matching with support of the
> Fragmented IP (Internet Protocol) packet type.
>
> Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
> ---
Acked-by: Ori Kam <orika@nvidia.com>
Best,
Ori
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v4 3/4] doc: add PMD ptype item limitations
2023-10-25 20:51 ` [PATCH v4 0/4] ptype matching support in mlx5 Alexander Kozyrev
2023-10-25 20:51 ` [PATCH v4 1/4] net/mlx5: add support for ptype match in hardware steering Alexander Kozyrev
2023-10-25 20:51 ` [PATCH v4 2/4] net/mlx5/hws: add support for fragmented ptype match Alexander Kozyrev
@ 2023-10-25 20:51 ` Alexander Kozyrev
2023-10-29 13:13 ` Ori Kam
2023-10-25 20:51 ` [PATCH v4 4/4] doc: add packet type matching item to release notes Alexander Kozyrev
2023-10-29 17:27 ` [PATCH v4 0/4] ptype matching support in mlx5 Raslan Darawsheh
4 siblings, 1 reply; 27+ messages in thread
From: Alexander Kozyrev @ 2023-10-25 20:51 UTC (permalink / raw)
To: dev; +Cc: orika, matan, michaelba, valex, suanmingm, viacheslavo, erezsh
From: Michael Baum <michaelba@nvidia.com>
Add limitations for ptype item support in "mlx5.rst" file.
Signed-off-by: Michael Baum <michaelba@nvidia.com>
---
doc/guides/nics/features/mlx5.ini | 1 +
doc/guides/nics/mlx5.rst | 15 +++++++++++++++
2 files changed, 16 insertions(+)
diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini
index fc67415c6c..e3927ab4df 100644
--- a/doc/guides/nics/features/mlx5.ini
+++ b/doc/guides/nics/features/mlx5.ini
@@ -86,6 +86,7 @@ nsh = Y
nvgre = Y
port_id = Y
port_representor = Y
+ptype = Y
quota = Y
tag = Y
tcp = Y
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 7086f3d1d4..c9e74948cc 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -646,6 +646,21 @@ Limitations
- When using HWS flow engine (``dv_flow_en`` = 2),
only meter mark action is supported.
+- Ptype:
+
+ - Only supports HW steering (``dv_flow_en=2``).
+ - The supported values are:
+ L2: ``RTE_PTYPE_L2_ETHER``, ``RTE_PTYPE_L2_ETHER_VLAN``, ``RTE_PTYPE_L2_ETHER_QINQ``
+ L3: ``RTE_PTYPE_L3_IPV4``, ``RTE_PTYPE_L3_IPV6``
+ L4: ``RTE_PTYPE_L4_TCP``, ``RTE_PTYPE_L4_UDP``, ``RTE_PTYPE_L4_ICMP``
+ and their ``RTE_PTYPE_INNER_XXX`` counterparts as well as ``RTE_PTYPE_TUNNEL_ESP``.
+ Any other values are not supported. Using them as a value will cause unexpected behavior.
+ - Matching on both outer and inner IP fragmented is supported using ``RTE_PTYPE_L4_FRAG`` and
+ ``RTE_PTYPE_INNER_L4_FRAG`` values. They are not part of L4 types, so they should be provided
+ explicitly as a mask value during pattern template creation. Providing ``RTE_PTYPE_L4_MASK``
+ during pattern template creation and ``RTE_PTYPE_L4_FRAG`` during flow rule creation
+ will cause unexpected behavior.
+
- Integrity:
- Integrity offload is enabled starting from **ConnectX-6 Dx**.
--
2.18.2
^ permalink raw reply [flat|nested] 27+ messages in thread
* RE: [PATCH v4 3/4] doc: add PMD ptype item limitations
2023-10-25 20:51 ` [PATCH v4 3/4] doc: add PMD ptype item limitations Alexander Kozyrev
@ 2023-10-29 13:13 ` Ori Kam
0 siblings, 0 replies; 27+ messages in thread
From: Ori Kam @ 2023-10-29 13:13 UTC (permalink / raw)
To: Alexander Kozyrev, dev
Cc: Matan Azrad, Michael Baum, Alex Vesker, Suanming Mou,
Slava Ovsiienko, Erez Shitrit
Hi
> -----Original Message-----
> From: Alexander Kozyrev <akozyrev@nvidia.com>
> Sent: Wednesday, October 25, 2023 11:51 PM
>
> From: Michael Baum <michaelba@nvidia.com>
>
> Add limitations for ptype item support in "mlx5.rst" file.
>
> Signed-off-by: Michael Baum <michaelba@nvidia.com>
> ---
Acked-by: Ori Kam <orika@nvidia.com>
Best,
Ori
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v4 4/4] doc: add packet type matching item to release notes
2023-10-25 20:51 ` [PATCH v4 0/4] ptype matching support in mlx5 Alexander Kozyrev
` (2 preceding siblings ...)
2023-10-25 20:51 ` [PATCH v4 3/4] doc: add PMD ptype item limitations Alexander Kozyrev
@ 2023-10-25 20:51 ` Alexander Kozyrev
2023-10-29 13:14 ` Ori Kam
2023-10-29 17:27 ` [PATCH v4 0/4] ptype matching support in mlx5 Raslan Darawsheh
4 siblings, 1 reply; 27+ messages in thread
From: Alexander Kozyrev @ 2023-10-25 20:51 UTC (permalink / raw)
To: dev; +Cc: orika, matan, michaelba, valex, suanmingm, viacheslavo, erezsh
Document new RTE_FLOW_ITEM_TYPE_PTYPE in the release notes.
Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
---
doc/guides/rel_notes/release_23_11.rst | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 0a6fc76a9d..b94328b8a7 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -122,6 +122,10 @@ New Features
a group's miss actions, which are the actions to be performed on packets
that didn't match any of the flow rules in the group.
+* **Added ptype matching criteria.**
+ Added ``RTE_FLOW_ITEM_TYPE_PTYPE`` to allow matching on L2/L3/L4
+ and tunnel information as defined in mbuf packet type.
+
* **Updated Intel cpfl driver.**
* Added support for port representor.
@@ -143,6 +147,7 @@ New Features
* **Updated NVIDIA mlx5 net driver.**
* Added support for Network Service Header (NSH) flow matching.
+ * Added support for ``RTE_FLOW_ITEM_TYPE_PTYPE`` flow item.
* **Updated Solarflare net driver.**
--
2.18.2
^ permalink raw reply [flat|nested] 27+ messages in thread
* RE: [PATCH v4 4/4] doc: add packet type matching item to release notes
2023-10-25 20:51 ` [PATCH v4 4/4] doc: add packet type matching item to release notes Alexander Kozyrev
@ 2023-10-29 13:14 ` Ori Kam
0 siblings, 0 replies; 27+ messages in thread
From: Ori Kam @ 2023-10-29 13:14 UTC (permalink / raw)
To: Alexander Kozyrev, dev
Cc: Matan Azrad, Michael Baum, Alex Vesker, Suanming Mou,
Slava Ovsiienko, Erez Shitrit
> -----Original Message-----
> From: Alexander Kozyrev <akozyrev@nvidia.com>
> Sent: Wednesday, October 25, 2023 11:51 PM
> Document new RTE_FLOW_ITEM_TYPE_PTYPE in the release notes.
>
> Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
> ---
Acked-by: Ori Kam <orika@nvidia.com>
Best,
Ori
^ permalink raw reply [flat|nested] 27+ messages in thread
* RE: [PATCH v4 0/4] ptype matching support in mlx5
2023-10-25 20:51 ` [PATCH v4 0/4] ptype matching support in mlx5 Alexander Kozyrev
` (3 preceding siblings ...)
2023-10-25 20:51 ` [PATCH v4 4/4] doc: add packet type matching item to release notes Alexander Kozyrev
@ 2023-10-29 17:27 ` Raslan Darawsheh
4 siblings, 0 replies; 27+ messages in thread
From: Raslan Darawsheh @ 2023-10-29 17:27 UTC (permalink / raw)
To: Alexander Kozyrev, dev
Cc: Ori Kam, Matan Azrad, Michael Baum, Alex Vesker, Suanming Mou,
Slava Ovsiienko, Erez Shitrit
Hi,
> -----Original Message-----
> From: Alexander Kozyrev <akozyrev@nvidia.com>
> Sent: Wednesday, October 25, 2023 11:51 PM
> To: dev@dpdk.org
> Cc: Ori Kam <orika@nvidia.com>; Matan Azrad <matan@nvidia.com>; Michael
> Baum <michaelba@nvidia.com>; Alex Vesker <valex@nvidia.com>; Suanming
> Mou <suanmingm@nvidia.com>; Slava Ovsiienko <viacheslavo@nvidia.com>;
> Erez Shitrit <erezsh@nvidia.com>
> Subject: [PATCH v4 0/4] ptype matching support in mlx5
>
> Add support for RTE_FLOW_ITEM_TYPE_PTYPE in mlx5 PMD.
>
> Alexander Kozyrev (3):
> net/mlx5: add support for ptype match in hardware steering
> net/mlx5/hws: add support for fragmented ptype match
> doc: add packet type matching item to release notes
>
> Michael Baum (1):
> doc: add PMD ptype item limitations
>
> doc/guides/nics/features/mlx5.ini | 1 +
> doc/guides/nics/mlx5.rst | 15 ++
> doc/guides/rel_notes/release_23_11.rst | 5 +
> drivers/net/mlx5/hws/mlx5dr_definer.c | 195
> +++++++++++++++++++++++++
> drivers/net/mlx5/hws/mlx5dr_definer.h | 9 ++
> drivers/net/mlx5/mlx5_flow.h | 3 +
> drivers/net/mlx5/mlx5_flow_hw.c | 1 +
> 7 files changed, 229 insertions(+)
>
> --
> 2.18.2
Series applied to next-net-mlx,
Squashed the last two patches into first as they need to be with the same patch that introduced the feature.
Kindest regards,
Raslan Darawsheh
^ permalink raw reply [flat|nested] 27+ messages in thread