DPDK patches and discussions
* [dpdk-dev] [PATCH 0/9] net/mlx5: add flex item support
@ 2021-11-01  9:15 Gregory Etelson
  2021-11-01  9:15 ` [dpdk-dev] [PATCH 1/9] common/mlx5: refactor HCA attributes query Gregory Etelson
                   ` (9 more replies)
  0 siblings, 10 replies; 22+ messages in thread
From: Gregory Etelson @ 2021-11-01  9:15 UTC (permalink / raw)
  To: dev, getelson, viacheslavo; +Cc: matan

Gregory Etelson (4):
  common/mlx5: extend flex parser capabilities
  common/mlx5: fix flex parser DevX creation routine
  net/mlx5: add flex parser DevX object management
  net/mlx5: handle flex item in flows

Viacheslav Ovsiienko (5):
  common/mlx5: refactor HCA attributes query
  net/mlx5: update eCPRI flex parser structures
  net/mlx5: add flex item API
  net/mlx5: translate flex item configuration
  net/mlx5: translate flex item pattern into matcher

 drivers/common/mlx5/mlx5_devx_cmds.c |  239 +++--
 drivers/common/mlx5/mlx5_devx_cmds.h |   65 +-
 drivers/common/mlx5/mlx5_prm.h       |   50 +-
 drivers/net/mlx5/linux/mlx5_os.c     |   14 +
 drivers/net/mlx5/meson.build         |    1 +
 drivers/net/mlx5/mlx5.c              |   15 +-
 drivers/net/mlx5/mlx5.h              |   79 +-
 drivers/net/mlx5/mlx5_flow.c         |   49 +
 drivers/net/mlx5/mlx5_flow.h         |   27 +-
 drivers/net/mlx5/mlx5_flow_dv.c      |  127 ++-
 drivers/net/mlx5/mlx5_flow_flex.c    | 1373 ++++++++++++++++++++++++++
 11 files changed, 1913 insertions(+), 126 deletions(-)
 create mode 100644 drivers/net/mlx5/mlx5_flow_flex.c

-- 
2.33.1



* [dpdk-dev] [PATCH 1/9] common/mlx5: refactor HCA attributes query
  2021-11-01  9:15 [dpdk-dev] [PATCH 0/9] net/mlx5: add flex item support Gregory Etelson
@ 2021-11-01  9:15 ` Gregory Etelson
  2021-11-01  9:15 ` [dpdk-dev] [PATCH 2/9] common/mlx5: extend flex parser capabilities Gregory Etelson
                   ` (8 subsequent siblings)
  9 siblings, 0 replies; 22+ messages in thread
From: Gregory Etelson @ 2021-11-01  9:15 UTC (permalink / raw)
  To: dev, getelson, viacheslavo; +Cc: matan

From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

The routines querying the HCA attributes from the device share a
common code pattern. This common part can be factored out into a
dedicated routine.
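
For reference, a caller-side sketch of the new helper, mirroring the
QOS capability query further down in this patch (not a verbatim copy):

	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)];
	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)];
	void *hcattr;
	int rc;

	hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
			MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
			MLX5_HCA_CAP_OPMOD_GET_CUR);
	if (!hcattr)
		return rc; /* rc was filled by the helper */
	attr->qos.flow_meter_old = MLX5_GET(qos_cap, hcattr, flow_meter_old);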

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/common/mlx5/mlx5_devx_cmds.c | 173 +++++++++++----------------
 1 file changed, 73 insertions(+), 100 deletions(-)

diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c
index fb7c8e986f..d005eb3643 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.c
+++ b/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -13,6 +13,42 @@
 #include "mlx5_common_log.h"
 #include "mlx5_malloc.h"
 
+static void *
+mlx5_devx_get_hca_cap(void *ctx, uint32_t *in, uint32_t *out,
+		      int *err, uint32_t flags)
+{
+	const size_t size_in = MLX5_ST_SZ_DW(query_hca_cap_in) * sizeof(int);
+	const size_t size_out = MLX5_ST_SZ_DW(query_hca_cap_out) * sizeof(int);
+	int status, syndrome, rc;
+
+	if (err)
+		*err = 0;
+	memset(in, 0, size_in);
+	memset(out, 0, size_out);
+	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+	MLX5_SET(query_hca_cap_in, in, op_mod, flags);
+	rc = mlx5_glue->devx_general_cmd(ctx, in, size_in, out, size_out);
+	if (rc) {
+		DRV_LOG(ERR,
+			"Failed to query devx HCA capabilities func %#02x",
+			flags >> 1);
+		if (err)
+			*err = rc > 0 ? -rc : rc;
+		return NULL;
+	}
+	status = MLX5_GET(query_hca_cap_out, out, status);
+	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
+	if (status) {
+		DRV_LOG(ERR,
+			"Failed to query devx HCA capabilities func %#02x status %x, syndrome = %x",
+			flags >> 1, status, syndrome);
+		if (err)
+			*err = -1;
+		return NULL;
+	}
+	return MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+}
+
 /**
  * Perform read access to the registers. Reads data from register
  * and writes ones to the specified buffer.
@@ -472,21 +508,15 @@ static void
 mlx5_devx_cmd_query_hca_vdpa_attr(void *ctx,
 				  struct mlx5_hca_vdpa_attr *vdpa_attr)
 {
-	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
-	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
-	void *hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
-	int status, syndrome, rc;
+	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)];
+	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)];
+	void *hcattr;
 
-	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
-	MLX5_SET(query_hca_cap_in, in, op_mod,
-		 MLX5_GET_HCA_CAP_OP_MOD_VDPA_EMULATION |
-		 MLX5_HCA_CAP_OPMOD_GET_CUR);
-	rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
-	status = MLX5_GET(query_hca_cap_out, out, status);
-	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
-	if (rc || status) {
-		RTE_LOG(DEBUG, PMD, "Failed to query devx VDPA capabilities,"
-			" status %x, syndrome = %x", status, syndrome);
+	hcattr = mlx5_devx_get_hca_cap(ctx, in, out, NULL,
+			MLX5_GET_HCA_CAP_OP_MOD_VDPA_EMULATION |
+			MLX5_HCA_CAP_OPMOD_GET_CUR);
+	if (!hcattr) {
+		RTE_LOG(DEBUG, PMD, "Failed to query devx VDPA capabilities");
 		vdpa_attr->valid = 0;
 	} else {
 		vdpa_attr->valid = 1;
@@ -741,27 +771,15 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
 {
 	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
 	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
-	void *hcattr;
-	int status, syndrome, rc, i;
 	uint64_t general_obj_types_supported = 0;
+	void *hcattr;
+	int rc, i;
 
-	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
-	MLX5_SET(query_hca_cap_in, in, op_mod,
-		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
-		 MLX5_HCA_CAP_OPMOD_GET_CUR);
-
-	rc = mlx5_glue->devx_general_cmd(ctx,
-					 in, sizeof(in), out, sizeof(out));
-	if (rc)
-		goto error;
-	status = MLX5_GET(query_hca_cap_out, out, status);
-	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
-	if (status) {
-		DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
-			"status %x, syndrome = %x", status, syndrome);
-		return -1;
-	}
-	hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+	hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
+			MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
+			MLX5_HCA_CAP_OPMOD_GET_CUR);
+	if (!hcattr)
+		return rc;
 	attr->flow_counter_bulk_alloc_bitmap =
 			MLX5_GET(cmd_hca_cap, hcattr, flow_counter_bulk_alloc);
 	attr->flow_counters_dump = MLX5_GET(cmd_hca_cap, hcattr,
@@ -893,19 +911,13 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
 					 general_obj_types) &
 			      MLX5_GENERAL_OBJ_TYPES_CAP_CONN_TRACK_OFFLOAD);
 	if (attr->qos.sup) {
-		MLX5_SET(query_hca_cap_in, in, op_mod,
-			 MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
-			 MLX5_HCA_CAP_OPMOD_GET_CUR);
-		rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in),
-						 out, sizeof(out));
-		if (rc)
-			goto error;
-		if (status) {
-			DRV_LOG(DEBUG, "Failed to query devx QOS capabilities,"
-				" status %x, syndrome = %x", status, syndrome);
-			return -1;
+		hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
+				MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
+				MLX5_HCA_CAP_OPMOD_GET_CUR);
+		if (!hcattr) {
+			DRV_LOG(DEBUG, "Failed to query devx QOS capabilities");
+			return rc;
 		}
-		hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
 		attr->qos.flow_meter_old =
 				MLX5_GET(qos_cap, hcattr, flow_meter_old);
 		attr->qos.log_max_flow_meter =
@@ -934,27 +946,14 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
 		mlx5_devx_cmd_query_hca_vdpa_attr(ctx, &attr->vdpa);
 	if (!attr->eth_net_offloads)
 		return 0;
-
 	/* Query Flow Sampler Capability From FLow Table Properties Layout. */
-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
-	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
-	MLX5_SET(query_hca_cap_in, in, op_mod,
-		 MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE |
-		 MLX5_HCA_CAP_OPMOD_GET_CUR);
-
-	rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
-	if (rc)
-		goto error;
-	status = MLX5_GET(query_hca_cap_out, out, status);
-	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
-	if (status) {
-		DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
-			"status %x, syndrome = %x", status, syndrome);
+	hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
+			MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE |
+			MLX5_HCA_CAP_OPMOD_GET_CUR);
+	if (!hcattr) {
 		attr->log_max_ft_sampler_num = 0;
-		return -1;
+		return rc;
 	}
-	hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
 	attr->log_max_ft_sampler_num = MLX5_GET
 		(flow_table_nic_cap, hcattr,
 		 flow_table_properties_nic_receive.log_max_ft_sampler_num);
@@ -969,27 +968,13 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
 		(flow_table_nic_cap, hcattr,
 		 ft_field_support_2_nic_receive.outer_ipv4_ihl);
 	/* Query HCA offloads for Ethernet protocol. */
-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
-	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
-	MLX5_SET(query_hca_cap_in, in, op_mod,
-		 MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS |
-		 MLX5_HCA_CAP_OPMOD_GET_CUR);
-
-	rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
-	if (rc) {
+	hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
+			MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS |
+			MLX5_HCA_CAP_OPMOD_GET_CUR);
+	if (!hcattr) {
 		attr->eth_net_offloads = 0;
-		goto error;
+		return rc;
 	}
-	status = MLX5_GET(query_hca_cap_out, out, status);
-	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
-	if (status) {
-		DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
-			"status %x, syndrome = %x", status, syndrome);
-		attr->eth_net_offloads = 0;
-		return -1;
-	}
-	hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
 	attr->wqe_vlan_insert = MLX5_GET(per_protocol_networking_offload_caps,
 					 hcattr, wqe_vlan_insert);
 	attr->csum_cap = MLX5_GET(per_protocol_networking_offload_caps,
@@ -1044,26 +1029,14 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
 					 hcattr, rss_ind_tbl_cap);
 	/* Query HCA attribute for ROCE. */
 	if (attr->roce) {
-		memset(in, 0, sizeof(in));
-		memset(out, 0, sizeof(out));
-		MLX5_SET(query_hca_cap_in, in, opcode,
-			 MLX5_CMD_OP_QUERY_HCA_CAP);
-		MLX5_SET(query_hca_cap_in, in, op_mod,
-			 MLX5_GET_HCA_CAP_OP_MOD_ROCE |
-			 MLX5_HCA_CAP_OPMOD_GET_CUR);
-		rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in),
-						 out, sizeof(out));
-		if (rc)
-			goto error;
-		status = MLX5_GET(query_hca_cap_out, out, status);
-		syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
-		if (status) {
+		hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
+				MLX5_GET_HCA_CAP_OP_MOD_ROCE |
+				MLX5_HCA_CAP_OPMOD_GET_CUR);
+		if (!hcattr) {
 			DRV_LOG(DEBUG,
-				"Failed to query devx HCA ROCE capabilities, "
-				"status %x, syndrome = %x", status, syndrome);
-			return -1;
+				"Failed to query devx HCA ROCE capabilities");
+			return rc;
 		}
-		hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
 		attr->qp_ts_format = MLX5_GET(roce_caps, hcattr, qp_ts_format);
 	}
 	if (attr->eth_virt &&
-- 
2.33.1



* [dpdk-dev] [PATCH 2/9] common/mlx5: extend flex parser capabilities
  2021-11-01  9:15 [dpdk-dev] [PATCH 0/9] net/mlx5: add flex item support Gregory Etelson
  2021-11-01  9:15 ` [dpdk-dev] [PATCH 1/9] common/mlx5: refactor HCA attributes query Gregory Etelson
@ 2021-11-01  9:15 ` Gregory Etelson
  2021-11-01  9:15 ` [dpdk-dev] [PATCH 3/9] common/mlx5: fix flex parser DevX creation routine Gregory Etelson
                   ` (7 subsequent siblings)
  9 siblings, 0 replies; 22+ messages in thread
From: Gregory Etelson @ 2021-11-01  9:15 UTC (permalink / raw)
  To: dev, getelson, viacheslavo; +Cc: matan

MLX5 PARSE_GRAPH_NODE is the main data structure used by the Flex
Parser when a new parsing protocol is defined. When software
creates a PARSE_GRAPH_NODE object for a new protocol, it must
verify that the configuration parameters it uses comply with the
hardware limits.

The patch queries the hardware PARSE_GRAPH_NODE capabilities and
stores them in the PMD internal configuration structure (a usage
sketch follows the list below):

 - query capabilities from the parse_graph_node attribute page
 - query the max_num_prog_sample_field capability from HCA capabilities page 2
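
As an illustration of how the stored values can be used, a hypothetical
sanity check against the queried limits could look as follows (the helper
name and shape are ours, not part of the patch):

	/* Hypothetical check of a requested sample count against HW limits. */
	static int
	flex_conf_fits_hw(const struct mlx5_hca_flex_attr *fa,
			  uint32_t num_samples)
	{
		if (num_samples > fa->max_num_sample ||
		    num_samples > fa->max_num_prog_sample)
			return -1; /* more samples than the parser supports */
		return 0;
	}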

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Reviewed-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/common/mlx5/mlx5_devx_cmds.c | 57 ++++++++++++++++++++++++
 drivers/common/mlx5/mlx5_devx_cmds.h | 65 +++++++++++++++++++++++++++-
 drivers/common/mlx5/mlx5_prm.h       | 50 ++++++++++++++++++++-
 3 files changed, 168 insertions(+), 4 deletions(-)

diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c
index d005eb3643..28e577a37e 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.c
+++ b/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -729,6 +729,53 @@ mlx5_devx_cmd_create_flex_parser(void *ctx,
 	return parse_flex_obj;
 }
 
+static int
+mlx5_devx_cmd_query_hca_parse_graph_node_cap
+	(void *ctx, struct mlx5_hca_flex_attr *attr)
+{
+	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)];
+	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)];
+	void *hcattr;
+	int rc;
+
+	hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
+			MLX5_GET_HCA_CAP_OP_MOD_PARSE_GRAPH_NODE_CAP |
+			MLX5_HCA_CAP_OPMOD_GET_CUR);
+	if (!hcattr)
+		return rc;
+	attr->node_in = MLX5_GET(parse_graph_node_cap, hcattr, node_in);
+	attr->node_out = MLX5_GET(parse_graph_node_cap, hcattr, node_out);
+	attr->header_length_mode = MLX5_GET(parse_graph_node_cap, hcattr,
+					    header_length_mode);
+	attr->sample_offset_mode = MLX5_GET(parse_graph_node_cap, hcattr,
+					    sample_offset_mode);
+	attr->max_num_arc_in = MLX5_GET(parse_graph_node_cap, hcattr,
+					max_num_arc_in);
+	attr->max_num_arc_out = MLX5_GET(parse_graph_node_cap, hcattr,
+					 max_num_arc_out);
+	attr->max_num_sample = MLX5_GET(parse_graph_node_cap, hcattr,
+					max_num_sample);
+	attr->sample_id_in_out = MLX5_GET(parse_graph_node_cap, hcattr,
+					  sample_id_in_out);
+	attr->max_base_header_length = MLX5_GET(parse_graph_node_cap, hcattr,
+						max_base_header_length);
+	attr->max_sample_base_offset = MLX5_GET(parse_graph_node_cap, hcattr,
+						max_sample_base_offset);
+	attr->max_next_header_offset = MLX5_GET(parse_graph_node_cap, hcattr,
+						max_next_header_offset);
+	attr->header_length_mask_width = MLX5_GET(parse_graph_node_cap, hcattr,
+						  header_length_mask_width);
+	/* Get the max supported samples from HCA CAP 2 */
+	hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
+			MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 |
+			MLX5_HCA_CAP_OPMOD_GET_CUR);
+	if (!hcattr)
+		return rc;
+	attr->max_num_prog_sample =
+		MLX5_GET(cmd_hca_cap_2, hcattr,	max_num_prog_sample_field);
+	return 0;
+}
+
 static int
 mlx5_devx_query_pkt_integrity_match(void *hcattr)
 {
@@ -942,6 +989,16 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
 					log_max_num_meter_aso);
 		}
 	}
+	/*
+	 * Flex item support needs max_num_prog_sample_field
+	 * from the Capabilities 2 table for PARSE_GRAPH_NODE
+	 */
+	if (attr->parse_graph_flex_node) {
+		rc = mlx5_devx_cmd_query_hca_parse_graph_node_cap
+			(ctx, &attr->flex);
+		if (rc)
+			return -1;
+	}
 	if (attr->vdpa.valid)
 		mlx5_devx_cmd_query_hca_vdpa_attr(ctx, &attr->vdpa);
 	if (!attr->eth_net_offloads)
diff --git a/drivers/common/mlx5/mlx5_devx_cmds.h b/drivers/common/mlx5/mlx5_devx_cmds.h
index 80b5dca1eb..2326f1e968 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.h
+++ b/drivers/common/mlx5/mlx5_devx_cmds.h
@@ -6,6 +6,7 @@
 #define RTE_PMD_MLX5_DEVX_CMDS_H_
 
 #include <rte_compat.h>
+#include <rte_bitops.h>
 
 #include "mlx5_glue.h"
 #include "mlx5_prm.h"
@@ -86,6 +87,64 @@ struct mlx5_hca_flow_attr {
 	uint32_t tunnel_header_2_3;
 };
 
+/**
+ * Accumulate port PARSE_GRAPH_NODE capabilities from
+ * PARSE_GRAPH_NODE Capabilities and HCA Capabilities 2 tables
+ */
+__extension__
+struct mlx5_hca_flex_attr {
+	uint32_t node_in;
+	uint32_t node_out;
+	uint16_t header_length_mode;
+	uint16_t sample_offset_mode;
+	uint8_t  max_num_arc_in;
+	uint8_t  max_num_arc_out;
+	uint8_t  max_num_sample;
+	uint8_t  max_num_prog_sample:5;	/* From HCA CAP 2 */
+	uint8_t  sample_id_in_out:1;
+	uint16_t max_base_header_length;
+	uint8_t  max_sample_base_offset;
+	uint16_t max_next_header_offset;
+	uint8_t  header_length_mask_width;
+};
+
+/* ISO C restricts enumerator values to range of 'int' */
+__extension__
+enum {
+	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_HEAD          = RTE_BIT32(1),
+	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_MAC           = RTE_BIT32(2),
+	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_IP            = RTE_BIT32(3),
+	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_GRE           = RTE_BIT32(4),
+	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_UDP           = RTE_BIT32(5),
+	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_MPLS          = RTE_BIT32(6),
+	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_TCP           = RTE_BIT32(7),
+	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_VXLAN_GRE     = RTE_BIT32(8),
+	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_GENEVE        = RTE_BIT32(9),
+	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_IPSEC_ESP     = RTE_BIT32(10),
+	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_IPV4          = RTE_BIT32(11),
+	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_IPV6          = RTE_BIT32(12),
+	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_PROGRAMMABLE  = RTE_BIT32(31)
+};
+
+enum {
+	PARSE_GRAPH_NODE_CAP_LENGTH_MODE_FIXED          = RTE_BIT32(0),
+	PARSE_GRAPH_NODE_CAP_LENGTH_MODE_EXPLISIT_FIELD = RTE_BIT32(1),
+	PARSE_GRAPH_NODE_CAP_LENGTH_MODE_BITMASK_FIELD  = RTE_BIT32(2)
+};
+
+/*
+ * DWORD shift is the base for calculating header_length_field_mask
+ * value in the MLX5_GRAPH_NODE_LEN_FIELD mode.
+ */
+#define MLX5_PARSE_GRAPH_NODE_HDR_LEN_SHIFT_DWORD 0x02
+
+static inline uint32_t
+mlx5_hca_parse_graph_node_base_hdr_len_mask
+	(const struct mlx5_hca_flex_attr *attr)
+{
+	return (1 << attr->header_length_mask_width) - 1;
+}
+
 /* HCA supports this number of time periods for LRO. */
 #define MLX5_LRO_NUM_SUPP_PERIODS 4
 
@@ -164,6 +223,7 @@ struct mlx5_hca_attr {
 	struct mlx5_hca_qos_attr qos;
 	struct mlx5_hca_vdpa_attr vdpa;
 	struct mlx5_hca_flow_attr flow;
+	struct mlx5_hca_flex_attr flex;
 	int log_max_qp_sz;
 	int log_max_cq_sz;
 	int log_max_qp;
@@ -586,8 +646,9 @@ int mlx5_devx_cmd_query_parse_samples(struct mlx5_devx_obj *flex_obj,
 				      uint32_t ids[], uint32_t num);
 
 __rte_internal
-struct mlx5_devx_obj *mlx5_devx_cmd_create_flex_parser(void *ctx,
-					struct mlx5_devx_graph_node_attr *data);
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_flex_parser(void *ctx,
+				 struct mlx5_devx_graph_node_attr *data);
 
 __rte_internal
 int mlx5_devx_cmd_register_read(void *ctx, uint16_t reg_id,
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index eab80eaead..8014ec2f92 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -975,7 +975,14 @@ struct mlx5_ifc_fte_match_set_misc4_bits {
 	u8 prog_sample_field_id_2[0x20];
 	u8 prog_sample_field_value_3[0x20];
 	u8 prog_sample_field_id_3[0x20];
-	u8 reserved_at_100[0x100];
+	u8 prog_sample_field_value_4[0x20];
+	u8 prog_sample_field_id_4[0x20];
+	u8 prog_sample_field_value_5[0x20];
+	u8 prog_sample_field_id_5[0x20];
+	u8 prog_sample_field_value_6[0x20];
+	u8 prog_sample_field_id_6[0x20];
+	u8 prog_sample_field_value_7[0x20];
+	u8 prog_sample_field_id_7[0x20];
 };
 
 struct mlx5_ifc_fte_match_set_misc5_bits {
@@ -1245,6 +1252,7 @@ enum {
 	MLX5_GET_HCA_CAP_OP_MOD_ROCE = 0x4 << 1,
 	MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE = 0x7 << 1,
 	MLX5_GET_HCA_CAP_OP_MOD_VDPA_EMULATION = 0x13 << 1,
+	MLX5_GET_HCA_CAP_OP_MOD_PARSE_GRAPH_NODE_CAP = 0x1C << 1,
 	MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 = 0x20 << 1,
 };
 
@@ -1759,6 +1767,27 @@ struct mlx5_ifc_virtio_emulation_cap_bits {
 	u8 reserved_at_1c0[0x620];
 };
 
+/**
+ * PARSE_GRAPH_NODE Capabilities Field Descriptions
+ */
+struct mlx5_ifc_parse_graph_node_cap_bits {
+	u8 node_in[0x20];
+	u8 node_out[0x20];
+	u8 header_length_mode[0x10];
+	u8 sample_offset_mode[0x10];
+	u8 max_num_arc_in[0x08];
+	u8 max_num_arc_out[0x08];
+	u8 max_num_sample[0x08];
+	u8 reserved_at_78[0x07];
+	u8 sample_id_in_out[0x1];
+	u8 max_base_header_length[0x10];
+	u8 reserved_at_90[0x08];
+	u8 max_sample_base_offset[0x08];
+	u8 max_next_header_offset[0x10];
+	u8 reserved_at_b0[0x08];
+	u8 header_length_mask_width[0x08];
+};
+
 struct mlx5_ifc_flow_table_prop_layout_bits {
 	u8 ft_support[0x1];
 	u8 flow_tag[0x1];
@@ -1853,9 +1882,14 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
 		ft_field_support_2_nic_receive;
 };
 
+/*
+ *  HCA Capabilities 2
+ */
 struct mlx5_ifc_cmd_hca_cap_2_bits {
 	u8 reserved_at_0[0x80]; /* End of DW4. */
-	u8 reserved_at_80[0xb];
+	u8 reserved_at_80[0x3];
+	u8 max_num_prog_sample_field[0x5];
+	u8 reserved_at_88[0x3];
 	u8 log_max_num_reserved_qpn[0x5];
 	u8 reserved_at_90[0x3];
 	u8 log_reserved_qpn_granularity[0x5];
@@ -3954,6 +3988,12 @@ enum mlx5_parse_graph_flow_match_sample_offset_mode {
 	MLX5_GRAPH_SAMPLE_OFFSET_BITMASK = 0x2,
 };
 
+enum mlx5_parse_graph_flow_match_sample_tunnel_mode {
+	MLX5_GRAPH_SAMPLE_TUNNEL_OUTER = 0x0,
+	MLX5_GRAPH_SAMPLE_TUNNEL_INNER = 0x1,
+	MLX5_GRAPH_SAMPLE_TUNNEL_FIRST = 0x2
+};
+
 /* Node index for an input / output arc of the flex parser graph. */
 enum mlx5_parse_graph_arc_node_index {
 	MLX5_GRAPH_ARC_NODE_NULL = 0x0,
@@ -3967,9 +4007,15 @@ enum mlx5_parse_graph_arc_node_index {
 	MLX5_GRAPH_ARC_NODE_VXLAN_GPE = 0x8,
 	MLX5_GRAPH_ARC_NODE_GENEVE = 0x9,
 	MLX5_GRAPH_ARC_NODE_IPSEC_ESP = 0xa,
+	MLX5_GRAPH_ARC_NODE_IPV4 = 0xb,
+	MLX5_GRAPH_ARC_NODE_IPV6 = 0xc,
 	MLX5_GRAPH_ARC_NODE_PROGRAMMABLE = 0x1f,
 };
 
+#define MLX5_PARSE_GRAPH_FLOW_SAMPLE_MAX 8
+#define MLX5_PARSE_GRAPH_IN_ARC_MAX 8
+#define MLX5_PARSE_GRAPH_OUT_ARC_MAX 8
+
 /**
  * Convert a user mark to flow mark.
  *
-- 
2.33.1



* [dpdk-dev] [PATCH 3/9] common/mlx5: fix flex parser DevX creation routine
  2021-11-01  9:15 [dpdk-dev] [PATCH 0/9] net/mlx5: add flex item support Gregory Etelson
  2021-11-01  9:15 ` [dpdk-dev] [PATCH 1/9] common/mlx5: refactor HCA attributes query Gregory Etelson
  2021-11-01  9:15 ` [dpdk-dev] [PATCH 2/9] common/mlx5: extend flex parser capabilities Gregory Etelson
@ 2021-11-01  9:15 ` Gregory Etelson
  2021-11-01  9:15 ` [dpdk-dev] [PATCH 4/9] net/mlx5: update eCPRI flex parser structures Gregory Etelson
                   ` (6 subsequent siblings)
  9 siblings, 0 replies; 22+ messages in thread
From: Gregory Etelson @ 2021-11-01  9:15 UTC (permalink / raw)
  To: dev, getelson, viacheslavo; +Cc: matan, stable, Netanel Gonen, Bing Zhao

Add the missing setting of the modify_field_select and
next_header_field_size field values (see the sketch below).
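
For context, a minimal sketch of a caller that relies on the fields this
fix starts propagating; the numeric values are placeholders, not taken
from the patch:

	struct mlx5_devx_graph_node_attr node = {
		.modify_field_select = 0,
	};
	struct mlx5_devx_obj *obj;

	node.next_header_field_offset = 16;	/* placeholder bit offset */
	node.next_header_field_size = 8;	/* placeholder field width */
	obj = mlx5_devx_cmd_create_flex_parser(ctx, &node);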

Fixes: 38119ebe01d6 ("common/mlx5: add DevX command for flex parsers")
Cc: stable@dpdk.org

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Reviewed-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/common/mlx5/mlx5_devx_cmds.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c
index 28e577a37e..12c114a91b 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.c
+++ b/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -620,10 +620,9 @@ mlx5_devx_cmd_query_parse_samples(struct mlx5_devx_obj *flex_obj,
 	return ret;
 }
 
-
 struct mlx5_devx_obj *
 mlx5_devx_cmd_create_flex_parser(void *ctx,
-			      struct mlx5_devx_graph_node_attr *data)
+				 struct mlx5_devx_graph_node_attr *data)
 {
 	uint32_t in[MLX5_ST_SZ_DW(create_flex_parser_in)] = {0};
 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
@@ -647,12 +646,18 @@ mlx5_devx_cmd_create_flex_parser(void *ctx,
 		 MLX5_GENERAL_OBJ_TYPE_FLEX_PARSE_GRAPH);
 	MLX5_SET(parse_graph_flex, flex, header_length_mode,
 		 data->header_length_mode);
+	MLX5_SET64(parse_graph_flex, flex, modify_field_select,
+		   data->modify_field_select);
 	MLX5_SET(parse_graph_flex, flex, header_length_base_value,
 		 data->header_length_base_value);
 	MLX5_SET(parse_graph_flex, flex, header_length_field_offset,
 		 data->header_length_field_offset);
 	MLX5_SET(parse_graph_flex, flex, header_length_field_shift,
 		 data->header_length_field_shift);
+	MLX5_SET(parse_graph_flex, flex, next_header_field_offset,
+		 data->next_header_field_offset);
+	MLX5_SET(parse_graph_flex, flex, next_header_field_size,
+		 data->next_header_field_size);
 	MLX5_SET(parse_graph_flex, flex, header_length_field_mask,
 		 data->header_length_field_mask);
 	for (i = 0; i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
-- 
2.33.1



* [dpdk-dev] [PATCH 4/9] net/mlx5: update eCPRI flex parser structures
  2021-11-01  9:15 [dpdk-dev] [PATCH 0/9] net/mlx5: add flex item support Gregory Etelson
                   ` (2 preceding siblings ...)
  2021-11-01  9:15 ` [dpdk-dev] [PATCH 3/9] common/mlx5: fix flex parser DevX creation routine Gregory Etelson
@ 2021-11-01  9:15 ` Gregory Etelson
  2021-11-01  9:15 ` [dpdk-dev] [PATCH 5/9] net/mlx5: add flex item API Gregory Etelson
                   ` (5 subsequent siblings)
  9 siblings, 0 replies; 22+ messages in thread
From: Gregory Etelson @ 2021-11-01  9:15 UTC (permalink / raw)
  To: dev, getelson, viacheslavo; +Cc: matan

From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

To handle the eCPRI protocol in flows, the mlx5 PMD engages the
flex parser hardware feature. While implementing eCPRI support we
anticipated extended flex parser usage, and all related variables
were named accordingly, containing the "flex" token. Now we are
preparing to introduce the more generic flex item approach; to
avoid naming conflicts and improve code readability, the eCPRI
infrastructure related variables are renamed as a preparation
step.

Later, once the new flex item is implemented, we could consider
refactoring the eCPRI protocol support to move it onto the common
flex item basis.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5.c         |  9 +++------
 drivers/net/mlx5/mlx5.h         | 12 +++---------
 drivers/net/mlx5/mlx5_flow_dv.c |  2 +-
 3 files changed, 7 insertions(+), 16 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 4fe7e34578..aee4fbb5ed 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -865,8 +865,7 @@ bool
 mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flex_parser_profiles *prf =
-				&priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];
+	struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;
 
 	return !!prf->obj;
 }
@@ -885,8 +884,7 @@ int
 mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flex_parser_profiles *prf =
-				&priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];
+	struct mlx5_ecpri_parser_profile *prf =	&priv->sh->ecpri_parser;
 	struct mlx5_devx_graph_node_attr node = {
 		.modify_field_select = 0,
 	};
@@ -949,8 +947,7 @@ static void
 mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flex_parser_profiles *prf =
-				&priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];
+	struct mlx5_ecpri_parser_profile *prf =	&priv->sh->ecpri_parser;
 
 	if (prf->obj)
 		mlx5_devx_cmd_destroy(prf->obj);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 5768b82935..8ae66c4f34 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1015,14 +1015,8 @@ struct mlx5_dev_txpp {
 	uint64_t err_ts_future; /* Timestamp in the distant future. */
 };
 
-/* Supported flex parser profile ID. */
-enum mlx5_flex_parser_profile_id {
-	MLX5_FLEX_PARSER_ECPRI_0 = 0,
-	MLX5_FLEX_PARSER_MAX = 8,
-};
-
-/* Sample ID information of flex parser structure. */
-struct mlx5_flex_parser_profiles {
+/* Sample ID information of eCPRI flex parser structure. */
+struct mlx5_ecpri_parser_profile {
 	uint32_t num;		/* Actual number of samples. */
 	uint32_t ids[8];	/* Sample IDs for this profile. */
 	uint8_t offset[8];	/* Bytes offset of each parser. */
@@ -1167,7 +1161,7 @@ struct mlx5_dev_ctx_shared {
 	struct mlx5_devx_obj *td; /* Transport domain. */
 	struct mlx5_lag lag; /* LAG attributes */
 	void *tx_uar; /* Tx/packet pacing shared UAR. */
-	struct mlx5_flex_parser_profiles fp[MLX5_FLEX_PARSER_MAX];
+	struct mlx5_ecpri_parser_profile ecpri_parser;
 	/* Flex parser profiles information. */
 	void *devx_rx_uar; /* DevX UAR for Rx. */
 	struct mlx5_aso_age_mng *aso_age_mng;
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 9cba22ca2d..ff8f2b75e1 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -9935,7 +9935,7 @@ flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
 	 */
 	if (!ecpri_m->hdr.common.u32)
 		return;
-	samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
+	samples = priv->sh->ecpri_parser.ids;
 	/* Need to take the whole DW as the mask to fill the entry. */
 	dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
 			    prog_sample_field_value_0);
-- 
2.33.1



* [dpdk-dev] [PATCH 5/9] net/mlx5: add flex item API
  2021-11-01  9:15 [dpdk-dev] [PATCH 0/9] net/mlx5: add flex item support Gregory Etelson
                   ` (3 preceding siblings ...)
  2021-11-01  9:15 ` [dpdk-dev] [PATCH 4/9] net/mlx5: update eCPRI flex parser structures Gregory Etelson
@ 2021-11-01  9:15 ` Gregory Etelson
  2021-11-01  9:15 ` [dpdk-dev] [PATCH 6/9] net/mlx5: add flex parser DevX object management Gregory Etelson
                   ` (4 subsequent siblings)
  9 siblings, 0 replies; 22+ messages in thread
From: Gregory Etelson @ 2021-11-01  9:15 UTC (permalink / raw)
  To: dev, getelson, viacheslavo; +Cc: matan, Anatoly Burakov

From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

This patch is a preparation step for implementing the flex item
feature in the driver and it provides:

  - external entry point routines for flex item
    creation/deletion

  - flex item object management over the ports.

The flex item object keeps information about the item created
over the port: a reference counter to track whether the item is
in use by some active flows, and a pointer to the underlying
shared DevX object, providing all the data needed to translate
the flow flex pattern into matcher fields according to the
hardware configuration.

Not too many flex items are supposed to be created on the port,
so the design is optimized for flow insertion rate rather than
for memory savings. A usage sketch follows below.
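
For illustration only, applications are expected to reach these entry
points through the generic rte_flow flex item API introduced in this
release cycle; a minimal usage sketch, assuming the
rte_flow_flex_item_create/release prototypes, could be:

	struct rte_flow_error error;
	struct rte_flow_item_flex_conf conf = { 0 }; /* filled by application */
	struct rte_flow_item_flex_handle *handle;

	handle = rte_flow_flex_item_create(port_id, &conf, &error);
	if (handle == NULL)
		rte_exit(EXIT_FAILURE, "flex item: %s\n", error.message);
	/* Reference the handle from RTE_FLOW_ITEM_TYPE_FLEX pattern items. */
	rte_flow_flex_item_release(port_id, handle, &error);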

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.c  |   4 +
 drivers/net/mlx5/meson.build      |   1 +
 drivers/net/mlx5/mlx5.c           |   2 +-
 drivers/net/mlx5/mlx5.h           |  24 ++++
 drivers/net/mlx5/mlx5_flow.c      |  49 ++++++++
 drivers/net/mlx5/mlx5_flow.h      |  18 ++-
 drivers/net/mlx5/mlx5_flow_dv.c   |   3 +-
 drivers/net/mlx5/mlx5_flow_flex.c | 189 ++++++++++++++++++++++++++++++
 8 files changed, 286 insertions(+), 4 deletions(-)
 create mode 100644 drivers/net/mlx5/mlx5_flow_flex.c

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 72bbb665cf..cf5c5b9722 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1682,6 +1682,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		err = mlx5_alloc_shared_dr(priv);
 		if (err)
 			goto error;
+		if (mlx5_flex_item_port_init(eth_dev) < 0)
+			goto error;
 	}
 	if (sh->devx && config->dv_flow_en && config->dest_tir) {
 		priv->obj_ops = devx_obj_ops;
@@ -1810,6 +1812,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
 		if (priv->hrxqs)
 			mlx5_list_destroy(priv->hrxqs);
+		if (eth_dev && priv->flex_item_map)
+			mlx5_flex_item_port_cleanup(eth_dev);
 		mlx5_free(priv);
 		if (eth_dev != NULL)
 			eth_dev->data->dev_private = NULL;
diff --git a/drivers/net/mlx5/meson.build b/drivers/net/mlx5/meson.build
index 636a1be890..2f6d8cbb3d 100644
--- a/drivers/net/mlx5/meson.build
+++ b/drivers/net/mlx5/meson.build
@@ -17,6 +17,7 @@ sources = files(
         'mlx5_flow_meter.c',
         'mlx5_flow_dv.c',
         'mlx5_flow_aso.c',
+        'mlx5_flow_flex.c',
         'mlx5_mac.c',
         'mlx5_rss.c',
         'mlx5_rx.c',
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index aee4fbb5ed..8166d6272c 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -378,7 +378,6 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 	},
 };
 
-
 #define MLX5_FLOW_MIN_ID_POOL_SIZE 512
 #define MLX5_ID_GENERATION_ARRAY_FACTOR 16
 
@@ -1682,6 +1681,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 	mlx5_mp_os_req_stop_rxtx(dev);
 	/* Free the eCPRI flex parser resource. */
 	mlx5_flex_parser_ecpri_release(dev);
+	mlx5_flex_item_port_cleanup(dev);
 	if (priv->rxqs != NULL) {
 		/* XXX race condition if mlx5_rx_burst() is still running. */
 		rte_delay_us_sleep(1000);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 8ae66c4f34..75906da2c0 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -50,6 +50,9 @@
 #define MLX5_MAX_MODIFY_NUM			32
 #define MLX5_ROOT_TBL_MODIFY_NUM		16
 
+/* Maximal number of flex items created on the port.*/
+#define MLX5_PORT_FLEX_ITEM_NUM			4
+
 enum mlx5_ipool_index {
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
 	MLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resource. */
@@ -1098,6 +1101,12 @@ struct mlx5_lag {
 	uint8_t affinity_mode; /* TIS or hash based affinity */
 };
 
+/* Port flex item context. */
+struct mlx5_flex_item {
+	struct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */
+	uint32_t refcnt; /* Atomically accessed refcnt by flows. */
+};
+
 /*
  * Shared Infiniband device context for Master/Representors
  * which belong to same IB device with multiple IB ports.
@@ -1420,6 +1429,10 @@ struct mlx5_priv {
 	struct mlx5_devx_obj *q_counters; /* DevX queue counter object. */
 	uint32_t counter_set_id; /* Queue counter ID to set in DevX objects. */
 	uint32_t lag_affinity_idx; /* LAG mode queue 0 affinity starting. */
+	rte_spinlock_t flex_item_sl; /* Flex item list spinlock. */
+	struct mlx5_flex_item flex_item[MLX5_PORT_FLEX_ITEM_NUM];
+	/* Flex items have been created on the port. */
+	uint32_t flex_item_map; /* Map of allocated flex item elements. */
 };
 
 #define PORT_ID(priv) ((priv)->dev_data->port_id)
@@ -1799,4 +1812,15 @@ mlx5_get_supported_sw_parsing_offloads(const struct mlx5_hca_attr *attr);
 uint32_t
 mlx5_get_supported_tunneling_offloads(const struct mlx5_hca_attr *attr);
 
+/* mlx5_flow_flex.c */
+
+struct rte_flow_item_flex_handle *
+flow_dv_item_create(struct rte_eth_dev *dev,
+		    const struct rte_flow_item_flex_conf *conf,
+		    struct rte_flow_error *error);
+int flow_dv_item_release(struct rte_eth_dev *dev,
+		    const struct rte_flow_item_flex_handle *flex_handle,
+		    struct rte_flow_error *error);
+int mlx5_flex_item_port_init(struct rte_eth_dev *dev);
+void mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev);
 #endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 5d19ef1e82..25ffc57f99 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -741,6 +741,14 @@ mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
 				  struct rte_mbuf *m,
 				  struct rte_flow_restore_info *info,
 				  struct rte_flow_error *err);
+static struct rte_flow_item_flex_handle *
+mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
+			   const struct rte_flow_item_flex_conf *conf,
+			   struct rte_flow_error *error);
+static int
+mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
+			    const struct rte_flow_item_flex_handle *handle,
+			    struct rte_flow_error *error);
 
 static const struct rte_flow_ops mlx5_flow_ops = {
 	.validate = mlx5_flow_validate,
@@ -760,6 +768,8 @@ static const struct rte_flow_ops mlx5_flow_ops = {
 	.tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
 	.tunnel_item_release = mlx5_flow_tunnel_item_release,
 	.get_restore_info = mlx5_flow_tunnel_get_restore_info,
+	.flex_item_create = mlx5_flow_flex_item_create,
+	.flex_item_release = mlx5_flow_flex_item_release,
 };
 
 /* Tunnel information. */
@@ -9514,6 +9524,45 @@ mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh,
 }
 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
 
+/* Flex flow item API */
+static struct rte_flow_item_flex_handle *
+mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
+			   const struct rte_flow_item_flex_conf *conf,
+			   struct rte_flow_error *error)
+{
+	static const char err_msg[] = "flex item creation unsupported";
+	struct rte_flow_attr attr = { .transfer = 0 };
+	const struct mlx5_flow_driver_ops *fops =
+			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+
+	if (!fops->item_create) {
+		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, err_msg);
+		return NULL;
+	}
+	return fops->item_create(dev, conf, error);
+}
+
+static int
+mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
+			    const struct rte_flow_item_flex_handle *handle,
+			    struct rte_flow_error *error)
+{
+	static const char err_msg[] = "flex item release unsupported";
+	struct rte_flow_attr attr = { .transfer = 0 };
+	const struct mlx5_flow_driver_ops *fops =
+			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+
+	if (!fops->item_release) {
+		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, err_msg);
+		return -rte_errno;
+	}
+	return fops->item_release(dev, handle, error);
+}
+
 static void
 mlx5_dbg__print_pattern(const struct rte_flow_item *item)
 {
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 4a16f30fb7..c1b6198adf 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1229,6 +1229,19 @@ typedef int (*mlx5_flow_create_def_policy_t)
 			(struct rte_eth_dev *dev);
 typedef void (*mlx5_flow_destroy_def_policy_t)
 			(struct rte_eth_dev *dev);
+typedef struct rte_flow_item_flex_handle *(*mlx5_flow_item_create_t)
+			(struct rte_eth_dev *dev,
+			 const struct rte_flow_item_flex_conf *conf,
+			 struct rte_flow_error *error);
+typedef int (*mlx5_flow_item_release_t)
+			(struct rte_eth_dev *dev,
+			 const struct rte_flow_item_flex_handle *handle,
+			 struct rte_flow_error *error);
+typedef int (*mlx5_flow_item_update_t)
+			(struct rte_eth_dev *dev,
+			 const struct rte_flow_item_flex_handle *handle,
+			 const struct rte_flow_item_flex_conf *conf,
+			 struct rte_flow_error *error);
 
 struct mlx5_flow_driver_ops {
 	mlx5_flow_validate_t validate;
@@ -1263,6 +1276,9 @@ struct mlx5_flow_driver_ops {
 	mlx5_flow_action_update_t action_update;
 	mlx5_flow_action_query_t action_query;
 	mlx5_flow_sync_domain_t sync_domain;
+	mlx5_flow_item_create_t item_create;
+	mlx5_flow_item_release_t item_release;
+	mlx5_flow_item_update_t item_update;
 };
 
 /* mlx5_flow.c */
@@ -1712,6 +1728,4 @@ const struct mlx5_flow_tunnel *
 mlx5_get_tof(const struct rte_flow_item *items,
 	     const struct rte_flow_action *actions,
 	     enum mlx5_tof_rule_type *rule_type);
-
-
 #endif /* RTE_PMD_MLX5_FLOW_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index ff8f2b75e1..1b92dd75ff 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -17942,7 +17942,8 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
 	.action_update = flow_dv_action_update,
 	.action_query = flow_dv_action_query,
 	.sync_domain = flow_dv_sync_domain,
+	.item_create = flow_dv_item_create,
+	.item_release = flow_dv_item_release,
 };
-
 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
 
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
new file mode 100644
index 0000000000..b7bc4af6fb
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2021 NVIDIA Corporation & Affiliates
+ */
+#include <rte_malloc.h>
+#include <mlx5_devx_cmds.h>
+#include <mlx5_malloc.h>
+#include "mlx5.h"
+#include "mlx5_flow.h"
+
+static_assert(sizeof(uint32_t) * CHAR_BIT >= MLX5_PORT_FLEX_ITEM_NUM,
+	      "Flex item maximal number exceeds uint32_t bit width");
+
+/**
+ *  Routine called once on port initialization to init flex item
+ *  related infrastructure initialization
+ *
+ * @param dev
+ *   Ethernet device to perform flex item initialization
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flex_item_port_init(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	rte_spinlock_init(&priv->flex_item_sl);
+	MLX5_ASSERT(!priv->flex_item_map);
+	return 0;
+}
+
+/**
+ *  Routine called once on port close to perform flex item
+ *  related infrastructure cleanup.
+ *
+ * @param dev
+ *   Ethernet device to perform cleanup
+ */
+void
+mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	uint32_t i;
+
+	for (i = 0; i < MLX5_PORT_FLEX_ITEM_NUM && priv->flex_item_map ; i++) {
+		if (priv->flex_item_map & (1 << i)) {
+			/* DevX object dereferencing should be provided here. */
+			priv->flex_item_map &= ~(1 << i);
+		}
+	}
+}
+
+static int
+mlx5_flex_index(struct mlx5_priv *priv, struct mlx5_flex_item *item)
+{
+	uintptr_t start = (uintptr_t)&priv->flex_item[0];
+	uintptr_t entry = (uintptr_t)item;
+	uintptr_t idx = (entry - start) / sizeof(struct mlx5_flex_item);
+
+	if (entry < start ||
+	    idx >= MLX5_PORT_FLEX_ITEM_NUM ||
+	    (entry - start) % sizeof(struct mlx5_flex_item) ||
+	    !(priv->flex_item_map & (1u << idx)))
+		return -1;
+	return (int)idx;
+}
+
+static struct mlx5_flex_item *
+mlx5_flex_alloc(struct mlx5_priv *priv)
+{
+	struct mlx5_flex_item *item = NULL;
+
+	rte_spinlock_lock(&priv->flex_item_sl);
+	if (~priv->flex_item_map) {
+		uint32_t idx = rte_bsf32(~priv->flex_item_map);
+
+		if (idx < MLX5_PORT_FLEX_ITEM_NUM) {
+			item = &priv->flex_item[idx];
+			MLX5_ASSERT(!item->refcnt);
+			MLX5_ASSERT(!item->devx_fp);
+			item->devx_fp = NULL;
+			__atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
+			priv->flex_item_map |= 1u << idx;
+		}
+	}
+	rte_spinlock_unlock(&priv->flex_item_sl);
+	return item;
+}
+
+static void
+mlx5_flex_free(struct mlx5_priv *priv, struct mlx5_flex_item *item)
+{
+	int idx = mlx5_flex_index(priv, item);
+
+	MLX5_ASSERT(idx >= 0 &&
+		    idx < MLX5_PORT_FLEX_ITEM_NUM &&
+		    (priv->flex_item_map & (1u << idx)));
+	if (idx >= 0) {
+		rte_spinlock_lock(&priv->flex_item_sl);
+		MLX5_ASSERT(!item->refcnt);
+		MLX5_ASSERT(!item->devx_fp);
+		item->devx_fp = NULL;
+		__atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
+		priv->flex_item_map &= ~(1u << idx);
+		rte_spinlock_unlock(&priv->flex_item_sl);
+	}
+}
+
+/**
+ * Create the flex item with specified configuration over the Ethernet device.
+ *
+ * @param dev
+ *   Ethernet device to create flex item on.
+ * @param[in] conf
+ *   Flex item configuration.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. PMDs initialize this
+ *   structure in case of error only.
+ *
+ * @return
+ *   Non-NULL opaque pointer on success, NULL otherwise and rte_errno is set.
+ */
+struct rte_flow_item_flex_handle *
+flow_dv_item_create(struct rte_eth_dev *dev,
+		    const struct rte_flow_item_flex_conf *conf,
+		    struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flex_item *flex;
+
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	flex = mlx5_flex_alloc(priv);
+	if (!flex) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "too many flex items created on the port");
+		return NULL;
+	}
+	RTE_SET_USED(conf);
+	/* Mark initialized flex item valid. */
+	__atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
+	return (struct rte_flow_item_flex_handle *)flex;
+}
+
+/**
+ * Release the flex item on the specified Ethernet device.
+ *
+ * @param dev
+ *   Ethernet device to destroy flex item on.
+ * @param[in] handle
+ *   Handle of the item existing on the specified device.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. PMDs initialize this
+ *   structure in case of error only.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+flow_dv_item_release(struct rte_eth_dev *dev,
+		     const struct rte_flow_item_flex_handle *handle,
+		     struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flex_item *flex =
+		(struct mlx5_flex_item *)(uintptr_t)handle;
+	uint32_t old_refcnt = 1;
+
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	rte_spinlock_lock(&priv->flex_item_sl);
+	if (mlx5_flex_index(priv, flex) < 0) {
+		rte_spinlock_unlock(&priv->flex_item_sl);
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					  "invalid flex item handle value");
+	}
+	if (!__atomic_compare_exchange_n(&flex->refcnt, &old_refcnt, 0, 0,
+					 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
+		rte_spinlock_unlock(&priv->flex_item_sl);
+		return rte_flow_error_set(error, EBUSY,
+					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					  "flex item has flow references");
+	}
+	/* Flex item is marked as invalid, we can leave locked section. */
+	rte_spinlock_unlock(&priv->flex_item_sl);
+	mlx5_flex_free(priv, flex);
+	return 0;
+}
-- 
2.33.1



* [dpdk-dev] [PATCH 6/9] net/mlx5: add flex parser DevX object management
  2021-11-01  9:15 [dpdk-dev] [PATCH 0/9] net/mlx5: add flex item support Gregory Etelson
                   ` (4 preceding siblings ...)
  2021-11-01  9:15 ` [dpdk-dev] [PATCH 5/9] net/mlx5: add flex item API Gregory Etelson
@ 2021-11-01  9:15 ` Gregory Etelson
  2021-11-01  9:15 ` [dpdk-dev] [PATCH 7/9] net/mlx5: translate flex item configuration Gregory Etelson
                   ` (3 subsequent siblings)
  9 siblings, 0 replies; 22+ messages in thread
From: Gregory Etelson @ 2021-11-01  9:15 UTC (permalink / raw)
  To: dev, getelson, viacheslavo; +Cc: matan

The DevX flex parsers can be shared between representors within
the same IB context. We should put the flex parser objects into
the shared list and engage the standard mlx5_list_xxx API to
manage them, as sketched below.
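
The intended sharing pattern, sketched from the list callbacks added
below (not a verbatim excerpt from the patch):

	struct mlx5_flex_parser_devx cfg = { .devx_obj = NULL };
	struct mlx5_list_entry *ent;
	struct mlx5_flex_parser_devx *fp;

	/* The first register call creates the DevX object via the create
	 * callback; a later call with an identical devx_conf (compared by
	 * the match callback) only takes a reference on the same entry,
	 * so representors sharing the IB context share the parser object.
	 */
	ent = mlx5_list_register(sh->flex_parsers_dv, &cfg);
	fp = container_of(ent, struct mlx5_flex_parser_devx, entry);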

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Reviewed-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.c  |  10 +++
 drivers/net/mlx5/mlx5.c           |   4 +
 drivers/net/mlx5/mlx5.h           |  20 +++++
 drivers/net/mlx5/mlx5_flow_flex.c | 121 +++++++++++++++++++++++++++++-
 4 files changed, 154 insertions(+), 1 deletion(-)

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index cf5c5b9722..b800ddd01a 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -337,6 +337,16 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 					      flow_dv_dest_array_clone_free_cb);
 	if (!sh->dest_array_list)
 		goto error;
+	/* Init shared flex parsers list, no need lcore_share */
+	snprintf(s, sizeof(s), "%s_flex_parsers_list", sh->ibdev_name);
+	sh->flex_parsers_dv = mlx5_list_create(s, sh, false,
+					       mlx5_flex_parser_create_cb,
+					       mlx5_flex_parser_match_cb,
+					       mlx5_flex_parser_remove_cb,
+					       mlx5_flex_parser_clone_cb,
+					       mlx5_flex_parser_clone_free_cb);
+	if (!sh->flex_parsers_dv)
+		goto error;
 #endif
 #ifdef HAVE_MLX5DV_DR
 	void *domain;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 8166d6272c..2c1e6b6637 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1428,6 +1428,10 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 		mlx5_flow_os_release_workspace();
 	}
 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
+	if (sh->flex_parsers_dv) {
+		mlx5_list_destroy(sh->flex_parsers_dv);
+		sh->flex_parsers_dv = NULL;
+	}
 	/*
 	 *  Ensure there is no async event handler installed.
 	 *  Only primary process handles async device events.
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 75906da2c0..244f45bea2 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1101,6 +1101,15 @@ struct mlx5_lag {
 	uint8_t affinity_mode; /* TIS or hash based affinity */
 };
 
+/* DevX flex parser context. */
+struct mlx5_flex_parser_devx {
+	struct mlx5_list_entry entry;  /* List element at the beginning. */
+	uint32_t num_samples;
+	void *devx_obj;
+	struct mlx5_devx_graph_node_attr devx_conf;
+	uint32_t sample_ids[MLX5_GRAPH_NODE_SAMPLE_NUM];
+};
+
 /* Port flex item context. */
 struct mlx5_flex_item {
 	struct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */
@@ -1157,6 +1166,7 @@ struct mlx5_dev_ctx_shared {
 	struct mlx5_list *push_vlan_action_list; /* Push VLAN actions. */
 	struct mlx5_list *sample_action_list; /* List of sample actions. */
 	struct mlx5_list *dest_array_list;
+	struct mlx5_list *flex_parsers_dv; /* Flex Item parsers. */
 	/* List of destination array actions. */
 	struct mlx5_flow_counter_mng cmng; /* Counters management structure. */
 	void *default_miss_action; /* Default miss action. */
@@ -1823,4 +1833,14 @@ int flow_dv_item_release(struct rte_eth_dev *dev,
 		    struct rte_flow_error *error);
 int mlx5_flex_item_port_init(struct rte_eth_dev *dev);
 void mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev);
+/* Flex parser list callbacks. */
+struct mlx5_list_entry *mlx5_flex_parser_create_cb(void *list_ctx, void *ctx);
+int mlx5_flex_parser_match_cb(void *list_ctx,
+			      struct mlx5_list_entry *iter, void *ctx);
+void mlx5_flex_parser_remove_cb(void *list_ctx,	struct mlx5_list_entry *entry);
+struct mlx5_list_entry *mlx5_flex_parser_clone_cb(void *list_ctx,
+						  struct mlx5_list_entry *entry,
+						  void *ctx);
+void mlx5_flex_parser_clone_free_cb(void *tool_ctx,
+				    struct mlx5_list_entry *entry);
 #endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index b7bc4af6fb..2f87073e97 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -45,7 +45,13 @@ mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev)
 
 	for (i = 0; i < MLX5_PORT_FLEX_ITEM_NUM && priv->flex_item_map ; i++) {
 		if (priv->flex_item_map & (1 << i)) {
-			/* DevX object dereferencing should be provided here. */
+			struct mlx5_flex_item *flex = &priv->flex_item[i];
+
+			claim_zero(mlx5_list_unregister
+					(priv->sh->flex_parsers_dv,
+					 &flex->devx_fp->entry));
+			flex->devx_fp = NULL;
+			flex->refcnt = 0;
 			priv->flex_item_map &= ~(1 << i);
 		}
 	}
@@ -127,7 +133,9 @@ flow_dv_item_create(struct rte_eth_dev *dev,
 		    struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flex_parser_devx devx_config = { .devx_obj = NULL };
 	struct mlx5_flex_item *flex;
+	struct mlx5_list_entry *ent;
 
 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
 	flex = mlx5_flex_alloc(priv);
@@ -137,10 +145,22 @@ flow_dv_item_create(struct rte_eth_dev *dev,
 				   "too many flex items created on the port");
 		return NULL;
 	}
+	ent = mlx5_list_register(priv->sh->flex_parsers_dv, &devx_config);
+	if (!ent) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "flex item creation failure");
+		goto error;
+	}
+	flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
 	RTE_SET_USED(conf);
 	/* Mark initialized flex item valid. */
 	__atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
 	return (struct rte_flow_item_flex_handle *)flex;
+
+error:
+	mlx5_flex_free(priv, flex);
+	return NULL;
 }
 
 /**
@@ -166,6 +186,7 @@ flow_dv_item_release(struct rte_eth_dev *dev,
 	struct mlx5_flex_item *flex =
 		(struct mlx5_flex_item *)(uintptr_t)handle;
 	uint32_t old_refcnt = 1;
+	int rc;
 
 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
 	rte_spinlock_lock(&priv->flex_item_sl);
@@ -184,6 +205,104 @@ flow_dv_item_release(struct rte_eth_dev *dev,
 	}
 	/* Flex item is marked as invalid, we can leave locked section. */
 	rte_spinlock_unlock(&priv->flex_item_sl);
+	MLX5_ASSERT(flex->devx_fp);
+	rc = mlx5_list_unregister(priv->sh->flex_parsers_dv,
+				  &flex->devx_fp->entry);
+	flex->devx_fp = NULL;
 	mlx5_flex_free(priv, flex);
+	if (rc < 0)
+		return rte_flow_error_set(error, EBUSY,
+					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					  "flex item release failure");
 	return 0;
 }
+
+/* DevX flex parser list callbacks. */
+struct mlx5_list_entry *
+mlx5_flex_parser_create_cb(void *list_ctx, void *ctx)
+{
+	struct mlx5_dev_ctx_shared *sh = list_ctx;
+	struct mlx5_flex_parser_devx *fp, *conf = ctx;
+	int ret;
+
+	fp = mlx5_malloc(MLX5_MEM_ZERO,	sizeof(struct mlx5_flex_parser_devx),
+			 0, SOCKET_ID_ANY);
+	if (!fp)
+		return NULL;
+	/* Copy the requested configurations. */
+	fp->num_samples = conf->num_samples;
+	memcpy(&fp->devx_conf, &conf->devx_conf, sizeof(fp->devx_conf));
+	/* Create DevX flex parser. */
+	fp->devx_obj = mlx5_devx_cmd_create_flex_parser(sh->cdev->ctx,
+							&fp->devx_conf);
+	if (!fp->devx_obj)
+		goto error;
+	/* Query the firmware assigned sample ids. */
+	ret = mlx5_devx_cmd_query_parse_samples(fp->devx_obj,
+						fp->sample_ids,
+						fp->num_samples);
+	if (ret)
+		goto error;
+	DRV_LOG(DEBUG, "DEVx flex parser %p created, samples num: %u",
+		(const void *)fp, fp->num_samples);
+	return &fp->entry;
+error:
+	if (fp->devx_obj)
+		mlx5_devx_cmd_destroy((void *)(uintptr_t)fp->devx_obj);
+	if (fp)
+		mlx5_free(fp);
+	return NULL;
+}
+
+int
+mlx5_flex_parser_match_cb(void *list_ctx,
+			  struct mlx5_list_entry *iter, void *ctx)
+{
+	struct mlx5_flex_parser_devx *fp =
+		container_of(iter, struct mlx5_flex_parser_devx, entry);
+	struct mlx5_flex_parser_devx *org =
+		container_of(ctx, struct mlx5_flex_parser_devx, entry);
+
+	RTE_SET_USED(list_ctx);
+	return !iter || !ctx || memcmp(&fp->devx_conf,
+				       &org->devx_conf,
+				       sizeof(fp->devx_conf));
+}
+
+void
+mlx5_flex_parser_remove_cb(void *list_ctx, struct mlx5_list_entry *entry)
+{
+	struct mlx5_flex_parser_devx *fp =
+		container_of(entry, struct mlx5_flex_parser_devx, entry);
+
+	RTE_SET_USED(list_ctx);
+	MLX5_ASSERT(fp->devx_obj);
+	claim_zero(mlx5_devx_cmd_destroy(fp->devx_obj));
+	DRV_LOG(DEBUG, "DEVx flex parser %p destroyed", (const void *)fp);
+	mlx5_free(entry);
+}
+
+struct mlx5_list_entry *
+mlx5_flex_parser_clone_cb(void *list_ctx,
+			  struct mlx5_list_entry *entry, void *ctx)
+{
+	struct mlx5_flex_parser_devx *fp;
+
+	RTE_SET_USED(list_ctx);
+	RTE_SET_USED(entry);
+	fp = mlx5_malloc(0, sizeof(struct mlx5_flex_parser_devx),
+			 0, SOCKET_ID_ANY);
+	if (!fp)
+		return NULL;
+	memcpy(fp, ctx, sizeof(struct mlx5_flex_parser_devx));
+	return &fp->entry;
+}
+
+void
+mlx5_flex_parser_clone_free_cb(void *list_ctx, struct mlx5_list_entry *entry)
+{
+	struct mlx5_flex_parser_devx *fp =
+		container_of(entry, struct mlx5_flex_parser_devx, entry);
+	RTE_SET_USED(list_ctx);
+	mlx5_free(fp);
+}
-- 
2.33.1



* [dpdk-dev] [PATCH 7/9] net/mlx5: translate flex item configuration
  2021-11-01  9:15 [dpdk-dev] [PATCH 0/9] net/mlx5: add flex item support Gregory Etelson
                   ` (5 preceding siblings ...)
  2021-11-01  9:15 ` [dpdk-dev] [PATCH 6/9] net/mlx5: add flex parser DevX object management Gregory Etelson
@ 2021-11-01  9:15 ` Gregory Etelson
  2021-11-01  9:15 ` [dpdk-dev] [PATCH 8/9] net/mlx5: translate flex item pattern into matcher Gregory Etelson
                   ` (2 subsequent siblings)
  9 siblings, 0 replies; 22+ messages in thread
From: Gregory Etelson @ 2021-11-01  9:15 UTC (permalink / raw)
  To: dev, getelson, viacheslavo; +Cc: matan

From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

The RTE Flow flex item configuration should be translated
into actual hardware settings:

  - translate the header length and next protocol field samplings
  - translate the data field samplings; similar fields with the
    same mode and matching-related parameters are relocated and
    grouped to be covered with the minimal number of hardware
    sampling registers (each register can cover an arbitrary
    neighbouring 32 bits, aligned to a byte boundary, in the
    packet, so fields with smaller lengths or segments of bigger
    fields can be combined)
  - translate the input and output links
  - prepare the data for parsing the flex item pattern on flow
    creation (an illustrative configuration sketch follows below)
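
Below is a minimal configuration sketch of the kind this translation
is meant to handle. The protocol layout, the UDP port 0x6666 and the
helper name are hypothetical and only illustrate the flex item API
introduced on the ethdev side; they are not part of this patch:

    #include <limits.h>
    #include <rte_common.h>
    #include <rte_byteorder.h>
    #include <rte_flow.h>

    /* Hypothetical protocol: fixed 8-byte header carried over UDP
     * destination port 0x6666; sample 32 bits starting at bit
     * offset 32 of that header for later matching.
     */
    static struct rte_flow_item_flex_handle *
    create_example_flex_item(uint16_t port_id, struct rte_flow_error *error)
    {
        static const struct rte_flow_item_udp udp_spec = {
            .hdr.dst_port = RTE_BE16(0x6666),
        };
        static const struct rte_flow_item_udp udp_mask = {
            .hdr.dst_port = RTE_BE16(0xFFFF),
        };
        struct rte_flow_item_flex_field samples[] = {
            {
                .field_mode = FIELD_MODE_FIXED,
                .field_size = 32, /* bits to sample */
                .field_base = 32, /* bit offset in header */
            },
        };
        struct rte_flow_item_flex_link input[] = {
            {
                .item = {
                    .type = RTE_FLOW_ITEM_TYPE_UDP,
                    .spec = &udp_spec,
                    .mask = &udp_mask,
                },
            },
        };
        struct rte_flow_item_flex_conf conf = {
            .tunnel = FLEX_TUNNEL_MODE_SINGLE,
            .next_header = {
                .field_mode = FIELD_MODE_FIXED,
                /* Fixed 8-byte header length, expressed in bits. */
                .field_base = 8 * CHAR_BIT,
            },
            .next_protocol = { .field_mode = FIELD_MODE_DUMMY },
            .sample_data = samples,
            .nb_samples = RTE_DIM(samples),
            .input_link = input,
            .nb_inputs = RTE_DIM(input),
        };

        return rte_flow_flex_item_create(port_id, &conf, error);
    }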

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5.h           |  15 +
 drivers/net/mlx5/mlx5_flow_flex.c | 844 +++++++++++++++++++++++++++++-
 2 files changed, 858 insertions(+), 1 deletion(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 244f45bea2..e3c0064f5b 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -53,6 +53,9 @@
 /* Maximal number of flex items created on the port.*/
 #define MLX5_PORT_FLEX_ITEM_NUM			4
 
+/* Maximal number of field/field parts to map into sample registers. */
+#define MLX5_FLEX_ITEM_MAPPING_NUM		32
+
 enum mlx5_ipool_index {
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
 	MLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resource. */
@@ -1110,10 +1113,22 @@ struct mlx5_flex_parser_devx {
 	uint32_t sample_ids[MLX5_GRAPH_NODE_SAMPLE_NUM];
 };
 
+/* Pattern field descriptor - how to translate flex pattern into samples. */
+__extension__
+struct mlx5_flex_pattern_field {
+	uint16_t width:6;
+	uint16_t shift:5;
+	uint16_t reg_id:5;
+};
+#define MLX5_INVALID_SAMPLE_REG_ID 0x1F
+
 /* Port flex item context. */
 struct mlx5_flex_item {
 	struct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */
 	uint32_t refcnt; /* Atomically accessed refcnt by flows. */
+	enum rte_flow_item_flex_tunnel_mode tunnel_mode; /* Tunnel mode. */
+	uint32_t mapnum; /* Number of pattern translation entries. */
+	struct mlx5_flex_pattern_field map[MLX5_FLEX_ITEM_MAPPING_NUM];
 };
 
 /*
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index 2f87073e97..b4a9f1a537 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -113,6 +113,847 @@ mlx5_flex_free(struct mlx5_priv *priv, struct mlx5_flex_item *item)
 	}
 }
 
+/*
+ * Calculate largest mask value for a given shift.
+ *
+ *   shift      mask
+ * ------- ---------------
+ *    0     b111100  0x3C
+ *    1     b111110  0x3E
+ *    2     b111111  0x3F
+ *    3     b011111  0x1F
+ *    4     b001111  0x0F
+ *    5     b000111  0x07
+ */
+static uint8_t
+mlx5_flex_hdr_len_mask(uint8_t shift,
+		       const struct mlx5_hca_flex_attr *attr)
+{
+	uint32_t base_mask;
+	int diff = shift - MLX5_PARSE_GRAPH_NODE_HDR_LEN_SHIFT_DWORD;
+
+	base_mask = mlx5_hca_parse_graph_node_base_hdr_len_mask(attr);
+	return diff == 0 ? base_mask :
+	       diff < 0 ? (base_mask << -diff) & base_mask : base_mask >> diff;
+}
+
+static int
+mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr,
+			   const struct rte_flow_item_flex_conf *conf,
+			   struct mlx5_flex_parser_devx *devx,
+			   struct rte_flow_error *error)
+{
+	const struct rte_flow_item_flex_field *field = &conf->next_header;
+	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
+	uint32_t len_width, mask;
+
+	if (field->field_base % CHAR_BIT)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "not byte aligned header length field");
+	switch (field->field_mode) {
+	case FIELD_MODE_DUMMY:
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "invalid header length field mode (DUMMY)");
+	case FIELD_MODE_FIXED:
+		if (!(attr->header_length_mode &
+		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIXED)))
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported header length field mode (FIXED)");
+		if (attr->header_length_mask_width < field->field_size)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "header length field width exceeds limit");
+		if (field->offset_shift < 0 ||
+		    field->offset_shift > attr->header_length_mask_width)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "invalid header length field shift (FIXED)");
+		if (field->field_base < 0)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "negative header length field base (FIXED)");
+		node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED;
+		break;
+	case FIELD_MODE_OFFSET:
+		if (!(attr->header_length_mode &
+		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIELD)))
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported header length field mode (OFFSET)");
+		node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIELD;
+		if (field->offset_mask == 0 ||
+		    !rte_is_power_of_2(field->offset_mask + 1))
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "invalid length field offset mask (OFFSET)");
+		len_width = rte_fls_u32(field->offset_mask);
+		if (len_width > attr->header_length_mask_width)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "length field offset mask too wide (OFFSET)");
+		mask = mlx5_flex_hdr_len_mask(field->offset_shift, attr);
+		if (mask < field->offset_mask)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "length field shift too big (OFFSET)");
+		node->header_length_field_mask = RTE_MIN(mask,
+							 field->offset_mask);
+		break;
+	case FIELD_MODE_BITMASK:
+		if (!(attr->header_length_mode &
+		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_BITMASK)))
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported header length field mode (BITMASK)");
+		if (attr->header_length_mask_width < field->field_size)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "header length field width exceeds limit");
+		node->header_length_mode = MLX5_GRAPH_NODE_LEN_BITMASK;
+		mask = mlx5_flex_hdr_len_mask(field->offset_shift, attr);
+		if (mask < field->offset_mask)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "length field shift too big (BITMASK)");
+		node->header_length_field_mask = RTE_MIN(mask,
+							 field->offset_mask);
+		break;
+	default:
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "unknown header length field mode");
+	}
+	if (field->field_base / CHAR_BIT >= 0 &&
+	    field->field_base / CHAR_BIT > attr->max_base_header_length)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "header length field base exceeds limit");
+	node->header_length_base_value = field->field_base / CHAR_BIT;
+	if (field->field_mode == FIELD_MODE_OFFSET ||
+	    field->field_mode == FIELD_MODE_BITMASK) {
+		if (field->offset_shift > 15 || field->offset_shift < 0)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "header length field shift exceeds limit");
+		node->header_length_field_shift = field->offset_shift;
+		node->header_length_field_offset = field->offset_base;
+	}
+	return 0;
+}
+
+static int
+mlx5_flex_translate_next(struct mlx5_hca_flex_attr *attr,
+			 const struct rte_flow_item_flex_conf *conf,
+			 struct mlx5_flex_parser_devx *devx,
+			 struct rte_flow_error *error)
+{
+	const struct rte_flow_item_flex_field *field = &conf->next_protocol;
+	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
+
+	switch (field->field_mode) {
+	case FIELD_MODE_DUMMY:
+		if (conf->nb_outputs)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "next protocol field is required (DUMMY)");
+		return 0;
+	case FIELD_MODE_FIXED:
+		break;
+	case FIELD_MODE_OFFSET:
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "unsupported next protocol field mode (OFFSET)");
+		break;
+	case FIELD_MODE_BITMASK:
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "unsupported next protocol field mode (BITMASK)");
+	default:
+		return rte_flow_error_set
+			(error, EINVAL,
+			 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "unknown next protocol field mode");
+	}
+	MLX5_ASSERT(field->field_mode == FIELD_MODE_FIXED);
+	if (!conf->nb_outputs)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "out link(s) are required if next field is present");
+	if (attr->max_next_header_offset < field->field_base)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "next protocol field base exceeds limit");
+	if (field->offset_shift)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "unsupported next protocol field shift");
+	node->next_header_field_offset = field->field_base;
+	node->next_header_field_size = field->field_size;
+	return 0;
+}
+
+/* Helper structure to handle field bit intervals. */
+struct mlx5_flex_field_cover {
+	uint16_t num;
+	int32_t start[MLX5_FLEX_ITEM_MAPPING_NUM];
+	int32_t end[MLX5_FLEX_ITEM_MAPPING_NUM];
+	uint8_t mapped[MLX5_FLEX_ITEM_MAPPING_NUM / CHAR_BIT + 1];
+};
+
+static void
+mlx5_flex_insert_field(struct mlx5_flex_field_cover *cover,
+		       uint16_t num, int32_t start, int32_t end)
+{
+	MLX5_ASSERT(num < MLX5_FLEX_ITEM_MAPPING_NUM);
+	MLX5_ASSERT(num <= cover->num);
+	if (num < cover->num) {
+		memmove(&cover->start[num + 1], &cover->start[num],
+			(cover->num - num) * sizeof(int32_t));
+		memmove(&cover->end[num + 1], &cover->end[num],
+			(cover->num - num) * sizeof(int32_t));
+	}
+	cover->start[num] = start;
+	cover->end[num] = end;
+	cover->num++;
+}
+
+static void
+mlx5_flex_merge_field(struct mlx5_flex_field_cover *cover, uint16_t num)
+{
+	uint32_t i, del = 0;
+	int32_t end;
+
+	MLX5_ASSERT(num < MLX5_FLEX_ITEM_MAPPING_NUM);
+	MLX5_ASSERT(num < (cover->num - 1));
+	end = cover->end[num];
+	for (i = num + 1; i < cover->num; i++) {
+		if (end < cover->start[i])
+			break;
+		del++;
+		if (end <= cover->end[i]) {
+			cover->end[num] = cover->end[i];
+			break;
+		}
+	}
+	if (del) {
+		MLX5_ASSERT(del < (cover->num - 1u - num));
+		cover->num -= del;
+		MLX5_ASSERT(cover->num > num);
+		if ((cover->num - num) > 1) {
+			memmove(&cover->start[num + 1],
+				&cover->start[num + 1 + del],
+				(cover->num - num - 1) * sizeof(int32_t));
+			memmove(&cover->end[num + 1],
+				&cover->end[num + 1 + del],
+				(cover->num - num - 1) * sizeof(int32_t));
+		}
+	}
+}
+
+/*
+ * Validate the sample field and update interval array
+ * if parameters match the "match" field.
+ * Returns:
+ *    < 0  - error
+ *    == 0 - no match, interval array not updated
+ *    > 0  - match, interval array updated
+ */
+static int
+mlx5_flex_cover_sample(struct mlx5_flex_field_cover *cover,
+		       struct rte_flow_item_flex_field *field,
+		       struct rte_flow_item_flex_field *match,
+		       struct mlx5_hca_flex_attr *attr,
+		       struct rte_flow_error *error)
+{
+	int32_t start, end;
+	uint32_t i;
+
+	switch (field->field_mode) {
+	case FIELD_MODE_DUMMY:
+		return 0;
+	case FIELD_MODE_FIXED:
+		if (!(attr->sample_offset_mode &
+		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_FIXED)))
+			return rte_flow_error_set
+				(error, EINVAL,
+				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported sample field mode (FIXED)");
+		if (field->offset_shift)
+			return rte_flow_error_set
+				(error, EINVAL,
+				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "invalid sample field shift (FIXED)");
+		if (field->field_base < 0)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "invalid sample field base (FIXED)");
+		if (field->field_base / CHAR_BIT > attr->max_sample_base_offset)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "sample field base exceeds limit (FIXED)");
+		break;
+	case FIELD_MODE_OFFSET:
+		if (!(attr->sample_offset_mode &
+		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_FIELD)))
+			return rte_flow_error_set
+				(error, EINVAL,
+				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported sample field mode (OFFSET)");
+		if (field->field_base / CHAR_BIT >= 0 &&
+		    field->field_base / CHAR_BIT > attr->max_sample_base_offset)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				"sample field base exceeds limit");
+		break;
+	case FIELD_MODE_BITMASK:
+		if (!(attr->sample_offset_mode &
+		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_BITMASK)))
+			return rte_flow_error_set
+				(error, EINVAL,
+				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported sample field mode (BITMASK)");
+		if (field->field_base / CHAR_BIT >= 0 &&
+		    field->field_base / CHAR_BIT > attr->max_sample_base_offset)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				"sample field base exceeds limit");
+		break;
+	default:
+		return rte_flow_error_set
+			(error, EINVAL,
+			 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "unknown data sample field mode");
+	}
+	if (!match) {
+		if (!field->field_size)
+			return rte_flow_error_set
+				(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				"zero sample field width");
+		if (field->field_id)
+			DRV_LOG(DEBUG, "sample field id hint ignored");
+	} else {
+		if (field->field_mode != match->field_mode ||
+		    field->offset_base | match->offset_base ||
+		    field->offset_mask | match->offset_mask ||
+		    field->offset_shift | match->offset_shift)
+			return 0;
+	}
+	start = field->field_base;
+	end = start + field->field_size;
+	/* Add the new or similar field to interval array. */
+	if (!cover->num) {
+		cover->start[cover->num] = start;
+		cover->end[cover->num] = end;
+		cover->num = 1;
+		return 1;
+	}
+	for (i = 0; i < cover->num; i++) {
+		if (start > cover->end[i]) {
+			if (i >= (cover->num - 1u)) {
+				mlx5_flex_insert_field(cover, cover->num,
+						       start, end);
+				break;
+			}
+			continue;
+		}
+		if (end < cover->start[i]) {
+			mlx5_flex_insert_field(cover, i, start, end);
+			break;
+		}
+		if (start < cover->start[i])
+			cover->start[i] = start;
+		if (end > cover->end[i]) {
+			cover->end[i] = end;
+			if (i < (cover->num - 1u))
+				mlx5_flex_merge_field(cover, i);
+		}
+		break;
+	}
+	return 1;
+}
+
+static void
+mlx5_flex_config_sample(struct mlx5_devx_match_sample_attr *na,
+			struct rte_flow_item_flex_field *field,
+			enum rte_flow_item_flex_tunnel_mode tunnel_mode)
+{
+	memset(na, 0, sizeof(struct mlx5_devx_match_sample_attr));
+	na->flow_match_sample_en = 1;
+	switch (field->field_mode) {
+	case FIELD_MODE_FIXED:
+		na->flow_match_sample_offset_mode =
+			MLX5_GRAPH_SAMPLE_OFFSET_FIXED;
+		break;
+	case FIELD_MODE_OFFSET:
+		na->flow_match_sample_offset_mode =
+			MLX5_GRAPH_SAMPLE_OFFSET_FIELD;
+		na->flow_match_sample_field_offset = field->offset_base;
+		na->flow_match_sample_field_offset_mask = field->offset_mask;
+		na->flow_match_sample_field_offset_shift = field->offset_shift;
+		break;
+	case FIELD_MODE_BITMASK:
+		na->flow_match_sample_offset_mode =
+			MLX5_GRAPH_SAMPLE_OFFSET_BITMASK;
+		na->flow_match_sample_field_offset = field->offset_base;
+		na->flow_match_sample_field_offset_mask = field->offset_mask;
+		na->flow_match_sample_field_offset_shift = field->offset_shift;
+		break;
+	default:
+		MLX5_ASSERT(false);
+		break;
+	}
+	switch (tunnel_mode) {
+	case FLEX_TUNNEL_MODE_SINGLE:
+		/* Fallthrough */
+	case FLEX_TUNNEL_MODE_TUNNEL:
+		na->flow_match_sample_tunnel_mode =
+			MLX5_GRAPH_SAMPLE_TUNNEL_FIRST;
+		break;
+	case FLEX_TUNNEL_MODE_MULTI:
+		/* Fallthrough */
+	case FLEX_TUNNEL_MODE_OUTER:
+		na->flow_match_sample_tunnel_mode =
+			MLX5_GRAPH_SAMPLE_TUNNEL_OUTER;
+		break;
+	case FLEX_TUNNEL_MODE_INNER:
+		na->flow_match_sample_tunnel_mode =
+			MLX5_GRAPH_SAMPLE_TUNNEL_INNER;
+		break;
+	default:
+		MLX5_ASSERT(false);
+		break;
+	}
+}
+
+/* Map specified field to set/subset of allocated sample registers. */
+static int
+mlx5_flex_map_sample(struct rte_flow_item_flex_field *field,
+		     struct mlx5_flex_parser_devx *parser,
+		     struct mlx5_flex_item *item,
+		     struct rte_flow_error *error)
+{
+	struct mlx5_devx_match_sample_attr node;
+	int32_t start = field->field_base;
+	int32_t end = start + field->field_size;
+	struct mlx5_flex_pattern_field *trans;
+	uint32_t i, done_bits = 0;
+
+	if (field->field_mode == FIELD_MODE_DUMMY) {
+		done_bits = field->field_size;
+		while (done_bits) {
+			uint32_t part = RTE_MIN(done_bits,
+						sizeof(uint32_t) * CHAR_BIT);
+			if (item->mapnum >= MLX5_FLEX_ITEM_MAPPING_NUM)
+				return rte_flow_error_set
+					(error,
+					 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					 "too many flex item pattern translations");
+			trans = &item->map[item->mapnum];
+			trans->reg_id = MLX5_INVALID_SAMPLE_REG_ID;
+			trans->shift = 0;
+			trans->width = part;
+			item->mapnum++;
+			done_bits -= part;
+		}
+		return 0;
+	}
+	mlx5_flex_config_sample(&node, field, item->tunnel_mode);
+	for (i = 0; i < parser->num_samples; i++) {
+		struct mlx5_devx_match_sample_attr *sample =
+			&parser->devx_conf.sample[i];
+		int32_t reg_start, reg_end;
+		int32_t cov_start, cov_end;
+
+		MLX5_ASSERT(sample->flow_match_sample_en);
+		if (!sample->flow_match_sample_en)
+			break;
+		node.flow_match_sample_field_base_offset =
+			sample->flow_match_sample_field_base_offset;
+		if (memcmp(&node, sample, sizeof(node)))
+			continue;
+		reg_start = (int8_t)sample->flow_match_sample_field_base_offset;
+		reg_start *= CHAR_BIT;
+		reg_end = reg_start + 32;
+		if (end <= reg_start || start >= reg_end)
+			continue;
+		cov_start = RTE_MAX(reg_start, start);
+		cov_end = RTE_MIN(reg_end, end);
+		MLX5_ASSERT(cov_end > cov_start);
+		done_bits += cov_end - cov_start;
+		if (item->mapnum >= MLX5_FLEX_ITEM_MAPPING_NUM)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "too many flex item pattern translations");
+		trans = &item->map[item->mapnum];
+		item->mapnum++;
+		trans->reg_id = i;
+		trans->shift = cov_start - reg_start;
+		trans->width = cov_end - cov_start;
+	}
+	if (done_bits != field->field_size) {
+		MLX5_ASSERT(false);
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "failed to map field to sample register");
+	}
+	return 0;
+}
+
+/* Allocate sample registers for the specified field type and interval array. */
+static int
+mlx5_flex_alloc_sample(struct mlx5_flex_field_cover *cover,
+		       struct mlx5_flex_parser_devx *parser,
+		       struct mlx5_flex_item *item,
+		       struct rte_flow_item_flex_field *field,
+		       struct mlx5_hca_flex_attr *attr,
+		       struct rte_flow_error *error)
+{
+	struct mlx5_devx_match_sample_attr node;
+	uint32_t idx = 0;
+
+	mlx5_flex_config_sample(&node, field, item->tunnel_mode);
+	while (idx < cover->num) {
+		int32_t start, end;
+
+		/*
+		 * Sample base offsets are in bytes, should be aligned
+		 * to 32-bit as required by firmware for samples.
+		 */
+		start = RTE_ALIGN_FLOOR(cover->start[idx],
+					sizeof(uint32_t) * CHAR_BIT);
+		node.flow_match_sample_field_base_offset =
+						(start / CHAR_BIT) & 0xFF;
+		/* Allocate sample register. */
+		if (parser->num_samples >= MLX5_GRAPH_NODE_SAMPLE_NUM ||
+		    parser->num_samples >= attr->max_num_sample ||
+		    parser->num_samples >= attr->max_num_prog_sample)
+			return rte_flow_error_set
+				(error, EINVAL,
+				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "no sample registers to handle all flex item fields");
+		parser->devx_conf.sample[parser->num_samples] = node;
+		parser->num_samples++;
+		/* Remove or update covered intervals. */
+		end = start + 32;
+		while (idx < cover->num) {
+			if (end >= cover->end[idx]) {
+				idx++;
+				continue;
+			}
+			if (end > cover->start[idx])
+				cover->start[idx] = end;
+			break;
+		}
+	}
+	return 0;
+}
+
+static int
+mlx5_flex_translate_sample(struct mlx5_hca_flex_attr *attr,
+			   const struct rte_flow_item_flex_conf *conf,
+			   struct mlx5_flex_parser_devx *parser,
+			   struct mlx5_flex_item *item,
+			   struct rte_flow_error *error)
+{
+	struct mlx5_flex_field_cover cover;
+	uint32_t i, j;
+	int ret;
+
+	switch (conf->tunnel) {
+	case FLEX_TUNNEL_MODE_SINGLE:
+		/* Fallthrough */
+	case FLEX_TUNNEL_MODE_OUTER:
+		/* Fallthrough */
+	case FLEX_TUNNEL_MODE_INNER:
+		/* Fallthrough */
+	case FLEX_TUNNEL_MODE_MULTI:
+		/* Fallthrough */
+	case FLEX_TUNNEL_MODE_TUNNEL:
+		break;
+	default:
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "unrecognized tunnel mode");
+	}
+	item->tunnel_mode = conf->tunnel;
+	if (conf->nb_samples > MLX5_FLEX_ITEM_MAPPING_NUM)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "sample field number exceeds limit");
+	/*
+	 * The application can specify fields smaller or bigger than 32 bits
+	 * covered with single sample register and it can specify field
+	 * offsets in any order.
+	 *
+	 * Gather all similar fields together, build array of bit intervals
+	 * in ascending order and try to cover with the smallest set of sample
+	 * registers.
+	 */
+	memset(&cover, 0, sizeof(cover));
+	for (i = 0; i < conf->nb_samples; i++) {
+		struct rte_flow_item_flex_field *fl = conf->sample_data + i;
+
+		/* Check whether field was covered in the previous iteration. */
+		if (cover.mapped[i / CHAR_BIT] & (1u << (i % CHAR_BIT)))
+			continue;
+		if (fl->field_mode == FIELD_MODE_DUMMY)
+			continue;
+		/* Build an interval array for the field and similar ones */
+		cover.num = 0;
+		/* Add the first field to array unconditionally. */
+		ret = mlx5_flex_cover_sample(&cover, fl, NULL, attr, error);
+		if (ret < 0)
+			return ret;
+		MLX5_ASSERT(ret > 0);
+		cover.mapped[i / CHAR_BIT] |= 1u << (i % CHAR_BIT);
+		for (j = i + 1; j < conf->nb_samples; j++) {
+			struct rte_flow_item_flex_field *ft;
+
+			/* Add field to array if its type matches. */
+			ft = conf->sample_data + j;
+			ret = mlx5_flex_cover_sample(&cover, ft, fl,
+						     attr, error);
+			if (ret < 0)
+				return ret;
+			if (!ret)
+				continue;
+			cover.mapped[j / CHAR_BIT] |= 1u << (j % CHAR_BIT);
+		}
+		/* Allocate sample registers to cover array of intervals. */
+		ret = mlx5_flex_alloc_sample(&cover, parser, item,
+					     fl, attr, error);
+		if (ret)
+			return ret;
+	}
+	/* Build the item pattern translating data on flow creation. */
+	item->mapnum = 0;
+	memset(&item->map, 0, sizeof(item->map));
+	for (i = 0; i < conf->nb_samples; i++) {
+		struct rte_flow_item_flex_field *fl = conf->sample_data + i;
+
+		ret = mlx5_flex_map_sample(fl, parser, item, error);
+		if (ret) {
+			MLX5_ASSERT(false);
+			return ret;
+		}
+	}
+	if (conf->tunnel == FLEX_TUNNEL_MODE_MULTI) {
+		/*
+		 * In FLEX_TUNNEL_MODE_MULTI tunnel mode PMD creates 2 sets
+		 * of samples. The first set is for outer and the second set
+		 * for inner flex flow item. Outer and inner samples differ
+		 * only in tunnel_mode.
+		 */
+		if (parser->num_samples > MLX5_GRAPH_NODE_SAMPLE_NUM / 2)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "no sample registers for inner");
+		rte_memcpy(parser->devx_conf.sample + parser->num_samples,
+			   parser->devx_conf.sample,
+			   parser->num_samples *
+					sizeof(parser->devx_conf.sample[0]));
+		for (i = 0; i < parser->num_samples; i++) {
+			struct mlx5_devx_match_sample_attr *sm = i +
+				parser->devx_conf.sample + parser->num_samples;
+
+			sm->flow_match_sample_tunnel_mode =
+						MLX5_GRAPH_SAMPLE_TUNNEL_INNER;
+		}
+		parser->num_samples *= 2;
+	}
+	return 0;
+}
+
+static int
+mlx5_flex_arc_type(enum rte_flow_item_type type, int in)
+{
+	switch (type) {
+	case RTE_FLOW_ITEM_TYPE_ETH:
+		return  MLX5_GRAPH_ARC_NODE_MAC;
+	case RTE_FLOW_ITEM_TYPE_IPV4:
+		return in ? MLX5_GRAPH_ARC_NODE_IP : MLX5_GRAPH_ARC_NODE_IPV4;
+	case RTE_FLOW_ITEM_TYPE_IPV6:
+		return in ? MLX5_GRAPH_ARC_NODE_IP : MLX5_GRAPH_ARC_NODE_IPV6;
+	case RTE_FLOW_ITEM_TYPE_UDP:
+		return MLX5_GRAPH_ARC_NODE_UDP;
+	case RTE_FLOW_ITEM_TYPE_TCP:
+		return MLX5_GRAPH_ARC_NODE_TCP;
+	case RTE_FLOW_ITEM_TYPE_MPLS:
+		return MLX5_GRAPH_ARC_NODE_MPLS;
+	case RTE_FLOW_ITEM_TYPE_GRE:
+		return MLX5_GRAPH_ARC_NODE_GRE;
+	case RTE_FLOW_ITEM_TYPE_GENEVE:
+		return MLX5_GRAPH_ARC_NODE_GENEVE;
+	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+		return MLX5_GRAPH_ARC_NODE_VXLAN_GPE;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int
+mlx5_flex_arc_in_eth(const struct rte_flow_item *item,
+		     struct rte_flow_error *error)
+{
+	const struct rte_flow_item_eth *spec = item->spec;
+	const struct rte_flow_item_eth *mask = item->mask;
+	struct rte_flow_item_eth eth = { .hdr.ether_type = RTE_BE16(0xFFFF) };
+
+	if (memcmp(mask, &eth, sizeof(struct rte_flow_item_eth))) {
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
+			 "invalid eth item mask");
+	}
+	return rte_be_to_cpu_16(spec->hdr.ether_type);
+}
+
+static int
+mlx5_flex_arc_in_udp(const struct rte_flow_item *item,
+		     struct rte_flow_error *error)
+{
+	const struct rte_flow_item_udp *spec = item->spec;
+	const struct rte_flow_item_udp *mask = item->mask;
+	struct rte_flow_item_udp udp = { .hdr.dst_port = RTE_BE16(0xFFFF) };
+
+	if (memcmp(mask, &udp, sizeof(struct rte_flow_item_udp))) {
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
+			 "invalid udp item mask");
+	}
+	return rte_be_to_cpu_16(spec->hdr.dst_port);
+}
+
+static int
+mlx5_flex_translate_arc_in(struct mlx5_hca_flex_attr *attr,
+			   const struct rte_flow_item_flex_conf *conf,
+			   struct mlx5_flex_parser_devx *devx,
+			   struct mlx5_flex_item *item,
+			   struct rte_flow_error *error)
+{
+	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
+	uint32_t i;
+
+	RTE_SET_USED(item);
+	if (conf->nb_inputs > attr->max_num_arc_in)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "too many input links");
+	for (i = 0; i < conf->nb_inputs; i++) {
+		struct mlx5_devx_graph_arc_attr *arc = node->in + i;
+		struct rte_flow_item_flex_link *link = conf->input_link + i;
+		const struct rte_flow_item *rte_item = &link->item;
+		int arc_type;
+		int ret;
+
+		if (!rte_item->spec || !rte_item->mask || rte_item->last)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "invalid flex item IN arc format");
+		arc_type = mlx5_flex_arc_type(rte_item->type, true);
+		if (arc_type < 0 || !(attr->node_in & RTE_BIT32(arc_type)))
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported flex item IN arc type");
+		arc->arc_parse_graph_node = arc_type;
+		arc->start_inner_tunnel = 0;
+		/*
+		 * Configure arc IN condition value. The value location depends
+		 * on protocol. Current FW version supports ETH & UDP for IN
+		 * arcs only, and locations for these protocols are defined.
+		 * Add more protocols when available.
+		 */
+		switch (rte_item->type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			ret = mlx5_flex_arc_in_eth(rte_item, error);
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			ret = mlx5_flex_arc_in_udp(rte_item, error);
+			break;
+		default:
+			MLX5_ASSERT(false);
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported flex item IN arc type");
+		}
+		if (ret < 0)
+			return ret;
+		arc->compare_condition_value = (uint16_t)ret;
+	}
+	return 0;
+}
+
+static int
+mlx5_flex_translate_arc_out(struct mlx5_hca_flex_attr *attr,
+			    const struct rte_flow_item_flex_conf *conf,
+			    struct mlx5_flex_parser_devx *devx,
+			    struct mlx5_flex_item *item,
+			    struct rte_flow_error *error)
+{
+	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
+	bool is_tunnel = conf->tunnel == FLEX_TUNNEL_MODE_TUNNEL;
+	uint32_t i;
+
+	RTE_SET_USED(item);
+	if (conf->nb_outputs > attr->max_num_arc_out)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "too many output links");
+	for (i = 0; i < conf->nb_outputs; i++) {
+		struct mlx5_devx_graph_arc_attr *arc = node->out + i;
+		struct rte_flow_item_flex_link *link = conf->output_link + i;
+		const struct rte_flow_item *rte_item = &link->item;
+		int arc_type;
+
+		if (rte_item->spec || rte_item->mask || rte_item->last)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "flex node: invalid OUT arc format");
+		arc_type = mlx5_flex_arc_type(rte_item->type, false);
+		if (arc_type < 0 || !(attr->node_out & RTE_BIT32(arc_type)))
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported flex item OUT arc type");
+		arc->arc_parse_graph_node = arc_type;
+		arc->start_inner_tunnel = !!is_tunnel;
+		arc->compare_condition_value = link->next;
+	}
+	return 0;
+}
+
+/* Translate RTE flex item API configuration into flex parser settings. */
+static int
+mlx5_flex_translate_conf(struct rte_eth_dev *dev,
+			 const struct rte_flow_item_flex_conf *conf,
+			 struct mlx5_flex_parser_devx *devx,
+			 struct mlx5_flex_item *item,
+			 struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hca_flex_attr *attr = &priv->config.hca_attr.flex;
+	int ret;
+
+	ret = mlx5_flex_translate_length(attr, conf, devx, error);
+	if (ret)
+		return ret;
+	ret = mlx5_flex_translate_next(attr, conf, devx, error);
+	if (ret)
+		return ret;
+	ret = mlx5_flex_translate_sample(attr, conf, devx, item, error);
+	if (ret)
+		return ret;
+	ret = mlx5_flex_translate_arc_in(attr, conf, devx, item, error);
+	if (ret)
+		return ret;
+	ret = mlx5_flex_translate_arc_out(attr, conf, devx, item, error);
+	if (ret)
+		return ret;
+	return 0;
+}
+
 /**
  * Create the flex item with specified configuration over the Ethernet device.
  *
@@ -145,6 +986,8 @@ flow_dv_item_create(struct rte_eth_dev *dev,
 				   "too many flex items created on the port");
 		return NULL;
 	}
+	if (mlx5_flex_translate_conf(dev, conf, &devx_config, flex, error))
+		goto error;
 	ent = mlx5_list_register(priv->sh->flex_parsers_dv, &devx_config);
 	if (!ent) {
 		rte_flow_error_set(error, ENOMEM,
@@ -153,7 +996,6 @@ flow_dv_item_create(struct rte_eth_dev *dev,
 		goto error;
 	}
 	flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
-	RTE_SET_USED(conf);
 	/* Mark initialized flex item valid. */
 	__atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
 	return (struct rte_flow_item_flex_handle *)flex;
-- 
2.33.1


^ permalink raw reply	[flat|nested] 22+ messages in thread

* [dpdk-dev] [PATCH 8/9] net/mlx5: translate flex item pattern into matcher
  2021-11-01  9:15 [dpdk-dev] [PATCH 0/9] net/mlx5: add flex item support Gregory Etelson
                   ` (6 preceding siblings ...)
  2021-11-01  9:15 ` [dpdk-dev] [PATCH 7/9] net/mlx5: translate flex item configuration Gregory Etelson
@ 2021-11-01  9:15 ` Gregory Etelson
  2021-11-01  9:15 ` [dpdk-dev] [PATCH 9/9] net/mlx5: handle flex item in flows Gregory Etelson
  2021-11-02  8:53 ` [dpdk-dev] [PATCH v2 0/9] net/mlx5: add flex item support Gregory Etelson
  9 siblings, 0 replies; 22+ messages in thread
From: Gregory Etelson @ 2021-11-01  9:15 UTC (permalink / raw)
  To: dev, getelson, viacheslavo; +Cc: matan

From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

The matcher is a steering engine entity that represents the
flow pattern for the hardware to match. In order to provide a
match on the flex item pattern, the appropriate matcher fields
should be configured with values and masks accordingly.

The flex item related matcher fields are an array of eight
32-bit fields matched against the data captured by the sample
registers of the configured flex parser. One packet field,
presented in the item pattern, can be split between several
sample registers, and multiple fields can be combined into a
single sample register to optimize hardware resource usage
(the number of sample registers is limited), depending on the
field modes, widths and offsets. The actual mapping is
complicated and controlled by special translation data built
by the PMD on flex item creation. A hypothetical example of
such translation data is shown below.
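
As a hypothetical illustration (the field layout below is invented for
this description and is not taken from the code): a 48-bit pattern
field starting at bit offset 16 of the flex header may be covered by
two 32-bit sample registers, register 0 sampling header bits 0..31 and
register 1 sampling header bits 32..63. The PMD would then build a
translation map similar to:

    struct mlx5_flex_pattern_field map[] = {
        /* Field bits 0..15 come from register 0, register bits 16..31. */
        { .width = 16, .shift = 16, .reg_id = 0 },
        /* Field bits 16..47 come from register 1, register bits 0..31. */
        { .width = 32, .shift = 0, .reg_id = 1 },
    };

On flow creation the matcher translation walks such a map, extracts the
corresponding bits from the flex item pattern and mask, and writes them
into the misc4 programmable sample fields of the matcher.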

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5.h           |   8 ++
 drivers/net/mlx5/mlx5_flow_flex.c | 223 ++++++++++++++++++++++++++++++
 2 files changed, 231 insertions(+)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index e3c0064f5b..e5b4f5872e 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1848,6 +1848,14 @@ int flow_dv_item_release(struct rte_eth_dev *dev,
 		    struct rte_flow_error *error);
 int mlx5_flex_item_port_init(struct rte_eth_dev *dev);
 void mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev);
+void mlx5_flex_flow_translate_item(struct rte_eth_dev *dev, void *matcher,
+				   void *key, const struct rte_flow_item *item,
+				   bool is_inner);
+int mlx5_flex_acquire_index(struct rte_eth_dev *dev,
+			    struct rte_flow_item_flex_handle *handle,
+			    bool acquire);
+int mlx5_flex_release_index(struct rte_eth_dev *dev, int index);
+
 /* Flex parser list callbacks. */
 struct mlx5_list_entry *mlx5_flex_parser_create_cb(void *list_ctx, void *ctx);
 int mlx5_flex_parser_match_cb(void *list_ctx,
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index b4a9f1a537..bdfa383c45 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -113,6 +113,229 @@ mlx5_flex_free(struct mlx5_priv *priv, struct mlx5_flex_item *item)
 	}
 }
 
+static uint32_t
+mlx5_flex_get_bitfield(const struct rte_flow_item_flex *item,
+		       uint32_t pos, uint32_t width, uint32_t shift)
+{
+	const uint8_t *ptr = item->pattern + pos / CHAR_BIT;
+	uint32_t val, vbits;
+
+	/* Proceed the bitfield start byte. */
+	MLX5_ASSERT(width <= sizeof(uint32_t) * CHAR_BIT && width);
+	MLX5_ASSERT(width + shift <= sizeof(uint32_t) * CHAR_BIT);
+	if (item->length <= pos / CHAR_BIT)
+		return 0;
+	val = *ptr++ >> (pos % CHAR_BIT);
+	vbits = CHAR_BIT - pos % CHAR_BIT;
+	pos = (pos + vbits) / CHAR_BIT;
+	vbits = RTE_MIN(vbits, width);
+	val &= RTE_BIT32(vbits) - 1;
+	while (vbits < width && pos < item->length) {
+		uint32_t part = RTE_MIN(width - vbits, (uint32_t)CHAR_BIT);
+		uint32_t tmp = *ptr++;
+
+		pos++;
+		tmp &= RTE_BIT32(part) - 1;
+		val |= tmp << vbits;
+		vbits += part;
+	}
+	return rte_bswap32(val <<= shift);
+}
+
+#define SET_FP_MATCH_SAMPLE_ID(x, def, msk, val, sid) \
+	do { \
+		uint32_t tmp, out = (def); \
+		tmp = MLX5_GET(fte_match_set_misc4, misc4_v, \
+			       prog_sample_field_value_##x); \
+		tmp = (tmp & ~out) | (val); \
+		MLX5_SET(fte_match_set_misc4, misc4_v, \
+			 prog_sample_field_value_##x, tmp); \
+		tmp = MLX5_GET(fte_match_set_misc4, misc4_m, \
+			       prog_sample_field_value_##x); \
+		tmp = (tmp & ~out) | (msk); \
+		MLX5_SET(fte_match_set_misc4, misc4_m, \
+			 prog_sample_field_value_##x, tmp); \
+		tmp = tmp ? (sid) : 0; \
+		MLX5_SET(fte_match_set_misc4, misc4_v, \
+			 prog_sample_field_id_##x, tmp);\
+		MLX5_SET(fte_match_set_misc4, misc4_m, \
+			 prog_sample_field_id_##x, tmp); \
+	} while (0)
+
+__rte_always_inline static void
+mlx5_flex_set_match_sample(void *misc4_m, void *misc4_v,
+			   uint32_t def, uint32_t mask, uint32_t value,
+			   uint32_t sample_id, uint32_t id)
+{
+	switch (id) {
+	case 0:
+		SET_FP_MATCH_SAMPLE_ID(0, def, mask, value, sample_id);
+		break;
+	case 1:
+		SET_FP_MATCH_SAMPLE_ID(1, def, mask, value, sample_id);
+		break;
+	case 2:
+		SET_FP_MATCH_SAMPLE_ID(2, def, mask, value, sample_id);
+		break;
+	case 3:
+		SET_FP_MATCH_SAMPLE_ID(3, def, mask, value, sample_id);
+		break;
+	case 4:
+		SET_FP_MATCH_SAMPLE_ID(4, def, mask, value, sample_id);
+		break;
+	case 5:
+		SET_FP_MATCH_SAMPLE_ID(5, def, mask, value, sample_id);
+		break;
+	case 6:
+		SET_FP_MATCH_SAMPLE_ID(6, def, mask, value, sample_id);
+		break;
+	case 7:
+		SET_FP_MATCH_SAMPLE_ID(7, def, mask, value, sample_id);
+		break;
+	default:
+		MLX5_ASSERT(false);
+		break;
+	}
+#undef SET_FP_MATCH_SAMPLE_ID
+}
+/**
+ * Translate item pattern into matcher fields according to translation
+ * array.
+ *
+ * @param dev
+ *   Ethernet device to translate flex item on.
+ * @param[in, out] matcher
+ *   Flow matcher to configure.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ * @param[in] is_inner
+ *   Inner Flex Item (follows after tunnel header).
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+void
+mlx5_flex_flow_translate_item(struct rte_eth_dev *dev,
+			      void *matcher, void *key,
+			      const struct rte_flow_item *item,
+			      bool is_inner)
+{
+#ifdef RTE_LIBRTE_MLX5_DEBUG
+	struct mlx5_priv *priv = dev->data->dev_private;
+#endif
+	const struct rte_flow_item_flex *spec, *mask;
+	void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
+				     misc_parameters_4);
+	void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
+	struct mlx5_flex_item *tp;
+	uint32_t i, pos = 0;
+
+	RTE_SET_USED(dev);
+	MLX5_ASSERT(item->spec && item->mask);
+	spec = item->spec;
+	mask = item->mask;
+	tp = (struct mlx5_flex_item *)spec->handle;
+	MLX5_ASSERT(mlx5_flex_index(priv, tp) >= 0);
+	for (i = 0; i < tp->mapnum; i++) {
+		struct mlx5_flex_pattern_field *map = tp->map + i;
+		uint32_t id = map->reg_id;
+		uint32_t def = (RTE_BIT64(map->width) - 1) << map->shift;
+		uint32_t val, msk;
+
+		/* Skip placeholders for DUMMY fields. */
+		if (id == MLX5_INVALID_SAMPLE_REG_ID) {
+			pos += map->width;
+			continue;
+		}
+		val = mlx5_flex_get_bitfield(spec, pos, map->width, map->shift);
+		msk = mlx5_flex_get_bitfield(mask, pos, map->width, map->shift);
+		MLX5_ASSERT(map->width);
+		MLX5_ASSERT(id < tp->devx_fp->num_samples);
+		if (tp->tunnel_mode == FLEX_TUNNEL_MODE_MULTI && is_inner) {
+			uint32_t num_samples = tp->devx_fp->num_samples / 2;
+
+			MLX5_ASSERT(tp->devx_fp->num_samples % 2 == 0);
+			MLX5_ASSERT(id < num_samples);
+			id += num_samples;
+		}
+		mlx5_flex_set_match_sample(misc4_m, misc4_v,
+					   def, msk & def, val & msk & def,
+					   tp->devx_fp->sample_ids[id], id);
+		pos += map->width;
+	}
+}
+
+/**
+ * Convert flex item handle (from the RTE flow) to flex item index on port.
+ * Optionally can increment flex item object reference count.
+ *
+ * @param dev
+ *   Ethernet device to acquire flex item on.
+ * @param[in] handle
+ *   Flow item handle from item spec.
+ * @param[in] acquire
+ *   If set - increment reference counter.
+ *
+ * @return
+ *   >=0 - index on success, a negative errno value otherwise
+ *         and rte_errno is set.
+ */
+int
+mlx5_flex_acquire_index(struct rte_eth_dev *dev,
+			struct rte_flow_item_flex_handle *handle,
+			bool acquire)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flex_item *flex = (struct mlx5_flex_item *)handle;
+	int ret = mlx5_flex_index(priv, flex);
+
+	if (ret < 0) {
+		errno = EINVAL;
+		rte_errno = EINVAL;
+		return ret;
+	}
+	if (acquire)
+		__atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
+	return ret;
+}
+
+/**
+ * Release flex item index on port - decrements reference counter by index.
+ *
+ * @param dev
+ *   Ethernet device to release the flex item on.
+ * @param[in] index
+ *   Flow item index.
+ *
+ * @return
+ *   0 - on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flex_release_index(struct rte_eth_dev *dev,
+			int index)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flex_item *flex;
+
+	if (index >= MLX5_PORT_FLEX_ITEM_NUM ||
+	    !(priv->flex_item_map & (1u << index))) {
+		errno = EINVAL;
+		rte_errno = EINVAL;
+		return -EINVAL;
+	}
+	flex = priv->flex_item + index;
+	if (flex->refcnt <= 1) {
+		MLX5_ASSERT(false);
+		errno = EINVAL;
+		rte_errno = EINVAL;
+		return -EINVAL;
+	}
+	__atomic_sub_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
+	return 0;
+}
+
 /*
  * Calculate largest mask value for a given shift.
  *
-- 
2.33.1


^ permalink raw reply	[flat|nested] 22+ messages in thread

* [dpdk-dev] [PATCH 9/9] net/mlx5: handle flex item in flows
  2021-11-01  9:15 [dpdk-dev] [PATCH 0/9] net/mlx5: add flex item support Gregory Etelson
                   ` (7 preceding siblings ...)
  2021-11-01  9:15 ` [dpdk-dev] [PATCH 8/9] net/mlx5: translate flex item pattern into matcher Gregory Etelson
@ 2021-11-01  9:15 ` Gregory Etelson
  2021-11-02  8:53 ` [dpdk-dev] [PATCH v2 0/9] net/mlx5: add flex item support Gregory Etelson
  9 siblings, 0 replies; 22+ messages in thread
From: Gregory Etelson @ 2021-11-01  9:15 UTC (permalink / raw)
  To: dev, getelson, viacheslavo; +Cc: matan

Provide flex item recognition, validation and translation
in flow patterns. Track the flex item referencing.
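
A minimal usage sketch follows (hypothetical values; flex_handle is
assumed to come from an earlier rte_flow_flex_item_create() call):

    /* Match two bytes of the area covered by the flex item samples. */
    uint8_t spec_bytes[] = { 0x12, 0x34 };
    uint8_t mask_bytes[] = { 0xff, 0xff };
    struct rte_flow_item_flex flex_spec = {
        .handle = flex_handle,
        .length = sizeof(spec_bytes),
        .pattern = spec_bytes,
    };
    struct rte_flow_item_flex flex_mask = {
        .handle = flex_handle,
        .length = sizeof(mask_bytes),
        .pattern = mask_bytes,
    };
    struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_UDP },
        { .type = RTE_FLOW_ITEM_TYPE_FLEX,
          .spec = &flex_spec, .mask = &flex_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };

The validation added in this patch requires both the spec and the mask
to be present for the flex item.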

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Reviewed-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.h    |   9 ++-
 drivers/net/mlx5/mlx5_flow_dv.c | 124 ++++++++++++++++++++++++++++++++
 2 files changed, 132 insertions(+), 1 deletion(-)

diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index c1b6198adf..046516efd0 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -176,6 +176,11 @@ enum mlx5_feature_name {
 /* Conntrack item. */
 #define MLX5_FLOW_LAYER_ASO_CT (UINT64_C(1) << 35)
 
+/* Flex item */
+#define MLX5_FLOW_ITEM_OUTER_FLEX (UINT64_C(1) << 36)
+#define MLX5_FLOW_ITEM_INNER_FLEX (UINT64_C(1) << 37)
+#define MLX5_FLOW_ITEM_FLEX_TUNNEL (UINT64_C(1) << 38)
+
 /* Outer Masks. */
 #define MLX5_FLOW_LAYER_OUTER_L3 \
 	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
@@ -190,7 +195,8 @@ enum mlx5_feature_name {
 	(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
 	 MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
 	 MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
-	 MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP)
+	 MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \
+	 MLX5_FLOW_ITEM_FLEX_TUNNEL)
 
 /* Inner Masks. */
 #define MLX5_FLOW_LAYER_INNER_L3 \
@@ -689,6 +695,7 @@ struct mlx5_flow_handle {
 	uint32_t is_meter_flow_id:1; /**< Indate if flow_id is for meter. */
 	uint32_t mark:1; /**< Metadate rxq mark flag. */
 	uint32_t fate_action:3; /**< Fate action type. */
+	uint32_t flex_item; /**< referenced Flex Item bitmask. */
 	union {
 		uint32_t rix_hrxq; /**< Hash Rx queue object index. */
 		uint32_t rix_jump; /**< Index to the jump action resource. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 1b92dd75ff..c12a7a986d 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -6733,6 +6733,88 @@ flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
 	return 0;
 }
 
+static int
+flow_dv_validate_item_flex(struct rte_eth_dev *dev,
+			   const struct rte_flow_item *item,
+			   uint64_t item_flags,
+			   uint64_t *last_item,
+			   bool is_inner,
+			   struct rte_flow_error *error)
+{
+	const struct rte_flow_item_flex *flow_spec = item->spec;
+	const struct rte_flow_item_flex *flow_mask = item->mask;
+	struct mlx5_flex_item *flex;
+
+	if (!flow_spec)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					  "flex flow item spec cannot be NULL");
+	if (!flow_mask)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					  "flex flow item mask cannot be NULL");
+	if (item->last)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					  "flex flow item last not supported");
+	if (mlx5_flex_acquire_index(dev, flow_spec->handle, false) < 0)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					  "invalid flex flow item handle");
+	flex = (struct mlx5_flex_item *)flow_spec->handle;
+	switch (flex->tunnel_mode) {
+	case FLEX_TUNNEL_MODE_SINGLE:
+		if (item_flags &
+		    (MLX5_FLOW_ITEM_OUTER_FLEX | MLX5_FLOW_ITEM_INNER_FLEX))
+			return rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   NULL, "multiple flex items not supported");
+		break;
+	case FLEX_TUNNEL_MODE_OUTER:
+		if (is_inner)
+			return rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   NULL, "inner flex item was not configured");
+		if (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX)
+			return rte_flow_error_set(error, ENOTSUP,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   NULL, "multiple flex items not supported");
+		break;
+	case FLEX_TUNNEL_MODE_INNER:
+		if (!is_inner)
+			return rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   NULL, "outer flex item was not configured");
+		if (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)
+			return rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   NULL, "multiple flex items not supported");
+		break;
+	case FLEX_TUNNEL_MODE_MULTI:
+		if ((is_inner && (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)) ||
+		    (!is_inner && (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX))) {
+			return rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   NULL, "multiple flex items not supported");
+		}
+		break;
+	case FLEX_TUNNEL_MODE_TUNNEL:
+		if (is_inner || (item_flags & MLX5_FLOW_ITEM_FLEX_TUNNEL))
+			return rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   NULL, "multiple flex tunnel items not supported");
+		break;
+	default:
+		return rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM,
+				   NULL, "invalid flex item configuration");
+	}
+	*last_item = flex->tunnel_mode == FLEX_TUNNEL_MODE_TUNNEL ?
+		     MLX5_FLOW_ITEM_FLEX_TUNNEL : is_inner ?
+		     MLX5_FLOW_ITEM_INNER_FLEX : MLX5_FLOW_ITEM_OUTER_FLEX;
+	return 0;
+}
+
 /**
  * Internal validation function. For validating both actions and items.
  *
@@ -7174,6 +7256,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			 * list it here as a supported type
 			 */
 			break;
+		case RTE_FLOW_ITEM_TYPE_FLEX:
+			ret = flow_dv_validate_item_flex(dev, items, item_flags,
+							 &last_item,
+							 tunnel != 0, error);
+			if (ret < 0)
+				return ret;
+			break;
 		default:
 			return rte_flow_error_set(error, ENOTSUP,
 						  RTE_FLOW_ERROR_TYPE_ITEM,
@@ -10038,6 +10127,27 @@ flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
 			       reg_value, reg_mask);
 }
 
+static void
+flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key,
+			    const struct rte_flow_item *item,
+			    struct mlx5_flow *dev_flow, bool is_inner)
+{
+	const struct rte_flow_item_flex *spec =
+		(const struct rte_flow_item_flex *)item->spec;
+	int index = mlx5_flex_acquire_index(dev, spec->handle, false);
+
+	MLX5_ASSERT(index >= 0 && index <= (int)(sizeof(uint32_t) * CHAR_BIT));
+	if (index < 0)
+		return;
+	if (!(dev_flow->handle->flex_item & RTE_BIT32(index))) {
+		/* Don't count both inner and outer flex items in one rule. */
+		if (mlx5_flex_acquire_index(dev, spec->handle, true) != index)
+			MLX5_ASSERT(false);
+		dev_flow->handle->flex_item |= RTE_BIT32(index);
+	}
+	mlx5_flex_flow_translate_item(dev, matcher, key, item, is_inner);
+}
+
 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
 
 #define HEADER_IS_ZERO(match_criteria, headers)				     \
@@ -13455,6 +13565,13 @@ flow_dv_translate(struct rte_eth_dev *dev,
 			flow_dv_translate_item_aso_ct(dev, match_mask,
 						      match_value, items);
 			break;
+		case RTE_FLOW_ITEM_TYPE_FLEX:
+			flow_dv_translate_item_flex(dev, match_mask,
+						    match_value, items,
+						    dev_flow, tunnel != 0);
+			last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
+				    MLX5_FLOW_ITEM_OUTER_FLEX;
+			break;
 		default:
 			break;
 		}
@@ -14329,6 +14446,12 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 		if (!dev_handle)
 			return;
 		flow->dev_handles = dev_handle->next.next;
+		while (dev_handle->flex_item) {
+			int index = rte_bsf32(dev_handle->flex_item);
+
+			mlx5_flex_release_index(dev, index);
+			dev_handle->flex_item &= ~RTE_BIT32(index);
+		}
 		if (dev_handle->dvh.matcher)
 			flow_dv_matcher_release(dev, dev_handle);
 		if (dev_handle->dvh.rix_sample)
@@ -17945,5 +18068,6 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
 	.item_create = flow_dv_item_create,
 	.item_release = flow_dv_item_release,
 };
+
 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
 
-- 
2.33.1


^ permalink raw reply	[flat|nested] 22+ messages in thread

* [dpdk-dev] [PATCH v2 0/9] net/mlx5: add flex item support
  2021-11-01  9:15 [dpdk-dev] [PATCH 0/9] net/mlx5: add flex item support Gregory Etelson
                   ` (8 preceding siblings ...)
  2021-11-01  9:15 ` [dpdk-dev] [PATCH 9/9] net/mlx5: handle flex item in flows Gregory Etelson
@ 2021-11-02  8:53 ` Gregory Etelson
  2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 1/9] common/mlx5: refactor HCA attributes query Gregory Etelson
                     ` (10 more replies)
  9 siblings, 11 replies; 22+ messages in thread
From: Gregory Etelson @ 2021-11-02  8:53 UTC (permalink / raw)
  To: dev, getelson, viacheslavo; +Cc: matan

Gregory Etelson (4):
  common/mlx5: extend flex parser capabilities
  common/mlx5: fix flex parser DevX creation routine
  net/mlx5: add flex parser DevX object management
  net/mlx5: handle flex item in flows

Viacheslav Ovsiienko (5):
  common/mlx5: refactor HCA attributes query
  net/mlx5: update eCPRI flex parser structures
  net/mlx5: add flex item API
  net/mlx5: translate flex item configuration
  net/mlx5: translate flex item pattern into matcher

 drivers/common/mlx5/mlx5_devx_cmds.c |  239 +++--
 drivers/common/mlx5/mlx5_devx_cmds.h |   65 +-
 drivers/common/mlx5/mlx5_prm.h       |   50 +-
 drivers/net/mlx5/linux/mlx5_os.c     |   14 +
 drivers/net/mlx5/meson.build         |    1 +
 drivers/net/mlx5/mlx5.c              |   15 +-
 drivers/net/mlx5/mlx5.h              |   79 +-
 drivers/net/mlx5/mlx5_flow.c         |   49 +
 drivers/net/mlx5/mlx5_flow.h         |   27 +-
 drivers/net/mlx5/mlx5_flow_dv.c      |  127 ++-
 drivers/net/mlx5/mlx5_flow_flex.c    | 1373 ++++++++++++++++++++++++++
 11 files changed, 1913 insertions(+), 126 deletions(-)
 create mode 100644 drivers/net/mlx5/mlx5_flow_flex.c

-- 
v2: rebase to updated master-net-mlx.
--
2.33.1


^ permalink raw reply	[flat|nested] 22+ messages in thread

* [dpdk-dev] [PATCH v2 1/9] common/mlx5: refactor HCA attributes query
  2021-11-02  8:53 ` [dpdk-dev] [PATCH v2 0/9] net/mlx5: add flex item support Gregory Etelson
@ 2021-11-02  8:53   ` Gregory Etelson
  2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 2/9] common/mlx5: extend flex parser capabilities Gregory Etelson
                     ` (9 subsequent siblings)
  10 siblings, 0 replies; 22+ messages in thread
From: Gregory Etelson @ 2021-11-02  8:53 UTC (permalink / raw)
  To: dev, getelson, viacheslavo; +Cc: matan

From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

There is a common part of code querying the HCA attributes
from the device; this part can be consolidated into a
dedicated routine.

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/common/mlx5/mlx5_devx_cmds.c | 173 +++++++++++----------------
 1 file changed, 73 insertions(+), 100 deletions(-)

diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c
index fb7c8e986f..d005eb3643 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.c
+++ b/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -13,6 +13,42 @@
 #include "mlx5_common_log.h"
 #include "mlx5_malloc.h"
 
+static void *
+mlx5_devx_get_hca_cap(void *ctx, uint32_t *in, uint32_t *out,
+		      int *err, uint32_t flags)
+{
+	const size_t size_in = MLX5_ST_SZ_DW(query_hca_cap_in) * sizeof(int);
+	const size_t size_out = MLX5_ST_SZ_DW(query_hca_cap_out) * sizeof(int);
+	int status, syndrome, rc;
+
+	if (err)
+		*err = 0;
+	memset(in, 0, size_in);
+	memset(out, 0, size_out);
+	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+	MLX5_SET(query_hca_cap_in, in, op_mod, flags);
+	rc = mlx5_glue->devx_general_cmd(ctx, in, size_in, out, size_out);
+	if (rc) {
+		DRV_LOG(ERR,
+			"Failed to query devx HCA capabilities func %#02x",
+			flags >> 1);
+		if (err)
+			*err = rc > 0 ? -rc : rc;
+		return NULL;
+	}
+	status = MLX5_GET(query_hca_cap_out, out, status);
+	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
+	if (status) {
+		DRV_LOG(ERR,
+			"Failed to query devx HCA capabilities func %#02x status %x, syndrome = %x",
+			flags >> 1, status, syndrome);
+		if (err)
+			*err = -1;
+		return NULL;
+	}
+	return MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+}
+
 /**
  * Perform read access to the registers. Reads data from register
  * and writes ones to the specified buffer.
@@ -472,21 +508,15 @@ static void
 mlx5_devx_cmd_query_hca_vdpa_attr(void *ctx,
 				  struct mlx5_hca_vdpa_attr *vdpa_attr)
 {
-	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
-	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
-	void *hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
-	int status, syndrome, rc;
+	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)];
+	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)];
+	void *hcattr;
 
-	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
-	MLX5_SET(query_hca_cap_in, in, op_mod,
-		 MLX5_GET_HCA_CAP_OP_MOD_VDPA_EMULATION |
-		 MLX5_HCA_CAP_OPMOD_GET_CUR);
-	rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
-	status = MLX5_GET(query_hca_cap_out, out, status);
-	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
-	if (rc || status) {
-		RTE_LOG(DEBUG, PMD, "Failed to query devx VDPA capabilities,"
-			" status %x, syndrome = %x", status, syndrome);
+	hcattr = mlx5_devx_get_hca_cap(ctx, in, out, NULL,
+			MLX5_GET_HCA_CAP_OP_MOD_VDPA_EMULATION |
+			MLX5_HCA_CAP_OPMOD_GET_CUR);
+	if (!hcattr) {
+		RTE_LOG(DEBUG, PMD, "Failed to query devx VDPA capabilities");
 		vdpa_attr->valid = 0;
 	} else {
 		vdpa_attr->valid = 1;
@@ -741,27 +771,15 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
 {
 	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
 	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
-	void *hcattr;
-	int status, syndrome, rc, i;
 	uint64_t general_obj_types_supported = 0;
+	void *hcattr;
+	int rc, i;
 
-	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
-	MLX5_SET(query_hca_cap_in, in, op_mod,
-		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
-		 MLX5_HCA_CAP_OPMOD_GET_CUR);
-
-	rc = mlx5_glue->devx_general_cmd(ctx,
-					 in, sizeof(in), out, sizeof(out));
-	if (rc)
-		goto error;
-	status = MLX5_GET(query_hca_cap_out, out, status);
-	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
-	if (status) {
-		DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
-			"status %x, syndrome = %x", status, syndrome);
-		return -1;
-	}
-	hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+	hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
+			MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
+			MLX5_HCA_CAP_OPMOD_GET_CUR);
+	if (!hcattr)
+		return rc;
 	attr->flow_counter_bulk_alloc_bitmap =
 			MLX5_GET(cmd_hca_cap, hcattr, flow_counter_bulk_alloc);
 	attr->flow_counters_dump = MLX5_GET(cmd_hca_cap, hcattr,
@@ -893,19 +911,13 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
 					 general_obj_types) &
 			      MLX5_GENERAL_OBJ_TYPES_CAP_CONN_TRACK_OFFLOAD);
 	if (attr->qos.sup) {
-		MLX5_SET(query_hca_cap_in, in, op_mod,
-			 MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
-			 MLX5_HCA_CAP_OPMOD_GET_CUR);
-		rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in),
-						 out, sizeof(out));
-		if (rc)
-			goto error;
-		if (status) {
-			DRV_LOG(DEBUG, "Failed to query devx QOS capabilities,"
-				" status %x, syndrome = %x", status, syndrome);
-			return -1;
+		hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
+				MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
+				MLX5_HCA_CAP_OPMOD_GET_CUR);
+		if (!hcattr) {
+			DRV_LOG(DEBUG, "Failed to query devx QOS capabilities");
+			return rc;
 		}
-		hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
 		attr->qos.flow_meter_old =
 				MLX5_GET(qos_cap, hcattr, flow_meter_old);
 		attr->qos.log_max_flow_meter =
@@ -934,27 +946,14 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
 		mlx5_devx_cmd_query_hca_vdpa_attr(ctx, &attr->vdpa);
 	if (!attr->eth_net_offloads)
 		return 0;
-
 	/* Query Flow Sampler Capability From FLow Table Properties Layout. */
-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
-	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
-	MLX5_SET(query_hca_cap_in, in, op_mod,
-		 MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE |
-		 MLX5_HCA_CAP_OPMOD_GET_CUR);
-
-	rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
-	if (rc)
-		goto error;
-	status = MLX5_GET(query_hca_cap_out, out, status);
-	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
-	if (status) {
-		DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
-			"status %x, syndrome = %x", status, syndrome);
+	hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
+			MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE |
+			MLX5_HCA_CAP_OPMOD_GET_CUR);
+	if (!hcattr) {
 		attr->log_max_ft_sampler_num = 0;
-		return -1;
+		return rc;
 	}
-	hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
 	attr->log_max_ft_sampler_num = MLX5_GET
 		(flow_table_nic_cap, hcattr,
 		 flow_table_properties_nic_receive.log_max_ft_sampler_num);
@@ -969,27 +968,13 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
 		(flow_table_nic_cap, hcattr,
 		 ft_field_support_2_nic_receive.outer_ipv4_ihl);
 	/* Query HCA offloads for Ethernet protocol. */
-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
-	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
-	MLX5_SET(query_hca_cap_in, in, op_mod,
-		 MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS |
-		 MLX5_HCA_CAP_OPMOD_GET_CUR);
-
-	rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
-	if (rc) {
+	hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
+			MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS |
+			MLX5_HCA_CAP_OPMOD_GET_CUR);
+	if (!hcattr) {
 		attr->eth_net_offloads = 0;
-		goto error;
+		return rc;
 	}
-	status = MLX5_GET(query_hca_cap_out, out, status);
-	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
-	if (status) {
-		DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
-			"status %x, syndrome = %x", status, syndrome);
-		attr->eth_net_offloads = 0;
-		return -1;
-	}
-	hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
 	attr->wqe_vlan_insert = MLX5_GET(per_protocol_networking_offload_caps,
 					 hcattr, wqe_vlan_insert);
 	attr->csum_cap = MLX5_GET(per_protocol_networking_offload_caps,
@@ -1044,26 +1029,14 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
 					 hcattr, rss_ind_tbl_cap);
 	/* Query HCA attribute for ROCE. */
 	if (attr->roce) {
-		memset(in, 0, sizeof(in));
-		memset(out, 0, sizeof(out));
-		MLX5_SET(query_hca_cap_in, in, opcode,
-			 MLX5_CMD_OP_QUERY_HCA_CAP);
-		MLX5_SET(query_hca_cap_in, in, op_mod,
-			 MLX5_GET_HCA_CAP_OP_MOD_ROCE |
-			 MLX5_HCA_CAP_OPMOD_GET_CUR);
-		rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in),
-						 out, sizeof(out));
-		if (rc)
-			goto error;
-		status = MLX5_GET(query_hca_cap_out, out, status);
-		syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
-		if (status) {
+		hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
+				MLX5_GET_HCA_CAP_OP_MOD_ROCE |
+				MLX5_HCA_CAP_OPMOD_GET_CUR);
+		if (!hcattr) {
 			DRV_LOG(DEBUG,
-				"Failed to query devx HCA ROCE capabilities, "
-				"status %x, syndrome = %x", status, syndrome);
-			return -1;
+				"Failed to query devx HCA ROCE capabilities");
+			return rc;
 		}
-		hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
 		attr->qp_ts_format = MLX5_GET(roce_caps, hcattr, qp_ts_format);
 	}
 	if (attr->eth_virt &&
-- 
2.33.1


^ permalink raw reply	[flat|nested] 22+ messages in thread

* [dpdk-dev] [PATCH v2 2/9] common/mlx5: extend flex parser capabilities
  2021-11-02  8:53 ` [dpdk-dev] [PATCH v2 0/9] net/mlx5: add flex item support Gregory Etelson
  2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 1/9] common/mlx5: refactor HCA attributes query Gregory Etelson
@ 2021-11-02  8:53   ` Gregory Etelson
  2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 3/9] common/mlx5: fix flex parser DevX creation routine Gregory Etelson
                     ` (8 subsequent siblings)
  10 siblings, 0 replies; 22+ messages in thread
From: Gregory Etelson @ 2021-11-02  8:53 UTC (permalink / raw)
  To: dev, getelson, viacheslavo; +Cc: matan

MLX5 PARSE_GRAPH_NODE is the main data structure used by the Flex
Parser when a new parsing protocol is defined. While software
creates a PARSE_GRAPH_NODE object for a new protocol, it must
verify that the configuration parameters it uses comply with the
hardware limits.

The patch queries the hardware PARSE_GRAPH_NODE capabilities and
stores them in the PMD internal configuration structure (a usage
sketch follows the list below):

 - query capabilities from parse_graph_node attribute page
 - query max_num_prog_sample_field capability from HCA page 2
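
For illustration only, a minimal sketch of how the queried limits could
be consumed on the driver side; the helper name and its placement are
assumptions made for this example, while the mlx5_hca_flex_attr fields
come from this patch:

#include <stdint.h>
#include <errno.h>
#include <rte_common.h>
#include <mlx5_devx_cmds.h> /* struct mlx5_hca_flex_attr */

/* Hypothetical check: reject a flex item request that needs more
 * sample registers than the device reports in its capabilities.
 */
static inline int
flex_check_sample_budget(const struct mlx5_hca_flex_attr *attr,
			 uint32_t requested)
{
	uint32_t limit = RTE_MIN((uint32_t)attr->max_num_sample,
				 (uint32_t)attr->max_num_prog_sample);

	return requested <= limit ? 0 : -EINVAL;
}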

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Reviewed-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/common/mlx5/mlx5_devx_cmds.c | 57 ++++++++++++++++++++++++
 drivers/common/mlx5/mlx5_devx_cmds.h | 65 +++++++++++++++++++++++++++-
 drivers/common/mlx5/mlx5_prm.h       | 50 ++++++++++++++++++++-
 3 files changed, 168 insertions(+), 4 deletions(-)

diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c
index d005eb3643..28e577a37e 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.c
+++ b/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -729,6 +729,53 @@ mlx5_devx_cmd_create_flex_parser(void *ctx,
 	return parse_flex_obj;
 }
 
+static int
+mlx5_devx_cmd_query_hca_parse_graph_node_cap
+	(void *ctx, struct mlx5_hca_flex_attr *attr)
+{
+	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)];
+	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)];
+	void *hcattr;
+	int rc;
+
+	hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
+			MLX5_GET_HCA_CAP_OP_MOD_PARSE_GRAPH_NODE_CAP |
+			MLX5_HCA_CAP_OPMOD_GET_CUR);
+	if (!hcattr)
+		return rc;
+	attr->node_in = MLX5_GET(parse_graph_node_cap, hcattr, node_in);
+	attr->node_out = MLX5_GET(parse_graph_node_cap, hcattr, node_out);
+	attr->header_length_mode = MLX5_GET(parse_graph_node_cap, hcattr,
+					    header_length_mode);
+	attr->sample_offset_mode = MLX5_GET(parse_graph_node_cap, hcattr,
+					    sample_offset_mode);
+	attr->max_num_arc_in = MLX5_GET(parse_graph_node_cap, hcattr,
+					max_num_arc_in);
+	attr->max_num_arc_out = MLX5_GET(parse_graph_node_cap, hcattr,
+					 max_num_arc_out);
+	attr->max_num_sample = MLX5_GET(parse_graph_node_cap, hcattr,
+					max_num_sample);
+	attr->sample_id_in_out = MLX5_GET(parse_graph_node_cap, hcattr,
+					  sample_id_in_out);
+	attr->max_base_header_length = MLX5_GET(parse_graph_node_cap, hcattr,
+						max_base_header_length);
+	attr->max_sample_base_offset = MLX5_GET(parse_graph_node_cap, hcattr,
+						max_sample_base_offset);
+	attr->max_next_header_offset = MLX5_GET(parse_graph_node_cap, hcattr,
+						max_next_header_offset);
+	attr->header_length_mask_width = MLX5_GET(parse_graph_node_cap, hcattr,
+						  header_length_mask_width);
+	/* Get the max supported samples from HCA CAP 2 */
+	hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
+			MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 |
+			MLX5_HCA_CAP_OPMOD_GET_CUR);
+	if (!hcattr)
+		return rc;
+	attr->max_num_prog_sample =
+		MLX5_GET(cmd_hca_cap_2, hcattr,	max_num_prog_sample_field);
+	return 0;
+}
+
 static int
 mlx5_devx_query_pkt_integrity_match(void *hcattr)
 {
@@ -942,6 +989,16 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
 					log_max_num_meter_aso);
 		}
 	}
+	/*
+	 * Flex item support needs max_num_prog_sample_field
+	 * from the Capabilities 2 table for PARSE_GRAPH_NODE
+	 */
+	if (attr->parse_graph_flex_node) {
+		rc = mlx5_devx_cmd_query_hca_parse_graph_node_cap
+			(ctx, &attr->flex);
+		if (rc)
+			return -1;
+	}
 	if (attr->vdpa.valid)
 		mlx5_devx_cmd_query_hca_vdpa_attr(ctx, &attr->vdpa);
 	if (!attr->eth_net_offloads)
diff --git a/drivers/common/mlx5/mlx5_devx_cmds.h b/drivers/common/mlx5/mlx5_devx_cmds.h
index 80b5dca1eb..2326f1e968 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.h
+++ b/drivers/common/mlx5/mlx5_devx_cmds.h
@@ -6,6 +6,7 @@
 #define RTE_PMD_MLX5_DEVX_CMDS_H_
 
 #include <rte_compat.h>
+#include <rte_bitops.h>
 
 #include "mlx5_glue.h"
 #include "mlx5_prm.h"
@@ -86,6 +87,64 @@ struct mlx5_hca_flow_attr {
 	uint32_t tunnel_header_2_3;
 };
 
+/**
+ * Accumulate port PARSE_GRAPH_NODE capabilities from
+ * PARSE_GRAPH_NODE Capabilities and HCA Capabilities 2 tables
+ */
+__extension__
+struct mlx5_hca_flex_attr {
+	uint32_t node_in;
+	uint32_t node_out;
+	uint16_t header_length_mode;
+	uint16_t sample_offset_mode;
+	uint8_t  max_num_arc_in;
+	uint8_t  max_num_arc_out;
+	uint8_t  max_num_sample;
+	uint8_t  max_num_prog_sample:5;	/* From HCA CAP 2 */
+	uint8_t  sample_id_in_out:1;
+	uint16_t max_base_header_length;
+	uint8_t  max_sample_base_offset;
+	uint16_t max_next_header_offset;
+	uint8_t  header_length_mask_width;
+};
+
+/* ISO C restricts enumerator values to range of 'int' */
+__extension__
+enum {
+	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_HEAD          = RTE_BIT32(1),
+	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_MAC           = RTE_BIT32(2),
+	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_IP            = RTE_BIT32(3),
+	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_GRE           = RTE_BIT32(4),
+	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_UDP           = RTE_BIT32(5),
+	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_MPLS          = RTE_BIT32(6),
+	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_TCP           = RTE_BIT32(7),
+	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_VXLAN_GRE     = RTE_BIT32(8),
+	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_GENEVE        = RTE_BIT32(9),
+	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_IPSEC_ESP     = RTE_BIT32(10),
+	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_IPV4          = RTE_BIT32(11),
+	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_IPV6          = RTE_BIT32(12),
+	PARSE_GRAPH_NODE_CAP_SUPPORTED_PROTOCOL_PROGRAMMABLE  = RTE_BIT32(31)
+};
+
+enum {
+	PARSE_GRAPH_NODE_CAP_LENGTH_MODE_FIXED          = RTE_BIT32(0),
+	PARSE_GRAPH_NODE_CAP_LENGTH_MODE_EXPLISIT_FIELD = RTE_BIT32(1),
+	PARSE_GRAPH_NODE_CAP_LENGTH_MODE_BITMASK_FIELD  = RTE_BIT32(2)
+};
+
+/*
+ * DWORD shift is the base for calculating header_length_field_mask
+ * value in the MLX5_GRAPH_NODE_LEN_FIELD mode.
+ */
+#define MLX5_PARSE_GRAPH_NODE_HDR_LEN_SHIFT_DWORD 0x02
+
+static inline uint32_t
+mlx5_hca_parse_graph_node_base_hdr_len_mask
+	(const struct mlx5_hca_flex_attr *attr)
+{
+	return (1 << attr->header_length_mask_width) - 1;
+}
+
 /* HCA supports this number of time periods for LRO. */
 #define MLX5_LRO_NUM_SUPP_PERIODS 4
 
@@ -164,6 +223,7 @@ struct mlx5_hca_attr {
 	struct mlx5_hca_qos_attr qos;
 	struct mlx5_hca_vdpa_attr vdpa;
 	struct mlx5_hca_flow_attr flow;
+	struct mlx5_hca_flex_attr flex;
 	int log_max_qp_sz;
 	int log_max_cq_sz;
 	int log_max_qp;
@@ -586,8 +646,9 @@ int mlx5_devx_cmd_query_parse_samples(struct mlx5_devx_obj *flex_obj,
 				      uint32_t ids[], uint32_t num);
 
 __rte_internal
-struct mlx5_devx_obj *mlx5_devx_cmd_create_flex_parser(void *ctx,
-					struct mlx5_devx_graph_node_attr *data);
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_flex_parser(void *ctx,
+				 struct mlx5_devx_graph_node_attr *data);
 
 __rte_internal
 int mlx5_devx_cmd_register_read(void *ctx, uint16_t reg_id,
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index eab80eaead..8014ec2f92 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -975,7 +975,14 @@ struct mlx5_ifc_fte_match_set_misc4_bits {
 	u8 prog_sample_field_id_2[0x20];
 	u8 prog_sample_field_value_3[0x20];
 	u8 prog_sample_field_id_3[0x20];
-	u8 reserved_at_100[0x100];
+	u8 prog_sample_field_value_4[0x20];
+	u8 prog_sample_field_id_4[0x20];
+	u8 prog_sample_field_value_5[0x20];
+	u8 prog_sample_field_id_5[0x20];
+	u8 prog_sample_field_value_6[0x20];
+	u8 prog_sample_field_id_6[0x20];
+	u8 prog_sample_field_value_7[0x20];
+	u8 prog_sample_field_id_7[0x20];
 };
 
 struct mlx5_ifc_fte_match_set_misc5_bits {
@@ -1245,6 +1252,7 @@ enum {
 	MLX5_GET_HCA_CAP_OP_MOD_ROCE = 0x4 << 1,
 	MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE = 0x7 << 1,
 	MLX5_GET_HCA_CAP_OP_MOD_VDPA_EMULATION = 0x13 << 1,
+	MLX5_GET_HCA_CAP_OP_MOD_PARSE_GRAPH_NODE_CAP = 0x1C << 1,
 	MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 = 0x20 << 1,
 };
 
@@ -1759,6 +1767,27 @@ struct mlx5_ifc_virtio_emulation_cap_bits {
 	u8 reserved_at_1c0[0x620];
 };
 
+/**
+ * PARSE_GRAPH_NODE Capabilities Field Descriptions
+ */
+struct mlx5_ifc_parse_graph_node_cap_bits {
+	u8 node_in[0x20];
+	u8 node_out[0x20];
+	u8 header_length_mode[0x10];
+	u8 sample_offset_mode[0x10];
+	u8 max_num_arc_in[0x08];
+	u8 max_num_arc_out[0x08];
+	u8 max_num_sample[0x08];
+	u8 reserved_at_78[0x07];
+	u8 sample_id_in_out[0x1];
+	u8 max_base_header_length[0x10];
+	u8 reserved_at_90[0x08];
+	u8 max_sample_base_offset[0x08];
+	u8 max_next_header_offset[0x10];
+	u8 reserved_at_b0[0x08];
+	u8 header_length_mask_width[0x08];
+};
+
 struct mlx5_ifc_flow_table_prop_layout_bits {
 	u8 ft_support[0x1];
 	u8 flow_tag[0x1];
@@ -1853,9 +1882,14 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
 		ft_field_support_2_nic_receive;
 };
 
+/*
+ *  HCA Capabilities 2
+ */
 struct mlx5_ifc_cmd_hca_cap_2_bits {
 	u8 reserved_at_0[0x80]; /* End of DW4. */
-	u8 reserved_at_80[0xb];
+	u8 reserved_at_80[0x3];
+	u8 max_num_prog_sample_field[0x5];
+	u8 reserved_at_88[0x3];
 	u8 log_max_num_reserved_qpn[0x5];
 	u8 reserved_at_90[0x3];
 	u8 log_reserved_qpn_granularity[0x5];
@@ -3954,6 +3988,12 @@ enum mlx5_parse_graph_flow_match_sample_offset_mode {
 	MLX5_GRAPH_SAMPLE_OFFSET_BITMASK = 0x2,
 };
 
+enum mlx5_parse_graph_flow_match_sample_tunnel_mode {
+	MLX5_GRAPH_SAMPLE_TUNNEL_OUTER = 0x0,
+	MLX5_GRAPH_SAMPLE_TUNNEL_INNER = 0x1,
+	MLX5_GRAPH_SAMPLE_TUNNEL_FIRST = 0x2
+};
+
 /* Node index for an input / output arc of the flex parser graph. */
 enum mlx5_parse_graph_arc_node_index {
 	MLX5_GRAPH_ARC_NODE_NULL = 0x0,
@@ -3967,9 +4007,15 @@ enum mlx5_parse_graph_arc_node_index {
 	MLX5_GRAPH_ARC_NODE_VXLAN_GPE = 0x8,
 	MLX5_GRAPH_ARC_NODE_GENEVE = 0x9,
 	MLX5_GRAPH_ARC_NODE_IPSEC_ESP = 0xa,
+	MLX5_GRAPH_ARC_NODE_IPV4 = 0xb,
+	MLX5_GRAPH_ARC_NODE_IPV6 = 0xc,
 	MLX5_GRAPH_ARC_NODE_PROGRAMMABLE = 0x1f,
 };
 
+#define MLX5_PARSE_GRAPH_FLOW_SAMPLE_MAX 8
+#define MLX5_PARSE_GRAPH_IN_ARC_MAX 8
+#define MLX5_PARSE_GRAPH_OUT_ARC_MAX 8
+
 /**
  * Convert a user mark to flow mark.
  *
-- 
2.33.1


^ permalink raw reply	[flat|nested] 22+ messages in thread

* [dpdk-dev] [PATCH v2 3/9] common/mlx5: fix flex parser DevX creation routine
  2021-11-02  8:53 ` [dpdk-dev] [PATCH v2 0/9] net/mlx5: add flex item support Gregory Etelson
  2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 1/9] common/mlx5: refactor HCA attributes query Gregory Etelson
  2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 2/9] common/mlx5: extend flex parser capabilities Gregory Etelson
@ 2021-11-02  8:53   ` Gregory Etelson
  2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 4/9] net/mlx5: update eCPRI flex parser structures Gregory Etelson
                     ` (7 subsequent siblings)
  10 siblings, 0 replies; 22+ messages in thread
From: Gregory Etelson @ 2021-11-02  8:53 UTC (permalink / raw)
  To: dev, getelson, viacheslavo; +Cc: matan, stable, Netanel Gonen, Bing Zhao

Add the missing settings of the modify_field_select and
next_header_field_size fields.

Fixes: 38119ebe01d6 ("common/mlx5: add DevX command for flex parsers")
Cc: stable@dpdk.org

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Reviewed-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/common/mlx5/mlx5_devx_cmds.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c
index 28e577a37e..12c114a91b 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.c
+++ b/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -620,10 +620,9 @@ mlx5_devx_cmd_query_parse_samples(struct mlx5_devx_obj *flex_obj,
 	return ret;
 }
 
-
 struct mlx5_devx_obj *
 mlx5_devx_cmd_create_flex_parser(void *ctx,
-			      struct mlx5_devx_graph_node_attr *data)
+				 struct mlx5_devx_graph_node_attr *data)
 {
 	uint32_t in[MLX5_ST_SZ_DW(create_flex_parser_in)] = {0};
 	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
@@ -647,12 +646,18 @@ mlx5_devx_cmd_create_flex_parser(void *ctx,
 		 MLX5_GENERAL_OBJ_TYPE_FLEX_PARSE_GRAPH);
 	MLX5_SET(parse_graph_flex, flex, header_length_mode,
 		 data->header_length_mode);
+	MLX5_SET64(parse_graph_flex, flex, modify_field_select,
+		   data->modify_field_select);
 	MLX5_SET(parse_graph_flex, flex, header_length_base_value,
 		 data->header_length_base_value);
 	MLX5_SET(parse_graph_flex, flex, header_length_field_offset,
 		 data->header_length_field_offset);
 	MLX5_SET(parse_graph_flex, flex, header_length_field_shift,
 		 data->header_length_field_shift);
+	MLX5_SET(parse_graph_flex, flex, next_header_field_offset,
+		 data->next_header_field_offset);
+	MLX5_SET(parse_graph_flex, flex, next_header_field_size,
+		 data->next_header_field_size);
 	MLX5_SET(parse_graph_flex, flex, header_length_field_mask,
 		 data->header_length_field_mask);
 	for (i = 0; i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
-- 
2.33.1


^ permalink raw reply	[flat|nested] 22+ messages in thread

* [dpdk-dev] [PATCH v2 4/9] net/mlx5: update eCPRI flex parser structures
  2021-11-02  8:53 ` [dpdk-dev] [PATCH v2 0/9] net/mlx5: add flex item support Gregory Etelson
                     ` (2 preceding siblings ...)
  2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 3/9] common/mlx5: fix flex parser DevX creation routine Gregory Etelson
@ 2021-11-02  8:53   ` Gregory Etelson
  2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 5/9] net/mlx5: add flex item API Gregory Etelson
                     ` (6 subsequent siblings)
  10 siblings, 0 replies; 22+ messages in thread
From: Gregory Etelson @ 2021-11-02  8:53 UTC (permalink / raw)
  To: dev, getelson, viacheslavo; +Cc: matan

From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

To handle the eCPRI protocol in flows, the mlx5 PMD engages the
flex parser hardware feature. While implementing eCPRI support we
anticipated that the flex parser usage would be extended, so all
the related variables were named accordingly and contain the
"flex" token. Now we are preparing to introduce the more generic
flex item approach; to avoid naming conflicts and improve code
readability, the eCPRI infrastructure related variables are
renamed as a preparation step.

Later, once the new flex item is implemented, we could consider
refactoring the eCPRI protocol support to be based on the common
flex item.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5.c         |  9 +++------
 drivers/net/mlx5/mlx5.h         | 12 +++---------
 drivers/net/mlx5/mlx5_flow_dv.c |  2 +-
 3 files changed, 7 insertions(+), 16 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 4ba850af26..cd7bb3f27c 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -866,8 +866,7 @@ bool
 mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flex_parser_profiles *prf =
-				&priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];
+	struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;
 
 	return !!prf->obj;
 }
@@ -886,8 +885,7 @@ int
 mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flex_parser_profiles *prf =
-				&priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];
+	struct mlx5_ecpri_parser_profile *prf =	&priv->sh->ecpri_parser;
 	struct mlx5_devx_graph_node_attr node = {
 		.modify_field_select = 0,
 	};
@@ -950,8 +948,7 @@ static void
 mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flex_parser_profiles *prf =
-				&priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];
+	struct mlx5_ecpri_parser_profile *prf =	&priv->sh->ecpri_parser;
 
 	if (prf->obj)
 		mlx5_devx_cmd_destroy(prf->obj);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 39c001aa1b..912c4a183b 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1013,14 +1013,8 @@ struct mlx5_dev_txpp {
 	uint64_t err_ts_future; /* Timestamp in the distant future. */
 };
 
-/* Supported flex parser profile ID. */
-enum mlx5_flex_parser_profile_id {
-	MLX5_FLEX_PARSER_ECPRI_0 = 0,
-	MLX5_FLEX_PARSER_MAX = 8,
-};
-
-/* Sample ID information of flex parser structure. */
-struct mlx5_flex_parser_profiles {
+/* Sample ID information of eCPRI flex parser structure. */
+struct mlx5_ecpri_parser_profile {
 	uint32_t num;		/* Actual number of samples. */
 	uint32_t ids[8];	/* Sample IDs for this profile. */
 	uint8_t offset[8];	/* Bytes offset of each parser. */
@@ -1169,7 +1163,7 @@ struct mlx5_dev_ctx_shared {
 	struct mlx5_devx_obj *td; /* Transport domain. */
 	struct mlx5_lag lag; /* LAG attributes */
 	void *tx_uar; /* Tx/packet pacing shared UAR. */
-	struct mlx5_flex_parser_profiles fp[MLX5_FLEX_PARSER_MAX];
+	struct mlx5_ecpri_parser_profile ecpri_parser;
 	/* Flex parser profiles information. */
 	void *devx_rx_uar; /* DevX UAR for Rx. */
 	struct mlx5_aso_age_mng *aso_age_mng;
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 8962d26c75..8e2714233c 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -9892,7 +9892,7 @@ flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
 	 */
 	if (!ecpri_m->hdr.common.u32)
 		return;
-	samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
+	samples = priv->sh->ecpri_parser.ids;
 	/* Need to take the whole DW as the mask to fill the entry. */
 	dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
 			    prog_sample_field_value_0);
-- 
2.33.1


^ permalink raw reply	[flat|nested] 22+ messages in thread

* [dpdk-dev] [PATCH v2 5/9] net/mlx5: add flex item API
  2021-11-02  8:53 ` [dpdk-dev] [PATCH v2 0/9] net/mlx5: add flex item support Gregory Etelson
                     ` (3 preceding siblings ...)
  2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 4/9] net/mlx5: update eCPRI flex parser structures Gregory Etelson
@ 2021-11-02  8:53   ` Gregory Etelson
  2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 6/9] net/mlx5: add flex parser DevX object management Gregory Etelson
                     ` (5 subsequent siblings)
  10 siblings, 0 replies; 22+ messages in thread
From: Gregory Etelson @ 2021-11-02  8:53 UTC (permalink / raw)
  To: dev, getelson, viacheslavo; +Cc: matan, Anatoly Burakov

From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

This patch is a preparation step for implementing the
flex item feature in the driver and it provides:

  - external entry point routines for flex item
    creation/deletion

  - flex item object management over the ports.

The flex item object keeps information about the item
created over the port: a reference counter to track
whether the item is in use by some active flows, and a
pointer to the underlying shared DevX object providing
all the data needed to translate the flow flex pattern
into matcher fields according to the hardware
configuration.

Only a few flex items are expected to be created on a
port, so the design is optimized for flow insertion
rate rather than for memory savings.
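
The following is a rough sketch, not part of the patch, of how a flow
could take and drop a reference on a flex item while the flow is
active; the function names are assumptions, while the atomic refcnt
field of struct mlx5_flex_item (mlx5.h) is introduced by this patch:

#include <stdint.h>
#include <errno.h>

/* Hypothetical helpers illustrating the reference counting scheme. */
static inline int
flex_item_acquire(struct mlx5_flex_item *flex)
{
	uint32_t old = __atomic_load_n(&flex->refcnt, __ATOMIC_ACQUIRE);

	do {
		/* Zero means the item is not valid or is being destroyed. */
		if (old == 0)
			return -EINVAL;
	} while (!__atomic_compare_exchange_n(&flex->refcnt, &old, old + 1,
					      0, __ATOMIC_ACQUIRE,
					      __ATOMIC_RELAXED));
	return 0;
}

static inline void
flex_item_release_ref(struct mlx5_flex_item *flex)
{
	__atomic_sub_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
}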

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.c  |   4 +
 drivers/net/mlx5/meson.build      |   1 +
 drivers/net/mlx5/mlx5.c           |   2 +-
 drivers/net/mlx5/mlx5.h           |  24 ++++
 drivers/net/mlx5/mlx5_flow.c      |  49 ++++++++
 drivers/net/mlx5/mlx5_flow.h      |  18 ++-
 drivers/net/mlx5/mlx5_flow_dv.c   |   3 +-
 drivers/net/mlx5/mlx5_flow_flex.c | 189 ++++++++++++++++++++++++++++++
 8 files changed, 286 insertions(+), 4 deletions(-)
 create mode 100644 drivers/net/mlx5/mlx5_flow_flex.c

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index f31f1e96c6..3f7c34b687 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1687,6 +1687,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		err = mlx5_alloc_shared_dr(priv);
 		if (err)
 			goto error;
+		if (mlx5_flex_item_port_init(eth_dev) < 0)
+			goto error;
 	}
 	if (sh->devx && config->dv_flow_en && config->dest_tir) {
 		priv->obj_ops = devx_obj_ops;
@@ -1823,6 +1825,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
 		if (priv->hrxqs)
 			mlx5_list_destroy(priv->hrxqs);
+		if (eth_dev && priv->flex_item_map)
+			mlx5_flex_item_port_cleanup(eth_dev);
 		mlx5_free(priv);
 		if (eth_dev != NULL)
 			eth_dev->data->dev_private = NULL;
diff --git a/drivers/net/mlx5/meson.build b/drivers/net/mlx5/meson.build
index 636a1be890..2f6d8cbb3d 100644
--- a/drivers/net/mlx5/meson.build
+++ b/drivers/net/mlx5/meson.build
@@ -17,6 +17,7 @@ sources = files(
         'mlx5_flow_meter.c',
         'mlx5_flow_dv.c',
         'mlx5_flow_aso.c',
+        'mlx5_flow_flex.c',
         'mlx5_mac.c',
         'mlx5_rss.c',
         'mlx5_rx.c',
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index cd7bb3f27c..a4a0e258a9 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -378,7 +378,6 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 	},
 };
 
-
 #define MLX5_FLOW_MIN_ID_POOL_SIZE 512
 #define MLX5_ID_GENERATION_ARRAY_FACTOR 16
 
@@ -1683,6 +1682,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 	mlx5_mp_os_req_stop_rxtx(dev);
 	/* Free the eCPRI flex parser resource. */
 	mlx5_flex_parser_ecpri_release(dev);
+	mlx5_flex_item_port_cleanup(dev);
 	if (priv->rxqs != NULL) {
 		/* XXX race condition if mlx5_rx_burst() is still running. */
 		rte_delay_us_sleep(1000);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 912c4a183b..f0c1775f8c 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -50,6 +50,9 @@
 #define MLX5_MAX_MODIFY_NUM			32
 #define MLX5_ROOT_TBL_MODIFY_NUM		16
 
+/* Maximal number of flex items created on the port. */
+#define MLX5_PORT_FLEX_ITEM_NUM			4
+
 enum mlx5_ipool_index {
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
 	MLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resource. */
@@ -1096,6 +1099,12 @@ struct mlx5_lag {
 	uint8_t affinity_mode; /* TIS or hash based affinity */
 };
 
+/* Port flex item context. */
+struct mlx5_flex_item {
+	struct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */
+	uint32_t refcnt; /* Atomically accessed refcnt by flows. */
+};
+
 /*
  * Shared Infiniband device context for Master/Representors
  * which belong to same IB device with multiple IB ports.
@@ -1425,6 +1434,10 @@ struct mlx5_priv {
 	struct mlx5_devx_obj *q_counters; /* DevX queue counter object. */
 	uint32_t counter_set_id; /* Queue counter ID to set in DevX objects. */
 	uint32_t lag_affinity_idx; /* LAG mode queue 0 affinity starting. */
+	rte_spinlock_t flex_item_sl; /* Flex item list spinlock. */
+	struct mlx5_flex_item flex_item[MLX5_PORT_FLEX_ITEM_NUM];
+	/* Flex items have been created on the port. */
+	uint32_t flex_item_map; /* Map of allocated flex item elements. */
 };
 
 #define PORT_ID(priv) ((priv)->dev_data->port_id)
@@ -1804,4 +1817,15 @@ mlx5_get_supported_sw_parsing_offloads(const struct mlx5_hca_attr *attr);
 uint32_t
 mlx5_get_supported_tunneling_offloads(const struct mlx5_hca_attr *attr);
 
+/* mlx5_flow_flex.c */
+
+struct rte_flow_item_flex_handle *
+flow_dv_item_create(struct rte_eth_dev *dev,
+		    const struct rte_flow_item_flex_conf *conf,
+		    struct rte_flow_error *error);
+int flow_dv_item_release(struct rte_eth_dev *dev,
+		    const struct rte_flow_item_flex_handle *flex_handle,
+		    struct rte_flow_error *error);
+int mlx5_flex_item_port_init(struct rte_eth_dev *dev);
+void mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev);
 #endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 2385a0b550..71550de808 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -748,6 +748,14 @@ mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
 				  struct rte_mbuf *m,
 				  struct rte_flow_restore_info *info,
 				  struct rte_flow_error *err);
+static struct rte_flow_item_flex_handle *
+mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
+			   const struct rte_flow_item_flex_conf *conf,
+			   struct rte_flow_error *error);
+static int
+mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
+			    const struct rte_flow_item_flex_handle *handle,
+			    struct rte_flow_error *error);
 
 static const struct rte_flow_ops mlx5_flow_ops = {
 	.validate = mlx5_flow_validate,
@@ -767,6 +775,8 @@ static const struct rte_flow_ops mlx5_flow_ops = {
 	.tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
 	.tunnel_item_release = mlx5_flow_tunnel_item_release,
 	.get_restore_info = mlx5_flow_tunnel_get_restore_info,
+	.flex_item_create = mlx5_flow_flex_item_create,
+	.flex_item_release = mlx5_flow_flex_item_release,
 };
 
 /* Tunnel information. */
@@ -9640,6 +9650,45 @@ mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh,
 }
 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
 
+/* Flex flow item API */
+static struct rte_flow_item_flex_handle *
+mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
+			   const struct rte_flow_item_flex_conf *conf,
+			   struct rte_flow_error *error)
+{
+	static const char err_msg[] = "flex item creation unsupported";
+	struct rte_flow_attr attr = { .transfer = 0 };
+	const struct mlx5_flow_driver_ops *fops =
+			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+
+	if (!fops->item_create) {
+		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, err_msg);
+		return NULL;
+	}
+	return fops->item_create(dev, conf, error);
+}
+
+static int
+mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
+			    const struct rte_flow_item_flex_handle *handle,
+			    struct rte_flow_error *error)
+{
+	static const char err_msg[] = "flex item release unsupported";
+	struct rte_flow_attr attr = { .transfer = 0 };
+	const struct mlx5_flow_driver_ops *fops =
+			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+
+	if (!fops->item_release) {
+		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, err_msg);
+		return -rte_errno;
+	}
+	return fops->item_release(dev, handle, error);
+}
+
 static void
 mlx5_dbg__print_pattern(const struct rte_flow_item *item)
 {
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 5509c28f01..43399471ec 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1232,6 +1232,19 @@ typedef int (*mlx5_flow_create_def_policy_t)
 			(struct rte_eth_dev *dev);
 typedef void (*mlx5_flow_destroy_def_policy_t)
 			(struct rte_eth_dev *dev);
+typedef struct rte_flow_item_flex_handle *(*mlx5_flow_item_create_t)
+			(struct rte_eth_dev *dev,
+			 const struct rte_flow_item_flex_conf *conf,
+			 struct rte_flow_error *error);
+typedef int (*mlx5_flow_item_release_t)
+			(struct rte_eth_dev *dev,
+			 const struct rte_flow_item_flex_handle *handle,
+			 struct rte_flow_error *error);
+typedef int (*mlx5_flow_item_update_t)
+			(struct rte_eth_dev *dev,
+			 const struct rte_flow_item_flex_handle *handle,
+			 const struct rte_flow_item_flex_conf *conf,
+			 struct rte_flow_error *error);
 
 struct mlx5_flow_driver_ops {
 	mlx5_flow_validate_t validate;
@@ -1266,6 +1279,9 @@ struct mlx5_flow_driver_ops {
 	mlx5_flow_action_update_t action_update;
 	mlx5_flow_action_query_t action_query;
 	mlx5_flow_sync_domain_t sync_domain;
+	mlx5_flow_item_create_t item_create;
+	mlx5_flow_item_release_t item_release;
+	mlx5_flow_item_update_t item_update;
 };
 
 /* mlx5_flow.c */
@@ -1722,6 +1738,4 @@ const struct mlx5_flow_tunnel *
 mlx5_get_tof(const struct rte_flow_item *items,
 	     const struct rte_flow_action *actions,
 	     enum mlx5_tof_rule_type *rule_type);
-
-
 #endif /* RTE_PMD_MLX5_FLOW_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 8e2714233c..9d60c9154f 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -17965,7 +17965,8 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
 	.action_update = flow_dv_action_update,
 	.action_query = flow_dv_action_query,
 	.sync_domain = flow_dv_sync_domain,
+	.item_create = flow_dv_item_create,
+	.item_release = flow_dv_item_release,
 };
-
 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
 
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
new file mode 100644
index 0000000000..b7bc4af6fb
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2021 NVIDIA Corporation & Affiliates
+ */
+#include <rte_malloc.h>
+#include <mlx5_devx_cmds.h>
+#include <mlx5_malloc.h>
+#include "mlx5.h"
+#include "mlx5_flow.h"
+
+static_assert(sizeof(uint32_t) * CHAR_BIT >= MLX5_PORT_FLEX_ITEM_NUM,
+	      "Flex item maximal number exceeds uint32_t bit width");
+
+/**
+ *  Routine called once on port initialization to init flex item
+ *  related infrastructure initialization
+ *
+ * @param dev
+ *   Ethernet device to perform flex item initialization
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flex_item_port_init(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	rte_spinlock_init(&priv->flex_item_sl);
+	MLX5_ASSERT(!priv->flex_item_map);
+	return 0;
+}
+
+/**
+ *  Routine called once on port close to perform flex item
+ *  related infrastructure cleanup.
+ *
+ * @param dev
+ *   Ethernet device to perform cleanup
+ */
+void
+mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	uint32_t i;
+
+	for (i = 0; i < MLX5_PORT_FLEX_ITEM_NUM && priv->flex_item_map ; i++) {
+		if (priv->flex_item_map & (1 << i)) {
+			/* DevX object dereferencing should be provided here. */
+			priv->flex_item_map &= ~(1 << i);
+		}
+	}
+}
+
+static int
+mlx5_flex_index(struct mlx5_priv *priv, struct mlx5_flex_item *item)
+{
+	uintptr_t start = (uintptr_t)&priv->flex_item[0];
+	uintptr_t entry = (uintptr_t)item;
+	uintptr_t idx = (entry - start) / sizeof(struct mlx5_flex_item);
+
+	if (entry < start ||
+	    idx >= MLX5_PORT_FLEX_ITEM_NUM ||
+	    (entry - start) % sizeof(struct mlx5_flex_item) ||
+	    !(priv->flex_item_map & (1u << idx)))
+		return -1;
+	return (int)idx;
+}
+
+static struct mlx5_flex_item *
+mlx5_flex_alloc(struct mlx5_priv *priv)
+{
+	struct mlx5_flex_item *item = NULL;
+
+	rte_spinlock_lock(&priv->flex_item_sl);
+	if (~priv->flex_item_map) {
+		uint32_t idx = rte_bsf32(~priv->flex_item_map);
+
+		if (idx < MLX5_PORT_FLEX_ITEM_NUM) {
+			item = &priv->flex_item[idx];
+			MLX5_ASSERT(!item->refcnt);
+			MLX5_ASSERT(!item->devx_fp);
+			item->devx_fp = NULL;
+			__atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
+			priv->flex_item_map |= 1u << idx;
+		}
+	}
+	rte_spinlock_unlock(&priv->flex_item_sl);
+	return item;
+}
+
+static void
+mlx5_flex_free(struct mlx5_priv *priv, struct mlx5_flex_item *item)
+{
+	int idx = mlx5_flex_index(priv, item);
+
+	MLX5_ASSERT(idx >= 0 &&
+		    idx < MLX5_PORT_FLEX_ITEM_NUM &&
+		    (priv->flex_item_map & (1u << idx)));
+	if (idx >= 0) {
+		rte_spinlock_lock(&priv->flex_item_sl);
+		MLX5_ASSERT(!item->refcnt);
+		MLX5_ASSERT(!item->devx_fp);
+		item->devx_fp = NULL;
+		__atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
+		priv->flex_item_map &= ~(1u << idx);
+		rte_spinlock_unlock(&priv->flex_item_sl);
+	}
+}
+
+/**
+ * Create the flex item with specified configuration over the Ethernet device.
+ *
+ * @param dev
+ *   Ethernet device to create flex item on.
+ * @param[in] conf
+ *   Flex item configuration.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. PMDs initialize this
+ *   structure in case of error only.
+ *
+ * @return
+ *   Non-NULL opaque pointer on success, NULL otherwise and rte_errno is set.
+ */
+struct rte_flow_item_flex_handle *
+flow_dv_item_create(struct rte_eth_dev *dev,
+		    const struct rte_flow_item_flex_conf *conf,
+		    struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flex_item *flex;
+
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	flex = mlx5_flex_alloc(priv);
+	if (!flex) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "too many flex items created on the port");
+		return NULL;
+	}
+	RTE_SET_USED(conf);
+	/* Mark initialized flex item valid. */
+	__atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
+	return (struct rte_flow_item_flex_handle *)flex;
+}
+
+/**
+ * Release the flex item on the specified Ethernet device.
+ *
+ * @param dev
+ *   Ethernet device to destroy flex item on.
+ * @param[in] handle
+ *   Handle of the item existing on the specified device.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. PMDs initialize this
+ *   structure in case of error only.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+flow_dv_item_release(struct rte_eth_dev *dev,
+		     const struct rte_flow_item_flex_handle *handle,
+		     struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flex_item *flex =
+		(struct mlx5_flex_item *)(uintptr_t)handle;
+	uint32_t old_refcnt = 1;
+
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	rte_spinlock_lock(&priv->flex_item_sl);
+	if (mlx5_flex_index(priv, flex) < 0) {
+		rte_spinlock_unlock(&priv->flex_item_sl);
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					  "invalid flex item handle value");
+	}
+	if (!__atomic_compare_exchange_n(&flex->refcnt, &old_refcnt, 0, 0,
+					 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
+		rte_spinlock_unlock(&priv->flex_item_sl);
+		return rte_flow_error_set(error, EBUSY,
+					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					  "flex item has flow references");
+	}
+	/* Flex item is marked as invalid, we can leave locked section. */
+	rte_spinlock_unlock(&priv->flex_item_sl);
+	mlx5_flex_free(priv, flex);
+	return 0;
+}
-- 
2.33.1


^ permalink raw reply	[flat|nested] 22+ messages in thread

* [dpdk-dev] [PATCH v2 6/9] net/mlx5: add flex parser DevX object management
  2021-11-02  8:53 ` [dpdk-dev] [PATCH v2 0/9] net/mlx5: add flex item support Gregory Etelson
                     ` (4 preceding siblings ...)
  2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 5/9] net/mlx5: add flex item API Gregory Etelson
@ 2021-11-02  8:53   ` Gregory Etelson
  2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 7/9] net/mlx5: translate flex item configuration Gregory Etelson
                     ` (4 subsequent siblings)
  10 siblings, 0 replies; 22+ messages in thread
From: Gregory Etelson @ 2021-11-02  8:53 UTC (permalink / raw)
  To: dev, getelson, viacheslavo; +Cc: matan

The DevX flex parsers can be shared between representors
within the same IB context. We should put the flex parser
objects into the shared list and engage the standard
mlx5_list_xxx API to manage them.
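
As an illustration only (the wrapper name is assumed, not part of the
patch), a caller obtains a shared DevX parser through the list API; an
entry with an identical devx_conf is reused instead of creating a new
hardware object:

/* Hypothetical wrapper around the shared flex parser list. */
static struct mlx5_flex_parser_devx *
flex_parser_get(struct mlx5_dev_ctx_shared *sh,
		struct mlx5_flex_parser_devx *conf)
{
	struct mlx5_list_entry *ent;

	/* create_cb()/match_cb() decide whether to reuse an entry. */
	ent = mlx5_list_register(sh->flex_parsers_dv, conf);
	if (!ent)
		return NULL;
	return container_of(ent, struct mlx5_flex_parser_devx, entry);
}

/* The matching mlx5_list_unregister() call drops the shared reference. */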

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Reviewed-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.c  |  10 +++
 drivers/net/mlx5/mlx5.c           |   4 +
 drivers/net/mlx5/mlx5.h           |  20 +++++
 drivers/net/mlx5/mlx5_flow_flex.c | 121 +++++++++++++++++++++++++++++-
 4 files changed, 154 insertions(+), 1 deletion(-)

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 3f7c34b687..1c6f50b72a 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -337,6 +337,16 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 					      flow_dv_dest_array_clone_free_cb);
 	if (!sh->dest_array_list)
 		goto error;
+	/* Init shared flex parsers list, no need lcore_share */
+	snprintf(s, sizeof(s), "%s_flex_parsers_list", sh->ibdev_name);
+	sh->flex_parsers_dv = mlx5_list_create(s, sh, false,
+					       mlx5_flex_parser_create_cb,
+					       mlx5_flex_parser_match_cb,
+					       mlx5_flex_parser_remove_cb,
+					       mlx5_flex_parser_clone_cb,
+					       mlx5_flex_parser_clone_free_cb);
+	if (!sh->flex_parsers_dv)
+		goto error;
 #endif
 #ifdef HAVE_MLX5DV_DR
 	void *domain;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index a4a0e258a9..dc15688f21 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1429,6 +1429,10 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 		mlx5_flow_os_release_workspace();
 	}
 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
+	if (sh->flex_parsers_dv) {
+		mlx5_list_destroy(sh->flex_parsers_dv);
+		sh->flex_parsers_dv = NULL;
+	}
 	/*
 	 *  Ensure there is no async event handler installed.
 	 *  Only primary process handles async device events.
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index f0c1775f8c..63de6523e8 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1099,6 +1099,15 @@ struct mlx5_lag {
 	uint8_t affinity_mode; /* TIS or hash based affinity */
 };
 
+/* DevX flex parser context. */
+struct mlx5_flex_parser_devx {
+	struct mlx5_list_entry entry;  /* List element at the beginning. */
+	uint32_t num_samples;
+	void *devx_obj;
+	struct mlx5_devx_graph_node_attr devx_conf;
+	uint32_t sample_ids[MLX5_GRAPH_NODE_SAMPLE_NUM];
+};
+
 /* Port flex item context. */
 struct mlx5_flex_item {
 	struct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */
@@ -1159,6 +1168,7 @@ struct mlx5_dev_ctx_shared {
 	struct mlx5_list *push_vlan_action_list; /* Push VLAN actions. */
 	struct mlx5_list *sample_action_list; /* List of sample actions. */
 	struct mlx5_list *dest_array_list;
+	struct mlx5_list *flex_parsers_dv; /* Flex Item parsers. */
 	/* List of destination array actions. */
 	struct mlx5_flow_counter_mng cmng; /* Counters management structure. */
 	void *default_miss_action; /* Default miss action. */
@@ -1828,4 +1838,14 @@ int flow_dv_item_release(struct rte_eth_dev *dev,
 		    struct rte_flow_error *error);
 int mlx5_flex_item_port_init(struct rte_eth_dev *dev);
 void mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev);
+/* Flex parser list callbacks. */
+struct mlx5_list_entry *mlx5_flex_parser_create_cb(void *list_ctx, void *ctx);
+int mlx5_flex_parser_match_cb(void *list_ctx,
+			      struct mlx5_list_entry *iter, void *ctx);
+void mlx5_flex_parser_remove_cb(void *list_ctx,	struct mlx5_list_entry *entry);
+struct mlx5_list_entry *mlx5_flex_parser_clone_cb(void *list_ctx,
+						  struct mlx5_list_entry *entry,
+						  void *ctx);
+void mlx5_flex_parser_clone_free_cb(void *tool_ctx,
+				    struct mlx5_list_entry *entry);
 #endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index b7bc4af6fb..2f87073e97 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -45,7 +45,13 @@ mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev)
 
 	for (i = 0; i < MLX5_PORT_FLEX_ITEM_NUM && priv->flex_item_map ; i++) {
 		if (priv->flex_item_map & (1 << i)) {
-			/* DevX object dereferencing should be provided here. */
+			struct mlx5_flex_item *flex = &priv->flex_item[i];
+
+			claim_zero(mlx5_list_unregister
+					(priv->sh->flex_parsers_dv,
+					 &flex->devx_fp->entry));
+			flex->devx_fp = NULL;
+			flex->refcnt = 0;
 			priv->flex_item_map &= ~(1 << i);
 		}
 	}
@@ -127,7 +133,9 @@ flow_dv_item_create(struct rte_eth_dev *dev,
 		    struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flex_parser_devx devx_config = { .devx_obj = NULL };
 	struct mlx5_flex_item *flex;
+	struct mlx5_list_entry *ent;
 
 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
 	flex = mlx5_flex_alloc(priv);
@@ -137,10 +145,22 @@ flow_dv_item_create(struct rte_eth_dev *dev,
 				   "too many flex items created on the port");
 		return NULL;
 	}
+	ent = mlx5_list_register(priv->sh->flex_parsers_dv, &devx_config);
+	if (!ent) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "flex item creation failure");
+		goto error;
+	}
+	flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
 	RTE_SET_USED(conf);
 	/* Mark initialized flex item valid. */
 	__atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
 	return (struct rte_flow_item_flex_handle *)flex;
+
+error:
+	mlx5_flex_free(priv, flex);
+	return NULL;
 }
 
 /**
@@ -166,6 +186,7 @@ flow_dv_item_release(struct rte_eth_dev *dev,
 	struct mlx5_flex_item *flex =
 		(struct mlx5_flex_item *)(uintptr_t)handle;
 	uint32_t old_refcnt = 1;
+	int rc;
 
 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
 	rte_spinlock_lock(&priv->flex_item_sl);
@@ -184,6 +205,104 @@ flow_dv_item_release(struct rte_eth_dev *dev,
 	}
 	/* Flex item is marked as invalid, we can leave locked section. */
 	rte_spinlock_unlock(&priv->flex_item_sl);
+	MLX5_ASSERT(flex->devx_fp);
+	rc = mlx5_list_unregister(priv->sh->flex_parsers_dv,
+				  &flex->devx_fp->entry);
+	flex->devx_fp = NULL;
 	mlx5_flex_free(priv, flex);
+	if (rc < 0)
+		return rte_flow_error_set(error, EBUSY,
+					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					  "flex item release failure");
 	return 0;
 }
+
+/* DevX flex parser list callbacks. */
+struct mlx5_list_entry *
+mlx5_flex_parser_create_cb(void *list_ctx, void *ctx)
+{
+	struct mlx5_dev_ctx_shared *sh = list_ctx;
+	struct mlx5_flex_parser_devx *fp, *conf = ctx;
+	int ret;
+
+	fp = mlx5_malloc(MLX5_MEM_ZERO,	sizeof(struct mlx5_flex_parser_devx),
+			 0, SOCKET_ID_ANY);
+	if (!fp)
+		return NULL;
+	/* Copy the requested configurations. */
+	fp->num_samples = conf->num_samples;
+	memcpy(&fp->devx_conf, &conf->devx_conf, sizeof(fp->devx_conf));
+	/* Create DevX flex parser. */
+	fp->devx_obj = mlx5_devx_cmd_create_flex_parser(sh->cdev->ctx,
+							&fp->devx_conf);
+	if (!fp->devx_obj)
+		goto error;
+	/* Query the firmware assigned sample ids. */
+	ret = mlx5_devx_cmd_query_parse_samples(fp->devx_obj,
+						fp->sample_ids,
+						fp->num_samples);
+	if (ret)
+		goto error;
+	DRV_LOG(DEBUG, "DEVx flex parser %p created, samples num: %u",
+		(const void *)fp, fp->num_samples);
+	return &fp->entry;
+error:
+	if (fp->devx_obj)
+		mlx5_devx_cmd_destroy((void *)(uintptr_t)fp->devx_obj);
+	if (fp)
+		mlx5_free(fp);
+	return NULL;
+}
+
+int
+mlx5_flex_parser_match_cb(void *list_ctx,
+			  struct mlx5_list_entry *iter, void *ctx)
+{
+	struct mlx5_flex_parser_devx *fp =
+		container_of(iter, struct mlx5_flex_parser_devx, entry);
+	struct mlx5_flex_parser_devx *org =
+		container_of(ctx, struct mlx5_flex_parser_devx, entry);
+
+	RTE_SET_USED(list_ctx);
+	return !iter || !ctx || memcmp(&fp->devx_conf,
+				       &org->devx_conf,
+				       sizeof(fp->devx_conf));
+}
+
+void
+mlx5_flex_parser_remove_cb(void *list_ctx, struct mlx5_list_entry *entry)
+{
+	struct mlx5_flex_parser_devx *fp =
+		container_of(entry, struct mlx5_flex_parser_devx, entry);
+
+	RTE_SET_USED(list_ctx);
+	MLX5_ASSERT(fp->devx_obj);
+	claim_zero(mlx5_devx_cmd_destroy(fp->devx_obj));
+	DRV_LOG(DEBUG, "DEVx flex parser %p destroyed", (const void *)fp);
+	mlx5_free(entry);
+}
+
+struct mlx5_list_entry *
+mlx5_flex_parser_clone_cb(void *list_ctx,
+			  struct mlx5_list_entry *entry, void *ctx)
+{
+	struct mlx5_flex_parser_devx *fp;
+
+	RTE_SET_USED(list_ctx);
+	RTE_SET_USED(entry);
+	fp = mlx5_malloc(0, sizeof(struct mlx5_flex_parser_devx),
+			 0, SOCKET_ID_ANY);
+	if (!fp)
+		return NULL;
+	memcpy(fp, ctx, sizeof(struct mlx5_flex_parser_devx));
+	return &fp->entry;
+}
+
+void
+mlx5_flex_parser_clone_free_cb(void *list_ctx, struct mlx5_list_entry *entry)
+{
+	struct mlx5_flex_parser_devx *fp =
+		container_of(entry, struct mlx5_flex_parser_devx, entry);
+	RTE_SET_USED(list_ctx);
+	mlx5_free(fp);
+}
-- 
2.33.1


^ permalink raw reply	[flat|nested] 22+ messages in thread

* [dpdk-dev] [PATCH v2 7/9] net/mlx5: translate flex item configuration
  2021-11-02  8:53 ` [dpdk-dev] [PATCH v2 0/9] net/mlx5: add flex item support Gregory Etelson
                     ` (5 preceding siblings ...)
  2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 6/9] net/mlx5: add flex parser DevX object management Gregory Etelson
@ 2021-11-02  8:53   ` Gregory Etelson
  2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 8/9] net/mlx5: translate flex item pattern into matcher Gregory Etelson
                     ` (3 subsequent siblings)
  10 siblings, 0 replies; 22+ messages in thread
From: Gregory Etelson @ 2021-11-02  8:53 UTC (permalink / raw)
  To: dev, getelson, viacheslavo; +Cc: matan

From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

RTE Flow flex item configuration should be translated
into actual hardware settings (a small illustration follows
the list below):

  - translate the header length and next protocol field samplings
  - translate the data field sampling; fields with the same mode
    and matching related parameters are relocated and grouped so
    they are covered by the minimal number of hardware sampling
    registers (each register covers 32 contiguous bits anywhere
    in the packet, aligned to a byte boundary, so fields with
    smaller lengths, or segments of bigger fields, can be
    combined)
  - translate the input and output links
  - prepare the data for parsing the flex item pattern on flow
    creation
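
A small numeric illustration of the sample register packing, with an
assumed helper name; only the 32-bit byte-aligned register constraint
comes from the description above, and field sharing is ignored:

#include <stdint.h>

/* Illustrative only: number of 32-bit, byte-aligned sample registers
 * needed to cover a single field on its own.
 */
static inline uint32_t
flex_sample_regs_needed(uint32_t bit_offset, uint32_t bit_width)
{
	uint32_t start_byte = bit_offset / 8;
	uint32_t end_byte = (bit_offset + bit_width + 7) / 8;

	return (end_byte - start_byte + 3) / 4;
}

/* Example: a 16-bit field at bit offset 32 and an 8-bit field at bit
 * offset 48 both fall into the same register window (bytes 4..7), so
 * they can share one sample register, while a 48-bit field occupying
 * bits 96..143 is split into segments over two registers.
 */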

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5.h           |  15 +
 drivers/net/mlx5/mlx5_flow_flex.c | 844 +++++++++++++++++++++++++++++-
 2 files changed, 858 insertions(+), 1 deletion(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 63de6523e8..18dfa2fc7b 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -53,6 +53,9 @@
 /* Maximal number of flex items created on the port.*/
 #define MLX5_PORT_FLEX_ITEM_NUM			4
 
+/* Maximal number of field/field parts to map into sample registers. */
+#define MLX5_FLEX_ITEM_MAPPING_NUM		32
+
 enum mlx5_ipool_index {
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
 	MLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resource. */
@@ -1108,10 +1111,22 @@ struct mlx5_flex_parser_devx {
 	uint32_t sample_ids[MLX5_GRAPH_NODE_SAMPLE_NUM];
 };
 
+/* Pattern field descriptor - how to translate flex pattern into samples. */
+__extension__
+struct mlx5_flex_pattern_field {
+	uint16_t width:6;
+	uint16_t shift:5;
+	uint16_t reg_id:5;
+};
+#define MLX5_INVALID_SAMPLE_REG_ID 0x1F
+
 /* Port flex item context. */
 struct mlx5_flex_item {
 	struct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */
 	uint32_t refcnt; /* Atomically accessed refcnt by flows. */
+	enum rte_flow_item_flex_tunnel_mode tunnel_mode; /* Tunnel mode. */
+	uint32_t mapnum; /* Number of pattern translation entries. */
+	struct mlx5_flex_pattern_field map[MLX5_FLEX_ITEM_MAPPING_NUM];
 };
 
 /*
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index 2f87073e97..b4a9f1a537 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -113,6 +113,847 @@ mlx5_flex_free(struct mlx5_priv *priv, struct mlx5_flex_item *item)
 	}
 }
 
+/*
+ * Calculate largest mask value for a given shift.
+ *
+ *   shift      mask
+ * ------- ---------------
+ *    0     b111100  0x3C
+ *    1     b111110  0x3E
+ *    2     b111111  0x3F
+ *    3     b011111  0x1F
+ *    4     b001111  0x0F
+ *    5     b000111  0x07
+ */
+static uint8_t
+mlx5_flex_hdr_len_mask(uint8_t shift,
+		       const struct mlx5_hca_flex_attr *attr)
+{
+	uint32_t base_mask;
+	int diff = shift - MLX5_PARSE_GRAPH_NODE_HDR_LEN_SHIFT_DWORD;
+
+	base_mask = mlx5_hca_parse_graph_node_base_hdr_len_mask(attr);
+	return diff == 0 ? base_mask :
+	       diff < 0 ? (base_mask << -diff) & base_mask : base_mask >> diff;
+}
+
+static int
+mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr,
+			   const struct rte_flow_item_flex_conf *conf,
+			   struct mlx5_flex_parser_devx *devx,
+			   struct rte_flow_error *error)
+{
+	const struct rte_flow_item_flex_field *field = &conf->next_header;
+	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
+	uint32_t len_width, mask;
+
+	if (field->field_base % CHAR_BIT)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "not byte aligned header length field");
+	switch (field->field_mode) {
+	case FIELD_MODE_DUMMY:
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "invalid header length field mode (DUMMY)");
+	case FIELD_MODE_FIXED:
+		if (!(attr->header_length_mode &
+		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIXED)))
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported header length field mode (FIXED)");
+		if (attr->header_length_mask_width < field->field_size)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "header length field width exceeds limit");
+		if (field->offset_shift < 0 ||
+		    field->offset_shift > attr->header_length_mask_width)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "invalid header length field shift (FIXED)");
+		if (field->field_base < 0)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "negative header length field base (FIXED)");
+		node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED;
+		break;
+	case FIELD_MODE_OFFSET:
+		if (!(attr->header_length_mode &
+		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIELD)))
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported header length field mode (OFFSET)");
+		node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIELD;
+		if (field->offset_mask == 0 ||
+		    !rte_is_power_of_2(field->offset_mask + 1))
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "invalid length field offset mask (OFFSET)");
+		len_width = rte_fls_u32(field->offset_mask);
+		if (len_width > attr->header_length_mask_width)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "length field offset mask too wide (OFFSET)");
+		mask = mlx5_flex_hdr_len_mask(field->offset_shift, attr);
+		if (mask < field->offset_mask)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "length field shift too big (OFFSET)");
+		node->header_length_field_mask = RTE_MIN(mask,
+							 field->offset_mask);
+		break;
+	case FIELD_MODE_BITMASK:
+		if (!(attr->header_length_mode &
+		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_BITMASK)))
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported header length field mode (BITMASK)");
+		if (attr->header_length_mask_width < field->field_size)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "header length field width exceeds limit");
+		node->header_length_mode = MLX5_GRAPH_NODE_LEN_BITMASK;
+		mask = mlx5_flex_hdr_len_mask(field->offset_shift, attr);
+		if (mask < field->offset_mask)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "length field shift too big (BITMASK)");
+		node->header_length_field_mask = RTE_MIN(mask,
+							 field->offset_mask);
+		break;
+	default:
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "unknown header length field mode");
+	}
+	if (field->field_base / CHAR_BIT >= 0 &&
+	    field->field_base / CHAR_BIT > attr->max_base_header_length)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "header length field base exceeds limit");
+	node->header_length_base_value = field->field_base / CHAR_BIT;
+	if (field->field_mode == FIELD_MODE_OFFSET ||
+	    field->field_mode == FIELD_MODE_BITMASK) {
+		if (field->offset_shift > 15 || field->offset_shift < 0)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "header length field shift exceeds limit");
+		node->header_length_field_shift	= field->offset_shift;
+		node->header_length_field_offset = field->offset_base;
+	}
+	return 0;
+}
+
+static int
+mlx5_flex_translate_next(struct mlx5_hca_flex_attr *attr,
+			 const struct rte_flow_item_flex_conf *conf,
+			 struct mlx5_flex_parser_devx *devx,
+			 struct rte_flow_error *error)
+{
+	const struct rte_flow_item_flex_field *field = &conf->next_protocol;
+	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
+
+	switch (field->field_mode) {
+	case FIELD_MODE_DUMMY:
+		if (conf->nb_outputs)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "next protocol field is required (DUMMY)");
+		return 0;
+	case FIELD_MODE_FIXED:
+		break;
+	case FIELD_MODE_OFFSET:
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "unsupported next protocol field mode (OFFSET)");
+		break;
+	case FIELD_MODE_BITMASK:
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "unsupported next protocol field mode (BITMASK)");
+	default:
+		return rte_flow_error_set
+			(error, EINVAL,
+			 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "unknown next protocol field mode");
+	}
+	MLX5_ASSERT(field->field_mode == FIELD_MODE_FIXED);
+	if (!conf->nb_outputs)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "out link(s) is required if next field present");
+	if (attr->max_next_header_offset < field->field_base)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "next protocol field base exceeds limit");
+	if (field->offset_shift)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "unsupported next protocol field shift");
+	node->next_header_field_offset = field->field_base;
+	node->next_header_field_size = field->field_size;
+	return 0;
+}
+
+/* Helper structure to handle field bit intervals. */
+struct mlx5_flex_field_cover {
+	uint16_t num;
+	int32_t start[MLX5_FLEX_ITEM_MAPPING_NUM];
+	int32_t end[MLX5_FLEX_ITEM_MAPPING_NUM];
+	uint8_t mapped[MLX5_FLEX_ITEM_MAPPING_NUM / CHAR_BIT + 1];
+};
+
+static void
+mlx5_flex_insert_field(struct mlx5_flex_field_cover *cover,
+		       uint16_t num, int32_t start, int32_t end)
+{
+	MLX5_ASSERT(num < MLX5_FLEX_ITEM_MAPPING_NUM);
+	MLX5_ASSERT(num <= cover->num);
+	if (num < cover->num) {
+		memmove(&cover->start[num + 1],	&cover->start[num],
+			(cover->num - num) * sizeof(int32_t));
+		memmove(&cover->end[num + 1],	&cover->end[num],
+			(cover->num - num) * sizeof(int32_t));
+	}
+	cover->start[num] = start;
+	cover->end[num] = end;
+	cover->num++;
+}
+
+static void
+mlx5_flex_merge_field(struct mlx5_flex_field_cover *cover, uint16_t num)
+{
+	uint32_t i, del = 0;
+	int32_t end;
+
+	MLX5_ASSERT(num < MLX5_FLEX_ITEM_MAPPING_NUM);
+	MLX5_ASSERT(num < (cover->num - 1));
+	end = cover->end[num];
+	for (i = num + 1; i < cover->num; i++) {
+		if (end < cover->start[i])
+			break;
+		del++;
+		if (end <= cover->end[i]) {
+			cover->end[num] = cover->end[i];
+			break;
+		}
+	}
+	if (del) {
+		MLX5_ASSERT(del < (cover->num - 1u - num));
+		cover->num -= del;
+		MLX5_ASSERT(cover->num > num);
+		if ((cover->num - num) > 1) {
+			memmove(&cover->start[num + 1],
+				&cover->start[num + 1 + del],
+				(cover->num - num - 1) * sizeof(int32_t));
+			memmove(&cover->end[num + 1],
+				&cover->end[num + 1 + del],
+				(cover->num - num - 1) * sizeof(int32_t));
+		}
+	}
+}
+
+/*
+ * Validate the sample field and update interval array
+ * if parameters match with the 'match' field.
+ * Returns:
+ *    < 0  - error
+ *    == 0 - no match, interval array not updated
+ *    > 0  - match, interval array updated
+ */
+static int
+mlx5_flex_cover_sample(struct mlx5_flex_field_cover *cover,
+		       struct rte_flow_item_flex_field *field,
+		       struct rte_flow_item_flex_field *match,
+		       struct mlx5_hca_flex_attr *attr,
+		       struct rte_flow_error *error)
+{
+	int32_t start, end;
+	uint32_t i;
+
+	switch (field->field_mode) {
+	case FIELD_MODE_DUMMY:
+		return 0;
+	case FIELD_MODE_FIXED:
+		if (!(attr->sample_offset_mode &
+		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_FIXED)))
+			return rte_flow_error_set
+				(error, EINVAL,
+				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported sample field mode (FIXED)");
+		if (field->offset_shift)
+			return rte_flow_error_set
+				(error, EINVAL,
+				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "invalid sample field shift (FIXED)");
+		if (field->field_base < 0)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "invalid sample field base (FIXED)");
+		if (field->field_base / CHAR_BIT > attr->max_sample_base_offset)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "sample field base exceeds limit (FIXED)");
+		break;
+	case FIELD_MODE_OFFSET:
+		if (!(attr->sample_offset_mode &
+		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_FIELD)))
+			return rte_flow_error_set
+				(error, EINVAL,
+				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported sample field mode (OFFSET)");
+		if (field->field_base / CHAR_BIT >= 0 &&
+		    field->field_base / CHAR_BIT > attr->max_sample_base_offset)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				"sample field base exceeds limit");
+		break;
+	case FIELD_MODE_BITMASK:
+		if (!(attr->sample_offset_mode &
+		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_BITMASK)))
+			return rte_flow_error_set
+				(error, EINVAL,
+				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported sample field mode (BITMASK)");
+		if (field->field_base / CHAR_BIT >= 0 &&
+		    field->field_base / CHAR_BIT > attr->max_sample_base_offset)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				"sample field base exceeds limit");
+		break;
+	default:
+		return rte_flow_error_set
+			(error, EINVAL,
+			 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "unknown data sample field mode");
+	}
+	if (!match) {
+		if (!field->field_size)
+			return rte_flow_error_set
+				(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				"zero sample field width");
+		if (field->field_id)
+			DRV_LOG(DEBUG, "sample field id hint ignored");
+	} else {
+		if (field->field_mode != match->field_mode ||
+		    field->offset_base | match->offset_base ||
+		    field->offset_mask | match->offset_mask ||
+		    field->offset_shift | match->offset_shift)
+			return 0;
+	}
+	start = field->field_base;
+	end = start + field->field_size;
+	/* Add the new or similar field to interval array. */
+	if (!cover->num) {
+		cover->start[cover->num] = start;
+		cover->end[cover->num] = end;
+		cover->num = 1;
+		return 1;
+	}
+	for (i = 0; i < cover->num; i++) {
+		if (start > cover->end[i]) {
+			if (i >= (cover->num - 1u)) {
+				mlx5_flex_insert_field(cover, cover->num,
+						       start, end);
+				break;
+			}
+			continue;
+		}
+		if (end < cover->start[i]) {
+			mlx5_flex_insert_field(cover, i, start, end);
+			break;
+		}
+		if (start < cover->start[i])
+			cover->start[i] = start;
+		if (end > cover->end[i]) {
+			cover->end[i] = end;
+			if (i < (cover->num - 1u))
+				mlx5_flex_merge_field(cover, i);
+		}
+		break;
+	}
+	return 1;
+}
+
+static void
+mlx5_flex_config_sample(struct mlx5_devx_match_sample_attr *na,
+			struct rte_flow_item_flex_field *field,
+			enum rte_flow_item_flex_tunnel_mode tunnel_mode)
+{
+	memset(na, 0, sizeof(struct mlx5_devx_match_sample_attr));
+	na->flow_match_sample_en = 1;
+	switch (field->field_mode) {
+	case FIELD_MODE_FIXED:
+		na->flow_match_sample_offset_mode =
+			MLX5_GRAPH_SAMPLE_OFFSET_FIXED;
+		break;
+	case FIELD_MODE_OFFSET:
+		na->flow_match_sample_offset_mode =
+			MLX5_GRAPH_SAMPLE_OFFSET_FIELD;
+		na->flow_match_sample_field_offset = field->offset_base;
+		na->flow_match_sample_field_offset_mask = field->offset_mask;
+		na->flow_match_sample_field_offset_shift = field->offset_shift;
+		break;
+	case FIELD_MODE_BITMASK:
+		na->flow_match_sample_offset_mode =
+			MLX5_GRAPH_SAMPLE_OFFSET_BITMASK;
+		na->flow_match_sample_field_offset = field->offset_base;
+		na->flow_match_sample_field_offset_mask = field->offset_mask;
+		na->flow_match_sample_field_offset_shift = field->offset_shift;
+		break;
+	default:
+		MLX5_ASSERT(false);
+		break;
+	}
+	switch (tunnel_mode) {
+	case FLEX_TUNNEL_MODE_SINGLE:
+		/* Fallthrough */
+	case FLEX_TUNNEL_MODE_TUNNEL:
+		na->flow_match_sample_tunnel_mode =
+			MLX5_GRAPH_SAMPLE_TUNNEL_FIRST;
+		break;
+	case FLEX_TUNNEL_MODE_MULTI:
+		/* Fallthrough */
+	case FLEX_TUNNEL_MODE_OUTER:
+		na->flow_match_sample_tunnel_mode =
+			MLX5_GRAPH_SAMPLE_TUNNEL_OUTER;
+		break;
+	case FLEX_TUNNEL_MODE_INNER:
+		na->flow_match_sample_tunnel_mode =
+			MLX5_GRAPH_SAMPLE_TUNNEL_INNER;
+		break;
+	default:
+		MLX5_ASSERT(false);
+		break;
+	}
+}
+
+/* Map specified field to set/subset of allocated sample registers. */
+static int
+mlx5_flex_map_sample(struct rte_flow_item_flex_field *field,
+		     struct mlx5_flex_parser_devx *parser,
+		     struct mlx5_flex_item *item,
+		     struct rte_flow_error *error)
+{
+	struct mlx5_devx_match_sample_attr node;
+	int32_t start = field->field_base;
+	int32_t end = start + field->field_size;
+	struct mlx5_flex_pattern_field *trans;
+	uint32_t i, done_bits = 0;
+
+	if (field->field_mode == FIELD_MODE_DUMMY) {
+		done_bits = field->field_size;
+		while (done_bits) {
+			uint32_t part = RTE_MIN(done_bits,
+						sizeof(uint32_t) * CHAR_BIT);
+			if (item->mapnum >= MLX5_FLEX_ITEM_MAPPING_NUM)
+				return rte_flow_error_set
+					(error,
+					 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					 "too many flex item pattern translations");
+			trans = &item->map[item->mapnum];
+			trans->reg_id = MLX5_INVALID_SAMPLE_REG_ID;
+			trans->shift = 0;
+			trans->width = part;
+			item->mapnum++;
+			done_bits -= part;
+		}
+		return 0;
+	}
+	mlx5_flex_config_sample(&node, field, item->tunnel_mode);
+	for (i = 0; i < parser->num_samples; i++) {
+		struct mlx5_devx_match_sample_attr *sample =
+			&parser->devx_conf.sample[i];
+		int32_t reg_start, reg_end;
+		int32_t cov_start, cov_end;
+
+		MLX5_ASSERT(sample->flow_match_sample_en);
+		if (!sample->flow_match_sample_en)
+			break;
+		node.flow_match_sample_field_base_offset =
+			sample->flow_match_sample_field_base_offset;
+		if (memcmp(&node, sample, sizeof(node)))
+			continue;
+		reg_start = (int8_t)sample->flow_match_sample_field_base_offset;
+		reg_start *= CHAR_BIT;
+		reg_end = reg_start + 32;
+		if (end <= reg_start || start >= reg_end)
+			continue;
+		cov_start = RTE_MAX(reg_start, start);
+		cov_end = RTE_MIN(reg_end, end);
+		MLX5_ASSERT(cov_end > cov_start);
+		done_bits += cov_end - cov_start;
+		if (item->mapnum >= MLX5_FLEX_ITEM_MAPPING_NUM)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "too many flex item pattern translations");
+		trans = &item->map[item->mapnum];
+		item->mapnum++;
+		trans->reg_id = i;
+		trans->shift = cov_start - reg_start;
+		trans->width = cov_end - cov_start;
+	}
+	if (done_bits != field->field_size) {
+		MLX5_ASSERT(false);
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "failed to map field to sample register");
+	}
+	return 0;
+}
+
+/* Allocate sample registers for the specified field type and interval array. */
+static int
+mlx5_flex_alloc_sample(struct mlx5_flex_field_cover *cover,
+		       struct mlx5_flex_parser_devx *parser,
+		       struct mlx5_flex_item *item,
+		       struct rte_flow_item_flex_field *field,
+		       struct mlx5_hca_flex_attr *attr,
+		       struct rte_flow_error *error)
+{
+	struct mlx5_devx_match_sample_attr node;
+	uint32_t idx = 0;
+
+	mlx5_flex_config_sample(&node, field, item->tunnel_mode);
+	while (idx < cover->num) {
+		int32_t start, end;
+
+		/*
+		 * Sample base offsets are in bytes, should be aligned
+		 * to 32-bit as required by firmware for samples.
+		 */
+		start = RTE_ALIGN_FLOOR(cover->start[idx],
+					sizeof(uint32_t) * CHAR_BIT);
+		node.flow_match_sample_field_base_offset =
+						(start / CHAR_BIT) & 0xFF;
+		/* Allocate sample register. */
+		if (parser->num_samples >= MLX5_GRAPH_NODE_SAMPLE_NUM ||
+		    parser->num_samples >= attr->max_num_sample ||
+		    parser->num_samples >= attr->max_num_prog_sample)
+			return rte_flow_error_set
+				(error, EINVAL,
+				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "no sample registers to handle all flex item fields");
+		parser->devx_conf.sample[parser->num_samples] = node;
+		parser->num_samples++;
+		/* Remove or update covered intervals. */
+		end = start + 32;
+		while (idx < cover->num) {
+			if (end >= cover->end[idx]) {
+				idx++;
+				continue;
+			}
+			if (end > cover->start[idx])
+				cover->start[idx] = end;
+			break;
+		}
+	}
+	return 0;
+}
+
+static int
+mlx5_flex_translate_sample(struct mlx5_hca_flex_attr *attr,
+			   const struct rte_flow_item_flex_conf *conf,
+			   struct mlx5_flex_parser_devx *parser,
+			   struct mlx5_flex_item *item,
+			   struct rte_flow_error *error)
+{
+	struct mlx5_flex_field_cover cover;
+	uint32_t i, j;
+	int ret;
+
+	switch (conf->tunnel) {
+	case FLEX_TUNNEL_MODE_SINGLE:
+		/* Fallthrough */
+	case FLEX_TUNNEL_MODE_OUTER:
+		/* Fallthrough */
+	case FLEX_TUNNEL_MODE_INNER:
+		/* Fallthrough */
+	case FLEX_TUNNEL_MODE_MULTI:
+		/* Fallthrough */
+	case FLEX_TUNNEL_MODE_TUNNEL:
+		break;
+	default:
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "unrecognized tunnel mode");
+	}
+	item->tunnel_mode = conf->tunnel;
+	if (conf->nb_samples > MLX5_FLEX_ITEM_MAPPING_NUM)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "sample field number exceeds limit");
+	/*
+	 * The application can specify fields smaller or bigger than 32 bits
+	 * covered with single sample register and it can specify field
+	 * offsets in any order.
+	 *
+	 * Gather all similar fields together, build array of bit intervals
+	 * in ascending order and try to cover with the smallest set of sample
+	 * registers.
+	 */
+	memset(&cover, 0, sizeof(cover));
+	for (i = 0; i < conf->nb_samples; i++) {
+		struct rte_flow_item_flex_field *fl = conf->sample_data + i;
+
+		/* Check whether field was covered in the previous iteration. */
+		if (cover.mapped[i / CHAR_BIT] & (1u << (i % CHAR_BIT)))
+			continue;
+		if (fl->field_mode == FIELD_MODE_DUMMY)
+			continue;
+		/* Build an interval array for the field and similar ones */
+		cover.num = 0;
+		/* Add the first field to array unconditionally. */
+		ret = mlx5_flex_cover_sample(&cover, fl, NULL, attr, error);
+		if (ret < 0)
+			return ret;
+		MLX5_ASSERT(ret > 0);
+		cover.mapped[i / CHAR_BIT] |= 1u << (i % CHAR_BIT);
+		for (j = i + 1; j < conf->nb_samples; j++) {
+			struct rte_flow_item_flex_field *ft;
+
+			/* Add field to array if its type matches. */
+			ft = conf->sample_data + j;
+			ret = mlx5_flex_cover_sample(&cover, ft, fl,
+						     attr, error);
+			if (ret < 0)
+				return ret;
+			if (!ret)
+				continue;
+			cover.mapped[j / CHAR_BIT] |= 1u << (j % CHAR_BIT);
+		}
+		/* Allocate sample registers to cover array of intervals. */
+		ret = mlx5_flex_alloc_sample(&cover, parser, item,
+					     fl, attr, error);
+		if (ret)
+			return ret;
+	}
+	/* Build the item pattern translating data on flow creation. */
+	item->mapnum = 0;
+	memset(&item->map, 0, sizeof(item->map));
+	for (i = 0; i < conf->nb_samples; i++) {
+		struct rte_flow_item_flex_field *fl = conf->sample_data + i;
+
+		ret = mlx5_flex_map_sample(fl, parser, item, error);
+		if (ret) {
+			MLX5_ASSERT(false);
+			return ret;
+		}
+	}
+	if (conf->tunnel == FLEX_TUNNEL_MODE_MULTI) {
+		/*
+		 * In FLEX_TUNNEL_MODE_MULTI tunnel mode PMD creates 2 sets
+		 * of samples. The first set is for outer and the second set
+		 * for inner flex flow item. Outer and inner samples differ
+		 * only in tunnel_mode.
+		 */
+		if (parser->num_samples > MLX5_GRAPH_NODE_SAMPLE_NUM / 2)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "no sample registers for inner");
+		rte_memcpy(parser->devx_conf.sample + parser->num_samples,
+			   parser->devx_conf.sample,
+			   parser->num_samples *
+					sizeof(parser->devx_conf.sample[0]));
+		for (i = 0; i < parser->num_samples; i++) {
+			struct mlx5_devx_match_sample_attr *sm = i +
+				parser->devx_conf.sample + parser->num_samples;
+
+			sm->flow_match_sample_tunnel_mode =
+						MLX5_GRAPH_SAMPLE_TUNNEL_INNER;
+		}
+		parser->num_samples *= 2;
+	}
+	return 0;
+}
+
+static int
+mlx5_flex_arc_type(enum rte_flow_item_type type, int in)
+{
+	switch (type) {
+	case RTE_FLOW_ITEM_TYPE_ETH:
+		return  MLX5_GRAPH_ARC_NODE_MAC;
+	case RTE_FLOW_ITEM_TYPE_IPV4:
+		return in ? MLX5_GRAPH_ARC_NODE_IP : MLX5_GRAPH_ARC_NODE_IPV4;
+	case RTE_FLOW_ITEM_TYPE_IPV6:
+		return in ? MLX5_GRAPH_ARC_NODE_IP : MLX5_GRAPH_ARC_NODE_IPV6;
+	case RTE_FLOW_ITEM_TYPE_UDP:
+		return MLX5_GRAPH_ARC_NODE_UDP;
+	case RTE_FLOW_ITEM_TYPE_TCP:
+		return MLX5_GRAPH_ARC_NODE_TCP;
+	case RTE_FLOW_ITEM_TYPE_MPLS:
+		return MLX5_GRAPH_ARC_NODE_MPLS;
+	case RTE_FLOW_ITEM_TYPE_GRE:
+		return MLX5_GRAPH_ARC_NODE_GRE;
+	case RTE_FLOW_ITEM_TYPE_GENEVE:
+		return MLX5_GRAPH_ARC_NODE_GENEVE;
+	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+		return MLX5_GRAPH_ARC_NODE_VXLAN_GPE;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int
+mlx5_flex_arc_in_eth(const struct rte_flow_item *item,
+		     struct rte_flow_error *error)
+{
+	const struct rte_flow_item_eth *spec = item->spec;
+	const struct rte_flow_item_eth *mask = item->mask;
+	struct rte_flow_item_eth eth = { .hdr.ether_type = RTE_BE16(0xFFFF) };
+
+	if (memcmp(mask, &eth, sizeof(struct rte_flow_item_eth))) {
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
+			 "invalid eth item mask");
+	}
+	return rte_be_to_cpu_16(spec->hdr.ether_type);
+}
+
+static int
+mlx5_flex_arc_in_udp(const struct rte_flow_item *item,
+		     struct rte_flow_error *error)
+{
+	const struct rte_flow_item_udp *spec = item->spec;
+	const struct rte_flow_item_udp *mask = item->mask;
+	struct rte_flow_item_udp udp = { .hdr.dst_port = RTE_BE16(0xFFFF) };
+
+	if (memcmp(mask, &udp, sizeof(struct rte_flow_item_udp))) {
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
+			 "invalid udp item mask");
+	}
+	return rte_be_to_cpu_16(spec->hdr.dst_port);
+}
+
+static int
+mlx5_flex_translate_arc_in(struct mlx5_hca_flex_attr *attr,
+			   const struct rte_flow_item_flex_conf *conf,
+			   struct mlx5_flex_parser_devx *devx,
+			   struct mlx5_flex_item *item,
+			   struct rte_flow_error *error)
+{
+	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
+	uint32_t i;
+
+	RTE_SET_USED(item);
+	if (conf->nb_inputs > attr->max_num_arc_in)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "too many input links");
+	for (i = 0; i < conf->nb_inputs; i++) {
+		struct mlx5_devx_graph_arc_attr *arc = node->in + i;
+		struct rte_flow_item_flex_link *link = conf->input_link + i;
+		const struct rte_flow_item *rte_item = &link->item;
+		int arc_type;
+		int ret;
+
+		if (!rte_item->spec || !rte_item->mask || rte_item->last)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "invalid flex item IN arc format");
+		arc_type = mlx5_flex_arc_type(rte_item->type, true);
+		if (arc_type < 0 || !(attr->node_in & RTE_BIT32(arc_type)))
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported flex item IN arc type");
+		arc->arc_parse_graph_node = arc_type;
+		arc->start_inner_tunnel = 0;
+		/*
+		 * Configure arc IN condition value. The value location depends
+		 * on protocol. Current FW version supports IP & UDP for IN
+		 * arcs only, and locations for these protocols are defined.
+		 * Add more protocols when available.
+		 */
+		switch (rte_item->type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			ret = mlx5_flex_arc_in_eth(rte_item, error);
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			ret = mlx5_flex_arc_in_udp(rte_item, error);
+			break;
+		default:
+			MLX5_ASSERT(false);
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported flex item IN arc type");
+		}
+		if (ret < 0)
+			return ret;
+		arc->compare_condition_value = (uint16_t)ret;
+	}
+	return 0;
+}
+
+static int
+mlx5_flex_translate_arc_out(struct mlx5_hca_flex_attr *attr,
+			    const struct rte_flow_item_flex_conf *conf,
+			    struct mlx5_flex_parser_devx *devx,
+			    struct mlx5_flex_item *item,
+			    struct rte_flow_error *error)
+{
+	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
+	bool is_tunnel = conf->tunnel == FLEX_TUNNEL_MODE_TUNNEL;
+	uint32_t i;
+
+	RTE_SET_USED(item);
+	if (conf->nb_outputs > attr->max_num_arc_out)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "too many output links");
+	for (i = 0; i < conf->nb_outputs; i++) {
+		struct mlx5_devx_graph_arc_attr *arc = node->out + i;
+		struct rte_flow_item_flex_link *link = conf->output_link + i;
+		const struct rte_flow_item *rte_item = &link->item;
+		int arc_type;
+
+		if (rte_item->spec || rte_item->mask || rte_item->last)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "flex node: invalid OUT arc format");
+		arc_type = mlx5_flex_arc_type(rte_item->type, false);
+		if (arc_type < 0 || !(attr->node_out & RTE_BIT32(arc_type)))
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported flex item OUT arc type");
+		arc->arc_parse_graph_node = arc_type;
+		arc->start_inner_tunnel = !!is_tunnel;
+		arc->compare_condition_value = link->next;
+	}
+	return 0;
+}
+
+/* Translate RTE flex item API configuration into flex parser settings. */
+static int
+mlx5_flex_translate_conf(struct rte_eth_dev *dev,
+			 const struct rte_flow_item_flex_conf *conf,
+			 struct mlx5_flex_parser_devx *devx,
+			 struct mlx5_flex_item *item,
+			 struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hca_flex_attr *attr = &priv->config.hca_attr.flex;
+	int ret;
+
+	ret = mlx5_flex_translate_length(attr, conf, devx, error);
+	if (ret)
+		return ret;
+	ret = mlx5_flex_translate_next(attr, conf, devx, error);
+	if (ret)
+		return ret;
+	ret = mlx5_flex_translate_sample(attr, conf, devx, item, error);
+	if (ret)
+		return ret;
+	ret = mlx5_flex_translate_arc_in(attr, conf, devx, item, error);
+	if (ret)
+		return ret;
+	ret = mlx5_flex_translate_arc_out(attr, conf, devx, item, error);
+	if (ret)
+		return ret;
+	return 0;
+}
+
 /**
  * Create the flex item with specified configuration over the Ethernet device.
  *
@@ -145,6 +986,8 @@ flow_dv_item_create(struct rte_eth_dev *dev,
 				   "too many flex items created on the port");
 		return NULL;
 	}
+	if (mlx5_flex_translate_conf(dev, conf, &devx_config, flex, error))
+		goto error;
 	ent = mlx5_list_register(priv->sh->flex_parsers_dv, &devx_config);
 	if (!ent) {
 		rte_flow_error_set(error, ENOMEM,
@@ -153,7 +996,6 @@ flow_dv_item_create(struct rte_eth_dev *dev,
 		goto error;
 	}
 	flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
-	RTE_SET_USED(conf);
 	/* Mark initialized flex item valid. */
 	__atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
 	return (struct rte_flow_item_flex_handle *)flex;
-- 
2.33.1
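
The sample register allocation above (mlx5_flex_alloc_sample() and the
"Gather all similar fields together" comment in mlx5_flex_translate_sample())
boils down to covering sorted bit intervals with 32-bit sample windows whose
base offset is aligned down to a 32-bit boundary. Below is a minimal
standalone sketch of that covering step; the function name, interval values
and register limit are illustrative only, not the PMD code:

#include <stdint.h>
#include <stdio.h>

#define REG_BITS 32

/*
 * Cover sorted, non-overlapping bit intervals [start[i], end[i]) with
 * 32-bit windows aligned down to a 32-bit boundary. Returns the number
 * of windows used, or -1 when max_regs would be exceeded. The start[]
 * array is consumed while intervals are covered.
 */
static int
cover_intervals(int32_t *start, const int32_t *end, int num,
		int32_t *reg_base, int max_regs)
{
	int regs = 0, i = 0;

	while (i < num) {
		int32_t base = start[i] & ~(REG_BITS - 1);
		int32_t limit = base + REG_BITS;

		if (regs >= max_regs)
			return -1;
		reg_base[regs++] = base;
		while (i < num && end[i] <= limit)
			i++;			/* interval fully covered */
		if (i < num && start[i] < limit)
			start[i] = limit;	/* keep only the uncovered tail */
	}
	return regs;
}

int
main(void)
{
	int32_t start[] = { 8, 40, 80 };	/* field starts, in bits */
	int32_t end[] = { 24, 72, 120 };	/* field ends, in bits */
	int32_t base[8];
	int i, num = cover_intervals(start, end, 3, base, 8);

	for (i = 0; i < num; i++)
		printf("sample window at bit offset %d\n", base[i]);
	return 0;
}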


^ permalink raw reply	[flat|nested] 22+ messages in thread

* [dpdk-dev] [PATCH v2 8/9] net/mlx5: translate flex item pattern into matcher
  2021-11-02  8:53 ` [dpdk-dev] [PATCH v2 0/9] net/mlx5: add flex item support Gregory Etelson
                     ` (6 preceding siblings ...)
  2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 7/9] net/mlx5: translate flex item configuration Gregory Etelson
@ 2021-11-02  8:53   ` Gregory Etelson
  2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 9/9] net/mlx5: handle flex item in flows Gregory Etelson
                     ` (2 subsequent siblings)
  10 siblings, 0 replies; 22+ messages in thread
From: Gregory Etelson @ 2021-11-02  8:53 UTC (permalink / raw)
  To: dev, getelson, viacheslavo; +Cc: matan

From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

The matcher is a steering engine entity that represents
the flow pattern to be matched by the hardware. In order
to match on the flex item pattern, the appropriate matcher
fields should be configured with values and masks
accordingly.

The flex item related matcher fields are an array of eight
32-bit fields matched against data captured by the sample
registers of the configured flex parser. One packet field
presented in the item pattern can be split between several
sample registers, and multiple fields can be combined into
a single sample register to optimize hardware resource
usage (the number of sample registers is limited),
depending on field modes, widths and offsets. The actual
mapping is complicated and controlled by special
translation data, built by the PMD on flex item creation.
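
For reference, a hypothetical application-side sketch of matching on a flex
item once this series is applied. The handle is assumed to come from an
earlier rte_flow_flex_item_create() call, and the 4-byte pattern/mask bytes
are placeholders whose meaning depends on the parser configuration given at
item creation time:

#include <stdint.h>
#include <rte_flow.h>

static struct rte_flow *
create_flex_flow(uint16_t port_id, struct rte_flow_item_flex_handle *handle,
		 struct rte_flow_error *error)
{
	static const uint8_t spec_bytes[4] = { 0x12, 0x34, 0x56, 0x78 };
	static const uint8_t mask_bytes[4] = { 0xff, 0xff, 0xff, 0xff };
	const struct rte_flow_item_flex flex_spec = {
		.handle = handle,
		.length = sizeof(spec_bytes),
		.pattern = spec_bytes,
	};
	const struct rte_flow_item_flex flex_mask = {
		.handle = handle,
		.length = sizeof(mask_bytes),
		.pattern = mask_bytes,
	};
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{
			.type = RTE_FLOW_ITEM_TYPE_FLEX,
			.spec = &flex_spec,
			.mask = &flex_mask,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* The PMD translates the flex spec/mask into misc4 sample matching. */
	return rte_flow_create(port_id, &attr, pattern, actions, error);
}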

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5.h           |   8 ++
 drivers/net/mlx5/mlx5_flow_flex.c | 223 ++++++++++++++++++++++++++++++
 2 files changed, 231 insertions(+)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 18dfa2fc7b..74af88ec19 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1853,6 +1853,14 @@ int flow_dv_item_release(struct rte_eth_dev *dev,
 		    struct rte_flow_error *error);
 int mlx5_flex_item_port_init(struct rte_eth_dev *dev);
 void mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev);
+void mlx5_flex_flow_translate_item(struct rte_eth_dev *dev, void *matcher,
+				   void *key, const struct rte_flow_item *item,
+				   bool is_inner);
+int mlx5_flex_acquire_index(struct rte_eth_dev *dev,
+			    struct rte_flow_item_flex_handle *handle,
+			    bool acquire);
+int mlx5_flex_release_index(struct rte_eth_dev *dev, int index);
+
 /* Flex parser list callbacks. */
 struct mlx5_list_entry *mlx5_flex_parser_create_cb(void *list_ctx, void *ctx);
 int mlx5_flex_parser_match_cb(void *list_ctx,
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index b4a9f1a537..bdfa383c45 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -113,6 +113,229 @@ mlx5_flex_free(struct mlx5_priv *priv, struct mlx5_flex_item *item)
 	}
 }
 
+static uint32_t
+mlx5_flex_get_bitfield(const struct rte_flow_item_flex *item,
+		       uint32_t pos, uint32_t width, uint32_t shift)
+{
+	const uint8_t *ptr = item->pattern + pos / CHAR_BIT;
+	uint32_t val, vbits;
+
+	/* Process the bitfield start byte. */
+	MLX5_ASSERT(width <= sizeof(uint32_t) * CHAR_BIT && width);
+	MLX5_ASSERT(width + shift <= sizeof(uint32_t) * CHAR_BIT);
+	if (item->length <= pos / CHAR_BIT)
+		return 0;
+	val = *ptr++ >> (pos % CHAR_BIT);
+	vbits = CHAR_BIT - pos % CHAR_BIT;
+	pos = (pos + vbits) / CHAR_BIT;
+	vbits = RTE_MIN(vbits, width);
+	val &= RTE_BIT32(vbits) - 1;
+	while (vbits < width && pos < item->length) {
+		uint32_t part = RTE_MIN(width - vbits, (uint32_t)CHAR_BIT);
+		uint32_t tmp = *ptr++;
+
+		pos++;
+		tmp &= RTE_BIT32(part) - 1;
+		val |= tmp << vbits;
+		vbits += part;
+	}
+	return rte_bswap32(val <<= shift);
+}
+
+#define SET_FP_MATCH_SAMPLE_ID(x, def, msk, val, sid) \
+	do { \
+		uint32_t tmp, out = (def); \
+		tmp = MLX5_GET(fte_match_set_misc4, misc4_v, \
+			       prog_sample_field_value_##x); \
+		tmp = (tmp & ~out) | (val); \
+		MLX5_SET(fte_match_set_misc4, misc4_v, \
+			 prog_sample_field_value_##x, tmp); \
+		tmp = MLX5_GET(fte_match_set_misc4, misc4_m, \
+			       prog_sample_field_value_##x); \
+		tmp = (tmp & ~out) | (msk); \
+		MLX5_SET(fte_match_set_misc4, misc4_m, \
+			 prog_sample_field_value_##x, tmp); \
+		tmp = tmp ? (sid) : 0; \
+		MLX5_SET(fte_match_set_misc4, misc4_v, \
+			 prog_sample_field_id_##x, tmp);\
+		MLX5_SET(fte_match_set_misc4, misc4_m, \
+			 prog_sample_field_id_##x, tmp); \
+	} while (0)
+
+__rte_always_inline static void
+mlx5_flex_set_match_sample(void *misc4_m, void *misc4_v,
+			   uint32_t def, uint32_t mask, uint32_t value,
+			   uint32_t sample_id, uint32_t id)
+{
+	switch (id) {
+	case 0:
+		SET_FP_MATCH_SAMPLE_ID(0, def, mask, value, sample_id);
+		break;
+	case 1:
+		SET_FP_MATCH_SAMPLE_ID(1, def, mask, value, sample_id);
+		break;
+	case 2:
+		SET_FP_MATCH_SAMPLE_ID(2, def, mask, value, sample_id);
+		break;
+	case 3:
+		SET_FP_MATCH_SAMPLE_ID(3, def, mask, value, sample_id);
+		break;
+	case 4:
+		SET_FP_MATCH_SAMPLE_ID(4, def, mask, value, sample_id);
+		break;
+	case 5:
+		SET_FP_MATCH_SAMPLE_ID(5, def, mask, value, sample_id);
+		break;
+	case 6:
+		SET_FP_MATCH_SAMPLE_ID(6, def, mask, value, sample_id);
+		break;
+	case 7:
+		SET_FP_MATCH_SAMPLE_ID(7, def, mask, value, sample_id);
+		break;
+	default:
+		MLX5_ASSERT(false);
+		break;
+	}
+#undef SET_FP_MATCH_SAMPLE_ID
+}
+/**
+ * Translate item pattern into matcher fields according to translation
+ * array.
+ *
+ * @param dev
+ *   Ethernet device to translate flex item on.
+ * @param[in, out] matcher
+ *   Flow matcher to configure.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ * @param[in] is_inner
+ *   Inner Flex Item (follows after tunnel header).
+ *
+ */
+void
+mlx5_flex_flow_translate_item(struct rte_eth_dev *dev,
+			      void *matcher, void *key,
+			      const struct rte_flow_item *item,
+			      bool is_inner)
+{
+#ifdef RTE_LIBRTE_MLX5_DEBUG
+	struct mlx5_priv *priv = dev->data->dev_private;
+#endif
+	const struct rte_flow_item_flex *spec, *mask;
+	void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
+				     misc_parameters_4);
+	void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
+	struct mlx5_flex_item *tp;
+	uint32_t i, pos = 0;
+
+	RTE_SET_USED(dev);
+	MLX5_ASSERT(item->spec && item->mask);
+	spec = item->spec;
+	mask = item->mask;
+	tp = (struct mlx5_flex_item *)spec->handle;
+	MLX5_ASSERT(mlx5_flex_index(priv, tp) >= 0);
+	for (i = 0; i < tp->mapnum; i++) {
+		struct mlx5_flex_pattern_field *map = tp->map + i;
+		uint32_t id = map->reg_id;
+		uint32_t def = (RTE_BIT64(map->width) - 1) << map->shift;
+		uint32_t val, msk;
+
+		/* Skip placeholders for DUMMY fields. */
+		if (id == MLX5_INVALID_SAMPLE_REG_ID) {
+			pos += map->width;
+			continue;
+		}
+		val = mlx5_flex_get_bitfield(spec, pos, map->width, map->shift);
+		msk = mlx5_flex_get_bitfield(mask, pos, map->width, map->shift);
+		MLX5_ASSERT(map->width);
+		MLX5_ASSERT(id < tp->devx_fp->num_samples);
+		if (tp->tunnel_mode == FLEX_TUNNEL_MODE_MULTI && is_inner) {
+			uint32_t num_samples = tp->devx_fp->num_samples / 2;
+
+			MLX5_ASSERT(tp->devx_fp->num_samples % 2 == 0);
+			MLX5_ASSERT(id < num_samples);
+			id += num_samples;
+		}
+		mlx5_flex_set_match_sample(misc4_m, misc4_v,
+					   def, msk & def, val & msk & def,
+					   tp->devx_fp->sample_ids[id], id);
+		pos += map->width;
+	}
+}
+
+/**
+ * Convert flex item handle (from the RTE flow) to flex item index on port.
+ * Optionally can increment flex item object reference count.
+ *
+ * @param dev
+ *   Ethernet device to acquire flex item on.
+ * @param[in] handle
+ *   Flow item handle from item spec.
+ * @param[in] acquire
+ *   If set - increment reference counter.
+ *
+ * @return
+ *   >=0 - index on success, a negative errno value otherwise
+ *         and rte_errno is set.
+ */
+int
+mlx5_flex_acquire_index(struct rte_eth_dev *dev,
+			struct rte_flow_item_flex_handle *handle,
+			bool acquire)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flex_item *flex = (struct mlx5_flex_item *)handle;
+	int ret = mlx5_flex_index(priv, flex);
+
+	if (ret < 0) {
+		errno = -EINVAL;
+		rte_errno = EINVAL;
+		return ret;
+	}
+	if (acquire)
+		__atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
+	return ret;
+}
+
+/**
+ * Release flex item index on port - decrements reference counter by index.
+ *
+ * @param dev
+ *   Ethernet device to release flex item on.
+ * @param[in] index
+ *   Flow item index.
+ *
+ * @return
+ *   0 - on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flex_release_index(struct rte_eth_dev *dev,
+			int index)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flex_item *flex;
+
+	if (index >= MLX5_PORT_FLEX_ITEM_NUM ||
+	    !(priv->flex_item_map & (1u << index))) {
+		errno = EINVAL;
+		rte_errno = -EINVAL;
+		return -EINVAL;
+	}
+	flex = priv->flex_item + index;
+	if (flex->refcnt <= 1) {
+		MLX5_ASSERT(false);
+		errno = EINVAL;
+		rte_errno = -EINVAL;
+		return -EINVAL;
+	}
+	__atomic_sub_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
+	return 0;
+}
+
 /*
  * Calculate largest mask value for a given shift.
  *
-- 
2.33.1


^ permalink raw reply	[flat|nested] 22+ messages in thread

* [dpdk-dev] [PATCH v2 9/9] net/mlx5: handle flex item in flows
  2021-11-02  8:53 ` [dpdk-dev] [PATCH v2 0/9] net/mlx5: add flex item support Gregory Etelson
                     ` (7 preceding siblings ...)
  2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 8/9] net/mlx5: translate flex item pattern into matcher Gregory Etelson
@ 2021-11-02  8:53   ` Gregory Etelson
  2021-11-03 12:57   ` [dpdk-dev] [PATCH v2 0/9] net/mlx5: add flex item support Raslan Darawsheh
  2021-11-03 18:24   ` Ferruh Yigit
  10 siblings, 0 replies; 22+ messages in thread
From: Gregory Etelson @ 2021-11-02  8:53 UTC (permalink / raw)
  To: dev, getelson, viacheslavo; +Cc: matan

Provide flex item recognition, validation and translation
in flow patterns. Track flex item references.
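
The reference tracking added here keeps a bitmask of flex item indexes in
the flow handle, takes one reference per item when the flow is translated,
and drops the references when the flow is destroyed. A simplified
standalone sketch of that idea (the names and counters are illustrative,
not the PMD structures):

#include <stdint.h>
#include <stdio.h>

#define MAX_FLEX_ITEMS 32

static uint32_t refcnt[MAX_FLEX_ITEMS];	/* per-port item reference counters */

static void
flow_ref_flex(uint32_t *flow_mask, int index)
{
	/* Count each flex item only once per flow. */
	if (!(*flow_mask & (UINT32_C(1) << index))) {
		refcnt[index]++;
		*flow_mask |= UINT32_C(1) << index;
	}
}

static void
flow_unref_all(uint32_t *flow_mask)
{
	while (*flow_mask) {
		int index = __builtin_ctz(*flow_mask);	/* like rte_bsf32() */

		refcnt[index]--;
		*flow_mask &= ~(UINT32_C(1) << index);
	}
}

int
main(void)
{
	uint32_t flow_mask = 0;

	flow_ref_flex(&flow_mask, 3);	/* outer flex item */
	flow_ref_flex(&flow_mask, 3);	/* same item again - counted once */
	flow_unref_all(&flow_mask);	/* on flow destroy */
	printf("refcnt[3] = %u\n", refcnt[3]);
	return 0;
}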

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Reviewed-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.h    |   9 ++-
 drivers/net/mlx5/mlx5_flow_dv.c | 124 ++++++++++++++++++++++++++++++++
 2 files changed, 132 insertions(+), 1 deletion(-)

diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 43399471ec..ce37c4a197 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -179,6 +179,11 @@ enum mlx5_feature_name {
 /* Conntrack item. */
 #define MLX5_FLOW_LAYER_ASO_CT (UINT64_C(1) << 36)
 
+/* Flex item */
+#define MLX5_FLOW_ITEM_OUTER_FLEX (UINT64_C(1) << 36)
+#define MLX5_FLOW_ITEM_INNER_FLEX (UINT64_C(1) << 37)
+#define MLX5_FLOW_ITEM_FLEX_TUNNEL (UINT64_C(1) << 38)
+
 /* Outer Masks. */
 #define MLX5_FLOW_LAYER_OUTER_L3 \
 	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
@@ -193,7 +198,8 @@ enum mlx5_feature_name {
 	(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
 	 MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
 	 MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
-	 MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP)
+	 MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \
+	 MLX5_FLOW_ITEM_FLEX_TUNNEL)
 
 /* Inner Masks. */
 #define MLX5_FLOW_LAYER_INNER_L3 \
@@ -692,6 +698,7 @@ struct mlx5_flow_handle {
 	uint32_t is_meter_flow_id:1; /**< Indate if flow_id is for meter. */
 	uint32_t mark:1; /**< Metadate rxq mark flag. */
 	uint32_t fate_action:3; /**< Fate action type. */
+	uint32_t flex_item; /**< referenced Flex Item bitmask. */
 	union {
 		uint32_t rix_hrxq; /**< Hash Rx queue object index. */
 		uint32_t rix_jump; /**< Index to the jump action resource. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 9d60c9154f..85a504ca2c 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -6687,6 +6687,88 @@ flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
 	return 0;
 }
 
+static int
+flow_dv_validate_item_flex(struct rte_eth_dev *dev,
+			   const struct rte_flow_item *item,
+			   uint64_t item_flags,
+			   uint64_t *last_item,
+			   bool is_inner,
+			   struct rte_flow_error *error)
+{
+	const struct rte_flow_item_flex *flow_spec = item->spec;
+	const struct rte_flow_item_flex *flow_mask = item->mask;
+	struct mlx5_flex_item *flex;
+
+	if (!flow_spec)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					  "flex flow item spec cannot be NULL");
+	if (!flow_mask)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					  "flex flow item mask cannot be NULL");
+	if (item->last)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					  "flex flow item last not supported");
+	if (mlx5_flex_acquire_index(dev, flow_spec->handle, false) < 0)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					  "invalid flex flow item handle");
+	flex = (struct mlx5_flex_item *)flow_spec->handle;
+	switch (flex->tunnel_mode) {
+	case FLEX_TUNNEL_MODE_SINGLE:
+		if (item_flags &
+		    (MLX5_FLOW_ITEM_OUTER_FLEX | MLX5_FLOW_ITEM_INNER_FLEX))
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   NULL, "multiple flex items not supported");
+		break;
+	case FLEX_TUNNEL_MODE_OUTER:
+		if (is_inner)
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   NULL, "inner flex item was not configured");
+		if (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX)
+			rte_flow_error_set(error, ENOTSUP,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   NULL, "multiple flex items not supported");
+		break;
+	case FLEX_TUNNEL_MODE_INNER:
+		if (!is_inner)
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   NULL, "outer flex item was not configured");
+		if (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   NULL, "multiple flex items not supported");
+		break;
+	case FLEX_TUNNEL_MODE_MULTI:
+		if ((is_inner && (item_flags & MLX5_FLOW_ITEM_INNER_FLEX)) ||
+		    (!is_inner && (item_flags & MLX5_FLOW_ITEM_OUTER_FLEX))) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   NULL, "multiple flex items not supported");
+		}
+		break;
+	case FLEX_TUNNEL_MODE_TUNNEL:
+		if (is_inner || (item_flags & MLX5_FLOW_ITEM_FLEX_TUNNEL))
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   NULL, "multiple flex tunnel items not supported");
+		break;
+	default:
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM,
+				   NULL, "invalid flex item configuration");
+	}
+	*last_item = flex->tunnel_mode == FLEX_TUNNEL_MODE_TUNNEL ?
+		     MLX5_FLOW_ITEM_FLEX_TUNNEL : is_inner ?
+		     MLX5_FLOW_ITEM_INNER_FLEX : MLX5_FLOW_ITEM_OUTER_FLEX;
+	return 0;
+}
+
 /**
  * Internal validation function. For validating both actions and items.
  *
@@ -7125,6 +7207,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			 * list it here as a supported type
 			 */
 			break;
+		case RTE_FLOW_ITEM_TYPE_FLEX:
+			ret = flow_dv_validate_item_flex(dev, items, item_flags,
+							 &last_item,
+							 tunnel != 0, error);
+			if (ret < 0)
+				return ret;
+			break;
 		default:
 			return rte_flow_error_set(error, ENOTSUP,
 						  RTE_FLOW_ERROR_TYPE_ITEM,
@@ -9995,6 +10084,27 @@ flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
 			       reg_value, reg_mask);
 }
 
+static void
+flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key,
+			    const struct rte_flow_item *item,
+			    struct mlx5_flow *dev_flow, bool is_inner)
+{
+	const struct rte_flow_item_flex *spec =
+		(const struct rte_flow_item_flex *)item->spec;
+	int index = mlx5_flex_acquire_index(dev, spec->handle, false);
+
+	MLX5_ASSERT(index >= 0 && index <= (int)(sizeof(uint32_t) * CHAR_BIT));
+	if (index < 0)
+		return;
+	if (!(dev_flow->handle->flex_item & RTE_BIT32(index))) {
+		/* Don't count both inner and outer flex items in one rule. */
+		if (mlx5_flex_acquire_index(dev, spec->handle, true) != index)
+			MLX5_ASSERT(false);
+		dev_flow->handle->flex_item |= RTE_BIT32(index);
+	}
+	mlx5_flex_flow_translate_item(dev, matcher, key, item, is_inner);
+}
+
 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
 
 #define HEADER_IS_ZERO(match_criteria, headers)				     \
@@ -13431,6 +13541,13 @@ flow_dv_translate(struct rte_eth_dev *dev,
 			flow_dv_translate_item_aso_ct(dev, match_mask,
 						      match_value, items);
 			break;
+		case RTE_FLOW_ITEM_TYPE_FLEX:
+			flow_dv_translate_item_flex(dev, match_mask,
+						    match_value, items,
+						    dev_flow, tunnel != 0);
+			last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
+				    MLX5_FLOW_ITEM_OUTER_FLEX;
+			break;
 		default:
 			break;
 		}
@@ -14310,6 +14427,12 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 		if (!dev_handle)
 			return;
 		flow->dev_handles = dev_handle->next.next;
+		while (dev_handle->flex_item) {
+			int index = rte_bsf32(dev_handle->flex_item);
+
+			mlx5_flex_release_index(dev, index);
+			dev_handle->flex_item &= ~RTE_BIT32(index);
+		}
 		if (dev_handle->dvh.matcher)
 			flow_dv_matcher_release(dev, dev_handle);
 		if (dev_handle->dvh.rix_sample)
@@ -17968,5 +18091,6 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
 	.item_create = flow_dv_item_create,
 	.item_release = flow_dv_item_release,
 };
+
 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
 
-- 
2.33.1


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [dpdk-dev] [PATCH v2 0/9] net/mlx5: add flex item support
  2021-11-02  8:53 ` [dpdk-dev] [PATCH v2 0/9] net/mlx5: add flex item support Gregory Etelson
                     ` (8 preceding siblings ...)
  2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 9/9] net/mlx5: handle flex item in flows Gregory Etelson
@ 2021-11-03 12:57   ` Raslan Darawsheh
  2021-11-03 18:24   ` Ferruh Yigit
  10 siblings, 0 replies; 22+ messages in thread
From: Raslan Darawsheh @ 2021-11-03 12:57 UTC (permalink / raw)
  To: Gregory Etelson, dev, Gregory Etelson, Slava Ovsiienko; +Cc: Matan Azrad

Hi,

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Gregory Etelson
> Sent: Tuesday, November 2, 2021 10:54 AM
> To: dev@dpdk.org; Gregory Etelson <getelson@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>
> Cc: Matan Azrad <matan@nvidia.com>
> Subject: [dpdk-dev] [PATCH v2 0/9] net/mlx5: add flex item support
> 
> Gregory Etelson (4):
>   common/mlx5: extend flex parser capabilities
>   common/mlx5: fix flex parser DevX creation routine
>   net/mlx5: add flex parser DevX object management
>   net/mlx5: handle flex item in flows
> 
> Viacheslav Ovsiienko (5):
>   common/mlx5: refactor HCA attributes query
>   net/mlx5: update eCPRI flex parser structures
>   net/mlx5: add flex item API
>   net/mlx5: translate flex item configuration
>   net/mlx5: translate flex item pattern into matcher
> 
>  drivers/common/mlx5/mlx5_devx_cmds.c |  239 +++--
>  drivers/common/mlx5/mlx5_devx_cmds.h |   65 +-
>  drivers/common/mlx5/mlx5_prm.h       |   50 +-
>  drivers/net/mlx5/linux/mlx5_os.c     |   14 +
>  drivers/net/mlx5/meson.build         |    1 +
>  drivers/net/mlx5/mlx5.c              |   15 +-
>  drivers/net/mlx5/mlx5.h              |   79 +-
>  drivers/net/mlx5/mlx5_flow.c         |   49 +
>  drivers/net/mlx5/mlx5_flow.h         |   27 +-
>  drivers/net/mlx5/mlx5_flow_dv.c      |  127 ++-
>  drivers/net/mlx5/mlx5_flow_flex.c    | 1373
> ++++++++++++++++++++++++++
>  11 files changed, 1913 insertions(+), 126 deletions(-)  create mode 100644
> drivers/net/mlx5/mlx5_flow_flex.c
> 
> --
> v2: rebase to updated master-net-mlx.
> --
> 2.33.1

Series applied to next-net-mlx,

Kindest regards,
Raslan Darawsheh

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [dpdk-dev] [PATCH v2 0/9] net/mlx5: add flex item support
  2021-11-02  8:53 ` [dpdk-dev] [PATCH v2 0/9] net/mlx5: add flex item support Gregory Etelson
                     ` (9 preceding siblings ...)
  2021-11-03 12:57   ` [dpdk-dev] [PATCH v2 0/9] net/mlx5: add flex item support Raslan Darawsheh
@ 2021-11-03 18:24   ` Ferruh Yigit
  10 siblings, 0 replies; 22+ messages in thread
From: Ferruh Yigit @ 2021-11-03 18:24 UTC (permalink / raw)
  To: Gregory Etelson, dev, viacheslavo; +Cc: matan, Raslan Darawsheh

On 11/2/2021 8:53 AM, Gregory Etelson wrote:
> Gregory Etelson (4):
>    common/mlx5: extend flex parser capabilities
>    common/mlx5: fix flex parser DevX creation routine
>    net/mlx5: add flex parser DevX object management
>    net/mlx5: handle flex item in flows
> 
> Viacheslav Ovsiienko (5):
>    common/mlx5: refactor HCA attributes query
>    net/mlx5: update eCPRI flex parser structures
>    net/mlx5: add flex item API
>    net/mlx5: translate flex item configuration
>    net/mlx5: translate flex item pattern into matcher
> 

Hi Gregory,

Can you please check following doc error [1] and build error [2]:

[1]
$ ./devtools/check-doc-vs-code.sh
rte_flow doc out of sync for mlx5
         item flex



[2]
                  from ../lib/eal/x86/include/rte_rwlock.h:13,
                  from ../lib/eal/include/rte_fbarray.h:40,
                  from ../lib/eal/include/rte_memory.h:25,
                  from ../lib/eal/include/rte_malloc.h:17,
                  from ../drivers/net/mlx5/mlx5_flow_flex.c:4:
../drivers/net/mlx5/mlx5_flow_flex.c: In function ‘mlx5_flex_flow_translate_item’:
../drivers/net/mlx5/mlx5_flow_flex.c:240:37: error: ‘priv’ undeclared (first use in this function)
   240 |         MLX5_ASSERT(mlx5_flex_index(priv, tp) >= 0);
       |                                     ^~~~
../lib/eal/include/rte_branch_prediction.h:38:45: note: in definition of macro ‘unlikely’
    38 | #define unlikely(x)     __builtin_expect(!!(x), 0)
       |                                             ^
../lib/eal/include/rte_debug.h:47:25: note: in expansion of macro ‘RTE_VERIFY’
    47 | #define RTE_ASSERT(exp) RTE_VERIFY(exp)
       |                         ^~~~~~~~~~
../drivers/common/mlx5/mlx5_common.h:104:26: note: in expansion of macro ‘RTE_ASSERT’
   104 | #define MLX5_ASSERT(exp) RTE_ASSERT(exp)
       |                          ^~~~~~~~~~
../drivers/net/mlx5/mlx5_flow_flex.c:240:9: note: in expansion of macro ‘MLX5_ASSERT’
   240 |         MLX5_ASSERT(mlx5_flex_index(priv, tp) >= 0);
       |         ^~~~~~~~~~~
../drivers/net/mlx5/mlx5_flow_flex.c:240:37: note: each undeclared identifier is reported only once for each function it appears in
   240 |         MLX5_ASSERT(mlx5_flex_index(priv, tp) >= 0);
       |                                     ^~~~
../lib/eal/include/rte_branch_prediction.h:38:45: note: in definition of macro ‘unlikely’
    38 | #define unlikely(x)     __builtin_expect(!!(x), 0)
       |                                             ^
../lib/eal/include/rte_debug.h:47:25: note: in expansion of macro ‘RTE_VERIFY’
    47 | #define RTE_ASSERT(exp) RTE_VERIFY(exp)
       |                         ^~~~~~~~~~
../drivers/common/mlx5/mlx5_common.h:104:26: note: in expansion of macro ‘RTE_ASSERT’
   104 | #define MLX5_ASSERT(exp) RTE_ASSERT(exp)
       |                          ^~~~~~~~~~
../drivers/net/mlx5/mlx5_flow_flex.c:240:9: note: in expansion of macro ‘MLX5_ASSERT’
   240 |         MLX5_ASSERT(mlx5_flex_index(priv, tp) >= 0);
       |         ^~~~~~~~~~~

^ permalink raw reply	[flat|nested] 22+ messages in thread

end of thread, other threads:[~2021-11-03 18:24 UTC | newest]

Thread overview: 22+ messages (download: mbox.gz / follow: Atom feed)
2021-11-01  9:15 [dpdk-dev] [PATCH 0/9] net/mlx5: add flex item support Gregory Etelson
2021-11-01  9:15 ` [dpdk-dev] [PATCH 1/9] common/mlx5: refactor HCA attributes query Gregory Etelson
2021-11-01  9:15 ` [dpdk-dev] [PATCH 2/9] common/mlx5: extend flex parser capabilities Gregory Etelson
2021-11-01  9:15 ` [dpdk-dev] [PATCH 3/9] common/mlx5: fix flex parser DevX creation routine Gregory Etelson
2021-11-01  9:15 ` [dpdk-dev] [PATCH 4/9] net/mlx5: update eCPRI flex parser structures Gregory Etelson
2021-11-01  9:15 ` [dpdk-dev] [PATCH 5/9] net/mlx5: add flex item API Gregory Etelson
2021-11-01  9:15 ` [dpdk-dev] [PATCH 6/9] net/mlx5: add flex parser DevX object management Gregory Etelson
2021-11-01  9:15 ` [dpdk-dev] [PATCH 7/9] net/mlx5: translate flex item configuration Gregory Etelson
2021-11-01  9:15 ` [dpdk-dev] [PATCH 8/9] net/mlx5: translate flex item pattern into matcher Gregory Etelson
2021-11-01  9:15 ` [dpdk-dev] [PATCH 9/9] net/mlx5: handle flex item in flows Gregory Etelson
2021-11-02  8:53 ` [dpdk-dev] [PATCH v2 0/9] net/mlx5: add flex item support Gregory Etelson
2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 1/9] common/mlx5: refactor HCA attributes query Gregory Etelson
2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 2/9] common/mlx5: extend flex parser capabilities Gregory Etelson
2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 3/9] common/mlx5: fix flex parser DevX creation routine Gregory Etelson
2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 4/9] net/mlx5: update eCPRI flex parser structures Gregory Etelson
2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 5/9] net/mlx5: add flex item API Gregory Etelson
2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 6/9] net/mlx5: add flex parser DevX object management Gregory Etelson
2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 7/9] net/mlx5: translate flex item configuration Gregory Etelson
2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 8/9] net/mlx5: translate flex item pattern into matcher Gregory Etelson
2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 9/9] net/mlx5: handle flex item in flows Gregory Etelson
2021-11-03 12:57   ` [dpdk-dev] [PATCH v2 0/9] net/mlx5: add flex item support Raslan Darawsheh
2021-11-03 18:24   ` Ferruh Yigit
