DPDK patches and discussions
* [PATCH v1 0/6] add flex item implementation
@ 2023-02-15 11:52 Rongwei Liu
  2023-02-15 11:52 ` [PATCH v1 1/6] net/mlx5: enable hws flex item create Rongwei Liu
                   ` (5 more replies)
  0 siblings, 6 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-15 11:52 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland

Implement flex item matching and modify field support in HWS (hardware steering).
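
For context, a minimal usage sketch (not part of this series) of how an
application could create a flex item and reference it from a HWS pattern
template. The configuration contents, sample bytes, port id and variable
names are placeholders, and the fragment assumes <rte_flow.h>:

  uint16_t port_id = 0;                         /* placeholder */
  struct rte_flow_error err;
  struct rte_flow_item_flex_conf conf = { 0 };  /* header layout filled by app */
  struct rte_flow_item_flex_handle *handle =
          rte_flow_flex_item_create(port_id, &conf, &err);
  const uint8_t sample[4] = { 0x01, 0x02, 0x03, 0x04 };
  struct rte_flow_item_flex flex_spec = {
          .handle = handle, .length = sizeof(sample), .pattern = sample,
  };
  struct rte_flow_item pattern[] = {
          { .type = RTE_FLOW_ITEM_TYPE_ETH },
          { .type = RTE_FLOW_ITEM_TYPE_FLEX,
            .spec = &flex_spec, .mask = &flex_spec },
          { .type = RTE_FLOW_ITEM_TYPE_END },
  };
  struct rte_flow_pattern_template_attr pt_attr = { .ingress = 1 };
  struct rte_flow_pattern_template *pt =
          rte_flow_pattern_template_create(port_id, &pt_attr, pattern, &err);
  /* "pt" is then combined with an actions template and a template table. */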

Rongwei Liu (6):
  net/mlx5: enable hws flex item create
  net/mlx5: add IPv6 protocol as flex item input
  net/mlx5/hws: add hws flex item matching support
  net/mlx5: add flex item modify field implementation
  net/mlx5: return error for sws modify field
  doc/mlx5: update mlx5 doc

 doc/guides/nics/mlx5.rst              |   4 +-
 drivers/common/mlx5/mlx5_prm.h        |   1 +
 drivers/net/mlx5/hws/mlx5dr_definer.c |  83 ++++++++++++
 drivers/net/mlx5/linux/mlx5_os.c      |  27 ++--
 drivers/net/mlx5/mlx5.c               |   2 +-
 drivers/net/mlx5/mlx5.h               |   6 +
 drivers/net/mlx5/mlx5_flow.h          |   4 +
 drivers/net/mlx5/mlx5_flow_dv.c       | 186 +++++++++++++++++++++++---
 drivers/net/mlx5/mlx5_flow_flex.c     | 135 ++++++++++++++++---
 drivers/net/mlx5/mlx5_flow_hw.c       |  64 ++++++++-
 10 files changed, 460 insertions(+), 52 deletions(-)

-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v1 1/6] net/mlx5: enable hws flex item create
  2023-02-15 11:52 [PATCH v1 0/6] add flex item implementation Rongwei Liu
@ 2023-02-15 11:52 ` Rongwei Liu
  2023-02-15 11:52 ` [PATCH v1 2/6] net/mlx5: add IPv6 protocol as flex item input Rongwei Liu
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-15 11:52 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland

Enable flex item creation and destruction when dv_flow_en=2 (HW steering) is configured.
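
For reference, HW steering is selected with the dv_flow_en=2 device
argument; for example (the PCI address is a placeholder):

  dpdk-testpmd -a <PCI_BDF>,dv_flow_en=2 -- -i

With this patch applied, rte_flow_flex_item_create() and
rte_flow_flex_item_release() become functional on a port probed in
this mode.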

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.c | 27 +++++++++++++++------------
 drivers/net/mlx5/mlx5_flow_hw.c  |  2 ++
 2 files changed, 17 insertions(+), 12 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index a71474c90a..f5b3edea99 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -474,10 +474,20 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 	err = mlx5_alloc_table_hash_list(priv);
 	if (err)
 		goto error;
-	if (priv->sh->config.dv_flow_en == 2)
-		return 0;
 	/* The resources below are only valid with DV support. */
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	/* Init shared flex parsers list, no need lcore_share */
+	snprintf(s, sizeof(s), "%s_flex_parsers_list", sh->ibdev_name);
+	sh->flex_parsers_dv = mlx5_list_create(s, sh, false,
+					       mlx5_flex_parser_create_cb,
+					       mlx5_flex_parser_match_cb,
+					       mlx5_flex_parser_remove_cb,
+					       mlx5_flex_parser_clone_cb,
+					       mlx5_flex_parser_clone_free_cb);
+	if (!sh->flex_parsers_dv)
+		goto error;
+	if (priv->sh->config.dv_flow_en == 2)
+		return 0;
 	/* Init port id action list. */
 	snprintf(s, sizeof(s), "%s_port_id_action_list", sh->ibdev_name);
 	sh->port_id_action_list = mlx5_list_create(s, sh, true,
@@ -518,16 +528,9 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 					      flow_dv_dest_array_clone_free_cb);
 	if (!sh->dest_array_list)
 		goto error;
-	/* Init shared flex parsers list, no need lcore_share */
-	snprintf(s, sizeof(s), "%s_flex_parsers_list", sh->ibdev_name);
-	sh->flex_parsers_dv = mlx5_list_create(s, sh, false,
-					       mlx5_flex_parser_create_cb,
-					       mlx5_flex_parser_match_cb,
-					       mlx5_flex_parser_remove_cb,
-					       mlx5_flex_parser_clone_cb,
-					       mlx5_flex_parser_clone_free_cb);
-	if (!sh->flex_parsers_dv)
-		goto error;
+#else
+	if (priv->sh->config.dv_flow_en == 2)
+		return 0;
 #endif
 #ifdef HAVE_MLX5DV_DR
 	void *domain;
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 6799b8a89f..b1d5b47900 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -8361,6 +8361,8 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
 	.query = flow_hw_query,
 	.get_aged_flows = flow_hw_get_aged_flows,
 	.get_q_aged_flows = flow_hw_get_q_aged_flows,
+	.item_create = flow_dv_item_create,
+	.item_release = flow_dv_item_release,
 };
 
 /**
-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v1 2/6] net/mlx5: add IPv6 protocol as flex item input
  2023-02-15 11:52 [PATCH v1 0/6] add flex item implementation Rongwei Liu
  2023-02-15 11:52 ` [PATCH v1 1/6] net/mlx5: enable hws flex item create Rongwei Liu
@ 2023-02-15 11:52 ` Rongwei Liu
  2023-02-15 11:52 ` [PATCH v1 3/6] net/mlx5/hws: add hws flex item matching support Rongwei Liu
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-15 11:52 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland

Support the IPv6 protocol as a new flex item input link.
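
A hedged sketch of how the new input link could be expressed in the flex
item configuration (the protocol value 0x3c and the variable names are
illustrative, not from this patch); note the mask must carry only a
fully set hdr.proto, as enforced by the check below:

  static const struct rte_flow_item_ipv6 ipv6_spec = { .hdr.proto = 0x3c };
  static const struct rte_flow_item_ipv6 ipv6_mask = { .hdr.proto = 0xff };
  struct rte_flow_item_flex_link input = {
          .item = {
                  .type = RTE_FLOW_ITEM_TYPE_IPV6,
                  .spec = &ipv6_spec,
                  .mask = &ipv6_mask,   /* only hdr.proto set in the mask */
          },
  };
  /* conf.input_link = &input; conf.nb_inputs = 1; */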

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow_flex.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index 35f2a9923d..24b7226ee6 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -1050,6 +1050,22 @@ mlx5_flex_arc_in_udp(const struct rte_flow_item *item,
 	return rte_be_to_cpu_16(spec->hdr.dst_port);
 }
 
+static int
+mlx5_flex_arc_in_ipv6(const struct rte_flow_item *item,
+		      struct rte_flow_error *error)
+{
+	const struct rte_flow_item_ipv6 *spec = item->spec;
+	const struct rte_flow_item_ipv6 *mask = item->mask;
+	struct rte_flow_item_ipv6 ip = { .hdr.proto = 0xff };
+
+	if (memcmp(mask, &ip, sizeof(struct rte_flow_item_ipv6))) {
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
+			 "invalid ipv6 item mask, full mask is desired");
+	}
+	return spec->hdr.proto;
+}
+
 static int
 mlx5_flex_translate_arc_in(struct mlx5_hca_flex_attr *attr,
 			   const struct rte_flow_item_flex_conf *conf,
@@ -1096,6 +1112,9 @@ mlx5_flex_translate_arc_in(struct mlx5_hca_flex_attr *attr,
 		case RTE_FLOW_ITEM_TYPE_UDP:
 			ret = mlx5_flex_arc_in_udp(rte_item, error);
 			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ret = mlx5_flex_arc_in_ipv6(rte_item, error);
+			break;
 		default:
 			MLX5_ASSERT(false);
 			return rte_flow_error_set
-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v1 3/6] net/mlx5/hws: add hws flex item matching support
  2023-02-15 11:52 [PATCH v1 0/6] add flex item implementation Rongwei Liu
  2023-02-15 11:52 ` [PATCH v1 1/6] net/mlx5: enable hws flex item create Rongwei Liu
  2023-02-15 11:52 ` [PATCH v1 2/6] net/mlx5: add IPv6 protocol as flex item input Rongwei Liu
@ 2023-02-15 11:52 ` Rongwei Liu
  2023-02-15 11:52 ` [PATCH v1 4/6] net/mlx5: add flex item modify field implementation Rongwei Liu
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-15 11:52 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland, Alex Vesker

Support flex item matching in HWS; the syntax follows SWS exactly.

The flex item must be created in advance and follows the current
JSON mapping logic.
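
As in SWS, the spec and mask of RTE_FLOW_ITEM_TYPE_FLEX carry the raw
header bytes laid out by the flex item mapping. A minimal illustrative
spec/mask pair matching only the first byte of a 4-byte flex header
("handle" and the byte values are placeholders):

  static const uint8_t spec_bytes[4] = { 0x2a, 0x00, 0x00, 0x00 };
  static const uint8_t mask_bytes[4] = { 0xff, 0x00, 0x00, 0x00 };
  struct rte_flow_item_flex flex_spec = {
          .handle = handle, .length = sizeof(spec_bytes), .pattern = spec_bytes,
  };
  struct rte_flow_item_flex flex_mask = {
          .handle = handle, .length = sizeof(mask_bytes), .pattern = mask_bytes,
  };
  struct rte_flow_item item = {
          .type = RTE_FLOW_ITEM_TYPE_FLEX,
          .spec = &flex_spec,
          .mask = &flex_mask,
  };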

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Reviewed-by: Alex Vesker <valex@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/hws/mlx5dr_definer.c |  83 ++++++++++++++++++
 drivers/net/mlx5/mlx5.c               |   2 +-
 drivers/net/mlx5/mlx5.h               |   6 ++
 drivers/net/mlx5/mlx5_flow.h          |   1 +
 drivers/net/mlx5/mlx5_flow_dv.c       |   2 +-
 drivers/net/mlx5/mlx5_flow_flex.c     | 116 ++++++++++++++++++++++----
 drivers/net/mlx5/mlx5_flow_hw.c       |  48 ++++++++++-
 7 files changed, 239 insertions(+), 19 deletions(-)

diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index ce7cf0504d..29df6823dd 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -309,6 +309,43 @@ mlx5dr_definer_ipv6_routing_ext_set(struct mlx5dr_definer_fc *fc,
 	DR_SET(tag, val, fc->byte_off, 0, fc->bit_mask);
 }
 
+static void
+mlx5dr_definer_flex_parser_set(struct mlx5dr_definer_fc *fc,
+			       const void *item,
+			       uint8_t *tag, bool is_inner)
+{
+	const struct rte_flow_item_flex *flex = item;
+	uint32_t byte_off, val, idx;
+	int ret;
+
+	val = 0;
+	byte_off = MLX5_BYTE_OFF(definer_hl, flex_parser.flex_parser_0);
+	idx = fc->fname - MLX5DR_DEFINER_FNAME_FLEX_PARSER_0;
+	byte_off -= idx * sizeof(uint32_t);
+	ret = mlx5_flex_get_parser_value_per_byte_off(flex, flex->handle, byte_off,
+						      false, is_inner, &val);
+	if (ret == -1 || !val)
+		return;
+
+	DR_SET(tag, val, fc->byte_off, 0, fc->bit_mask);
+}
+
+static void
+mlx5dr_definer_flex_parser_inner_set(struct mlx5dr_definer_fc *fc,
+				     const void *item,
+				     uint8_t *tag)
+{
+	mlx5dr_definer_flex_parser_set(fc, item, tag, true);
+}
+
+static void
+mlx5dr_definer_flex_parser_outer_set(struct mlx5dr_definer_fc *fc,
+				     const void *item,
+				     uint8_t *tag)
+{
+	mlx5dr_definer_flex_parser_set(fc, item, tag, false);
+}
+
 static void
 mlx5dr_definer_gre_key_set(struct mlx5dr_definer_fc *fc,
 			   const void *item_spec,
@@ -1750,6 +1787,47 @@ mlx5dr_definer_check_item_range_supp(struct rte_flow_item *item)
 	}
 }
 
+static int
+mlx5dr_definer_conv_item_flex_parser(struct mlx5dr_definer_conv_data *cd,
+				     struct rte_flow_item *item,
+				     int item_idx)
+{
+	uint32_t base_off = MLX5_BYTE_OFF(definer_hl, flex_parser.flex_parser_0);
+	const struct rte_flow_item_flex *v, *m;
+	enum mlx5dr_definer_fname fname;
+	struct mlx5dr_definer_fc *fc;
+	uint32_t i, mask, byte_off;
+	bool is_inner = cd->tunnel;
+	int ret;
+
+	m = item->mask;
+	v = item->spec;
+	mask = 0;
+	for (i = 0; i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
+		byte_off = base_off - i * sizeof(uint32_t);
+		ret = mlx5_flex_get_parser_value_per_byte_off(m, v->handle, byte_off,
+							      true, is_inner, &mask);
+		if (ret == -1) {
+			rte_errno = EINVAL;
+			return rte_errno;
+		}
+
+		if (!mask)
+			continue;
+
+		fname = MLX5DR_DEFINER_FNAME_FLEX_PARSER_0;
+		fname += (enum mlx5dr_definer_fname)i;
+		fc = &cd->fc[fname];
+		fc->byte_off = byte_off;
+		fc->item_idx = item_idx;
+		fc->tag_set = cd->tunnel ? &mlx5dr_definer_flex_parser_inner_set :
+					   &mlx5dr_definer_flex_parser_outer_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		fc->bit_mask = mask;
+	}
+	return 0;
+}
+
 static int
 mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 				struct mlx5dr_match_template *mt,
@@ -1877,6 +1955,11 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 			item_flags |= cd.tunnel ? MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT :
 						  MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT;
 			break;
+		case RTE_FLOW_ITEM_TYPE_FLEX:
+			ret = mlx5dr_definer_conv_item_flex_parser(&cd, items, i);
+			item_flags |= cd.tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
+						  MLX5_FLOW_ITEM_OUTER_FLEX;
+			break;
 		default:
 			DR_LOG(ERR, "Unsupported item type %d", items->type);
 			rte_errno = ENOTSUP;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 94fd5a91e3..129994752b 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1033,7 +1033,7 @@ static void
 mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ecpri_parser_profile *prf =	&priv->sh->ecpri_parser;
+	struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;
 
 	if (prf->obj)
 		mlx5_devx_cmd_destroy(prf->obj);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index bea1f62ea8..2c8825b733 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -2255,6 +2255,12 @@ void mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev);
 void mlx5_flex_flow_translate_item(struct rte_eth_dev *dev, void *matcher,
 				   void *key, const struct rte_flow_item *item,
 				   bool is_inner);
+int mlx5_flex_get_sample_id(const struct mlx5_flex_item *tp,
+			    uint32_t idx, uint32_t *pos,
+			    bool is_inner, uint32_t *def);
+int mlx5_flex_get_parser_value_per_byte_off(const struct rte_flow_item_flex *item,
+					    void *flex, uint32_t byte_off,
+					    bool is_mask, bool tunnel, uint32_t *value);
 int mlx5_flex_acquire_index(struct rte_eth_dev *dev,
 			    struct rte_flow_item_flex_handle *handle,
 			    bool acquire);
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 4bef2296b8..ae2fc0aabe 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1229,6 +1229,7 @@ struct rte_flow_pattern_template {
 	 * tag pattern item for representor matching.
 	 */
 	bool implicit_tag;
+	uint8_t flex_item; /* flex item index. */
 };
 
 /* Flow action template struct. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index f93dd4073c..9e7ab08b32 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -10668,7 +10668,7 @@ flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key,
 		(const struct rte_flow_item_flex *)item->spec;
 	int index = mlx5_flex_acquire_index(dev, spec->handle, false);
 
-	MLX5_ASSERT(index >= 0 && index <= (int)(sizeof(uint32_t) * CHAR_BIT));
+	MLX5_ASSERT(index >= 0 && index < (int)(sizeof(uint32_t) * CHAR_BIT));
 	if (index < 0)
 		return;
 	if (!(dev_flow->handle->flex_item & RTE_BIT32(index))) {
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index 24b7226ee6..aa317fc958 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -198,6 +198,99 @@ mlx5_flex_set_match_sample(void *misc4_m, void *misc4_v,
 	}
 #undef SET_FP_MATCH_SAMPLE_ID
 }
+
+/**
+ * Get the flex parser sample id and corresponding mask
+ * per shift and width information.
+ *
+ * @param[in] tp
+ *   Mlx5 flex item sample mapping handle.
+ * @param[in] idx
+ *   Mapping index.
+ * @param[in, out] pos
+ *   Where to search the value and mask.
+ * @param[in] is_inner
+ *   For inner matching or not.
+ * @param[out] def
+ *   Mask generated by mapping shift and width.
+ *
+ * @return
+ *   The sample id on success, -1 to skip a DUMMY field.
+ */
+int
+mlx5_flex_get_sample_id(const struct mlx5_flex_item *tp,
+			uint32_t idx, uint32_t *pos,
+			bool is_inner, uint32_t *def)
+{
+	const struct mlx5_flex_pattern_field *map = tp->map + idx;
+	uint32_t id = map->reg_id;
+
+	*def = (RTE_BIT64(map->width) - 1) << map->shift;
+	/* Skip placeholders for DUMMY fields. */
+	if (id == MLX5_INVALID_SAMPLE_REG_ID) {
+		*pos += map->width;
+		return -1;
+	}
+	MLX5_ASSERT(map->width);
+	MLX5_ASSERT(id < tp->devx_fp->num_samples);
+	if (tp->tunnel_mode == FLEX_TUNNEL_MODE_MULTI && is_inner) {
+		uint32_t num_samples = tp->devx_fp->num_samples / 2;
+
+		MLX5_ASSERT(tp->devx_fp->num_samples % 2 == 0);
+		MLX5_ASSERT(id < num_samples);
+		id += num_samples;
+	}
+	return id;
+}
+
+/**
+ * Get the flex parser mapping value per definer format_select_dw.
+ *
+ * @param[in] item
+ *   Rte flex item pointer.
+ * @param[in] flex
+ *   Mlx5 flex item sample mapping handle.
+ * @param[in] byte_off
+ *   Mlx5 flex item format_select_dw.
+ * @param[in] is_mask
+ *   Spec or mask.
+ * @param[in] tunnel
+ *   Tunnel mode or not.
+ * @param[out] value
+ *   Value calculated for this flex parser, either spec or mask.
+ *
+ * @return
+ *   0 on success, -1 for error.
+ */
+int
+mlx5_flex_get_parser_value_per_byte_off(const struct rte_flow_item_flex *item,
+					void *flex, uint32_t byte_off,
+					bool is_mask, bool tunnel, uint32_t *value)
+{
+	struct mlx5_flex_pattern_field *map;
+	struct mlx5_flex_item *tp = flex;
+	uint32_t def, i, pos, val;
+	int id;
+
+	*value = 0;
+	for (i = 0, pos = 0; i < tp->mapnum && pos < item->length * CHAR_BIT; i++) {
+		map = tp->map + i;
+		id = mlx5_flex_get_sample_id(tp, i, &pos, tunnel, &def);
+		if (id == -1)
+			continue;
+		if (id >= (int)tp->devx_fp->num_samples || id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
+			return -1;
+		if (byte_off == tp->devx_fp->sample_ids[id].format_select_dw * sizeof(uint32_t)) {
+			val = mlx5_flex_get_bitfield(item, pos, map->width, map->shift);
+			if (is_mask)
+				val &= RTE_BE32(def);
+			*value |= val;
+		}
+		pos += map->width;
+	}
+	return 0;
+}
+
 /**
  * Translate item pattern into matcher fields according to translation
  * array.
@@ -240,26 +333,17 @@ mlx5_flex_flow_translate_item(struct rte_eth_dev *dev,
 	MLX5_ASSERT(mlx5_flex_index(priv, tp) >= 0);
 	for (i = 0; i < tp->mapnum; i++) {
 		struct mlx5_flex_pattern_field *map = tp->map + i;
-		uint32_t id = map->reg_id;
-		uint32_t def = (RTE_BIT64(map->width) - 1) << map->shift;
-		uint32_t val, msk;
+		uint32_t val, msk, def;
+		int id = mlx5_flex_get_sample_id(tp, i, &pos, is_inner, &def);
 
-		/* Skip placeholders for DUMMY fields. */
-		if (id == MLX5_INVALID_SAMPLE_REG_ID) {
-			pos += map->width;
+		if (id == -1)
 			continue;
-		}
+		MLX5_ASSERT(id < (int)tp->devx_fp->num_samples);
+		if (id >= (int)tp->devx_fp->num_samples ||
+		    id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
+			return;
 		val = mlx5_flex_get_bitfield(spec, pos, map->width, map->shift);
 		msk = mlx5_flex_get_bitfield(mask, pos, map->width, map->shift);
-		MLX5_ASSERT(map->width);
-		MLX5_ASSERT(id < tp->devx_fp->num_samples);
-		if (tp->tunnel_mode == FLEX_TUNNEL_MODE_MULTI && is_inner) {
-			uint32_t num_samples = tp->devx_fp->num_samples / 2;
-
-			MLX5_ASSERT(tp->devx_fp->num_samples % 2 == 0);
-			MLX5_ASSERT(id < num_samples);
-			id += num_samples;
-		}
 		if (attr->ext_sample_id)
 			sample_id = tp->devx_fp->sample_ids[id].sample_id;
 		else
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index b1d5b47900..da9df965c2 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -4339,6 +4339,36 @@ flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
 					      &modify_action);
 }
 
+static int
+flow_hw_flex_item_acquire(struct rte_eth_dev *dev,
+			  struct rte_flow_item_flex_handle *handle,
+			  uint8_t *flex_item)
+{
+	int index = mlx5_flex_acquire_index(dev, handle, false);
+
+	MLX5_ASSERT(index >= 0 && index < (int)(sizeof(uint32_t) * CHAR_BIT));
+	if (index < 0)
+		return -1;
+	if (!(*flex_item & RTE_BIT32(index))) {
+		/* Don't count same flex item again. */
+		if (mlx5_flex_acquire_index(dev, handle, true) != index)
+			MLX5_ASSERT(false);
+		*flex_item |= (uint8_t)RTE_BIT32(index);
+	}
+	return 0;
+}
+
+static void
+flow_hw_flex_item_release(struct rte_eth_dev *dev, uint8_t *flex_item)
+{
+	while (*flex_item) {
+		int index = rte_bsf32(*flex_item);
+
+		mlx5_flex_release_index(dev, index);
+		*flex_item &= ~(uint8_t)RTE_BIT32(index);
+	}
+}
+
 /**
  * Create flow action template.
  *
@@ -4743,6 +4773,7 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
 		case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY:
 		case RTE_FLOW_ITEM_TYPE_CONNTRACK:
 		case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
+		case RTE_FLOW_ITEM_TYPE_FLEX:
 			break;
 		case RTE_FLOW_ITEM_TYPE_INTEGRITY:
 			/*
@@ -4820,6 +4851,7 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
 		.mask = &tag_m,
 		.last = NULL
 	};
+	unsigned int i = 0;
 
 	if (flow_hw_pattern_validate(dev, attr, items, error))
 		return NULL;
@@ -4890,6 +4922,19 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
 			return NULL;
 		}
 	}
+	for (i = 0; items[i].type != RTE_FLOW_ITEM_TYPE_END; ++i) {
+		if (items[i].type == RTE_FLOW_ITEM_TYPE_FLEX) {
+			const struct rte_flow_item_flex *spec =
+				(const struct rte_flow_item_flex *)items[i].spec;
+			struct rte_flow_item_flex_handle *handle = spec->handle;
+
+			if (flow_hw_flex_item_acquire(dev, handle, &it->flex_item)) {
+				claim_zero(mlx5dr_match_template_destroy(it->mt));
+				mlx5_free(it);
+				return NULL;
+			}
+		}
+	}
 	__atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
 	LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
 	return it;
@@ -4909,7 +4954,7 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
+flow_hw_pattern_template_destroy(struct rte_eth_dev *dev,
 			      struct rte_flow_pattern_template *template,
 			      struct rte_flow_error *error __rte_unused)
 {
@@ -4925,6 +4970,7 @@ flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
 				    MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT))
 		mlx5_free_srh_flex_parser(dev);
 	LIST_REMOVE(template, next);
+	flow_hw_flex_item_release(dev, &template->flex_item);
 	claim_zero(mlx5dr_match_template_destroy(template->mt));
 	mlx5_free(template);
 	return 0;
-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v1 4/6] net/mlx5: add flex item modify field implementation
  2023-02-15 11:52 [PATCH v1 0/6] add flex item implementation Rongwei Liu
                   ` (2 preceding siblings ...)
  2023-02-15 11:52 ` [PATCH v1 3/6] net/mlx5/hws: add hws flex item matching support Rongwei Liu
@ 2023-02-15 11:52 ` Rongwei Liu
  2023-02-15 11:52 ` [PATCH v1 5/6] net/mlx5: return error for sws modify field Rongwei Liu
  2023-02-15 11:52 ` [PATCH v1 6/6] doc/mlx5: update mlx5 doc Rongwei Liu
  5 siblings, 0 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-15 11:52 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland

Add the HWS implementation of the modify field action for flex item fields.
The minimum modification granularity is one byte.
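
A hedged sketch of a modify field action using this support: write one
byte of immediate data into a flex item field at byte offset 1
("handle", the offset and the value are placeholders; offsets are in
bits and, per this patch, must be byte aligned):

  struct rte_flow_action_modify_field mf = {
          .operation = RTE_FLOW_MODIFY_SET,
          .dst = {
                  .field = RTE_FLOW_FIELD_FLEX_ITEM,
                  .flex_handle = handle,
                  .offset = 8,            /* bits from the flex header start */
          },
          .src = {
                  .field = RTE_FLOW_FIELD_VALUE,
                  .value = { 0x5a },      /* immediate data, placement illustrative */
          },
          .width = 8,                     /* bits to modify */
  };
  struct rte_flow_action actions[] = {
          { .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD, .conf = &mf },
          { .type = RTE_FLOW_ACTION_TYPE_END },
  };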

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/common/mlx5/mlx5_prm.h  |   1 +
 drivers/net/mlx5/mlx5_flow.h    |   3 +
 drivers/net/mlx5/mlx5_flow_dv.c | 165 +++++++++++++++++++++++++++++---
 drivers/net/mlx5/mlx5_flow_hw.c |  14 ++-
 4 files changed, 170 insertions(+), 13 deletions(-)

diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index c05bce714a..ddfe4d0be7 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -761,6 +761,7 @@ enum mlx5_modification_field {
 	MLX5_MODI_GTPU_FIRST_EXT_DW_0 = 0x76,
 	MLX5_MODI_HASH_RESULT = 0x81,
 	MLX5_MODI_OUT_IPV6_NEXT_HDR = 0x4A,
+	MLX5_MODI_INVALID = INT_MAX,
 };
 
 /* Total number of metadata reg_c's. */
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index ae2fc0aabe..d6831d849d 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1084,6 +1084,8 @@ struct field_modify_info {
 	uint32_t size; /* Size of field in protocol header, in bytes. */
 	uint32_t offset; /* Offset of field in protocol header, in bytes. */
 	enum mlx5_modification_field id;
+	uint32_t shift;
+	uint8_t is_flex; /* Temporary indicator for flex item modify field WA. */
 };
 
 /* HW steering flow attributes. */
@@ -1248,6 +1250,7 @@ struct rte_flow_actions_template {
 	uint16_t mhdr_off; /* Offset of DR modify header action. */
 	uint32_t refcnt; /* Reference counter. */
 	uint16_t rx_cpy_pos; /* Action position of Rx metadata to be copied. */
+	uint8_t flex_item; /* flex item index. */
 };
 
 /* Jump action struct. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 9e7ab08b32..8355249ce5 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -414,10 +414,15 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
 			++field;
 			continue;
 		}
-		/* Deduce actual data width in bits from mask value. */
-		off_b = rte_bsf32(mask) + carry_b;
-		size_b = sizeof(uint32_t) * CHAR_BIT -
-			 off_b - __builtin_clz(mask);
+		if (type == MLX5_MODIFICATION_TYPE_COPY && field->is_flex) {
+			off_b = 32 - field->shift + carry_b - field->size * CHAR_BIT;
+			size_b = field->size * CHAR_BIT - carry_b;
+		} else {
+			/* Deduce actual data width in bits from mask value. */
+			off_b = rte_bsf32(mask) + carry_b;
+			size_b = sizeof(uint32_t) * CHAR_BIT -
+				 off_b - __builtin_clz(mask);
+		}
 		MLX5_ASSERT(size_b);
 		actions[i] = (struct mlx5_modification_cmd) {
 			.action_type = type,
@@ -437,40 +442,46 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
 			 * Destination field overflow. Copy leftovers of
 			 * a source field to the next destination field.
 			 */
-			carry_b = 0;
 			if ((size_b > dcopy->size * CHAR_BIT - dcopy->offset) &&
 			    dcopy->size != 0) {
 				actions[i].length =
 					dcopy->size * CHAR_BIT - dcopy->offset;
-				carry_b = actions[i].length;
+				carry_b += actions[i].length;
 				next_field = false;
+			} else {
+				carry_b = 0;
 			}
 			/*
			 * Not enough bits in a source field to fill a
 			 * destination field. Switch to the next source.
 			 */
 			if ((size_b < dcopy->size * CHAR_BIT - dcopy->offset) &&
-			    (size_b == field->size * CHAR_BIT - off_b)) {
-				actions[i].length =
-					field->size * CHAR_BIT - off_b;
+			    ((size_b == field->size * CHAR_BIT - off_b) ||
+			     field->is_flex)) {
+				actions[i].length = size_b;
 				dcopy->offset += actions[i].length;
 				next_dcopy = false;
 			}
-			if (next_dcopy)
-				++dcopy;
 		} else {
 			MLX5_ASSERT(item->spec);
 			data = flow_dv_fetch_field((const uint8_t *)item->spec +
 						   field->offset, field->size);
 			/* Shift out the trailing masked bits from data. */
 			data = (data & mask) >> off_b;
+			if (field->is_flex)
+				actions[i].offset = 32 - field->shift - field->size * CHAR_BIT;
 			actions[i].data1 = rte_cpu_to_be_32(data);
 		}
 		/* Convert entire record to expected big-endian format. */
 		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
+		if ((type != MLX5_MODIFICATION_TYPE_COPY ||
+		     dcopy->id != (enum mlx5_modification_field)UINT32_MAX) &&
+		    field->id != (enum mlx5_modification_field)UINT32_MAX)
+			++i;
+		if (next_dcopy && type == MLX5_MODIFICATION_TYPE_COPY)
+			++dcopy;
 		if (next_field)
 			++field;
-		++i;
 	} while (field->size);
 	if (resource->actions_num == i)
 		return rte_flow_error_set(error, EINVAL,
@@ -1422,6 +1433,131 @@ flow_modify_info_mask_32_masked(uint32_t length, uint32_t off, uint32_t post_mas
 	return rte_cpu_to_be_32(mask & post_mask);
 }
 
+static void
+mlx5_modify_flex_item(const struct rte_eth_dev *dev,
+		      const struct mlx5_flex_item *flex,
+		      const struct rte_flow_action_modify_data *data,
+		      struct field_modify_info *info,
+		      uint32_t *mask, uint32_t width)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hca_flex_attr *attr = &priv->sh->cdev->config.hca_attr.flex;
+	uint32_t i, j;
+	int id = 0;
+	uint32_t pos = 0;
+	const struct mlx5_flex_pattern_field *map;
+	uint32_t offset = data->offset;
+	uint32_t width_left = width;
+	uint32_t def;
+	uint32_t cur_width = 0;
+	uint32_t tmp_ofs;
+	uint32_t idx = 0;
+	struct field_modify_info tmp;
+	int tmp_id;
+
+	if (!attr->ext_sample_id) {
+		DRV_LOG(ERR, "FW doesn't support modify field with flex item.");
+		return;
+	}
+	/*
+	 * Search for the first mapping instance whose accumulated width
+	 * covers data->offset.
+	 */
+	for (i = 0; i < flex->mapnum; i++) {
+		if (flex->map[i].width + pos > data->offset)
+			break;
+		pos += flex->map[i].width;
+	}
+	if (i >= flex->mapnum)
+		return;
+	tmp_ofs = pos < data->offset ? data->offset - pos : 0;
+	for (j = i; i < flex->mapnum && width_left > 0; ) {
+		map = flex->map + i;
+		id = mlx5_flex_get_sample_id(flex, i, &pos, false, &def);
+		if (id == -1) {
+			i++;
+			/* All left length is dummy */
+			if (pos >= data->offset + width)
+				return;
+			cur_width = map->width;
+		/* One mapping instance covers the whole width. */
+		} else if (pos + map->width >= (data->offset + width)) {
+			cur_width = width_left;
+		} else {
+			cur_width = cur_width + map->width - tmp_ofs;
+			pos += map->width;
+			/*
+			 * Continue to search next until:
+			 * 1. Another flex parser ID.
+			 * 2. Width has been covered.
+			 */
+			for (j = i + 1; j < flex->mapnum; j++) {
+				tmp_id = mlx5_flex_get_sample_id(flex, j, &pos, false, &def);
+				if (tmp_id == -1) {
+					i = j;
+					pos -= flex->map[j].width;
+					break;
+				}
+				if (id >= (int)flex->devx_fp->num_samples ||
+				    id >= MLX5_GRAPH_NODE_SAMPLE_NUM ||
+				    tmp_id >= (int)flex->devx_fp->num_samples ||
+				    tmp_id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
+					return;
+				if (flex->devx_fp->sample_ids[id].id !=
+						flex->devx_fp->sample_ids[tmp_id].id ||
+				    flex->map[j].shift != flex->map[j - 1].width +
+							  flex->map[j - 1].shift) {
+					i = j;
+					break;
+				}
+				if ((pos + flex->map[j].width) >= (data->offset + width)) {
+					cur_width = width_left;
+					break;
+				}
+				pos += flex->map[j].width;
+				cur_width += flex->map[j].width;
+			}
+		}
+		if (cur_width > width_left)
+			cur_width = width_left;
+		else if (cur_width < width_left && (j == flex->mapnum || i == flex->mapnum))
+			return;
+
+		MLX5_ASSERT(id < (int)flex->devx_fp->num_samples);
+		if (id >= (int)flex->devx_fp->num_samples || id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
+			return;
+		/* Use invalid entry as placeholder for DUMMY mapping. */
+		info[idx] = (struct field_modify_info){cur_width / CHAR_BIT, offset / CHAR_BIT,
+			     id == -1 ? MLX5_MODI_INVALID :
+			     (enum mlx5_modification_field)
+			     flex->devx_fp->sample_ids[id].modify_field_id,
+			     map->shift + tmp_ofs, 1};
+		offset += cur_width;
+		width_left -= cur_width;
+		if (!mask) {
+			info[idx].offset = (32 - cur_width - map->shift - tmp_ofs);
+			info[idx].size = cur_width / CHAR_BIT + info[idx].offset / CHAR_BIT;
+		}
+		cur_width = 0;
+		tmp_ofs = 0;
+		idx++;
+	}
+	if (unlikely(width_left > 0)) {
+		MLX5_ASSERT(false);
+		return;
+	}
+	if (mask)
+		memset(mask, 0xff, data->offset / CHAR_BIT + width / CHAR_BIT);
+	/* Re-order the info to follow IPv6 address. */
+	for (i = 0; i < idx / 2; i++) {
+		tmp = info[i];
+		MLX5_ASSERT(info[i].id);
+		MLX5_ASSERT(info[idx - 1 - i].id);
+		info[i] = info[idx - 1 - i];
+		info[idx - 1 - i] = tmp;
+	}
+}
+
 void
 mlx5_flow_field_id_to_modify_info
 		(const struct rte_flow_action_modify_data *data,
@@ -1893,6 +2029,11 @@ mlx5_flow_field_id_to_modify_info
 		else
 			info[idx].offset = off_be;
 		break;
+	case RTE_FLOW_FIELD_FLEX_ITEM:
+		MLX5_ASSERT(data->flex_handle != NULL && !(data->offset & 0x7));
+		mlx5_modify_flex_item(dev, (const struct mlx5_flex_item *)data->flex_handle,
+				      data, info, mask, width);
+		break;
 	case RTE_FLOW_FIELD_POINTER:
 	case RTE_FLOW_FIELD_VALUE:
 	default:
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index da9df965c2..a68dbdf815 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -4558,6 +4558,17 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
 			at->actions[i].conf = actions->conf;
 			at->masks[i].conf = masks->conf;
 		}
+		if (actions->type == RTE_FLOW_ACTION_TYPE_MODIFY_FIELD) {
+			const struct rte_flow_action_modify_field *info = actions->conf;
+
+			if ((info->dst.field == RTE_FLOW_FIELD_FLEX_ITEM &&
+			     flow_hw_flex_item_acquire(dev, info->dst.flex_handle,
+						       &at->flex_item)) ||
+			     (info->src.field == RTE_FLOW_FIELD_FLEX_ITEM &&
+			      flow_hw_flex_item_acquire(dev, info->src.flex_handle,
+							&at->flex_item)))
+				goto error;
+		}
 	}
 	at->tmpl = flow_hw_dr_actions_template_create(at);
 	if (!at->tmpl)
@@ -4589,7 +4600,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-flow_hw_actions_template_destroy(struct rte_eth_dev *dev __rte_unused,
+flow_hw_actions_template_destroy(struct rte_eth_dev *dev,
 				 struct rte_flow_actions_template *template,
 				 struct rte_flow_error *error __rte_unused)
 {
@@ -4602,6 +4613,7 @@ flow_hw_actions_template_destroy(struct rte_eth_dev *dev __rte_unused,
 				   "action template in using");
 	}
 	LIST_REMOVE(template, next);
+	flow_hw_flex_item_release(dev, &template->flex_item);
 	if (template->tmpl)
 		mlx5dr_action_template_destroy(template->tmpl);
 	mlx5_free(template);
-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v1 5/6] net/mlx5: return error for sws modify field
  2023-02-15 11:52 [PATCH v1 0/6] add flex item implementation Rongwei Liu
                   ` (3 preceding siblings ...)
  2023-02-15 11:52 ` [PATCH v1 4/6] net/mlx5: add flex item modify field implementation Rongwei Liu
@ 2023-02-15 11:52 ` Rongwei Liu
  2023-02-15 11:52 ` [PATCH v1 6/6] doc/mlx5: update mlx5 doc Rongwei Liu
  5 siblings, 0 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-15 11:52 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland, Dariusz Sosnowski

Return an "unsupported" error when an application tries to
modify a flex item field with SW steering.

Validation of packet modification actions for SW steering checked
whether either the source or the destination field of a MODIFY_FIELD
action was a flex item. The DEC_TTL action carries no action
configuration, so dereferencing its source or destination field is
invalid. Move the source and destination field type checks into the
MODIFY_FIELD-specific validation function so that field types are
validated if and only if the action type is MODIFY_FIELD.
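
For illustration only (not taken from the patch), the hazard being
avoided, with a placeholder action variable:

  /* DEC_TTL carries no configuration, so conf is NULL. */
  struct rte_flow_action dec_ttl = {
          .type = RTE_FLOW_ACTION_TYPE_DEC_TTL,
          .conf = NULL,
  };
  /*
   * A shared check that blindly evaluated
   *   ((const struct rte_flow_action_modify_field *)dec_ttl.conf)->src.field
   * would dereference a NULL pointer; hence the flex item check now lives
   * only in the MODIFY_FIELD-specific validation path.
   */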

Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow_dv.c | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 8355249ce5..3d760d1913 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -4838,6 +4838,7 @@ flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 					  NULL, "action configuration not set");
+
 	if (action_flags & MLX5_FLOW_ACTION_ENCAP)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
@@ -5163,17 +5164,21 @@ flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
 	struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
 	const struct rte_flow_action_modify_field *action_modify_field =
 		action->conf;
-	uint32_t dst_width = mlx5_flow_item_field_width(dev,
-				action_modify_field->dst.field,
-				-1, attr, error);
-	uint32_t src_width = mlx5_flow_item_field_width(dev,
-				action_modify_field->src.field,
-				dst_width, attr, error);
+	uint32_t dst_width, src_width;
 
 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
 	if (ret)
 		return ret;
-
+	if (action_modify_field->src.field == RTE_FLOW_FIELD_FLEX_ITEM ||
+	    action_modify_field->dst.field == RTE_FLOW_FIELD_FLEX_ITEM)
+		return rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_ACTION, action,
+				"flex item fields modification"
+				" is not supported");
+	dst_width = mlx5_flow_item_field_width(dev, action_modify_field->dst.field,
+					       -1, attr, error);
+	src_width = mlx5_flow_item_field_width(dev, action_modify_field->src.field,
+					       dst_width, attr, error);
 	if (action_modify_field->width == 0)
 		return rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ACTION, action,
-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v1 6/6] doc/mlx5: update mlx5 doc
  2023-02-15 11:52 [PATCH v1 0/6] add flex item implementation Rongwei Liu
                   ` (4 preceding siblings ...)
  2023-02-15 11:52 ` [PATCH v1 5/6] net/mlx5: return error for sws modify field Rongwei Liu
@ 2023-02-15 11:52 ` Rongwei Liu
  2023-02-22  9:37   ` [PATCH v2 0/6] add flex item implementation Rongwei Liu
  2023-02-23  7:06   ` [PATCH v3 0/7] add flex item implementation Rongwei Liu
  5 siblings, 2 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-15 11:52 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland

Add the flex item matching and modify field features to the
mlx5 documentation.

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 doc/guides/nics/mlx5.rst | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index ee2df66e77..0f4a989bbc 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -107,6 +107,7 @@ Features
 - Sub-Function.
 - Matching on represented port.
 - Matching on IPv6 routing extension header.
+- Modify flex item field.
 
 
 Limitations
@@ -292,11 +293,12 @@ Limitations
   - Firmware supports 8 global sample fields.
     Each flex item allocates non-shared sample fields from that pool.
   - Supported flex item can have 1 input link - ``eth`` or ``udp``
-    and up to 2 output links - ``ipv4`` or ``ipv6``.
+    and up to 3 output links - ``ipv4`` or ``ipv6``.
   - Flex item fields (``next_header``, ``next_protocol``, ``samples``)
     do not participate in RSS hash functions.
   - In flex item configuration, ``next_header.field_base`` value
     must be byte aligned (multiple of 8).
+  - For modify field with a flex item, the offset must be byte aligned (multiple of 8).
 
 - No Tx metadata go to the E-Switch steering domain for the Flow group 0.
   The flows within group 0 and set metadata action are rejected by hardware.
-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v2 0/6] add flex item implementation
  2023-02-15 11:52 ` [PATCH v1 6/6] doc/mlx5: update mlx5 doc Rongwei Liu
@ 2023-02-22  9:37   ` Rongwei Liu
  2023-02-22  9:37     ` [PATCH v2 1/6] net/mlx5: enable hws flex item create Rongwei Liu
                       ` (5 more replies)
  2023-02-23  7:06   ` [PATCH v3 0/7] add flex item implementation Rongwei Liu
  1 sibling, 6 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-22  9:37 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland

Implement flex item matching and modify field support in HWS (hardware steering).

Rongwei Liu (6):
  net/mlx5: enable hws flex item create
  net/mlx5: add IPv6 protocol as flex item input
  net/mlx5/hws: add hws flex item matching support
  net/mlx5: add flex item modify field implementation
  net/mlx5: return error for sws modify field
  doc/mlx5: update mlx5 doc

 doc/guides/nics/mlx5.rst              |   4 +-
 drivers/common/mlx5/mlx5_prm.h        |   1 +
 drivers/net/mlx5/hws/mlx5dr_definer.c |  83 ++++++++++++
 drivers/net/mlx5/linux/mlx5_os.c      |  27 ++--
 drivers/net/mlx5/mlx5.c               |   2 +-
 drivers/net/mlx5/mlx5.h               |   6 +
 drivers/net/mlx5/mlx5_flow.h          |   4 +
 drivers/net/mlx5/mlx5_flow_dv.c       | 186 +++++++++++++++++++++++---
 drivers/net/mlx5/mlx5_flow_flex.c     | 135 ++++++++++++++++---
 drivers/net/mlx5/mlx5_flow_hw.c       |  64 ++++++++-
 10 files changed, 460 insertions(+), 52 deletions(-)

-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v2 1/6] net/mlx5: enable hws flex item create
  2023-02-22  9:37   ` [PATCH v2 0/6] add flex item implementation Rongwei Liu
@ 2023-02-22  9:37     ` Rongwei Liu
  2023-02-22  9:37     ` [PATCH v2 2/6] net/mlx5: add IPv6 protocol as flex item input Rongwei Liu
                       ` (4 subsequent siblings)
  5 siblings, 0 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-22  9:37 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland

Enable flex item creation and destruction when dv_flow_en=2 (HW steering) is configured.

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.c | 27 +++++++++++++++------------
 drivers/net/mlx5/mlx5_flow_hw.c  |  2 ++
 2 files changed, 17 insertions(+), 12 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index a71474c90a..f5b3edea99 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -474,10 +474,20 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 	err = mlx5_alloc_table_hash_list(priv);
 	if (err)
 		goto error;
-	if (priv->sh->config.dv_flow_en == 2)
-		return 0;
 	/* The resources below are only valid with DV support. */
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	/* Init shared flex parsers list, no need lcore_share */
+	snprintf(s, sizeof(s), "%s_flex_parsers_list", sh->ibdev_name);
+	sh->flex_parsers_dv = mlx5_list_create(s, sh, false,
+					       mlx5_flex_parser_create_cb,
+					       mlx5_flex_parser_match_cb,
+					       mlx5_flex_parser_remove_cb,
+					       mlx5_flex_parser_clone_cb,
+					       mlx5_flex_parser_clone_free_cb);
+	if (!sh->flex_parsers_dv)
+		goto error;
+	if (priv->sh->config.dv_flow_en == 2)
+		return 0;
 	/* Init port id action list. */
 	snprintf(s, sizeof(s), "%s_port_id_action_list", sh->ibdev_name);
 	sh->port_id_action_list = mlx5_list_create(s, sh, true,
@@ -518,16 +528,9 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 					      flow_dv_dest_array_clone_free_cb);
 	if (!sh->dest_array_list)
 		goto error;
-	/* Init shared flex parsers list, no need lcore_share */
-	snprintf(s, sizeof(s), "%s_flex_parsers_list", sh->ibdev_name);
-	sh->flex_parsers_dv = mlx5_list_create(s, sh, false,
-					       mlx5_flex_parser_create_cb,
-					       mlx5_flex_parser_match_cb,
-					       mlx5_flex_parser_remove_cb,
-					       mlx5_flex_parser_clone_cb,
-					       mlx5_flex_parser_clone_free_cb);
-	if (!sh->flex_parsers_dv)
-		goto error;
+#else
+	if (priv->sh->config.dv_flow_en == 2)
+		return 0;
 #endif
 #ifdef HAVE_MLX5DV_DR
 	void *domain;
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 7457187b19..9e1912ec69 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -8436,6 +8436,8 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
 	.query = flow_hw_query,
 	.get_aged_flows = flow_hw_get_aged_flows,
 	.get_q_aged_flows = flow_hw_get_q_aged_flows,
+	.item_create = flow_dv_item_create,
+	.item_release = flow_dv_item_release,
 };
 
 /**
-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v2 2/6] net/mlx5: add IPv6 protocol as flex item input
  2023-02-22  9:37   ` [PATCH v2 0/6] add flex item implementation Rongwei Liu
  2023-02-22  9:37     ` [PATCH v2 1/6] net/mlx5: enable hws flex item create Rongwei Liu
@ 2023-02-22  9:37     ` Rongwei Liu
  2023-02-22  9:37     ` [PATCH v2 3/6] net/mlx5/hws: add hws flex item matching support Rongwei Liu
                       ` (3 subsequent siblings)
  5 siblings, 0 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-22  9:37 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland

Support the IPv6 protocol as a new flex item input link.

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow_flex.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index 35f2a9923d..24b7226ee6 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -1050,6 +1050,22 @@ mlx5_flex_arc_in_udp(const struct rte_flow_item *item,
 	return rte_be_to_cpu_16(spec->hdr.dst_port);
 }
 
+static int
+mlx5_flex_arc_in_ipv6(const struct rte_flow_item *item,
+		      struct rte_flow_error *error)
+{
+	const struct rte_flow_item_ipv6 *spec = item->spec;
+	const struct rte_flow_item_ipv6 *mask = item->mask;
+	struct rte_flow_item_ipv6 ip = { .hdr.proto = 0xff };
+
+	if (memcmp(mask, &ip, sizeof(struct rte_flow_item_ipv6))) {
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
+			 "invalid ipv6 item mask, full mask is desired");
+	}
+	return spec->hdr.proto;
+}
+
 static int
 mlx5_flex_translate_arc_in(struct mlx5_hca_flex_attr *attr,
 			   const struct rte_flow_item_flex_conf *conf,
@@ -1096,6 +1112,9 @@ mlx5_flex_translate_arc_in(struct mlx5_hca_flex_attr *attr,
 		case RTE_FLOW_ITEM_TYPE_UDP:
 			ret = mlx5_flex_arc_in_udp(rte_item, error);
 			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ret = mlx5_flex_arc_in_ipv6(rte_item, error);
+			break;
 		default:
 			MLX5_ASSERT(false);
 			return rte_flow_error_set
-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v2 3/6] net/mlx5/hws: add hws flex item matching support
  2023-02-22  9:37   ` [PATCH v2 0/6] add flex item implementation Rongwei Liu
  2023-02-22  9:37     ` [PATCH v2 1/6] net/mlx5: enable hws flex item create Rongwei Liu
  2023-02-22  9:37     ` [PATCH v2 2/6] net/mlx5: add IPv6 protocol as flex item input Rongwei Liu
@ 2023-02-22  9:37     ` Rongwei Liu
  2023-02-22  9:37     ` [PATCH v2 4/6] net/mlx5: add flex item modify field implementation Rongwei Liu
                       ` (2 subsequent siblings)
  5 siblings, 0 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-22  9:37 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland, Alex Vesker

Support flex item matching in HWS; the syntax follows SWS exactly.

The flex item must be created in advance and follows the current
JSON mapping logic.

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Reviewed-by: Alex Vesker <valex@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/hws/mlx5dr_definer.c |  83 ++++++++++++++++++
 drivers/net/mlx5/mlx5.c               |   2 +-
 drivers/net/mlx5/mlx5.h               |   6 ++
 drivers/net/mlx5/mlx5_flow.h          |   1 +
 drivers/net/mlx5/mlx5_flow_dv.c       |   2 +-
 drivers/net/mlx5/mlx5_flow_flex.c     | 116 ++++++++++++++++++++++----
 drivers/net/mlx5/mlx5_flow_hw.c       |  48 ++++++++++-
 7 files changed, 239 insertions(+), 19 deletions(-)

diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 6374f9df33..5b78092843 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -311,6 +311,43 @@ mlx5dr_definer_ipv6_routing_ext_set(struct mlx5dr_definer_fc *fc,
 	DR_SET(tag, val, fc->byte_off, 0, fc->bit_mask);
 }
 
+static void
+mlx5dr_definer_flex_parser_set(struct mlx5dr_definer_fc *fc,
+			       const void *item,
+			       uint8_t *tag, bool is_inner)
+{
+	const struct rte_flow_item_flex *flex = item;
+	uint32_t byte_off, val, idx;
+	int ret;
+
+	val = 0;
+	byte_off = MLX5_BYTE_OFF(definer_hl, flex_parser.flex_parser_0);
+	idx = fc->fname - MLX5DR_DEFINER_FNAME_FLEX_PARSER_0;
+	byte_off -= idx * sizeof(uint32_t);
+	ret = mlx5_flex_get_parser_value_per_byte_off(flex, flex->handle, byte_off,
+						      false, is_inner, &val);
+	if (ret == -1 || !val)
+		return;
+
+	DR_SET(tag, val, fc->byte_off, 0, fc->bit_mask);
+}
+
+static void
+mlx5dr_definer_flex_parser_inner_set(struct mlx5dr_definer_fc *fc,
+				     const void *item,
+				     uint8_t *tag)
+{
+	mlx5dr_definer_flex_parser_set(fc, item, tag, true);
+}
+
+static void
+mlx5dr_definer_flex_parser_outer_set(struct mlx5dr_definer_fc *fc,
+				     const void *item,
+				     uint8_t *tag)
+{
+	mlx5dr_definer_flex_parser_set(fc, item, tag, false);
+}
+
 static void
 mlx5dr_definer_gre_key_set(struct mlx5dr_definer_fc *fc,
 			   const void *item_spec,
@@ -1782,6 +1819,47 @@ mlx5dr_definer_conv_item_esp(struct mlx5dr_definer_conv_data *cd,
 	return 0;
 }
 
+static int
+mlx5dr_definer_conv_item_flex_parser(struct mlx5dr_definer_conv_data *cd,
+				     struct rte_flow_item *item,
+				     int item_idx)
+{
+	uint32_t base_off = MLX5_BYTE_OFF(definer_hl, flex_parser.flex_parser_0);
+	const struct rte_flow_item_flex *v, *m;
+	enum mlx5dr_definer_fname fname;
+	struct mlx5dr_definer_fc *fc;
+	uint32_t i, mask, byte_off;
+	bool is_inner = cd->tunnel;
+	int ret;
+
+	m = item->mask;
+	v = item->spec;
+	mask = 0;
+	for (i = 0; i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
+		byte_off = base_off - i * sizeof(uint32_t);
+		ret = mlx5_flex_get_parser_value_per_byte_off(m, v->handle, byte_off,
+							      true, is_inner, &mask);
+		if (ret == -1) {
+			rte_errno = EINVAL;
+			return rte_errno;
+		}
+
+		if (!mask)
+			continue;
+
+		fname = MLX5DR_DEFINER_FNAME_FLEX_PARSER_0;
+		fname += (enum mlx5dr_definer_fname)i;
+		fc = &cd->fc[fname];
+		fc->byte_off = byte_off;
+		fc->item_idx = item_idx;
+		fc->tag_set = cd->tunnel ? &mlx5dr_definer_flex_parser_inner_set :
+					   &mlx5dr_definer_flex_parser_outer_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		fc->bit_mask = mask;
+	}
+	return 0;
+}
+
 static int
 mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 				struct mlx5dr_match_template *mt,
@@ -1913,6 +1991,11 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 			ret = mlx5dr_definer_conv_item_esp(&cd, items, i);
 			item_flags |= MLX5_FLOW_ITEM_ESP;
 			break;
+		case RTE_FLOW_ITEM_TYPE_FLEX:
+			ret = mlx5dr_definer_conv_item_flex_parser(&cd, items, i);
+			item_flags |= cd.tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
+						  MLX5_FLOW_ITEM_OUTER_FLEX;
+			break;
 		default:
 			DR_LOG(ERR, "Unsupported item type %d", items->type);
 			rte_errno = ENOTSUP;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index cfc4609448..9b9ece7ad0 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1033,7 +1033,7 @@ static void
 mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ecpri_parser_profile *prf =	&priv->sh->ecpri_parser;
+	struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;
 
 	if (prf->obj)
 		mlx5_devx_cmd_destroy(prf->obj);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index a766fb408e..af6380bc80 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -2257,6 +2257,12 @@ void mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev);
 void mlx5_flex_flow_translate_item(struct rte_eth_dev *dev, void *matcher,
 				   void *key, const struct rte_flow_item *item,
 				   bool is_inner);
+int mlx5_flex_get_sample_id(const struct mlx5_flex_item *tp,
+			    uint32_t idx, uint32_t *pos,
+			    bool is_inner, uint32_t *def);
+int mlx5_flex_get_parser_value_per_byte_off(const struct rte_flow_item_flex *item,
+					    void *flex, uint32_t byte_off,
+					    bool is_mask, bool tunnel, uint32_t *value);
 int mlx5_flex_acquire_index(struct rte_eth_dev *dev,
 			    struct rte_flow_item_flex_handle *handle,
 			    bool acquire);
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 4bef2296b8..ae2fc0aabe 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1229,6 +1229,7 @@ struct rte_flow_pattern_template {
 	 * tag pattern item for representor matching.
 	 */
 	bool implicit_tag;
+	uint8_t flex_item; /* flex item index. */
 };
 
 /* Flow action template struct. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index f93dd4073c..9e7ab08b32 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -10668,7 +10668,7 @@ flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key,
 		(const struct rte_flow_item_flex *)item->spec;
 	int index = mlx5_flex_acquire_index(dev, spec->handle, false);
 
-	MLX5_ASSERT(index >= 0 && index <= (int)(sizeof(uint32_t) * CHAR_BIT));
+	MLX5_ASSERT(index >= 0 && index < (int)(sizeof(uint32_t) * CHAR_BIT));
 	if (index < 0)
 		return;
 	if (!(dev_flow->handle->flex_item & RTE_BIT32(index))) {
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index 24b7226ee6..aa317fc958 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -198,6 +198,99 @@ mlx5_flex_set_match_sample(void *misc4_m, void *misc4_v,
 	}
 #undef SET_FP_MATCH_SAMPLE_ID
 }
+
+/**
+ * Get the flex parser sample id and corresponding mask
+ * per shift and width information.
+ *
+ * @param[in] tp
+ *   Mlx5 flex item sample mapping handle.
+ * @param[in] idx
+ *   Mapping index.
+ * @param[in, out] pos
+ *   Where to search the value and mask.
+ * @param[in] is_inner
+ *   For inner matching or not.
+ * @param[out] def
+ *   Mask generated by mapping shift and width.
+ *
+ * @return
+ *   The sample id on success, -1 to skip a DUMMY field.
+ */
+int
+mlx5_flex_get_sample_id(const struct mlx5_flex_item *tp,
+			uint32_t idx, uint32_t *pos,
+			bool is_inner, uint32_t *def)
+{
+	const struct mlx5_flex_pattern_field *map = tp->map + idx;
+	uint32_t id = map->reg_id;
+
+	*def = (RTE_BIT64(map->width) - 1) << map->shift;
+	/* Skip placeholders for DUMMY fields. */
+	if (id == MLX5_INVALID_SAMPLE_REG_ID) {
+		*pos += map->width;
+		return -1;
+	}
+	MLX5_ASSERT(map->width);
+	MLX5_ASSERT(id < tp->devx_fp->num_samples);
+	if (tp->tunnel_mode == FLEX_TUNNEL_MODE_MULTI && is_inner) {
+		uint32_t num_samples = tp->devx_fp->num_samples / 2;
+
+		MLX5_ASSERT(tp->devx_fp->num_samples % 2 == 0);
+		MLX5_ASSERT(id < num_samples);
+		id += num_samples;
+	}
+	return id;
+}
+
+/**
+ * Get the flex parser mapping value per definer format_select_dw.
+ *
+ * @param[in] item
+ *   Rte flex item pointer.
+ * @param[in] flex
+ *   Mlx5 flex item sample mapping handle.
+ * @param[in] byte_off
+ *   Mlx5 flex item format_select_dw.
+ * @param[in] is_mask
+ *   Spec or mask.
+ * @param[in] tunnel
+ *   Tunnel mode or not.
+ * @param[out] value
+ *   Value calculated for this flex parser, either spec or mask.
+ *
+ * @return
+ *   0 on success, -1 for error.
+ */
+int
+mlx5_flex_get_parser_value_per_byte_off(const struct rte_flow_item_flex *item,
+					void *flex, uint32_t byte_off,
+					bool is_mask, bool tunnel, uint32_t *value)
+{
+	struct mlx5_flex_pattern_field *map;
+	struct mlx5_flex_item *tp = flex;
+	uint32_t def, i, pos, val;
+	int id;
+
+	*value = 0;
+	for (i = 0, pos = 0; i < tp->mapnum && pos < item->length * CHAR_BIT; i++) {
+		map = tp->map + i;
+		id = mlx5_flex_get_sample_id(tp, i, &pos, tunnel, &def);
+		if (id == -1)
+			continue;
+		if (id >= (int)tp->devx_fp->num_samples || id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
+			return -1;
+		if (byte_off == tp->devx_fp->sample_ids[id].format_select_dw * sizeof(uint32_t)) {
+			val = mlx5_flex_get_bitfield(item, pos, map->width, map->shift);
+			if (is_mask)
+				val &= RTE_BE32(def);
+			*value |= val;
+		}
+		pos += map->width;
+	}
+	return 0;
+}
+
 /**
  * Translate item pattern into matcher fields according to translation
  * array.
@@ -240,26 +333,17 @@ mlx5_flex_flow_translate_item(struct rte_eth_dev *dev,
 	MLX5_ASSERT(mlx5_flex_index(priv, tp) >= 0);
 	for (i = 0; i < tp->mapnum; i++) {
 		struct mlx5_flex_pattern_field *map = tp->map + i;
-		uint32_t id = map->reg_id;
-		uint32_t def = (RTE_BIT64(map->width) - 1) << map->shift;
-		uint32_t val, msk;
+		uint32_t val, msk, def;
+		int id = mlx5_flex_get_sample_id(tp, i, &pos, is_inner, &def);
 
-		/* Skip placeholders for DUMMY fields. */
-		if (id == MLX5_INVALID_SAMPLE_REG_ID) {
-			pos += map->width;
+		if (id == -1)
 			continue;
-		}
+		MLX5_ASSERT(id < (int)tp->devx_fp->num_samples);
+		if (id >= (int)tp->devx_fp->num_samples ||
+		    id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
+			return;
 		val = mlx5_flex_get_bitfield(spec, pos, map->width, map->shift);
 		msk = mlx5_flex_get_bitfield(mask, pos, map->width, map->shift);
-		MLX5_ASSERT(map->width);
-		MLX5_ASSERT(id < tp->devx_fp->num_samples);
-		if (tp->tunnel_mode == FLEX_TUNNEL_MODE_MULTI && is_inner) {
-			uint32_t num_samples = tp->devx_fp->num_samples / 2;
-
-			MLX5_ASSERT(tp->devx_fp->num_samples % 2 == 0);
-			MLX5_ASSERT(id < num_samples);
-			id += num_samples;
-		}
 		if (attr->ext_sample_id)
 			sample_id = tp->devx_fp->sample_ids[id].sample_id;
 		else
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 9e1912ec69..1066829ca5 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -4343,6 +4343,36 @@ flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
 					      &modify_action);
 }
 
+static int
+flow_hw_flex_item_acquire(struct rte_eth_dev *dev,
+			  struct rte_flow_item_flex_handle *handle,
+			  uint8_t *flex_item)
+{
+	int index = mlx5_flex_acquire_index(dev, handle, false);
+
+	MLX5_ASSERT(index >= 0 && index < (int)(sizeof(uint32_t) * CHAR_BIT));
+	if (index < 0)
+		return -1;
+	if (!(*flex_item & RTE_BIT32(index))) {
+		/* Don't count same flex item again. */
+		if (mlx5_flex_acquire_index(dev, handle, true) != index)
+			MLX5_ASSERT(false);
+		*flex_item |= (uint8_t)RTE_BIT32(index);
+	}
+	return 0;
+}
+
+static void
+flow_hw_flex_item_release(struct rte_eth_dev *dev, uint8_t *flex_item)
+{
+	while (*flex_item) {
+		int index = rte_bsf32(*flex_item);
+
+		mlx5_flex_release_index(dev, index);
+		*flex_item &= ~(uint8_t)RTE_BIT32(index);
+	}
+}
+
 /**
  * Create flow action template.
  *
@@ -4748,6 +4778,7 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
 		case RTE_FLOW_ITEM_TYPE_CONNTRACK:
 		case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
 		case RTE_FLOW_ITEM_TYPE_ESP:
+		case RTE_FLOW_ITEM_TYPE_FLEX:
 			break;
 		case RTE_FLOW_ITEM_TYPE_INTEGRITY:
 			/*
@@ -4825,6 +4856,7 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
 		.mask = &tag_m,
 		.last = NULL
 	};
+	unsigned int i = 0;
 
 	if (flow_hw_pattern_validate(dev, attr, items, error))
 		return NULL;
@@ -4895,6 +4927,19 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
 			return NULL;
 		}
 	}
+	for (i = 0; items[i].type != RTE_FLOW_ITEM_TYPE_END; ++i) {
+		if (items[i].type == RTE_FLOW_ITEM_TYPE_FLEX) {
+			const struct rte_flow_item_flex *spec =
+				(const struct rte_flow_item_flex *)items[i].spec;
+			struct rte_flow_item_flex_handle *handle = spec->handle;
+
+			if (flow_hw_flex_item_acquire(dev, handle, &it->flex_item)) {
+				claim_zero(mlx5dr_match_template_destroy(it->mt));
+				mlx5_free(it);
+				return NULL;
+			}
+		}
+	}
 	__atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
 	LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
 	return it;
@@ -4914,7 +4959,7 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
+flow_hw_pattern_template_destroy(struct rte_eth_dev *dev,
 			      struct rte_flow_pattern_template *template,
 			      struct rte_flow_error *error __rte_unused)
 {
@@ -4930,6 +4975,7 @@ flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
 				    MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT))
 		mlx5_free_srh_flex_parser(dev);
 	LIST_REMOVE(template, next);
+	flow_hw_flex_item_release(dev, &template->flex_item);
 	claim_zero(mlx5dr_match_template_destroy(template->mt));
 	mlx5_free(template);
 	return 0;
-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v2 4/6] net/mlx5: add flex item modify field implementation
  2023-02-22  9:37   ` [PATCH v2 0/6] add flex item implementation Rongwei Liu
                       ` (2 preceding siblings ...)
  2023-02-22  9:37     ` [PATCH v2 3/6] net/mlx5/hws: add hws flex item matching support Rongwei Liu
@ 2023-02-22  9:37     ` Rongwei Liu
  2023-02-22  9:37     ` [PATCH v2 5/6] net/mlx5: return error for sws modify field Rongwei Liu
  2023-02-22  9:37     ` [PATCH v2 6/6] doc/mlx5: update mlx5 doc Rongwei Liu
  5 siblings, 0 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-22  9:37 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland

Add flex item modify field HWS implementation.
The minimum modify boundary is one byte.
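
As a usage sketch only (not part of the patch; "flex_handle" is assumed
to come from an earlier rte_flow_flex_item_create() call), an action
writing one immediate byte into the flex item payload could look like:

  /* Offsets are given in bits and must be byte aligned;
   * the minimum width is one byte.
   */
  struct rte_flow_action_modify_field mf = {
          .operation = RTE_FLOW_MODIFY_SET,
          .dst = {
                  .field = RTE_FLOW_FIELD_FLEX_ITEM,
                  .flex_handle = flex_handle,
                  .offset = 16,
          },
          .src = {
                  .field = RTE_FLOW_FIELD_VALUE,
                  .value = { 0x5a },
          },
          .width = 8,
  };
  struct rte_flow_action actions[] = {
          { .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD, .conf = &mf },
          { .type = RTE_FLOW_ACTION_TYPE_END },
  };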

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/common/mlx5/mlx5_prm.h  |   1 +
 drivers/net/mlx5/mlx5_flow.h    |   3 +
 drivers/net/mlx5/mlx5_flow_dv.c | 165 +++++++++++++++++++++++++++++---
 drivers/net/mlx5/mlx5_flow_hw.c |  14 ++-
 4 files changed, 170 insertions(+), 13 deletions(-)

diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 613cc6face..74c5e2e371 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -761,6 +761,7 @@ enum mlx5_modification_field {
 	MLX5_MODI_GTPU_FIRST_EXT_DW_0 = 0x76,
 	MLX5_MODI_HASH_RESULT = 0x81,
 	MLX5_MODI_OUT_IPV6_NEXT_HDR = 0x4A,
+	MLX5_MODI_INVALID = INT_MAX,
 };
 
 /* Total number of metadata reg_c's. */
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index ae2fc0aabe..d6831d849d 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1084,6 +1084,8 @@ struct field_modify_info {
 	uint32_t size; /* Size of field in protocol header, in bytes. */
 	uint32_t offset; /* Offset of field in protocol header, in bytes. */
 	enum mlx5_modification_field id;
+	uint32_t shift;
+	uint8_t is_flex; /* Temporary indicator for flex item modify field WA. */
 };
 
 /* HW steering flow attributes. */
@@ -1248,6 +1250,7 @@ struct rte_flow_actions_template {
 	uint16_t mhdr_off; /* Offset of DR modify header action. */
 	uint32_t refcnt; /* Reference counter. */
 	uint16_t rx_cpy_pos; /* Action position of Rx metadata to be copied. */
+	uint8_t flex_item; /* Flex item index bitmap. */
 };
 
 /* Jump action struct. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 9e7ab08b32..8355249ce5 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -414,10 +414,15 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
 			++field;
 			continue;
 		}
-		/* Deduce actual data width in bits from mask value. */
-		off_b = rte_bsf32(mask) + carry_b;
-		size_b = sizeof(uint32_t) * CHAR_BIT -
-			 off_b - __builtin_clz(mask);
+		if (type == MLX5_MODIFICATION_TYPE_COPY && field->is_flex) {
+			off_b = 32 - field->shift + carry_b - field->size * CHAR_BIT;
+			size_b = field->size * CHAR_BIT - carry_b;
+		} else {
+			/* Deduce actual data width in bits from mask value. */
+			off_b = rte_bsf32(mask) + carry_b;
+			size_b = sizeof(uint32_t) * CHAR_BIT -
+				 off_b - __builtin_clz(mask);
+		}
 		MLX5_ASSERT(size_b);
 		actions[i] = (struct mlx5_modification_cmd) {
 			.action_type = type,
@@ -437,40 +442,46 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
 			 * Destination field overflow. Copy leftovers of
 			 * a source field to the next destination field.
 			 */
-			carry_b = 0;
 			if ((size_b > dcopy->size * CHAR_BIT - dcopy->offset) &&
 			    dcopy->size != 0) {
 				actions[i].length =
 					dcopy->size * CHAR_BIT - dcopy->offset;
-				carry_b = actions[i].length;
+				carry_b += actions[i].length;
 				next_field = false;
+			} else {
+				carry_b = 0;
 			}
 			/*
 			 * Not enough bits in a source filed to fill a
 			 * destination field. Switch to the next source.
 			 */
 			if ((size_b < dcopy->size * CHAR_BIT - dcopy->offset) &&
-			    (size_b == field->size * CHAR_BIT - off_b)) {
-				actions[i].length =
-					field->size * CHAR_BIT - off_b;
+			    ((size_b == field->size * CHAR_BIT - off_b) ||
+			     field->is_flex)) {
+				actions[i].length = size_b;
 				dcopy->offset += actions[i].length;
 				next_dcopy = false;
 			}
-			if (next_dcopy)
-				++dcopy;
 		} else {
 			MLX5_ASSERT(item->spec);
 			data = flow_dv_fetch_field((const uint8_t *)item->spec +
 						   field->offset, field->size);
 			/* Shift out the trailing masked bits from data. */
 			data = (data & mask) >> off_b;
+			if (field->is_flex)
+				actions[i].offset = 32 - field->shift - field->size * CHAR_BIT;
 			actions[i].data1 = rte_cpu_to_be_32(data);
 		}
 		/* Convert entire record to expected big-endian format. */
 		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
+		if ((type != MLX5_MODIFICATION_TYPE_COPY ||
+		     dcopy->id != (enum mlx5_modification_field)UINT32_MAX) &&
+		    field->id != (enum mlx5_modification_field)UINT32_MAX)
+			++i;
+		if (next_dcopy && type == MLX5_MODIFICATION_TYPE_COPY)
+			++dcopy;
 		if (next_field)
 			++field;
-		++i;
 	} while (field->size);
 	if (resource->actions_num == i)
 		return rte_flow_error_set(error, EINVAL,
@@ -1422,6 +1433,131 @@ flow_modify_info_mask_32_masked(uint32_t length, uint32_t off, uint32_t post_mas
 	return rte_cpu_to_be_32(mask & post_mask);
 }
 
+static void
+mlx5_modify_flex_item(const struct rte_eth_dev *dev,
+		      const struct mlx5_flex_item *flex,
+		      const struct rte_flow_action_modify_data *data,
+		      struct field_modify_info *info,
+		      uint32_t *mask, uint32_t width)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hca_flex_attr *attr = &priv->sh->cdev->config.hca_attr.flex;
+	uint32_t i, j;
+	int id = 0;
+	uint32_t pos = 0;
+	const struct mlx5_flex_pattern_field *map;
+	uint32_t offset = data->offset;
+	uint32_t width_left = width;
+	uint32_t def;
+	uint32_t cur_width = 0;
+	uint32_t tmp_ofs;
+	uint32_t idx = 0;
+	struct field_modify_info tmp;
+	int tmp_id;
+
+	if (!attr->ext_sample_id) {
+		DRV_LOG(ERR, "FW doesn't support modify field with flex item.");
+		return;
+	}
+	/*
+	 * Search for the mapping instance until the accumulated width is
+	 * no less than data->offset.
+	 */
+	for (i = 0; i < flex->mapnum; i++) {
+		if (flex->map[i].width + pos > data->offset)
+			break;
+		pos += flex->map[i].width;
+	}
+	if (i >= flex->mapnum)
+		return;
+	tmp_ofs = pos < data->offset ? data->offset - pos : 0;
+	for (j = i; i < flex->mapnum && width_left > 0; ) {
+		map = flex->map + i;
+		id = mlx5_flex_get_sample_id(flex, i, &pos, false, &def);
+		if (id == -1) {
+			i++;
+			/* The remaining length is all dummy. */
+			if (pos >= data->offset + width)
+				return;
+			cur_width = map->width;
+		/* One mapping instance covers the whole width. */
+		} else if (pos + map->width >= (data->offset + width)) {
+			cur_width = width_left;
+		} else {
+			cur_width = cur_width + map->width - tmp_ofs;
+			pos += map->width;
+			/*
+			 * Continue to search next until:
+			 * 1. Another flex parser ID.
+			 * 2. Width has been covered.
+			 */
+			for (j = i + 1; j < flex->mapnum; j++) {
+				tmp_id = mlx5_flex_get_sample_id(flex, j, &pos, false, &def);
+				if (tmp_id == -1) {
+					i = j;
+					pos -= flex->map[j].width;
+					break;
+				}
+				if (id >= (int)flex->devx_fp->num_samples ||
+				    id >= MLX5_GRAPH_NODE_SAMPLE_NUM ||
+				    tmp_id >= (int)flex->devx_fp->num_samples ||
+				    tmp_id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
+					return;
+				if (flex->devx_fp->sample_ids[id].id !=
+						flex->devx_fp->sample_ids[tmp_id].id ||
+				    flex->map[j].shift != flex->map[j - 1].width +
+							  flex->map[j - 1].shift) {
+					i = j;
+					break;
+				}
+				if ((pos + flex->map[j].width) >= (data->offset + width)) {
+					cur_width = width_left;
+					break;
+				}
+				pos += flex->map[j].width;
+				cur_width += flex->map[j].width;
+			}
+		}
+		if (cur_width > width_left)
+			cur_width = width_left;
+		else if (cur_width < width_left && (j == flex->mapnum || i == flex->mapnum))
+			return;
+
+		MLX5_ASSERT(id < (int)flex->devx_fp->num_samples);
+		if (id >= (int)flex->devx_fp->num_samples || id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
+			return;
+		/* Use invalid entry as placeholder for DUMMY mapping. */
+		info[idx] = (struct field_modify_info){cur_width / CHAR_BIT, offset / CHAR_BIT,
+			     id == -1 ? MLX5_MODI_INVALID :
+			     (enum mlx5_modification_field)
+			     flex->devx_fp->sample_ids[id].modify_field_id,
+			     map->shift + tmp_ofs, 1};
+		offset += cur_width;
+		width_left -= cur_width;
+		if (!mask) {
+			info[idx].offset = (32 - cur_width - map->shift - tmp_ofs);
+			info[idx].size = cur_width / CHAR_BIT + info[idx].offset / CHAR_BIT;
+		}
+		cur_width = 0;
+		tmp_ofs = 0;
+		idx++;
+	}
+	if (unlikely(width_left > 0)) {
+		MLX5_ASSERT(false);
+		return;
+	}
+	if (mask)
+		memset(mask, 0xff, data->offset / CHAR_BIT + width / CHAR_BIT);
+	/* Re-order the info to follow IPv6 address. */
+	for (i = 0; i < idx / 2; i++) {
+		tmp = info[i];
+		MLX5_ASSERT(info[i].id);
+		MLX5_ASSERT(info[idx - 1 - i].id);
+		info[i] = info[idx - 1 - i];
+		info[idx - 1 - i] = tmp;
+	}
+}
+
 void
 mlx5_flow_field_id_to_modify_info
 		(const struct rte_flow_action_modify_data *data,
@@ -1893,6 +2029,11 @@ mlx5_flow_field_id_to_modify_info
 		else
 			info[idx].offset = off_be;
 		break;
+	case RTE_FLOW_FIELD_FLEX_ITEM:
+		MLX5_ASSERT(data->flex_handle != NULL && !(data->offset & 0x7));
+		mlx5_modify_flex_item(dev, (const struct mlx5_flex_item *)data->flex_handle,
+				      data, info, mask, width);
+		break;
 	case RTE_FLOW_FIELD_POINTER:
 	case RTE_FLOW_FIELD_VALUE:
 	default:
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 1066829ca5..907aab8bf3 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -4562,6 +4562,17 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
 			at->actions[i].conf = actions->conf;
 			at->masks[i].conf = masks->conf;
 		}
+		if (actions->type == RTE_FLOW_ACTION_TYPE_MODIFY_FIELD) {
+			const struct rte_flow_action_modify_field *info = actions->conf;
+
+			if ((info->dst.field == RTE_FLOW_FIELD_FLEX_ITEM &&
+			     flow_hw_flex_item_acquire(dev, info->dst.flex_handle,
+						       &at->flex_item)) ||
+			     (info->src.field == RTE_FLOW_FIELD_FLEX_ITEM &&
+			      flow_hw_flex_item_acquire(dev, info->src.flex_handle,
+							&at->flex_item)))
+				goto error;
+		}
 	}
 	at->tmpl = flow_hw_dr_actions_template_create(at);
 	if (!at->tmpl)
@@ -4593,7 +4604,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-flow_hw_actions_template_destroy(struct rte_eth_dev *dev __rte_unused,
+flow_hw_actions_template_destroy(struct rte_eth_dev *dev,
 				 struct rte_flow_actions_template *template,
 				 struct rte_flow_error *error __rte_unused)
 {
@@ -4606,6 +4617,7 @@ flow_hw_actions_template_destroy(struct rte_eth_dev *dev __rte_unused,
 				   "action template in using");
 	}
 	LIST_REMOVE(template, next);
+	flow_hw_flex_item_release(dev, &template->flex_item);
 	if (template->tmpl)
 		mlx5dr_action_template_destroy(template->tmpl);
 	mlx5_free(template);
-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v2 5/6] net/mlx5: return error for sws modify field
  2023-02-22  9:37   ` [PATCH v2 0/6] add flex item implementation Rongwei Liu
                       ` (3 preceding siblings ...)
  2023-02-22  9:37     ` [PATCH v2 4/6] net/mlx5: add flex item modify field implementation Rongwei Liu
@ 2023-02-22  9:37     ` Rongwei Liu
  2023-02-22  9:37     ` [PATCH v2 6/6] doc/mlx5: update mlx5 doc Rongwei Liu
  5 siblings, 0 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-22  9:37 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland, Dariusz Sosnowski

Return an unsupported error message when the application tries to
modify a flex item field.

Validation of packet modification actions for SW steering checked
whether either the source or the destination field of a MODIFY_FIELD
action was a flex item.
The DEC_TTL action carries no action configuration, so dereferencing
its source or destination field is invalid. The validation of the
source and destination field types is therefore moved to the
MODIFY_FIELD-specific validation function, so that field types are
validated if and only if the action type is MODIFY_FIELD.
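
For illustration only (not taken from the patch), the problematic case
is an action whose conf pointer is legitimately NULL:

  /* DEC_TTL carries no configuration; action->conf stays NULL and
   * must not be dereferenced as a modify_field configuration.
   */
  struct rte_flow_action actions[] = {
          { .type = RTE_FLOW_ACTION_TYPE_DEC_TTL },
          { .type = RTE_FLOW_ACTION_TYPE_END },
  };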

Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow_dv.c | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 8355249ce5..3d760d1913 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -4838,6 +4838,7 @@ flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 					  NULL, "action configuration not set");
+
 	if (action_flags & MLX5_FLOW_ACTION_ENCAP)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
@@ -5163,17 +5164,21 @@ flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
 	struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
 	const struct rte_flow_action_modify_field *action_modify_field =
 		action->conf;
-	uint32_t dst_width = mlx5_flow_item_field_width(dev,
-				action_modify_field->dst.field,
-				-1, attr, error);
-	uint32_t src_width = mlx5_flow_item_field_width(dev,
-				action_modify_field->src.field,
-				dst_width, attr, error);
+	uint32_t dst_width, src_width;
 
 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
 	if (ret)
 		return ret;
-
+	if (action_modify_field->src.field == RTE_FLOW_FIELD_FLEX_ITEM ||
+	    action_modify_field->dst.field == RTE_FLOW_FIELD_FLEX_ITEM)
+		return rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_ACTION, action,
+				"flex item fields modification"
+				" is not supported");
+	dst_width = mlx5_flow_item_field_width(dev, action_modify_field->dst.field,
+					       -1, attr, error);
+	src_width = mlx5_flow_item_field_width(dev, action_modify_field->src.field,
+					       dst_width, attr, error);
 	if (action_modify_field->width == 0)
 		return rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ACTION, action,
-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v2 6/6] doc/mlx5: update mlx5 doc
  2023-02-22  9:37   ` [PATCH v2 0/6] add flex item implementation Rongwei Liu
                       ` (4 preceding siblings ...)
  2023-02-22  9:37     ` [PATCH v2 5/6] net/mlx5: return error for sws modify field Rongwei Liu
@ 2023-02-22  9:37     ` Rongwei Liu
  5 siblings, 0 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-22  9:37 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland

Add the flex item matching and modify field features to the
mlx5 documentation.

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 doc/guides/nics/mlx5.rst | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index f182baa37e..09828a5cf4 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -108,6 +108,7 @@ Features
 - Sub-Function.
 - Matching on represented port.
 - Matching on IPv6 routing extension header.
+- Modify flex item field.
 
 
 Limitations
@@ -291,11 +292,12 @@ Limitations
   - Firmware supports 8 global sample fields.
     Each flex item allocates non-shared sample fields from that pool.
   - Supported flex item can have 1 input link - ``eth`` or ``udp``
-    and up to 2 output links - ``ipv4`` or ``ipv6``.
+    and up to 3 output links - ``ipv4`` or ``ipv6``.
   - Flex item fields (``next_header``, ``next_protocol``, ``samples``)
     do not participate in RSS hash functions.
   - In flex item configuration, ``next_header.field_base`` value
     must be byte aligned (multiple of 8).
+  - For modify field with a flex item, the offset must be byte aligned (multiple of 8).
 
 - No Tx metadata go to the E-Switch steering domain for the Flow group 0.
   The flows within group 0 and set metadata action are rejected by hardware.
-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v3 0/7] add flex item implementation
  2023-02-15 11:52 ` [PATCH v1 6/6] doc/mlx5: update mlx5 doc Rongwei Liu
  2023-02-22  9:37   ` [PATCH v2 0/6] add flex item implementation Rongwei Liu
@ 2023-02-23  7:06   ` Rongwei Liu
  2023-02-23  7:06     ` [PATCH v3 1/7] net/mlx5: enable hws flex item create Rongwei Liu
                       ` (6 more replies)
  1 sibling, 7 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-23  7:06 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland

Implement the flex item matching and modify field in HWS.

v3: add error message for testpmd output.

Rongwei Liu (7):
  net/mlx5: enable hws flex item create
  net/mlx5: add IPv6 protocol as flex item input
  net/mlx5/hws: add hws flex item matching support
  net/mlx5: add flex item modify field implementation
  net/mlx5: return error for sws modify field
  doc/mlx5: update mlx5 doc
  net/mlx5: add error message

 doc/guides/nics/mlx5.rst              |   4 +-
 drivers/common/mlx5/mlx5_prm.h        |   1 +
 drivers/net/mlx5/hws/mlx5dr_definer.c |  83 ++++++++++++
 drivers/net/mlx5/linux/mlx5_os.c      |  27 ++--
 drivers/net/mlx5/mlx5.c               |   2 +-
 drivers/net/mlx5/mlx5.h               |   6 +
 drivers/net/mlx5/mlx5_flow.h          |   4 +
 drivers/net/mlx5/mlx5_flow_dv.c       | 186 +++++++++++++++++++++++---
 drivers/net/mlx5/mlx5_flow_flex.c     | 135 ++++++++++++++++---
 drivers/net/mlx5/mlx5_flow_hw.c       |  77 ++++++++++-
 10 files changed, 473 insertions(+), 52 deletions(-)

-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v3 1/7] net/mlx5: enable hws flex item create
  2023-02-23  7:06   ` [PATCH v3 0/7] add flex item implementation Rongwei Liu
@ 2023-02-23  7:06     ` Rongwei Liu
  2023-02-23  7:06     ` [PATCH v3 2/7] net/mlx5: add IPv6 protocol as flex item input Rongwei Liu
                       ` (5 subsequent siblings)
  6 siblings, 0 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-23  7:06 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland

Enable flex item create and destroy with dv_flow_en=2
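
As a sketch (port_id and a populated struct rte_flow_item_flex_conf
"conf" are assumed and not shown here), the public API now works when
the PMD runs with dv_flow_en=2:

  struct rte_flow_error err = { .message = NULL };
  struct rte_flow_item_flex_handle *handle;

  /* Before this patch the shared flex parser list was not initialized
   * for HW steering, so creation failed with dv_flow_en=2.
   */
  handle = rte_flow_flex_item_create(port_id, &conf, &err);
  if (handle == NULL)
          printf("flex item create failed: %s\n",
                 err.message ? err.message : "(none)");
  else
          rte_flow_flex_item_release(port_id, handle, &err);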

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.c | 27 +++++++++++++++------------
 drivers/net/mlx5/mlx5_flow_hw.c  |  2 ++
 2 files changed, 17 insertions(+), 12 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index a71474c90a..f5b3edea99 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -474,10 +474,20 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 	err = mlx5_alloc_table_hash_list(priv);
 	if (err)
 		goto error;
-	if (priv->sh->config.dv_flow_en == 2)
-		return 0;
 	/* The resources below are only valid with DV support. */
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	/* Init shared flex parsers list, no need lcore_share */
+	snprintf(s, sizeof(s), "%s_flex_parsers_list", sh->ibdev_name);
+	sh->flex_parsers_dv = mlx5_list_create(s, sh, false,
+					       mlx5_flex_parser_create_cb,
+					       mlx5_flex_parser_match_cb,
+					       mlx5_flex_parser_remove_cb,
+					       mlx5_flex_parser_clone_cb,
+					       mlx5_flex_parser_clone_free_cb);
+	if (!sh->flex_parsers_dv)
+		goto error;
+	if (priv->sh->config.dv_flow_en == 2)
+		return 0;
 	/* Init port id action list. */
 	snprintf(s, sizeof(s), "%s_port_id_action_list", sh->ibdev_name);
 	sh->port_id_action_list = mlx5_list_create(s, sh, true,
@@ -518,16 +528,9 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 					      flow_dv_dest_array_clone_free_cb);
 	if (!sh->dest_array_list)
 		goto error;
-	/* Init shared flex parsers list, no need lcore_share */
-	snprintf(s, sizeof(s), "%s_flex_parsers_list", sh->ibdev_name);
-	sh->flex_parsers_dv = mlx5_list_create(s, sh, false,
-					       mlx5_flex_parser_create_cb,
-					       mlx5_flex_parser_match_cb,
-					       mlx5_flex_parser_remove_cb,
-					       mlx5_flex_parser_clone_cb,
-					       mlx5_flex_parser_clone_free_cb);
-	if (!sh->flex_parsers_dv)
-		goto error;
+#else
+	if (priv->sh->config.dv_flow_en == 2)
+		return 0;
 #endif
 #ifdef HAVE_MLX5DV_DR
 	void *domain;
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 7457187b19..9e1912ec69 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -8436,6 +8436,8 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
 	.query = flow_hw_query,
 	.get_aged_flows = flow_hw_get_aged_flows,
 	.get_q_aged_flows = flow_hw_get_q_aged_flows,
+	.item_create = flow_dv_item_create,
+	.item_release = flow_dv_item_release,
 };
 
 /**
-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v3 2/7] net/mlx5: add IPv6 protocol as flex item input
  2023-02-23  7:06   ` [PATCH v3 0/7] add flex item implementation Rongwei Liu
  2023-02-23  7:06     ` [PATCH v3 1/7] net/mlx5: enable hws flex item create Rongwei Liu
@ 2023-02-23  7:06     ` Rongwei Liu
  2023-02-23  7:06     ` [PATCH v3 3/7] net/mlx5/hws: add hws flex item matching support Rongwei Liu
                       ` (4 subsequent siblings)
  6 siblings, 0 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-23  7:06 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland

Support the IPv6 protocol as a new flex item input link.
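
A configuration sketch (public API field names only; samples and output
links are omitted for brevity) with IPv6 as the input link, keyed on a
fully masked "proto" field as the new check requires:

  struct rte_flow_item_ipv6 ipv6_spec = { .hdr.proto = 0x3c }; /* example value */
  struct rte_flow_item_ipv6 ipv6_mask = { .hdr.proto = 0xff }; /* full mask on proto only */
  struct rte_flow_item_flex_link in_link = {
          .item = {
                  .type = RTE_FLOW_ITEM_TYPE_IPV6,
                  .spec = &ipv6_spec,
                  .mask = &ipv6_mask,
          },
  };
  struct rte_flow_item_flex_conf conf = {
          /* next_header/next_protocol/sample_data set up elsewhere */
          .input_link = &in_link,
          .nb_inputs = 1,
  };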

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow_flex.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index 35f2a9923d..24b7226ee6 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -1050,6 +1050,22 @@ mlx5_flex_arc_in_udp(const struct rte_flow_item *item,
 	return rte_be_to_cpu_16(spec->hdr.dst_port);
 }
 
+static int
+mlx5_flex_arc_in_ipv6(const struct rte_flow_item *item,
+		      struct rte_flow_error *error)
+{
+	const struct rte_flow_item_ipv6 *spec = item->spec;
+	const struct rte_flow_item_ipv6 *mask = item->mask;
+	struct rte_flow_item_ipv6 ip = { .hdr.proto = 0xff };
+
+	if (memcmp(mask, &ip, sizeof(struct rte_flow_item_ipv6))) {
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
+			 "invalid ipv6 item mask, full mask is desired");
+	}
+	return spec->hdr.proto;
+}
+
 static int
 mlx5_flex_translate_arc_in(struct mlx5_hca_flex_attr *attr,
 			   const struct rte_flow_item_flex_conf *conf,
@@ -1096,6 +1112,9 @@ mlx5_flex_translate_arc_in(struct mlx5_hca_flex_attr *attr,
 		case RTE_FLOW_ITEM_TYPE_UDP:
 			ret = mlx5_flex_arc_in_udp(rte_item, error);
 			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ret = mlx5_flex_arc_in_ipv6(rte_item, error);
+			break;
 		default:
 			MLX5_ASSERT(false);
 			return rte_flow_error_set
-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v3 3/7] net/mlx5/hws: add hws flex item matching support
  2023-02-23  7:06   ` [PATCH v3 0/7] add flex item implementation Rongwei Liu
  2023-02-23  7:06     ` [PATCH v3 1/7] net/mlx5: enable hws flex item create Rongwei Liu
  2023-02-23  7:06     ` [PATCH v3 2/7] net/mlx5: add IPv6 protocol as flex item input Rongwei Liu
@ 2023-02-23  7:06     ` Rongwei Liu
  2023-02-23  7:06     ` [PATCH v3 4/7] net/mlx5: add flex item modify field implementation Rongwei Liu
                       ` (3 subsequent siblings)
  6 siblings, 0 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-23  7:06 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland, Alex Vesker

Support flex item matching in HWS; the syntax follows SWS exactly.

The flex item must be created in advance and follows the current
JSON mapping logic.
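
A matching sketch (illustrative only; "flex_handle" is assumed to come
from an earlier rte_flow_flex_item_create() call):

  uint8_t flex_spec_bytes[2] = { 0x12, 0x34 };
  uint8_t flex_mask_bytes[2] = { 0xff, 0xff };
  struct rte_flow_item_flex flex_spec = {
          .handle = flex_handle,
          .length = sizeof(flex_spec_bytes),
          .pattern = flex_spec_bytes,
  };
  struct rte_flow_item_flex flex_mask = {
          .handle = flex_handle,
          .length = sizeof(flex_mask_bytes),
          .pattern = flex_mask_bytes,
  };
  struct rte_flow_item pattern[] = {
          { .type = RTE_FLOW_ITEM_TYPE_ETH },
          { .type = RTE_FLOW_ITEM_TYPE_UDP },
          { .type = RTE_FLOW_ITEM_TYPE_FLEX,
            .spec = &flex_spec, .mask = &flex_mask },
          { .type = RTE_FLOW_ITEM_TYPE_END },
  };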

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Reviewed-by: Alex Vesker <valex@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/hws/mlx5dr_definer.c |  83 ++++++++++++++++++
 drivers/net/mlx5/mlx5.c               |   2 +-
 drivers/net/mlx5/mlx5.h               |   6 ++
 drivers/net/mlx5/mlx5_flow.h          |   1 +
 drivers/net/mlx5/mlx5_flow_dv.c       |   2 +-
 drivers/net/mlx5/mlx5_flow_flex.c     | 116 ++++++++++++++++++++++----
 drivers/net/mlx5/mlx5_flow_hw.c       |  48 ++++++++++-
 7 files changed, 239 insertions(+), 19 deletions(-)

diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 6374f9df33..5b78092843 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -311,6 +311,43 @@ mlx5dr_definer_ipv6_routing_ext_set(struct mlx5dr_definer_fc *fc,
 	DR_SET(tag, val, fc->byte_off, 0, fc->bit_mask);
 }
 
+static void
+mlx5dr_definer_flex_parser_set(struct mlx5dr_definer_fc *fc,
+			       const void *item,
+			       uint8_t *tag, bool is_inner)
+{
+	const struct rte_flow_item_flex *flex = item;
+	uint32_t byte_off, val, idx;
+	int ret;
+
+	val = 0;
+	byte_off = MLX5_BYTE_OFF(definer_hl, flex_parser.flex_parser_0);
+	idx = fc->fname - MLX5DR_DEFINER_FNAME_FLEX_PARSER_0;
+	byte_off -= idx * sizeof(uint32_t);
+	ret = mlx5_flex_get_parser_value_per_byte_off(flex, flex->handle, byte_off,
+						      false, is_inner, &val);
+	if (ret == -1 || !val)
+		return;
+
+	DR_SET(tag, val, fc->byte_off, 0, fc->bit_mask);
+}
+
+static void
+mlx5dr_definer_flex_parser_inner_set(struct mlx5dr_definer_fc *fc,
+				     const void *item,
+				     uint8_t *tag)
+{
+	mlx5dr_definer_flex_parser_set(fc, item, tag, true);
+}
+
+static void
+mlx5dr_definer_flex_parser_outer_set(struct mlx5dr_definer_fc *fc,
+				     const void *item,
+				     uint8_t *tag)
+{
+	mlx5dr_definer_flex_parser_set(fc, item, tag, false);
+}
+
 static void
 mlx5dr_definer_gre_key_set(struct mlx5dr_definer_fc *fc,
 			   const void *item_spec,
@@ -1782,6 +1819,47 @@ mlx5dr_definer_conv_item_esp(struct mlx5dr_definer_conv_data *cd,
 	return 0;
 }
 
+static int
+mlx5dr_definer_conv_item_flex_parser(struct mlx5dr_definer_conv_data *cd,
+				     struct rte_flow_item *item,
+				     int item_idx)
+{
+	uint32_t base_off = MLX5_BYTE_OFF(definer_hl, flex_parser.flex_parser_0);
+	const struct rte_flow_item_flex *v, *m;
+	enum mlx5dr_definer_fname fname;
+	struct mlx5dr_definer_fc *fc;
+	uint32_t i, mask, byte_off;
+	bool is_inner = cd->tunnel;
+	int ret;
+
+	m = item->mask;
+	v = item->spec;
+	mask = 0;
+	for (i = 0; i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
+		byte_off = base_off - i * sizeof(uint32_t);
+		ret = mlx5_flex_get_parser_value_per_byte_off(m, v->handle, byte_off,
+							      true, is_inner, &mask);
+		if (ret == -1) {
+			rte_errno = EINVAL;
+			return rte_errno;
+		}
+
+		if (!mask)
+			continue;
+
+		fname = MLX5DR_DEFINER_FNAME_FLEX_PARSER_0;
+		fname += (enum mlx5dr_definer_fname)i;
+		fc = &cd->fc[fname];
+		fc->byte_off = byte_off;
+		fc->item_idx = item_idx;
+		fc->tag_set = cd->tunnel ? &mlx5dr_definer_flex_parser_inner_set :
+					   &mlx5dr_definer_flex_parser_outer_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		fc->bit_mask = mask;
+	}
+	return 0;
+}
+
 static int
 mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 				struct mlx5dr_match_template *mt,
@@ -1913,6 +1991,11 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 			ret = mlx5dr_definer_conv_item_esp(&cd, items, i);
 			item_flags |= MLX5_FLOW_ITEM_ESP;
 			break;
+		case RTE_FLOW_ITEM_TYPE_FLEX:
+			ret = mlx5dr_definer_conv_item_flex_parser(&cd, items, i);
+			item_flags |= cd.tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
+						  MLX5_FLOW_ITEM_OUTER_FLEX;
+			break;
 		default:
 			DR_LOG(ERR, "Unsupported item type %d", items->type);
 			rte_errno = ENOTSUP;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index cfc4609448..9b9ece7ad0 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1033,7 +1033,7 @@ static void
 mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ecpri_parser_profile *prf =	&priv->sh->ecpri_parser;
+	struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;
 
 	if (prf->obj)
 		mlx5_devx_cmd_destroy(prf->obj);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index a766fb408e..af6380bc80 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -2257,6 +2257,12 @@ void mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev);
 void mlx5_flex_flow_translate_item(struct rte_eth_dev *dev, void *matcher,
 				   void *key, const struct rte_flow_item *item,
 				   bool is_inner);
+int mlx5_flex_get_sample_id(const struct mlx5_flex_item *tp,
+			    uint32_t idx, uint32_t *pos,
+			    bool is_inner, uint32_t *def);
+int mlx5_flex_get_parser_value_per_byte_off(const struct rte_flow_item_flex *item,
+					    void *flex, uint32_t byte_off,
+					    bool is_mask, bool tunnel, uint32_t *value);
 int mlx5_flex_acquire_index(struct rte_eth_dev *dev,
 			    struct rte_flow_item_flex_handle *handle,
 			    bool acquire);
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 4bef2296b8..ae2fc0aabe 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1229,6 +1229,7 @@ struct rte_flow_pattern_template {
 	 * tag pattern item for representor matching.
 	 */
 	bool implicit_tag;
+	uint8_t flex_item; /* Flex item index bitmap. */
 };
 
 /* Flow action template struct. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index f93dd4073c..9e7ab08b32 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -10668,7 +10668,7 @@ flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key,
 		(const struct rte_flow_item_flex *)item->spec;
 	int index = mlx5_flex_acquire_index(dev, spec->handle, false);
 
-	MLX5_ASSERT(index >= 0 && index <= (int)(sizeof(uint32_t) * CHAR_BIT));
+	MLX5_ASSERT(index >= 0 && index < (int)(sizeof(uint32_t) * CHAR_BIT));
 	if (index < 0)
 		return;
 	if (!(dev_flow->handle->flex_item & RTE_BIT32(index))) {
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index 24b7226ee6..aa317fc958 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -198,6 +198,99 @@ mlx5_flex_set_match_sample(void *misc4_m, void *misc4_v,
 	}
 #undef SET_FP_MATCH_SAMPLE_ID
 }
+
+/**
+ * Get the flex parser sample id and corresponding mask
+ * per shift and width information.
+ *
+ * @param[in] tp
+ *   Mlx5 flex item sample mapping handle.
+ * @param[in] idx
+ *   Mapping index.
+ * @param[in, out] pos
+ *   Where to search the value and mask.
+ * @param[in] is_inner
+ *   For inner matching or not.
+ * @param[out] def
+ *   Mask generated by mapping shift and width.
+ *
+ * @return
+ *   The sample id on success, -1 to skip the DUMMY field.
+ */
+int
+mlx5_flex_get_sample_id(const struct mlx5_flex_item *tp,
+			uint32_t idx, uint32_t *pos,
+			bool is_inner, uint32_t *def)
+{
+	const struct mlx5_flex_pattern_field *map = tp->map + idx;
+	uint32_t id = map->reg_id;
+
+	*def = (RTE_BIT64(map->width) - 1) << map->shift;
+	/* Skip placeholders for DUMMY fields. */
+	if (id == MLX5_INVALID_SAMPLE_REG_ID) {
+		*pos += map->width;
+		return -1;
+	}
+	MLX5_ASSERT(map->width);
+	MLX5_ASSERT(id < tp->devx_fp->num_samples);
+	if (tp->tunnel_mode == FLEX_TUNNEL_MODE_MULTI && is_inner) {
+		uint32_t num_samples = tp->devx_fp->num_samples / 2;
+
+		MLX5_ASSERT(tp->devx_fp->num_samples % 2 == 0);
+		MLX5_ASSERT(id < num_samples);
+		id += num_samples;
+	}
+	return id;
+}
+
+/**
+ * Get the flex parser mapping value per definer format_select_dw.
+ *
+ * @param[in] item
+ *   Rte flex item pointer.
+ * @param[in] flex
+ *   Mlx5 flex item sample mapping handle.
+ * @param[in] byte_off
+ *   Mlx5 flex item format_select_dw.
+ * @param[in] is_mask
+ *   True when computing the mask value, false for the spec.
+ * @param[in] tunnel
+ *   Tunnel mode or not.
+ * @param[out] value
+ *   Value calculated for this flex parser, either spec or mask.
+ *
+ * @return
+ *   0 on success, -1 for error.
+ */
+int
+mlx5_flex_get_parser_value_per_byte_off(const struct rte_flow_item_flex *item,
+					void *flex, uint32_t byte_off,
+					bool is_mask, bool tunnel, uint32_t *value)
+{
+	struct mlx5_flex_pattern_field *map;
+	struct mlx5_flex_item *tp = flex;
+	uint32_t def, i, pos, val;
+	int id;
+
+	*value = 0;
+	for (i = 0, pos = 0; i < tp->mapnum && pos < item->length * CHAR_BIT; i++) {
+		map = tp->map + i;
+		id = mlx5_flex_get_sample_id(tp, i, &pos, tunnel, &def);
+		if (id == -1)
+			continue;
+		if (id >= (int)tp->devx_fp->num_samples || id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
+			return -1;
+		if (byte_off == tp->devx_fp->sample_ids[id].format_select_dw * sizeof(uint32_t)) {
+			val = mlx5_flex_get_bitfield(item, pos, map->width, map->shift);
+			if (is_mask)
+				val &= RTE_BE32(def);
+			*value |= val;
+		}
+		pos += map->width;
+	}
+	return 0;
+}
+
 /**
  * Translate item pattern into matcher fields according to translation
  * array.
@@ -240,26 +333,17 @@ mlx5_flex_flow_translate_item(struct rte_eth_dev *dev,
 	MLX5_ASSERT(mlx5_flex_index(priv, tp) >= 0);
 	for (i = 0; i < tp->mapnum; i++) {
 		struct mlx5_flex_pattern_field *map = tp->map + i;
-		uint32_t id = map->reg_id;
-		uint32_t def = (RTE_BIT64(map->width) - 1) << map->shift;
-		uint32_t val, msk;
+		uint32_t val, msk, def;
+		int id = mlx5_flex_get_sample_id(tp, i, &pos, is_inner, &def);
 
-		/* Skip placeholders for DUMMY fields. */
-		if (id == MLX5_INVALID_SAMPLE_REG_ID) {
-			pos += map->width;
+		if (id == -1)
 			continue;
-		}
+		MLX5_ASSERT(id < (int)tp->devx_fp->num_samples);
+		if (id >= (int)tp->devx_fp->num_samples ||
+		    id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
+			return;
 		val = mlx5_flex_get_bitfield(spec, pos, map->width, map->shift);
 		msk = mlx5_flex_get_bitfield(mask, pos, map->width, map->shift);
-		MLX5_ASSERT(map->width);
-		MLX5_ASSERT(id < tp->devx_fp->num_samples);
-		if (tp->tunnel_mode == FLEX_TUNNEL_MODE_MULTI && is_inner) {
-			uint32_t num_samples = tp->devx_fp->num_samples / 2;
-
-			MLX5_ASSERT(tp->devx_fp->num_samples % 2 == 0);
-			MLX5_ASSERT(id < num_samples);
-			id += num_samples;
-		}
 		if (attr->ext_sample_id)
 			sample_id = tp->devx_fp->sample_ids[id].sample_id;
 		else
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 9e1912ec69..1066829ca5 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -4343,6 +4343,36 @@ flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
 					      &modify_action);
 }
 
+static int
+flow_hw_flex_item_acquire(struct rte_eth_dev *dev,
+			  struct rte_flow_item_flex_handle *handle,
+			  uint8_t *flex_item)
+{
+	int index = mlx5_flex_acquire_index(dev, handle, false);
+
+	MLX5_ASSERT(index >= 0 && index < (int)(sizeof(uint32_t) * CHAR_BIT));
+	if (index < 0)
+		return -1;
+	if (!(*flex_item & RTE_BIT32(index))) {
+		/* Don't count same flex item again. */
+		if (mlx5_flex_acquire_index(dev, handle, true) != index)
+			MLX5_ASSERT(false);
+		*flex_item |= (uint8_t)RTE_BIT32(index);
+	}
+	return 0;
+}
+
+static void
+flow_hw_flex_item_release(struct rte_eth_dev *dev, uint8_t *flex_item)
+{
+	while (*flex_item) {
+		int index = rte_bsf32(*flex_item);
+
+		mlx5_flex_release_index(dev, index);
+		*flex_item &= ~(uint8_t)RTE_BIT32(index);
+	}
+}
+
 /**
  * Create flow action template.
  *
@@ -4748,6 +4778,7 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
 		case RTE_FLOW_ITEM_TYPE_CONNTRACK:
 		case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
 		case RTE_FLOW_ITEM_TYPE_ESP:
+		case RTE_FLOW_ITEM_TYPE_FLEX:
 			break;
 		case RTE_FLOW_ITEM_TYPE_INTEGRITY:
 			/*
@@ -4825,6 +4856,7 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
 		.mask = &tag_m,
 		.last = NULL
 	};
+	unsigned int i = 0;
 
 	if (flow_hw_pattern_validate(dev, attr, items, error))
 		return NULL;
@@ -4895,6 +4927,19 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
 			return NULL;
 		}
 	}
+	for (i = 0; items[i].type != RTE_FLOW_ITEM_TYPE_END; ++i) {
+		if (items[i].type == RTE_FLOW_ITEM_TYPE_FLEX) {
+			const struct rte_flow_item_flex *spec =
+				(const struct rte_flow_item_flex *)items[i].spec;
+			struct rte_flow_item_flex_handle *handle = spec->handle;
+
+			if (flow_hw_flex_item_acquire(dev, handle, &it->flex_item)) {
+				claim_zero(mlx5dr_match_template_destroy(it->mt));
+				mlx5_free(it);
+				return NULL;
+			}
+		}
+	}
 	__atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
 	LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
 	return it;
@@ -4914,7 +4959,7 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
+flow_hw_pattern_template_destroy(struct rte_eth_dev *dev,
 			      struct rte_flow_pattern_template *template,
 			      struct rte_flow_error *error __rte_unused)
 {
@@ -4930,6 +4975,7 @@ flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
 				    MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT))
 		mlx5_free_srh_flex_parser(dev);
 	LIST_REMOVE(template, next);
+	flow_hw_flex_item_release(dev, &template->flex_item);
 	claim_zero(mlx5dr_match_template_destroy(template->mt));
 	mlx5_free(template);
 	return 0;
-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v3 4/7] net/mlx5: add flex item modify field implementation
  2023-02-23  7:06   ` [PATCH v3 0/7] add flex item implementation Rongwei Liu
                       ` (2 preceding siblings ...)
  2023-02-23  7:06     ` [PATCH v3 3/7] net/mlx5/hws: add hws flex item matching support Rongwei Liu
@ 2023-02-23  7:06     ` Rongwei Liu
  2023-02-23  7:06     ` [PATCH v3 5/7] net/mlx5: return error for sws modify field Rongwei Liu
                       ` (2 subsequent siblings)
  6 siblings, 0 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-23  7:06 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland

Add flex item modify field HWS implementation.
The minimum modify boundary is one byte.
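
A complementary sketch (illustrative only; "flex_handle" assumed):
copying one byte out of the flex item payload into the 32-bit META
field.

  struct rte_flow_action_modify_field copy_conf = {
          .operation = RTE_FLOW_MODIFY_SET,
          .dst = { .field = RTE_FLOW_FIELD_META },
          .src = {
                  .field = RTE_FLOW_FIELD_FLEX_ITEM,
                  .flex_handle = flex_handle,
                  .offset = 0,    /* bits, byte aligned */
          },
          .width = 8,             /* one byte, the minimum boundary */
  };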

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/common/mlx5/mlx5_prm.h  |   1 +
 drivers/net/mlx5/mlx5_flow.h    |   3 +
 drivers/net/mlx5/mlx5_flow_dv.c | 165 +++++++++++++++++++++++++++++---
 drivers/net/mlx5/mlx5_flow_hw.c |  14 ++-
 4 files changed, 170 insertions(+), 13 deletions(-)

diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 613cc6face..74c5e2e371 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -761,6 +761,7 @@ enum mlx5_modification_field {
 	MLX5_MODI_GTPU_FIRST_EXT_DW_0 = 0x76,
 	MLX5_MODI_HASH_RESULT = 0x81,
 	MLX5_MODI_OUT_IPV6_NEXT_HDR = 0x4A,
+	MLX5_MODI_INVALID = INT_MAX,
 };
 
 /* Total number of metadata reg_c's. */
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index ae2fc0aabe..d6831d849d 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1084,6 +1084,8 @@ struct field_modify_info {
 	uint32_t size; /* Size of field in protocol header, in bytes. */
 	uint32_t offset; /* Offset of field in protocol header, in bytes. */
 	enum mlx5_modification_field id;
+	uint32_t shift;
+	uint8_t is_flex; /* Temporary indicator for flex item modify field WA. */
 };
 
 /* HW steering flow attributes. */
@@ -1248,6 +1250,7 @@ struct rte_flow_actions_template {
 	uint16_t mhdr_off; /* Offset of DR modify header action. */
 	uint32_t refcnt; /* Reference counter. */
 	uint16_t rx_cpy_pos; /* Action position of Rx metadata to be copied. */
+	uint8_t flex_item; /* Flex item index bitmap. */
 };
 
 /* Jump action struct. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 9e7ab08b32..8355249ce5 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -414,10 +414,15 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
 			++field;
 			continue;
 		}
-		/* Deduce actual data width in bits from mask value. */
-		off_b = rte_bsf32(mask) + carry_b;
-		size_b = sizeof(uint32_t) * CHAR_BIT -
-			 off_b - __builtin_clz(mask);
+		if (type == MLX5_MODIFICATION_TYPE_COPY && field->is_flex) {
+			off_b = 32 - field->shift + carry_b - field->size * CHAR_BIT;
+			size_b = field->size * CHAR_BIT - carry_b;
+		} else {
+			/* Deduce actual data width in bits from mask value. */
+			off_b = rte_bsf32(mask) + carry_b;
+			size_b = sizeof(uint32_t) * CHAR_BIT -
+				 off_b - __builtin_clz(mask);
+		}
 		MLX5_ASSERT(size_b);
 		actions[i] = (struct mlx5_modification_cmd) {
 			.action_type = type,
@@ -437,40 +442,46 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
 			 * Destination field overflow. Copy leftovers of
 			 * a source field to the next destination field.
 			 */
-			carry_b = 0;
 			if ((size_b > dcopy->size * CHAR_BIT - dcopy->offset) &&
 			    dcopy->size != 0) {
 				actions[i].length =
 					dcopy->size * CHAR_BIT - dcopy->offset;
-				carry_b = actions[i].length;
+				carry_b += actions[i].length;
 				next_field = false;
+			} else {
+				carry_b = 0;
 			}
 			/*
 			 * Not enough bits in a source filed to fill a
 			 * destination field. Switch to the next source.
 			 */
 			if ((size_b < dcopy->size * CHAR_BIT - dcopy->offset) &&
-			    (size_b == field->size * CHAR_BIT - off_b)) {
-				actions[i].length =
-					field->size * CHAR_BIT - off_b;
+			    ((size_b == field->size * CHAR_BIT - off_b) ||
+			     field->is_flex)) {
+				actions[i].length = size_b;
 				dcopy->offset += actions[i].length;
 				next_dcopy = false;
 			}
-			if (next_dcopy)
-				++dcopy;
 		} else {
 			MLX5_ASSERT(item->spec);
 			data = flow_dv_fetch_field((const uint8_t *)item->spec +
 						   field->offset, field->size);
 			/* Shift out the trailing masked bits from data. */
 			data = (data & mask) >> off_b;
+			if (field->is_flex)
+				actions[i].offset = 32 - field->shift - field->size * CHAR_BIT;
 			actions[i].data1 = rte_cpu_to_be_32(data);
 		}
 		/* Convert entire record to expected big-endian format. */
 		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
+		if ((type != MLX5_MODIFICATION_TYPE_COPY ||
+		     dcopy->id != (enum mlx5_modification_field)UINT32_MAX) &&
+		    field->id != (enum mlx5_modification_field)UINT32_MAX)
+			++i;
+		if (next_dcopy && type == MLX5_MODIFICATION_TYPE_COPY)
+			++dcopy;
 		if (next_field)
 			++field;
-		++i;
 	} while (field->size);
 	if (resource->actions_num == i)
 		return rte_flow_error_set(error, EINVAL,
@@ -1422,6 +1433,131 @@ flow_modify_info_mask_32_masked(uint32_t length, uint32_t off, uint32_t post_mas
 	return rte_cpu_to_be_32(mask & post_mask);
 }
 
+static void
+mlx5_modify_flex_item(const struct rte_eth_dev *dev,
+		      const struct mlx5_flex_item *flex,
+		      const struct rte_flow_action_modify_data *data,
+		      struct field_modify_info *info,
+		      uint32_t *mask, uint32_t width)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hca_flex_attr *attr = &priv->sh->cdev->config.hca_attr.flex;
+	uint32_t i, j;
+	int id = 0;
+	uint32_t pos = 0;
+	const struct mlx5_flex_pattern_field *map;
+	uint32_t offset = data->offset;
+	uint32_t width_left = width;
+	uint32_t def;
+	uint32_t cur_width = 0;
+	uint32_t tmp_ofs;
+	uint32_t idx = 0;
+	struct field_modify_info tmp;
+	int tmp_id;
+
+	if (!attr->ext_sample_id) {
+		DRV_LOG(ERR, "FW doesn't support modify field with flex item.");
+		return;
+	}
+	/*
+	 * Search for the mapping instance until the accumulated width is
+	 * no less than data->offset.
+	 */
+	for (i = 0; i < flex->mapnum; i++) {
+		if (flex->map[i].width + pos > data->offset)
+			break;
+		pos += flex->map[i].width;
+	}
+	if (i >= flex->mapnum)
+		return;
+	tmp_ofs = pos < data->offset ? data->offset - pos : 0;
+	for (j = i; i < flex->mapnum && width_left > 0; ) {
+		map = flex->map + i;
+		id = mlx5_flex_get_sample_id(flex, i, &pos, false, &def);
+		if (id == -1) {
+			i++;
+			/* The remaining length is all dummy. */
+			if (pos >= data->offset + width)
+				return;
+			cur_width = map->width;
+		/* One mapping instance covers the whole width. */
+		} else if (pos + map->width >= (data->offset + width)) {
+			cur_width = width_left;
+		} else {
+			cur_width = cur_width + map->width - tmp_ofs;
+			pos += map->width;
+			/*
+			 * Continue to search next until:
+			 * 1. Another flex parser ID.
+			 * 2. Width has been covered.
+			 */
+			for (j = i + 1; j < flex->mapnum; j++) {
+				tmp_id = mlx5_flex_get_sample_id(flex, j, &pos, false, &def);
+				if (tmp_id == -1) {
+					i = j;
+					pos -= flex->map[j].width;
+					break;
+				}
+				if (id >= (int)flex->devx_fp->num_samples ||
+				    id >= MLX5_GRAPH_NODE_SAMPLE_NUM ||
+				    tmp_id >= (int)flex->devx_fp->num_samples ||
+				    tmp_id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
+					return;
+				if (flex->devx_fp->sample_ids[id].id !=
+						flex->devx_fp->sample_ids[tmp_id].id ||
+				    flex->map[j].shift != flex->map[j - 1].width +
+							  flex->map[j - 1].shift) {
+					i = j;
+					break;
+				}
+				if ((pos + flex->map[j].width) >= (data->offset + width)) {
+					cur_width = width_left;
+					break;
+				}
+				pos += flex->map[j].width;
+				cur_width += flex->map[j].width;
+			}
+		}
+		if (cur_width > width_left)
+			cur_width = width_left;
+		else if (cur_width < width_left && (j == flex->mapnum || i == flex->mapnum))
+			return;
+
+		MLX5_ASSERT(id < (int)flex->devx_fp->num_samples);
+		if (id >= (int)flex->devx_fp->num_samples || id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
+			return;
+		/* Use invalid entry as placeholder for DUMMY mapping. */
+		info[idx] = (struct field_modify_info){cur_width / CHAR_BIT, offset / CHAR_BIT,
+			     id == -1 ? MLX5_MODI_INVALID :
+			     (enum mlx5_modification_field)
+			     flex->devx_fp->sample_ids[id].modify_field_id,
+			     map->shift + tmp_ofs, 1};
+		offset += cur_width;
+		width_left -= cur_width;
+		if (!mask) {
+			info[idx].offset = (32 - cur_width - map->shift - tmp_ofs);
+			info[idx].size = cur_width / CHAR_BIT + info[idx].offset / CHAR_BIT;
+		}
+		cur_width = 0;
+		tmp_ofs = 0;
+		idx++;
+	}
+	if (unlikely(width_left > 0)) {
+		MLX5_ASSERT(false);
+		return;
+	}
+	if (mask)
+		memset(mask, 0xff, data->offset / CHAR_BIT + width / CHAR_BIT);
+	/* Re-order the info to follow IPv6 address. */
+	for (i = 0; i < idx / 2; i++) {
+		tmp = info[i];
+		MLX5_ASSERT(info[i].id);
+		MLX5_ASSERT(info[idx - 1 - i].id);
+		info[i] = info[idx - 1 - i];
+		info[idx - 1 - i] = tmp;
+	}
+}
+
 void
 mlx5_flow_field_id_to_modify_info
 		(const struct rte_flow_action_modify_data *data,
@@ -1893,6 +2029,11 @@ mlx5_flow_field_id_to_modify_info
 		else
 			info[idx].offset = off_be;
 		break;
+	case RTE_FLOW_FIELD_FLEX_ITEM:
+		MLX5_ASSERT(data->flex_handle != NULL && !(data->offset & 0x7));
+		mlx5_modify_flex_item(dev, (const struct mlx5_flex_item *)data->flex_handle,
+				      data, info, mask, width);
+		break;
 	case RTE_FLOW_FIELD_POINTER:
 	case RTE_FLOW_FIELD_VALUE:
 	default:
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 1066829ca5..907aab8bf3 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -4562,6 +4562,17 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
 			at->actions[i].conf = actions->conf;
 			at->masks[i].conf = masks->conf;
 		}
+		if (actions->type == RTE_FLOW_ACTION_TYPE_MODIFY_FIELD) {
+			const struct rte_flow_action_modify_field *info = actions->conf;
+
+			if ((info->dst.field == RTE_FLOW_FIELD_FLEX_ITEM &&
+			     flow_hw_flex_item_acquire(dev, info->dst.flex_handle,
+						       &at->flex_item)) ||
+			     (info->src.field == RTE_FLOW_FIELD_FLEX_ITEM &&
+			      flow_hw_flex_item_acquire(dev, info->src.flex_handle,
+							&at->flex_item)))
+				goto error;
+		}
 	}
 	at->tmpl = flow_hw_dr_actions_template_create(at);
 	if (!at->tmpl)
@@ -4593,7 +4604,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-flow_hw_actions_template_destroy(struct rte_eth_dev *dev __rte_unused,
+flow_hw_actions_template_destroy(struct rte_eth_dev *dev,
 				 struct rte_flow_actions_template *template,
 				 struct rte_flow_error *error __rte_unused)
 {
@@ -4606,6 +4617,7 @@ flow_hw_actions_template_destroy(struct rte_eth_dev *dev __rte_unused,
 				   "action template in using");
 	}
 	LIST_REMOVE(template, next);
+	flow_hw_flex_item_release(dev, &template->flex_item);
 	if (template->tmpl)
 		mlx5dr_action_template_destroy(template->tmpl);
 	mlx5_free(template);
-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v3 5/7] net/mlx5: return error for sws modify field
  2023-02-23  7:06   ` [PATCH v3 0/7] add flex item implementation Rongwei Liu
                       ` (3 preceding siblings ...)
  2023-02-23  7:06     ` [PATCH v3 4/7] net/mlx5: add flex item modify field implementation Rongwei Liu
@ 2023-02-23  7:06     ` Rongwei Liu
  2023-02-23  7:06     ` [PATCH v3 6/7] doc/mlx5: update mlx5 doc Rongwei Liu
  2023-02-23  7:06     ` [PATCH v3 7/7] net/mlx5: add error message Rongwei Liu
  6 siblings, 0 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-23  7:06 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland, Dariusz Sosnowski

Return an unsupported error message when the application tries to
modify a flex item field.

Validation of packet modification actions for SW steering checked
whether either the source or the destination field of a MODIFY_FIELD
action was a flex item.
The DEC_TTL action carries no action configuration, so dereferencing
its source or destination field is invalid. The validation of the
source and destination field types is therefore moved to the
MODIFY_FIELD-specific validation function, so that field types are
validated if and only if the action type is MODIFY_FIELD.
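
Illustrative only (attr/pattern/actions are assumed to be built
elsewhere with a flex item used in MODIFY_FIELD): under SW steering the
request is now rejected with ENOTSUP and a verbose message instead of
misbehaving.

  struct rte_flow_error err = { .message = NULL };

  if (rte_flow_validate(port_id, &attr, pattern, actions, &err) != 0)
          printf("validation failed: %s\n",
                 err.message ? err.message : "(none)");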

Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow_dv.c | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 8355249ce5..3d760d1913 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -4838,6 +4838,7 @@ flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 					  NULL, "action configuration not set");
+
 	if (action_flags & MLX5_FLOW_ACTION_ENCAP)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
@@ -5163,17 +5164,21 @@ flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
 	struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
 	const struct rte_flow_action_modify_field *action_modify_field =
 		action->conf;
-	uint32_t dst_width = mlx5_flow_item_field_width(dev,
-				action_modify_field->dst.field,
-				-1, attr, error);
-	uint32_t src_width = mlx5_flow_item_field_width(dev,
-				action_modify_field->src.field,
-				dst_width, attr, error);
+	uint32_t dst_width, src_width;
 
 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
 	if (ret)
 		return ret;
-
+	if (action_modify_field->src.field == RTE_FLOW_FIELD_FLEX_ITEM ||
+	    action_modify_field->dst.field == RTE_FLOW_FIELD_FLEX_ITEM)
+		return rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_ACTION, action,
+				"flex item fields modification"
+				" is not supported");
+	dst_width = mlx5_flow_item_field_width(dev, action_modify_field->dst.field,
+					       -1, attr, error);
+	src_width = mlx5_flow_item_field_width(dev, action_modify_field->src.field,
+					       dst_width, attr, error);
 	if (action_modify_field->width == 0)
 		return rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ACTION, action,
-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v3 6/7] doc/mlx5: update mlx5 doc
  2023-02-23  7:06   ` [PATCH v3 0/7] add flex item implementation Rongwei Liu
                       ` (4 preceding siblings ...)
  2023-02-23  7:06     ` [PATCH v3 5/7] net/mlx5: return error for sws modify field Rongwei Liu
@ 2023-02-23  7:06     ` Rongwei Liu
  2023-02-23 12:37       ` Thomas Monjalon
  2023-02-23  7:06     ` [PATCH v3 7/7] net/mlx5: add error message Rongwei Liu
  6 siblings, 1 reply; 31+ messages in thread
From: Rongwei Liu @ 2023-02-23  7:06 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland

Add the flex item matching and modify field features to the
mlx5 documentation.

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 doc/guides/nics/mlx5.rst | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index f182baa37e..09828a5cf4 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -108,6 +108,7 @@ Features
 - Sub-Function.
 - Matching on represented port.
 - Matching on IPv6 routing extension header.
+- Modify flex item field.
 
 
 Limitations
@@ -291,11 +292,12 @@ Limitations
   - Firmware supports 8 global sample fields.
     Each flex item allocates non-shared sample fields from that pool.
   - Supported flex item can have 1 input link - ``eth`` or ``udp``
-    and up to 2 output links - ``ipv4`` or ``ipv6``.
+    and up to 3 output links - ``ipv4`` or ``ipv6``.
   - Flex item fields (``next_header``, ``next_protocol``, ``samples``)
     do not participate in RSS hash functions.
   - In flex item configuration, ``next_header.field_base`` value
     must be byte aligned (multiple of 8).
+  - For a modify field action on a flex item, the offset must be byte aligned (multiple of 8).
 
 - No Tx metadata go to the E-Switch steering domain for the Flow group 0.
   The flows within group 0 and set metadata action are rejected by hardware.
-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v3 7/7] net/mlx5: add error message
  2023-02-23  7:06   ` [PATCH v3 0/7] add flex item implementation Rongwei Liu
                       ` (5 preceding siblings ...)
  2023-02-23  7:06     ` [PATCH v3 6/7] doc/mlx5: update mlx5 doc Rongwei Liu
@ 2023-02-23  7:06     ` Rongwei Liu
  6 siblings, 0 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-23  7:06 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland

When pattern/action template creation fails, testpmd expects error
details.
The driver did not set the error structure, so testpmd crashed with a
segmentation fault.
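
For context (not part of the patch), here is a minimal sketch of the caller
side this fix serves: after a failed template creation the application reads
the error structure, so the driver must fill it in. The wrapper name and its
parameters below are illustrative only.

#include <stdio.h>
#include <rte_flow.h>

static struct rte_flow_actions_template *
create_at_or_report(uint16_t port_id,
		    const struct rte_flow_actions_template_attr *attr,
		    const struct rte_flow_action *actions,
		    const struct rte_flow_action *masks)
{
	struct rte_flow_error error = { .message = NULL };
	struct rte_flow_actions_template *at;

	at = rte_flow_actions_template_create(port_id, attr, actions,
					      masks, &error);
	if (at == NULL)
		fprintf(stderr, "template creation failed: %s\n",
			error.message ? error.message : "(no message set)");
	return at;
}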

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow_hw.c | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 907aab8bf3..38b9d55f3a 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -4570,8 +4570,12 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
 						       &at->flex_item)) ||
 			     (info->src.field == RTE_FLOW_FIELD_FLEX_ITEM &&
 			      flow_hw_flex_item_acquire(dev, info->src.flex_handle,
-							&at->flex_item)))
+							&at->flex_item))) {
+				rte_flow_error_set(error, rte_errno,
+						   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+						   "Failed to acquire flex item");
 				goto error;
+			}
 		}
 	}
 	at->tmpl = flow_hw_dr_actions_template_create(at);
@@ -4587,6 +4591,9 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
 			mlx5dr_action_template_destroy(at->tmpl);
 		mlx5_free(at);
 	}
+	rte_flow_error_set(error, rte_errno,
+			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+			   "Failed to create action template");
 	return NULL;
 }
 
@@ -4936,6 +4943,9 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
 		    (mlx5_alloc_srh_flex_parser(dev))) {
 			claim_zero(mlx5dr_match_template_destroy(it->mt));
 			mlx5_free(it);
+			rte_flow_error_set(error, rte_errno,
+					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+					   "cannot create IPv6 routing extension support");
 			return NULL;
 		}
 	}
@@ -4948,6 +4958,9 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
 			if (flow_hw_flex_item_acquire(dev, handle, &it->flex_item)) {
 				claim_zero(mlx5dr_match_template_destroy(it->mt));
 				mlx5_free(it);
+				rte_flow_error_set(error, rte_errno,
+						   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+						   "Failed to acquire flex item");
 				return NULL;
 			}
 		}
-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH v3 6/7] doc/mlx5: update mlx5 doc
  2023-02-23  7:06     ` [PATCH v3 6/7] doc/mlx5: update mlx5 doc Rongwei Liu
@ 2023-02-23 12:37       ` Thomas Monjalon
  2023-02-23 12:48         ` [PATCH v4 0/6] add flex item implementation Rongwei Liu
  0 siblings, 1 reply; 31+ messages in thread
From: Thomas Monjalon @ 2023-02-23 12:37 UTC (permalink / raw)
  To: matan, viacheslavo, orika, Rongwei Liu; +Cc: dev, rasland

23/02/2023 08:06, Rongwei Liu:
> Add flex item matching and modify field feature into
> mlx5 documents.
> 
> Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
> Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

Please don't do a separate commit for doc update.
The doc should be updated with the related code in the same patch.




^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v4 0/6] add flex item implementation
  2023-02-23 12:37       ` Thomas Monjalon
@ 2023-02-23 12:48         ` Rongwei Liu
  2023-02-23 12:48           ` [PATCH v4 1/6] net/mlx5: enable hws flex item create Rongwei Liu
                             ` (6 more replies)
  0 siblings, 7 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-23 12:48 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland

Implement the flex item matching and modify field in HWS.

v4: squash doc commit.
v3: add error message for testpmd output.

Rongwei Liu (6):
  net/mlx5: enable hws flex item create
  net/mlx5: add IPv6 protocol as flex item input
  net/mlx5/hws: add hws flex item matching support
  net/mlx5: add flex item modify field implementation
  net/mlx5: return error for sws modify field
  net/mlx5: add error message

 doc/guides/nics/mlx5.rst              |   4 +-
 drivers/common/mlx5/mlx5_prm.h        |   1 +
 drivers/net/mlx5/hws/mlx5dr_definer.c |  83 ++++++++++++
 drivers/net/mlx5/linux/mlx5_os.c      |  27 ++--
 drivers/net/mlx5/mlx5.c               |   2 +-
 drivers/net/mlx5/mlx5.h               |   6 +
 drivers/net/mlx5/mlx5_flow.h          |   4 +
 drivers/net/mlx5/mlx5_flow_dv.c       | 186 +++++++++++++++++++++++---
 drivers/net/mlx5/mlx5_flow_flex.c     | 135 ++++++++++++++++---
 drivers/net/mlx5/mlx5_flow_hw.c       |  77 ++++++++++-
 10 files changed, 473 insertions(+), 52 deletions(-)

-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v4 1/6] net/mlx5: enable hws flex item create
  2023-02-23 12:48         ` [PATCH v4 0/6] add flex item implementation Rongwei Liu
@ 2023-02-23 12:48           ` Rongwei Liu
  2023-02-23 12:48           ` [PATCH v4 2/6] net/mlx5: add IPv6 protocol as flex item input Rongwei Liu
                             ` (5 subsequent siblings)
  6 siblings, 0 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-23 12:48 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland

Enable flex item create and destroy with dv_flow_en=2
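
Not part of the patch: a minimal usage sketch, assuming port 0 was probed
with the dv_flow_en=2 devarg. A real rte_flow_item_flex_conf must describe
the header layout (samples, input/output links); it is left empty here only
to keep the sketch short, and the helper name is made up.

#include <stdio.h>
#include <rte_flow.h>

static int
flex_item_roundtrip(uint16_t port_id)
{
	struct rte_flow_item_flex_conf conf = { 0 }; /* placeholder config */
	struct rte_flow_error error = { 0 };
	struct rte_flow_item_flex_handle *handle;

	handle = rte_flow_flex_item_create(port_id, &conf, &error);
	if (handle == NULL) {
		printf("flex item create failed: %s\n",
		       error.message ? error.message : "(no message)");
		return -1;
	}
	/* ... reference the handle from pattern/actions templates ... */
	return rte_flow_flex_item_release(port_id, handle, &error);
}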

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.c | 27 +++++++++++++++------------
 drivers/net/mlx5/mlx5_flow_hw.c  |  2 ++
 2 files changed, 17 insertions(+), 12 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index a71474c90a..f5b3edea99 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -474,10 +474,20 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 	err = mlx5_alloc_table_hash_list(priv);
 	if (err)
 		goto error;
-	if (priv->sh->config.dv_flow_en == 2)
-		return 0;
 	/* The resources below are only valid with DV support. */
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	/* Init shared flex parsers list, no need lcore_share */
+	snprintf(s, sizeof(s), "%s_flex_parsers_list", sh->ibdev_name);
+	sh->flex_parsers_dv = mlx5_list_create(s, sh, false,
+					       mlx5_flex_parser_create_cb,
+					       mlx5_flex_parser_match_cb,
+					       mlx5_flex_parser_remove_cb,
+					       mlx5_flex_parser_clone_cb,
+					       mlx5_flex_parser_clone_free_cb);
+	if (!sh->flex_parsers_dv)
+		goto error;
+	if (priv->sh->config.dv_flow_en == 2)
+		return 0;
 	/* Init port id action list. */
 	snprintf(s, sizeof(s), "%s_port_id_action_list", sh->ibdev_name);
 	sh->port_id_action_list = mlx5_list_create(s, sh, true,
@@ -518,16 +528,9 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 					      flow_dv_dest_array_clone_free_cb);
 	if (!sh->dest_array_list)
 		goto error;
-	/* Init shared flex parsers list, no need lcore_share */
-	snprintf(s, sizeof(s), "%s_flex_parsers_list", sh->ibdev_name);
-	sh->flex_parsers_dv = mlx5_list_create(s, sh, false,
-					       mlx5_flex_parser_create_cb,
-					       mlx5_flex_parser_match_cb,
-					       mlx5_flex_parser_remove_cb,
-					       mlx5_flex_parser_clone_cb,
-					       mlx5_flex_parser_clone_free_cb);
-	if (!sh->flex_parsers_dv)
-		goto error;
+#else
+	if (priv->sh->config.dv_flow_en == 2)
+		return 0;
 #endif
 #ifdef HAVE_MLX5DV_DR
 	void *domain;
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 7457187b19..9e1912ec69 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -8436,6 +8436,8 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
 	.query = flow_hw_query,
 	.get_aged_flows = flow_hw_get_aged_flows,
 	.get_q_aged_flows = flow_hw_get_q_aged_flows,
+	.item_create = flow_dv_item_create,
+	.item_release = flow_dv_item_release,
 };
 
 /**
-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v4 2/6] net/mlx5: add IPv6 protocol as flex item input
  2023-02-23 12:48         ` [PATCH v4 0/6] add flex item implementation Rongwei Liu
  2023-02-23 12:48           ` [PATCH v4 1/6] net/mlx5: enable hws flex item create Rongwei Liu
@ 2023-02-23 12:48           ` Rongwei Liu
  2023-02-23 12:48           ` [PATCH v4 3/6] net/mlx5/hws: add hws flex item matching support Rongwei Liu
                             ` (4 subsequent siblings)
  6 siblings, 0 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-23 12:48 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland

Support the IPv6 protocol as a new flex item input link.
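
Not part of the patch: a hedged sketch of what an IPv6 input link could look
like in the flex item configuration. The next-header value 253 is only a
placeholder (experimental range). Per the check added below, the mask must
cover exactly the full next-header field (hdr.proto = 0xff), all other
fields zero.

#include <rte_flow.h>

/* Placeholder next-header value selecting the custom protocol. */
static const struct rte_flow_item_ipv6 ipv6_spec = { .hdr.proto = 253 };
/* Full mask on the next-header field, all other fields zero. */
static const struct rte_flow_item_ipv6 ipv6_mask = { .hdr.proto = 0xff };

static const struct rte_flow_item_flex_link input_link = {
	.item = {
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.spec = &ipv6_spec,
		.mask = &ipv6_mask,
	},
};
/* Referenced from the flex item configuration roughly as:
 * conf.input_link = &input_link; conf.nb_inputs = 1; */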

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow_flex.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index 35f2a9923d..24b7226ee6 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -1050,6 +1050,22 @@ mlx5_flex_arc_in_udp(const struct rte_flow_item *item,
 	return rte_be_to_cpu_16(spec->hdr.dst_port);
 }
 
+static int
+mlx5_flex_arc_in_ipv6(const struct rte_flow_item *item,
+		      struct rte_flow_error *error)
+{
+	const struct rte_flow_item_ipv6 *spec = item->spec;
+	const struct rte_flow_item_ipv6 *mask = item->mask;
+	struct rte_flow_item_ipv6 ip = { .hdr.proto = 0xff };
+
+	if (memcmp(mask, &ip, sizeof(struct rte_flow_item_ipv6))) {
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
+			 "invalid ipv6 item mask, full mask is desired");
+	}
+	return spec->hdr.proto;
+}
+
 static int
 mlx5_flex_translate_arc_in(struct mlx5_hca_flex_attr *attr,
 			   const struct rte_flow_item_flex_conf *conf,
@@ -1096,6 +1112,9 @@ mlx5_flex_translate_arc_in(struct mlx5_hca_flex_attr *attr,
 		case RTE_FLOW_ITEM_TYPE_UDP:
 			ret = mlx5_flex_arc_in_udp(rte_item, error);
 			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ret = mlx5_flex_arc_in_ipv6(rte_item, error);
+			break;
 		default:
 			MLX5_ASSERT(false);
 			return rte_flow_error_set
-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v4 3/6] net/mlx5/hws: add hws flex item matching support
  2023-02-23 12:48         ` [PATCH v4 0/6] add flex item implementation Rongwei Liu
  2023-02-23 12:48           ` [PATCH v4 1/6] net/mlx5: enable hws flex item create Rongwei Liu
  2023-02-23 12:48           ` [PATCH v4 2/6] net/mlx5: add IPv6 protocol as flex item input Rongwei Liu
@ 2023-02-23 12:48           ` Rongwei Liu
  2023-02-23 12:48           ` [PATCH v4 4/6] net/mlx5: add flex item modify field implementation Rongwei Liu
                             ` (3 subsequent siblings)
  6 siblings, 0 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-23 12:48 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland, Alex Vesker

Support flex item matching in HWS; the syntax follows SWS exactly.

The flex item must be created in advance and must follow the current
JSON mapping logic.
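
Not part of the patch: an illustrative sketch of referencing an
already-created flex handle from a template-API pattern. The 4-byte sample
pattern, the full-byte mask, and the wrapper name are placeholders.

#include <rte_flow.h>

static struct rte_flow_pattern_template *
flex_pattern_template(uint16_t port_id,
		      struct rte_flow_item_flex_handle *handle)
{
	static const uint8_t spec_bytes[4] = { 0xde, 0xad, 0xbe, 0xef };
	static const uint8_t mask_bytes[4] = { 0xff, 0xff, 0xff, 0xff };
	const struct rte_flow_item_flex flex_spec = {
		.handle = handle, .length = sizeof(spec_bytes), .pattern = spec_bytes,
	};
	const struct rte_flow_item_flex flex_mask = {
		.handle = handle, .length = sizeof(mask_bytes), .pattern = mask_bytes,
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_FLEX,
		  .spec = &flex_spec, .mask = &flex_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_pattern_template_attr attr = { .ingress = 1 };
	struct rte_flow_error error = { 0 };

	return rte_flow_pattern_template_create(port_id, &attr, pattern, &error);
}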

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Reviewed-by: Alex Vesker <valex@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/hws/mlx5dr_definer.c |  83 ++++++++++++++++++
 drivers/net/mlx5/mlx5.c               |   2 +-
 drivers/net/mlx5/mlx5.h               |   6 ++
 drivers/net/mlx5/mlx5_flow.h          |   1 +
 drivers/net/mlx5/mlx5_flow_dv.c       |   2 +-
 drivers/net/mlx5/mlx5_flow_flex.c     | 116 ++++++++++++++++++++++----
 drivers/net/mlx5/mlx5_flow_hw.c       |  48 ++++++++++-
 7 files changed, 239 insertions(+), 19 deletions(-)

diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 6374f9df33..5b78092843 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -311,6 +311,43 @@ mlx5dr_definer_ipv6_routing_ext_set(struct mlx5dr_definer_fc *fc,
 	DR_SET(tag, val, fc->byte_off, 0, fc->bit_mask);
 }
 
+static void
+mlx5dr_definer_flex_parser_set(struct mlx5dr_definer_fc *fc,
+			       const void *item,
+			       uint8_t *tag, bool is_inner)
+{
+	const struct rte_flow_item_flex *flex = item;
+	uint32_t byte_off, val, idx;
+	int ret;
+
+	val = 0;
+	byte_off = MLX5_BYTE_OFF(definer_hl, flex_parser.flex_parser_0);
+	idx = fc->fname - MLX5DR_DEFINER_FNAME_FLEX_PARSER_0;
+	byte_off -= idx * sizeof(uint32_t);
+	ret = mlx5_flex_get_parser_value_per_byte_off(flex, flex->handle, byte_off,
+						      false, is_inner, &val);
+	if (ret == -1 || !val)
+		return;
+
+	DR_SET(tag, val, fc->byte_off, 0, fc->bit_mask);
+}
+
+static void
+mlx5dr_definer_flex_parser_inner_set(struct mlx5dr_definer_fc *fc,
+				     const void *item,
+				     uint8_t *tag)
+{
+	mlx5dr_definer_flex_parser_set(fc, item, tag, true);
+}
+
+static void
+mlx5dr_definer_flex_parser_outer_set(struct mlx5dr_definer_fc *fc,
+				     const void *item,
+				     uint8_t *tag)
+{
+	mlx5dr_definer_flex_parser_set(fc, item, tag, false);
+}
+
 static void
 mlx5dr_definer_gre_key_set(struct mlx5dr_definer_fc *fc,
 			   const void *item_spec,
@@ -1782,6 +1819,47 @@ mlx5dr_definer_conv_item_esp(struct mlx5dr_definer_conv_data *cd,
 	return 0;
 }
 
+static int
+mlx5dr_definer_conv_item_flex_parser(struct mlx5dr_definer_conv_data *cd,
+				     struct rte_flow_item *item,
+				     int item_idx)
+{
+	uint32_t base_off = MLX5_BYTE_OFF(definer_hl, flex_parser.flex_parser_0);
+	const struct rte_flow_item_flex *v, *m;
+	enum mlx5dr_definer_fname fname;
+	struct mlx5dr_definer_fc *fc;
+	uint32_t i, mask, byte_off;
+	bool is_inner = cd->tunnel;
+	int ret;
+
+	m = item->mask;
+	v = item->spec;
+	mask = 0;
+	for (i = 0; i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
+		byte_off = base_off - i * sizeof(uint32_t);
+		ret = mlx5_flex_get_parser_value_per_byte_off(m, v->handle, byte_off,
+							      true, is_inner, &mask);
+		if (ret == -1) {
+			rte_errno = EINVAL;
+			return rte_errno;
+		}
+
+		if (!mask)
+			continue;
+
+		fname = MLX5DR_DEFINER_FNAME_FLEX_PARSER_0;
+		fname += (enum mlx5dr_definer_fname)i;
+		fc = &cd->fc[fname];
+		fc->byte_off = byte_off;
+		fc->item_idx = item_idx;
+		fc->tag_set = cd->tunnel ? &mlx5dr_definer_flex_parser_inner_set :
+					   &mlx5dr_definer_flex_parser_outer_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		fc->bit_mask = mask;
+	}
+	return 0;
+}
+
 static int
 mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 				struct mlx5dr_match_template *mt,
@@ -1913,6 +1991,11 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 			ret = mlx5dr_definer_conv_item_esp(&cd, items, i);
 			item_flags |= MLX5_FLOW_ITEM_ESP;
 			break;
+		case RTE_FLOW_ITEM_TYPE_FLEX:
+			ret = mlx5dr_definer_conv_item_flex_parser(&cd, items, i);
+			item_flags |= cd.tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
+						  MLX5_FLOW_ITEM_OUTER_FLEX;
+			break;
 		default:
 			DR_LOG(ERR, "Unsupported item type %d", items->type);
 			rte_errno = ENOTSUP;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index cfc4609448..9b9ece7ad0 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1033,7 +1033,7 @@ static void
 mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ecpri_parser_profile *prf =	&priv->sh->ecpri_parser;
+	struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;
 
 	if (prf->obj)
 		mlx5_devx_cmd_destroy(prf->obj);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index a766fb408e..af6380bc80 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -2257,6 +2257,12 @@ void mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev);
 void mlx5_flex_flow_translate_item(struct rte_eth_dev *dev, void *matcher,
 				   void *key, const struct rte_flow_item *item,
 				   bool is_inner);
+int mlx5_flex_get_sample_id(const struct mlx5_flex_item *tp,
+			    uint32_t idx, uint32_t *pos,
+			    bool is_inner, uint32_t *def);
+int mlx5_flex_get_parser_value_per_byte_off(const struct rte_flow_item_flex *item,
+					    void *flex, uint32_t byte_off,
+					    bool is_mask, bool tunnel, uint32_t *value);
 int mlx5_flex_acquire_index(struct rte_eth_dev *dev,
 			    struct rte_flow_item_flex_handle *handle,
 			    bool acquire);
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 4bef2296b8..ae2fc0aabe 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1229,6 +1229,7 @@ struct rte_flow_pattern_template {
 	 * tag pattern item for representor matching.
 	 */
 	bool implicit_tag;
+	uint8_t flex_item; /* flex item index. */
 };
 
 /* Flow action template struct. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index f93dd4073c..9e7ab08b32 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -10668,7 +10668,7 @@ flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key,
 		(const struct rte_flow_item_flex *)item->spec;
 	int index = mlx5_flex_acquire_index(dev, spec->handle, false);
 
-	MLX5_ASSERT(index >= 0 && index <= (int)(sizeof(uint32_t) * CHAR_BIT));
+	MLX5_ASSERT(index >= 0 && index < (int)(sizeof(uint32_t) * CHAR_BIT));
 	if (index < 0)
 		return;
 	if (!(dev_flow->handle->flex_item & RTE_BIT32(index))) {
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index 24b7226ee6..aa317fc958 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -198,6 +198,99 @@ mlx5_flex_set_match_sample(void *misc4_m, void *misc4_v,
 	}
 #undef SET_FP_MATCH_SAMPLE_ID
 }
+
+/**
+ * Get the flex parser sample id and corresponding mask
+ * per shift and width information.
+ *
+ * @param[in] tp
+ *   Mlx5 flex item sample mapping handle.
+ * @param[in] idx
+ *   Mapping index.
+ * @param[in, out] pos
+ *   Where to search the value and mask.
+ * @param[in] is_inner
+ *   For inner matching or not.
+ * @param[out] def
+ *   Mask generated by mapping shift and width.
+ *
+ * @return
+ *   The sample id on success, -1 to skip (DUMMY placeholder field).
+ */
+int
+mlx5_flex_get_sample_id(const struct mlx5_flex_item *tp,
+			uint32_t idx, uint32_t *pos,
+			bool is_inner, uint32_t *def)
+{
+	const struct mlx5_flex_pattern_field *map = tp->map + idx;
+	uint32_t id = map->reg_id;
+
+	*def = (RTE_BIT64(map->width) - 1) << map->shift;
+	/* Skip placeholders for DUMMY fields. */
+	if (id == MLX5_INVALID_SAMPLE_REG_ID) {
+		*pos += map->width;
+		return -1;
+	}
+	MLX5_ASSERT(map->width);
+	MLX5_ASSERT(id < tp->devx_fp->num_samples);
+	if (tp->tunnel_mode == FLEX_TUNNEL_MODE_MULTI && is_inner) {
+		uint32_t num_samples = tp->devx_fp->num_samples / 2;
+
+		MLX5_ASSERT(tp->devx_fp->num_samples % 2 == 0);
+		MLX5_ASSERT(id < num_samples);
+		id += num_samples;
+	}
+	return id;
+}
+
+/**
+ * Get the flex parser mapping value per definer format_select_dw.
+ *
+ * @param[in] item
+ *   Rte flex item pointer.
+ * @param[in] flex
+ *   Mlx5 flex item sample mapping handle.
+ * @param[in] byte_off
+ *   Mlx5 flex item format_select_dw.
+ * @param[in] is_mask
+ *   Spec or mask.
+ * @param[in] tunnel
+ *   Tunnel mode or not.
+ * @param[out] value
+ *   Value calculated for this flex parser, either spec or mask.
+ *
+ * @return
+ *   0 on success, -1 for error.
+ */
+int
+mlx5_flex_get_parser_value_per_byte_off(const struct rte_flow_item_flex *item,
+					void *flex, uint32_t byte_off,
+					bool is_mask, bool tunnel, uint32_t *value)
+{
+	struct mlx5_flex_pattern_field *map;
+	struct mlx5_flex_item *tp = flex;
+	uint32_t def, i, pos, val;
+	int id;
+
+	*value = 0;
+	for (i = 0, pos = 0; i < tp->mapnum && pos < item->length * CHAR_BIT; i++) {
+		map = tp->map + i;
+		id = mlx5_flex_get_sample_id(tp, i, &pos, tunnel, &def);
+		if (id == -1)
+			continue;
+		if (id >= (int)tp->devx_fp->num_samples || id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
+			return -1;
+		if (byte_off == tp->devx_fp->sample_ids[id].format_select_dw * sizeof(uint32_t)) {
+			val = mlx5_flex_get_bitfield(item, pos, map->width, map->shift);
+			if (is_mask)
+				val &= RTE_BE32(def);
+			*value |= val;
+		}
+		pos += map->width;
+	}
+	return 0;
+}
+
 /**
  * Translate item pattern into matcher fields according to translation
  * array.
@@ -240,26 +333,17 @@ mlx5_flex_flow_translate_item(struct rte_eth_dev *dev,
 	MLX5_ASSERT(mlx5_flex_index(priv, tp) >= 0);
 	for (i = 0; i < tp->mapnum; i++) {
 		struct mlx5_flex_pattern_field *map = tp->map + i;
-		uint32_t id = map->reg_id;
-		uint32_t def = (RTE_BIT64(map->width) - 1) << map->shift;
-		uint32_t val, msk;
+		uint32_t val, msk, def;
+		int id = mlx5_flex_get_sample_id(tp, i, &pos, is_inner, &def);
 
-		/* Skip placeholders for DUMMY fields. */
-		if (id == MLX5_INVALID_SAMPLE_REG_ID) {
-			pos += map->width;
+		if (id == -1)
 			continue;
-		}
+		MLX5_ASSERT(id < (int)tp->devx_fp->num_samples);
+		if (id >= (int)tp->devx_fp->num_samples ||
+		    id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
+			return;
 		val = mlx5_flex_get_bitfield(spec, pos, map->width, map->shift);
 		msk = mlx5_flex_get_bitfield(mask, pos, map->width, map->shift);
-		MLX5_ASSERT(map->width);
-		MLX5_ASSERT(id < tp->devx_fp->num_samples);
-		if (tp->tunnel_mode == FLEX_TUNNEL_MODE_MULTI && is_inner) {
-			uint32_t num_samples = tp->devx_fp->num_samples / 2;
-
-			MLX5_ASSERT(tp->devx_fp->num_samples % 2 == 0);
-			MLX5_ASSERT(id < num_samples);
-			id += num_samples;
-		}
 		if (attr->ext_sample_id)
 			sample_id = tp->devx_fp->sample_ids[id].sample_id;
 		else
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 9e1912ec69..1066829ca5 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -4343,6 +4343,36 @@ flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
 					      &modify_action);
 }
 
+static int
+flow_hw_flex_item_acquire(struct rte_eth_dev *dev,
+			  struct rte_flow_item_flex_handle *handle,
+			  uint8_t *flex_item)
+{
+	int index = mlx5_flex_acquire_index(dev, handle, false);
+
+	MLX5_ASSERT(index >= 0 && index < (int)(sizeof(uint32_t) * CHAR_BIT));
+	if (index < 0)
+		return -1;
+	if (!(*flex_item & RTE_BIT32(index))) {
+		/* Don't count same flex item again. */
+		if (mlx5_flex_acquire_index(dev, handle, true) != index)
+			MLX5_ASSERT(false);
+		*flex_item |= (uint8_t)RTE_BIT32(index);
+	}
+	return 0;
+}
+
+static void
+flow_hw_flex_item_release(struct rte_eth_dev *dev, uint8_t *flex_item)
+{
+	while (*flex_item) {
+		int index = rte_bsf32(*flex_item);
+
+		mlx5_flex_release_index(dev, index);
+		*flex_item &= ~(uint8_t)RTE_BIT32(index);
+	}
+}
+
 /**
  * Create flow action template.
  *
@@ -4748,6 +4778,7 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
 		case RTE_FLOW_ITEM_TYPE_CONNTRACK:
 		case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
 		case RTE_FLOW_ITEM_TYPE_ESP:
+		case RTE_FLOW_ITEM_TYPE_FLEX:
 			break;
 		case RTE_FLOW_ITEM_TYPE_INTEGRITY:
 			/*
@@ -4825,6 +4856,7 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
 		.mask = &tag_m,
 		.last = NULL
 	};
+	unsigned int i = 0;
 
 	if (flow_hw_pattern_validate(dev, attr, items, error))
 		return NULL;
@@ -4895,6 +4927,19 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
 			return NULL;
 		}
 	}
+	for (i = 0; items[i].type != RTE_FLOW_ITEM_TYPE_END; ++i) {
+		if (items[i].type == RTE_FLOW_ITEM_TYPE_FLEX) {
+			const struct rte_flow_item_flex *spec =
+				(const struct rte_flow_item_flex *)items[i].spec;
+			struct rte_flow_item_flex_handle *handle = spec->handle;
+
+			if (flow_hw_flex_item_acquire(dev, handle, &it->flex_item)) {
+				claim_zero(mlx5dr_match_template_destroy(it->mt));
+				mlx5_free(it);
+				return NULL;
+			}
+		}
+	}
 	__atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
 	LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
 	return it;
@@ -4914,7 +4959,7 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
+flow_hw_pattern_template_destroy(struct rte_eth_dev *dev,
 			      struct rte_flow_pattern_template *template,
 			      struct rte_flow_error *error __rte_unused)
 {
@@ -4930,6 +4975,7 @@ flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
 				    MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT))
 		mlx5_free_srh_flex_parser(dev);
 	LIST_REMOVE(template, next);
+	flow_hw_flex_item_release(dev, &template->flex_item);
 	claim_zero(mlx5dr_match_template_destroy(template->mt));
 	mlx5_free(template);
 	return 0;
-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v4 4/6] net/mlx5: add flex item modify field implementation
  2023-02-23 12:48         ` [PATCH v4 0/6] add flex item implementation Rongwei Liu
                             ` (2 preceding siblings ...)
  2023-02-23 12:48           ` [PATCH v4 3/6] net/mlx5/hws: add hws flex item matching support Rongwei Liu
@ 2023-02-23 12:48           ` Rongwei Liu
  2023-02-23 12:48           ` [PATCH v4 5/6] net/mlx5: return error for sws modify field Rongwei Liu
                             ` (2 subsequent siblings)
  6 siblings, 0 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-23 12:48 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland

Add the flex item modify field HWS implementation.
The minimum modification granularity is one byte.
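
Not part of the patch: a sketch of a modify-field action writing a 16-bit
immediate into a flex item field. The width, offset, and immediate bytes are
placeholders, the offset must be byte aligned as stated above, and the helper
name is made up.

#include <rte_flow.h>

static struct rte_flow_action_modify_field
flex_set_action(struct rte_flow_item_flex_handle *handle)
{
	struct rte_flow_action_modify_field conf = {
		.operation = RTE_FLOW_MODIFY_SET,
		.width = 16,
	};

	conf.dst.field = RTE_FLOW_FIELD_FLEX_ITEM;
	conf.dst.flex_handle = handle;
	conf.dst.offset = 0;		/* bit offset, must be a multiple of 8 */
	conf.src.field = RTE_FLOW_FIELD_VALUE;
	conf.src.value[0] = 0x12;	/* immediate bytes */
	conf.src.value[1] = 0x34;
	return conf;
}

The action and a corresponding mask would then be placed in an actions
template as with any other template-API action.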

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 doc/guides/nics/mlx5.rst        |   4 +-
 drivers/common/mlx5/mlx5_prm.h  |   1 +
 drivers/net/mlx5/mlx5_flow.h    |   3 +
 drivers/net/mlx5/mlx5_flow_dv.c | 165 +++++++++++++++++++++++++++++---
 drivers/net/mlx5/mlx5_flow_hw.c |  14 ++-
 5 files changed, 173 insertions(+), 14 deletions(-)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index f182baa37e..09828a5cf4 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -108,6 +108,7 @@ Features
 - Sub-Function.
 - Matching on represented port.
 - Matching on IPv6 routing extension header.
+- Modify flex item field.
 
 
 Limitations
@@ -291,11 +292,12 @@ Limitations
   - Firmware supports 8 global sample fields.
     Each flex item allocates non-shared sample fields from that pool.
   - Supported flex item can have 1 input link - ``eth`` or ``udp``
-    and up to 2 output links - ``ipv4`` or ``ipv6``.
+    and up to 3 output links - ``ipv4`` or ``ipv6``.
   - Flex item fields (``next_header``, ``next_protocol``, ``samples``)
     do not participate in RSS hash functions.
   - In flex item configuration, ``next_header.field_base`` value
     must be byte aligned (multiple of 8).
+  - For a modify field action on a flex item, the offset must be byte aligned (multiple of 8).
 
 - No Tx metadata go to the E-Switch steering domain for the Flow group 0.
   The flows within group 0 and set metadata action are rejected by hardware.
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 613cc6face..74c5e2e371 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -761,6 +761,7 @@ enum mlx5_modification_field {
 	MLX5_MODI_GTPU_FIRST_EXT_DW_0 = 0x76,
 	MLX5_MODI_HASH_RESULT = 0x81,
 	MLX5_MODI_OUT_IPV6_NEXT_HDR = 0x4A,
+	MLX5_MODI_INVALID = INT_MAX,
 };
 
 /* Total number of metadata reg_c's. */
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index ae2fc0aabe..d6831d849d 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1084,6 +1084,8 @@ struct field_modify_info {
 	uint32_t size; /* Size of field in protocol header, in bytes. */
 	uint32_t offset; /* Offset of field in protocol header, in bytes. */
 	enum mlx5_modification_field id;
+	uint32_t shift;
+	uint8_t is_flex; /* Temporary indicator for flex item modify filed WA. */
 };
 
 /* HW steering flow attributes. */
@@ -1248,6 +1250,7 @@ struct rte_flow_actions_template {
 	uint16_t mhdr_off; /* Offset of DR modify header action. */
 	uint32_t refcnt; /* Reference counter. */
 	uint16_t rx_cpy_pos; /* Action position of Rx metadata to be copied. */
+	uint8_t flex_item; /* flex item index. */
 };
 
 /* Jump action struct. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 9e7ab08b32..8355249ce5 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -414,10 +414,15 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
 			++field;
 			continue;
 		}
-		/* Deduce actual data width in bits from mask value. */
-		off_b = rte_bsf32(mask) + carry_b;
-		size_b = sizeof(uint32_t) * CHAR_BIT -
-			 off_b - __builtin_clz(mask);
+		if (type == MLX5_MODIFICATION_TYPE_COPY && field->is_flex) {
+			off_b = 32 - field->shift + carry_b - field->size * CHAR_BIT;
+			size_b = field->size * CHAR_BIT - carry_b;
+		} else {
+			/* Deduce actual data width in bits from mask value. */
+			off_b = rte_bsf32(mask) + carry_b;
+			size_b = sizeof(uint32_t) * CHAR_BIT -
+				 off_b - __builtin_clz(mask);
+		}
 		MLX5_ASSERT(size_b);
 		actions[i] = (struct mlx5_modification_cmd) {
 			.action_type = type,
@@ -437,40 +442,46 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
 			 * Destination field overflow. Copy leftovers of
 			 * a source field to the next destination field.
 			 */
-			carry_b = 0;
 			if ((size_b > dcopy->size * CHAR_BIT - dcopy->offset) &&
 			    dcopy->size != 0) {
 				actions[i].length =
 					dcopy->size * CHAR_BIT - dcopy->offset;
-				carry_b = actions[i].length;
+				carry_b += actions[i].length;
 				next_field = false;
+			} else {
+				carry_b = 0;
 			}
 			/*
 			 * Not enough bits in a source filed to fill a
 			 * destination field. Switch to the next source.
 			 */
 			if ((size_b < dcopy->size * CHAR_BIT - dcopy->offset) &&
-			    (size_b == field->size * CHAR_BIT - off_b)) {
-				actions[i].length =
-					field->size * CHAR_BIT - off_b;
+			    ((size_b == field->size * CHAR_BIT - off_b) ||
+			     field->is_flex)) {
+				actions[i].length = size_b;
 				dcopy->offset += actions[i].length;
 				next_dcopy = false;
 			}
-			if (next_dcopy)
-				++dcopy;
 		} else {
 			MLX5_ASSERT(item->spec);
 			data = flow_dv_fetch_field((const uint8_t *)item->spec +
 						   field->offset, field->size);
 			/* Shift out the trailing masked bits from data. */
 			data = (data & mask) >> off_b;
+			if (field->is_flex)
+				actions[i].offset = 32 - field->shift - field->size * CHAR_BIT;
 			actions[i].data1 = rte_cpu_to_be_32(data);
 		}
 		/* Convert entire record to expected big-endian format. */
 		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
+		if ((type != MLX5_MODIFICATION_TYPE_COPY ||
+		     dcopy->id != (enum mlx5_modification_field)UINT32_MAX) &&
+		    field->id != (enum mlx5_modification_field)UINT32_MAX)
+			++i;
+		if (next_dcopy && type == MLX5_MODIFICATION_TYPE_COPY)
+			++dcopy;
 		if (next_field)
 			++field;
-		++i;
 	} while (field->size);
 	if (resource->actions_num == i)
 		return rte_flow_error_set(error, EINVAL,
@@ -1422,6 +1433,131 @@ flow_modify_info_mask_32_masked(uint32_t length, uint32_t off, uint32_t post_mas
 	return rte_cpu_to_be_32(mask & post_mask);
 }
 
+static void
+mlx5_modify_flex_item(const struct rte_eth_dev *dev,
+		      const struct mlx5_flex_item *flex,
+		      const struct rte_flow_action_modify_data *data,
+		      struct field_modify_info *info,
+		      uint32_t *mask, uint32_t width)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hca_flex_attr *attr = &priv->sh->cdev->config.hca_attr.flex;
+	uint32_t i, j;
+	int id = 0;
+	uint32_t pos = 0;
+	const struct mlx5_flex_pattern_field *map;
+	uint32_t offset = data->offset;
+	uint32_t width_left = width;
+	uint32_t def;
+	uint32_t cur_width = 0;
+	uint32_t tmp_ofs;
+	uint32_t idx = 0;
+	struct field_modify_info tmp;
+	int tmp_id;
+
+	if (!attr->ext_sample_id) {
+		DRV_LOG(ERR, "FW doesn't support modify field with flex item.");
+		return;
+	}
+	/*
+	 * search for the mapping instance until Accumulated width is no
+	 * less than data->offset.
+	 */
+	for (i = 0; i < flex->mapnum; i++) {
+		if (flex->map[i].width + pos > data->offset)
+			break;
+		pos += flex->map[i].width;
+	}
+	if (i >= flex->mapnum)
+		return;
+	tmp_ofs = pos < data->offset ? data->offset - pos : 0;
+	for (j = i; i < flex->mapnum && width_left > 0; ) {
+		map = flex->map + i;
+		id = mlx5_flex_get_sample_id(flex, i, &pos, false, &def);
+		if (id == -1) {
+			i++;
+			/* All left length is dummy */
+			if (pos >= data->offset + width)
+				return;
+			cur_width = map->width;
+		/* One mapping instance covers the whole width. */
+		} else if (pos + map->width >= (data->offset + width)) {
+			cur_width = width_left;
+		} else {
+			cur_width = cur_width + map->width - tmp_ofs;
+			pos += map->width;
+			/*
+			 * Continue to search next until:
+			 * 1. Another flex parser ID.
+			 * 2. Width has been covered.
+			 */
+			for (j = i + 1; j < flex->mapnum; j++) {
+				tmp_id = mlx5_flex_get_sample_id(flex, j, &pos, false, &def);
+				if (tmp_id == -1) {
+					i = j;
+					pos -= flex->map[j].width;
+					break;
+				}
+				if (id >= (int)flex->devx_fp->num_samples ||
+				    id >= MLX5_GRAPH_NODE_SAMPLE_NUM ||
+				    tmp_id >= (int)flex->devx_fp->num_samples ||
+				    tmp_id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
+					return;
+				if (flex->devx_fp->sample_ids[id].id !=
+						flex->devx_fp->sample_ids[tmp_id].id ||
+				    flex->map[j].shift != flex->map[j - 1].width +
+							  flex->map[j - 1].shift) {
+					i = j;
+					break;
+				}
+				if ((pos + flex->map[j].width) >= (data->offset + width)) {
+					cur_width = width_left;
+					break;
+				}
+				pos += flex->map[j].width;
+				cur_width += flex->map[j].width;
+			}
+		}
+		if (cur_width > width_left)
+			cur_width = width_left;
+		else if (cur_width < width_left && (j == flex->mapnum || i == flex->mapnum))
+			return;
+
+		MLX5_ASSERT(id < (int)flex->devx_fp->num_samples);
+		if (id >= (int)flex->devx_fp->num_samples || id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
+			return;
+		/* Use invalid entry as placeholder for DUMMY mapping. */
+		info[idx] = (struct field_modify_info){cur_width / CHAR_BIT, offset / CHAR_BIT,
+			     id == -1 ? MLX5_MODI_INVALID :
+			     (enum mlx5_modification_field)
+			     flex->devx_fp->sample_ids[id].modify_field_id,
+			     map->shift + tmp_ofs, 1};
+		offset += cur_width;
+		width_left -= cur_width;
+		if (!mask) {
+			info[idx].offset = (32 - cur_width - map->shift - tmp_ofs);
+			info[idx].size = cur_width / CHAR_BIT + info[idx].offset / CHAR_BIT;
+		}
+		cur_width = 0;
+		tmp_ofs = 0;
+		idx++;
+	}
+	if (unlikely(width_left > 0)) {
+		MLX5_ASSERT(false);
+		return;
+	}
+	if (mask)
+		memset(mask, 0xff, data->offset / CHAR_BIT + width / CHAR_BIT);
+	/* Re-order the info to follow IPv6 address. */
+	for (i = 0; i < idx / 2; i++) {
+		tmp = info[i];
+		MLX5_ASSERT(info[i].id);
+		MLX5_ASSERT(info[idx - 1 - i].id);
+		info[i] = info[idx - 1 - i];
+		info[idx - 1 - i] = tmp;
+	}
+}
+
 void
 mlx5_flow_field_id_to_modify_info
 		(const struct rte_flow_action_modify_data *data,
@@ -1893,6 +2029,11 @@ mlx5_flow_field_id_to_modify_info
 		else
 			info[idx].offset = off_be;
 		break;
+	case RTE_FLOW_FIELD_FLEX_ITEM:
+		MLX5_ASSERT(data->flex_handle != NULL && !(data->offset & 0x7));
+		mlx5_modify_flex_item(dev, (const struct mlx5_flex_item *)data->flex_handle,
+				      data, info, mask, width);
+		break;
 	case RTE_FLOW_FIELD_POINTER:
 	case RTE_FLOW_FIELD_VALUE:
 	default:
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 1066829ca5..907aab8bf3 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -4562,6 +4562,17 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
 			at->actions[i].conf = actions->conf;
 			at->masks[i].conf = masks->conf;
 		}
+		if (actions->type == RTE_FLOW_ACTION_TYPE_MODIFY_FIELD) {
+			const struct rte_flow_action_modify_field *info = actions->conf;
+
+			if ((info->dst.field == RTE_FLOW_FIELD_FLEX_ITEM &&
+			     flow_hw_flex_item_acquire(dev, info->dst.flex_handle,
+						       &at->flex_item)) ||
+			     (info->src.field == RTE_FLOW_FIELD_FLEX_ITEM &&
+			      flow_hw_flex_item_acquire(dev, info->src.flex_handle,
+							&at->flex_item)))
+				goto error;
+		}
 	}
 	at->tmpl = flow_hw_dr_actions_template_create(at);
 	if (!at->tmpl)
@@ -4593,7 +4604,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-flow_hw_actions_template_destroy(struct rte_eth_dev *dev __rte_unused,
+flow_hw_actions_template_destroy(struct rte_eth_dev *dev,
 				 struct rte_flow_actions_template *template,
 				 struct rte_flow_error *error __rte_unused)
 {
@@ -4606,6 +4617,7 @@ flow_hw_actions_template_destroy(struct rte_eth_dev *dev __rte_unused,
 				   "action template in using");
 	}
 	LIST_REMOVE(template, next);
+	flow_hw_flex_item_release(dev, &template->flex_item);
 	if (template->tmpl)
 		mlx5dr_action_template_destroy(template->tmpl);
 	mlx5_free(template);
-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v4 5/6] net/mlx5: return error for sws modify field
  2023-02-23 12:48         ` [PATCH v4 0/6] add flex item implementation Rongwei Liu
                             ` (3 preceding siblings ...)
  2023-02-23 12:48           ` [PATCH v4 4/6] net/mlx5: add flex item modify field implementation Rongwei Liu
@ 2023-02-23 12:48           ` Rongwei Liu
  2023-02-23 12:48           ` [PATCH v4 6/6] net/mlx5: add error message Rongwei Liu
  2023-02-27 10:27           ` [PATCH v4 0/6] add flex item implementation Raslan Darawsheh
  6 siblings, 0 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-23 12:48 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland, Dariusz Sosnowski

Return an unsupported error message when an application tries to
modify a flex item field.

Validation of packet modification actions for SW Steering checked
whether either the source or the destination field of a MODIFY_FIELD
action was a flex item. The DEC_TTL action, however, carries no action
configuration, so dereferencing its source or destination field is
invalid. The source and destination field type checks are therefore
moved into the MODIFY_FIELD-specific validation function, so the field
types are validated if and only if the action type is MODIFY_FIELD.

Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow_dv.c | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 8355249ce5..3d760d1913 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -4838,6 +4838,7 @@ flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 					  NULL, "action configuration not set");
+
 	if (action_flags & MLX5_FLOW_ACTION_ENCAP)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
@@ -5163,17 +5164,21 @@ flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
 	struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
 	const struct rte_flow_action_modify_field *action_modify_field =
 		action->conf;
-	uint32_t dst_width = mlx5_flow_item_field_width(dev,
-				action_modify_field->dst.field,
-				-1, attr, error);
-	uint32_t src_width = mlx5_flow_item_field_width(dev,
-				action_modify_field->src.field,
-				dst_width, attr, error);
+	uint32_t dst_width, src_width;
 
 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
 	if (ret)
 		return ret;
-
+	if (action_modify_field->src.field == RTE_FLOW_FIELD_FLEX_ITEM ||
+	    action_modify_field->dst.field == RTE_FLOW_FIELD_FLEX_ITEM)
+		return rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_ACTION, action,
+				"flex item fields modification"
+				" is not supported");
+	dst_width = mlx5_flow_item_field_width(dev, action_modify_field->dst.field,
+					       -1, attr, error);
+	src_width = mlx5_flow_item_field_width(dev, action_modify_field->src.field,
+					       dst_width, attr, error);
 	if (action_modify_field->width == 0)
 		return rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ACTION, action,
-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH v4 6/6] net/mlx5: add error message
  2023-02-23 12:48         ` [PATCH v4 0/6] add flex item implementation Rongwei Liu
                             ` (4 preceding siblings ...)
  2023-02-23 12:48           ` [PATCH v4 5/6] net/mlx5: return error for sws modify field Rongwei Liu
@ 2023-02-23 12:48           ` Rongwei Liu
  2023-02-27 10:27           ` [PATCH v4 0/6] add flex item implementation Raslan Darawsheh
  6 siblings, 0 replies; 31+ messages in thread
From: Rongwei Liu @ 2023-02-23 12:48 UTC (permalink / raw)
  To: dev, matan, viacheslavo, orika, thomas; +Cc: rasland

When pattern/action template creation fails, testpmd expects error
details.
The driver did not set the error structure, so testpmd crashed with a
segmentation fault.

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow_hw.c | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 907aab8bf3..b43661674f 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -4570,8 +4570,12 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
 						       &at->flex_item)) ||
 			     (info->src.field == RTE_FLOW_FIELD_FLEX_ITEM &&
 			      flow_hw_flex_item_acquire(dev, info->src.flex_handle,
-							&at->flex_item)))
+							&at->flex_item))) {
+				rte_flow_error_set(error, rte_errno,
+						   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+						   "Failed to acquire flex item");
 				goto error;
+			}
 		}
 	}
 	at->tmpl = flow_hw_dr_actions_template_create(at);
@@ -4587,6 +4591,9 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
 			mlx5dr_action_template_destroy(at->tmpl);
 		mlx5_free(at);
 	}
+	rte_flow_error_set(error, rte_errno,
+			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+			   "Failed to create action template");
 	return NULL;
 }
 
@@ -4936,6 +4943,9 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
 		    (mlx5_alloc_srh_flex_parser(dev))) {
 			claim_zero(mlx5dr_match_template_destroy(it->mt));
 			mlx5_free(it);
+			rte_flow_error_set(error, rte_errno,
+					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+					   "cannot create IPv6 routing extension support");
 			return NULL;
 		}
 	}
@@ -4948,6 +4958,9 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
 			if (flow_hw_flex_item_acquire(dev, handle, &it->flex_item)) {
 				claim_zero(mlx5dr_match_template_destroy(it->mt));
 				mlx5_free(it);
+				rte_flow_error_set(error, rte_errno,
+						   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+						   "Failed to acquire flex item");
 				return NULL;
 			}
 		}
-- 
2.27.0


^ permalink raw reply	[flat|nested] 31+ messages in thread

* RE: [PATCH v4 0/6] add flex item implementation
  2023-02-23 12:48         ` [PATCH v4 0/6] add flex item implementation Rongwei Liu
                             ` (5 preceding siblings ...)
  2023-02-23 12:48           ` [PATCH v4 6/6] net/mlx5: add error message Rongwei Liu
@ 2023-02-27 10:27           ` Raslan Darawsheh
  6 siblings, 0 replies; 31+ messages in thread
From: Raslan Darawsheh @ 2023-02-27 10:27 UTC (permalink / raw)
  To: Rongwei Liu, dev, Matan Azrad, Slava Ovsiienko, Ori Kam,
	NBU-Contact-Thomas Monjalon (EXTERNAL)

Hi,


> -----Original Message-----
> From: Rongwei Liu <rongweil@nvidia.com>
> Sent: Thursday, February 23, 2023 2:49 PM
> To: dev@dpdk.org; Matan Azrad <matan@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>; Ori Kam <orika@nvidia.com>; NBU-Contact-
> Thomas Monjalon (EXTERNAL) <thomas@monjalon.net>
> Cc: Raslan Darawsheh <rasland@nvidia.com>
> Subject: [PATCH v4 0/6] add flex item implementation
> 
> Implement the flex item matching and modify field in HWS.
> 
> v4: squash doc commit.
> v3: add error message for testpmd output.
> 
> Rongwei Liu (6):
>   net/mlx5: enable hws flex item create
>   net/mlx5: add IPv6 protocol as flex item input
>   net/mlx5/hws: add hws flex item matching support
>   net/mlx5: add flex item modify field implementation
>   net/mlx5: return error for sws modify field
>   net/mlx5: add error message
> 
>  doc/guides/nics/mlx5.rst              |   4 +-
>  drivers/common/mlx5/mlx5_prm.h        |   1 +
>  drivers/net/mlx5/hws/mlx5dr_definer.c |  83 ++++++++++++
>  drivers/net/mlx5/linux/mlx5_os.c      |  27 ++--
>  drivers/net/mlx5/mlx5.c               |   2 +-
>  drivers/net/mlx5/mlx5.h               |   6 +
>  drivers/net/mlx5/mlx5_flow.h          |   4 +
>  drivers/net/mlx5/mlx5_flow_dv.c       | 186 +++++++++++++++++++++++---
>  drivers/net/mlx5/mlx5_flow_flex.c     | 135 ++++++++++++++++---
>  drivers/net/mlx5/mlx5_flow_hw.c       |  77 ++++++++++-
>  10 files changed, 473 insertions(+), 52 deletions(-)
> 
> --
> 2.27.0

Series applied to next-net-mlx, 

Kindest regards,
Raslan Darawsheh

^ permalink raw reply	[flat|nested] 31+ messages in thread

end of thread, other threads:[~2023-02-27 10:28 UTC | newest]

Thread overview: 31+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-02-15 11:52 [PATCH v1 0/6] add flex item implementation Rongwei Liu
2023-02-15 11:52 ` [PATCH v1 1/6] net/mlx5: enable hws flex item create Rongwei Liu
2023-02-15 11:52 ` [PATCH v1 2/6] net/mlx5: add IPv6 protocol as flex item input Rongwei Liu
2023-02-15 11:52 ` [PATCH v1 3/6] net/mlx5/hws: add hws flex item matching support Rongwei Liu
2023-02-15 11:52 ` [PATCH v1 4/6] net/mlx5: add flex item modify field implementation Rongwei Liu
2023-02-15 11:52 ` [PATCH v1 5/6] net/mlx5: return error for sws modify field Rongwei Liu
2023-02-15 11:52 ` [PATCH v1 6/6] doc/mlx5: update mlx5 doc Rongwei Liu
2023-02-22  9:37   ` [PATCH v2 0/6] add flex item implementation Rongwei Liu
2023-02-22  9:37     ` [PATCH v2 1/6] net/mlx5: enable hws flex item create Rongwei Liu
2023-02-22  9:37     ` [PATCH v2 2/6] net/mlx5: add IPv6 protocol as flex item input Rongwei Liu
2023-02-22  9:37     ` [PATCH v2 3/6] net/mlx5/hws: add hws flex item matching support Rongwei Liu
2023-02-22  9:37     ` [PATCH v2 4/6] net/mlx5: add flex item modify field implementation Rongwei Liu
2023-02-22  9:37     ` [PATCH v2 5/6] net/mlx5: return error for sws modify field Rongwei Liu
2023-02-22  9:37     ` [PATCH v2 6/6] doc/mlx5: update mlx5 doc Rongwei Liu
2023-02-23  7:06   ` [PATCH v3 0/7] add flex item implementation Rongwei Liu
2023-02-23  7:06     ` [PATCH v3 1/7] net/mlx5: enable hws flex item create Rongwei Liu
2023-02-23  7:06     ` [PATCH v3 2/7] net/mlx5: add IPv6 protocol as flex item input Rongwei Liu
2023-02-23  7:06     ` [PATCH v3 3/7] net/mlx5/hws: add hws flex item matching support Rongwei Liu
2023-02-23  7:06     ` [PATCH v3 4/7] net/mlx5: add flex item modify field implementation Rongwei Liu
2023-02-23  7:06     ` [PATCH v3 5/7] net/mlx5: return error for sws modify field Rongwei Liu
2023-02-23  7:06     ` [PATCH v3 6/7] doc/mlx5: update mlx5 doc Rongwei Liu
2023-02-23 12:37       ` Thomas Monjalon
2023-02-23 12:48         ` [PATCH v4 0/6] add flex item implementation Rongwei Liu
2023-02-23 12:48           ` [PATCH v4 1/6] net/mlx5: enable hws flex item create Rongwei Liu
2023-02-23 12:48           ` [PATCH v4 2/6] net/mlx5: add IPv6 protocol as flex item input Rongwei Liu
2023-02-23 12:48           ` [PATCH v4 3/6] net/mlx5/hws: add hws flex item matching support Rongwei Liu
2023-02-23 12:48           ` [PATCH v4 4/6] net/mlx5: add flex item modify field implementation Rongwei Liu
2023-02-23 12:48           ` [PATCH v4 5/6] net/mlx5: return error for sws modify field Rongwei Liu
2023-02-23 12:48           ` [PATCH v4 6/6] net/mlx5: add error message Rongwei Liu
2023-02-27 10:27           ` [PATCH v4 0/6] add flex item implementation Raslan Darawsheh
2023-02-23  7:06     ` [PATCH v3 7/7] net/mlx5: add error message Rongwei Liu

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).