From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
To: <dev@dpdk.org>
Cc: <rasland@nvidia.com>, <matan@nvidia.com>, <shahafs@nvidia.com>,
	<orika@nvidia.com>, <getelson@nvidia.com>, <thomas@monjalon.net>
Subject: [dpdk-dev] [PATCH v2 12/14] net/mlx5: translate flex item configuration
Date: Fri, 1 Oct 2021 22:34:13 +0300	[thread overview]
Message-ID: <20211001193415.23288-13-viacheslavo@nvidia.com> (raw)
In-Reply-To: <20211001193415.23288-1-viacheslavo@nvidia.com>

RTE Flow flex item configuration should be translated
into the actual hardware settings:

  - translate the header length and next protocol field samplings
  - translate the data field samplings; similar fields with the
    same mode and matching related parameters are relocated and
    grouped so that they are covered with the minimal number of
    hardware sampling registers (each register can cover an
    arbitrary byte-aligned 32-bit span of the packet, so fields
    with smaller widths, or segments of larger fields, can be
    combined; see the sketch after this list)
  - translate the input and output links
  - prepare the data for parsing the flex item pattern on flow
    creation
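
For reference, the register covering policy can be sketched in
isolation as below. This is an illustrative sketch only, not code
from this patch: the helper name and its standalone form are
assumptions, while the driver applies the same greedy policy
inside mlx5_flex_alloc_sample().

    #include <limits.h>
    #include <rte_common.h>

    /*
     * Count the byte-aligned 32-bit sample windows needed to cover
     * "num" sorted, non-overlapping bit intervals [start[i], end[i]).
     * Each window models one hardware sampling register.
     */
    static int
    flex_count_sample_regs(int32_t *start, const int32_t *end, int num)
    {
        int regs = 0, i = 0;

        while (i < num) {
            /* The window starts at the interval start, aligned down to a byte. */
            int32_t base = RTE_ALIGN_FLOOR(start[i], CHAR_BIT);
            int32_t limit = base + 32;

            regs++;
            /* Consume the intervals this window covers completely. */
            while (i < num && end[i] <= limit)
                i++;
            /* A partially covered interval keeps its tail for the next window. */
            if (i < num && start[i] < limit)
                start[i] = limit;
        }
        return regs;
    }

For example, intervals [5, 20) and [40, 100) take three windows:
one at byte 0 and two at bytes 5 and 9, splitting the larger field
across registers exactly as the second bullet above describes.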

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5.h           |  16 +-
 drivers/net/mlx5/mlx5_flow_flex.c | 748 +++++++++++++++++++++++++++++-
 2 files changed, 762 insertions(+), 2 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 629ff6ebfe..d4fa946485 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -52,6 +52,9 @@
 /* Maximal number of flex items created on the port.*/
 #define MLX5_PORT_FLEX_ITEM_NUM			4
 
+/* Maximal number of field/field parts to map into sample registers. */
+#define MLX5_FLEX_ITEM_MAPPING_NUM		32
+
 enum mlx5_ipool_index {
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
 	MLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resource. */
@@ -1124,10 +1127,21 @@ struct mlx5_flex_parser_devx {
 	uint32_t sample_ids[MLX5_GRAPH_NODE_SAMPLE_NUM];
 };
 
+/* Pattern field descriptor - how to translate flex pattern into samples. */
+__extension__
+struct mlx5_flex_pattern_field {
+	uint16_t width:6;
+	uint16_t shift:5;
+	uint16_t reg_id:5;
+};
+
 /* Port flex item context. */
 struct mlx5_flex_item {
 	struct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */
-	uint32_t refcnt; /**< Atomically accessed refcnt by flows. */
+	uint32_t refcnt; /* Atomically accessed refcnt by flows. */
+	uint32_t tunnel:1; /* Flex item represents tunnel protocol. */
+	uint32_t mapnum; /* Number of pattern translation entries. */
+	struct mlx5_flex_pattern_field map[MLX5_FLEX_ITEM_MAPPING_NUM];
 };
 
 /*
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index b8a091e259..56b91da839 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -113,6 +113,750 @@ mlx5_flex_free(struct mlx5_priv *priv, struct mlx5_flex_item *item)
 	}
 }
 
+static int
+mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr,
+			   const struct rte_flow_item_flex_conf *conf,
+			   struct mlx5_flex_parser_devx *devx,
+			   struct rte_flow_error *error)
+{
+	const struct rte_flow_item_flex_field *field = &conf->next_header;
+	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
+	uint32_t len_width;
+
+	if (field->field_base % CHAR_BIT)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "not byte aligned header length field");
+	switch (field->field_mode) {
+	case FIELD_MODE_DUMMY:
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "invalid header length field mode (DUMMY)");
+	case FIELD_MODE_FIXED:
+		if (!(attr->header_length_mode &
+		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIXED)))
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported header length field mode (FIXED)");
+		if (attr->header_length_mask_width < field->field_size)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "header length field width exceeds limit");
+		if (field->offset_shift < 0 ||
+		    field->offset_shift > attr->header_length_mask_width)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "invalid header length field shift (FIXED)");
+		if (field->field_base < 0)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "negative header length field base (FIXED)");
+		node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED;
+		break;
+	case FIELD_MODE_OFFSET:
+		if (!(attr->header_length_mode &
+		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIELD)))
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported header length field mode (OFFSET)");
+		node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIELD;
+		if (field->offset_mask == 0 ||
+		    !rte_is_power_of_2(field->offset_mask + 1))
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "invalid length field offset mask (OFFSET)");
+		len_width = rte_fls_u32(field->offset_mask);
+		if (len_width > attr->header_length_mask_width)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "length field offset mask too wide (OFFSET)");
+		node->header_length_field_mask = field->offset_mask;
+		break;
+	case FIELD_MODE_BITMASK:
+		if (!(attr->header_length_mode &
+		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_BITMASK)))
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported header length field mode (BITMASK)");
+		if (attr->header_length_mask_width < field->field_size)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "header length field width exceeds limit");
+		node->header_length_mode = MLX5_GRAPH_NODE_LEN_BITMASK;
+		node->header_length_field_mask = field->offset_mask;
+		break;
+	default:
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "unknown header length field mode");
+	}
+	if (field->field_base / CHAR_BIT >= 0 &&
+	    field->field_base / CHAR_BIT > attr->max_base_header_length)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "header length field base exceeds limit");
+	node->header_length_base_value = field->field_base / CHAR_BIT;
+	if (field->field_mode == FIELD_MODE_OFFSET ||
+	    field->field_mode == FIELD_MODE_BITMASK) {
+		if (field->offset_shift > 15 || field->offset_shift < 0)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "header length field shift exceeds limit");
+		node->header_length_field_shift = field->offset_shift;
+		node->header_length_field_offset = field->offset_base;
+	}
+	return 0;
+}
+
+static int
+mlx5_flex_translate_next(struct mlx5_hca_flex_attr *attr,
+			 const struct rte_flow_item_flex_conf *conf,
+			 struct mlx5_flex_parser_devx *devx,
+			 struct rte_flow_error *error)
+{
+	const struct rte_flow_item_flex_field *field = &conf->next_protocol;
+	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
+
+	switch (field->field_mode) {
+	case FIELD_MODE_DUMMY:
+		if (conf->output_num)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "next protocol field is required (DUMMY)");
+		return 0;
+	case FIELD_MODE_FIXED:
+		break;
+	case FIELD_MODE_OFFSET:
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "unsupported next protocol field mode (OFFSET)");
+		break;
+	case FIELD_MODE_BITMASK:
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "unsupported next protocol field mode (BITMASK)");
+	default:
+		return rte_flow_error_set
+			(error, EINVAL,
+			 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "unknown next protocol field mode");
+	}
+	MLX5_ASSERT(field->field_mode == FIELD_MODE_FIXED);
+	if (attr->max_next_header_offset < field->field_base)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "next protocol field base exceeds limit");
+	if (field->offset_shift)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "unsupported next protocol field shift");
+	node->next_header_field_offset = field->field_base;
+	node->next_header_field_size = field->field_size;
+	return 0;
+}
+
+/* Helper structure to handle field bit intervals. */
+struct mlx5_flex_field_cover {
+	uint16_t num;
+	int32_t start[MLX5_FLEX_ITEM_MAPPING_NUM];
+	int32_t end[MLX5_FLEX_ITEM_MAPPING_NUM];
+	uint8_t mapped[MLX5_FLEX_ITEM_MAPPING_NUM / CHAR_BIT + 1];
+};
+
+static void
+mlx5_flex_insert_field(struct mlx5_flex_field_cover *cover,
+		       uint16_t num, int32_t start, int32_t end)
+{
+	MLX5_ASSERT(num < MLX5_FLEX_ITEM_MAPPING_NUM);
+	MLX5_ASSERT(num <= cover->num);
+	if (num < cover->num) {
+		memmove(&cover->start[num + 1], &cover->start[num],
+			(cover->num - num) * sizeof(int32_t));
+		memmove(&cover->end[num + 1], &cover->end[num],
+			(cover->num - num) * sizeof(int32_t));
+	}
+	cover->start[num] = start;
+	cover->end[num] = end;
+	cover->num++;
+}
+
+static void
+mlx5_flex_merge_field(struct mlx5_flex_field_cover *cover, uint16_t num)
+{
+	uint32_t i, del = 0;
+	int32_t end;
+
+	MLX5_ASSERT(num < MLX5_FLEX_ITEM_MAPPING_NUM);
+	MLX5_ASSERT(num < (cover->num - 1));
+	end = cover->end[num];
+	for (i = num + 1; i < cover->num; i++) {
+		if (end < cover->start[i])
+			break;
+		del++;
+		if (end <= cover->end[i]) {
+			cover->end[num] = cover->end[i];
+			break;
+		}
+	}
+	if (del) {
+		MLX5_ASSERT(del < (cover->num - 1u - num));
+		cover->num -= del;
+		MLX5_ASSERT(cover->num > num);
+		if ((cover->num - num) > 1) {
+			memmove(&cover->start[num + 1],
+				&cover->start[num + 1 + del],
+				(cover->num - num - 1) * sizeof(int32_t));
+			memmove(&cover->end[num + 1],
+				&cover->end[num + 1 + del],
+				(cover->num - num - 1) * sizeof(int32_t));
+		}
+	}
+}
+
+/*
+ * Validate the sample field and update interval array
+ * if its parameters match the "match" field.
+ * Returns:
+ *    < 0  - error
+ *    == 0 - no match, interval array not updated
+ *    > 0  - match, interval array updated
+ */
+static int
+mlx5_flex_cover_sample(struct mlx5_flex_field_cover *cover,
+		       struct rte_flow_item_flex_field *field,
+		       struct rte_flow_item_flex_field *match,
+		       struct mlx5_hca_flex_attr *attr,
+		       struct rte_flow_error *error)
+{
+	int32_t start, end;
+	uint32_t i;
+
+	switch (field->field_mode) {
+	case FIELD_MODE_DUMMY:
+		return 0;
+	case FIELD_MODE_FIXED:
+		if (!(attr->sample_offset_mode &
+		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_FIXED)))
+			return rte_flow_error_set
+				(error, EINVAL,
+				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported sample field mode (FIXED)");
+		if (field->offset_shift)
+			return rte_flow_error_set
+				(error, EINVAL,
+				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "invalid sample field shift (FIXED)");
+		if (field->field_base < 0)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "invalid sample field base (FIXED)");
+		if (field->field_base / CHAR_BIT > attr->max_sample_base_offset)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "sample field base exceeds limit (FIXED)");
+		break;
+	case FIELD_MODE_OFFSET:
+		if (!(attr->sample_offset_mode &
+		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_FIELD)))
+			return rte_flow_error_set
+				(error, EINVAL,
+				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported sample field mode (OFFSET)");
+		if (field->field_base / CHAR_BIT >= 0 &&
+		    field->field_base / CHAR_BIT > attr->max_sample_base_offset)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				"sample field base exceeds limit");
+		break;
+	case FIELD_MODE_BITMASK:
+		if (!(attr->sample_offset_mode &
+		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_BITMASK)))
+			return rte_flow_error_set
+				(error, EINVAL,
+				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported sample field mode (BITMASK)");
+		if (field->field_base / CHAR_BIT >= 0 &&
+		    field->field_base / CHAR_BIT > attr->max_sample_base_offset)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				"sample field base exceeds limit");
+		break;
+	default:
+		return rte_flow_error_set
+			(error, EINVAL,
+			 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "unknown data sample field mode");
+	}
+	if (!match) {
+		if (!field->field_size)
+			return rte_flow_error_set
+				(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				"zero sample field width");
+		if (field->rss_hash)
+			return rte_flow_error_set
+				(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				"unsupported RSS hash over flex item fields");
+		if (field->tunnel_count != FLEX_TUNNEL_MODE_FIRST &&
+		    field->tunnel_count != FLEX_TUNNEL_MODE_OUTER &&
+		    field->tunnel_count != FLEX_TUNNEL_MODE_INNER)
+			return rte_flow_error_set
+				(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				"unsupported sample field tunnel mode");
+		if (field->field_id)
+			DRV_LOG(DEBUG, "sample field id hint ignored");
+	} else {
+		if (field->field_mode != match->field_mode ||
+		    field->rss_hash != match->rss_hash ||
+		    field->tunnel_count != match->tunnel_count ||
+		    field->offset_base != match->offset_base ||
+		    field->offset_mask != match->offset_mask ||
+		    field->offset_shift != match->offset_shift)
+			return 0;
+	}
+	start = field->field_base;
+	end = start + field->field_size;
+	/* Add the new or similar field to interval array. */
+	if (!cover->num) {
+		cover->start[cover->num] = start;
+		cover->end[cover->num] = end;
+		cover->num = 1;
+		return 1;
+	}
+	for (i = 0; i < cover->num; i++) {
+		if (start > cover->end[i]) {
+			if (i >= (cover->num - 1u)) {
+				mlx5_flex_insert_field(cover, cover->num,
+						       start, end);
+				break;
+			}
+			continue;
+		}
+		if (end < cover->start[i]) {
+			mlx5_flex_insert_field(cover, i, start, end);
+			break;
+		}
+		if (start < cover->start[i])
+			cover->start[i] = start;
+		if (end > cover->end[i]) {
+			cover->end[i] = end;
+			if (i < (cover->num - 1u))
+				mlx5_flex_merge_field(cover, i);
+		}
+		break;
+	}
+	return 1;
+}
+
+static void
+mlx5_flex_config_sample(struct mlx5_devx_match_sample_attr *na,
+		       struct rte_flow_item_flex_field *field)
+{
+	memset(na, 0, sizeof(struct mlx5_devx_match_sample_attr));
+	na->flow_match_sample_en = 1;
+	switch (field->field_mode) {
+	case FIELD_MODE_FIXED:
+		na->flow_match_sample_offset_mode =
+			MLX5_GRAPH_SAMPLE_OFFSET_FIXED;
+		break;
+	case FIELD_MODE_OFFSET:
+		na->flow_match_sample_offset_mode =
+			MLX5_GRAPH_SAMPLE_OFFSET_FIELD;
+		na->flow_match_sample_field_offset = field->offset_base;
+		na->flow_match_sample_field_offset_mask = field->offset_mask;
+		na->flow_match_sample_field_offset_shift = field->offset_shift;
+		break;
+	case FIELD_MODE_BITMASK:
+		na->flow_match_sample_offset_mode =
+			MLX5_GRAPH_SAMPLE_OFFSET_BITMASK;
+		na->flow_match_sample_field_offset = field->offset_base;
+		na->flow_match_sample_field_offset_mask = field->offset_mask;
+		na->flow_match_sample_field_offset_shift = field->offset_shift;
+		break;
+	default:
+		MLX5_ASSERT(false);
+		break;
+	}
+	switch (field->tunnel_count) {
+	case FLEX_TUNNEL_MODE_FIRST:
+		na->flow_match_sample_tunnel_mode =
+			MLX5_GRAPH_SAMPLE_TUNNEL_FIRST;
+		break;
+	case FLEX_TUNNEL_MODE_OUTER:
+		na->flow_match_sample_tunnel_mode =
+			MLX5_GRAPH_SAMPLE_TUNNEL_OUTER;
+		break;
+	case FLEX_TUNNEL_MODE_INNER:
+		na->flow_match_sample_tunnel_mode =
+			MLX5_GRAPH_SAMPLE_TUNNEL_INNER;
+		break;
+	default:
+		MLX5_ASSERT(false);
+		break;
+	}
+}
+
+/* Map specified field to set/subset of allocated sample registers. */
+static int
+mlx5_flex_map_sample(struct rte_flow_item_flex_field *field,
+		     struct mlx5_flex_parser_devx *parser,
+		     struct mlx5_flex_item *item,
+		     struct rte_flow_error *error)
+{
+	struct mlx5_devx_match_sample_attr node;
+	int32_t start = field->field_base;
+	int32_t end = start + field->field_size;
+	uint32_t i, done_bits = 0;
+
+	mlx5_flex_config_sample(&node, field);
+	for (i = 0; i < parser->num_samples; i++) {
+		struct mlx5_devx_match_sample_attr *sample =
+			&parser->devx_conf.sample[i];
+		int32_t reg_start, reg_end;
+		int32_t cov_start, cov_end;
+		struct mlx5_flex_pattern_field *trans;
+
+		MLX5_ASSERT(sample->flow_match_sample_en);
+		if (!sample->flow_match_sample_en)
+			break;
+		node.flow_match_sample_field_base_offset =
+			sample->flow_match_sample_field_base_offset;
+		if (memcmp(&node, sample, sizeof(node)))
+			continue;
+		reg_start = (int8_t)sample->flow_match_sample_field_base_offset;
+		reg_start *= CHAR_BIT;
+		reg_end = reg_start + 32;
+		if (end <= reg_start || start >= reg_end)
+			continue;
+		cov_start = RTE_MAX(reg_start, start);
+		cov_end = RTE_MIN(reg_end, end);
+		MLX5_ASSERT(cov_end > cov_start);
+		done_bits += cov_end - cov_start;
+		if (item->mapnum >= MLX5_FLEX_ITEM_MAPPING_NUM)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "too many flex item pattern translations");
+		trans = &item->map[item->mapnum];
+		item->mapnum++;
+		trans->reg_id = i;
+		trans->shift = cov_start - reg_start;
+		trans->width = cov_end - cov_start;
+	}
+	if (done_bits != field->field_size) {
+		MLX5_ASSERT(false);
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "failed to map field to sample register");
+	}
+	return 0;
+}
+
+/* Allocate sample registers for the specified field type and interval array. */
+static int
+mlx5_flex_alloc_sample(struct mlx5_flex_field_cover *cover,
+		       struct mlx5_flex_parser_devx *parser,
+		       struct rte_flow_item_flex_field *field,
+		       struct mlx5_hca_flex_attr *attr,
+		       struct rte_flow_error *error)
+{
+	struct mlx5_devx_match_sample_attr node;
+	uint32_t idx = 0;
+
+	mlx5_flex_config_sample(&node, field);
+	while (idx < cover->num) {
+		int32_t start, end;
+
+		/* Sample base offsets are in bytes, should align. */
+		start = RTE_ALIGN_FLOOR(cover->start[idx], CHAR_BIT);
+		node.flow_match_sample_field_base_offset =
+						(start / CHAR_BIT) & 0xFF;
+		/* Allocate sample register. */
+		if (parser->num_samples >= MLX5_GRAPH_NODE_SAMPLE_NUM ||
+		    parser->num_samples >= attr->max_num_sample ||
+		    parser->num_samples >= attr->max_num_prog_sample)
+			return rte_flow_error_set
+				(error, EINVAL,
+				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "no sample registers to handle all flex item fields");
+		parser->devx_conf.sample[parser->num_samples] = node;
+		parser->num_samples++;
+		/* Remove or update covered intervals. */
+		end = start + 32;
+		while (idx < cover->num) {
+			if (end >= cover->end[idx]) {
+				idx++;
+				continue;
+			}
+			if (end > cover->start[idx])
+				cover->start[idx] = end;
+			break;
+		}
+	}
+	return 0;
+}
+
+static int
+mlx5_flex_translate_sample(struct mlx5_hca_flex_attr *attr,
+			   const struct rte_flow_item_flex_conf *conf,
+			   struct mlx5_flex_parser_devx *parser,
+			   struct mlx5_flex_item *item,
+			   struct rte_flow_error *error)
+{
+	struct mlx5_flex_field_cover cover;
+	uint32_t i, j;
+	int ret;
+
+	if (conf->sample_num > MLX5_FLEX_ITEM_MAPPING_NUM)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "sample field number exceeds limit");
+	/*
+	 * The application can specify fields smaller or bigger than 32 bits
+	 * covered with single sample register and it can specify field
+	 * offsets in any order.
+	 *
+	 * Gather all similar fields together, build array of bit intervals
+	 * in ascending order, and try to cover them with the smallest
+	 * set of sample registers.
+	 */
+	memset(&cover, 0, sizeof(cover));
+	for (i = 0; i < conf->sample_num; i++) {
+		struct rte_flow_item_flex_field *fl = conf->sample_data + i;
+
+		/* Check whether field was covered in the previous iteration. */
+		if (cover.mapped[i / CHAR_BIT] & (1u << (i % CHAR_BIT)))
+			continue;
+		if (fl->field_mode == FIELD_MODE_DUMMY)
+			continue;
+		/* Build an interval array for the field and similar ones */
+		cover.num = 0;
+		/* Add the first field to array unconditionally. */
+		ret = mlx5_flex_cover_sample(&cover, fl, NULL, attr, error);
+		if (ret < 0)
+			return ret;
+		MLX5_ASSERT(ret > 0);
+		cover.mapped[i / CHAR_BIT] |= 1u << (i % CHAR_BIT);
+		for (j = i + 1; j < conf->sample_num; j++) {
+			struct rte_flow_item_flex_field *ft;
+
+			/* Add field to array if its type matches. */
+			ft = conf->sample_data + j;
+			ret = mlx5_flex_cover_sample(&cover, ft, fl,
+						     attr, error);
+			if (ret < 0)
+				return ret;
+			if (!ret)
+				continue;
+			cover.mapped[j / CHAR_BIT] |= 1u << (j % CHAR_BIT);
+		}
+		/* Allocate sample registers to cover array of intervals. */
+		ret = mlx5_flex_alloc_sample(&cover, parser, fl, attr, error);
+		if (ret)
+			return ret;
+	}
+	/* Build the item pattern translating data on flow creation. */
+	item->mapnum = 0;
+	memset(&item->map, 0, sizeof(item->map));
+	for (i = 0; i < conf->sample_num; i++) {
+		struct rte_flow_item_flex_field *fl = conf->sample_data + i;
+
+		ret = mlx5_flex_map_sample(fl, parser, item, error);
+		if (ret) {
+			MLX5_ASSERT(false);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+static int
+mlx5_flex_arc_type(enum rte_flow_item_type type, int in)
+{
+	switch (type) {
+	case RTE_FLOW_ITEM_TYPE_ETH:
+		return MLX5_GRAPH_ARC_NODE_MAC;
+	case RTE_FLOW_ITEM_TYPE_IPV4:
+		return in ? MLX5_GRAPH_ARC_NODE_IP : MLX5_GRAPH_ARC_NODE_IPV4;
+	case RTE_FLOW_ITEM_TYPE_IPV6:
+		return in ? MLX5_GRAPH_ARC_NODE_IP : MLX5_GRAPH_ARC_NODE_IPV6;
+	case RTE_FLOW_ITEM_TYPE_UDP:
+		return MLX5_GRAPH_ARC_NODE_UDP;
+	case RTE_FLOW_ITEM_TYPE_TCP:
+		return MLX5_GRAPH_ARC_NODE_TCP;
+	case RTE_FLOW_ITEM_TYPE_MPLS:
+		return MLX5_GRAPH_ARC_NODE_MPLS;
+	case RTE_FLOW_ITEM_TYPE_GRE:
+		return MLX5_GRAPH_ARC_NODE_GRE;
+	case RTE_FLOW_ITEM_TYPE_GENEVE:
+		return MLX5_GRAPH_ARC_NODE_GENEVE;
+	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+		return MLX5_GRAPH_ARC_NODE_VXLAN_GPE;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int
+mlx5_flex_arc_in_eth(const struct rte_flow_item *item,
+		     struct rte_flow_error *error)
+{
+	const struct rte_flow_item_eth *spec = item->spec;
+	const struct rte_flow_item_eth *mask = item->mask;
+	struct rte_flow_item_eth eth = { .hdr.ether_type = RTE_BE16(0xFFFF) };
+
+	if (memcmp(mask, &eth, sizeof(struct rte_flow_item_eth))) {
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
+			 "invalid eth item mask");
+	}
+	return rte_be_to_cpu_16(spec->hdr.ether_type);
+}
+
+static int
+mlx5_flex_arc_in_udp(const struct rte_flow_item *item,
+		     struct rte_flow_error *error)
+{
+	const struct rte_flow_item_udp *spec = item->spec;
+	const struct rte_flow_item_udp *mask = item->mask;
+	struct rte_flow_item_udp udp = { .hdr.dst_port = RTE_BE16(0xFFFF) };
+
+	if (memcmp(mask, &udp, sizeof(struct rte_flow_item_udp))) {
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
+			 "invalid udp item mask");
+	}
+	return rte_be_to_cpu_16(spec->hdr.dst_port);
+}
+
+static int
+mlx5_flex_translate_arc_in(struct mlx5_hca_flex_attr *attr,
+			   const struct rte_flow_item_flex_conf *conf,
+			   struct mlx5_flex_parser_devx *devx,
+			   struct mlx5_flex_item *item,
+			   struct rte_flow_error *error)
+{
+	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
+	uint32_t i;
+
+	RTE_SET_USED(item);
+	if (conf->input_num > attr->max_num_arc_in)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "too many input links");
+	for (i = 0; i < conf->input_num; i++) {
+		struct mlx5_devx_graph_arc_attr *arc = node->in + i;
+		struct rte_flow_item_flex_link *link = conf->input_link + i;
+		const struct rte_flow_item *rte_item = &link->item;
+		int arc_type;
+		int ret;
+
+		if (!rte_item->spec || !rte_item->mask || rte_item->last)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "invalid flex item IN arc format");
+		arc_type = mlx5_flex_arc_type(rte_item->type, true);
+		if (arc_type < 0 || !(attr->node_in & RTE_BIT32(arc_type)))
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported flex item IN arc type");
+		arc->arc_parse_graph_node = arc_type;
+		arc->start_inner_tunnel = link->tunnel ? 1 : 0;
+		/*
+		 * Configure arc IN condition value. The value location depends
+		 * on protocol. Current FW version supports IP & UDP for IN
+		 * arcs only, and locations for these protocols are defined.
+		 * Add more protocols when available.
+		 */
+		switch (rte_item->type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			ret = mlx5_flex_arc_in_eth(rte_item, error);
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			ret = mlx5_flex_arc_in_udp(rte_item, error);
+			break;
+		default:
+			MLX5_ASSERT(false);
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported flex item IN arc type");
+		}
+		if (ret < 0)
+			return ret;
+		arc->compare_condition_value = (uint16_t)ret;
+	}
+	return 0;
+}
+
+static int
+mlx5_flex_translate_arc_out(struct mlx5_hca_flex_attr *attr,
+			    const struct rte_flow_item_flex_conf *conf,
+			    struct mlx5_flex_parser_devx *devx,
+			    struct mlx5_flex_item *item,
+			    struct rte_flow_error *error)
+{
+	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
+	uint32_t i;
+
+	if (conf->output_num > attr->max_num_arc_out)
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			 "too many output links");
+	for (i = 0; i < conf->output_num; i++) {
+		struct mlx5_devx_graph_arc_attr *arc = node->out + i;
+		struct rte_flow_item_flex_link *link = conf->output_link + i;
+		const struct rte_flow_item *rte_item = &link->item;
+		int arc_type;
+
+		if (rte_item->spec || rte_item->mask || rte_item->last)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "flex node: invalid OUT arc format");
+		arc_type = mlx5_flex_arc_type(rte_item->type, false);
+		if (arc_type < 0 || !(attr->node_out & RTE_BIT32(arc_type)))
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "unsupported flex item OUT arc type");
+		arc->arc_parse_graph_node = arc_type;
+		arc->start_inner_tunnel = link->tunnel ? 1 : 0;
+		arc->compare_condition_value = link->next;
+		if (link->tunnel)
+			item->tunnel = 1;
+	}
+	return 0;
+}
+
+/* Translate RTE flex item API configuration into flex parser settings. */
+static int
+mlx5_flex_translate_conf(struct rte_eth_dev *dev,
+			 const struct rte_flow_item_flex_conf *conf,
+			 struct mlx5_flex_parser_devx *devx,
+			 struct mlx5_flex_item *item,
+			 struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hca_flex_attr *attr = &priv->config.hca_attr.flex;
+	int ret;
+
+	ret = mlx5_flex_translate_length(attr, conf, devx, error);
+	if (ret)
+		return ret;
+	ret = mlx5_flex_translate_next(attr, conf, devx, error);
+	if (ret)
+		return ret;
+	ret = mlx5_flex_translate_sample(attr, conf, devx, item, error);
+	if (ret)
+		return ret;
+	ret = mlx5_flex_translate_arc_in(attr, conf, devx, item, error);
+	if (ret)
+		return ret;
+	ret = mlx5_flex_translate_arc_out(attr, conf, devx, item, error);
+	if (ret)
+		return ret;
+	return 0;
+}
+
 /**
  * Create the flex item with specified configuration over the Ethernet device.
  *
@@ -145,6 +889,8 @@ flow_dv_item_create(struct rte_eth_dev *dev,
 				   "too many flex items created on the port");
 		return NULL;
 	}
+	if (mlx5_flex_translate_conf(dev, conf, &devx_config, flex, error))
+		goto error;
 	ent = mlx5_list_register(priv->sh->flex_parsers_dv, &devx_config);
 	if (!ent) {
 		rte_flow_error_set(error, ENOMEM,
@@ -153,7 +899,6 @@ flow_dv_item_create(struct rte_eth_dev *dev,
 		goto error;
 	}
 	flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
-	RTE_SET_USED(conf);
 	/* Mark initialized flex item valid. */
 	__atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
 	return (struct rte_flow_item_flex_handle *)flex;
@@ -278,6 +1023,7 @@ mlx5_flex_parser_remove_cb(void *list_ctx, struct mlx5_list_entry *entry)
 	RTE_SET_USED(list_ctx);
 	MLX5_ASSERT(fp->devx_obj);
 	claim_zero(mlx5_devx_cmd_destroy(fp->devx_obj));
+	DRV_LOG(DEBUG, "DevX flex parser %p destroyed", (const void *)fp);
 	mlx5_free(entry);
 }
 
-- 
2.18.1

