DPDK patches and discussions
From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
To: <dev@dpdk.org>
Cc: <matan@nvidia.com>, <rasland@nvidia.com>, <orika@nvidia.com>,
	<dsosnowski@nvidia.com>, <stable@dpdk.org>
Subject: [PATCH 9/9] net/mlx5: fix flex item header length field translation
Date: Wed, 11 Sep 2024 19:04:58 +0300
Message-ID: <20240911160458.524732-9-viacheslavo@nvidia.com>
In-Reply-To: <20240911160458.524732-1-viacheslavo@nvidia.com>

There are hardware-imposed limitations on the header length
field description for the mask and shift combinations in the
FIELD_MODE_OFFSET mode.

The patch updates:
  - the parameter checks for the header length field in
    FIELD_MODE_OFFSET mode
  - the check that the length field does not cross a dword
    boundary in the header
  - the mask extension to the hardware-required 6-bit width
    (see the mask computation sketch after this list)
  - the adjustment of the mask left margin offset, preventing
    a move to the preceding dword
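
A minimal, self-contained sketch of the mask computation described by
the updated comment table in the diff is given below. It assumes the
dword shift anchor equals 2 and the base hardware mask is the 6-bit
value 0x3F (both inferred from that table); the macro and function
names are illustrative, not the driver's own.

  #include <stdint.h>
  #include <stdio.h>

  #define HDR_LEN_SHIFT_DWORD 2     /* assumed dword shift anchor */
  #define BASE_HDR_LEN_MASK   0x3Fu /* assumed 6-bit base mask */

  /* Largest header length mask the hardware accepts for a shift. */
  static uint8_t
  hdr_len_mask(unsigned int shift)
  {
          int diff = (int)shift - HDR_LEN_SHIFT_DWORD;

          /* Shift left below the anchor, right above it, without
           * truncating back to the base mask (the fixed behavior).
           */
          return diff < 0 ? BASE_HDR_LEN_MASK << -diff :
                            BASE_HDR_LEN_MASK >> diff;
  }

  int
  main(void)
  {
          /* Reproduces the table: 0xFC, 0x7E, 0x3F, ... 0x01. */
          for (unsigned int shift = 0; shift <= 7; shift++)
                  printf("shift %u -> mask 0x%02X\n",
                         shift, (unsigned int)hdr_len_mask(shift));
          return 0;
  }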

Fixes: b293e8e49d78 ("net/mlx5: translate flex item configuration")
Cc: stable@dpdk.org

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow_flex.c | 120 ++++++++++++++++--------------
 1 file changed, 66 insertions(+), 54 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index bf38643a23..afed16985a 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -449,12 +449,14 @@ mlx5_flex_release_index(struct rte_eth_dev *dev,
  *
  *   shift      mask
  * ------- ---------------
- *    0     b111100  0x3C
- *    1     b111110  0x3E
- *    2     b111111  0x3F
- *    3     b011111  0x1F
- *    4     b001111  0x0F
- *    5     b000111  0x07
+ *    0     b11111100  0xFC
+ *    1     b01111110  0x7E
+ *    2     b00111111  0x3F
+ *    3     b00011111  0x1F
+ *    4     b00001111  0x0F
+ *    5     b00000111  0x07
+ *    6     b00000011  0x03
+ *    7     b00000001  0x01
  */
 static uint8_t
 mlx5_flex_hdr_len_mask(uint8_t shift,
@@ -464,8 +466,7 @@ mlx5_flex_hdr_len_mask(uint8_t shift,
 	int diff = shift - MLX5_PARSE_GRAPH_NODE_HDR_LEN_SHIFT_DWORD;
 
 	base_mask = mlx5_hca_parse_graph_node_base_hdr_len_mask(attr);
-	return diff == 0 ? base_mask :
-	       diff < 0 ? (base_mask << -diff) & base_mask : base_mask >> diff;
+	return diff < 0 ? base_mask << -diff : base_mask >> diff;
 }
 
 static int
@@ -476,7 +477,6 @@ mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr,
 {
 	const struct rte_flow_item_flex_field *field = &conf->next_header;
 	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
-	uint32_t len_width, mask;
 
 	if (field->field_base % CHAR_BIT)
 		return rte_flow_error_set
@@ -504,7 +504,14 @@ mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr,
 				 "negative header length field base (FIXED)");
 		node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED;
 		break;
-	case FIELD_MODE_OFFSET:
+	case FIELD_MODE_OFFSET: {
+		uint32_t msb, lsb;
+		int32_t shift = field->offset_shift;
+		uint32_t offset = field->offset_base;
+		uint32_t mask = field->offset_mask;
+		uint32_t wmax = attr->header_length_mask_width +
+				MLX5_PARSE_GRAPH_NODE_HDR_LEN_SHIFT_DWORD;
+
 		if (!(attr->header_length_mode &
 		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIELD)))
 			return rte_flow_error_set
@@ -514,47 +521,73 @@ mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr,
 			return rte_flow_error_set
 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
 				 "field size is a must for offset mode");
-		if (field->field_size + field->offset_base < attr->header_length_mask_width)
+		if ((offset ^ (field->field_size + offset)) >> 5)
 			return rte_flow_error_set
 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
-				 "field size plus offset_base is too small");
-		node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIELD;
-		if (field->offset_mask == 0 ||
-		    !rte_is_power_of_2(field->offset_mask + 1))
+				 "field crosses the 32-bit word boundary");
+		/* Hardware counts in dwords, all shifts done by offset within mask */
+		if (shift < 0 || (uint32_t)shift >= wmax)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "header length field shift exceeds limits (OFFSET)");
+		if (!mask)
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "zero length field offset mask (OFFSET)");
+		msb = rte_fls_u32(mask) - 1;
+		lsb = rte_bsf32(mask);
+		if (!rte_is_power_of_2((mask >> lsb) + 1))
 			return rte_flow_error_set
 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
-				 "invalid length field offset mask (OFFSET)");
-		len_width = rte_fls_u32(field->offset_mask);
-		if (len_width > attr->header_length_mask_width)
+				 "length field offset mask not contiguous (OFFSET)");
+		if (msb >= field->field_size)
 			return rte_flow_error_set
 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
-				 "length field offset mask too wide (OFFSET)");
-		mask = mlx5_flex_hdr_len_mask(field->offset_shift, attr);
-		if (mask < field->offset_mask)
+				 "length field offset mask exceeds field size (OFFSET)");
+		if (msb >= wmax)
 			return rte_flow_error_set
 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
-				 "length field shift too big (OFFSET)");
-		node->header_length_field_mask = RTE_MIN(mask,
-							 field->offset_mask);
+				 "length field offset mask exceeds supported width (OFFSET)");
+		if (mask & ~mlx5_flex_hdr_len_mask(shift, attr))
+			return rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				 "mask and shift combination not supported (OFFSET)");
+		msb++;
+		offset += field->field_size - msb;
+		if (msb < attr->header_length_mask_width) {
+			if (attr->header_length_mask_width - msb > offset)
+				return rte_flow_error_set
+					(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					 "field size plus offset_base is too small");
+			offset += msb;
+			/*
+			 * Here we can move to preceding dword. Hardware does
+			 * cyclic left shift so we should avoid this and stay
+			 * at current dword offset.
+			 */
+			offset = (offset & ~0x1Fu) |
+				 ((offset - attr->header_length_mask_width) & 0x1F);
+		}
+		node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIELD;
+		node->header_length_field_mask = mask;
+		node->header_length_field_shift = shift;
+		node->header_length_field_offset = offset;
 		break;
+	}
 	case FIELD_MODE_BITMASK:
 		if (!(attr->header_length_mode &
 		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_BITMASK)))
 			return rte_flow_error_set
 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
 				 "unsupported header length field mode (BITMASK)");
-		if (attr->header_length_mask_width < field->field_size)
+		if (field->offset_shift > 15 || field->offset_shift < 0)
 			return rte_flow_error_set
 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
-				 "header length field width exceeds limit");
+				 "header length field shift exceeds limit (BITMASK)");
 		node->header_length_mode = MLX5_GRAPH_NODE_LEN_BITMASK;
-		mask = mlx5_flex_hdr_len_mask(field->offset_shift, attr);
-		if (mask < field->offset_mask)
-			return rte_flow_error_set
-				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
-				 "length field shift too big (BITMASK)");
-		node->header_length_field_mask = RTE_MIN(mask,
-							 field->offset_mask);
+		node->header_length_field_mask = field->offset_mask;
+		node->header_length_field_shift = field->offset_shift;
+		node->header_length_field_offset = field->offset_base;
 		break;
 	default:
 		return rte_flow_error_set
@@ -567,27 +600,6 @@ mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr,
 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
 			 "header length field base exceeds limit");
 	node->header_length_base_value = field->field_base / CHAR_BIT;
-	if (field->field_mode == FIELD_MODE_OFFSET ||
-	    field->field_mode == FIELD_MODE_BITMASK) {
-		if (field->offset_shift > 15 || field->offset_shift < 0)
-			return rte_flow_error_set
-				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
-				 "header length field shift exceeds limit");
-		node->header_length_field_shift = field->offset_shift;
-		node->header_length_field_offset = field->offset_base;
-	}
-	if (field->field_mode == FIELD_MODE_OFFSET) {
-		if (field->field_size > attr->header_length_mask_width) {
-			node->header_length_field_offset +=
-				field->field_size - attr->header_length_mask_width;
-		} else if (field->field_size < attr->header_length_mask_width) {
-			node->header_length_field_offset -=
-				attr->header_length_mask_width - field->field_size;
-			node->header_length_field_mask =
-					RTE_MIN(node->header_length_field_mask,
-						(1u << field->field_size) - 1);
-		}
-	}
 	return 0;
 }
 
-- 
2.34.1
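
For readers following the new FIELD_MODE_OFFSET branch above, here is a
hedged, standalone sketch of a subset of the checks it performs: the
32-bit word boundary test, the contiguous-mask test, and the
mask-versus-shift compatibility test. The function and macro names are
illustrative, the 6-bit mask width and the dword shift anchor of 2 are
assumptions, and __builtin_ctz stands in for rte_bsf32; the driver reads
the real values from the HCA attributes and
struct rte_flow_item_flex_field.

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  #define MASK_WIDTH  6 /* assumed header_length_mask_width */
  #define SHIFT_DWORD 2 /* assumed dword shift anchor */

  /* Largest mask the hardware accepts for a given shift (see the
   * comment table in the diff).
   */
  static uint32_t
  hw_hdr_len_mask(uint32_t shift)
  {
          int diff = (int)shift - SHIFT_DWORD;

          return diff < 0 ? 0x3Fu << -diff : 0x3Fu >> diff;
  }

  static bool
  offset_mode_ok(uint32_t offset_base, uint32_t field_size,
                 int32_t shift, uint32_t mask)
  {
          /* The length field must not cross a 32-bit word boundary. */
          if ((offset_base ^ (field_size + offset_base)) >> 5)
                  return false;
          /* The shift must stay inside the supported window. */
          if (shift < 0 || (uint32_t)shift >= MASK_WIDTH + SHIFT_DWORD)
                  return false;
          if (mask == 0)
                  return false;
          /* The mask must be one contiguous run of set bits. */
          uint32_t norm = mask >> __builtin_ctz(mask);
          if (norm & (norm + 1))
                  return false;
          /* Every mask bit must be representable for this shift. */
          if (mask & ~hw_hdr_len_mask((uint32_t)shift))
                  return false;
          return true;
  }

  int
  main(void)
  {
          /* 16-bit length field at bit 0, low 6 bits used: accepted. */
          printf("%d\n", offset_mode_ok(0, 16, 2, 0x3F));
          /* A field spanning bits 28..35 crosses a dword boundary. */
          printf("%d\n", offset_mode_ok(28, 8, 2, 0x3F));
          return 0;
  }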


Thread overview: 29+ messages
2024-09-11 16:04 [PATCH 1/9] net/mlx5: update flex parser arc types support Viacheslav Ovsiienko
2024-09-11 16:04 ` [PATCH 2/9] net/mlx5: add flex item query tunnel mode routine Viacheslav Ovsiienko
2024-09-11 16:04 ` [PATCH 3/9] net/mlx5/hws: fix flex item support as tunnel header Viacheslav Ovsiienko
2024-09-11 16:04 ` [PATCH 4/9] net/mlx5: fix flex item tunnel mode handling Viacheslav Ovsiienko
2024-09-11 16:04 ` [PATCH 5/9] net/mlx5: fix number of supported flex parsers Viacheslav Ovsiienko
2024-09-11 16:04 ` [PATCH 6/9] app/testpmd: remove flex item init command leftover Viacheslav Ovsiienko
2024-09-11 16:04 ` [PATCH 7/9] net/mlx5: fix next protocol validation after flex item Viacheslav Ovsiienko
2024-09-11 16:04 ` [PATCH 8/9] net/mlx5: fix non full word sample fields in " Viacheslav Ovsiienko
2024-09-11 16:04 ` Viacheslav Ovsiienko [this message]
2024-09-18 13:46 ` [PATCH v2 0/9] net/mlx5: cumulative fix series for " Viacheslav Ovsiienko
2024-09-18 13:46   ` [PATCH v2 1/9] net/mlx5: update flex parser arc types support Viacheslav Ovsiienko
2024-09-18 13:57     ` Dariusz Sosnowski
2024-09-18 13:46   ` [PATCH v2 2/9] net/mlx5: add flex item query tunnel mode routine Viacheslav Ovsiienko
2024-09-18 13:57     ` Dariusz Sosnowski
2024-09-18 13:46   ` [PATCH v2 3/9] net/mlx5/hws: fix flex item support as tunnel header Viacheslav Ovsiienko
2024-09-18 13:57     ` Dariusz Sosnowski
2024-09-18 13:46   ` [PATCH v2 4/9] net/mlx5: fix flex item tunnel mode handling Viacheslav Ovsiienko
2024-09-18 13:57     ` Dariusz Sosnowski
2024-09-18 13:46   ` [PATCH v2 5/9] net/mlx5: fix number of supported flex parsers Viacheslav Ovsiienko
2024-09-18 13:57     ` Dariusz Sosnowski
2024-09-18 13:46   ` [PATCH v2 6/9] app/testpmd: remove flex item init command leftover Viacheslav Ovsiienko
2024-09-18 13:58     ` Dariusz Sosnowski
2024-09-18 13:46   ` [PATCH v2 7/9] net/mlx5: fix next protocol validation after flex item Viacheslav Ovsiienko
2024-09-18 13:58     ` Dariusz Sosnowski
2024-09-18 13:46   ` [PATCH v2 8/9] net/mlx5: fix non full word sample fields in " Viacheslav Ovsiienko
2024-09-18 13:58     ` Dariusz Sosnowski
2024-09-18 13:46   ` [PATCH v2 9/9] net/mlx5: fix flex item header length field translation Viacheslav Ovsiienko
2024-09-18 13:58     ` Dariusz Sosnowski
2024-09-18 13:51   ` [PATCH v2 0/9] net/mlx5: cumulative fix series for flex item Dariusz Sosnowski
