From: Rongwei Liu <rongweil@nvidia.com>
To: <matan@nvidia.com>, <viacheslavo@nvidia.com>, <orika@nvidia.com>,
<thomas@monjalon.net>
Cc: <dev@dpdk.org>, <rasland@nvidia.com>
Subject: [PATCH v2 07/11] net/mlx5: add flex item modify field implementation
Date: Thu, 19 Jan 2023 06:58:20 +0200 [thread overview]
Message-ID: <20230119045824.665663-8-rongweil@nvidia.com> (raw)
In-Reply-To: <20230119045824.665663-1-rongweil@nvidia.com>
Add flex item modify field HWS implementation.
The minimum modify boundary is one byte.
Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 1 +
drivers/net/mlx5/mlx5_flow.h | 3 +
drivers/net/mlx5/mlx5_flow_dv.c | 165 +++++++++++++++++++++++++++++---
drivers/net/mlx5/mlx5_flow_hw.c | 14 ++-
4 files changed, 170 insertions(+), 13 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index ce6cd98fd7..0c2a516e9d 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -759,6 +759,7 @@ enum mlx5_modification_field {
MLX5_MODI_OUT_IP_ECN = 0x73,
MLX5_MODI_TUNNEL_HDR_DW_1 = 0x75,
MLX5_MODI_GTPU_FIRST_EXT_DW_0 = 0x76,
+ MLX5_MODI_INVALID = INT_MAX,
};
/* Total number of metadata reg_c's. */
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index c8761c4e5a..c71fa1c0ad 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1080,6 +1080,8 @@ struct field_modify_info {
uint32_t size; /* Size of field in protocol header, in bytes. */
uint32_t offset; /* Offset of field in protocol header, in bytes. */
enum mlx5_modification_field id;
+ uint32_t shift;
+ uint8_t is_flex; /* Temporary indicator for flex item modify field WA. */
};
/* HW steering flow attributes. */
@@ -1244,6 +1246,7 @@ struct rte_flow_actions_template {
uint16_t mhdr_off; /* Offset of DR modify header action. */
uint32_t refcnt; /* Reference counter. */
uint16_t rx_cpy_pos; /* Action position of Rx metadata to be copied. */
+ uint8_t flex_item; /* flex item index. */
};
/* Jump action struct. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 284f18da11..92a5914d4b 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -414,10 +414,15 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
++field;
continue;
}
- /* Deduce actual data width in bits from mask value. */
- off_b = rte_bsf32(mask) + carry_b;
- size_b = sizeof(uint32_t) * CHAR_BIT -
- off_b - __builtin_clz(mask);
+ if (type == MLX5_MODIFICATION_TYPE_COPY && field->is_flex) {
+ off_b = 32 - field->shift + carry_b - field->size * CHAR_BIT;
+ size_b = field->size * CHAR_BIT - carry_b;
+ } else {
+ /* Deduce actual data width in bits from mask value. */
+ off_b = rte_bsf32(mask) + carry_b;
+ size_b = sizeof(uint32_t) * CHAR_BIT -
+ off_b - __builtin_clz(mask);
+ }
MLX5_ASSERT(size_b);
actions[i] = (struct mlx5_modification_cmd) {
.action_type = type,
@@ -437,40 +442,46 @@ flow_dv_convert_modify_action(struct rte_flow_item *item,
* Destination field overflow. Copy leftovers of
* a source field to the next destination field.
*/
- carry_b = 0;
if ((size_b > dcopy->size * CHAR_BIT - dcopy->offset) &&
dcopy->size != 0) {
actions[i].length =
dcopy->size * CHAR_BIT - dcopy->offset;
- carry_b = actions[i].length;
+ carry_b += actions[i].length;
next_field = false;
+ } else {
+ carry_b = 0;
}
/*
* Not enough bits in a source filed to fill a
* destination field. Switch to the next source.
*/
if ((size_b < dcopy->size * CHAR_BIT - dcopy->offset) &&
- (size_b == field->size * CHAR_BIT - off_b)) {
- actions[i].length =
- field->size * CHAR_BIT - off_b;
+ ((size_b == field->size * CHAR_BIT - off_b) ||
+ field->is_flex)) {
+ actions[i].length = size_b;
dcopy->offset += actions[i].length;
next_dcopy = false;
}
- if (next_dcopy)
- ++dcopy;
} else {
MLX5_ASSERT(item->spec);
data = flow_dv_fetch_field((const uint8_t *)item->spec +
field->offset, field->size);
/* Shift out the trailing masked bits from data. */
data = (data & mask) >> off_b;
+ if (field->is_flex)
+ actions[i].offset = 32 - field->shift - field->size * CHAR_BIT;
actions[i].data1 = rte_cpu_to_be_32(data);
}
/* Convert entire record to expected big-endian format. */
actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
+ if ((type != MLX5_MODIFICATION_TYPE_COPY ||
+ dcopy->id != (enum mlx5_modification_field)UINT32_MAX) &&
+ field->id != (enum mlx5_modification_field)UINT32_MAX)
+ ++i;
+ if (next_dcopy && type == MLX5_MODIFICATION_TYPE_COPY)
+ ++dcopy;
if (next_field)
++field;
- ++i;
} while (field->size);
if (resource->actions_num == i)
return rte_flow_error_set(error, EINVAL,
@@ -1421,6 +1432,131 @@ flow_modify_info_mask_32_masked(uint32_t length, uint32_t off, uint32_t post_mas
return rte_cpu_to_be_32(mask & post_mask);
}
+static void
+mlx5_modify_flex_item(const struct rte_eth_dev *dev,
+ const struct mlx5_flex_item *flex,
+ const struct rte_flow_action_modify_data *data,
+ struct field_modify_info *info,
+ uint32_t *mask, uint32_t width)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hca_flex_attr *attr = &priv->sh->cdev->config.hca_attr.flex;
+ uint32_t i, j;
+ int id = 0;
+ uint32_t pos = 0;
+ const struct mlx5_flex_pattern_field *map;
+ uint32_t offset = data->offset;
+ uint32_t width_left = width;
+ uint32_t def;
+ uint32_t cur_width = 0;
+ uint32_t tmp_ofs;
+ uint32_t idx = 0;
+ struct field_modify_info tmp;
+ int tmp_id;
+
+ if (!attr->ext_sample_id) {
+ DRV_LOG(ERR, "FW doesn't support modify field with flex item.");
+ return;
+ }
+ /*
+ * Search for the mapping instance whose accumulated width
+ * first reaches or exceeds data->offset.
+ */
+ for (i = 0; i < flex->mapnum; i++) {
+ if (flex->map[i].width + pos > data->offset)
+ break;
+ pos += flex->map[i].width;
+ }
+ if (i >= flex->mapnum)
+ return;
+ tmp_ofs = pos < data->offset ? data->offset - pos : 0;
+ for (j = i; i < flex->mapnum && width_left > 0; ) {
+ map = flex->map + i;
+ id = mlx5_flex_get_sample_id(flex, i, &pos, false, &def);
+ if (id == -1) {
+ i++;
+ /* All remaining length is dummy. */
+ if (pos >= data->offset + width)
+ return;
+ cur_width = map->width;
+ /* One mapping instance covers the whole width. */
+ } else if (pos + map->width >= (data->offset + width)) {
+ cur_width = width_left;
+ } else {
+ cur_width = cur_width + map->width - tmp_ofs;
+ pos += map->width;
+ /*
+ * Continue to search next until:
+ * 1. Another flex parser ID.
+ * 2. Width has been covered.
+ */
+ for (j = i + 1; j < flex->mapnum; j++) {
+ tmp_id = mlx5_flex_get_sample_id(flex, j, &pos, false, &def);
+ if (tmp_id == -1) {
+ i = j;
+ pos -= flex->map[j].width;
+ break;
+ }
+ if (id >= (int)flex->devx_fp->num_samples ||
+ id >= MLX5_GRAPH_NODE_SAMPLE_NUM ||
+ tmp_id >= (int)flex->devx_fp->num_samples ||
+ tmp_id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
+ return;
+ if (flex->devx_fp->sample_ids[id].id !=
+ flex->devx_fp->sample_ids[tmp_id].id ||
+ flex->map[j].shift != flex->map[j - 1].width +
+ flex->map[j - 1].shift) {
+ i = j;
+ break;
+ }
+ if ((pos + flex->map[j].width) >= (data->offset + width)) {
+ cur_width = width_left;
+ break;
+ }
+ pos += flex->map[j].width;
+ cur_width += flex->map[j].width;
+ }
+ }
+ if (cur_width > width_left)
+ cur_width = width_left;
+ else if (cur_width < width_left && (j == flex->mapnum || i == flex->mapnum))
+ return;
+
+ MLX5_ASSERT(id < (int)flex->devx_fp->num_samples);
+ if (id >= (int)flex->devx_fp->num_samples || id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
+ return;
+ /* Use invalid entry as placeholder for DUMMY mapping. */
+ info[idx] = (struct field_modify_info){cur_width / CHAR_BIT, offset / CHAR_BIT,
+ id == -1 ? MLX5_MODI_INVALID :
+ (enum mlx5_modification_field)
+ flex->devx_fp->sample_ids[id].modify_field_id,
+ map->shift + tmp_ofs, 1};
+ offset += cur_width;
+ width_left -= cur_width;
+ if (!mask) {
+ info[idx].offset = (32 - cur_width - map->shift - tmp_ofs);
+ info[idx].size = cur_width / CHAR_BIT + info[idx].offset / CHAR_BIT;
+ }
+ cur_width = 0;
+ tmp_ofs = 0;
+ idx++;
+ }
+ if (unlikely(width_left > 0)) {
+ MLX5_ASSERT(false);
+ return;
+ }
+ if (mask)
+ memset(mask, 0xff, data->offset / CHAR_BIT + width / CHAR_BIT);
+ /* Re-order the info to follow IPv6 address. */
+ for (i = 0; i < idx / 2; i++) {
+ tmp = info[i];
+ MLX5_ASSERT(info[i].id);
+ MLX5_ASSERT(info[idx - 1 - i].id);
+ info[i] = info[idx - 1 - i];
+ info[idx - 1 - i] = tmp;
+ }
+}
+
void
mlx5_flow_field_id_to_modify_info
(const struct rte_flow_action_modify_data *data,
@@ -1883,6 +2019,11 @@ mlx5_flow_field_id_to_modify_info
info[idx].offset = data->offset;
}
break;
+ case RTE_FLOW_FIELD_FLEX_ITEM:
+ MLX5_ASSERT(data->flex_handle != NULL && !(data->offset & 0x7));
+ mlx5_modify_flex_item(dev, (const struct mlx5_flex_item *)data->flex_handle,
+ data, info, mask, width);
+ break;
case RTE_FLOW_FIELD_POINTER:
case RTE_FLOW_FIELD_VALUE:
default:
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 742b0b0d43..2e263880d7 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -4550,6 +4550,17 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
at->actions[i].conf = actions->conf;
at->masks[i].conf = masks->conf;
}
+ if (actions->type == RTE_FLOW_ACTION_TYPE_MODIFY_FIELD) {
+ const struct rte_flow_action_modify_field *info = actions->conf;
+
+ if ((info->dst.field == RTE_FLOW_FIELD_FLEX_ITEM &&
+ flow_hw_flex_item_acquire(dev, info->dst.flex_handle,
+ &at->flex_item)) ||
+ (info->src.field == RTE_FLOW_FIELD_FLEX_ITEM &&
+ flow_hw_flex_item_acquire(dev, info->src.flex_handle,
+ &at->flex_item)))
+ goto error;
+ }
}
at->tmpl = flow_hw_dr_actions_template_create(at);
if (!at->tmpl)
@@ -4581,7 +4592,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-flow_hw_actions_template_destroy(struct rte_eth_dev *dev __rte_unused,
+flow_hw_actions_template_destroy(struct rte_eth_dev *dev,
struct rte_flow_actions_template *template,
struct rte_flow_error *error __rte_unused)
{
@@ -4594,6 +4605,7 @@ flow_hw_actions_template_destroy(struct rte_eth_dev *dev __rte_unused,
"action template in using");
}
LIST_REMOVE(template, next);
+ flow_hw_flex_item_release(dev, &template->flex_item);
if (template->tmpl)
mlx5dr_action_template_destroy(template->tmpl);
mlx5_free(template);
--
2.27.0
next prev parent reply other threads:[~2023-01-19 4:59 UTC|newest]
Thread overview: 59+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-12-21 8:39 [RFC 0/9] support flex item matching and modify field Rongwei Liu
2022-12-21 8:39 ` [RFC 1/9] ethdev: add flex item modify field support Rongwei Liu
2023-01-11 16:34 ` Ori Kam
2023-01-19 4:58 ` [PATCH v2 00/11] add flex item support Rongwei Liu
2023-01-19 4:58 ` [PATCH v2 01/11] ethdev: add flex item modify field support Rongwei Liu
2023-01-20 9:07 ` Andrew Rybchenko
2023-01-30 4:29 ` Rongwei Liu
2023-01-30 4:35 ` Rongwei Liu
2023-01-30 4:52 ` [PATCH v3 00/11] add flex item support Rongwei Liu
2023-01-30 4:52 ` [PATCH v3 01/11] ethdev: add flex item modify field support Rongwei Liu
2023-01-30 4:52 ` [PATCH v3 02/11] app/testpmd: pass flex handle into matching mask Rongwei Liu
2023-01-30 4:52 ` [PATCH v3 03/11] net/mlx5: enable hws flex item create Rongwei Liu
2023-01-30 4:52 ` [PATCH v3 04/11] net/mlx5: add IPv6 protocol as flex item input Rongwei Liu
2023-01-30 4:52 ` [PATCH v3 05/11] net/mlx5: adopt new flex item prm definition Rongwei Liu
2023-01-30 4:52 ` [PATCH v3 06/11] net/mlx5/hws: add hws flex item matching support Rongwei Liu
2023-01-30 4:52 ` [PATCH v3 07/11] net/mlx5: add flex item modify field implementation Rongwei Liu
2023-01-30 4:52 ` [PATCH v3 08/11] net/mlx5: return error for sws modify field Rongwei Liu
2023-01-30 4:53 ` [PATCH v3 09/11] app/testpmd: raw encap with flex item support Rongwei Liu
2023-01-30 4:53 ` [PATCH v3 10/11] doc/mlx5: update mlx5 doc Rongwei Liu
2023-01-30 4:53 ` [PATCH v3 11/11] app/testpmd: adjust cleanup sequence when quitting Rongwei Liu
2023-01-30 13:19 ` [PATCH v3 00/11] add flex item support Rongwei Liu
2023-01-30 13:19 ` [PATCH v3 01/11] ethdev: add flex item modify field support Rongwei Liu
2023-02-02 2:59 ` Rongwei Liu
2023-02-06 3:39 ` [PATCH v4 0/4] add flex item support Rongwei Liu
2023-02-06 3:39 ` [PATCH v4 1/4] ethdev: add flex item modify field support Rongwei Liu
2023-02-09 15:55 ` Ferruh Yigit
2023-02-06 3:39 ` [PATCH v4 2/4] app/testpmd: pass flex handle into matching mask Rongwei Liu
2023-02-06 3:39 ` [PATCH v4 3/4] app/testpmd: raw encap with flex item support Rongwei Liu
2023-02-06 3:39 ` [PATCH v4 4/4] app/testpmd: adjust cleanup sequence when quitting Rongwei Liu
2023-02-09 14:49 ` [PATCH v4 0/4] add flex item support Ferruh Yigit
2023-01-30 13:19 ` [PATCH v3 02/11] app/testpmd: pass flex handle into matching mask Rongwei Liu
2023-01-30 13:19 ` [PATCH v3 03/11] net/mlx5: enable hws flex item create Rongwei Liu
2023-01-30 13:19 ` [PATCH v3 04/11] net/mlx5: add IPv6 protocol as flex item input Rongwei Liu
2023-01-30 13:19 ` [PATCH v3 05/11] net/mlx5: adopt new flex item prm definition Rongwei Liu
2023-01-30 13:19 ` [PATCH v3 06/11] net/mlx5/hws: add hws flex item matching support Rongwei Liu
2023-01-30 13:19 ` [PATCH v3 07/11] net/mlx5: add flex item modify field implementation Rongwei Liu
2023-01-30 13:19 ` [PATCH v3 08/11] net/mlx5: return error for sws modify field Rongwei Liu
2023-01-30 13:19 ` [PATCH v3 09/11] app/testpmd: raw encap with flex item support Rongwei Liu
2023-01-30 13:19 ` [PATCH v3 10/11] doc/mlx5: update mlx5 doc Rongwei Liu
2023-01-30 13:20 ` [PATCH v3 11/11] app/testpmd: adjust cleanup sequence when quitting Rongwei Liu
2023-02-03 11:00 ` Singh, Aman Deep
2023-01-19 4:58 ` [PATCH v2 02/11] app/testpmd: pass flex handle into matching mask Rongwei Liu
2023-01-19 4:58 ` [PATCH v2 03/11] net/mlx5: enable hws flex item create Rongwei Liu
2023-01-19 4:58 ` [PATCH v2 04/11] net/mlx5: add IPv6 protocol as flex item input Rongwei Liu
2023-01-19 4:58 ` [PATCH v2 05/11] net/mlx5: adopt new flex item prm definition Rongwei Liu
2023-01-19 4:58 ` [PATCH v2 06/11] net/mlx5/hws: add hws flex item matching support Rongwei Liu
2023-01-19 4:58 ` Rongwei Liu [this message]
2023-01-19 4:58 ` [PATCH v2 08/11] net/mlx5: return error for sws modify field Rongwei Liu
2023-01-19 4:58 ` [PATCH v2 09/11] app/testpmd: raw encap with flex item support Rongwei Liu
2023-01-19 4:58 ` [PATCH v2 10/11] doc/mlx5: update mlx5 doc Rongwei Liu
2023-01-19 4:58 ` [PATCH v2 11/11] app/testpmd: adjust cleanup sequence when quitting Rongwei Liu
2022-12-21 8:39 ` [RFC 2/9] app/testpmd: add flex item modify field cmdline support Rongwei Liu
2022-12-21 8:39 ` [RFC 3/9] app/testpmd: pass flex handle into matching mask Rongwei Liu
2022-12-21 8:39 ` [RFC 4/9] net/mlx5: enable hws flex item create Rongwei Liu
2022-12-21 8:39 ` [RFC 5/9] net/mlx5: add IPv6 protocol as flex item input Rongwei Liu
2022-12-21 8:39 ` [RFC 6/9] net/mlx5/hws: add hws flex item matching support Rongwei Liu
2022-12-21 8:39 ` [RFC 7/9] net/mlx5/hws: add flex item modify field implementation Rongwei Liu
2022-12-21 8:39 ` [RFC 8/9] net/mlx5: return error for sws modify field Rongwei Liu
2022-12-21 8:40 ` [RFC 9/9] app/testpmd: raw encap with flex item support Rongwei Liu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230119045824.665663-8-rongweil@nvidia.com \
--to=rongweil@nvidia.com \
--cc=dev@dpdk.org \
--cc=matan@nvidia.com \
--cc=orika@nvidia.com \
--cc=rasland@nvidia.com \
--cc=thomas@monjalon.net \
--cc=viacheslavo@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).