From: Gregory Etelson <getelson@nvidia.com>
To: <dev@dpdk.org>
Cc: <getelson@nvidia.com>, <mkashani@nvidia.com>,
<rasland@nvidia.com>, Rongwei Liu <rongweil@nvidia.com>,
Alex Vesker <valex@nvidia.com>, Ori Kam <orika@nvidia.com>,
Matan Azrad <matan@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
Suanming Mou <suanmingm@nvidia.com>
Subject: [PATCH 27/30] net/mlx5/hws: add IPv6 routing extension push remove actions
Date: Sun, 29 Oct 2023 18:31:59 +0200
Message-ID: <20231029163202.216450-27-getelson@nvidia.com>
In-Reply-To: <20231029163202.216450-1-getelson@nvidia.com>
From: Rongwei Liu <rongweil@nvidia.com>
Add two dr_actions that implement IPv6 routing extension push and
remove. The new actions are combinations of existing actions rather
than new action types: basically, two modify-header actions plus one
reformat action. The action order is the same as for the encap and
decap actions.
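For illustration only, a minimal sketch of how an application might
create the two actions through the new API. The context (ctx), the
pre-built routing extension header buffer (srh_data/srh_len) and the
table-type flag chosen below are assumptions made for the example,
not values taken from this patch.

    struct mlx5dr_action_reformat_header hdr = {
            .sz = srh_len,     /* size of pre-built routing ext header */
            .data = srh_data,  /* pre-built IPv6 routing ext header */
    };
    struct mlx5dr_action *push, *pop;

    /* Push: header data supplied at creation time (shared action,
     * so log_bulk_size must be 0).
     */
    push = mlx5dr_action_create_reformat_ipv6_ext(ctx,
                    MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT,
                    &hdr, 0,
                    MLX5DR_ACTION_FLAG_HWS_FDB |
                    MLX5DR_ACTION_FLAG_SHARED);

    /* Pop: the header argument is unused; the action must be
     * created with the SHARED flag.
     */
    pop = mlx5dr_action_create_reformat_ipv6_ext(ctx,
                    MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT,
                    NULL, 0,
                    MLX5DR_ACTION_FLAG_HWS_FDB |
                    MLX5DR_ACTION_FLAG_SHARED);

Both actions are released with mlx5dr_action_destroy() like any other
dr_action.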
Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Reviewed-by: Alex Vesker <valex@nvidia.com>
Acked-by: Ori Kam <orika@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 1 +
drivers/net/mlx5/hws/mlx5dr.h | 29 +++
drivers/net/mlx5/hws/mlx5dr_action.c | 358 ++++++++++++++++++++++++++-
drivers/net/mlx5/hws/mlx5dr_action.h | 7 +
drivers/net/mlx5/hws/mlx5dr_debug.c | 2 +
drivers/net/mlx5/mlx5_flow.h | 44 ++++
6 files changed, 438 insertions(+), 3 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index a5ecce98e9..32ec3df7ef 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -3586,6 +3586,7 @@ enum mlx5_ifc_header_anchors {
MLX5_HEADER_ANCHOR_PACKET_START = 0x0,
MLX5_HEADER_ANCHOR_FIRST_VLAN_START = 0x2,
MLX5_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
+ MLX5_HEADER_ANCHOR_TCP_UDP = 0x09,
MLX5_HEADER_ANCHOR_INNER_MAC = 0x13,
MLX5_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
};
diff --git a/drivers/net/mlx5/hws/mlx5dr.h b/drivers/net/mlx5/hws/mlx5dr.h
index 2e692f76c3..9e7dd9c429 100644
--- a/drivers/net/mlx5/hws/mlx5dr.h
+++ b/drivers/net/mlx5/hws/mlx5dr.h
@@ -54,6 +54,8 @@ enum mlx5dr_action_type {
MLX5DR_ACTION_TYP_REMOVE_HEADER,
MLX5DR_ACTION_TYP_DEST_ROOT,
MLX5DR_ACTION_TYP_DEST_ARRAY,
+ MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT,
+ MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT,
MLX5DR_ACTION_TYP_MAX,
};
@@ -278,6 +280,11 @@ struct mlx5dr_rule_action {
uint8_t *data;
} reformat;
+ struct {
+ uint32_t offset;
+ uint8_t *header;
+ } ipv6_ext;
+
struct {
rte_be32_t vlan_hdr;
} push_vlan;
@@ -889,6 +896,28 @@ mlx5dr_action_create_remove_header(struct mlx5dr_context *ctx,
struct mlx5dr_action_remove_header_attr *attr,
uint32_t flags);
+/* Create action to push or remove IPv6 extension header.
+ *
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] type
+ * Type of direct rule action: MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT or
+ * MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT.
+ * @param[in] hdr
+ * Header for packet reformat.
+ * @param[in] log_bulk_size
+ * Number of unique values used with this pattern.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5dr_action_flags)
+ * @return pointer to mlx5dr_action on success NULL otherwise.
+ */
+struct mlx5dr_action *
+mlx5dr_action_create_reformat_ipv6_ext(struct mlx5dr_context *ctx,
+ enum mlx5dr_action_type type,
+ struct mlx5dr_action_reformat_header *hdr,
+ uint32_t log_bulk_size,
+ uint32_t flags);
+
/* Destroy direct rule action.
*
* @param[in] action
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index 76ca57d302..6ac3c2f782 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -26,7 +26,8 @@ static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_
BIT(MLX5DR_ACTION_TYP_REFORMAT_TRAILER),
BIT(MLX5DR_ACTION_TYP_REMOVE_HEADER) |
BIT(MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) |
- BIT(MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2),
+ BIT(MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2) |
+ BIT(MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT),
BIT(MLX5DR_ACTION_TYP_POP_VLAN),
BIT(MLX5DR_ACTION_TYP_POP_VLAN),
BIT(MLX5DR_ACTION_TYP_CTR),
@@ -39,6 +40,7 @@ static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_
BIT(MLX5DR_ACTION_TYP_PUSH_VLAN),
BIT(MLX5DR_ACTION_TYP_MODIFY_HDR),
BIT(MLX5DR_ACTION_TYP_INSERT_HEADER) |
+ BIT(MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT) |
BIT(MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
BIT(MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
BIT(MLX5DR_ACTION_TYP_TBL) |
@@ -61,6 +63,7 @@ static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_
BIT(MLX5DR_ACTION_TYP_PUSH_VLAN),
BIT(MLX5DR_ACTION_TYP_MODIFY_HDR),
BIT(MLX5DR_ACTION_TYP_INSERT_HEADER) |
+ BIT(MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT) |
BIT(MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
BIT(MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
BIT(MLX5DR_ACTION_TYP_REFORMAT_TRAILER),
@@ -75,7 +78,8 @@ static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_
BIT(MLX5DR_ACTION_TYP_REFORMAT_TRAILER),
BIT(MLX5DR_ACTION_TYP_REMOVE_HEADER) |
BIT(MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) |
- BIT(MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2),
+ BIT(MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2) |
+ BIT(MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT),
BIT(MLX5DR_ACTION_TYP_POP_VLAN),
BIT(MLX5DR_ACTION_TYP_POP_VLAN),
BIT(MLX5DR_ACTION_TYP_CTR),
@@ -88,6 +92,7 @@ static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_
BIT(MLX5DR_ACTION_TYP_PUSH_VLAN),
BIT(MLX5DR_ACTION_TYP_MODIFY_HDR),
BIT(MLX5DR_ACTION_TYP_INSERT_HEADER) |
+ BIT(MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT) |
BIT(MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
BIT(MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
BIT(MLX5DR_ACTION_TYP_REFORMAT_TRAILER),
@@ -1710,7 +1715,7 @@ mlx5dr_action_create_reformat(struct mlx5dr_context *ctx,
if (!mlx5dr_action_is_hws_flags(flags) ||
((flags & MLX5DR_ACTION_FLAG_SHARED) && (log_bulk_size || num_of_hdrs > 1))) {
- DR_LOG(ERR, "Reformat flags don't fit HWS (flags: %x0x)", flags);
+ DR_LOG(ERR, "Reformat flags don't fit HWS (flags: 0x%x)", flags);
rte_errno = EINVAL;
goto free_action;
}
@@ -2382,6 +2387,347 @@ mlx5dr_action_create_remove_header(struct mlx5dr_context *ctx,
return NULL;
}
+static void *
+mlx5dr_action_create_pop_ipv6_route_ext_mhdr1(struct mlx5dr_action *action)
+{
+ struct mlx5dr_action_mh_pattern pattern;
+ __be64 cmd[3] = {0};
+ uint16_t mod_id;
+
+ mod_id = flow_hw_get_ipv6_route_ext_mod_id_from_ctx(action->ctx, 0);
+ if (!mod_id) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /*
+ * Backup ipv6_route_ext.next_hdr to ipv6_route_ext.seg_left.
+ * Next_hdr will be copied to ipv6.protocol after pop done.
+ */
+ MLX5_SET(copy_action_in, &cmd[0], action_type, MLX5_MODIFICATION_TYPE_COPY);
+ MLX5_SET(copy_action_in, &cmd[0], length, 8);
+ MLX5_SET(copy_action_in, &cmd[0], src_offset, 24);
+ MLX5_SET(copy_action_in, &cmd[0], src_field, mod_id);
+ MLX5_SET(copy_action_in, &cmd[0], dst_field, mod_id);
+
+ /* Add nop between the continuous same modify field id */
+ MLX5_SET(copy_action_in, &cmd[1], action_type, MLX5_MODIFICATION_TYPE_NOP);
+
+ /* Clear next_hdr for right checksum */
+ MLX5_SET(set_action_in, &cmd[2], action_type, MLX5_MODIFICATION_TYPE_SET);
+ MLX5_SET(set_action_in, &cmd[2], length, 8);
+ MLX5_SET(set_action_in, &cmd[2], offset, 24);
+ MLX5_SET(set_action_in, &cmd[2], field, mod_id);
+
+ pattern.data = cmd;
+ pattern.sz = sizeof(cmd);
+
+ return mlx5dr_action_create_modify_header(action->ctx, 1, &pattern,
+ 0, action->flags);
+}
+
+static void *
+mlx5dr_action_create_pop_ipv6_route_ext_mhdr2(struct mlx5dr_action *action)
+{
+ enum mlx5_modification_field field[MLX5_ST_SZ_DW(definer_hl_ipv6_addr)] = {
+ MLX5_MODI_OUT_DIPV6_127_96,
+ MLX5_MODI_OUT_DIPV6_95_64,
+ MLX5_MODI_OUT_DIPV6_63_32,
+ MLX5_MODI_OUT_DIPV6_31_0
+ };
+ struct mlx5dr_action_mh_pattern pattern;
+ __be64 cmd[5] = {0};
+ uint16_t mod_id;
+ uint32_t i;
+
+ /* Copy ipv6_route_ext[first_segment].dst_addr by flex parser to ipv6.dst_addr */
+ for (i = 0; i < MLX5_ST_SZ_DW(definer_hl_ipv6_addr); i++) {
+ mod_id = flow_hw_get_ipv6_route_ext_mod_id_from_ctx(action->ctx, i + 1);
+ if (!mod_id) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ MLX5_SET(copy_action_in, &cmd[i], action_type, MLX5_MODIFICATION_TYPE_COPY);
+ MLX5_SET(copy_action_in, &cmd[i], dst_field, field[i]);
+ MLX5_SET(copy_action_in, &cmd[i], src_field, mod_id);
+ }
+
+ mod_id = flow_hw_get_ipv6_route_ext_mod_id_from_ctx(action->ctx, 0);
+ if (!mod_id) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /* Restore next_hdr from seg_left for flex parser identifying */
+ MLX5_SET(copy_action_in, &cmd[4], action_type, MLX5_MODIFICATION_TYPE_COPY);
+ MLX5_SET(copy_action_in, &cmd[4], length, 8);
+ MLX5_SET(copy_action_in, &cmd[4], dst_offset, 24);
+ MLX5_SET(copy_action_in, &cmd[4], src_field, mod_id);
+ MLX5_SET(copy_action_in, &cmd[4], dst_field, mod_id);
+
+ pattern.data = cmd;
+ pattern.sz = sizeof(cmd);
+
+ return mlx5dr_action_create_modify_header(action->ctx, 1, &pattern,
+ 0, action->flags);
+}
+
+static void *
+mlx5dr_action_create_pop_ipv6_route_ext_mhdr3(struct mlx5dr_action *action)
+{
+ uint8_t cmd[MLX5DR_MODIFY_ACTION_SIZE] = {0};
+ struct mlx5dr_action_mh_pattern pattern;
+ uint16_t mod_id;
+
+ mod_id = flow_hw_get_ipv6_route_ext_mod_id_from_ctx(action->ctx, 0);
+ if (!mod_id) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /* Copy ipv6_route_ext.next_hdr to ipv6.protocol */
+ MLX5_SET(copy_action_in, cmd, action_type, MLX5_MODIFICATION_TYPE_COPY);
+ MLX5_SET(copy_action_in, cmd, length, 8);
+ MLX5_SET(copy_action_in, cmd, src_offset, 24);
+ MLX5_SET(copy_action_in, cmd, src_field, mod_id);
+ MLX5_SET(copy_action_in, cmd, dst_field, MLX5_MODI_OUT_IPV6_NEXT_HDR);
+
+ pattern.data = (__be64 *)cmd;
+ pattern.sz = sizeof(cmd);
+
+ return mlx5dr_action_create_modify_header(action->ctx, 1, &pattern,
+ 0, action->flags);
+}
+
+static int
+mlx5dr_action_create_pop_ipv6_route_ext(struct mlx5dr_action *action)
+{
+ uint8_t anchor_id = flow_hw_get_ipv6_route_ext_anchor_from_ctx(action->ctx);
+ struct mlx5dr_action_remove_header_attr hdr_attr;
+ uint32_t i;
+
+ if (!anchor_id) {
+ rte_errno = EINVAL;
+ return rte_errno;
+ }
+
+ action->ipv6_route_ext.action[0] =
+ mlx5dr_action_create_pop_ipv6_route_ext_mhdr1(action);
+ action->ipv6_route_ext.action[1] =
+ mlx5dr_action_create_pop_ipv6_route_ext_mhdr2(action);
+ action->ipv6_route_ext.action[2] =
+ mlx5dr_action_create_pop_ipv6_route_ext_mhdr3(action);
+
+ hdr_attr.by_anchor.decap = 1;
+ hdr_attr.by_anchor.start_anchor = anchor_id;
+ hdr_attr.by_anchor.end_anchor = MLX5_HEADER_ANCHOR_TCP_UDP;
+ hdr_attr.type = MLX5DR_ACTION_REMOVE_HEADER_TYPE_BY_HEADER;
+ action->ipv6_route_ext.action[3] =
+ mlx5dr_action_create_remove_header(action->ctx, &hdr_attr, action->flags);
+
+ if (!action->ipv6_route_ext.action[0] || !action->ipv6_route_ext.action[1] ||
+ !action->ipv6_route_ext.action[2] || !action->ipv6_route_ext.action[3]) {
+ DR_LOG(ERR, "Failed to create ipv6_route_ext pop subaction");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ for (i = 0; i < MLX5DR_ACTION_IPV6_EXT_MAX_SA; i++)
+ if (action->ipv6_route_ext.action[i])
+ mlx5dr_action_destroy(action->ipv6_route_ext.action[i]);
+
+ return rte_errno;
+}
+
+static void *
+mlx5dr_action_create_push_ipv6_route_ext_mhdr1(struct mlx5dr_action *action)
+{
+ uint8_t cmd[MLX5DR_MODIFY_ACTION_SIZE] = {0};
+ struct mlx5dr_action_mh_pattern pattern;
+
+ /* Set ipv6.protocol to IPPROTO_ROUTING */
+ MLX5_SET(set_action_in, cmd, action_type, MLX5_MODIFICATION_TYPE_SET);
+ MLX5_SET(set_action_in, cmd, length, 8);
+ MLX5_SET(set_action_in, cmd, field, MLX5_MODI_OUT_IPV6_NEXT_HDR);
+ MLX5_SET(set_action_in, cmd, data, IPPROTO_ROUTING);
+
+ pattern.data = (__be64 *)cmd;
+ pattern.sz = sizeof(cmd);
+
+ return mlx5dr_action_create_modify_header(action->ctx, 1, &pattern, 0,
+ action->flags | MLX5DR_ACTION_FLAG_SHARED);
+}
+
+static void *
+mlx5dr_action_create_push_ipv6_route_ext_mhdr2(struct mlx5dr_action *action,
+ uint32_t bulk_size,
+ uint8_t *data)
+{
+ enum mlx5_modification_field field[MLX5_ST_SZ_DW(definer_hl_ipv6_addr)] = {
+ MLX5_MODI_OUT_DIPV6_127_96,
+ MLX5_MODI_OUT_DIPV6_95_64,
+ MLX5_MODI_OUT_DIPV6_63_32,
+ MLX5_MODI_OUT_DIPV6_31_0
+ };
+ struct mlx5dr_action_mh_pattern pattern;
+ uint32_t *ipv6_dst_addr = NULL;
+ uint8_t seg_left, next_hdr;
+ __be64 cmd[5] = {0};
+ uint16_t mod_id;
+ uint32_t i;
+
+ /* Fetch the last IPv6 address in the segment list */
+ if (action->flags & MLX5DR_ACTION_FLAG_SHARED) {
+ seg_left = MLX5_GET(header_ipv6_routing_ext, data, segments_left) - 1;
+ ipv6_dst_addr = (uint32_t *)data + MLX5_ST_SZ_DW(header_ipv6_routing_ext) +
+ seg_left * MLX5_ST_SZ_DW(definer_hl_ipv6_addr);
+ }
+
+ /* Copy IPv6 destination address from ipv6_route_ext.last_segment */
+ for (i = 0; i < MLX5_ST_SZ_DW(definer_hl_ipv6_addr); i++) {
+ MLX5_SET(set_action_in, &cmd[i], action_type, MLX5_MODIFICATION_TYPE_SET);
+ MLX5_SET(set_action_in, &cmd[i], field, field[i]);
+ if (action->flags & MLX5DR_ACTION_FLAG_SHARED)
+ MLX5_SET(set_action_in, &cmd[i], data, be32toh(*ipv6_dst_addr++));
+ }
+
+ mod_id = flow_hw_get_ipv6_route_ext_mod_id_from_ctx(action->ctx, 0);
+ if (!mod_id) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /* Set ipv6_route_ext.next_hdr since initially pushed as 0 for right checksum */
+ MLX5_SET(set_action_in, &cmd[4], action_type, MLX5_MODIFICATION_TYPE_SET);
+ MLX5_SET(set_action_in, &cmd[4], length, 8);
+ MLX5_SET(set_action_in, &cmd[4], offset, 24);
+ MLX5_SET(set_action_in, &cmd[4], field, mod_id);
+ if (action->flags & MLX5DR_ACTION_FLAG_SHARED) {
+ next_hdr = MLX5_GET(header_ipv6_routing_ext, data, next_hdr);
+ MLX5_SET(set_action_in, &cmd[4], data, next_hdr);
+ }
+
+ pattern.data = cmd;
+ pattern.sz = sizeof(cmd);
+
+ return mlx5dr_action_create_modify_header(action->ctx, 1, &pattern,
+ bulk_size, action->flags);
+}
+
+static int
+mlx5dr_action_create_push_ipv6_route_ext(struct mlx5dr_action *action,
+ struct mlx5dr_action_reformat_header *hdr,
+ uint32_t bulk_size)
+{
+ struct mlx5dr_action_insert_header insert_hdr = { {0} };
+ uint8_t header[MLX5_PUSH_MAX_LEN];
+ uint32_t i;
+
+ if (!hdr || !hdr->sz || hdr->sz > MLX5_PUSH_MAX_LEN ||
+ ((action->flags & MLX5DR_ACTION_FLAG_SHARED) && !hdr->data)) {
+ DR_LOG(ERR, "Invalid ipv6_route_ext header");
+ rte_errno = EINVAL;
+ return rte_errno;
+ }
+
+ if (action->flags & MLX5DR_ACTION_FLAG_SHARED) {
+ memcpy(header, hdr->data, hdr->sz);
+ /* Clear ipv6_route_ext.next_hdr for right checksum */
+ MLX5_SET(header_ipv6_routing_ext, header, next_hdr, 0);
+ }
+
+ insert_hdr.anchor = MLX5_HEADER_ANCHOR_TCP_UDP;
+ insert_hdr.encap = 1;
+ insert_hdr.hdr.sz = hdr->sz;
+ insert_hdr.hdr.data = header;
+ action->ipv6_route_ext.action[0] =
+ mlx5dr_action_create_insert_header(action->ctx, 1, &insert_hdr,
+ bulk_size, action->flags);
+ action->ipv6_route_ext.action[1] =
+ mlx5dr_action_create_push_ipv6_route_ext_mhdr1(action);
+ action->ipv6_route_ext.action[2] =
+ mlx5dr_action_create_push_ipv6_route_ext_mhdr2(action, bulk_size, hdr->data);
+
+ if (!action->ipv6_route_ext.action[0] ||
+ !action->ipv6_route_ext.action[1] ||
+ !action->ipv6_route_ext.action[2]) {
+ DR_LOG(ERR, "Failed to create ipv6_route_ext push subaction");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ for (i = 0; i < MLX5DR_ACTION_IPV6_EXT_MAX_SA; i++)
+ if (action->ipv6_route_ext.action[i])
+ mlx5dr_action_destroy(action->ipv6_route_ext.action[i]);
+
+ return rte_errno;
+}
+
+struct mlx5dr_action *
+mlx5dr_action_create_reformat_ipv6_ext(struct mlx5dr_context *ctx,
+ enum mlx5dr_action_type action_type,
+ struct mlx5dr_action_reformat_header *hdr,
+ uint32_t log_bulk_size,
+ uint32_t flags)
+{
+ struct mlx5dr_action *action;
+ int ret;
+
+ if (mlx5dr_context_cap_dynamic_reparse(ctx)) {
+ DR_LOG(ERR, "IPv6 extension actions is not supported");
+ rte_errno = ENOTSUP;
+ return NULL;
+ }
+
+ if (!mlx5dr_action_is_hws_flags(flags) ||
+ ((flags & MLX5DR_ACTION_FLAG_SHARED) && log_bulk_size)) {
+ DR_LOG(ERR, "IPv6 extension flags don't fit HWS (flags: 0x%x)", flags);
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ action = mlx5dr_action_create_generic(ctx, flags, action_type);
+ if (!action) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ switch (action_type) {
+ case MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT:
+ if (!(flags & MLX5DR_ACTION_FLAG_SHARED)) {
+ DR_LOG(ERR, "Pop ipv6_route_ext must be shared");
+ rte_errno = EINVAL;
+ goto free_action;
+ }
+
+ ret = mlx5dr_action_create_pop_ipv6_route_ext(action);
+ break;
+ case MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT:
+ ret = mlx5dr_action_create_push_ipv6_route_ext(action, hdr, log_bulk_size);
+ break;
+ default:
+ DR_LOG(ERR, "Unsupported action type %d\n", action_type);
+ rte_errno = ENOTSUP;
+ goto free_action;
+ }
+
+ if (ret) {
+ DR_LOG(ERR, "Failed to create IPv6 extension reformat action");
+ goto free_action;
+ }
+
+ return action;
+
+free_action:
+ simple_free(action);
+ return NULL;
+}
+
static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action)
{
struct mlx5dr_devx_obj *obj = NULL;
@@ -2455,6 +2801,12 @@ static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action)
mlx5dr_action_destroy_stcs(&action[i]);
mlx5dr_cmd_destroy_obj(action->reformat.arg_obj);
break;
+ case MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT:
+ case MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT:
+ for (i = 0; i < MLX5DR_ACTION_IPV6_EXT_MAX_SA; i++)
+ if (action->ipv6_route_ext.action[i])
+ mlx5dr_action_destroy(action->ipv6_route_ext.action[i]);
+ break;
}
}
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.h b/drivers/net/mlx5/hws/mlx5dr_action.h
index e56f5b59c7..d0152dde3b 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.h
+++ b/drivers/net/mlx5/hws/mlx5dr_action.h
@@ -8,6 +8,9 @@
/* Max number of STEs needed for a rule (including match) */
#define MLX5DR_ACTION_MAX_STE 10
+/* Max number of internal subactions of ipv6_ext */
+#define MLX5DR_ACTION_IPV6_EXT_MAX_SA 4
+
enum mlx5dr_action_stc_idx {
MLX5DR_ACTION_STC_IDX_CTRL = 0,
MLX5DR_ACTION_STC_IDX_HIT = 1,
@@ -143,6 +146,10 @@ struct mlx5dr_action {
uint8_t offset;
bool encap;
} reformat;
+ struct {
+ struct mlx5dr_action
+ *action[MLX5DR_ACTION_IPV6_EXT_MAX_SA];
+ } ipv6_route_ext;
struct {
struct mlx5dr_devx_obj *devx_obj;
uint8_t return_reg_id;
diff --git a/drivers/net/mlx5/hws/mlx5dr_debug.c b/drivers/net/mlx5/hws/mlx5dr_debug.c
index 5111f41648..1e5ef9cf67 100644
--- a/drivers/net/mlx5/hws/mlx5dr_debug.c
+++ b/drivers/net/mlx5/hws/mlx5dr_debug.c
@@ -31,6 +31,8 @@ const char *mlx5dr_debug_action_type_str[] = {
[MLX5DR_ACTION_TYP_CRYPTO_DECRYPT] = "CRYPTO_DECRYPT",
[MLX5DR_ACTION_TYP_INSERT_HEADER] = "INSERT_HEADER",
[MLX5DR_ACTION_TYP_REMOVE_HEADER] = "REMOVE_HEADER",
+ [MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT] = "POP_IPV6_ROUTE_EXT",
+ [MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT] = "PUSH_IPV6_ROUTE_EXT",
};
static_assert(ARRAY_SIZE(mlx5dr_debug_action_type_str) == MLX5DR_ACTION_TYP_MAX,
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index ddb3b7b6fd..8174c03d50 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -589,6 +589,7 @@ struct mlx5_flow_dv_matcher {
struct mlx5_flow_dv_match_params mask; /**< Matcher mask. */
};
+#define MLX5_PUSH_MAX_LEN 128
#define MLX5_ENCAP_MAX_LEN 132
/* Encap/decap resource structure. */
@@ -2872,6 +2873,49 @@ flow_hw_get_srh_flex_parser_byte_off_from_ctx(void *dr_ctx __rte_unused)
#endif
return UINT32_MAX;
}
+
+static __rte_always_inline uint8_t
+flow_hw_get_ipv6_route_ext_anchor_from_ctx(void *dr_ctx)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ uint16_t port;
+ struct mlx5_priv *priv;
+
+ MLX5_ETH_FOREACH_DEV(port, NULL) {
+ priv = rte_eth_devices[port].data->dev_private;
+ if (priv->dr_ctx == dr_ctx)
+ return priv->sh->srh_flex_parser.flex.devx_fp->anchor_id;
+ }
+#else
+ RTE_SET_USED(dr_ctx);
+#endif
+ return 0;
+}
+
+static __rte_always_inline uint16_t
+flow_hw_get_ipv6_route_ext_mod_id_from_ctx(void *dr_ctx, uint8_t idx)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ uint16_t port;
+ struct mlx5_priv *priv;
+ struct mlx5_flex_parser_devx *fp;
+
+ if (idx >= MLX5_GRAPH_NODE_SAMPLE_NUM || idx >= MLX5_SRV6_SAMPLE_NUM)
+ return 0;
+ MLX5_ETH_FOREACH_DEV(port, NULL) {
+ priv = rte_eth_devices[port].data->dev_private;
+ if (priv->dr_ctx == dr_ctx) {
+ fp = priv->sh->srh_flex_parser.flex.devx_fp;
+ return fp->sample_info[idx].modify_field_id;
+ }
+ }
+#else
+ RTE_SET_USED(dr_ctx);
+ RTE_SET_USED(idx);
+#endif
+ return 0;
+}
+
void
mlx5_indirect_list_handles_release(struct rte_eth_dev *dev);
void
--
2.39.2