From: Rongwei Liu <rongweil@nvidia.com>
To: <dev@dpdk.org>, <matan@nvidia.com>, <viacheslavo@nvidia.com>,
	<orika@nvidia.com>, <suanmingm@nvidia.com>, <thomas@monjalon.net>
Cc: Alex Vesker <valex@nvidia.com>
Subject: [PATCH v3 3/6] net/mlx5/hws: add IPv6 routing extension push remove actions
Date: Tue, 31 Oct 2023 12:51:28 +0200	[thread overview]
Message-ID: <20231031105131.441078-4-rongweil@nvidia.com> (raw)
In-Reply-To: <20231031105131.441078-1-rongweil@nvidia.com>

Add two dr_actions to implement IPv6 routing extension push and
remove. The new actions are combinations of multiple existing
actions rather than new action types.

Basically, there are two modify-header actions plus one reformat
action. The action order is the same as for the encap and decap
actions.
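
For illustration, a minimal usage sketch of the new API follows. It
relies only on the declarations added by this patch; the helper name,
the header buffer and the choice of RX/shared flags are assumptions
for the example, not part of the patch:

#include <rte_errno.h>
#include "mlx5dr.h"

/* Hypothetical helper: create one shared push and one shared pop
 * action for the IPv6 routing extension. "ctx" is an existing mlx5dr
 * context; "srv6_hdr"/"srv6_hdr_sz" hold the raw routing extension
 * header to be pushed.
 */
static int
create_srv6_push_pop(struct mlx5dr_context *ctx,
		     uint8_t *srv6_hdr, size_t srv6_hdr_sz,
		     struct mlx5dr_action **push, struct mlx5dr_action **pop)
{
	struct mlx5dr_action_reformat_header hdr = {
		.sz = srv6_hdr_sz,
		.data = srv6_hdr,
	};
	uint32_t flags = MLX5DR_ACTION_FLAG_HWS_RX |
			 MLX5DR_ACTION_FLAG_SHARED;

	/* Push: insert the routing extension header; the protocol and
	 * next_hdr fix-ups are handled by the internal sub-actions.
	 */
	*push = mlx5dr_action_create_reformat_ipv6_ext(ctx,
			MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT,
			&hdr, 0 /* log_bulk_size */, flags);
	if (!*push)
		return -rte_errno;

	/* Pop does not use the header argument in this patch and must
	 * be created as a shared action.
	 */
	*pop = mlx5dr_action_create_reformat_ipv6_ext(ctx,
			MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT,
			NULL, 0, flags);
	if (!*pop) {
		mlx5dr_action_destroy(*push);
		return -rte_errno;
	}
	return 0;
}

For non-shared (bulk) push actions, the per-rule routing extension
data and argument offset are presumably supplied at enqueue time via
the new rule action union member (ipv6_ext.header / ipv6_ext.offset)
added to mlx5dr.h below.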

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Reviewed-by: Alex Vesker <valex@nvidia.com>
Acked-by: Ori Kam <orika@nvidia.com>
---
 drivers/common/mlx5/mlx5_prm.h       |   1 +
 drivers/net/mlx5/hws/mlx5dr.h        |  29 +++
 drivers/net/mlx5/hws/mlx5dr_action.c | 358 ++++++++++++++++++++++++++-
 drivers/net/mlx5/hws/mlx5dr_action.h |   7 +
 drivers/net/mlx5/hws/mlx5dr_debug.c  |   2 +
 drivers/net/mlx5/mlx5_flow.h         |  44 ++++
 6 files changed, 438 insertions(+), 3 deletions(-)

diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index a5ecce98e9..32ec3df7ef 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -3586,6 +3586,7 @@ enum mlx5_ifc_header_anchors {
 	MLX5_HEADER_ANCHOR_PACKET_START = 0x0,
 	MLX5_HEADER_ANCHOR_FIRST_VLAN_START = 0x2,
 	MLX5_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
+	MLX5_HEADER_ANCHOR_TCP_UDP = 0x09,
 	MLX5_HEADER_ANCHOR_INNER_MAC = 0x13,
 	MLX5_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
 };
diff --git a/drivers/net/mlx5/hws/mlx5dr.h b/drivers/net/mlx5/hws/mlx5dr.h
index 2e692f76c3..9e7dd9c429 100644
--- a/drivers/net/mlx5/hws/mlx5dr.h
+++ b/drivers/net/mlx5/hws/mlx5dr.h
@@ -54,6 +54,8 @@ enum mlx5dr_action_type {
 	MLX5DR_ACTION_TYP_REMOVE_HEADER,
 	MLX5DR_ACTION_TYP_DEST_ROOT,
 	MLX5DR_ACTION_TYP_DEST_ARRAY,
+	MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT,
+	MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT,
 	MLX5DR_ACTION_TYP_MAX,
 };
 
@@ -278,6 +280,11 @@ struct mlx5dr_rule_action {
 			uint8_t *data;
 		} reformat;
 
+		struct {
+			uint32_t offset;
+			uint8_t *header;
+		} ipv6_ext;
+
 		struct {
 			rte_be32_t vlan_hdr;
 		} push_vlan;
@@ -889,6 +896,28 @@ mlx5dr_action_create_remove_header(struct mlx5dr_context *ctx,
 				   struct mlx5dr_action_remove_header_attr *attr,
 				   uint32_t flags);
 
+/* Create action to push or remove IPv6 extension header.
+ *
+ * @param[in] ctx
+ *	The context in which the new action will be created.
+ * @param[in] type
+ *	Type of direct rule action: MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT or
+ *	MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT.
+ * @param[in] hdr
+ *	Header for packet reformat.
+ * @param[in] log_bulk_size
+ *	Number of unique values used with this pattern.
+ * @param[in] flags
+ *	Action creation flags. (enum mlx5dr_action_flags)
+ * @return pointer to mlx5dr_action on success NULL otherwise.
+ */
+struct mlx5dr_action *
+mlx5dr_action_create_reformat_ipv6_ext(struct mlx5dr_context *ctx,
+				       enum mlx5dr_action_type type,
+				       struct mlx5dr_action_reformat_header *hdr,
+				       uint32_t log_bulk_size,
+				       uint32_t flags);
+
 /* Destroy direct rule action.
  *
  * @param[in] action
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index 76ca57d302..6ac3c2f782 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -26,7 +26,8 @@ static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_
 		BIT(MLX5DR_ACTION_TYP_REFORMAT_TRAILER),
 		BIT(MLX5DR_ACTION_TYP_REMOVE_HEADER) |
 		BIT(MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) |
-		BIT(MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2),
+		BIT(MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2) |
+		BIT(MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT),
 		BIT(MLX5DR_ACTION_TYP_POP_VLAN),
 		BIT(MLX5DR_ACTION_TYP_POP_VLAN),
 		BIT(MLX5DR_ACTION_TYP_CTR),
@@ -39,6 +40,7 @@ static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_
 		BIT(MLX5DR_ACTION_TYP_PUSH_VLAN),
 		BIT(MLX5DR_ACTION_TYP_MODIFY_HDR),
 		BIT(MLX5DR_ACTION_TYP_INSERT_HEADER) |
+		BIT(MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT) |
 		BIT(MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
 		BIT(MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
 		BIT(MLX5DR_ACTION_TYP_TBL) |
@@ -61,6 +63,7 @@ static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_
 		BIT(MLX5DR_ACTION_TYP_PUSH_VLAN),
 		BIT(MLX5DR_ACTION_TYP_MODIFY_HDR),
 		BIT(MLX5DR_ACTION_TYP_INSERT_HEADER) |
+		BIT(MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT) |
 		BIT(MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
 		BIT(MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
 		BIT(MLX5DR_ACTION_TYP_REFORMAT_TRAILER),
@@ -75,7 +78,8 @@ static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_
 		BIT(MLX5DR_ACTION_TYP_REFORMAT_TRAILER),
 		BIT(MLX5DR_ACTION_TYP_REMOVE_HEADER) |
 		BIT(MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) |
-		BIT(MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2),
+		BIT(MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2) |
+		BIT(MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT),
 		BIT(MLX5DR_ACTION_TYP_POP_VLAN),
 		BIT(MLX5DR_ACTION_TYP_POP_VLAN),
 		BIT(MLX5DR_ACTION_TYP_CTR),
@@ -88,6 +92,7 @@ static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_
 		BIT(MLX5DR_ACTION_TYP_PUSH_VLAN),
 		BIT(MLX5DR_ACTION_TYP_MODIFY_HDR),
 		BIT(MLX5DR_ACTION_TYP_INSERT_HEADER) |
+		BIT(MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT) |
 		BIT(MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
 		BIT(MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
 		BIT(MLX5DR_ACTION_TYP_REFORMAT_TRAILER),
@@ -1710,7 +1715,7 @@ mlx5dr_action_create_reformat(struct mlx5dr_context *ctx,
 
 	if (!mlx5dr_action_is_hws_flags(flags) ||
 	    ((flags & MLX5DR_ACTION_FLAG_SHARED) && (log_bulk_size || num_of_hdrs > 1))) {
-		DR_LOG(ERR, "Reformat flags don't fit HWS (flags: %x0x)", flags);
+		DR_LOG(ERR, "Reformat flags don't fit HWS (flags: 0x%x)", flags);
 		rte_errno = EINVAL;
 		goto free_action;
 	}
@@ -2382,6 +2387,347 @@ mlx5dr_action_create_remove_header(struct mlx5dr_context *ctx,
 	return NULL;
 }
 
+static void *
+mlx5dr_action_create_pop_ipv6_route_ext_mhdr1(struct mlx5dr_action *action)
+{
+	struct mlx5dr_action_mh_pattern pattern;
+	__be64 cmd[3] = {0};
+	uint16_t mod_id;
+
+	mod_id = flow_hw_get_ipv6_route_ext_mod_id_from_ctx(action->ctx, 0);
+	if (!mod_id) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	/*
+	 * Backup ipv6_route_ext.next_hdr to ipv6_route_ext.seg_left.
+	 * Next_hdr will be copied to ipv6.protocol after pop done.
+	 */
+	MLX5_SET(copy_action_in, &cmd[0], action_type, MLX5_MODIFICATION_TYPE_COPY);
+	MLX5_SET(copy_action_in, &cmd[0], length, 8);
+	MLX5_SET(copy_action_in, &cmd[0], src_offset, 24);
+	MLX5_SET(copy_action_in, &cmd[0], src_field, mod_id);
+	MLX5_SET(copy_action_in, &cmd[0], dst_field, mod_id);
+
+	/* Add nop between the continuous same modify field id */
+	MLX5_SET(copy_action_in, &cmd[1], action_type, MLX5_MODIFICATION_TYPE_NOP);
+
+	/* Clear next_hdr for right checksum */
+	MLX5_SET(set_action_in, &cmd[2], action_type, MLX5_MODIFICATION_TYPE_SET);
+	MLX5_SET(set_action_in, &cmd[2], length, 8);
+	MLX5_SET(set_action_in, &cmd[2], offset, 24);
+	MLX5_SET(set_action_in, &cmd[2], field, mod_id);
+
+	pattern.data = cmd;
+	pattern.sz = sizeof(cmd);
+
+	return mlx5dr_action_create_modify_header(action->ctx, 1, &pattern,
+						  0, action->flags);
+}
+
+static void *
+mlx5dr_action_create_pop_ipv6_route_ext_mhdr2(struct mlx5dr_action *action)
+{
+	enum mlx5_modification_field field[MLX5_ST_SZ_DW(definer_hl_ipv6_addr)] = {
+		MLX5_MODI_OUT_DIPV6_127_96,
+		MLX5_MODI_OUT_DIPV6_95_64,
+		MLX5_MODI_OUT_DIPV6_63_32,
+		MLX5_MODI_OUT_DIPV6_31_0
+	};
+	struct mlx5dr_action_mh_pattern pattern;
+	__be64 cmd[5] = {0};
+	uint16_t mod_id;
+	uint32_t i;
+
+	/* Copy ipv6_route_ext[first_segment].dst_addr by flex parser to ipv6.dst_addr */
+	for (i = 0; i < MLX5_ST_SZ_DW(definer_hl_ipv6_addr); i++) {
+		mod_id = flow_hw_get_ipv6_route_ext_mod_id_from_ctx(action->ctx, i + 1);
+		if (!mod_id) {
+			rte_errno = EINVAL;
+			return NULL;
+		}
+
+		MLX5_SET(copy_action_in, &cmd[i], action_type, MLX5_MODIFICATION_TYPE_COPY);
+		MLX5_SET(copy_action_in, &cmd[i], dst_field, field[i]);
+		MLX5_SET(copy_action_in, &cmd[i], src_field, mod_id);
+	}
+
+	mod_id = flow_hw_get_ipv6_route_ext_mod_id_from_ctx(action->ctx, 0);
+	if (!mod_id) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	/* Restore next_hdr from seg_left for flex parser identifying */
+	MLX5_SET(copy_action_in, &cmd[4], action_type, MLX5_MODIFICATION_TYPE_COPY);
+	MLX5_SET(copy_action_in, &cmd[4], length, 8);
+	MLX5_SET(copy_action_in, &cmd[4], dst_offset, 24);
+	MLX5_SET(copy_action_in, &cmd[4], src_field, mod_id);
+	MLX5_SET(copy_action_in, &cmd[4], dst_field, mod_id);
+
+	pattern.data = cmd;
+	pattern.sz = sizeof(cmd);
+
+	return mlx5dr_action_create_modify_header(action->ctx, 1, &pattern,
+						  0, action->flags);
+}
+
+static void *
+mlx5dr_action_create_pop_ipv6_route_ext_mhdr3(struct mlx5dr_action *action)
+{
+	uint8_t cmd[MLX5DR_MODIFY_ACTION_SIZE] = {0};
+	struct mlx5dr_action_mh_pattern pattern;
+	uint16_t mod_id;
+
+	mod_id = flow_hw_get_ipv6_route_ext_mod_id_from_ctx(action->ctx, 0);
+	if (!mod_id) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	/* Copy ipv6_route_ext.next_hdr to ipv6.protocol */
+	MLX5_SET(copy_action_in, cmd, action_type, MLX5_MODIFICATION_TYPE_COPY);
+	MLX5_SET(copy_action_in, cmd, length, 8);
+	MLX5_SET(copy_action_in, cmd, src_offset, 24);
+	MLX5_SET(copy_action_in, cmd, src_field, mod_id);
+	MLX5_SET(copy_action_in, cmd, dst_field, MLX5_MODI_OUT_IPV6_NEXT_HDR);
+
+	pattern.data = (__be64 *)cmd;
+	pattern.sz = sizeof(cmd);
+
+	return mlx5dr_action_create_modify_header(action->ctx, 1, &pattern,
+						  0, action->flags);
+}
+
+static int
+mlx5dr_action_create_pop_ipv6_route_ext(struct mlx5dr_action *action)
+{
+	uint8_t anchor_id = flow_hw_get_ipv6_route_ext_anchor_from_ctx(action->ctx);
+	struct mlx5dr_action_remove_header_attr hdr_attr;
+	uint32_t i;
+
+	if (!anchor_id) {
+		rte_errno = EINVAL;
+		return rte_errno;
+	}
+
+	action->ipv6_route_ext.action[0] =
+		mlx5dr_action_create_pop_ipv6_route_ext_mhdr1(action);
+	action->ipv6_route_ext.action[1] =
+		mlx5dr_action_create_pop_ipv6_route_ext_mhdr2(action);
+	action->ipv6_route_ext.action[2] =
+		mlx5dr_action_create_pop_ipv6_route_ext_mhdr3(action);
+
+	hdr_attr.by_anchor.decap = 1;
+	hdr_attr.by_anchor.start_anchor = anchor_id;
+	hdr_attr.by_anchor.end_anchor = MLX5_HEADER_ANCHOR_TCP_UDP;
+	hdr_attr.type = MLX5DR_ACTION_REMOVE_HEADER_TYPE_BY_HEADER;
+	action->ipv6_route_ext.action[3] =
+		mlx5dr_action_create_remove_header(action->ctx, &hdr_attr, action->flags);
+
+	if (!action->ipv6_route_ext.action[0] || !action->ipv6_route_ext.action[1] ||
+	    !action->ipv6_route_ext.action[2] || !action->ipv6_route_ext.action[3]) {
+		DR_LOG(ERR, "Failed to create ipv6_route_ext pop subaction");
+		goto err;
+	}
+
+	return 0;
+
+err:
+	for (i = 0; i < MLX5DR_ACTION_IPV6_EXT_MAX_SA; i++)
+		if (action->ipv6_route_ext.action[i])
+			mlx5dr_action_destroy(action->ipv6_route_ext.action[i]);
+
+	return rte_errno;
+}
+
+static void *
+mlx5dr_action_create_push_ipv6_route_ext_mhdr1(struct mlx5dr_action *action)
+{
+	uint8_t cmd[MLX5DR_MODIFY_ACTION_SIZE] = {0};
+	struct mlx5dr_action_mh_pattern pattern;
+
+	/* Set ipv6.protocol to IPPROTO_ROUTING */
+	MLX5_SET(set_action_in, cmd, action_type, MLX5_MODIFICATION_TYPE_SET);
+	MLX5_SET(set_action_in, cmd, length, 8);
+	MLX5_SET(set_action_in, cmd, field, MLX5_MODI_OUT_IPV6_NEXT_HDR);
+	MLX5_SET(set_action_in, cmd, data, IPPROTO_ROUTING);
+
+	pattern.data = (__be64 *)cmd;
+	pattern.sz = sizeof(cmd);
+
+	return mlx5dr_action_create_modify_header(action->ctx, 1, &pattern, 0,
+						  action->flags | MLX5DR_ACTION_FLAG_SHARED);
+}
+
+static void *
+mlx5dr_action_create_push_ipv6_route_ext_mhdr2(struct mlx5dr_action *action,
+					       uint32_t bulk_size,
+					       uint8_t *data)
+{
+	enum mlx5_modification_field field[MLX5_ST_SZ_DW(definer_hl_ipv6_addr)] = {
+		MLX5_MODI_OUT_DIPV6_127_96,
+		MLX5_MODI_OUT_DIPV6_95_64,
+		MLX5_MODI_OUT_DIPV6_63_32,
+		MLX5_MODI_OUT_DIPV6_31_0
+	};
+	struct mlx5dr_action_mh_pattern pattern;
+	uint32_t *ipv6_dst_addr = NULL;
+	uint8_t seg_left, next_hdr;
+	__be64 cmd[5] = {0};
+	uint16_t mod_id;
+	uint32_t i;
+
+	/* Fetch the last IPv6 address in the segment list */
+	if (action->flags & MLX5DR_ACTION_FLAG_SHARED) {
+		seg_left = MLX5_GET(header_ipv6_routing_ext, data, segments_left) - 1;
+		ipv6_dst_addr = (uint32_t *)data + MLX5_ST_SZ_DW(header_ipv6_routing_ext) +
+				seg_left * MLX5_ST_SZ_DW(definer_hl_ipv6_addr);
+	}
+
+	/* Copy IPv6 destination address from ipv6_route_ext.last_segment */
+	for (i = 0; i < MLX5_ST_SZ_DW(definer_hl_ipv6_addr); i++) {
+		MLX5_SET(set_action_in, &cmd[i], action_type, MLX5_MODIFICATION_TYPE_SET);
+		MLX5_SET(set_action_in, &cmd[i], field, field[i]);
+		if (action->flags & MLX5DR_ACTION_FLAG_SHARED)
+			MLX5_SET(set_action_in, &cmd[i], data, be32toh(*ipv6_dst_addr++));
+	}
+
+	mod_id = flow_hw_get_ipv6_route_ext_mod_id_from_ctx(action->ctx, 0);
+	if (!mod_id) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	/* Set ipv6_route_ext.next_hdr since initially pushed as 0 for right checksum */
+	MLX5_SET(set_action_in, &cmd[4], action_type, MLX5_MODIFICATION_TYPE_SET);
+	MLX5_SET(set_action_in, &cmd[4], length, 8);
+	MLX5_SET(set_action_in, &cmd[4], offset, 24);
+	MLX5_SET(set_action_in, &cmd[4], field, mod_id);
+	if (action->flags & MLX5DR_ACTION_FLAG_SHARED) {
+		next_hdr = MLX5_GET(header_ipv6_routing_ext, data, next_hdr);
+		MLX5_SET(set_action_in, &cmd[4], data, next_hdr);
+	}
+
+	pattern.data = cmd;
+	pattern.sz = sizeof(cmd);
+
+	return mlx5dr_action_create_modify_header(action->ctx, 1, &pattern,
+						  bulk_size, action->flags);
+}
+
+static int
+mlx5dr_action_create_push_ipv6_route_ext(struct mlx5dr_action *action,
+					 struct mlx5dr_action_reformat_header *hdr,
+					 uint32_t bulk_size)
+{
+	struct mlx5dr_action_insert_header insert_hdr = { {0} };
+	uint8_t header[MLX5_PUSH_MAX_LEN];
+	uint32_t i;
+
+	if (!hdr || !hdr->sz || hdr->sz > MLX5_PUSH_MAX_LEN ||
+	    ((action->flags & MLX5DR_ACTION_FLAG_SHARED) && !hdr->data)) {
+		DR_LOG(ERR, "Invalid ipv6_route_ext header");
+		rte_errno = EINVAL;
+		return rte_errno;
+	}
+
+	if (action->flags & MLX5DR_ACTION_FLAG_SHARED) {
+		memcpy(header, hdr->data, hdr->sz);
+		/* Clear ipv6_route_ext.next_hdr for right checksum */
+		MLX5_SET(header_ipv6_routing_ext, header, next_hdr, 0);
+	}
+
+	insert_hdr.anchor = MLX5_HEADER_ANCHOR_TCP_UDP;
+	insert_hdr.encap = 1;
+	insert_hdr.hdr.sz = hdr->sz;
+	insert_hdr.hdr.data = header;
+	action->ipv6_route_ext.action[0] =
+		mlx5dr_action_create_insert_header(action->ctx, 1, &insert_hdr,
+						   bulk_size, action->flags);
+	action->ipv6_route_ext.action[1] =
+		mlx5dr_action_create_push_ipv6_route_ext_mhdr1(action);
+	action->ipv6_route_ext.action[2] =
+		mlx5dr_action_create_push_ipv6_route_ext_mhdr2(action, bulk_size, hdr->data);
+
+	if (!action->ipv6_route_ext.action[0] ||
+	    !action->ipv6_route_ext.action[1] ||
+	    !action->ipv6_route_ext.action[2]) {
+		DR_LOG(ERR, "Failed to create ipv6_route_ext push subaction");
+		goto err;
+	}
+
+	return 0;
+
+err:
+	for (i = 0; i < MLX5DR_ACTION_IPV6_EXT_MAX_SA; i++)
+		if (action->ipv6_route_ext.action[i])
+			mlx5dr_action_destroy(action->ipv6_route_ext.action[i]);
+
+	return rte_errno;
+}
+
+struct mlx5dr_action *
+mlx5dr_action_create_reformat_ipv6_ext(struct mlx5dr_context *ctx,
+				       enum mlx5dr_action_type action_type,
+				       struct mlx5dr_action_reformat_header *hdr,
+				       uint32_t log_bulk_size,
+				       uint32_t flags)
+{
+	struct mlx5dr_action *action;
+	int ret;
+
+	if (mlx5dr_context_cap_dynamic_reparse(ctx)) {
+		DR_LOG(ERR, "IPv6 extension actions is not supported");
+		rte_errno = ENOTSUP;
+		return NULL;
+	}
+
+	if (!mlx5dr_action_is_hws_flags(flags) ||
+	    ((flags & MLX5DR_ACTION_FLAG_SHARED) && log_bulk_size)) {
+		DR_LOG(ERR, "IPv6 extension flags don't fit HWS (flags: 0x%x)", flags);
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	action = mlx5dr_action_create_generic(ctx, flags, action_type);
+	if (!action) {
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+
+	switch (action_type) {
+	case MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT:
+		if (!(flags & MLX5DR_ACTION_FLAG_SHARED)) {
+			DR_LOG(ERR, "Pop ipv6_route_ext must be shared");
+			rte_errno = EINVAL;
+			goto free_action;
+		}
+
+		ret = mlx5dr_action_create_pop_ipv6_route_ext(action);
+		break;
+	case MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT:
+		ret = mlx5dr_action_create_push_ipv6_route_ext(action, hdr, log_bulk_size);
+		break;
+	default:
+		DR_LOG(ERR, "Unsupported action type %d\n", action_type);
+		rte_errno = ENOTSUP;
+		goto free_action;
+	}
+
+	if (ret) {
+		DR_LOG(ERR, "Failed to create IPv6 extension reformat action");
+		goto free_action;
+	}
+
+	return action;
+
+free_action:
+	simple_free(action);
+	return NULL;
+}
+
 static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action)
 {
 	struct mlx5dr_devx_obj *obj = NULL;
@@ -2455,6 +2801,12 @@ static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action)
 			mlx5dr_action_destroy_stcs(&action[i]);
 		mlx5dr_cmd_destroy_obj(action->reformat.arg_obj);
 		break;
+	case MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT:
+	case MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT:
+		for (i = 0; i < MLX5DR_ACTION_IPV6_EXT_MAX_SA; i++)
+			if (action->ipv6_route_ext.action[i])
+				mlx5dr_action_destroy(action->ipv6_route_ext.action[i]);
+		break;
 	}
 }
 
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.h b/drivers/net/mlx5/hws/mlx5dr_action.h
index e56f5b59c7..d0152dde3b 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.h
+++ b/drivers/net/mlx5/hws/mlx5dr_action.h
@@ -8,6 +8,9 @@
 /* Max number of STEs needed for a rule (including match) */
 #define MLX5DR_ACTION_MAX_STE 10
 
+/* Max number of internal subactions of ipv6_ext */
+#define MLX5DR_ACTION_IPV6_EXT_MAX_SA 4
+
 enum mlx5dr_action_stc_idx {
 	MLX5DR_ACTION_STC_IDX_CTRL = 0,
 	MLX5DR_ACTION_STC_IDX_HIT = 1,
@@ -143,6 +146,10 @@ struct mlx5dr_action {
 					uint8_t offset;
 					bool encap;
 				} reformat;
+				struct {
+					struct mlx5dr_action
+						*action[MLX5DR_ACTION_IPV6_EXT_MAX_SA];
+				} ipv6_route_ext;
 				struct {
 					struct mlx5dr_devx_obj *devx_obj;
 					uint8_t return_reg_id;
diff --git a/drivers/net/mlx5/hws/mlx5dr_debug.c b/drivers/net/mlx5/hws/mlx5dr_debug.c
index 5111f41648..1e5ef9cf67 100644
--- a/drivers/net/mlx5/hws/mlx5dr_debug.c
+++ b/drivers/net/mlx5/hws/mlx5dr_debug.c
@@ -31,6 +31,8 @@ const char *mlx5dr_debug_action_type_str[] = {
 	[MLX5DR_ACTION_TYP_CRYPTO_DECRYPT] = "CRYPTO_DECRYPT",
 	[MLX5DR_ACTION_TYP_INSERT_HEADER] = "INSERT_HEADER",
 	[MLX5DR_ACTION_TYP_REMOVE_HEADER] = "REMOVE_HEADER",
+	[MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT] = "POP_IPV6_ROUTE_EXT",
+	[MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT] = "PUSH_IPV6_ROUTE_EXT",
 };
 
 static_assert(ARRAY_SIZE(mlx5dr_debug_action_type_str) == MLX5DR_ACTION_TYP_MAX,
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index e637c98b95..43608e15d2 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -595,6 +595,7 @@ struct mlx5_flow_dv_matcher {
 	struct mlx5_flow_dv_match_params mask; /**< Matcher mask. */
 };
 
+#define MLX5_PUSH_MAX_LEN 128
 #define MLX5_ENCAP_MAX_LEN 132
 
 /* Encap/decap resource structure. */
@@ -2898,6 +2899,49 @@ flow_hw_get_srh_flex_parser_byte_off_from_ctx(void *dr_ctx __rte_unused)
 #endif
 	return UINT32_MAX;
 }
+
+static __rte_always_inline uint8_t
+flow_hw_get_ipv6_route_ext_anchor_from_ctx(void *dr_ctx)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	uint16_t port;
+	struct mlx5_priv *priv;
+
+	MLX5_ETH_FOREACH_DEV(port, NULL) {
+		priv = rte_eth_devices[port].data->dev_private;
+		if (priv->dr_ctx == dr_ctx)
+			return priv->sh->srh_flex_parser.flex.devx_fp->anchor_id;
+	}
+#else
+	RTE_SET_USED(dr_ctx);
+#endif
+	return 0;
+}
+
+static __rte_always_inline uint16_t
+flow_hw_get_ipv6_route_ext_mod_id_from_ctx(void *dr_ctx, uint8_t idx)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	uint16_t port;
+	struct mlx5_priv *priv;
+	struct mlx5_flex_parser_devx *fp;
+
+	if (idx >= MLX5_GRAPH_NODE_SAMPLE_NUM || idx >= MLX5_SRV6_SAMPLE_NUM)
+		return 0;
+	MLX5_ETH_FOREACH_DEV(port, NULL) {
+		priv = rte_eth_devices[port].data->dev_private;
+		if (priv->dr_ctx == dr_ctx) {
+			fp = priv->sh->srh_flex_parser.flex.devx_fp;
+			return fp->sample_info[idx].modify_field_id;
+		}
+	}
+#else
+	RTE_SET_USED(dr_ctx);
+	RTE_SET_USED(idx);
+#endif
+	return 0;
+}
+
 void
 mlx5_indirect_list_handles_release(struct rte_eth_dev *dev);
 void
-- 
2.27.0


Thread overview: 64+ messages
2023-04-17  9:25 [PATCH v1 0/8] add IPv6 extension push remove Rongwei Liu
2023-04-17  9:25 ` [PATCH v1 1/8] ethdev: add IPv6 extension push remove action Rongwei Liu
2023-05-24  6:55   ` Ori Kam
2023-05-24  7:39     ` [PATCH v1 0/2] add IPv6 extension push remove Rongwei Liu
2023-05-24  7:39       ` [PATCH v1 1/2] ethdev: add IPv6 extension push remove action Rongwei Liu
2023-05-24 10:30         ` Ori Kam
2023-05-24  7:39       ` [PATCH v1 2/2] app/testpmd: add IPv6 extension push remove cli Rongwei Liu
2023-06-02 14:39       ` [PATCH v1 0/2] add IPv6 extension push remove Ferruh Yigit
2023-07-10  2:32         ` Rongwei Liu
2023-07-10  8:55           ` Ferruh Yigit
2023-07-10 14:41             ` Stephen Hemminger
2023-07-11  6:16               ` Thomas Monjalon
2023-09-19  8:12                 ` [PATCH v3] net/mlx5: add test for live migration Rongwei Liu
2023-10-16  8:19                   ` Thomas Monjalon
2023-10-16  8:25                     ` Rongwei Liu
2023-10-16  9:26                       ` Rongwei Liu
2023-10-16  9:26                       ` Thomas Monjalon
2023-10-16  9:29                         ` Rongwei Liu
2023-10-25  9:07                           ` Rongwei Liu
2023-10-16  9:22                     ` [PATCH v4] " Rongwei Liu
2023-10-25  9:36                       ` [PATCH v5] " Rongwei Liu
2023-10-25  9:41                         ` Thomas Monjalon
2023-10-25  9:45                           ` [PATCH v6] " Rongwei Liu
2023-10-25  9:48                           ` [PATCH v5] " Rongwei Liu
2023-10-25  9:50                           ` [PATCH v7] " Rongwei Liu
2023-10-25 13:10                             ` Thomas Monjalon
2023-10-26  8:15                             ` Raslan Darawsheh
2023-04-17  9:25 ` [PATCH v1 2/8] app/testpmd: add IPv6 extension push remove cli Rongwei Liu
2023-05-24  7:06   ` Ori Kam
2023-04-17  9:25 ` [PATCH v1 3/8] net/mlx5/hws: add no reparse support Rongwei Liu
2023-04-17  9:25 ` [PATCH v1 4/8] net/mlx5: sample the srv6 last segment Rongwei Liu
2023-10-31  9:42   ` [PATCH v2 0/6] support IPv6 extension push remove Rongwei Liu
2023-10-31  9:42     ` [PATCH v2 1/6] net/mlx5: sample the srv6 last segment Rongwei Liu
2023-10-31  9:42     ` [PATCH v2 2/6] net/mlx5/hws: fix potential wrong errno value Rongwei Liu
2023-10-31  9:42     ` [PATCH v2 3/6] net/mlx5/hws: add IPv6 routing extension push remove actions Rongwei Liu
2023-10-31  9:42     ` [PATCH v2 4/6] net/mlx5/hws: add setter for IPv6 routing push remove Rongwei Liu
2023-10-31  9:42     ` [PATCH v2 5/6] net/mlx5: implement " Rongwei Liu
2023-10-31  9:42     ` [PATCH v2 6/6] net/mlx5/hws: add stc reparse support for srv6 push pop Rongwei Liu
2023-10-31 10:51     ` [PATCH v3 0/6] support IPv6 extension push remove Rongwei Liu
2023-10-31 10:51       ` [PATCH v3 1/6] net/mlx5: sample the srv6 last segment Rongwei Liu
2023-10-31 10:51       ` [PATCH v3 2/6] net/mlx5/hws: fix potential wrong errno value Rongwei Liu
2023-10-31 10:51       ` Rongwei Liu [this message]
2023-10-31 10:51       ` [PATCH v3 4/6] net/mlx5/hws: add setter for IPv6 routing push remove Rongwei Liu
2023-10-31 10:51       ` [PATCH v3 5/6] net/mlx5: implement " Rongwei Liu
2023-10-31 10:51       ` [PATCH v3 6/6] net/mlx5/hws: add stc reparse support for srv6 push pop Rongwei Liu
2023-11-01  4:44       ` [PATCH v4 00/13] support IPv6 push remove action Rongwei Liu
2023-11-01  4:44         ` [PATCH v4 01/13] net/mlx5/hws: support insert header action Rongwei Liu
2023-11-01  4:44         ` [PATCH v4 02/13] net/mlx5/hws: support remove " Rongwei Liu
2023-11-01  4:44         ` [PATCH v4 03/13] net/mlx5/hws: allow jump to TIR over FDB Rongwei Liu
2023-11-01  4:44         ` [PATCH v4 04/13] net/mlx5/hws: support dynamic re-parse Rongwei Liu
2023-11-01  4:44         ` [PATCH v4 05/13] net/mlx5/hws: dynamic re-parse for modify header Rongwei Liu
2023-11-01  4:44         ` [PATCH v4 06/13] net/mlx5/hws: fix incorrect re-parse on complex rules Rongwei Liu
2023-11-01  4:44         ` [PATCH v4 07/13] net/mlx5: sample the srv6 last segment Rongwei Liu
2023-11-01  4:44         ` [PATCH v4 08/13] net/mlx5/hws: fix potential wrong rte_errno value Rongwei Liu
2023-11-01  4:44         ` [PATCH v4 09/13] net/mlx5/hws: add IPv6 routing extension push remove actions Rongwei Liu
2023-11-01  4:44         ` [PATCH v4 10/13] net/mlx5/hws: add setter for IPv6 routing push remove Rongwei Liu
2023-11-01  4:44         ` [PATCH v4 11/13] net/mlx5: implement " Rongwei Liu
2023-11-01  4:44         ` [PATCH v4 12/13] net/mlx5/hws: fix srv6 push compilation failure Rongwei Liu
2023-11-01  4:44         ` [PATCH v4 13/13] net/mlx5/hws: add stc reparse support for srv6 push pop Rongwei Liu
2023-11-02 13:44         ` [PATCH v4 00/13] support IPv6 push remove action Raslan Darawsheh
2023-04-17  9:25 ` [PATCH v1 5/8] net/mlx5: generate srv6 modify header resource Rongwei Liu
2023-04-17  9:25 ` [PATCH v1 6/8] net/mlx5/hws: add IPv6 routing extension push pop actions Rongwei Liu
2023-04-17  9:25 ` [PATCH v1 7/8] net/mlx5/hws: add setter for IPv6 routing push pop Rongwei Liu
2023-04-17  9:25 ` [PATCH v1 8/8] net/mlx5: implement " Rongwei Liu
