From: Gregory Etelson <getelson@nvidia.com>
To: <dev@dpdk.org>
Cc: getelson@nvidia.com, <mkashani@nvidia.com>,
rasland@nvidia.com, "Dariusz Sosnowski" <dsosnowski@nvidia.com>,
"Viacheslav Ovsiienko" <viacheslavo@nvidia.com>,
"Bing Zhao" <bingz@nvidia.com>, "Ori Kam" <orika@nvidia.com>,
"Suanming Mou" <suanmingm@nvidia.com>,
"Matan Azrad" <matan@nvidia.com>
Subject: [PATCH 3/5] net/mlx5: create utility functions for non-template sample action
Date: Tue, 17 Jun 2025 16:39:31 +0300 [thread overview]
Message-ID: <20250617133933.313443-3-getelson@nvidia.com> (raw)
In-Reply-To: <20250617133933.313443-1-getelson@nvidia.com>
This patch initializes the non-template SAMPLE action environment and
adds a function that creates the HWS mirror object.
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
---
drivers/net/mlx5/meson.build | 1 +
drivers/net/mlx5/mlx5.h | 7 +
drivers/net/mlx5/mlx5_flow.h | 7 +
drivers/net/mlx5/mlx5_flow_hw.c | 22 +-
drivers/net/mlx5/mlx5_nta_sample.c | 462 +++++++++++++++++++++++++++++
5 files changed, 483 insertions(+), 16 deletions(-)
create mode 100644 drivers/net/mlx5/mlx5_nta_sample.c
diff --git a/drivers/net/mlx5/meson.build b/drivers/net/mlx5/meson.build
index 6a91692759..f16fe18193 100644
--- a/drivers/net/mlx5/meson.build
+++ b/drivers/net/mlx5/meson.build
@@ -53,6 +53,7 @@ if is_linux
'mlx5_flow_verbs.c',
'mlx5_hws_cnt.c',
'mlx5_nta_split.c',
+ 'mlx5_nta_sample.c',
)
endif
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 5695d0f54a..f085656196 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1255,6 +1255,11 @@ struct mlx5_flow_tbl_resource {
#define MLX5_FLOW_TABLE_PTYPE_RSS_LAST (MLX5_MAX_TABLES - 11)
#define MLX5_FLOW_TABLE_PTYPE_RSS_BASE \
(1 + MLX5_FLOW_TABLE_PTYPE_RSS_LAST - MLX5_FLOW_TABLE_PTYPE_RSS_NUM)
+#define MLX5_FLOW_TABLE_SAMPLE_NUM 1024
+#define MLX5_FLOW_TABLE_SAMPLE_LAST (MLX5_FLOW_TABLE_PTYPE_RSS_BASE - 1)
+#define MLX5_FLOW_TABLE_SAMPLE_BASE \
+(1 + MLX5_FLOW_TABLE_SAMPLE_LAST - MLX5_FLOW_TABLE_SAMPLE_NUM)
+
#define MLX5_FLOW_TABLE_FACTOR 10
/* ID generation structure. */
@@ -1962,6 +1967,7 @@ struct mlx5_quota_ctx {
struct mlx5_indexed_pool *quota_ipool; /* Manage quota objects */
};
+struct mlx5_nta_sample_ctx;
struct mlx5_priv {
struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
struct mlx5_dev_ctx_shared *sh; /* Shared device context. */
@@ -2128,6 +2134,7 @@ struct mlx5_priv {
*/
struct mlx5dr_action *action_nat64[MLX5DR_TABLE_TYPE_MAX][2];
struct mlx5_indexed_pool *ptype_rss_groups;
+ struct mlx5_nta_sample_ctx *nta_sample_ctx;
#endif
struct rte_eth_dev *shared_host; /* Host device for HW steering. */
RTE_ATOMIC(uint16_t) shared_refcnt; /* HW steering host reference counter. */
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 23c5833290..4bce136e1f 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -3743,5 +3743,12 @@ mlx5_hw_create_mirror(struct rte_eth_dev *dev,
const struct rte_flow_action *actions,
struct rte_flow_error *error);
+struct rte_flow_hw *
+mlx5_flow_nta_handle_sample(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+
#endif
#endif /* RTE_PMD_MLX5_FLOW_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 9b3e56938a..f1b90d6e56 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -62,9 +62,6 @@ static struct rte_flow_fp_ops mlx5_flow_hw_fp_ops;
#define MLX5_HW_VLAN_PUSH_VID_IDX 1
#define MLX5_HW_VLAN_PUSH_PCP_IDX 2
-#define MLX5_MIRROR_MAX_CLONES_NUM 3
-#define MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN 4
-
#define MLX5_HW_PORT_IS_PROXY(priv) \
(!!((priv)->sh->esw_mode && (priv)->master))
@@ -327,18 +324,6 @@ get_mlx5dr_table_type(const struct rte_flow_attr *attr, uint32_t specialize,
/* Non template default queue size used for inner ctrl queue. */
#define MLX5_NT_DEFAULT_QUEUE_SIZE 32
-struct mlx5_mirror_clone {
- enum rte_flow_action_type type;
- void *action_ctx;
-};
-
-struct mlx5_mirror {
- struct mlx5_indirect_list indirect;
- uint32_t clones_num;
- struct mlx5dr_action *mirror_action;
- struct mlx5_mirror_clone clone[MLX5_MIRROR_MAX_CLONES_NUM];
-};
-
static int flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev);
static int flow_hw_translate_group(struct rte_eth_dev *dev,
const struct mlx5_flow_template_table_cfg *cfg,
@@ -707,6 +692,9 @@ flow_hw_action_flags_get(const struct rte_flow_action actions[],
case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
action_flags |= MLX5_FLOW_ACTION_JUMP_TO_TABLE_INDEX;
break;
+ case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ action_flags |= MLX5_FLOW_ACTION_SAMPLE;
+ break;
case RTE_FLOW_ACTION_TYPE_VOID:
case RTE_FLOW_ACTION_TYPE_END:
break;
@@ -14231,7 +14219,9 @@ static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
if (ret)
goto free;
}
-
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
+ mlx5_flow_nta_handle_sample(dev, attr, items, actions, error);
+ }
if (action_flags & MLX5_FLOW_ACTION_RSS) {
const struct rte_flow_action_rss
*rss_conf = flow_nta_locate_rss(dev, actions, error);
diff --git a/drivers/net/mlx5/mlx5_nta_sample.c b/drivers/net/mlx5/mlx5_nta_sample.c
new file mode 100644
index 0000000000..d6ffbd8e33
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_nta_sample.c
@@ -0,0 +1,462 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2024 NVIDIA Corporation & Affiliates
+ */
+
+#include <rte_flow.h>
+#include "mlx5_malloc.h"
+#include "mlx5.h"
+#include "mlx5_defs.h"
+#include "mlx5_flow.h"
+#include "mlx5_rx.h"
+
+/*
+ * Per-port context for non-template SAMPLE flow processing.
+ * Stored in mlx5_priv::nta_sample_ctx; created lazily on the first
+ * SAMPLE action (see mlx5_init_nta_sample_context) and released by
+ * mlx5_free_sample_context.
+ */
+struct mlx5_nta_sample_ctx {
+ uint32_t groups_num;
+ struct mlx5_indexed_pool *group_ids; /* ID pool for the dedicated sample group range */
+ struct mlx5_list *mirror_actions; /* cache FW mirror actions */
+ struct mlx5_list *sample_groups; /* cache groups for sample actions */
+ struct mlx5_list *suffix_groups; /* cache groups for suffix actions */
+};
+
+/*
+ * Allocate a flow group number from the dedicated sample range.
+ *
+ * @return 0 on allocation failure, otherwise the ipool index offset by
+ * MLX5_FLOW_TABLE_SAMPLE_BASE so the result lands inside the range
+ * reserved in mlx5.h (SAMPLE_BASE .. SAMPLE_LAST).
+ */
+static uint32_t
+alloc_cached_group(struct rte_eth_dev *dev)
+{
+ void *obj;
+ uint32_t idx = 0;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_nta_sample_ctx *ctx = priv->nta_sample_ctx;
+
+ obj = mlx5_ipool_malloc(ctx->group_ids, &idx);
+ if (obj == NULL)
+ return 0;
+ return idx + MLX5_FLOW_TABLE_SAMPLE_BASE;
+}
+
+/*
+ * Return a group number previously obtained from alloc_cached_group()
+ * back to the ID pool. @group must carry the SAMPLE_BASE bias, which is
+ * stripped before freeing the ipool index.
+ */
+static void
+release_cached_group(struct rte_eth_dev *dev, uint32_t group)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_nta_sample_ctx *sample_ctx = priv->nta_sample_ctx;
+
+ mlx5_ipool_free(sample_ctx->group_ids, group - MLX5_FLOW_TABLE_SAMPLE_BASE);
+}
+
+/*
+ * Destroy the per-port sample context and all cached objects.
+ * Tolerates a partially initialized context (each member is checked),
+ * so it doubles as the error-unwind path of mlx5_init_nta_sample_context.
+ * The group lists are destroyed before the group_ids ipool because their
+ * remove callbacks free group numbers back into that ipool.
+ */
+static void
+mlx5_free_sample_context(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_nta_sample_ctx *ctx = priv->nta_sample_ctx;
+
+ if (ctx == NULL)
+ return;
+ if (ctx->sample_groups != NULL)
+ mlx5_list_destroy(ctx->sample_groups);
+ if (ctx->suffix_groups != NULL)
+ mlx5_list_destroy(ctx->suffix_groups);
+ if (ctx->group_ids != NULL)
+ mlx5_ipool_destroy(ctx->group_ids);
+ if (ctx->mirror_actions != NULL)
+ mlx5_list_destroy(ctx->mirror_actions);
+ mlx5_free(ctx);
+ priv->nta_sample_ctx = NULL;
+}
+
+/* Cached mirror entry: one HWS mirror object keyed by table config and
+ * the (sample, suffix) group pair it jumps to.
+ */
+struct mlx5_nta_sample_cached_mirror {
+ struct mlx5_flow_template_table_cfg table_cfg;
+ uint32_t sample_group;
+ uint32_t suffix_group;
+ struct mlx5_mirror *mirror;
+ struct mlx5_list_entry entry;
+};
+
+/* Lookup/creation key passed through mlx5_list cb_ctx for mirror entries. */
+struct mlx5_nta_sample_cached_mirror_ctx {
+ struct mlx5_flow_template_table_cfg *table_cfg;
+ uint32_t sample_group;
+ uint32_t suffix_group;
+};
+
+/*
+ * mlx5_list create callback for cached mirrors.
+ *
+ * Builds a synthetic action list
+ *   SAMPLE(ratio=1, actions = JUMP(sample_group)) / JUMP(suffix_group) / END
+ * and creates the HWS mirror object from it. ratio=1 means every packet
+ * is mirrored to the sample group while the original proceeds to the
+ * suffix group.
+ *
+ * @return the embedded list entry, or NULL on allocation/creation failure.
+ */
+static struct mlx5_list_entry *
+mlx5_nta_sample_create_cached_mirror(void *cache_ctx, void *cb_ctx)
+{
+ struct rte_eth_dev *dev = cache_ctx;
+ struct mlx5_nta_sample_cached_mirror_ctx *ctx = cb_ctx;
+ struct rte_flow_action_jump mirror_jump_conf = { .group = ctx->sample_group };
+ struct rte_flow_action_jump suffix_jump_conf = { .group = ctx->suffix_group };
+ struct rte_flow_action mirror_sample_actions[2] = {
+ [0] = {
+ .type = RTE_FLOW_ACTION_TYPE_JUMP,
+ .conf = &mirror_jump_conf,
+ },
+ [1] = {
+ .type = RTE_FLOW_ACTION_TYPE_END
+ }
+ };
+ struct rte_flow_action_sample mirror_conf = {
+ .ratio = 1,
+ .actions = mirror_sample_actions,
+ };
+ struct rte_flow_action mirror_actions[3] = {
+ [0] = {
+ .type = RTE_FLOW_ACTION_TYPE_SAMPLE,
+ .conf = &mirror_conf,
+ },
+ [1] = {
+ .type = RTE_FLOW_ACTION_TYPE_JUMP,
+ .conf = &suffix_jump_conf,
+ },
+ [2] = {
+ .type = RTE_FLOW_ACTION_TYPE_END
+ }
+ };
+ struct mlx5_nta_sample_cached_mirror *obj = mlx5_malloc(MLX5_MEM_ANY,
+ sizeof(*obj), 0,
+ SOCKET_ID_ANY);
+ if (obj == NULL)
+ return NULL;
+ obj->mirror = mlx5_hw_create_mirror(dev, ctx->table_cfg, mirror_actions, NULL);
+ if (obj->mirror == NULL) {
+ mlx5_free(obj);
+ return NULL;
+ }
+ /* Record the lookup key so the match callback can find this entry. */
+ obj->sample_group = ctx->sample_group;
+ obj->suffix_group = ctx->suffix_group;
+ obj->table_cfg = *ctx->table_cfg;
+ return &obj->entry;
+}
+
+/*
+ * mlx5_list per-lcore clone callback: shallow copy of the cached entry.
+ * The mlx5_mirror pointer is shared with the global entry and is released
+ * only by the remove callback; clones are freed without touching it.
+ */
+static struct mlx5_list_entry *
+mlx5_nta_sample_clone_cached_mirror(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *entry,
+ void *cb_ctx __rte_unused)
+{
+ struct mlx5_nta_sample_cached_mirror *cached_obj =
+ container_of(entry, struct mlx5_nta_sample_cached_mirror, entry);
+ struct mlx5_nta_sample_cached_mirror *new_obj = mlx5_malloc(MLX5_MEM_ANY,
+ sizeof(*new_obj), 0,
+ SOCKET_ID_ANY);
+
+ if (new_obj == NULL)
+ return NULL;
+ memcpy(new_obj, cached_obj, sizeof(*new_obj));
+ return &new_obj->entry;
+}
+
+/*
+ * mlx5_list match callback for cached mirrors.
+ * @return 0 on match, non-zero otherwise (mlx5_list convention).
+ * NOTE(review): table_cfg is compared with memcmp(), which also compares
+ * struct padding — confirm both copies are built the same way (the create
+ * callback copies a designated-initialized struct) so padding cannot
+ * cause spurious mismatches.
+ */
+static int
+mlx5_nta_sample_match_cached_mirror(void *cache_ctx __rte_unused,
+ struct mlx5_list_entry *entry, void *cb_ctx)
+{
+ bool match;
+ struct mlx5_nta_sample_cached_mirror_ctx *ctx = cb_ctx;
+ struct mlx5_nta_sample_cached_mirror *obj =
+ container_of(entry, struct mlx5_nta_sample_cached_mirror, entry);
+
+ match = obj->sample_group == ctx->sample_group &&
+ obj->suffix_group == ctx->suffix_group &&
+ memcmp(&obj->table_cfg, ctx->table_cfg, sizeof(obj->table_cfg)) == 0;
+
+ return match ? 0 : ~0;
+}
+
+/*
+ * mlx5_list remove callback: destroy the HWS mirror object and free the
+ * global cache entry. Runs when the last reference is unregistered.
+ */
+static void
+mlx5_nta_sample_remove_cached_mirror(void *cache_ctx, struct mlx5_list_entry *entry)
+{
+ struct rte_eth_dev *dev = cache_ctx;
+ struct mlx5_nta_sample_cached_mirror *obj =
+ container_of(entry, struct mlx5_nta_sample_cached_mirror, entry);
+ mlx5_hw_mirror_destroy(dev, obj->mirror);
+ mlx5_free(obj);
+}
+
+/*
+ * mlx5_list clone-free callback: release a per-lcore shallow clone.
+ * Does not touch the shared mirror object (owned by the global entry).
+ */
+static void
+mlx5_nta_sample_clone_free_cached_mirror(void *cache_ctx __rte_unused,
+ struct mlx5_list_entry *entry)
+{
+ struct mlx5_nta_sample_cached_mirror *cloned_obj =
+ container_of(entry, struct mlx5_nta_sample_cached_mirror, entry);
+
+ mlx5_free(cloned_obj);
+}
+
+/* Cached group entry: a dedicated flow group number keyed by the
+ * serialized action list that the group implements.
+ */
+struct mlx5_nta_sample_cached_group {
+ const struct rte_flow_action *actions; /* serialized buffer, owned by the entry */
+ size_t actions_size;
+ uint32_t group;
+ struct mlx5_list_entry entry;
+};
+
+/* Lookup/creation key for group entries; serialize_actions() rewrites
+ * `actions` to point at an owned contiguous buffer on first use.
+ */
+struct mlx5_nta_sample_cached_group_ctx {
+ struct rte_flow_action *actions;
+ size_t actions_size;
+};
+
+/*
+ * Serialize a flow action list into one contiguous buffer via
+ * rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS): first call computes the
+ * required size, second call performs the copy.
+ *
+ * On success @obj_ctx->actions points at the newly allocated buffer
+ * (owned by the context / future cache entry) and actions_size caches
+ * its size, so repeated calls are idempotent.
+ *
+ * @return serialized size in bytes, or a negative errno on failure.
+ * The context is updated only after both conversions succeed; the
+ * original code set actions_size before the copying call, leaving a
+ * stale non-zero size with an unserialized pointer on failure.
+ */
+static int
+serialize_actions(struct mlx5_nta_sample_cached_group_ctx *obj_ctx)
+{
+ if (obj_ctx->actions_size == 0) {
+ uint8_t *tgt_buffer;
+ int size = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, obj_ctx->actions, NULL);
+ if (size < 0)
+ return size;
+ tgt_buffer = mlx5_malloc(MLX5_MEM_ANY, size, 0, SOCKET_ID_ANY);
+ if (tgt_buffer == NULL)
+ return -ENOMEM;
+ size = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, tgt_buffer, size,
+ obj_ctx->actions, NULL);
+ if (size < 0) {
+ mlx5_free(tgt_buffer);
+ return size;
+ }
+ /* Commit state only after full success. */
+ obj_ctx->actions = (struct rte_flow_action *)tgt_buffer;
+ obj_ctx->actions_size = size;
+ }
+ return obj_ctx->actions_size;
+}
+
+/*
+ * mlx5_list create callback for cached groups.
+ * Serializes the action list (if not already done), allocates a group
+ * number from the sample range, and takes ownership of the serialized
+ * actions buffer (freed in the remove callback).
+ *
+ * @return the embedded list entry, or NULL on failure.
+ */
+static struct mlx5_list_entry *
+mlx5_nta_sample_create_cached_group(void *cache_ctx, void *cb_ctx)
+{
+ struct rte_eth_dev *dev = cache_ctx;
+ struct mlx5_nta_sample_cached_group_ctx *obj_ctx = cb_ctx;
+ struct mlx5_nta_sample_cached_group *obj;
+ int actions_size = serialize_actions(obj_ctx);
+
+ if (actions_size < 0)
+ return NULL;
+ obj = mlx5_malloc(MLX5_MEM_ANY, sizeof(*obj), 0, SOCKET_ID_ANY);
+ if (obj == NULL)
+ return NULL;
+ obj->group = alloc_cached_group(dev);
+ if (obj->group == 0) {
+ mlx5_free(obj);
+ return NULL;
+ }
+ /* Ownership of the serialized buffer moves to the cache entry. */
+ obj->actions = obj_ctx->actions;
+ obj->actions_size = obj_ctx->actions_size;
+ return &obj->entry;
+}
+
+/*
+ * mlx5_list match callback for cached groups.
+ * Compares the serialized action buffers.
+ * @return 0 on match, non-zero otherwise (mlx5_list convention).
+ */
+static int
+mlx5_nta_sample_match_cached_group(void *cache_ctx __rte_unused,
+ struct mlx5_list_entry *entry, void *cb_ctx)
+{
+ struct mlx5_nta_sample_cached_group_ctx *obj_ctx = cb_ctx;
+ int actions_size = serialize_actions(obj_ctx);
+ struct mlx5_nta_sample_cached_group *cached_obj =
+ container_of(entry, struct mlx5_nta_sample_cached_group, entry);
+ if (actions_size < 0)
+ return ~0;
+ /* Compare sizes first: a bare memcmp() over actions_size bytes could
+ * read past the end of a shorter cached buffer or false-match when
+ * the cached buffer merely starts with the same bytes.
+ */
+ if (cached_obj->actions_size != (size_t)actions_size)
+ return ~0;
+ return memcmp(cached_obj->actions, obj_ctx->actions, actions_size);
+}
+
+/*
+ * mlx5_list remove callback: return the group number to the ID pool and
+ * free the serialized actions buffer owned by the entry (cast drops the
+ * const added for cache consumers).
+ */
+static void
+mlx5_nta_sample_remove_cached_group(void *cache_ctx, struct mlx5_list_entry *entry)
+{
+ struct rte_eth_dev *dev = cache_ctx;
+ struct mlx5_nta_sample_cached_group *cached_obj =
+ container_of(entry, struct mlx5_nta_sample_cached_group, entry);
+
+ release_cached_group(dev, cached_obj->group);
+ mlx5_free((void *)(uintptr_t)cached_obj->actions);
+ mlx5_free(cached_obj);
+}
+
+/*
+ * mlx5_list per-lcore clone callback: shallow copy of the cached entry.
+ * The serialized actions buffer stays owned by the global entry and is
+ * freed only by the remove callback.
+ */
+static struct mlx5_list_entry *
+mlx5_nta_sample_clone_cached_group(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *entry,
+ void *cb_ctx __rte_unused)
+{
+ struct mlx5_nta_sample_cached_group *cached_obj =
+ container_of(entry, struct mlx5_nta_sample_cached_group, entry);
+ struct mlx5_nta_sample_cached_group *new_obj;
+
+ new_obj = mlx5_malloc(MLX5_MEM_ANY, sizeof(*new_obj), 0, SOCKET_ID_ANY);
+ if (new_obj == NULL)
+ return NULL;
+ memcpy(new_obj, cached_obj, sizeof(*new_obj));
+ return &new_obj->entry;
+}
+
+/*
+ * mlx5_list clone-free callback: release a per-lcore shallow clone
+ * without freeing the shared actions buffer.
+ */
+static void
+mlx5_nta_sample_free_cloned_cached_group(void *cache_ctx __rte_unused,
+ struct mlx5_list_entry *entry)
+{
+ struct mlx5_nta_sample_cached_group *cloned_obj =
+ container_of(entry, struct mlx5_nta_sample_cached_group, entry);
+
+ mlx5_free(cloned_obj);
+}
+
+/*
+ * Lazily allocate the per-port sample context: the group-ID pool plus
+ * the three caches (sample groups, suffix groups, mirror actions).
+ * Called on the first non-template SAMPLE action for the port.
+ *
+ * @return 0 on success, -ENOMEM on any failure (the partially built
+ * context is unwound by mlx5_free_sample_context).
+ * NOTE(review): list-creation failures are also reported as -ENOMEM.
+ */
+static int
+mlx5_init_nta_sample_context(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_indexed_pool_config ipool_cfg = {
+ .size = 0,
+ .trunk_size = 32,
+ .grow_trunk = 5,
+ .grow_shift = 1,
+ .need_lock = 1,
+ .release_mem_en = !!priv->sh->config.reclaim_mode,
+ .max_idx = MLX5_FLOW_TABLE_SAMPLE_NUM,
+ .type = "mlx5_nta_sample"
+ };
+ struct mlx5_nta_sample_ctx *ctx = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(*ctx), 0, SOCKET_ID_ANY);
+
+ if (ctx == NULL)
+ return -ENOMEM;
+ /* Publish before filling in: the error path unwinds via priv. */
+ priv->nta_sample_ctx = ctx;
+ ctx->group_ids = mlx5_ipool_create(&ipool_cfg);
+ if (ctx->group_ids == NULL)
+ goto error;
+ ctx->sample_groups = mlx5_list_create("nta sample groups", dev, true,
+ mlx5_nta_sample_create_cached_group,
+ mlx5_nta_sample_match_cached_group,
+ mlx5_nta_sample_remove_cached_group,
+ mlx5_nta_sample_clone_cached_group,
+ mlx5_nta_sample_free_cloned_cached_group);
+ if (ctx->sample_groups == NULL)
+ goto error;
+ ctx->suffix_groups = mlx5_list_create("nta sample suffix groups", dev, true,
+ mlx5_nta_sample_create_cached_group,
+ mlx5_nta_sample_match_cached_group,
+ mlx5_nta_sample_remove_cached_group,
+ mlx5_nta_sample_clone_cached_group,
+ mlx5_nta_sample_free_cloned_cached_group);
+ if (ctx->suffix_groups == NULL)
+ goto error;
+ ctx->mirror_actions = mlx5_list_create("nta sample mirror actions", dev, true,
+ mlx5_nta_sample_create_cached_mirror,
+ mlx5_nta_sample_match_cached_mirror,
+ mlx5_nta_sample_remove_cached_mirror,
+ mlx5_nta_sample_clone_cached_mirror,
+ mlx5_nta_sample_clone_free_cached_mirror);
+ if (ctx->mirror_actions == NULL)
+ goto error;
+ return 0;
+
+error:
+ mlx5_free_sample_context(dev);
+ return -ENOMEM;
+}
+
+/*
+ * Look up (or create via the list callbacks) a cached mirror for the
+ * given table config and group pair.
+ * @return the mirror object, or NULL on registration failure.
+ */
+static struct mlx5_mirror *
+get_registered_mirror(struct mlx5_flow_template_table_cfg *table_cfg,
+ struct mlx5_list *cache,
+ uint32_t sample_group,
+ uint32_t suffix_group)
+{
+ struct mlx5_nta_sample_cached_mirror_ctx ctx = {
+ .table_cfg = table_cfg,
+ .sample_group = sample_group,
+ .suffix_group = suffix_group
+ };
+ struct mlx5_list_entry *ent = mlx5_list_register(cache, &ctx);
+ return ent ? container_of(ent, struct mlx5_nta_sample_cached_mirror, entry)->mirror : NULL;
+}
+
+/*
+ * Look up (or create) a dedicated flow group implementing @actions.
+ * actions_size is left 0 so the list callbacks serialize on demand.
+ * @return the group number, or 0 on registration failure (0 is never a
+ * valid group here since allocations are biased by SAMPLE_BASE).
+ */
+static uint32_t
+get_registered_group(struct rte_flow_action *actions, struct mlx5_list *cache)
+{
+ struct mlx5_nta_sample_cached_group_ctx ctx = {
+ .actions = actions
+ };
+ struct mlx5_list_entry *ent = mlx5_list_register(cache, &ctx);
+ return ent ? container_of(ent, struct mlx5_nta_sample_cached_group, entry)->group : 0;
+}
+
+/*
+ * Create or reuse a cached HWS mirror for a non-template SAMPLE flow.
+ *
+ * Registers the sample and suffix action lists as dedicated flow groups,
+ * then registers a mirror object that samples into the sample group and
+ * forwards the original packet to the suffix group.
+ *
+ * @return the mirror object, or NULL with @error set on failure.
+ * NOTE(review): group list references taken here are not dropped on a
+ * later failure; they remain cached until the context is destroyed —
+ * confirm this is intended or add explicit unregister on error.
+ */
+static struct mlx5_mirror *
+mlx5_create_nta_mirror(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_action *sample_actions,
+ struct rte_flow_action *suffix_actions,
+ struct rte_flow_error *error)
+{
+ struct mlx5_mirror *mirror;
+ uint32_t sample_group, suffix_group;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_nta_sample_ctx *ctx = priv->nta_sample_ctx;
+ struct mlx5_flow_template_table_cfg table_cfg = {
+ .external = true,
+ .attr = {
+ .flow_attr = {
+ .ingress = attr->ingress,
+ .egress = attr->egress,
+ .transfer = attr->transfer
+ }
+ }
+ };
+
+ sample_group = get_registered_group(sample_actions, ctx->sample_groups);
+ if (sample_group == 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "Failed to register sample group");
+ return NULL;
+ }
+ suffix_group = get_registered_group(suffix_actions, ctx->suffix_groups);
+ if (suffix_group == 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "Failed to register suffix group");
+ return NULL;
+ }
+ mirror = get_registered_mirror(&table_cfg, ctx->mirror_actions, sample_group, suffix_group);
+ if (mirror == NULL)
+ /* Original code returned NULL without setting *error, breaking
+ * the rte_flow error contract relied on by the caller.
+ */
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "Failed to register mirror action");
+ return mirror;
+}
+
+/*
+ * Split a flow action list around the SAMPLE action:
+ * actions before SAMPLE are copied to @prefix_actions, actions after it
+ * to @suffix_actions, VOID actions are dropped, and *sample_action is
+ * pointed at the SAMPLE action itself (NULL if none is present).
+ *
+ * The `continue` statements re-evaluate the do/while condition, whose
+ * `(action++)` side effect advances the cursor — so VOIDs are skipped
+ * without being copied. The terminating END action is copied into the
+ * suffix list; the prefix list relies on the caller's zero-initialized
+ * buffers for termination (RTE_FLOW_ACTION_TYPE_END is 0).
+ * NOTE(review): assumes both output buffers hold at least as many
+ * entries as the input list (callers use MLX5_HW_MAX_ACTS) — confirm.
+ */
+static void
+mlx5_nta_parse_sample_actions(const struct rte_flow_action *action,
+ const struct rte_flow_action **sample_action,
+ struct rte_flow_action *prefix_actions,
+ struct rte_flow_action *suffix_actions)
+{
+ struct rte_flow_action *pa = prefix_actions;
+ struct rte_flow_action *sa = suffix_actions;
+
+ *sample_action = NULL;
+ do {
+ if (action->type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
+ *sample_action = action;
+ } else if (*sample_action == NULL) {
+ if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
+ continue;
+ *(pa++) = *action;
+ } else {
+ if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
+ continue;
+ *(sa++) = *action;
+ }
+ } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
+}
+
+/*
+ * Non-template SAMPLE action entry point.
+ *
+ * Lazily initializes the per-port sample context, splits the action
+ * list into prefix / SAMPLE / suffix parts, and creates (or reuses) the
+ * HWS mirror object for the split. Creating the actual flow rules over
+ * the mirror is added by a follow-up patch, so the function currently
+ * always returns NULL; on failure *error is set.
+ */
+struct rte_flow_hw *
+mlx5_flow_nta_handle_sample(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[] __rte_unused,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_mirror *mirror;
+ const struct rte_flow_action *sample;
+ struct rte_flow_action *sample_actions;
+ const struct rte_flow_action_sample *sample_conf;
+ struct rte_flow_action prefix_actions[MLX5_HW_MAX_ACTS] = { 0 };
+ struct rte_flow_action suffix_actions[MLX5_HW_MAX_ACTS] = { 0 };
+
+ if (priv->nta_sample_ctx == NULL) {
+ int rc = mlx5_init_nta_sample_context(dev);
+ if (rc != 0) {
+ rte_flow_error_set(error, -rc, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "Failed to allocate sample context");
+ return NULL;
+ }
+ }
+ mlx5_nta_parse_sample_actions(actions, &sample, prefix_actions, suffix_actions);
+ /* Guard the dereference: the original code read sample->conf without
+ * checking that a SAMPLE action was actually found.
+ */
+ if (sample == NULL || sample->conf == NULL) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "sample action not found in actions list");
+ return NULL;
+ }
+ sample_conf = (const struct rte_flow_action_sample *)sample->conf;
+ sample_actions = (struct rte_flow_action *)(uintptr_t)sample_conf->actions;
+ mirror = mlx5_create_nta_mirror(dev, attr, sample_actions,
+ suffix_actions, error);
+ if (mirror == NULL)
+ return NULL;
+ /* TODO: split the flow over the mirror object (follow-up patch). */
+ return NULL;
+}
--
2.48.1
next prev parent reply other threads:[~2025-06-17 13:40 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-06-17 13:39 [PATCH 1/5] net/mlx5: fix the table flags of mirror action Gregory Etelson
2025-06-17 13:39 ` [PATCH 2/5] net/mlx5: add mlx5_hw_create_mirror function Gregory Etelson
2025-06-17 13:39 ` Gregory Etelson [this message]
2025-06-17 13:39 ` [PATCH 4/5] net/mlx5: add MLX5 mirror flow action Gregory Etelson
2025-06-17 13:39 ` [PATCH 5/5] net/mlx5: support non-template SAMPLE " Gregory Etelson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250617133933.313443-3-getelson@nvidia.com \
--to=getelson@nvidia.com \
--cc=bingz@nvidia.com \
--cc=dev@dpdk.org \
--cc=dsosnowski@nvidia.com \
--cc=matan@nvidia.com \
--cc=mkashani@nvidia.com \
--cc=orika@nvidia.com \
--cc=rasland@nvidia.com \
--cc=suanmingm@nvidia.com \
--cc=viacheslavo@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).