From: Maayan Kashani <mkashani@nvidia.com>
To: <dev@dpdk.org>
Cc: <mkashani@nvidia.com>, <dsosnowski@nvidia.com>,
<rasland@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
Ori Kam <orika@nvidia.com>, Suanming Mou <suanmingm@nvidia.com>,
Matan Azrad <matan@nvidia.com>
Subject: [PATCH v4 01/11] net/mlx5: initial design of non template to hws
Date: Thu, 6 Jun 2024 13:23:06 +0300 [thread overview]
Message-ID: <20240606102317.172553-2-mkashani@nvidia.com> (raw)
In-Reply-To: <20240606102317.172553-1-mkashani@nvidia.com>
[-- Warning: decoded text below may be mangled, UTF-8 assumed --]
[-- Attachment #1: Type: text/plain; charset="y", Size: 54017 bytes --]
Implement the framework and needed building
blocks for the non-template to HWS APIs.
Added validate, list_create and list_destroy to mlx5_flow_hw_drv_ops.
Renamed the old list_create/list_destroy functions to legacy_*
and added a call from the verbs/DV ops to the legacy functions.
Updated rte_flow_hw as needed.
Added rte_flow_nt2hws structure for non-template rule data.
Signed-off-by: Maayan Kashani <mkashani@nvidia.com>
squash to net/mlx5: initial design of non template to hws
Acked-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
---
drivers/net/mlx5/linux/mlx5_os.c | 8 +-
drivers/net/mlx5/mlx5.h | 9 +
drivers/net/mlx5/mlx5_flow.c | 139 +++++---
drivers/net/mlx5/mlx5_flow.h | 74 ++++-
drivers/net/mlx5/mlx5_flow_dv.c | 192 +++++++++---
drivers/net/mlx5/mlx5_flow_hw.c | 487 +++++++++++++++++++++++++++--
drivers/net/mlx5/mlx5_flow_verbs.c | 2 +
drivers/net/mlx5/mlx5_trigger.c | 6 +
drivers/net/mlx5/windows/mlx5_os.c | 8 +-
9 files changed, 808 insertions(+), 117 deletions(-)
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 6dd12f0f68..346e6c7bf9 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -69,7 +69,7 @@ static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
static struct mlx5_local_data mlx5_local_data;
/* rte flow indexed pool configuration. */
-static struct mlx5_indexed_pool_config icfg[] = {
+static const struct mlx5_indexed_pool_config default_icfg[] = {
{
.size = sizeof(struct rte_flow),
.trunk_size = 64,
@@ -1068,7 +1068,9 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
struct mlx5_port_info vport_info = { .query_flags = 0 };
int nl_rdma;
int i;
+ struct mlx5_indexed_pool_config icfg[RTE_DIM(default_icfg)];
+ memcpy(icfg, default_icfg, sizeof(icfg));
/* Determine if this port representor is supposed to be spawned. */
if (switch_info->representor && dpdk_dev->devargs &&
!mlx5_representor_match(spawn, eth_da))
@@ -1539,6 +1541,10 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
icfg[i].release_mem_en = !!sh->config.reclaim_mode;
if (sh->config.reclaim_mode)
icfg[i].per_core_cache = 0;
+#ifdef HAVE_MLX5_HWS_SUPPORT
+ if (priv->sh->config.dv_flow_en == 2)
+ icfg[i].size = sizeof(struct rte_flow_hw) + sizeof(struct rte_flow_nt2hws);
+#endif
priv->flows[i] = mlx5_ipool_create(&icfg[i]);
if (!priv->flows[i])
goto error;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 9e4a5feb49..92ad33d486 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -2293,6 +2293,15 @@ int mlx5_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
+uint32_t
+mlx5_flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ bool external, struct rte_flow_error *error);
+void
+mlx5_flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
+ uint32_t flow_idx);
struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 8eafceff37..c1bcb0a548 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -4929,15 +4929,16 @@ flow_check_hairpin_split(struct rte_eth_dev *dev,
}
/* Declare flow create/destroy prototype in advance. */
+
static uint32_t
-flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
+flow_drv_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
bool external, struct rte_flow_error *error);
static void
-flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
+flow_drv_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
uint32_t flow_idx);
int
@@ -5058,7 +5059,7 @@ flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx)
* be applied, removed, deleted in arbitrary order
* by list traversing.
*/
- mcp_res->rix_flow = flow_list_create(dev, MLX5_FLOW_TYPE_MCP,
+ mcp_res->rix_flow = flow_drv_list_create(dev, MLX5_FLOW_TYPE_MCP,
&attr, items, actions, false, error);
if (!mcp_res->rix_flow) {
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], idx);
@@ -5152,7 +5153,7 @@ flow_dv_mreg_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
struct mlx5_priv *priv = dev->data->dev_private;
MLX5_ASSERT(mcp_res->rix_flow);
- flow_list_destroy(dev, MLX5_FLOW_TYPE_MCP, mcp_res->rix_flow);
+ flow_drv_list_destroy(dev, MLX5_FLOW_TYPE_MCP, mcp_res->rix_flow);
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
}
@@ -7242,12 +7243,12 @@ flow_tunnel_from_rule(const struct mlx5_flow *flow)
* @return
* A flow index on success, 0 otherwise and rte_errno is set.
*/
-static uint32_t
-flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action original_actions[],
- bool external, struct rte_flow_error *error)
+uint32_t
+flow_legacy_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action original_actions[],
+ bool external, struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct rte_flow *flow = NULL;
@@ -7294,8 +7295,14 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
.table_id = 0
};
int ret;
+ struct mlx5_shared_action_rss *shared_rss_action;
- MLX5_ASSERT(wks);
+ if (!wks)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "failed to push flow workspace");
+ memset(indir_actions, 0, sizeof(indir_actions));
rss_desc = &wks->rss_desc;
ret = flow_action_handles_translate(dev, original_actions,
indir_actions,
@@ -7469,11 +7476,16 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
ret = rte_errno; /* Save rte_errno before cleanup. */
flow_mreg_del_copy_action(dev, flow);
flow_drv_destroy(dev, flow);
- if (rss_desc->shared_rss)
- rte_atomic_fetch_sub_explicit(&((struct mlx5_shared_action_rss *)
+
+ if (rss_desc->shared_rss) {
+ shared_rss_action = (struct mlx5_shared_action_rss *)
mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
- rss_desc->shared_rss))->refcnt, 1, rte_memory_order_relaxed);
+ rss_desc->shared_rss);
+ if (shared_rss_action)
+ rte_atomic_fetch_sub_explicit(&(shared_rss_action)->refcnt, 1,
+ rte_memory_order_relaxed);
+ }
mlx5_ipool_free(priv->flows[type], idx);
rte_errno = ret; /* Restore rte_errno. */
ret = rte_errno;
@@ -7528,7 +7540,7 @@ mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
};
struct rte_flow_error error;
- return (void *)(uintptr_t)flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
+ return (void *)(uintptr_t)flow_drv_list_create(dev, MLX5_FLOW_TYPE_CTL,
&attr, &pattern,
actions, false, &error);
}
@@ -7596,14 +7608,14 @@ mlx5_flow_create_devx_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sq_num)
* Creates group 0, highest priority jump flow.
* Matches txq to bypass kernel packets.
*/
- if (flow_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, pattern, actions,
+ if (flow_drv_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, pattern, actions,
false, &error) == 0)
return 0;
/* Create group 1, lowest priority redirect flow for txq. */
attr.group = 1;
actions[0].conf = &port;
actions[0].type = RTE_FLOW_ACTION_TYPE_PORT_ID;
- return flow_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, pattern,
+ return flow_drv_list_create(dev, MLX5_FLOW_TYPE_CTL, &attr, pattern,
actions, false, &error);
}
@@ -7737,8 +7749,9 @@ mlx5_flow_cache_flow_toggle(struct rte_eth_dev *dev, bool orig_prio)
attr = flow_info->attr;
if (orig_prio)
attr.priority = flow_info->orig_prio;
- flow_info->flow_idx_high_prio = flow_list_create(dev, MLX5_FLOW_TYPE_GEN,
- &attr, flow_info->items, flow_info->actions,
+ flow_info->flow_idx_high_prio = mlx5_flow_list_create(dev,
+ MLX5_FLOW_TYPE_GEN, &attr,
+ flow_info->items, flow_info->actions,
true, &error);
if (!flow_info->flow_idx_high_prio) {
DRV_LOG(ERR, "Priority toggle failed internally.");
@@ -7758,7 +7771,7 @@ mlx5_flow_cache_flow_toggle(struct rte_eth_dev *dev, bool orig_prio)
flow_info->flow_idx_low_prio);
if (high && low) {
RTE_SWAP(*low, *high);
- flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN,
+ flow_drv_list_destroy(dev, MLX5_FLOW_TYPE_GEN,
flow_info->flow_idx_low_prio);
flow_info->flow_idx_high_prio = 0;
}
@@ -7772,7 +7785,7 @@ mlx5_flow_cache_flow_toggle(struct rte_eth_dev *dev, bool orig_prio)
while (flow_info) {
if (flow_info->orig_prio != flow_info->attr.priority) {
if (flow_info->flow_idx_high_prio)
- flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN,
+ flow_drv_list_destroy(dev, MLX5_FLOW_TYPE_GEN,
flow_info->flow_idx_high_prio);
else
break;
@@ -7907,13 +7920,6 @@ mlx5_flow_create(struct rte_eth_dev *dev,
uint32_t prio = attr->priority;
uint32_t flow_idx;
- if (priv->sh->config.dv_flow_en == 2) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "Flow non-Q creation not supported");
- return NULL;
- }
/*
* If the device is not started yet, it is not allowed to created a
* flow from application. PMD default flows and traffic control flows
@@ -7934,18 +7940,44 @@ mlx5_flow_create(struct rte_eth_dev *dev,
RTE_PMD_MLX5_FLOW_ENGINE_FLAG_STANDBY_DUP_INGRESS)))
new_attr->priority += 1;
}
- flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, attr, items, actions, true, error);
+ flow_idx = flow_drv_list_create(dev, MLX5_FLOW_TYPE_GEN, attr, items, actions, true, error);
if (!flow_idx)
return NULL;
if (unlikely(mlx5_need_cache_flow(priv, attr))) {
if (mlx5_flow_cache_flow_info(dev, attr, prio, items, actions, flow_idx)) {
- flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx);
+ flow_drv_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx);
flow_idx = 0;
}
}
return (void *)(uintptr_t)flow_idx;
}
+uint32_t
+mlx5_flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ bool external, struct rte_flow_error *error)
+{
+ return flow_drv_list_create(dev, type, attr, items, actions, external,
+ error);
+}
+
+uint32_t
+flow_drv_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ bool external, struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+ enum mlx5_flow_drv_type drv_type = flow_get_drv_type(dev, attr);
+
+ fops = flow_get_drv_ops(drv_type);
+ return fops->list_create(dev, type, attr, items, actions, external,
+ error);
+}
+
/**
* Destroy a flow in a list.
*
@@ -7954,15 +7986,16 @@ mlx5_flow_create(struct rte_eth_dev *dev,
* @param[in] flow_idx
* Index of flow to destroy.
*/
-static void
-flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
- uint32_t flow_idx)
+void
+flow_legacy_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
+ uint32_t flow_idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct rte_flow *flow = mlx5_ipool_get(priv->flows[type], flow_idx);
+ struct rte_flow *flow = mlx5_ipool_get(priv->flows[type], (uint32_t)flow_idx);
if (!flow)
return;
+ MLX5_ASSERT((type >= MLX5_FLOW_TYPE_CTL) && (type < MLX5_FLOW_TYPE_MAXI));
MLX5_ASSERT(flow->type == type);
/*
* Update RX queue flags only if port is started, otherwise it is
@@ -7984,6 +8017,25 @@ flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
mlx5_ipool_free(priv->flows[type], flow_idx);
}
+static void
+flow_drv_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
+ uint32_t flow_idx)
+{
+ const struct mlx5_flow_driver_ops *fops;
+ struct rte_flow_attr attr = { .transfer = 0 };
+ enum mlx5_flow_drv_type drv_type = flow_get_drv_type(dev, &attr);
+
+ fops = flow_get_drv_ops(drv_type);
+ fops->list_destroy(dev, type, flow_idx);
+}
+
+void
+mlx5_flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
+ uint32_t flow_idx)
+{
+ flow_drv_list_destroy(dev, type, flow_idx);
+}
+
/**
* Destroy all flows.
*
@@ -8013,7 +8065,7 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
#endif
MLX5_IPOOL_FOREACH(priv->flows[type], fidx, flow) {
- flow_list_destroy(dev, type, fidx);
+ flow_drv_list_destroy(dev, type, fidx);
if (unlikely(mlx5_need_cache_flow(priv, NULL) && type == MLX5_FLOW_TYPE_GEN)) {
flow_info = LIST_FIRST(&mode_info->hot_upgrade);
while (flow_info) {
@@ -8285,7 +8337,7 @@ mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
actions[0].conf = &jump;
actions[1].type = RTE_FLOW_ACTION_TYPE_END;
- flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
+ flow_idx = flow_drv_list_create(dev, MLX5_FLOW_TYPE_CTL,
&attr, items, actions, false, &error);
if (!flow_idx) {
DRV_LOG(DEBUG,
@@ -8375,7 +8427,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
action_rss.types = 0;
for (i = 0; i != priv->reta_idx_n; ++i)
queue[i] = (*priv->reta_idx)[i];
- flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
+ flow_idx = flow_drv_list_create(dev, MLX5_FLOW_TYPE_CTL,
&attr, items, actions, false, &error);
if (!flow_idx)
return -rte_errno;
@@ -8450,7 +8502,7 @@ mlx5_flow_lacp_miss(struct rte_eth_dev *dev)
},
};
struct rte_flow_error error;
- uint32_t flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
+ uint32_t flow_idx = flow_drv_list_create(dev, MLX5_FLOW_TYPE_CTL,
&attr, items, actions,
false, &error);
@@ -8479,7 +8531,7 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"Flow non-Q destruction not supported");
- flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN,
+ flow_drv_list_destroy(dev, MLX5_FLOW_TYPE_GEN,
(uintptr_t)(void *)flow);
if (unlikely(mlx5_need_cache_flow(priv, NULL))) {
flow_info = LIST_FIRST(&mode_info->hot_upgrade);
@@ -9791,14 +9843,14 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
if (!priv->sh->config.dv_flow_en)
break;
/* Create internal flow, validation skips copy action. */
- flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr,
+ flow_idx = flow_drv_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr,
items, actions, false, &error);
flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
flow_idx);
if (!flow)
continue;
priv->sh->flow_mreg_c[n++] = idx;
- flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx);
+ flow_drv_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx);
}
for (; n < MLX5_MREG_C_NUM; ++n)
priv->sh->flow_mreg_c[n] = REG_NON;
@@ -12015,11 +12067,12 @@ mlx5_flow_discover_ipv6_tc_support(struct rte_eth_dev *dev)
actions[0].conf = &set_dscp;
actions[1].type = RTE_FLOW_ACTION_TYPE_END;
- flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr, items, actions, true, &error);
+ flow_idx = mlx5_flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr, items,
+ actions, true, &error);
if (!flow_idx)
return -EOPNOTSUPP;
- flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx);
+ mlx5_flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx);
return 0;
}
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 8b4088e35e..e06e7d5cc8 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -620,8 +620,12 @@ struct mlx5_flow_dv_match_params {
/* Matcher structure. */
struct mlx5_flow_dv_matcher {
struct mlx5_list_entry entry; /**< Pointer to the next element. */
- struct mlx5_flow_tbl_resource *tbl;
- /**< Pointer to the table(group) the matcher associated with. */
+ union {
+ struct mlx5_flow_tbl_resource *tbl;
+ /**< Pointer to the table(group) the matcher associated with for DV flow. */
+ struct mlx5_flow_group *group;
+ /* Group of this matcher for HWS non template flow. */
+ };
void *matcher_object; /**< Pointer to DV matcher */
uint16_t crc; /**< CRC of key. */
uint16_t priority; /**< Priority of matcher. */
@@ -1303,10 +1307,24 @@ enum {
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
+#define MLX5_DR_RULE_SIZE 72
+
+/** HWS non template flow data. */
+struct rte_flow_nt2hws {
+ /** BWC rule pointer. */
+ struct mlx5dr_bwc_rule *nt_rule;
+ /** The matcher for non template api. */
+ struct mlx5_flow_dv_matcher *matcher;
+} __rte_packed;
+
/** HWS flow struct. */
struct rte_flow_hw {
- /** The table flow allcated from. */
- struct rte_flow_template_table *table;
+ union {
+ /** The table flow allocated from. */
+ struct rte_flow_template_table *table;
+ /** Data needed for non template flows. */
+ struct rte_flow_nt2hws *nt2hws;
+ };
/** Application's private data passed to enqueued flow operation. */
void *user_data;
/** Flow index from indexed pool. */
@@ -1591,6 +1609,8 @@ struct mlx5_flow_group {
enum mlx5dr_table_type type; /* Table type. */
uint32_t group_id; /* Group id. */
uint32_t idx; /* Group memory index. */
+ /* List of all matchers created for this group in non template api */
+ struct mlx5_list *matchers;
};
@@ -2092,7 +2112,20 @@ void flow_hw_set_port_info(struct rte_eth_dev *dev);
void flow_hw_clear_port_info(struct rte_eth_dev *dev);
int flow_hw_create_vport_action(struct rte_eth_dev *dev);
void flow_hw_destroy_vport_action(struct rte_eth_dev *dev);
-
+int
+flow_hw_init(struct rte_eth_dev *dev,
+ struct rte_flow_error *error);
+
+typedef uint32_t (*mlx5_flow_list_create_t)(struct rte_eth_dev *dev,
+ enum mlx5_flow_type type,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ bool external,
+ struct rte_flow_error *error);
+typedef void (*mlx5_flow_list_destroy_t)(struct rte_eth_dev *dev,
+ enum mlx5_flow_type type,
+ uint32_t flow_idx);
typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
@@ -2455,6 +2488,8 @@ typedef int (*table_resize_complete_t)(struct rte_eth_dev *dev,
struct rte_flow_error *error);
struct mlx5_flow_driver_ops {
+ mlx5_flow_list_create_t list_create;
+ mlx5_flow_list_destroy_t list_destroy;
mlx5_flow_validate_t validate;
mlx5_flow_prepare_t prepare;
mlx5_flow_translate_t translate;
@@ -3071,11 +3106,14 @@ struct mlx5_list_entry *flow_dv_encap_decap_clone_cb(void *tool_ctx,
void flow_dv_encap_decap_clone_free_cb(void *tool_ctx,
struct mlx5_list_entry *entry);
-int flow_dv_matcher_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
+int flow_matcher_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
void *ctx);
-struct mlx5_list_entry *flow_dv_matcher_create_cb(void *tool_ctx, void *ctx);
-void flow_dv_matcher_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
-
+struct mlx5_list_entry *flow_matcher_create_cb(void *tool_ctx, void *ctx);
+void flow_matcher_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
+struct mlx5_list_entry *flow_matcher_clone_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *entry, void *cb_ctx);
+void flow_matcher_clone_free_cb(void *tool_ctx __rte_unused,
+ struct mlx5_list_entry *entry);
int flow_dv_port_id_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
void *cb_ctx);
struct mlx5_list_entry *flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx);
@@ -3121,6 +3159,10 @@ void flow_dv_action_rss_l34_hash_adjust(uint64_t rss_types,
uint64_t *hash_field);
uint32_t flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
const uint64_t hash_fields);
+int flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ bool external, int hairpin, struct rte_flow_error *error);
struct mlx5_list_entry *flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx);
void flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
@@ -3226,6 +3268,13 @@ int flow_dv_translate_items_hws(const struct rte_flow_item *items,
uint8_t *match_criteria,
struct rte_flow_error *error);
+int __flow_dv_translate_items_hws(const struct rte_flow_item *items,
+ struct mlx5_flow_attr *attr, void *key,
+ uint32_t key_type, uint64_t *item_flags,
+ uint8_t *match_criteria,
+ bool nt_flow,
+ struct rte_flow_error *error);
+
int mlx5_flow_pick_transfer_proxy(struct rte_eth_dev *dev,
uint16_t *proxy_port_id,
struct rte_flow_error *error);
@@ -3268,6 +3317,13 @@ int mlx5_flow_item_field_width(struct rte_eth_dev *dev,
enum rte_flow_field_id field, int inherit,
const struct rte_flow_attr *attr,
struct rte_flow_error *error);
+uint32_t flow_legacy_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ bool external, struct rte_flow_error *error);
+void flow_legacy_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
+ uint32_t flow_idx);
static __rte_always_inline int
flow_hw_get_srh_flex_parser_byte_off_from_ctx(void *dr_ctx __rte_unused)
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index f9c56af86c..e3855731b8 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -7622,7 +7622,7 @@ mlx5_flow_validate_item_ib_bth(struct rte_eth_dev *dev,
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
+int
flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
@@ -8076,6 +8076,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
&item_flags, error);
if (ret < 0)
return ret;
+ last_item = MLX5_FLOW_LAYER_ASO_CT;
break;
case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
/* tunnel offload item was processed before
@@ -8214,6 +8215,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
rw_act_num += MLX5_ACT_NUM_SET_MARK;
break;
case RTE_FLOW_ACTION_TYPE_SET_META:
+ if (priv->sh->config.dv_flow_en == 2)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
ret = flow_dv_validate_action_set_meta(dev, actions,
action_flags,
attr, error);
@@ -8568,6 +8574,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
rw_act_num += MLX5_ACT_NUM_SET_TAG;
break;
case RTE_FLOW_ACTION_TYPE_METER:
+ if (priv->sh->config.dv_flow_en == 2)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
ret = mlx5_flow_validate_action_meter(dev,
action_flags,
item_flags,
@@ -10408,6 +10419,8 @@ flow_dv_match_meta_reg(void *key, enum modify_reg reg_type,
MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
uint32_t temp;
+ if (!key)
+ return;
data &= mask;
switch (reg_type) {
case REG_A:
@@ -11505,8 +11518,8 @@ __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria)
}
}
-static struct mlx5_list_entry *
-flow_dv_matcher_clone_cb(void *tool_ctx __rte_unused,
+struct mlx5_list_entry *
+flow_matcher_clone_cb(void *tool_ctx __rte_unused,
struct mlx5_list_entry *entry, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
@@ -11528,8 +11541,8 @@ flow_dv_matcher_clone_cb(void *tool_ctx __rte_unused,
return &resource->entry;
}
-static void
-flow_dv_matcher_clone_free_cb(void *tool_ctx __rte_unused,
+void
+flow_matcher_clone_free_cb(void *tool_ctx __rte_unused,
struct mlx5_list_entry *entry)
{
mlx5_free(entry);
@@ -11602,11 +11615,11 @@ flow_dv_tbl_create_cb(void *tool_ctx, void *cb_ctx)
key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
key.level, key.id);
tbl_data->matchers = mlx5_list_create(matcher_name, sh, true,
- flow_dv_matcher_create_cb,
- flow_dv_matcher_match_cb,
- flow_dv_matcher_remove_cb,
- flow_dv_matcher_clone_cb,
- flow_dv_matcher_clone_free_cb);
+ flow_matcher_create_cb,
+ flow_matcher_match_cb,
+ flow_matcher_remove_cb,
+ flow_matcher_clone_cb,
+ flow_matcher_clone_free_cb);
if (!tbl_data->matchers) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -11809,7 +11822,7 @@ flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
}
int
-flow_dv_matcher_match_cb(void *tool_ctx __rte_unused,
+flow_matcher_match_cb(void *tool_ctx __rte_unused,
struct mlx5_list_entry *entry, void *cb_ctx)
{
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
@@ -11824,7 +11837,7 @@ flow_dv_matcher_match_cb(void *tool_ctx __rte_unused,
}
struct mlx5_list_entry *
-flow_dv_matcher_create_cb(void *tool_ctx, void *cb_ctx)
+flow_matcher_create_cb(void *tool_ctx, void *cb_ctx)
{
struct mlx5_dev_ctx_shared *sh = tool_ctx;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
@@ -11846,23 +11859,26 @@ flow_dv_matcher_create_cb(void *tool_ctx, void *cb_ctx)
"cannot create matcher");
return NULL;
}
+ /*Consider memcpy(resource, ref, sizeof(*resource));*/
*resource = *ref;
- dv_attr.match_criteria_enable =
- flow_dv_matcher_enable(resource->mask.buf);
- __flow_dv_adjust_buf_size(&ref->mask.size,
- dv_attr.match_criteria_enable);
- dv_attr.priority = ref->priority;
- if (tbl->is_egress)
- dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
- ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
- tbl->tbl.obj,
- &resource->matcher_object);
- if (ret) {
- mlx5_free(resource);
- rte_flow_error_set(ctx->error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "cannot create matcher");
- return NULL;
+ if (sh->config.dv_flow_en != 2) {
+ dv_attr.match_criteria_enable =
+ flow_dv_matcher_enable(resource->mask.buf);
+ __flow_dv_adjust_buf_size(&ref->mask.size,
+ dv_attr.match_criteria_enable);
+ dv_attr.priority = ref->priority;
+ if (tbl->is_egress)
+ dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
+ ret = mlx5_flow_os_create_flow_matcher(sh->cdev->ctx, &dv_attr,
+ tbl->tbl.obj,
+ &resource->matcher_object);
+ if (ret) {
+ mlx5_free(resource);
+ rte_flow_error_set(ctx->error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot create matcher");
+ return NULL;
+ }
}
return &resource->entry;
}
@@ -14228,6 +14244,60 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
return 0;
}
+/**
+ * Fill the flow matcher with DV spec for items supported in non template mode.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] wks
+ * Pointer to the matcher workspace.
+ * @param[in] key
+ * Pointer to the flow matcher key.
+ * @param[in] key_type
+ * Key type.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_translate_items_nta(struct rte_eth_dev *dev,
+ const struct rte_flow_item *items,
+ struct mlx5_dv_matcher_workspace *wks,
+ void *key, uint32_t key_type,
+ struct rte_flow_error *error)
+{
+ int item_type;
+ int ret = 0;
+ int tunnel;
+ /* Dummy structure to enable the key calculation for flex item. */
+ struct mlx5_flow_dv_match_params flex_item_key;
+
+ tunnel = !!(wks->item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ item_type = items->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_CONNTRACK:
+ flow_dv_translate_item_aso_ct(dev, key, NULL, items);
+ wks->last_item = MLX5_FLOW_LAYER_ASO_CT;
+ break;
+ /* TODO: remove once flex item translation is added to flow_dv_translate_items. */
+ case RTE_FLOW_ITEM_TYPE_FLEX:
+ mlx5_flex_flow_translate_item(dev, key, flex_item_key.buf, items, tunnel != 0);
+ wks->last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX : MLX5_FLOW_ITEM_OUTER_FLEX;
+ break;
+ default:
+ ret = flow_dv_translate_items(dev, items, wks, key, key_type, error);
+ if (ret)
+ return ret;
+ break;
+ }
+ wks->item_flags |= wks->last_item;
+ return 0;
+}
+
/**
* Fill the HW steering flow with DV spec.
*
@@ -14241,6 +14311,8 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
* Key type.
* @param[in, out] item_flags
* Pointer to the flow item flags.
+ * @param[in, out] nt_flow
+ * Non template flow.
* @param[out] error
* Pointer to the error structure.
*
@@ -14248,10 +14320,11 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-flow_dv_translate_items_hws(const struct rte_flow_item *items,
+__flow_dv_translate_items_hws(const struct rte_flow_item *items,
struct mlx5_flow_attr *attr, void *key,
uint32_t key_type, uint64_t *item_flags,
uint8_t *match_criteria,
+ bool nt_flow,
struct rte_flow_error *error)
{
struct mlx5_flow_workspace *flow_wks = mlx5_flow_push_thread_workspace();
@@ -14281,10 +14354,18 @@ flow_dv_translate_items_hws(const struct rte_flow_item *items,
NULL, "item not supported");
goto exit;
}
- ret = flow_dv_translate_items(&rte_eth_devices[attr->port_id],
- items, &wks, key, key_type, NULL);
- if (ret)
- goto exit;
+ /* Non template flow. */
+ if (nt_flow) {
+ ret = flow_dv_translate_items_nta(&rte_eth_devices[attr->port_id],
+ items, &wks, key, key_type, NULL);
+ if (ret)
+ goto exit;
+ } else {
+ ret = flow_dv_translate_items(&rte_eth_devices[attr->port_id],
+ items, &wks, key, key_type, NULL);
+ if (ret)
+ goto exit;
+ }
}
if (wks.item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
flow_dv_translate_item_integrity_post(key,
@@ -14333,6 +14414,37 @@ flow_dv_translate_items_hws(const struct rte_flow_item *items,
return ret;
}
+/**
+ * Fill the HW steering flow with DV spec.
+ * This function assumes given flow is created from template API.
+ *
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] key
+ * Pointer to the flow matcher key.
+ * @param[in] key_type
+ * Key type.
+ * @param[in, out] item_flags
+ * Pointer to the flow item flags.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+flow_dv_translate_items_hws(const struct rte_flow_item *items,
+ struct mlx5_flow_attr *attr, void *key,
+ uint32_t key_type, uint64_t *item_flags,
+ uint8_t *match_criteria,
+ struct rte_flow_error *error)
+{
+ return __flow_dv_translate_items_hws(items, attr, key, key_type, item_flags, match_criteria,
+ false, error);
+}
+
/**
* Fill the SW steering flow with DV spec.
*
@@ -14389,6 +14501,7 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_CONNTRACK:
flow_dv_translate_item_aso_ct(dev, match_mask,
match_value, items);
+ wks.last_item = MLX5_FLOW_LAYER_ASO_CT;
break;
case RTE_FLOW_ITEM_TYPE_FLEX:
flow_dv_translate_item_flex(dev, match_mask,
@@ -15670,14 +15783,21 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
}
void
-flow_dv_matcher_remove_cb(void *tool_ctx __rte_unused,
+flow_matcher_remove_cb(void *tool_ctx __rte_unused,
struct mlx5_list_entry *entry)
{
struct mlx5_flow_dv_matcher *resource = container_of(entry,
typeof(*resource),
entry);
+#ifdef HAVE_MLX5_HWS_SUPPORT
+ struct mlx5_dev_ctx_shared *sh = tool_ctx;
- claim_zero(mlx5_flow_os_destroy_flow_matcher(resource->matcher_object));
+ if (sh->config.dv_flow_en == 2)
+ claim_zero(mlx5dr_bwc_matcher_destroy((struct mlx5dr_bwc_matcher *)
+ resource->matcher_object));
+ else
+#endif
+ claim_zero(mlx5_flow_os_destroy_flow_matcher(resource->matcher_object));
mlx5_free(resource);
}
@@ -20082,6 +20202,8 @@ flow_dv_discover_priorities(struct rte_eth_dev *dev,
}
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
+ .list_create = flow_legacy_list_create,
+ .list_destroy = flow_legacy_list_destroy,
.validate = flow_dv_validate,
.prepare = flow_dv_prepare,
.translate = flow_dv_translate,
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 80efcf44fa..efd2141913 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -256,6 +256,9 @@ get_mlx5dr_table_type(const struct rte_flow_attr *attr)
return type;
}
+/* Non template default queue size used for inner ctrl queue. */
+#define MLX5_NT_DEFAULT_QUEUE_SIZE 32
+
struct mlx5_mirror_clone {
enum rte_flow_action_type type;
void *action_ctx;
@@ -4859,41 +4862,20 @@ flow_hw_table_update(struct rte_eth_dev *dev,
return 0;
}
-/**
- * Translates group index specified by the user in @p attr to internal
- * group index.
- *
- * Translation is done by incrementing group index, so group n becomes n + 1.
- *
- * @param[in] dev
- * Pointer to Ethernet device.
- * @param[in] cfg
- * Pointer to the template table configuration.
- * @param[in] group
- * Currently used group index (table group or jump destination).
- * @param[out] table_group
- * Pointer to output group index.
- * @param[out] error
- * Pointer to error structure.
- *
- * @return
- * 0 on success. Otherwise, returns negative error code, rte_errno is set
- * and error structure is filled.
- */
-static int
-flow_hw_translate_group(struct rte_eth_dev *dev,
- const struct mlx5_flow_template_table_cfg *cfg,
+static inline int
+__translate_group(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *flow_attr,
+ bool external,
uint32_t group,
uint32_t *table_group,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_sh_config *config = &priv->sh->config;
- const struct rte_flow_attr *flow_attr = &cfg->attr.flow_attr;
if (config->dv_esw_en &&
priv->fdb_def_rule &&
- cfg->external &&
+ external &&
flow_attr->transfer) {
if (group > MLX5_HW_MAX_TRANSFER_GROUP)
return rte_flow_error_set(error, EINVAL,
@@ -4903,7 +4885,7 @@ flow_hw_translate_group(struct rte_eth_dev *dev,
*table_group = group + 1;
} else if (config->dv_esw_en &&
(config->repr_matching || config->dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) &&
- cfg->external &&
+ external &&
flow_attr->egress) {
/*
* On E-Switch setups, default egress flow rules are inserted to allow
@@ -4927,6 +4909,39 @@ flow_hw_translate_group(struct rte_eth_dev *dev,
return 0;
}
+/**
+ * Translates group index specified by the user in @p attr to internal
+ * group index.
+ *
+ * Translation is done by incrementing group index, so group n becomes n + 1.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] cfg
+ * Pointer to the template table configuration.
+ * @param[in] group
+ * Currently used group index (table group or jump destination).
+ * @param[out] table_group
+ * Pointer to output group index.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success. Otherwise, returns negative error code, rte_errno is set
+ * and error structure is filled.
+ */
+static int
+flow_hw_translate_group(struct rte_eth_dev *dev,
+ const struct mlx5_flow_template_table_cfg *cfg,
+ uint32_t group,
+ uint32_t *table_group,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_attr *flow_attr = &cfg->attr.flow_attr;
+
+ return __translate_group(dev, flow_attr, cfg->external, group, table_group, error);
+}
+
/**
* Create flow table.
*
@@ -7113,7 +7128,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
unsigned int act_num;
unsigned int i;
struct rte_flow_actions_template *at = NULL;
- uint16_t pos = UINT16_MAX;
+ uint16_t pos;
uint64_t action_flags = 0;
struct rte_flow_action tmp_action[MLX5_HW_MAX_ACTS];
struct rte_flow_action tmp_mask[MLX5_HW_MAX_ACTS];
@@ -8032,6 +8047,9 @@ flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx)
struct mlx5dr_table *tbl = NULL;
struct mlx5dr_action *jump;
uint32_t idx = 0;
+ MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
+ attr->transfer ? "FDB" : "NIC", attr->egress ? "egress" : "ingress",
+ attr->group, idx);
grp_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
if (!grp_data) {
@@ -8069,6 +8087,13 @@ flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx)
goto error;
grp_data->jump.root_action = jump;
}
+
+ grp_data->matchers = mlx5_list_create(matcher_name, sh, true,
+ flow_matcher_create_cb,
+ flow_matcher_match_cb,
+ flow_matcher_remove_cb,
+ flow_matcher_clone_cb,
+ flow_matcher_clone_free_cb);
grp_data->dev = dev;
grp_data->idx = idx;
grp_data->group_id = attr->group;
@@ -8112,6 +8137,7 @@ flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
if (grp_data->jump.root_action)
mlx5dr_action_destroy(grp_data->jump.root_action);
mlx5dr_table_destroy(grp_data->tbl);
+ mlx5_list_destroy(grp_data->matchers);
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
}
@@ -10387,6 +10413,10 @@ flow_hw_configure(struct rte_eth_dev *dev,
int ret = 0;
uint32_t action_flags;
+ if (mlx5dr_rule_get_handle_size() != MLX5_DR_RULE_SIZE) {
+ rte_errno = EINVAL;
+ goto err;
+ }
if (flow_hw_validate_attributes(port_attr, nb_queue, queue_attr, error))
return -rte_errno;
/*
@@ -10531,6 +10561,8 @@ flow_hw_configure(struct rte_eth_dev *dev,
rte_atomic_fetch_add_explicit(&host_priv->shared_refcnt, 1,
rte_memory_order_relaxed);
}
+ /* Set backward compatible mode to support non-template RTE flow API. */
+ dr_ctx_attr.bwc = true;
dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
/* rte_errno has been updated by HWS layer. */
if (!dr_ctx)
@@ -11981,6 +12013,402 @@ flow_hw_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
dev->data->port_id);
return flow_hw_get_q_aged_flows(dev, 0, contexts, nb_contexts, error);
}
+/**
+ * Initialization function for non template API which calls
+ * flow_hw_configure with default values.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+ /* Zero user queues are requested; one control queue is created implicitly for internal usage. */
+
+int
+flow_hw_init(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_port_attr port_attr = {0};
+ struct rte_flow_queue_attr queue_attr = {.size = MLX5_NT_DEFAULT_QUEUE_SIZE};
+ const struct rte_flow_queue_attr *attr_list = &queue_attr;
+
+ /**
+ * If user uses template and non template API:
+ * User will call flow_hw_configure and non template
+ * API will use the allocated actions.
+ * Init function will not call flow_hw_configure.
+ *
+ * If user uses only non template API's:
+ * Init function will call flow_hw_configure.
+ * It will not allocate memory for actions.
+ * When needed allocation, it will handle same as for SWS today,
+ * meaning using bulk allocations and resize as needed.
+ */
+ /* Configure hws with default values. */
+ DRV_LOG(DEBUG, "Apply default configuration, zero number of queues, inner control queue size is %u",
+ MLX5_NT_DEFAULT_QUEUE_SIZE);
+ return flow_hw_configure(dev, &port_attr, 0, &attr_list, error);
+}
+
+/**
+ * Allocate the flow object (and its trailing non-template area) from the
+ * per-type indexed pool.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] actions
+ *   Associated actions, currently unused.
+ * @param[in] type
+ *   Flow type (selects the indexed pool).
+ * @param[out] flow
+ *   On success, points to the newly allocated flow.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int flow_hw_prepare(struct rte_eth_dev *dev,
+			   const struct rte_flow_action actions[] __rte_unused,
+			   enum mlx5_flow_type type,
+			   struct rte_flow_hw **flow,
+			   struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct rte_flow_hw *hw_flow;
+	uint32_t idx = 0;
+
+	/*
+	 * In HWS mode each pool slot is sized as sizeof(struct rte_flow_hw)
+	 * + sizeof(struct rte_flow_nt2hws), so one allocation covers both.
+	 */
+	*flow = mlx5_ipool_zmalloc(priv->flows[type], &idx);
+	hw_flow = *flow;
+	if (hw_flow == NULL)
+		return rte_flow_error_set(error, ENOMEM,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "cannot allocate flow memory");
+	/* The non-template data area lives immediately after the flow object. */
+	hw_flow->nt2hws = (struct rte_flow_nt2hws *)
+			  ((uintptr_t)hw_flow + sizeof(struct rte_flow_hw));
+	hw_flow->idx = idx;
+	/* TODO: consider if other allocation is needed for actions translate. */
+	return 0;
+}
+
+/*
+ * Translate rte_flow actions into HWS rule actions.
+ * Placeholder that always succeeds; the real translation is added in a
+ * follow-up patch of this series.
+ */
+static int flow_hw_translate_actions(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_attr *attr __rte_unused,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_hw *flow __rte_unused,
+ struct rte_flow_error *error __rte_unused)
+{
+ /* TODO implement */
+ return 0;
+}
+
+/**
+ * Register the flow group and the matcher for a non-template flow rule.
+ *
+ * Looks up (or creates) the group in the shared group hash list, then
+ * registers the matcher in the group's matcher list and lazily creates the
+ * HWS backward-compatible matcher object on first use.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] items
+ *   Pattern specification used to create the matcher object.
+ * @param[in] external
+ *   True if the rule comes from an application request.
+ * @param[in, out] flow
+ *   Flow whose nt2hws->matcher is set to the registered matcher.
+ * @param[in, out] matcher
+ *   Matcher template; crc/priority/group are filled here.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int flow_hw_register_matcher(struct rte_eth_dev *dev,
+				    const struct rte_flow_attr *attr,
+				    const struct rte_flow_item items[],
+				    bool external,
+				    struct rte_flow_hw *flow,
+				    struct mlx5_flow_dv_matcher *matcher,
+				    struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct rte_flow_error sub_error = {
+		.type = RTE_FLOW_ERROR_TYPE_NONE,
+		.cause = NULL,
+		.message = NULL,
+	};
+	struct rte_flow_attr flow_attr = *attr;
+	struct mlx5_flow_cb_ctx ctx = {
+		.dev = dev,
+		.error = &sub_error,
+		.data = &flow_attr,
+	};
+	struct mlx5_flow_cb_ctx matcher_ctx = {
+		.error = &sub_error,
+		.data = matcher,
+	};
+	struct mlx5_list_entry *group_entry;
+	struct mlx5_list_entry *matcher_entry;
+	struct mlx5_flow_dv_matcher *resource;
+	struct mlx5_list *matchers_list;
+	struct mlx5_flow_group *flow_group;
+	uint32_t group = 0;
+	int ret;
+
+	matcher->crc = rte_raw_cksum((const void *)matcher->mask.buf,
+				     matcher->mask.size);
+	matcher->priority = mlx5_get_matcher_priority(dev, attr,
+						      matcher->priority,
+						      external);
+	ret = __translate_group(dev, attr, external, attr->group, &group, error);
+	if (ret)
+		return ret;
+	/* Register the flow group. */
+	group_entry = mlx5_hlist_register(priv->sh->groups, group, &ctx);
+	if (!group_entry)
+		goto error;
+	flow_group = container_of(group_entry, struct mlx5_flow_group, entry);
+	matchers_list = flow_group->matchers;
+	matcher->group = flow_group;
+	matcher_entry = mlx5_list_register(matchers_list, &matcher_ctx);
+	if (!matcher_entry)
+		goto error;
+	resource = container_of(matcher_entry, typeof(*resource), entry);
+	flow->nt2hws->matcher = resource;
+	/* If the matcher was not reused from the list, create its HWS object. */
+	if (!resource->matcher_object)
+		resource->matcher_object = (void *)mlx5dr_bwc_matcher_create
+			(flow_group->tbl, matcher->priority, items);
+	if (!resource->matcher_object)
+		goto error;
+	return 0;
+
+error:
+	/* TODO(review): the group reference taken above is not released here. */
+	if (error) {
+		/*
+		 * Prefer the more specific error reported by a callback;
+		 * do not overwrite it with the generic message.
+		 */
+		if (sub_error.type != RTE_FLOW_ERROR_TYPE_NONE) {
+			rte_memcpy(error, &sub_error, sizeof(sub_error));
+			return -rte_errno;
+		}
+		return rte_flow_error_set(error, ENOMEM,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  NULL, "fail to register matcher");
+	}
+	return -ENOMEM;
+}
+
+/*
+ * Insert the translated flow as an HWS backward-compatible rule.
+ * The created rule handle is stored in flow->nt2hws->nt_rule (NULL on
+ * failure, which the caller's zero-initialized flow already holds).
+ */
+static int flow_hw_apply(struct rte_eth_dev *dev __rte_unused, /* TODO: remove if not used */
+			 const struct rte_flow_item items[],
+			 struct mlx5dr_rule_action rule_actions[],
+			 struct rte_flow_hw *flow,
+			 struct rte_flow_error *error)
+{
+	struct mlx5dr_bwc_matcher *bwc_matcher =
+		(struct mlx5dr_bwc_matcher *)flow->nt2hws->matcher->matcher_object;
+	struct mlx5dr_bwc_rule *rule;
+
+	rule = mlx5dr_bwc_rule_create(bwc_matcher, items, rule_actions);
+	flow->nt2hws->nt_rule = rule;
+	if (rule != NULL)
+		return 0;
+	if (error)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  NULL, "fail to create rte flow");
+	return -EINVAL;
+}
+
+#ifdef HAVE_MLX5_HWS_SUPPORT
+/**
+ * Create a flow.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] type
+ * Flow type.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] items
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[in] external
+ * This flow rule is created by request external to PMD.
+ * @param[out] flow
+ * Flow pointer
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno set.
+ */
+static int flow_hw_create_flow(struct rte_eth_dev *dev,
+ enum mlx5_flow_type type,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ bool external,
+ struct rte_flow_hw **flow,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct mlx5_hw_actions hw_act;
+ struct mlx5_flow_dv_matcher matcher = {
+ .mask = {
+ .size = sizeof(matcher.mask.buf),
+ },
+ };
+ uint32_t tbl_type;
+
+ struct mlx5_flow_attr flow_attr = {
+ .port_id = dev->data->port_id,
+ .group = attr->group,
+ .priority = attr->priority,
+ .rss_level = 0,
+ /*
+ * TODO: currently only mlx5_flow_lacp_miss rule is relevant:
+ * action type=(enum rte_flow_action_type) MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS.
+ * I don't want to waste time going over all actions for this corner case.
+ * Needs to use another preparation code to update this action flags.
+ * if (action_type == (enum rte_flow_action_type)
+ * MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS)
+ * act_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
+ */
+ .act_flags = 0, /*TODO update*/
+ .tbl_type = 0,
+ };
+
+ memset(&hw_act, 0, sizeof(hw_act));
+ /* Select the HWS table type from the rule attributes. */
+ if (attr->transfer)
+ tbl_type = MLX5DR_TABLE_TYPE_FDB;
+ else if (attr->egress)
+ tbl_type = MLX5DR_TABLE_TYPE_NIC_TX;
+ else
+ tbl_type = MLX5DR_TABLE_TYPE_NIC_RX;
+ flow_attr.tbl_type = tbl_type;
+
+ /* Allocate needed memory. */
+ ret = flow_hw_prepare(dev, actions, type, flow, error);
+ if (ret)
+ goto error;
+
+ /* TODO TBD flow_hw_handle_tunnel_offload(). */
+
+ /*
+ * NOTE(review): temporary alias to the stack-allocated matcher; it is
+ * replaced by the registered list entry in flow_hw_register_matcher().
+ * Confirm no path keeps this stack pointer past this function's return.
+ */
+ (*flow)->nt2hws->matcher = &matcher;
+ ret = __flow_dv_translate_items_hws(items, &flow_attr, &matcher.mask.buf,
+ MLX5_SET_MATCHER_HS_M, 0,
+ NULL, true, error);
+ if (ret)
+ goto error;
+
+ ret = flow_hw_register_matcher(dev, attr, items, external, *flow, &matcher, error);
+ if (ret)
+ goto error;
+
+ ret = flow_hw_translate_actions(dev, attr, actions, *flow, error);
+ if (ret)
+ goto error;
+
+ /*
+ * If the flow is external (from application) OR device is started,
+ * OR mreg discover, then apply immediately.
+ */
+ if (external || dev->data->dev_started ||
+ (attr->group == MLX5_FLOW_MREG_CP_TABLE_GROUP &&
+ attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)) {
+ ret = flow_hw_apply(dev, items, hw_act.rule_acts, *flow, error);
+ if (ret)
+ goto error;
+ }
+ return 0;
+
+error:
+ /* NOTE(review): this overwrites any more specific error set by a callee. */
+ if (error)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "fail to create rte flow");
+ else
+ return -EINVAL;
+}
+#endif
+
+/**
+ * Release the HW steering resources held by a non-template flow.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] flow
+ *   Flow to release; NULL or a flow without nt2hws data is a no-op.
+ */
+static void
+flow_hw_destroy(struct rte_eth_dev *dev, struct rte_flow_hw *flow)
+{
+	int ret;
+
+	if (!flow || !flow->nt2hws)
+		return;
+
+	if (flow->nt2hws->nt_rule) {
+		ret = mlx5dr_bwc_rule_destroy(flow->nt2hws->nt_rule);
+		/* Log only on failure (the original condition was inverted). */
+		if (unlikely(ret))
+			DRV_LOG(ERR, "bwc rule destroy failed");
+	}
+
+	/* TODO: notice this function does not handle shared/static actions. */
+	hw_cmpl_flow_update_or_destroy(dev, NULL, 0, NULL);
+
+	/**
+	 * TODO: TBD - Release tunnel related memory allocations(mlx5_flow_tunnel_free)
+	 * - needed only if supporting tunnel offloads, notice update RX queue flags in SWS.
+	 */
+
+	/*
+	 * The matcher is destroyed together with its group's matcher list,
+	 * same as for DV.
+	 */
+}
+
+#ifdef HAVE_MLX5_HWS_SUPPORT
+/**
+ * Destroy a non-template flow identified by its pool index.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] type
+ *   Flow type (selects the indexed pool).
+ * @param[in] flow_idx
+ *   Index of flow to destroy.
+ */
+static void flow_hw_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
+				 uint32_t flow_idx)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct rte_flow_hw *flow;
+
+	/* Resolve the flow object from its index. */
+	flow = mlx5_ipool_get(priv->flows[type], flow_idx);
+	DRV_LOG(DEBUG, "Non template flow index %u destroy", flow_idx);
+	if (flow == NULL)
+		return;
+	flow_hw_destroy(dev, flow);
+	/* Return the slot allocated in flow_hw_prepare(). */
+	mlx5_ipool_free(priv->flows[type], flow_idx);
+}
+#endif
+
+/**
+ * Create a flow.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] type
+ *   Flow type.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] items
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[in] external
+ *   This flow rule is created by request external to PMD.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   A flow index on success, 0 otherwise and rte_errno is set.
+ */
+static uint32_t flow_hw_list_create(struct rte_eth_dev *dev,
+				    enum mlx5_flow_type type,
+				    const struct rte_flow_attr *attr,
+				    const struct rte_flow_item items[],
+				    const struct rte_flow_action actions[],
+				    bool external,
+				    struct rte_flow_error *error)
+{
+	struct rte_flow_hw *flow = NULL;
+	int ret;
+
+	/* TODO: Handle split/expand to num_flows. */
+
+	DRV_LOG(DEBUG, "Non template flow creation");
+	/* Create a single flow rule. */
+	ret = flow_hw_create_flow(dev, type, attr, items, actions, external,
+				  &flow, error);
+	if (ret == 0 && flow != NULL)
+		return flow->idx;
+	/* Creation failed: release the partially built flow, if any. */
+	if (ret != 0 && flow != NULL)
+		flow_hw_list_destroy(dev, type, flow->idx);
+	return 0;
+}
static void
mlx5_mirror_destroy_clone(struct rte_eth_dev *dev,
@@ -12971,6 +13399,9 @@ flow_hw_update_resized(struct rte_eth_dev *dev, uint32_t queue,
}
const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
+ .list_create = flow_hw_list_create,
+ .list_destroy = flow_hw_list_destroy,
+ .validate = flow_dv_validate,
.info_get = flow_hw_info_get,
.configure = flow_hw_configure,
.pattern_validate = flow_hw_pattern_validate,
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index fe9c818abc..6324ee3e3c 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -2183,6 +2183,8 @@ flow_verbs_sync_domain(struct rte_eth_dev *dev, uint32_t domains,
}
const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
+ .list_create = flow_legacy_list_create,
+ .list_destroy = flow_legacy_list_destroy,
.validate = flow_verbs_validate,
.prepare = flow_verbs_prepare,
.translate = flow_verbs_translate,
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index c241a1dbd7..6fa7c01cd0 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1158,6 +1158,12 @@ mlx5_dev_start(struct rte_eth_dev *dev)
DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
#ifdef HAVE_MLX5_HWS_SUPPORT
if (priv->sh->config.dv_flow_en == 2) {
+ /* If no previous configuration exists, apply the default one. */
+ if (!(priv->dr_ctx)) {
+ ret = flow_hw_init(dev, NULL);
+ if (ret)
+ return ret;
+ }
/* If there is no E-Switch, then there are no start/stop order limitations. */
if (!priv->sh->config.dv_esw_en)
goto continue_dev_start;
diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index b731bdff06..98022ed3c7 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -35,7 +35,7 @@ static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
/* rte flow indexed pool configuration. */
-static struct mlx5_indexed_pool_config icfg[] = {
+static const struct mlx5_indexed_pool_config default_icfg[] = {
{
.size = sizeof(struct rte_flow),
.trunk_size = 64,
@@ -352,7 +352,9 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
int own_domain_id = 0;
uint16_t port_id;
int i;
+ struct mlx5_indexed_pool_config icfg[RTE_DIM(default_icfg)];
+ memcpy(icfg, default_icfg, sizeof(icfg));
/* Build device name. */
strlcpy(name, dpdk_dev->name, sizeof(name));
/* check if the device is already spawned */
@@ -538,6 +540,10 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
icfg[i].release_mem_en = !!sh->config.reclaim_mode;
if (sh->config.reclaim_mode)
icfg[i].per_core_cache = 0;
+#ifdef HAVE_MLX5_HWS_SUPPORT
+ if (priv->sh->config.dv_flow_en == 2)
+ icfg[i].size = sizeof(struct rte_flow_hw) + sizeof(struct rte_flow_nt2hws);
+#endif
priv->flows[i] = mlx5_ipool_create(&icfg[i]);
if (!priv->flows[i])
goto error;
--
2.21.0
next prev parent reply other threads:[~2024-06-06 10:23 UTC|newest]
Thread overview: 69+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-06-02 10:28 [PATCH " Maayan Kashani
2024-06-03 8:04 ` [PATCH v2 05/34] " Maayan Kashani
2024-06-03 8:04 ` [PATCH v2 06/34] net/mlx5: add dummy last action Maayan Kashani
2024-06-03 8:04 ` [PATCH v2 07/34] net/mlx5: add basic actions support for non-template API Maayan Kashani
2024-06-03 8:04 ` [PATCH v2 08/34] net/mlx5: add default miss action support in nt2hws mode Maayan Kashani
2024-06-03 8:04 ` [PATCH v2 09/34] net/mlx5: add ASO actions support to non-template mode Maayan Kashani
2024-06-03 8:04 ` [PATCH v2 10/34] net/mlx5: fix segfault on counter pool destroy Maayan Kashani
2024-06-03 8:05 ` [PATCH v2 11/34] net/mlx5: abstract action handling and enable reconfigure Maayan Kashani
2024-06-03 8:05 ` [PATCH v2 12/34] common/mlx5: read connection tracking attributes Maayan Kashani
2024-06-03 8:05 ` [PATCH v2 13/34] net/mlx5: support bulk actions in non template mode Maayan Kashani
2024-06-03 8:05 ` [PATCH v2 14/34] net/mlx5: use non const max number for ASO actions Maayan Kashani
2024-06-03 8:05 ` [PATCH v2 15/34] net/mlx5: initial design changes Maayan Kashani
2024-06-03 10:48 ` [PATCH v3 01/11] net/mlx5: initial design of non template to hws Maayan Kashani
2024-06-03 10:48 ` [PATCH v3 02/11] net/mlx5: add dummy last action Maayan Kashani
2024-06-03 10:48 ` [PATCH v3 03/11] net/mlx5: add basic actions support for non-template API Maayan Kashani
2024-06-03 10:48 ` [PATCH v3 04/11] net/mlx5: add default miss action support in nt2hws mode Maayan Kashani
2024-06-03 10:48 ` [PATCH v3 05/11] net/mlx5: add ASO actions support to non-template mode Maayan Kashani
2024-06-03 10:48 ` [PATCH v3 06/11] net/mlx5: fix segfault on counter pool destroy Maayan Kashani
2024-06-03 10:48 ` [PATCH v3 07/11] net/mlx5: abstract action handling and enable reconfigure Maayan Kashani
2024-06-03 10:48 ` [PATCH v3 08/11] common/mlx5: read connection tracking attributes Maayan Kashani
2024-06-03 10:48 ` [PATCH v3 09/11] net/mlx5: support bulk actions in non template mode Maayan Kashani
2024-06-03 10:48 ` [PATCH v3 10/11] net/mlx5: use non const max number for ASO actions Maayan Kashani
2024-06-03 10:48 ` [PATCH v3 11/11] net/mlx5: initial design changes Maayan Kashani
2024-06-06 10:23 ` [PATCH v4 00/11] non-template pmd basic func Maayan Kashani
2024-06-06 10:23 ` Maayan Kashani [this message]
2024-06-06 10:23 ` [PATCH v4 02/11] net/mlx5: add dummy last action Maayan Kashani
2024-06-06 10:23 ` [PATCH v4 03/11] net/mlx5: add basic actions support for non-template API Maayan Kashani
2024-06-06 10:23 ` [PATCH v4 04/11] net/mlx5: add default miss action support in nt2hws mode Maayan Kashani
2024-06-06 10:23 ` [PATCH v4 05/11] net/mlx5: add ASO actions support to non-template mode Maayan Kashani
2024-06-06 10:23 ` [PATCH v4 06/11] net/mlx5: fix segfault on counter pool destroy Maayan Kashani
2024-06-06 10:23 ` [PATCH v4 07/11] net/mlx5: abstract action handling and enable reconfigure Maayan Kashani
2024-06-06 10:23 ` [PATCH v4 08/11] common/mlx5: read connection tracking attributes Maayan Kashani
2024-06-06 10:23 ` [PATCH v4 09/11] net/mlx5: support bulk actions in non template mode Maayan Kashani
2024-06-06 10:23 ` [PATCH v4 10/11] net/mlx5: use non const max number for ASO actions Maayan Kashani
2024-06-06 10:23 ` [PATCH v4 11/11] net/mlx5: initial design changes Maayan Kashani
2024-06-06 12:32 ` [PATCH v5 01/11] net/mlx5: initial design of non template to hws Maayan Kashani
2024-06-06 12:32 ` [PATCH v5 02/11] net/mlx5: add dummy last action Maayan Kashani
2024-06-06 12:32 ` [PATCH v5 03/11] net/mlx5: add basic actions support for non-template API Maayan Kashani
2024-06-06 12:32 ` [PATCH v5 04/11] net/mlx5: add default miss action support in nt2hws mode Maayan Kashani
2024-06-06 12:32 ` [PATCH v5 05/11] net/mlx5: add ASO actions support to non-template mode Maayan Kashani
2024-06-06 12:32 ` [PATCH v5 06/11] net/mlx5: fix segfault on counter pool destroy Maayan Kashani
2024-06-06 12:32 ` [PATCH v5 07/11] net/mlx5: abstract action handling and enable reconfigure Maayan Kashani
2024-06-06 12:32 ` [PATCH v5 08/11] common/mlx5: read connection tracking attributes Maayan Kashani
2024-06-06 12:32 ` [PATCH v5 09/11] net/mlx5: support bulk actions in non template mode Maayan Kashani
2024-06-06 12:32 ` [PATCH v5 10/11] net/mlx5: use non const max number for ASO actions Maayan Kashani
2024-06-06 12:32 ` [PATCH v5 11/11] net/mlx5: initial design changes Maayan Kashani
2024-06-09 8:55 ` [PATCH v6 01/11] net/mlx5: initial design of non template to hws Maayan Kashani
2024-06-09 8:55 ` [PATCH v6 02/11] net/mlx5: add dummy last action Maayan Kashani
2024-06-09 8:55 ` [PATCH v6 03/11] net/mlx5: add basic actions support for non-template API Maayan Kashani
2024-06-09 8:55 ` [PATCH v6 04/11] net/mlx5: add default miss action support in nt2hws mode Maayan Kashani
2024-06-09 8:55 ` [PATCH v6 05/11] net/mlx5: add ASO actions support to non-template mode Maayan Kashani
2024-06-09 8:55 ` [PATCH v6 06/11] net/mlx5: fix segfault on counter pool destroy Maayan Kashani
2024-06-09 8:55 ` [PATCH v6 07/11] net/mlx5: abstract action handling and enable reconfigure Maayan Kashani
2024-06-09 8:55 ` [PATCH v6 08/11] common/mlx5: read connection tracking attributes Maayan Kashani
2024-06-09 8:55 ` [PATCH v6 09/11] net/mlx5: support bulk actions in non template mode Maayan Kashani
2024-06-09 8:55 ` [PATCH v6 10/11] net/mlx5: use non const max number for ASO actions Maayan Kashani
2024-06-09 8:56 ` [PATCH v6 11/11] net/mlx5: initial design changes Maayan Kashani
2024-06-09 11:00 ` [PATCH v7 01/11] net/mlx5: initial design of non template to hws Maayan Kashani
2024-06-09 11:00 ` [PATCH v7 02/11] net/mlx5: add dummy last action Maayan Kashani
2024-06-09 11:00 ` [PATCH v7 03/11] net/mlx5: add basic actions support for non-template API Maayan Kashani
2024-06-09 11:01 ` [PATCH v7 04/11] net/mlx5: add default miss action support in nt2hws mode Maayan Kashani
2024-06-09 11:01 ` [PATCH v7 05/11] net/mlx5: add ASO actions support to non-template mode Maayan Kashani
2024-06-09 11:01 ` [PATCH v7 06/11] net/mlx5: fix segfault on counter pool destroy Maayan Kashani
2024-06-09 11:01 ` [PATCH v7 07/11] net/mlx5: abstract action handling and enable reconfigure Maayan Kashani
2024-06-09 11:01 ` [PATCH v7 08/11] common/mlx5: read connection tracking attributes Maayan Kashani
2024-06-09 11:01 ` [PATCH v7 09/11] net/mlx5: support bulk actions in non template mode Maayan Kashani
2024-06-09 11:01 ` [PATCH v7 10/11] net/mlx5: use non const max number for ASO actions Maayan Kashani
2024-06-09 11:01 ` [PATCH v7 11/11] net/mlx5: initial design changes Maayan Kashani
2024-06-10 8:34 ` [PATCH v7 01/11] net/mlx5: initial design of non template to hws Raslan Darawsheh
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240606102317.172553-2-mkashani@nvidia.com \
--to=mkashani@nvidia.com \
--cc=dev@dpdk.org \
--cc=dsosnowski@nvidia.com \
--cc=matan@nvidia.com \
--cc=orika@nvidia.com \
--cc=rasland@nvidia.com \
--cc=suanmingm@nvidia.com \
--cc=viacheslavo@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).