From: Suanming Mou <suanmingm@nvidia.com>
To: <viacheslavo@nvidia.com>, <matan@nvidia.com>
Cc: <rasland@nvidia.com>, <orika@nvidia.com>, <dev@dpdk.org>
Subject: [PATCH 10/13] net/mlx5: add queue and RSS action
Date: Thu, 10 Feb 2022 18:29:23 +0200
Message-ID: <20220210162926.20436-11-suanmingm@nvidia.com>
In-Reply-To: <20220210162926.20436-1-suanmingm@nvidia.com>
This commit adds the queue and RSS actions. As with the jump action,
dynamic ones are appended to the action construct list and created at
flow insertion time.
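
For reference, a condensed sketch of the translate-time decision (the
full version is in the flow_hw_actions_translate() hunk below; the two
action types have identical bodies there and are merged here for
brevity):

	case RTE_FLOW_ACTION_TYPE_QUEUE:
	case RTE_FLOW_ACTION_TYPE_RSS:
		if (masks->conf) {
			/* Fully masked: create the TIR action once at
			 * template translation time.
			 */
			acts->tir = flow_hw_tir_action_register
				(dev,
				 mlx5_hw_act_flag[!!attr->group][type],
				 actions);
			if (!acts->tir)
				goto err;
			acts->rule_acts[i].action = acts->tir->action;
		} else {
			/* Unmasked: record on the construct list and
			 * defer creation to flow insertion time.
			 */
			if (__flow_hw_act_data_general_append
					(priv, acts, actions->type,
					 actions - action_start, i))
				goto err;
		}
		i++;
		break;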
Since the queue and RSS actions used in a template should not be
destroyed during port restart, the actions are created with a
standalone indirect table, just as indirect actions are. When the port
stops, the indirect table is detached from the action; when the port
starts, it is attached back to the action.
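
A minimal sketch of the attach path, condensed from the
mlx5_action_handle_attach() hunk below (the detach path is symmetric;
the error unroll that detaches already-attached tables is omitted):

	struct mlx5_ind_table_obj *ind_tbl;
	int ret;

	/* On port start, walk the standalone indirection tables and
	 * attach each one back to its action.
	 */
	LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
		ret = mlx5_ind_table_obj_attach(dev, ind_tbl);
		if (ret != 0)
			goto error;
	}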
One more change is made to accelerate action creation. Currently the
mlx5_hrxq_get() function returns the object index instead of the object
pointer. This forces an extra conversion from index to object via
mlx5_ipool_get() in most cases, and that conversion hurts multi-thread
performance since mlx5_ipool_get() takes a global lock internally. As
the hash Rx queue object itself also contains the index, returning the
object directly achieves better performance without the global lock.
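
The caller-side difference, condensed from the flow_dv_hrxq_prepare()
hunk below:

	/* Before: look up the index, then convert it back to the
	 * object through the globally locked ipool.
	 */
	*hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
			      *hrxq_idx);

	/* After: get the object directly; the index remains available
	 * as hrxq->idx for callers that still need it.
	 */
	hrxq = mlx5_hrxq_get(dev, rss_desc);
	*hrxq_idx = hrxq ? hrxq->idx : 0;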
Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/linux/mlx5_os.c | 18 ++--
drivers/net/mlx5/mlx5.h | 4 +
drivers/net/mlx5/mlx5_devx.c | 10 ++
drivers/net/mlx5/mlx5_flow.c | 38 +++-----
drivers/net/mlx5/mlx5_flow.h | 7 ++
drivers/net/mlx5/mlx5_flow_dv.c | 150 ++++++++++++++---------------
drivers/net/mlx5/mlx5_flow_hw.c | 101 +++++++++++++++++++
drivers/net/mlx5/mlx5_flow_verbs.c | 7 +-
drivers/net/mlx5/mlx5_rx.h | 9 +-
drivers/net/mlx5/mlx5_rxq.c | 78 +++++++++------
10 files changed, 271 insertions(+), 151 deletions(-)
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 52e52a4ad7..8f0b15aad0 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1714,6 +1714,15 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
priv->drop_queue.hrxq = mlx5_drop_action_create(eth_dev);
if (!priv->drop_queue.hrxq)
goto error;
+ priv->hrxqs = mlx5_list_create("hrxq", eth_dev, true,
+ mlx5_hrxq_create_cb,
+ mlx5_hrxq_match_cb,
+ mlx5_hrxq_remove_cb,
+ mlx5_hrxq_clone_cb,
+ mlx5_hrxq_clone_free_cb);
+ if (!priv->hrxqs)
+ goto error;
+ rte_rwlock_init(&priv->ind_tbls_lock);
if (priv->config.dv_flow_en == 2)
return eth_dev;
/* Port representor shares the same max priority with pf port. */
@@ -1744,15 +1753,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
err = ENOTSUP;
goto error;
}
- priv->hrxqs = mlx5_list_create("hrxq", eth_dev, true,
- mlx5_hrxq_create_cb,
- mlx5_hrxq_match_cb,
- mlx5_hrxq_remove_cb,
- mlx5_hrxq_clone_cb,
- mlx5_hrxq_clone_free_cb);
- if (!priv->hrxqs)
- goto error;
- rte_rwlock_init(&priv->ind_tbls_lock);
/* Query availability of metadata reg_c's. */
if (!priv->sh->metadata_regc_check_flag) {
err = mlx5_flow_discover_mreg_c(eth_dev);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 0bc9897101..6fb82bf1f3 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1286,6 +1286,7 @@ struct mlx5_flow_rss_desc {
uint64_t hash_fields; /* Verbs Hash fields. */
uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
uint32_t key_len; /**< RSS hash key len. */
+ uint32_t hws_flags; /**< HW steering action. */
uint32_t tunnel; /**< Queue in tunnel. */
uint32_t shared_rss; /**< Shared RSS index. */
struct mlx5_ind_table_obj *ind_tbl;
@@ -1347,6 +1348,7 @@ struct mlx5_hrxq {
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
void *action; /* DV QP action pointer. */
#endif
+ uint32_t hws_flags; /* HW steering flags. */
uint64_t hash_fields; /* Verbs Hash fields. */
uint32_t rss_key_len; /* Hash key length in bytes. */
uint32_t idx; /* Hash Rx queue index. */
@@ -1477,6 +1479,8 @@ struct mlx5_priv {
LIST_HEAD(txqobj, mlx5_txq_obj) txqsobj; /* Verbs/DevX Tx queues. */
/* Indirection tables. */
LIST_HEAD(ind_tables, mlx5_ind_table_obj) ind_tbls;
+ /* Standalone indirect tables. */
+ LIST_HEAD(stdl_ind_tables, mlx5_ind_table_obj) standalone_ind_tbls;
/* Pointer to next element. */
rte_rwlock_t ind_tbls_lock;
uint32_t refcnt; /**< Reference counter. */
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index 91243f684f..af131bcd1b 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -807,6 +807,14 @@ mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
goto error;
}
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
+ if (hrxq->hws_flags) {
+ hrxq->action = mlx5dr_action_create_dest_tir
+ (priv->dr_ctx,
+ (struct mlx5dr_devx_obj *)hrxq->tir, hrxq->hws_flags);
+ if (!hrxq->action)
+ goto error;
+ return 0;
+ }
if (mlx5_flow_os_create_flow_action_dest_devx_tir(hrxq->tir,
&hrxq->action)) {
rte_errno = errno;
@@ -1042,6 +1050,8 @@ mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
DRV_LOG(ERR, "Cannot create drop RX queue");
return ret;
}
+ if (priv->config.dv_flow_en == 2)
+ return 0;
/* hrxq->ind_table queues are NULL, drop RX queue ID will be used */
ret = mlx5_devx_ind_table_new(dev, 0, hrxq->ind_table);
if (ret != 0) {
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 9ac96ac979..9cad84ebc6 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -9302,14 +9302,10 @@ int
mlx5_action_handle_attach(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_indexed_pool *ipool =
- priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS];
- struct mlx5_shared_action_rss *shared_rss, *shared_rss_last;
int ret = 0;
- uint32_t idx;
+ struct mlx5_ind_table_obj *ind_tbl, *ind_tbl_last;
- ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
- struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+ LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
const char *message;
uint32_t queue_idx;
@@ -9325,9 +9321,7 @@ mlx5_action_handle_attach(struct rte_eth_dev *dev)
}
if (ret != 0)
return ret;
- ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
- struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
-
+ LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
ret = mlx5_ind_table_obj_attach(dev, ind_tbl);
if (ret != 0) {
DRV_LOG(ERR, "Port %u could not attach "
@@ -9336,13 +9330,12 @@ mlx5_action_handle_attach(struct rte_eth_dev *dev)
goto error;
}
}
+
return 0;
error:
- shared_rss_last = shared_rss;
- ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
- struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
-
- if (shared_rss == shared_rss_last)
+ ind_tbl_last = ind_tbl;
+ LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
+ if (ind_tbl == ind_tbl_last)
break;
if (mlx5_ind_table_obj_detach(dev, ind_tbl) != 0)
DRV_LOG(CRIT, "Port %u could not detach "
@@ -9365,15 +9358,10 @@ int
mlx5_action_handle_detach(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_indexed_pool *ipool =
- priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS];
- struct mlx5_shared_action_rss *shared_rss, *shared_rss_last;
int ret = 0;
- uint32_t idx;
-
- ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
- struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl, *ind_tbl_last;
+ LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
ret = mlx5_ind_table_obj_detach(dev, ind_tbl);
if (ret != 0) {
DRV_LOG(ERR, "Port %u could not detach "
@@ -9384,11 +9372,9 @@ mlx5_action_handle_detach(struct rte_eth_dev *dev)
}
return 0;
error:
- shared_rss_last = shared_rss;
- ILIST_FOREACH(ipool, priv->rss_shared_actions, idx, shared_rss, next) {
- struct mlx5_ind_table_obj *ind_tbl = shared_rss->ind_tbl;
-
- if (shared_rss == shared_rss_last)
+ ind_tbl_last = ind_tbl;
+ LIST_FOREACH(ind_tbl, &priv->standalone_ind_tbls, next) {
+ if (ind_tbl == ind_tbl_last)
break;
if (mlx5_ind_table_obj_attach(dev, ind_tbl) != 0)
DRV_LOG(CRIT, "Port %u could not attach "
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index a1ab9173d9..33094c8c07 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1022,6 +1022,7 @@ struct rte_flow_hw {
union {
/* Jump action. */
struct mlx5_hw_jump_action *jump;
+ struct mlx5_hrxq *hrxq; /* TIR action. */
};
struct rte_flow_template_table *table; /* The table flow allcated from. */
struct mlx5dr_rule rule; /* HWS layer data struct. */
@@ -1077,6 +1078,7 @@ struct mlx5_hw_actions {
/* Dynamic action list. */
LIST_HEAD(act_list, mlx5_action_construct_data) act_list;
struct mlx5_hw_jump_action *jump; /* Jump action. */
+ struct mlx5_hrxq *tir; /* TIR action. */
uint32_t acts_num:4; /* Total action number. */
/* Translated DR action array from action template. */
struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
@@ -1907,6 +1909,11 @@ int flow_dv_query_count_ptr(struct rte_eth_dev *dev, uint32_t cnt_idx,
int
flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
struct rte_flow_error *error);
+void flow_dv_hashfields_set(uint64_t item_flags,
+ struct mlx5_flow_rss_desc *rss_desc,
+ uint64_t *hash_fields);
+void flow_dv_action_rss_l34_hash_adjust(uint64_t rss_types,
+ uint64_t *hash_field);
struct mlx5_list_entry *flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx);
void flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index ef9c66eddf..c3d9d30dba 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -10966,78 +10966,83 @@ flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
/**
* Set the hash fields according to the @p flow information.
*
- * @param[in] dev_flow
- * Pointer to the mlx5_flow.
+ * @param[in] item_flags
+ * The match pattern item flags.
* @param[in] rss_desc
* Pointer to the mlx5_flow_rss_desc.
+ * @param[out] hash_fields
+ * Pointer to the RSS hash fields.
*/
-static void
-flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
- struct mlx5_flow_rss_desc *rss_desc)
+void
+flow_dv_hashfields_set(uint64_t item_flags,
+ struct mlx5_flow_rss_desc *rss_desc,
+ uint64_t *hash_fields)
{
- uint64_t items = dev_flow->handle->layers;
+ uint64_t items = item_flags;
+ uint64_t fields = 0;
int rss_inner = 0;
uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
- dev_flow->hash_fields = 0;
+ *hash_fields = 0;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
if (rss_desc->level >= 2)
rss_inner = 1;
#endif
if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
- (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4)) ||
+ !items) {
if (rss_types & MLX5_IPV4_LAYER_TYPES) {
if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
- dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
+ fields |= IBV_RX_HASH_SRC_IPV4;
else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
- dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
+ fields |= IBV_RX_HASH_DST_IPV4;
else
- dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
+ fields |= MLX5_IPV4_IBV_RX_HASH;
}
} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
- (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6)) ||
+ !items) {
if (rss_types & MLX5_IPV6_LAYER_TYPES) {
if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
- dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
+ fields |= IBV_RX_HASH_SRC_IPV6;
else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
- dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
+ fields |= IBV_RX_HASH_DST_IPV6;
else
- dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
+ fields |= MLX5_IPV6_IBV_RX_HASH;
}
}
- if (dev_flow->hash_fields == 0)
+ if (fields == 0)
/*
* There is no match between the RSS types and the
* L3 protocol (IPv4/IPv6) defined in the flow rule.
*/
return;
if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
- (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP)) ||
+ !items) {
if (rss_types & RTE_ETH_RSS_UDP) {
if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
- dev_flow->hash_fields |=
- IBV_RX_HASH_SRC_PORT_UDP;
+ fields |= IBV_RX_HASH_SRC_PORT_UDP;
else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
- dev_flow->hash_fields |=
- IBV_RX_HASH_DST_PORT_UDP;
+ fields |= IBV_RX_HASH_DST_PORT_UDP;
else
- dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
+ fields |= MLX5_UDP_IBV_RX_HASH;
}
} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
- (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP)) ||
+ !items) {
if (rss_types & RTE_ETH_RSS_TCP) {
if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
- dev_flow->hash_fields |=
- IBV_RX_HASH_SRC_PORT_TCP;
+ fields |= IBV_RX_HASH_SRC_PORT_TCP;
else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
- dev_flow->hash_fields |=
- IBV_RX_HASH_DST_PORT_TCP;
+ fields |= IBV_RX_HASH_DST_PORT_TCP;
else
- dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
+ fields |= MLX5_TCP_IBV_RX_HASH;
}
}
if (rss_inner)
- dev_flow->hash_fields |= IBV_RX_HASH_INNER;
+ fields |= IBV_RX_HASH_INNER;
+ *hash_fields = fields;
}
/**
@@ -11061,7 +11066,6 @@ flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
struct mlx5_flow_rss_desc *rss_desc,
uint32_t *hrxq_idx)
{
- struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_handle *dh = dev_flow->handle;
struct mlx5_hrxq *hrxq;
@@ -11072,11 +11076,8 @@ flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
rss_desc->shared_rss = 0;
if (rss_desc->hash_fields == 0)
rss_desc->queue_num = 1;
- *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
- if (!*hrxq_idx)
- return NULL;
- hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
- *hrxq_idx);
+ hrxq = mlx5_hrxq_get(dev, rss_desc);
+ *hrxq_idx = hrxq ? hrxq->idx : 0;
return hrxq;
}
@@ -11622,7 +11623,9 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev,
* rss->level and rss.types should be set in advance
* when expanding items for RSS.
*/
- flow_dv_hashfields_set(dev_flow, rss_desc);
+ flow_dv_hashfields_set(dev_flow->handle->layers,
+ rss_desc,
+ &dev_flow->hash_fields);
hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
rss_desc, &hrxq_idx);
if (!hrxq)
@@ -13647,7 +13650,9 @@ flow_dv_translate(struct rte_eth_dev *dev,
*/
handle->layers |= item_flags;
if (action_flags & MLX5_FLOW_ACTION_RSS)
- flow_dv_hashfields_set(dev_flow, rss_desc);
+ flow_dv_hashfields_set(dev_flow->handle->layers,
+ rss_desc,
+ &dev_flow->hash_fields);
/* If has RSS action in the sample action, the Sample/Mirror resource
* should be registered after the hash filed be update.
*/
@@ -14596,20 +14601,18 @@ __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
* MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive so they can share
* same slot in mlx5_rss_hash_fields.
*
- * @param[in] rss
- * Pointer to the shared action RSS conf.
+ * @param[in] rss_types
+ * RSS type.
* @param[in, out] hash_field
* hash_field variable needed to be adjusted.
*
* @return
* void
*/
-static void
-__flow_dv_action_rss_l34_hash_adjust(struct mlx5_shared_action_rss *rss,
- uint64_t *hash_field)
+void
+flow_dv_action_rss_l34_hash_adjust(uint64_t rss_types,
+ uint64_t *hash_field)
{
- uint64_t rss_types = rss->origin.types;
-
switch (*hash_field & ~IBV_RX_HASH_INNER) {
case MLX5_RSS_HASH_IPV4:
if (rss_types & MLX5_IPV4_LAYER_TYPES) {
@@ -14692,12 +14695,15 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
size_t i;
int err;
- if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl,
- !!dev->data->dev_started)) {
+ shared_rss->ind_tbl = mlx5_ind_table_obj_new
+ (dev, shared_rss->origin.queue,
+ shared_rss->origin.queue_num,
+ true,
+ !!dev->data->dev_started);
+ if (!shared_rss->ind_tbl)
return rte_flow_error_set(error, rte_errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot setup indirection table");
- }
memcpy(rss_desc.key, shared_rss->origin.key, MLX5_RSS_HASH_KEY_LEN);
rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
rss_desc.const_q = shared_rss->origin.queue;
@@ -14706,19 +14712,20 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
rss_desc.shared_rss = action_idx;
rss_desc.ind_tbl = shared_rss->ind_tbl;
for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
- uint32_t hrxq_idx;
+ struct mlx5_hrxq *hrxq;
uint64_t hash_fields = mlx5_rss_hash_fields[i];
int tunnel = 0;
- __flow_dv_action_rss_l34_hash_adjust(shared_rss, &hash_fields);
+ flow_dv_action_rss_l34_hash_adjust(shared_rss->origin.types,
+ &hash_fields);
if (shared_rss->origin.level > 1) {
hash_fields |= IBV_RX_HASH_INNER;
tunnel = 1;
}
rss_desc.tunnel = tunnel;
rss_desc.hash_fields = hash_fields;
- hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
- if (!hrxq_idx) {
+ hrxq = mlx5_hrxq_get(dev, &rss_desc);
+ if (!hrxq) {
rte_flow_error_set
(error, rte_errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -14726,14 +14733,14 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
goto error_hrxq_new;
}
err = __flow_dv_action_rss_hrxq_set
- (shared_rss, hash_fields, hrxq_idx);
+ (shared_rss, hash_fields, hrxq->idx);
MLX5_ASSERT(!err);
}
return 0;
error_hrxq_new:
err = rte_errno;
__flow_dv_action_rss_hrxqs_release(dev, shared_rss);
- if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true, true))
+ if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
shared_rss->ind_tbl = NULL;
rte_errno = err;
return -rte_errno;
@@ -14764,18 +14771,14 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev,
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_shared_action_rss *shared_rss = NULL;
- void *queue = NULL;
struct rte_flow_action_rss *origin;
const uint8_t *rss_key;
- uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
uint32_t idx;
RTE_SET_USED(conf);
- queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
- 0, SOCKET_ID_ANY);
shared_rss = mlx5_ipool_zmalloc
(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
- if (!shared_rss || !queue) {
+ if (!shared_rss) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot allocate resource memory");
@@ -14787,18 +14790,6 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev,
"rss action number out of range");
goto error_rss_init;
}
- shared_rss->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
- sizeof(*shared_rss->ind_tbl),
- 0, SOCKET_ID_ANY);
- if (!shared_rss->ind_tbl) {
- rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "cannot allocate resource memory");
- goto error_rss_init;
- }
- memcpy(queue, rss->queue, queue_size);
- shared_rss->ind_tbl->queues = queue;
- shared_rss->ind_tbl->queues_n = rss->queue_num;
origin = &shared_rss->origin;
origin->func = rss->func;
origin->level = rss->level;
@@ -14809,10 +14800,12 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev,
memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
origin->key = &shared_rss->key[0];
origin->key_len = MLX5_RSS_HASH_KEY_LEN;
- origin->queue = queue;
+ origin->queue = rss->queue;
origin->queue_num = rss->queue_num;
if (__flow_dv_action_rss_setup(dev, idx, shared_rss, error))
goto error_rss_init;
+ /* Update queue with indirect table queue memory. */
+ origin->queue = shared_rss->ind_tbl->queues;
rte_spinlock_init(&shared_rss->action_rss_sl);
__atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
rte_spinlock_lock(&priv->shared_act_sl);
@@ -14823,12 +14816,11 @@ __flow_dv_action_rss_create(struct rte_eth_dev *dev,
error_rss_init:
if (shared_rss) {
if (shared_rss->ind_tbl)
- mlx5_free(shared_rss->ind_tbl);
+ mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl,
+ !!dev->data->dev_started);
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
idx);
}
- if (queue)
- mlx5_free(queue);
return 0;
}
@@ -14856,7 +14848,6 @@ __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
uint32_t old_refcnt = 1;
int remaining;
- uint16_t *queue = NULL;
if (!shared_rss)
return rte_flow_error_set(error, EINVAL,
@@ -14875,8 +14866,7 @@ __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"shared rss hrxq has references");
- queue = shared_rss->ind_tbl->queues;
- remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true,
+ remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl,
!!dev->data->dev_started);
if (remaining)
return rte_flow_error_set(error, EBUSY,
@@ -14884,7 +14874,6 @@ __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
NULL,
"shared rss indirection table has"
" references");
- mlx5_free(queue);
rte_spinlock_lock(&priv->shared_act_sl);
ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
&priv->rss_shared_actions, idx, shared_rss, next);
@@ -16878,11 +16867,12 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
if (!rss_desc[i])
continue;
- hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
- if (!hrxq_idx[i]) {
+ hrxq = mlx5_hrxq_get(dev, rss_desc[i]);
+ if (!hrxq) {
rte_spinlock_unlock(&mtr_policy->sl);
return NULL;
}
+ hrxq_idx[i] = hrxq->idx;
}
sub_policy_num = (mtr_policy->sub_policy_num >>
(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index a825766245..e59d812072 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -7,6 +7,7 @@
#include <mlx5_malloc.h>
#include "mlx5_defs.h"
#include "mlx5_flow.h"
+#include "mlx5_rx.h"
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
@@ -89,6 +90,56 @@ flow_hw_jump_release(struct rte_eth_dev *dev, struct mlx5_hw_jump_action *jump)
mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
}
+/**
+ * Register queue/RSS action.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] hws_flags
+ * DR action flags.
+ * @param[in] action
+ * Pointer to the rte_flow action.
+ *
+ * @return
+ * Hash Rx queue on success, NULL otherwise and rte_errno is set.
+ */
+static inline struct mlx5_hrxq*
+flow_hw_tir_action_register(struct rte_eth_dev *dev,
+ uint32_t hws_flags,
+ const struct rte_flow_action *action)
+{
+ struct mlx5_flow_rss_desc rss_desc = {
+ .hws_flags = hws_flags,
+ };
+ struct mlx5_hrxq *hrxq;
+
+ if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ const struct rte_flow_action_queue *queue = action->conf;
+
+ rss_desc.const_q = &queue->index;
+ rss_desc.queue_num = 1;
+ } else {
+ const struct rte_flow_action_rss *rss = action->conf;
+
+ rss_desc.queue_num = rss->queue_num;
+ rss_desc.const_q = rss->queue;
+ memcpy(rss_desc.key,
+ !rss->key ? rss_hash_default_key : rss->key,
+ MLX5_RSS_HASH_KEY_LEN);
+ rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
+ rss_desc.types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
+ flow_dv_hashfields_set(0, &rss_desc, &rss_desc.hash_fields);
+ flow_dv_action_rss_l34_hash_adjust(rss->types,
+ &rss_desc.hash_fields);
+ if (rss->level > 1) {
+ rss_desc.hash_fields |= IBV_RX_HASH_INNER;
+ rss_desc.tunnel = 1;
+ }
+ }
+ hrxq = mlx5_hrxq_get(dev, &rss_desc);
+ return hrxq;
+}
+
/**
* Destroy DR actions created by action template.
*
@@ -260,6 +311,40 @@ flow_hw_actions_translate(struct rte_eth_dev *dev,
}
i++;
break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ if (masks->conf) {
+ acts->tir = flow_hw_tir_action_register
+ (dev,
+ mlx5_hw_act_flag[!!attr->group][type],
+ actions);
+ if (!acts->tir)
+ goto err;
+ acts->rule_acts[i].action =
+ acts->tir->action;
+ } else if (__flow_hw_act_data_general_append
+ (priv, acts, actions->type,
+ actions - action_start, i)) {
+ goto err;
+ }
+ i++;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ if (masks->conf) {
+ acts->tir = flow_hw_tir_action_register
+ (dev,
+ mlx5_hw_act_flag[!!attr->group][type],
+ actions);
+ if (!acts->tir)
+ goto err;
+ acts->rule_acts[i].action =
+ acts->tir->action;
+ } else if (__flow_hw_act_data_general_append
+ (priv, acts, actions->type,
+ actions - action_start, i)) {
+ goto err;
+ }
+ i++;
+ break;
case RTE_FLOW_ACTION_TYPE_END:
actions_end = true;
break;
@@ -313,6 +398,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
struct rte_flow_attr attr = {
.ingress = 1,
};
+ uint32_t ft_flag;
memcpy(rule_acts, hw_acts->rule_acts,
sizeof(*rule_acts) * hw_acts->acts_num);
@@ -320,6 +406,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
if (LIST_EMPTY(&hw_acts->act_list))
return 0;
attr.group = table->grp->group_id;
+ ft_flag = mlx5_hw_act_flag[!!table->grp->group_id][table->type];
if (table->type == MLX5DR_TABLE_TYPE_FDB) {
attr.transfer = 1;
attr.ingress = 1;
@@ -332,6 +419,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
LIST_FOREACH(act_data, &hw_acts->act_list, next) {
uint32_t jump_group;
struct mlx5_hw_jump_action *jump;
+ struct mlx5_hrxq *hrxq;
action = &actions[act_data->action_src];
MLX5_ASSERT(action->type == RTE_FLOW_ACTION_TYPE_INDIRECT ||
@@ -353,6 +441,17 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
job->flow->jump = jump;
job->flow->fate_type = MLX5_FLOW_FATE_JUMP;
break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ hrxq = flow_hw_tir_action_register(dev,
+ ft_flag,
+ action);
+ if (!hrxq)
+ return -1;
+ rule_acts[act_data->action_dst].action = hrxq->action;
+ job->flow->hrxq = hrxq;
+ job->flow->fate_type = MLX5_FLOW_FATE_QUEUE;
+ break;
default:
break;
}
@@ -553,6 +652,8 @@ flow_hw_q_pull(struct rte_eth_dev *dev,
if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {
if (job->flow->fate_type == MLX5_FLOW_FATE_JUMP)
flow_hw_jump_release(dev, job->flow->jump);
+ else if (job->flow->fate_type == MLX5_FLOW_FATE_QUEUE)
+ mlx5_hrxq_obj_release(dev, job->flow->hrxq);
mlx5_ipool_free(job->flow->table->flow, job->flow->idx);
}
priv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job;
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 90ccb9aaff..f08aa7a770 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1943,7 +1943,6 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
MLX5_ASSERT(priv->drop_queue.hrxq);
hrxq = priv->drop_queue.hrxq;
} else {
- uint32_t hrxq_idx;
struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
MLX5_ASSERT(rss_desc->queue_num);
@@ -1952,9 +1951,7 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
rss_desc->tunnel = !!(handle->layers &
MLX5_FLOW_LAYER_TUNNEL);
rss_desc->shared_rss = 0;
- hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
- hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
- hrxq_idx);
+ hrxq = mlx5_hrxq_get(dev, rss_desc);
if (!hrxq) {
rte_flow_error_set
(error, rte_errno,
@@ -1962,7 +1959,7 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
"cannot get hash queue");
goto error;
}
- handle->rix_hrxq = hrxq_idx;
+ handle->rix_hrxq = hrxq->idx;
}
MLX5_ASSERT(hrxq);
handle->drv_flow = mlx5_glue->create_flow
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index cb5d51340d..468772ee27 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -225,9 +225,13 @@ int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,
const uint16_t *queues,
uint32_t queues_n);
+struct mlx5_ind_table_obj *mlx5_ind_table_obj_new(struct rte_eth_dev *dev,
+ const uint16_t *queues,
+ uint32_t queues_n,
+ bool standalone,
+ bool ref_qs);
int mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
struct mlx5_ind_table_obj *ind_tbl,
- bool standalone,
bool deref_rxqs);
int mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
struct mlx5_ind_table_obj *ind_tbl,
@@ -250,8 +254,9 @@ struct mlx5_list_entry *mlx5_hrxq_clone_cb(void *tool_ctx,
void *cb_ctx __rte_unused);
void mlx5_hrxq_clone_free_cb(void *tool_ctx __rte_unused,
struct mlx5_list_entry *entry);
-uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
+struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
struct mlx5_flow_rss_desc *rss_desc);
+int mlx5_hrxq_obj_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq);
int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hxrq_idx);
uint32_t mlx5_hrxq_verify(struct rte_eth_dev *dev);
enum mlx5_rxq_type mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 580d7ae868..a892675646 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -2284,8 +2284,6 @@ mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
* Pointer to Ethernet device.
* @param ind_table
* Indirection table to release.
- * @param standalone
- * Indirection table for Standalone queue.
* @param deref_rxqs
* If true, then dereference RX queues related to indirection table.
* Otherwise, no additional action will be taken.
@@ -2296,7 +2294,6 @@ mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
int
mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
struct mlx5_ind_table_obj *ind_tbl,
- bool standalone,
bool deref_rxqs)
{
struct mlx5_priv *priv = dev->data->dev_private;
@@ -2304,7 +2301,7 @@ mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
rte_rwlock_write_lock(&priv->ind_tbls_lock);
ret = __atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
- if (!ret && !standalone)
+ if (!ret)
LIST_REMOVE(ind_tbl, next);
rte_rwlock_write_unlock(&priv->ind_tbls_lock);
if (ret)
@@ -2413,7 +2410,7 @@ mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
* @return
* The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
*/
-static struct mlx5_ind_table_obj *
+struct mlx5_ind_table_obj *
mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
uint32_t queues_n, bool standalone, bool ref_qs)
{
@@ -2435,11 +2432,13 @@ mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
mlx5_free(ind_tbl);
return NULL;
}
- if (!standalone) {
- rte_rwlock_write_lock(&priv->ind_tbls_lock);
+ rte_rwlock_write_lock(&priv->ind_tbls_lock);
+ if (!standalone)
LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
- rte_rwlock_write_unlock(&priv->ind_tbls_lock);
- }
+ else
+ LIST_INSERT_HEAD(&priv->standalone_ind_tbls, ind_tbl, next);
+ rte_rwlock_write_unlock(&priv->ind_tbls_lock);
+
return ind_tbl;
}
@@ -2605,6 +2604,7 @@ mlx5_hrxq_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
return (hrxq->rss_key_len != rss_desc->key_len ||
memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) ||
+ hrxq->hws_flags != rss_desc->hws_flags ||
hrxq->hash_fields != rss_desc->hash_fields ||
hrxq->ind_table->queues_n != rss_desc->queue_num ||
memcmp(hrxq->ind_table->queues, rss_desc->queue,
@@ -2689,8 +2689,7 @@ mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
}
if (ind_tbl != hrxq->ind_table) {
MLX5_ASSERT(!hrxq->standalone);
- mlx5_ind_table_obj_release(dev, hrxq->ind_table,
- hrxq->standalone, true);
+ mlx5_ind_table_obj_release(dev, hrxq->ind_table, true);
hrxq->ind_table = ind_tbl;
}
hrxq->hash_fields = hash_fields;
@@ -2700,8 +2699,7 @@ mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
err = rte_errno;
if (ind_tbl != hrxq->ind_table) {
MLX5_ASSERT(!hrxq->standalone);
- mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone,
- true);
+ mlx5_ind_table_obj_release(dev, ind_tbl, true);
}
rte_errno = err;
return -rte_errno;
@@ -2713,12 +2711,16 @@ __mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
struct mlx5_priv *priv = dev->data->dev_private;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- mlx5_glue->destroy_flow_action(hrxq->action);
+ if (hrxq->hws_flags)
+ mlx5dr_action_destroy(hrxq->action);
+ else
+ mlx5_glue->destroy_flow_action(hrxq->action);
#endif
priv->obj_ops.hrxq_destroy(hrxq);
if (!hrxq->standalone) {
mlx5_ind_table_obj_release(dev, hrxq->ind_table,
- hrxq->standalone, true);
+ hrxq->hws_flags ?
+ (!!dev->data->dev_started) : true);
}
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
}
@@ -2762,11 +2764,12 @@ __mlx5_hrxq_create(struct rte_eth_dev *dev,
int ret;
queues_n = rss_desc->hash_fields ? queues_n : 1;
- if (!ind_tbl)
+ if (!ind_tbl && !rss_desc->hws_flags)
ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
- standalone,
+ standalone ||
+ rss_desc->hws_flags,
!!dev->data->dev_started);
if (!ind_tbl)
return NULL;
@@ -2778,6 +2781,7 @@ __mlx5_hrxq_create(struct rte_eth_dev *dev,
hrxq->ind_table = ind_tbl;
hrxq->rss_key_len = rss_key_len;
hrxq->hash_fields = rss_desc->hash_fields;
+ hrxq->hws_flags = rss_desc->hws_flags;
memcpy(hrxq->rss_key, rss_key, rss_key_len);
ret = priv->obj_ops.hrxq_new(dev, hrxq, rss_desc->tunnel);
if (ret < 0)
@@ -2785,7 +2789,7 @@ __mlx5_hrxq_create(struct rte_eth_dev *dev,
return hrxq;
error:
if (!rss_desc->ind_tbl)
- mlx5_ind_table_obj_release(dev, ind_tbl, standalone, true);
+ mlx5_ind_table_obj_release(dev, ind_tbl, true);
if (hrxq)
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
return NULL;
@@ -2839,13 +2843,13 @@ mlx5_hrxq_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
* RSS configuration for the Rx hash queue.
*
* @return
- * An hash Rx queue index on success.
+ * A hash Rx queue on success.
*/
-uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
+struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
struct mlx5_flow_rss_desc *rss_desc)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hrxq *hrxq;
+ struct mlx5_hrxq *hrxq = NULL;
struct mlx5_list_entry *entry;
struct mlx5_flow_cb_ctx ctx = {
.data = rss_desc,
@@ -2856,12 +2860,10 @@ uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
} else {
entry = mlx5_list_register(priv->hrxqs, &ctx);
if (!entry)
- return 0;
+ return NULL;
hrxq = container_of(entry, typeof(*hrxq), entry);
}
- if (hrxq)
- return hrxq->idx;
- return 0;
+ return hrxq;
}
/**
@@ -2870,17 +2872,15 @@ uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
* @param dev
* Pointer to Ethernet device.
- * @param hrxq_idx
+ * @param hrxq
- * Index to Hash Rx queue to release.
+ * Hash Rx queue to release.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
-int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
+int mlx5_hrxq_obj_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hrxq *hrxq;
- hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
if (!hrxq)
return 0;
if (!hrxq->standalone)
@@ -2889,6 +2889,26 @@ int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
return 0;
}
+/**
+ * Release the hash Rx queue by index.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param hrxq_idx
+ * Index to Hash Rx queue to release.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq;
+
+ hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
+ return mlx5_hrxq_obj_release(dev, hrxq);
+}
+
/**
* Create a drop Rx Hash queue.
*
--
2.25.1