From: Xueming Li <xuemingl@nvidia.com>
To: <dev@dpdk.org>
Cc: <xuemingl@nvidia.com>, Lior Margalit <lmargalit@nvidia.com>,
Matan Azrad <matan@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Subject: [dpdk-dev] [PATCH v2 11/13] net/mlx5: remove Rx queue data list from device
Date: Sat, 16 Oct 2021 17:12:11 +0800
Message-ID: <20211016091214.1831902-12-xuemingl@nvidia.com>
In-Reply-To: <20211016091214.1831902-1-xuemingl@nvidia.com>
The Rx queue data list (priv->rxqs) can be replaced by the Rx queue
private data list (priv->rxq_privs). Remove priv->rxqs and access the
queue data through the universal wrapper API instead.
Signed-off-by: Xueming Li <xuemingl@nvidia.com>
---
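Note for reviewers (not part of the commit message): the queue accessors
used throughout this patch come from the earlier patches of this series.
A minimal usage sketch follows; it is illustrative only, "idx" is a
hypothetical queue index, and the authoritative prototypes are the ones
in the tree, not this snippet:

    /* Per-queue private (non-shared) data; entry may be NULL if unconfigured. */
    struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
    /* Queue control structure, also reachable as rxq->ctrl. */
    struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
    /* Datapath structure, formerly reached as (*priv->rxqs)[idx]. */
    struct mlx5_rxq_data *rxq_data = mlx5_rxq_data_get(dev, idx);

    if (rxq_data != NULL)
            rxq_data->rt_timestamp = priv->config.rt_timestamp;

Each accessor may return NULL for an unconfigured queue, hence the NULL
checks added in the diff below.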
drivers/net/mlx5/linux/mlx5_verbs.c | 7 ++---
drivers/net/mlx5/mlx5.c | 10 +------
drivers/net/mlx5/mlx5.h | 1 -
drivers/net/mlx5/mlx5_devx.c | 13 +++++----
drivers/net/mlx5/mlx5_ethdev.c | 6 +---
drivers/net/mlx5/mlx5_flow.c | 45 +++++++++++++++--------------
drivers/net/mlx5/mlx5_rss.c | 6 ++--
drivers/net/mlx5/mlx5_rx.c | 19 +++++-------
drivers/net/mlx5/mlx5_rx.h | 9 +++---
drivers/net/mlx5/mlx5_rxq.c | 23 ++++++---------
drivers/net/mlx5/mlx5_rxtx_vec.c | 6 ++--
drivers/net/mlx5/mlx5_stats.c | 9 +++---
drivers/net/mlx5/mlx5_trigger.c | 2 +-
13 files changed, 69 insertions(+), 87 deletions(-)
diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index a2a9b9c1f98..0e68a13208b 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -527,11 +527,10 @@ mlx5_ibv_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
MLX5_ASSERT(ind_tbl);
for (i = 0; i != ind_tbl->queues_n; ++i) {
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[ind_tbl->queues[i]];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev,
+ ind_tbl->queues[i]);
- wq[i] = rxq_ctrl->obj->wq;
+ wq[i] = rxq->ctrl->obj->wq;
}
MLX5_ASSERT(i > 0);
/* Finalise indirection table. */
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 477ad8c1bc9..6240f6f5dc6 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1578,20 +1578,12 @@ mlx5_dev_close(struct rte_eth_dev *dev)
mlx5_mp_os_req_stop_rxtx(dev);
/* Free the eCPRI flex parser resource. */
mlx5_flex_parser_ecpri_release(dev);
- if (priv->rxqs != NULL) {
+ if (priv->rxq_privs != NULL) {
/* XXX race condition if mlx5_rx_burst() is still running. */
rte_delay_us_sleep(1000);
for (i = 0; (i != priv->rxqs_n); ++i)
mlx5_rxq_release(dev, i);
priv->rxqs_n = 0;
- priv->rxqs = NULL;
- }
- if (priv->representor) {
- /* Each representor has a dedicated interrupts handler */
- mlx5_free(dev->intr_handle);
- dev->intr_handle = NULL;
- }
- if (priv->rxq_privs != NULL) {
mlx5_free(priv->rxq_privs);
priv->rxq_privs = NULL;
}
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index a2735cbb350..55612f777ea 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1406,7 +1406,6 @@ struct mlx5_priv {
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
struct mlx5_rxq_priv *(*rxq_privs)[]; /* RX queue non-shared data. */
- struct mlx5_rxq_data *(*rxqs)[]; /* (Shared) RX queues. */
struct mlx5_txq_data *(*txqs)[]; /* TX queues. */
struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
struct rte_eth_rss_conf rss_conf; /* RSS configuration. */
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index f7f7526dbf6..b767470dea0 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -682,15 +682,16 @@ mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
/* NULL queues designate drop queue. */
if (ind_tbl->queues != NULL) {
- struct mlx5_rxq_data *rxq_data =
- (*priv->rxqs)[ind_tbl->queues[0]];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
- rxq_obj_type = rxq_ctrl->type;
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev,
+ ind_tbl->queues[0]);
+ rxq_obj_type = rxq->ctrl->type;
/* Enable TIR LRO only if all the queues were configured for. */
for (i = 0; i < ind_tbl->queues_n; ++i) {
- if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) {
+ struct mlx5_rxq_data *rxq_i =
+ mlx5_rxq_data_get(dev, ind_tbl->queues[i]);
+
+ if (rxq_i != NULL && !rxq_i->lro) {
lro = false;
break;
}
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index ee1189b929d..070ff149488 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -114,7 +114,6 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
rte_errno = ENOMEM;
return -rte_errno;
}
- priv->rxqs = (void *)dev->data->rx_queues;
priv->txqs = (void *)dev->data->tx_queues;
if (txqs_n != priv->txqs_n) {
DRV_LOG(INFO, "port %u Tx queues number update: %u -> %u",
@@ -171,11 +170,8 @@ mlx5_dev_configure_rss_reta(struct rte_eth_dev *dev)
return -rte_errno;
}
for (i = 0, j = 0; i < rxqs_n; i++) {
- struct mlx5_rxq_data *rxq_data;
- struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
- rxq_data = (*priv->rxqs)[i];
- rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
if (rxq_ctrl && rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
rss_queue_arr[j++] = i;
}
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index c10b9112593..49a74edd2e6 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1166,10 +1166,11 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
return;
for (i = 0; i != ind_tbl->queues_n; ++i) {
int idx = ind_tbl->queues[i];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of((*priv->rxqs)[idx],
- struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
+ MLX5_ASSERT(rxq_ctrl != NULL);
+ if (rxq_ctrl == NULL)
+ continue;
/*
* To support metadata register copy on Tx loopback,
* this must be always enabled (metadata may arive
@@ -1261,10 +1262,11 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
MLX5_ASSERT(dev->data->dev_started);
for (i = 0; i != ind_tbl->queues_n; ++i) {
int idx = ind_tbl->queues[i];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of((*priv->rxqs)[idx],
- struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
+ MLX5_ASSERT(rxq_ctrl != NULL);
+ if (rxq_ctrl == NULL)
+ continue;
if (priv->config.dv_flow_en &&
priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
mlx5_flow_ext_mreg_supported(dev)) {
@@ -1325,18 +1327,16 @@ flow_rxq_flags_clear(struct rte_eth_dev *dev)
unsigned int i;
for (i = 0; i != priv->rxqs_n; ++i) {
- struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
unsigned int j;
- if (!(*priv->rxqs)[i])
+ if (rxq == NULL || rxq->ctrl == NULL)
continue;
- rxq_ctrl = container_of((*priv->rxqs)[i],
- struct mlx5_rxq_ctrl, rxq);
- rxq_ctrl->flow_mark_n = 0;
- rxq_ctrl->rxq.mark = 0;
+ rxq->ctrl->flow_mark_n = 0;
+ rxq->ctrl->rxq.mark = 0;
for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
- rxq_ctrl->flow_tunnels_n[j] = 0;
- rxq_ctrl->rxq.tunnel = 0;
+ rxq->ctrl->flow_tunnels_n[j] = 0;
+ rxq->ctrl->rxq.tunnel = 0;
}
}
@@ -1350,13 +1350,15 @@ void
mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *data;
unsigned int i;
for (i = 0; i != priv->rxqs_n; ++i) {
- if (!(*priv->rxqs)[i])
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
+ struct mlx5_rxq_data *data;
+
+ if (rxq == NULL || rxq->ctrl == NULL)
continue;
- data = (*priv->rxqs)[i];
+ data = &rxq->ctrl->rxq;
if (!rte_flow_dynf_metadata_avail()) {
data->dynf_meta = 0;
data->flow_meta_mask = 0;
@@ -1547,7 +1549,7 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
&queue->index,
"queue index out of range");
- if (!(*priv->rxqs)[queue->index])
+ if (mlx5_rxq_get(dev, queue->index) == NULL)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
&queue->index,
@@ -1578,7 +1580,7 @@ mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
* 0 on success, a negative errno code on error.
*/
static int
-mlx5_validate_rss_queues(const struct rte_eth_dev *dev,
+mlx5_validate_rss_queues(struct rte_eth_dev *dev,
const uint16_t *queues, uint32_t queues_n,
const char **error, uint32_t *queue_idx)
{
@@ -1594,13 +1596,12 @@ mlx5_validate_rss_queues(const struct rte_eth_dev *dev,
*queue_idx = i;
return -EINVAL;
}
- if (!(*priv->rxqs)[queues[i]]) {
+ rxq_ctrl = mlx5_rxq_ctrl_get(dev, queues[i]);
+ if (rxq_ctrl == NULL) {
*error = "queue is not configured";
*queue_idx = i;
return -EINVAL;
}
- rxq_ctrl = container_of((*priv->rxqs)[queues[i]],
- struct mlx5_rxq_ctrl, rxq);
if (i == 0)
rxq_type = rxq_ctrl->type;
if (rxq_type != rxq_ctrl->type) {
diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c
index c32129cdc2b..9ffc44b179f 100644
--- a/drivers/net/mlx5/mlx5_rss.c
+++ b/drivers/net/mlx5/mlx5_rss.c
@@ -65,9 +65,11 @@ mlx5_rss_hash_update(struct rte_eth_dev *dev,
priv->rss_conf.rss_hf = rss_conf->rss_hf;
/* Enable the RSS hash in all Rx queues. */
for (i = 0, idx = 0; idx != priv->rxqs_n; ++i) {
- if (!(*priv->rxqs)[i])
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
+
+ if (rxq == NULL || rxq->ctrl == NULL)
continue;
- (*priv->rxqs)[i]->rss_hash = !!rss_conf->rss_hf &&
+ rxq->ctrl->rxq.rss_hash = !!rss_conf->rss_hf &&
!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS);
++idx;
}
diff --git a/drivers/net/mlx5/mlx5_rx.c b/drivers/net/mlx5/mlx5_rx.c
index 09de26c0d39..3017a8da20c 100644
--- a/drivers/net/mlx5/mlx5_rx.c
+++ b/drivers/net/mlx5/mlx5_rx.c
@@ -148,10 +148,8 @@ void
mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
struct rte_eth_rxq_info *qinfo)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, rx_queue_id);
+ struct mlx5_rxq_data *rxq = mlx5_rxq_data_get(dev, rx_queue_id);
if (!rxq)
return;
@@ -162,7 +160,10 @@ mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
qinfo->conf.rx_thresh.wthresh = 0;
qinfo->conf.rx_free_thresh = rxq->rq_repl_thresh;
qinfo->conf.rx_drop_en = 1;
- qinfo->conf.rx_deferred_start = rxq_ctrl ? 0 : 1;
+ if (rxq_ctrl == NULL || rxq_ctrl->obj == NULL)
+ qinfo->conf.rx_deferred_start = 0;
+ else
+ qinfo->conf.rx_deferred_start = 1;
qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
qinfo->scattered_rx = dev->data->scattered_rx;
qinfo->nb_desc = mlx5_rxq_mprq_enabled(rxq) ?
@@ -191,10 +192,8 @@ mlx5_rx_burst_mode_get(struct rte_eth_dev *dev,
struct rte_eth_burst_mode *mode)
{
eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq;
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);
- rxq = (*priv->rxqs)[rx_queue_id];
if (!rxq) {
rte_errno = EINVAL;
return -rte_errno;
@@ -245,15 +244,13 @@ mlx5_rx_burst_mode_get(struct rte_eth_dev *dev,
uint32_t
mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq;
+ struct mlx5_rxq_data *rxq = mlx5_rxq_data_get(dev, rx_queue_id);
if (dev->rx_pkt_burst == NULL ||
dev->rx_pkt_burst == removed_rx_burst) {
rte_errno = ENOTSUP;
return -rte_errno;
}
- rxq = (*priv->rxqs)[rx_queue_id];
if (!rxq) {
rte_errno = EINVAL;
return -rte_errno;
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index 25f7fc2071a..161399c764d 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -606,14 +606,13 @@ mlx5_mprq_enabled(struct rte_eth_dev *dev)
return 0;
/* All the configured queues should be enabled. */
for (i = 0; i < priv->rxqs_n; ++i) {
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
- struct mlx5_rxq_ctrl *rxq_ctrl = container_of
- (rxq, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
- if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
+ if (rxq_ctrl == NULL ||
+ rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
continue;
n_ibv++;
- if (mlx5_rxq_mprq_enabled(rxq))
+ if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
++n;
}
/* Multi-Packet RQ can't be partially configured. */
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index b9d39a28d78..8eec9a4ed8d 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -729,7 +729,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
}
DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
dev->data->port_id, idx);
- (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
+ dev->data->rx_queues[idx] = &rxq_ctrl->rxq;
return 0;
}
@@ -811,7 +811,7 @@ mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
}
DRV_LOG(DEBUG, "port %u adding hairpin Rx queue %u to list",
dev->data->port_id, idx);
- (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
+ dev->data->rx_queues[idx] = &rxq_ctrl->rxq;
return 0;
}
@@ -1712,8 +1712,7 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- if (priv->rxq_privs == NULL)
- return NULL;
+ MLX5_ASSERT(priv->rxq_privs != NULL);
return (*priv->rxq_privs)[idx];
}
@@ -1799,7 +1798,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
LIST_REMOVE(rxq, owner_entry);
LIST_REMOVE(rxq_ctrl, next);
mlx5_free(rxq_ctrl);
- (*priv->rxqs)[idx] = NULL;
+ dev->data->rx_queues[idx] = NULL;
mlx5_free(rxq);
(*priv->rxq_privs)[idx] = NULL;
}
@@ -1845,14 +1844,10 @@ enum mlx5_rxq_type
mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
+ struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
- if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
- rxq_ctrl = container_of((*priv->rxqs)[idx],
- struct mlx5_rxq_ctrl,
- rxq);
+ if (idx < priv->rxqs_n && rxq_ctrl != NULL)
return rxq_ctrl->type;
- }
return MLX5_RXQ_TYPE_UNDEFINED;
}
@@ -2619,13 +2614,13 @@ mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_ctx_shared *sh = priv->sh;
- struct mlx5_rxq_data *data;
unsigned int i;
for (i = 0; i != priv->rxqs_n; ++i) {
- if (!(*priv->rxqs)[i])
+ struct mlx5_rxq_data *data = mlx5_rxq_data_get(dev, i);
+
+ if (data == NULL)
continue;
- data = (*priv->rxqs)[i];
data->sh = sh;
data->rt_timestamp = priv->config.rt_timestamp;
}
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index 511681841ca..6212ce8247d 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -578,11 +578,11 @@ mlx5_check_vec_rx_support(struct rte_eth_dev *dev)
return -ENOTSUP;
/* All the configured queues should support. */
for (i = 0; i < priv->rxqs_n; ++i) {
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+ struct mlx5_rxq_data *rxq_data = mlx5_rxq_data_get(dev, i);
- if (!rxq)
+ if (!rxq_data)
continue;
- if (mlx5_rxq_check_vec_support(rxq) < 0)
+ if (mlx5_rxq_check_vec_support(rxq_data) < 0)
break;
}
if (i != priv->rxqs_n)
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index ae2f5668a74..732775954ad 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -107,7 +107,7 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
memset(&tmp, 0, sizeof(tmp));
/* Add software counters. */
for (i = 0; (i != priv->rxqs_n); ++i) {
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+ struct mlx5_rxq_data *rxq = mlx5_rxq_data_get(dev, i);
if (rxq == NULL)
continue;
@@ -181,10 +181,11 @@ mlx5_stats_reset(struct rte_eth_dev *dev)
unsigned int i;
for (i = 0; (i != priv->rxqs_n); ++i) {
- if ((*priv->rxqs)[i] == NULL)
+ struct mlx5_rxq_data *rxq_data = mlx5_rxq_data_get(dev, i);
+
+ if (rxq_data == NULL)
continue;
- memset(&(*priv->rxqs)[i]->stats, 0,
- sizeof(struct mlx5_rxq_stats));
+ memset(&rxq_data->stats, 0, sizeof(struct mlx5_rxq_stats));
}
for (i = 0; (i != priv->txqs_n); ++i) {
if ((*priv->txqs)[i] == NULL)
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index b3188f510fb..1e865e74e39 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -176,7 +176,7 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
if (!rxq_ctrl->obj) {
DRV_LOG(ERR,
"Port %u Rx queue %u can't allocate resources.",
- dev->data->port_id, (*priv->rxqs)[i]->idx);
+ dev->data->port_id, i);
rte_errno = ENOMEM;
goto error;
}
--
2.33.0
Thread overview: 29+ messages
2021-09-26 11:18 [dpdk-dev] [PATCH 00/11] net/mlx5: support shared Rx queue Xueming Li
2021-09-26 11:18 ` [dpdk-dev] [PATCH 01/11] common/mlx5: support receive queue user index Xueming Li
2021-09-26 11:18 ` [dpdk-dev] [PATCH 02/11] common/mlx5: support receive memory pool Xueming Li
2021-09-26 11:18 ` [dpdk-dev] [PATCH 03/11] net/mlx5: clean Rx queue code Xueming Li
2021-09-26 11:18 ` [dpdk-dev] [PATCH 04/11] net/mlx5: split multiple packet Rq memory pool Xueming Li
2021-09-26 11:18 ` [dpdk-dev] [PATCH 05/11] net/mlx5: split Rx queue Xueming Li
2021-09-26 11:18 ` [dpdk-dev] [PATCH 06/11] net/mlx5: move Rx queue reference count Xueming Li
2021-09-26 11:19 ` [dpdk-dev] [PATCH 07/11] net/mlx5: move Rx queue hairpin info to private data Xueming Li
2021-09-26 11:19 ` [dpdk-dev] [PATCH 08/11] net/mlx5: remove port info from shareable Rx queue Xueming Li
2021-09-26 11:19 ` [dpdk-dev] [PATCH 09/11] net/mlx5: move Rx queue DevX resource Xueming Li
2021-09-26 11:19 ` [dpdk-dev] [PATCH 10/11] net/mlx5: remove Rx queue data list from device Xueming Li
2021-09-26 11:19 ` [dpdk-dev] [PATCH 11/11] net/mlx5: support shared Rx queue Xueming Li
2021-10-16 9:12 ` [dpdk-dev] [PATCH v2 00/13] " Xueming Li
2021-10-16 9:12 ` [dpdk-dev] [PATCH v2 01/13] common/mlx5: support receive queue user index Xueming Li
2021-10-16 9:12 ` [dpdk-dev] [PATCH v2 02/13] common/mlx5: support receive memory pool Xueming Li
2021-10-20 9:32 ` Kinsella, Ray
2021-10-16 9:12 ` [dpdk-dev] [PATCH v2 03/13] net/mlx5: fix Rx queue memory allocation return value Xueming Li
2021-10-16 9:12 ` [dpdk-dev] [PATCH v2 04/13] net/mlx5: clean Rx queue code Xueming Li
2021-10-16 9:12 ` [dpdk-dev] [PATCH v2 05/13] net/mlx5: split multiple packet Rq memory pool Xueming Li
2021-10-16 9:12 ` [dpdk-dev] [PATCH v2 06/13] net/mlx5: split Rx queue Xueming Li
2021-10-16 9:12 ` [dpdk-dev] [PATCH v2 07/13] net/mlx5: move Rx queue reference count Xueming Li
2021-10-16 9:12 ` [dpdk-dev] [PATCH v2 08/13] net/mlx5: move Rx queue hairpin info to private data Xueming Li
2021-10-16 9:12 ` [dpdk-dev] [PATCH v2 09/13] net/mlx5: remove port info from shareable Rx queue Xueming Li
2021-10-16 9:12 ` [dpdk-dev] [PATCH v2 10/13] net/mlx5: move Rx queue DevX resource Xueming Li
2021-10-16 9:12 ` Xueming Li [this message]
2021-10-16 9:12 ` [dpdk-dev] [PATCH v2 12/13] net/mlx5: support shared Rx queue Xueming Li
2021-10-16 9:12 ` [dpdk-dev] [PATCH v2 13/13] net/mlx5: add shared Rx queue port datapath support Xueming Li
2021-10-19 8:19 ` [dpdk-dev] [PATCH v2 00/13] net/mlx5: support shared Rx queue Slava Ovsiienko
2021-10-19 8:22 ` Slava Ovsiienko