From: Xueming Li <xuemingl@nvidia.com>
To: <dev@dpdk.org>
Cc: <xuemingl@nvidia.com>, Lior Margalit <lmargalit@nvidia.com>,
Matan Azrad <matan@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Subject: [dpdk-dev] [PATCH 06/11] net/mlx5: move Rx queue reference count
Date: Sun, 26 Sep 2021 19:18:59 +0800
Message-ID: <20210926111904.237736-7-xuemingl@nvidia.com>
In-Reply-To: <20210926111904.237736-1-xuemingl@nvidia.com>

The Rx queue reference count counts references to the RQ and is used by
the RQ table. To prepare for shared Rx queues, move it from rxq_ctrl to
the Rx queue private data.

Signed-off-by: Xueming Li <xuemingl@nvidia.com>
---
drivers/net/mlx5/mlx5_rx.h | 8 +-
drivers/net/mlx5/mlx5_rxq.c | 173 +++++++++++++++++++++-----------
drivers/net/mlx5/mlx5_trigger.c | 57 +++++------
3 files changed, 144 insertions(+), 94 deletions(-)
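
A minimal, self-contained sketch of the reference-counting pattern this
patch moves into the per-queue private data (GCC __atomic builtins with
relaxed ordering, as in the diff below). The queue_* names and the flat
queue array are simplified stand-ins for the driver's rxq_privs array and
the new mlx5_rxq_ref()/mlx5_rxq_deref() helpers, not the actual driver
code:

#include <stdint.h>
#include <stdio.h>

struct queue_priv {
	uint16_t idx;     /* Queue index. */
	uint32_t refcnt;  /* Reference counter kept in per-queue data. */
};

/* Look up a queue by index; no reference is taken. */
static struct queue_priv *
queue_get(struct queue_priv **queues, uint16_t n, uint16_t idx)
{
	if (queues == NULL || idx >= n)
		return NULL;
	return queues[idx];
}

/* Take a reference; return the queue, or NULL if it does not exist. */
static struct queue_priv *
queue_ref(struct queue_priv **queues, uint16_t n, uint16_t idx)
{
	struct queue_priv *q = queue_get(queues, n, idx);

	if (q != NULL)
		__atomic_fetch_add(&q->refcnt, 1, __ATOMIC_RELAXED);
	return q;
}

/* Drop a reference; return the updated count (0 if the queue is absent). */
static uint32_t
queue_deref(struct queue_priv **queues, uint16_t n, uint16_t idx)
{
	struct queue_priv *q = queue_get(queues, n, idx);

	if (q == NULL)
		return 0;
	return __atomic_sub_fetch(&q->refcnt, 1, __ATOMIC_RELAXED);
}

int
main(void)
{
	struct queue_priv q0 = { .idx = 0, .refcnt = 0 };
	struct queue_priv *queues[1] = { &q0 };

	queue_ref(queues, 1, 0);  /* e.g. taken at queue setup */
	queue_ref(queues, 1, 0);  /* e.g. taken by an indirection table */
	printf("after deref: %u\n", queue_deref(queues, 1, 0)); /* prints 1 */
	printf("after deref: %u\n", queue_deref(queues, 1, 0)); /* prints 0 */
	return 0;
}
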
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index db6252e8e86..fe19414c130 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -160,7 +160,6 @@ enum mlx5_rxq_type {
struct mlx5_rxq_ctrl {
struct mlx5_rxq_data rxq; /* Data path structure. */
LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
- uint32_t refcnt; /* Reference counter. */
LIST_HEAD(priv, mlx5_rxq_priv) owners; /* Owner rxq list. */
struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
struct mlx5_dev_ctx_shared *sh; /* Shared context. */
@@ -179,6 +178,7 @@ struct mlx5_rxq_ctrl {
/* RX queue private data. */
struct mlx5_rxq_priv {
uint16_t idx; /* Queue index. */
+ uint32_t refcnt; /* Reference counter. */
struct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */
LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
struct mlx5_priv *priv; /* Back pointer to private data. */
@@ -216,7 +216,11 @@ struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev,
struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, uint16_t desc,
const struct rte_eth_hairpin_conf *hairpin_conf);
-struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_rxq_priv *mlx5_rxq_ref(struct rte_eth_dev *dev, uint16_t idx);
+uint32_t mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_rxq_priv *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_rxq_ctrl *mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_rxq_data *mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 70e73690aa7..7f28646f55c 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -386,15 +386,13 @@ mlx5_get_rx_port_offloads(void)
static int
mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
- if (!(*priv->rxqs)[idx]) {
+ if (rxq == NULL) {
rte_errno = EINVAL;
return -rte_errno;
}
- rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
- return (__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED) == 1);
+ return (__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED) == 1);
}
/* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
@@ -874,8 +872,8 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
intr_handle->type = RTE_INTR_HANDLE_EXT;
for (i = 0; i != n; ++i) {
/* This rxq obj must not be released in this function. */
- struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
- struct mlx5_rxq_obj *rxq_obj = rxq_ctrl ? rxq_ctrl->obj : NULL;
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
+ struct mlx5_rxq_obj *rxq_obj = rxq ? rxq->ctrl->obj : NULL;
int rc;
/* Skip queues that cannot request interrupts. */
@@ -885,11 +883,9 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
intr_handle->intr_vec[i] =
RTE_INTR_VEC_RXTX_OFFSET +
RTE_MAX_RXTX_INTR_VEC_ID;
- /* Decrease the rxq_ctrl's refcnt */
- if (rxq_ctrl)
- mlx5_rxq_release(dev, i);
continue;
}
+ mlx5_rxq_ref(dev, i);
if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
DRV_LOG(ERR,
"port %u too many Rx queues for interrupt"
@@ -949,7 +945,7 @@ mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
* Need to access directly the queue to release the reference
* kept in mlx5_rx_intr_vec_enable().
*/
- mlx5_rxq_release(dev, i);
+ mlx5_rxq_deref(dev, i);
}
free:
rte_intr_free_epoll_fd(intr_handle);
@@ -998,19 +994,14 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
int
mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct mlx5_rxq_ctrl *rxq_ctrl;
-
- rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
- if (!rxq_ctrl)
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);
+ if (!rxq)
goto error;
- if (rxq_ctrl->irq) {
- if (!rxq_ctrl->obj) {
- mlx5_rxq_release(dev, rx_queue_id);
+ if (rxq->ctrl->irq) {
+ if (!rxq->ctrl->obj)
goto error;
- }
- mlx5_arm_cq(&rxq_ctrl->rxq, rxq_ctrl->rxq.cq_arm_sn);
+ mlx5_arm_cq(&rxq->ctrl->rxq, rxq->ctrl->rxq.cq_arm_sn);
}
- mlx5_rxq_release(dev, rx_queue_id);
return 0;
error:
rte_errno = EINVAL;
@@ -1032,23 +1023,21 @@ int
mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);
int ret = 0;
- rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
- if (!rxq_ctrl) {
+ if (!rxq) {
rte_errno = EINVAL;
return -rte_errno;
}
- if (!rxq_ctrl->obj)
+ if (!rxq->ctrl->obj)
goto error;
- if (rxq_ctrl->irq) {
- ret = priv->obj_ops.rxq_event_get(rxq_ctrl->obj);
+ if (rxq->ctrl->irq) {
+ ret = priv->obj_ops.rxq_event_get(rxq->ctrl->obj);
if (ret < 0)
goto error;
- rxq_ctrl->rxq.cq_arm_sn++;
+ rxq->ctrl->rxq.cq_arm_sn++;
}
- mlx5_rxq_release(dev, rx_queue_id);
return 0;
error:
/**
@@ -1059,12 +1048,9 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
rte_errno = errno;
else
rte_errno = EINVAL;
- ret = rte_errno; /* Save rte_errno before cleanup. */
- mlx5_rxq_release(dev, rx_queue_id);
- if (ret != EAGAIN)
+ if (rte_errno != EAGAIN)
DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
dev->data->port_id, rx_queue_id);
- rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}
@@ -1611,7 +1597,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
#endif
tmpl->rxq.idx = idx;
- __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+ mlx5_rxq_ref(dev, idx);
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
error:
@@ -1665,11 +1651,53 @@ mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
tmpl->hairpin_conf = *hairpin_conf;
tmpl->rxq.idx = idx;
- __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+ mlx5_rxq_ref(dev, idx);
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
}
+/**
+ * Increase Rx queue reference count.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * RX queue index.
+ *
+ * @return
+ * A pointer to the queue if it exists, NULL otherwise.
+ */
+inline struct mlx5_rxq_priv *
+mlx5_rxq_ref(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+
+ if (rxq != NULL)
+ __atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+ return rxq;
+}
+
+/**
+ * Dereference a Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * RX queue index.
+ *
+ * @return
+ * Updated reference count.
+ */
+inline uint32_t
+mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+
+ if (rxq == NULL)
+ return 0;
+ return __atomic_sub_fetch(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+}
+
/**
* Get a Rx queue.
*
@@ -1681,18 +1709,52 @@ mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
* @return
* A pointer to the queue if it exists, NULL otherwise.
*/
-struct mlx5_rxq_ctrl *
+inline struct mlx5_rxq_priv *
mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
- struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
- if (rxq_data) {
- rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
- __atomic_fetch_add(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED);
- }
- return rxq_ctrl;
+ if (priv->rxq_privs == NULL)
+ return NULL;
+ return (*priv->rxq_privs)[idx];
+}
+
+/**
+ * Get Rx queue shareable control.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * RX queue index.
+ *
+ * @return
+ * A pointer to the queue control if it exists, NULL otherwise.
+ */
+inline struct mlx5_rxq_ctrl *
+mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+
+ return rxq == NULL ? NULL : rxq->ctrl;
+}
+
+/**
+ * Get Rx queue shareable data.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * RX queue index.
+ *
+ * @return
+ * A pointer to the queue data if it exists, NULL otherwise.
+ */
+inline struct mlx5_rxq_data *
+mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+
+ return rxq == NULL ? NULL : &rxq->ctrl->rxq;
}
/**
@@ -1710,13 +1772,12 @@ int
mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_ctrl *rxq_ctrl;
- struct mlx5_rxq_priv *rxq = (*priv->rxq_privs)[idx];
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+ struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
if (priv->rxqs == NULL || (*priv->rxqs)[idx] == NULL)
return 0;
- rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
- if (__atomic_sub_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
+ if (mlx5_rxq_deref(dev, idx) > 1)
return 1;
if (rxq_ctrl->obj) {
priv->obj_ops.rxq_obj_release(rxq_ctrl->obj);
@@ -1728,7 +1789,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
rxq_free_elts(rxq_ctrl);
dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
}
- if (!__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED)) {
+ if (!__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED)) {
if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
mlx5_mprq_free_mp(dev, rxq_ctrl);
@@ -1908,7 +1969,7 @@ mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
return 1;
priv->obj_ops.ind_table_destroy(ind_tbl);
for (i = 0; i != ind_tbl->queues_n; ++i)
- claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
+ claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i]));
mlx5_free(ind_tbl);
return 0;
}
@@ -1965,7 +2026,7 @@ mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
log2above(priv->config.ind_table_max_size);
for (i = 0; i != queues_n; ++i) {
- if (!mlx5_rxq_get(dev, queues[i])) {
+ if (mlx5_rxq_ref(dev, queues[i]) == NULL) {
ret = -rte_errno;
goto error;
}
@@ -1978,7 +2039,7 @@ mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
error:
err = rte_errno;
for (j = 0; j < i; j++)
- mlx5_rxq_release(dev, ind_tbl->queues[j]);
+ mlx5_rxq_deref(dev, ind_tbl->queues[j]);
rte_errno = err;
DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
dev->data->port_id);
@@ -2074,7 +2135,7 @@ mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
bool standalone)
{
struct mlx5_priv *priv = dev->data->dev_private;
- unsigned int i, j;
+ unsigned int i;
int ret = 0, err;
const unsigned int n = rte_is_power_of_2(queues_n) ?
log2above(queues_n) :
@@ -2094,15 +2155,11 @@ mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl);
if (ret)
goto error;
- for (j = 0; j < ind_tbl->queues_n; j++)
- mlx5_rxq_release(dev, ind_tbl->queues[j]);
ind_tbl->queues_n = queues_n;
ind_tbl->queues = queues;
return 0;
error:
err = rte_errno;
- for (j = 0; j < i; j++)
- mlx5_rxq_release(dev, queues[j]);
rte_errno = err;
DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
dev->data->port_id);
@@ -2135,7 +2192,7 @@ mlx5_ind_table_obj_attach(struct rte_eth_dev *dev,
return ret;
}
for (i = 0; i < ind_tbl->queues_n; i++)
- mlx5_rxq_get(dev, ind_tbl->queues[i]);
+ mlx5_rxq_ref(dev, ind_tbl->queues[i]);
return 0;
}
@@ -2172,7 +2229,7 @@ mlx5_ind_table_obj_detach(struct rte_eth_dev *dev,
return ret;
}
for (i = 0; i < ind_tbl->queues_n; i++)
- mlx5_rxq_release(dev, ind_tbl->queues[i]);
+ mlx5_rxq_deref(dev, ind_tbl->queues[i]);
return ret;
}
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 0753dbad053..a49254c96f6 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -143,10 +143,12 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
DRV_LOG(DEBUG, "Port %u device_attr.max_sge is %d.",
dev->data->port_id, priv->sh->device_attr.max_sge);
for (i = 0; i != priv->rxqs_n; ++i) {
- struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_ref(dev, i);
+ struct mlx5_rxq_ctrl *rxq_ctrl;
- if (!rxq_ctrl)
+ if (rxq == NULL)
continue;
+ rxq_ctrl = rxq->ctrl;
if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
/* Allocate/reuse/resize mempool for MPRQ. */
@@ -215,6 +217,7 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
struct mlx5_devx_modify_sq_attr sq_attr = { 0 };
struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
struct mlx5_txq_ctrl *txq_ctrl;
+ struct mlx5_rxq_priv *rxq;
struct mlx5_rxq_ctrl *rxq_ctrl;
struct mlx5_devx_obj *sq;
struct mlx5_devx_obj *rq;
@@ -259,9 +262,8 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
return -rte_errno;
}
sq = txq_ctrl->obj->sq;
- rxq_ctrl = mlx5_rxq_get(dev,
- txq_ctrl->hairpin_conf.peers[0].queue);
- if (!rxq_ctrl) {
+ rxq = mlx5_rxq_get(dev, txq_ctrl->hairpin_conf.peers[0].queue);
+ if (rxq == NULL) {
mlx5_txq_release(dev, i);
rte_errno = EINVAL;
DRV_LOG(ERR, "port %u no rxq object found: %d",
@@ -269,6 +271,7 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
txq_ctrl->hairpin_conf.peers[0].queue);
return -rte_errno;
}
+ rxq_ctrl = rxq->ctrl;
if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN ||
rxq_ctrl->hairpin_conf.peers[0].queue != i) {
rte_errno = ENOMEM;
@@ -303,12 +306,10 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
rxq_ctrl->hairpin_status = 1;
txq_ctrl->hairpin_status = 1;
mlx5_txq_release(dev, i);
- mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
}
return 0;
error:
mlx5_txq_release(dev, i);
- mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
return -rte_errno;
}
@@ -381,27 +382,26 @@ mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,
peer_info->manual_bind = txq_ctrl->hairpin_conf.manual_bind;
mlx5_txq_release(dev, peer_queue);
} else { /* Peer port used as ingress. */
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, peer_queue);
struct mlx5_rxq_ctrl *rxq_ctrl;
- rxq_ctrl = mlx5_rxq_get(dev, peer_queue);
- if (rxq_ctrl == NULL) {
+ if (rxq == NULL) {
rte_errno = EINVAL;
DRV_LOG(ERR, "Failed to get port %u Rx queue %d",
dev->data->port_id, peer_queue);
return -rte_errno;
}
+ rxq_ctrl = rxq->ctrl;
if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
rte_errno = EINVAL;
DRV_LOG(ERR, "port %u queue %d is not a hairpin Rxq",
dev->data->port_id, peer_queue);
- mlx5_rxq_release(dev, peer_queue);
return -rte_errno;
}
if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {
rte_errno = ENOMEM;
DRV_LOG(ERR, "port %u no Rxq object found: %d",
dev->data->port_id, peer_queue);
- mlx5_rxq_release(dev, peer_queue);
return -rte_errno;
}
peer_info->qp_id = rxq_ctrl->obj->rq->id;
@@ -409,7 +409,6 @@ mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,
peer_info->peer_q = rxq_ctrl->hairpin_conf.peers[0].queue;
peer_info->tx_explicit = rxq_ctrl->hairpin_conf.tx_explicit;
peer_info->manual_bind = rxq_ctrl->hairpin_conf.manual_bind;
- mlx5_rxq_release(dev, peer_queue);
}
return 0;
}
@@ -508,34 +507,32 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
txq_ctrl->hairpin_status = 1;
mlx5_txq_release(dev, cur_queue);
} else {
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, cur_queue);
struct mlx5_rxq_ctrl *rxq_ctrl;
struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
- rxq_ctrl = mlx5_rxq_get(dev, cur_queue);
- if (rxq_ctrl == NULL) {
+ if (rxq == NULL) {
rte_errno = EINVAL;
DRV_LOG(ERR, "Failed to get port %u Rx queue %d",
dev->data->port_id, cur_queue);
return -rte_errno;
}
+ rxq_ctrl = rxq->ctrl;
if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
rte_errno = EINVAL;
DRV_LOG(ERR, "port %u queue %d not a hairpin Rxq",
dev->data->port_id, cur_queue);
- mlx5_rxq_release(dev, cur_queue);
return -rte_errno;
}
if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {
rte_errno = ENOMEM;
DRV_LOG(ERR, "port %u no Rxq object found: %d",
dev->data->port_id, cur_queue);
- mlx5_rxq_release(dev, cur_queue);
return -rte_errno;
}
if (rxq_ctrl->hairpin_status != 0) {
DRV_LOG(DEBUG, "port %u Rx queue %d is already bound",
dev->data->port_id, cur_queue);
- mlx5_rxq_release(dev, cur_queue);
return 0;
}
if (peer_info->tx_explicit !=
@@ -543,7 +540,6 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
rte_errno = EINVAL;
DRV_LOG(ERR, "port %u Rx queue %d and peer Tx rule mode"
" mismatch", dev->data->port_id, cur_queue);
- mlx5_rxq_release(dev, cur_queue);
return -rte_errno;
}
if (peer_info->manual_bind !=
@@ -551,7 +547,6 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
rte_errno = EINVAL;
DRV_LOG(ERR, "port %u Rx queue %d and peer binding mode"
" mismatch", dev->data->port_id, cur_queue);
- mlx5_rxq_release(dev, cur_queue);
return -rte_errno;
}
rq_attr.state = MLX5_SQC_STATE_RDY;
@@ -561,7 +556,6 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
if (ret == 0)
rxq_ctrl->hairpin_status = 1;
- mlx5_rxq_release(dev, cur_queue);
}
return ret;
}
@@ -626,34 +620,32 @@ mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue,
txq_ctrl->hairpin_status = 0;
mlx5_txq_release(dev, cur_queue);
} else {
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, cur_queue);
struct mlx5_rxq_ctrl *rxq_ctrl;
struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
- rxq_ctrl = mlx5_rxq_get(dev, cur_queue);
- if (rxq_ctrl == NULL) {
+ if (rxq == NULL) {
rte_errno = EINVAL;
DRV_LOG(ERR, "Failed to get port %u Rx queue %d",
dev->data->port_id, cur_queue);
return -rte_errno;
}
+ rxq_ctrl = rxq->ctrl;
if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
rte_errno = EINVAL;
DRV_LOG(ERR, "port %u queue %d not a hairpin Rxq",
dev->data->port_id, cur_queue);
- mlx5_rxq_release(dev, cur_queue);
return -rte_errno;
}
if (rxq_ctrl->hairpin_status == 0) {
DRV_LOG(DEBUG, "port %u Rx queue %d is already unbound",
dev->data->port_id, cur_queue);
- mlx5_rxq_release(dev, cur_queue);
return 0;
}
if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {
rte_errno = ENOMEM;
DRV_LOG(ERR, "port %u no Rxq object found: %d",
dev->data->port_id, cur_queue);
- mlx5_rxq_release(dev, cur_queue);
return -rte_errno;
}
rq_attr.state = MLX5_SQC_STATE_RST;
@@ -661,7 +653,6 @@ mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue,
ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
if (ret == 0)
rxq_ctrl->hairpin_status = 0;
- mlx5_rxq_release(dev, cur_queue);
}
return ret;
}
@@ -963,7 +954,6 @@ mlx5_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *txq_ctrl;
- struct mlx5_rxq_ctrl *rxq_ctrl;
uint32_t i;
uint16_t pp;
uint32_t bits[(RTE_MAX_ETHPORTS + 31) / 32] = {0};
@@ -992,24 +982,23 @@ mlx5_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
}
} else {
for (i = 0; i < priv->rxqs_n; i++) {
- rxq_ctrl = mlx5_rxq_get(dev, i);
- if (!rxq_ctrl)
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+
+ if (rxq == NULL)
continue;
- if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
- mlx5_rxq_release(dev, i);
+ rxq_ctrl = rxq->ctrl;
+ if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN)
continue;
- }
pp = rxq_ctrl->hairpin_conf.peers[0].port;
if (pp >= RTE_MAX_ETHPORTS) {
rte_errno = ERANGE;
- mlx5_rxq_release(dev, i);
DRV_LOG(ERR, "port %hu queue %u peer port "
"out of range %hu",
priv->dev_data->port_id, i, pp);
return -rte_errno;
}
bits[pp / 32] |= 1 << (pp % 32);
- mlx5_rxq_release(dev, i);
}
}
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
--
2.33.0