From: Michael Baum <michaelba@nvidia.com>
To: dev@dpdk.org
Cc: Matan Azrad <matan@nvidia.com>,
Raslan Darawsheh <rasland@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Subject: [dpdk-dev] [PATCH v1 08/15] net/mlx5: share Tx control code
Date: Thu, 1 Oct 2020 14:09:19 +0000
Message-ID: <1601561366-1821-9-git-send-email-michaelba@nvidia.com>
In-Reply-To: <1601561366-1821-1-git-send-email-michaelba@nvidia.com>
Move the similar Tx object resource allocations and debug logs from the
DevX and Verbs modules to a shared location.
Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
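A minimal sketch of the pattern applied here, with hypothetical names
(txq_start_one(), backend_txq_obj_new()) rather than the driver's real
symbols: the shared control path allocates and owns the queue object
container, and the backend-specific constructor (Verbs or DevX) only
creates the hardware objects, returning 0 on success or a negative errno.

/*
 * Illustrative sketch only -- hypothetical names, not mlx5 driver code.
 * The shared caller allocates and frees the object container; the
 * backend callback creates HW objects and reports 0 or a negative errno.
 */
#include <errno.h>
#include <stdlib.h>

struct txq_obj { void *hw_handle; };
struct txq_ctrl { struct txq_obj *obj; };

/* Backend-specific constructor (stands in for Verbs/DevX creation). */
static int backend_txq_obj_new(struct txq_ctrl *ctrl)
{
	ctrl->obj->hw_handle = malloc(64); /* e.g. CQ/QP or SQ creation */
	return ctrl->obj->hw_handle != NULL ? 0 : -ENOMEM;
}

/* Shared control path: allocate once, delegate, roll back on failure. */
static int txq_start_one(struct txq_ctrl *ctrl)
{
	int ret;

	ctrl->obj = calloc(1, sizeof(*ctrl->obj));
	if (ctrl->obj == NULL)
		return -ENOMEM;
	ret = backend_txq_obj_new(ctrl);
	if (ret < 0) {
		free(ctrl->obj);
		ctrl->obj = NULL;
	}
	return ret;
}

int main(void)
{
	struct txq_ctrl ctrl = { .obj = NULL };

	return txq_start_one(&ctrl) == 0 ? 0 : 1;
}

In the actual patch this shared path lives in mlx5_txq_start() in
mlx5_trigger.c, which also takes over the FCQ array allocation that was
previously duplicated in both backends.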
drivers/net/mlx5/linux/mlx5_os.c | 4 +-
drivers/net/mlx5/linux/mlx5_verbs.c | 84 ++++++++++++-------------------------
drivers/net/mlx5/linux/mlx5_verbs.h | 3 +-
drivers/net/mlx5/mlx5.h | 3 +-
drivers/net/mlx5/mlx5_devx.c | 75 +++++++--------------------------
drivers/net/mlx5/mlx5_devx.h | 3 +-
drivers/net/mlx5/mlx5_trigger.c | 31 +++++++++++++-
drivers/net/mlx5/mlx5_txq.c | 28 ++++++++-----
8 files changed, 93 insertions(+), 138 deletions(-)
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index c5332a0..0db2b5a 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -520,9 +520,9 @@
* Queue index in DPDK Tx queue array.
*
* @return
- * The DevX/Verbs object initialized, NULL otherwise and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static struct mlx5_txq_obj *
+static int
mlx5_os_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index c79c4a2..5568c75 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -807,7 +807,7 @@
struct ibv_qp_init_attr_ex qp_attr = { 0 };
const int desc = 1 << txq_data->elts_n;
- MLX5_ASSERT(!txq_ctrl->obj);
+ MLX5_ASSERT(txq_ctrl->obj);
/* CQ to be associated with the send queue. */
qp_attr.send_cq = txq_obj->cq;
/* CQ to be associated with the receive queue. */
@@ -851,17 +851,16 @@
* Queue index in DPDK Tx queue array.
*
* @return
- * The Verbs object initialized, NULL otherwise and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-struct mlx5_txq_obj *
+int
mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq_data, struct mlx5_txq_ctrl, txq);
- struct mlx5_txq_obj tmpl;
- struct mlx5_txq_obj *txq_obj = NULL;
+ struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
struct ibv_qp_attr mod;
unsigned int cqe_n;
struct mlx5dv_qp qp;
@@ -871,26 +870,28 @@ struct mlx5_txq_obj *
int ret = 0;
MLX5_ASSERT(txq_data);
+ MLX5_ASSERT(txq_obj);
+ txq_obj->type = MLX5_TXQ_OBJ_TYPE_IBV;
+ txq_obj->txq_ctrl = txq_ctrl;
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
priv->verbs_alloc_ctx.obj = txq_ctrl;
if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
DRV_LOG(ERR, "Port %u MLX5_ENABLE_CQE_COMPRESSION "
"must never be set.", dev->data->port_id);
rte_errno = EINVAL;
- return NULL;
+ return -rte_errno;
}
- memset(&tmpl, 0, sizeof(struct mlx5_txq_obj));
cqe_n = desc / MLX5_TX_COMP_THRESH +
1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
- tmpl.cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);
- if (tmpl.cq == NULL) {
+ txq_obj->cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);
+ if (txq_obj->cq == NULL) {
DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
dev->data->port_id, idx);
rte_errno = errno;
goto error;
}
- tmpl.qp = mlx5_ibv_qp_new(dev, idx, &tmpl);
- if (tmpl.qp == NULL) {
+ txq_obj->qp = mlx5_ibv_qp_new(dev, idx, txq_obj);
+ if (txq_obj->qp == NULL) {
rte_errno = errno;
goto error;
}
@@ -900,7 +901,8 @@ struct mlx5_txq_obj *
/* IB device port number. */
.port_num = (uint8_t)priv->dev_port,
};
- ret = mlx5_glue->modify_qp(tmpl.qp, &mod, (IBV_QP_STATE | IBV_QP_PORT));
+ ret = mlx5_glue->modify_qp(txq_obj->qp, &mod,
+ (IBV_QP_STATE | IBV_QP_PORT));
if (ret) {
DRV_LOG(ERR,
"Port %u Tx queue %u QP state to IBV_QPS_INIT failed.",
@@ -911,7 +913,7 @@ struct mlx5_txq_obj *
mod = (struct ibv_qp_attr){
.qp_state = IBV_QPS_RTR
};
- ret = mlx5_glue->modify_qp(tmpl.qp, &mod, IBV_QP_STATE);
+ ret = mlx5_glue->modify_qp(txq_obj->qp, &mod, IBV_QP_STATE);
if (ret) {
DRV_LOG(ERR,
"Port %u Tx queue %u QP state to IBV_QPS_RTR failed.",
@@ -920,7 +922,7 @@ struct mlx5_txq_obj *
goto error;
}
mod.qp_state = IBV_QPS_RTS;
- ret = mlx5_glue->modify_qp(tmpl.qp, &mod, IBV_QP_STATE);
+ ret = mlx5_glue->modify_qp(txq_obj->qp, &mod, IBV_QP_STATE);
if (ret) {
DRV_LOG(ERR,
"Port %u Tx queue %u QP state to IBV_QPS_RTS failed.",
@@ -928,24 +930,15 @@ struct mlx5_txq_obj *
rte_errno = errno;
goto error;
}
- txq_obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
- sizeof(struct mlx5_txq_obj), 0,
- txq_ctrl->socket);
- if (!txq_obj) {
- DRV_LOG(ERR, "Port %u Tx queue %u cannot allocate memory.",
- dev->data->port_id, idx);
- rte_errno = ENOMEM;
- goto error;
- }
qp.comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
/* If using DevX, need additional mask to read tisn value. */
if (priv->sh->devx && !priv->sh->tdn)
qp.comp_mask |= MLX5DV_QP_MASK_RAW_QP_HANDLES;
#endif
- obj.cq.in = tmpl.cq;
+ obj.cq.in = txq_obj->cq;
obj.cq.out = &cq_info;
- obj.qp.in = tmpl.qp;
+ obj.qp.in = txq_obj->qp;
obj.qp.out = &qp;
ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
if (ret != 0) {
@@ -963,7 +956,7 @@ struct mlx5_txq_obj *
txq_data->cqe_n = log2above(cq_info.cqe_cnt);
txq_data->cqe_s = 1 << txq_data->cqe_n;
txq_data->cqe_m = txq_data->cqe_s - 1;
- txq_data->qp_num_8s = ((struct ibv_qp *)tmpl.qp)->qp_num << 8;
+ txq_data->qp_num_8s = ((struct ibv_qp *)txq_obj->qp)->qp_num << 8;
txq_data->wqes = qp.sq.buf;
txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
txq_data->wqe_s = 1 << txq_data->wqe_n;
@@ -978,15 +971,6 @@ struct mlx5_txq_obj *
txq_data->wqe_pi = 0;
txq_data->wqe_comp = 0;
txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
- txq_data->fcqs = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
- txq_data->cqe_s * sizeof(*txq_data->fcqs),
- RTE_CACHE_LINE_SIZE, txq_ctrl->socket);
- if (!txq_data->fcqs) {
- DRV_LOG(ERR, "Port %u Tx queue %u can't allocate memory (FCQ).",
- dev->data->port_id, idx);
- rte_errno = ENOMEM;
- goto error;
- }
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
/*
* If using DevX need to query and store TIS transport domain value.
@@ -994,7 +978,7 @@ struct mlx5_txq_obj *
* Will use this value on Rx, when creating matching TIR.
*/
if (priv->sh->devx && !priv->sh->tdn) {
- ret = mlx5_devx_cmd_qp_query_tis_td(tmpl.qp, qp.tisn,
+ ret = mlx5_devx_cmd_qp_query_tis_td(txq_obj->qp, qp.tisn,
&priv->sh->tdn);
if (ret) {
DRV_LOG(ERR, "Fail to query port %u Tx queue %u QP TIS "
@@ -1008,8 +992,6 @@ struct mlx5_txq_obj *
}
}
#endif
- txq_obj->qp = tmpl.qp;
- txq_obj->cq = tmpl.cq;
txq_ctrl->bf_reg = qp.bf.reg;
if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
@@ -1024,25 +1006,17 @@ struct mlx5_txq_obj *
goto error;
}
txq_uar_init(txq_ctrl);
- txq_obj->txq_ctrl = txq_ctrl;
- LIST_INSERT_HEAD(&priv->txqsobj, txq_obj, next);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
- return txq_obj;
+ return 0;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */
- if (tmpl.cq)
- claim_zero(mlx5_glue->destroy_cq(tmpl.cq));
- if (tmpl.qp)
- claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
- if (txq_data->fcqs) {
- mlx5_free(txq_data->fcqs);
- txq_data->fcqs = NULL;
- }
- if (txq_obj)
- mlx5_free(txq_obj);
+ if (txq_obj->cq)
+ claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
+ if (txq_obj->qp)
+ claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
rte_errno = ret; /* Restore rte_errno. */
- return NULL;
+ return -rte_errno;
}
/**
@@ -1057,12 +1031,6 @@ struct mlx5_txq_obj *
MLX5_ASSERT(txq_obj);
claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
- if (txq_obj->txq_ctrl->txq.fcqs) {
- mlx5_free(txq_obj->txq_ctrl->txq.fcqs);
- txq_obj->txq_ctrl->txq.fcqs = NULL;
- }
- LIST_REMOVE(txq_obj, next);
- mlx5_free(txq_obj);
}
struct mlx5_obj_ops ibv_obj_ops = {
diff --git a/drivers/net/mlx5/linux/mlx5_verbs.h b/drivers/net/mlx5/linux/mlx5_verbs.h
index 7f6bb99..0670f6c 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.h
+++ b/drivers/net/mlx5/linux/mlx5_verbs.h
@@ -12,8 +12,7 @@ struct mlx5_verbs_ops {
mlx5_dereg_mr_t dereg_mr;
};
-struct mlx5_txq_obj *mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev,
- uint16_t idx);
+int mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx);
void mlx5_txq_ibv_obj_release(struct mlx5_txq_obj *txq_obj);
/* Verbs ops struct */
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 8679750..3093f6e 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -789,8 +789,7 @@ struct mlx5_obj_ops {
void (*hrxq_destroy)(struct mlx5_hrxq *hrxq);
int (*drop_action_create)(struct rte_eth_dev *dev);
void (*drop_action_destroy)(struct rte_eth_dev *dev);
- struct mlx5_txq_obj *(*txq_obj_new)(struct rte_eth_dev *dev,
- uint16_t idx);
+ int (*txq_obj_new)(struct rte_eth_dev *dev, uint16_t idx);
void (*txq_obj_release)(struct mlx5_txq_obj *txq_obj);
};
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index 0b6e116..f3437a6 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -819,9 +819,9 @@
* Queue index in DPDK Tx queue array.
*
* @return
- * The hairpin DevX object initialized, NULL otherwise and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static struct mlx5_txq_obj *
+static int
mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
@@ -829,20 +829,11 @@
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq_data, struct mlx5_txq_ctrl, txq);
struct mlx5_devx_create_sq_attr attr = { 0 };
- struct mlx5_txq_obj *tmpl = NULL;
+ struct mlx5_txq_obj *tmpl = txq_ctrl->obj;
uint32_t max_wq_data;
MLX5_ASSERT(txq_data);
- MLX5_ASSERT(!txq_ctrl->obj);
- tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
- txq_ctrl->socket);
- if (!tmpl) {
- DRV_LOG(ERR,
- "Port %u Tx queue %u cannot allocate memory resources.",
- dev->data->port_id, txq_data->idx);
- rte_errno = ENOMEM;
- return NULL;
- }
+ MLX5_ASSERT(tmpl);
tmpl->type = MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN;
tmpl->txq_ctrl = txq_ctrl;
attr.hairpin = 1;
@@ -854,9 +845,8 @@
DRV_LOG(ERR, "Total data size %u power of 2 is "
"too large for hairpin.",
priv->config.log_hp_size);
- mlx5_free(tmpl);
rte_errno = ERANGE;
- return NULL;
+ return -rte_errno;
}
attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
} else {
@@ -874,14 +864,10 @@
DRV_LOG(ERR,
"Port %u tx hairpin queue %u can't create SQ object.",
dev->data->port_id, idx);
- mlx5_free(tmpl);
rte_errno = errno;
- return NULL;
+ return -rte_errno;
}
- DRV_LOG(DEBUG, "Port %u sxq %u updated with %p.", dev->data->port_id,
- idx, (void *)&tmpl);
- LIST_INSERT_HEAD(&priv->txqsobj, tmpl, next);
- return tmpl;
+ return 0;
}
#ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
@@ -1179,9 +1165,9 @@
* Queue index in DPDK Tx queue array.
*
* @return
- * The DevX object initialized, NULL otherwise and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-struct mlx5_txq_obj *
+int
mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
@@ -1195,27 +1181,17 @@ struct mlx5_txq_obj *
DRV_LOG(ERR, "Port %u Tx queue %u cannot create with DevX, no UAR.",
dev->data->port_id, idx);
rte_errno = ENOMEM;
- return NULL;
+ return -rte_errno;
#else
struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
- struct mlx5_txq_obj *txq_obj = NULL;
+ struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
void *reg_addr;
uint32_t cqe_n;
int ret = 0;
MLX5_ASSERT(txq_data);
- MLX5_ASSERT(!txq_ctrl->obj);
- txq_obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
- sizeof(struct mlx5_txq_obj), 0,
- txq_ctrl->socket);
- if (!txq_obj) {
- DRV_LOG(ERR,
- "Port %u Tx queue %u cannot allocate memory resources.",
- dev->data->port_id, txq_data->idx);
- rte_errno = ENOMEM;
- return NULL;
- }
+ MLX5_ASSERT(txq_obj);
txq_obj->type = MLX5_TXQ_OBJ_TYPE_DEVX_SQ;
txq_obj->txq_ctrl = txq_ctrl;
txq_obj->dev = dev;
@@ -1267,17 +1243,6 @@ struct mlx5_txq_obj *
dev->data->port_id, idx);
goto error;
}
- txq_data->fcqs = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
- txq_data->cqe_s * sizeof(*txq_data->fcqs),
- RTE_CACHE_LINE_SIZE,
- txq_ctrl->socket);
- if (!txq_data->fcqs) {
- DRV_LOG(ERR,
- "Port %u Tx queue %u cannot allocate memory (FCQ).",
- dev->data->port_id, idx);
- rte_errno = ENOMEM;
- goto error;
- }
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
/*
* If using DevX need to query and store TIS transport domain value.
@@ -1294,18 +1259,12 @@ struct mlx5_txq_obj *
txq_ctrl->uar_mmap_offset =
mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar);
txq_uar_init(txq_ctrl);
- LIST_INSERT_HEAD(&priv->txqsobj, txq_obj, next);
- return txq_obj;
+ return 0;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */
txq_release_devx_resources(txq_obj);
- if (txq_data->fcqs) {
- mlx5_free(txq_data->fcqs);
- txq_data->fcqs = NULL;
- }
- mlx5_free(txq_obj);
rte_errno = ret; /* Restore rte_errno. */
- return NULL;
+ return -rte_errno;
#endif
}
@@ -1327,12 +1286,6 @@ struct mlx5_txq_obj *
txq_release_devx_resources(txq_obj);
#endif
}
- if (txq_obj->txq_ctrl->txq.fcqs) {
- mlx5_free(txq_obj->txq_ctrl->txq.fcqs);
- txq_obj->txq_ctrl->txq.fcqs = NULL;
- }
- LIST_REMOVE(txq_obj, next);
- mlx5_free(txq_obj);
}
struct mlx5_obj_ops devx_obj_ops = {
diff --git a/drivers/net/mlx5/mlx5_devx.h b/drivers/net/mlx5/mlx5_devx.h
index 0bbbbc0..bc8a8d6 100644
--- a/drivers/net/mlx5/mlx5_devx.h
+++ b/drivers/net/mlx5/mlx5_devx.h
@@ -7,8 +7,7 @@
#include "mlx5.h"
-struct mlx5_txq_obj *mlx5_txq_devx_obj_new(struct rte_eth_dev *dev,
- uint16_t idx);
+int mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx);
void mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj);
extern struct mlx5_obj_ops devx_obj_ops;
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 6763042..e72e5fb 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -52,16 +52,45 @@
for (i = 0; i != priv->txqs_n; ++i) {
struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);
+ struct mlx5_txq_data *txq_data = &txq_ctrl->txq;
+ uint32_t flags = MLX5_MEM_RTE | MLX5_MEM_ZERO;
if (!txq_ctrl)
continue;
if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
txq_alloc_elts(txq_ctrl);
- txq_ctrl->obj = priv->obj_ops.txq_obj_new(dev, i);
+ MLX5_ASSERT(!txq_ctrl->obj);
+ txq_ctrl->obj = mlx5_malloc(flags, sizeof(struct mlx5_txq_obj),
+ 0, txq_ctrl->socket);
if (!txq_ctrl->obj) {
+ DRV_LOG(ERR, "Port %u Tx queue %u cannot allocate "
+ "memory resources.", dev->data->port_id,
+ txq_data->idx);
rte_errno = ENOMEM;
goto error;
}
+ ret = priv->obj_ops.txq_obj_new(dev, i);
+ if (ret < 0) {
+ mlx5_free(txq_ctrl->obj);
+ txq_ctrl->obj = NULL;
+ goto error;
+ }
+ if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
+ size_t size = txq_data->cqe_s * sizeof(*txq_data->fcqs);
+ txq_data->fcqs = mlx5_malloc(flags, size,
+ RTE_CACHE_LINE_SIZE,
+ txq_ctrl->socket);
+ if (!txq_data->fcqs) {
+ DRV_LOG(ERR, "Port %u Tx queue %u cannot "
+ "allocate memory (FCQ).",
+ dev->data->port_id, i);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ }
+ DRV_LOG(DEBUG, "Port %u txq %u updated with %p.",
+ dev->data->port_id, i, (void *)&txq_ctrl->obj);
+ LIST_INSERT_HEAD(&priv->txqsobj, txq_ctrl->obj, next);
}
return 0;
error:
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index c1d36c3..23213d9 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -1298,21 +1298,29 @@ struct mlx5_txq_ctrl *
mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_txq_ctrl *txq;
+ struct mlx5_txq_ctrl *txq_ctrl;
if (!(*priv->txqs)[idx])
return 0;
- txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- if (!rte_atomic32_dec_and_test(&txq->refcnt))
+ txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
+ if (!rte_atomic32_dec_and_test(&txq_ctrl->refcnt))
return 1;
- if (txq->obj) {
- priv->obj_ops.txq_obj_release(txq->obj);
- txq->obj = NULL;
+ if (txq_ctrl->obj) {
+ priv->obj_ops.txq_obj_release(txq_ctrl->obj);
+ LIST_REMOVE(txq_ctrl->obj, next);
+ mlx5_free(txq_ctrl->obj);
+ txq_ctrl->obj = NULL;
+ }
+ if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
+ if (txq_ctrl->txq.fcqs) {
+ mlx5_free(txq_ctrl->txq.fcqs);
+ txq_ctrl->txq.fcqs = NULL;
+ }
+ txq_free_elts(txq_ctrl);
+ mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
}
- txq_free_elts(txq);
- mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh);
- LIST_REMOVE(txq, next);
- mlx5_free(txq);
+ LIST_REMOVE(txq_ctrl, next);
+ mlx5_free(txq_ctrl);
(*priv->txqs)[idx] = NULL;
dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
--
1.8.3.1
Thread overview: 17+ messages
2020-10-01 14:09 [dpdk-dev] [PATCH v1 00/15] mlx5 Tx DevX/Verbs separation Michael Baum
2020-10-01 14:09 ` [dpdk-dev] [PATCH v1 01/15] net/mlx5: fix send queue doorbell typo Michael Baum
2020-10-01 14:09 ` [dpdk-dev] [PATCH v1 02/15] net/mlx5: fix unused variable in Txq creation Michael Baum
2020-10-01 14:09 ` [dpdk-dev] [PATCH v1 03/15] net/mlx5: mitigate Tx queue reference counters Michael Baum
2020-10-01 14:09 ` [dpdk-dev] [PATCH v1 04/15] net/mlx5: reorder Tx queue DevX object creation Michael Baum
2020-10-01 14:09 ` [dpdk-dev] [PATCH v1 05/15] net/mlx5: reorder Tx queue Verbs " Michael Baum
2020-10-01 14:09 ` [dpdk-dev] [PATCH v1 06/15] net/mlx5: reposition the event queue number field Michael Baum
2020-10-01 14:09 ` [dpdk-dev] [PATCH v1 07/15] net/mlx5: separate Tx queue object creations Michael Baum
2020-10-01 14:09 ` [dpdk-dev] [PATCH v1 08/15] net/mlx5: share Tx control code Michael Baum [this message]
2020-10-01 14:09 ` [dpdk-dev] [PATCH v1 09/15] net/mlx5: rearrange SQ and CQ creation in DevX module Michael Baum
2020-10-01 14:09 ` [dpdk-dev] [PATCH v1 10/15] net/mlx5: rearrange QP creation in Verbs module Michael Baum
2020-10-01 14:09 ` [dpdk-dev] [PATCH v1 11/15] net/mlx5: separate Tx queue object modification Michael Baum
2020-10-01 14:09 ` [dpdk-dev] [PATCH v1 12/15] net/mlx5: share " Michael Baum
2020-10-01 14:09 ` [dpdk-dev] [PATCH v1 13/15] net/mlx5: remove Tx queue object type field Michael Baum
2020-10-01 14:09 ` [dpdk-dev] [PATCH v1 14/15] net/mlx5: separate Rx queue state modification Michael Baum
2020-10-01 14:09 ` [dpdk-dev] [PATCH v1 15/15] net/mlx5: remove Rx queue object type field Michael Baum
2020-10-06 15:25 ` [dpdk-dev] [PATCH v1 00/15] mlx5 Tx DevX/Verbs separation Raslan Darawsheh