From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 3292BA04BA; Thu, 1 Oct 2020 16:11:21 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 78E821DB7F; Thu, 1 Oct 2020 16:10:39 +0200 (CEST) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 718891BF7B for ; Thu, 1 Oct 2020 16:10:37 +0200 (CEST) Received: from Internal Mail-Server by MTLPINE1 (envelope-from michaelba@nvidia.com) with SMTP; 1 Oct 2020 17:10:31 +0300 Received: from nvidia.com (pegasus07.mtr.labs.mlnx [10.210.16.112]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 091EAAEJ012743; Thu, 1 Oct 2020 17:10:31 +0300 From: Michael Baum To: dev@dpdk.org Cc: Matan Azrad , Raslan Darawsheh , Viacheslav Ovsiienko Date: Thu, 1 Oct 2020 14:09:14 +0000 Message-Id: <1601561366-1821-4-git-send-email-michaelba@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1601561366-1821-1-git-send-email-michaelba@nvidia.com> References: <1601561366-1821-1-git-send-email-michaelba@nvidia.com> Subject: [dpdk-dev] [PATCH v1 03/15] net/mlx5: mitigate Tx queue reference counters X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" The Tx queue structures manage 2 different reference counters per queue: the txq_ctrl reference counter and the txq_obj reference counter. There is no real need to use two different counters, it just complicates the release functions. Remove the txq_obj counter and use only the txq_ctrl counter. 
Signed-off-by: Michael Baum Acked-by: Matan Azrad --- drivers/net/mlx5/mlx5_rxtx.h | 4 +- drivers/net/mlx5/mlx5_txq.c | 98 ++++++++++++++------------------------------ 2 files changed, 32 insertions(+), 70 deletions(-) diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h index 9ffa028..d947e0e 100644 --- a/drivers/net/mlx5/mlx5_rxtx.h +++ b/drivers/net/mlx5/mlx5_rxtx.h @@ -276,7 +276,6 @@ enum mlx5_txq_type { /* Verbs/DevX Tx queue elements. */ struct mlx5_txq_obj { LIST_ENTRY(mlx5_txq_obj) next; /* Pointer to the next element. */ - rte_atomic32_t refcnt; /* Reference counter. */ struct mlx5_txq_ctrl *txq_ctrl; /* Pointer to the control queue. */ enum mlx5_txq_obj_type type; /* The txq object type. */ RTE_STD_C11 @@ -405,8 +404,7 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, void mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev); struct mlx5_txq_obj *mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx, enum mlx5_txq_obj_type type); -struct mlx5_txq_obj *mlx5_txq_obj_get(struct rte_eth_dev *dev, uint16_t idx); -int mlx5_txq_obj_release(struct mlx5_txq_obj *txq_ibv); +void mlx5_txq_obj_release(struct mlx5_txq_obj *txq_obj); int mlx5_txq_obj_verify(struct rte_eth_dev *dev); struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, unsigned int socket, diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index ef3137b..e8bf7d7 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -437,6 +437,7 @@ mlx5_txq_release(dev, idx); return 0; } + /** * DPDK callback to configure a TX queue. 
* @@ -833,7 +834,6 @@ } DRV_LOG(DEBUG, "port %u sxq %u updated with %p", dev->data->port_id, idx, (void *)&tmpl); - rte_atomic32_inc(&tmpl->refcnt); LIST_INSERT_HEAD(&priv->txqsobj, tmpl, next); return tmpl; } @@ -1126,7 +1126,6 @@ txq_ctrl->bf_reg = reg_addr; txq_ctrl->uar_mmap_offset = mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar); - rte_atomic32_set(&txq_obj->refcnt, 1); txq_uar_init(txq_ctrl); LIST_INSERT_HEAD(&priv->txqsobj, txq_obj, next); return txq_obj; @@ -1360,7 +1359,6 @@ struct mlx5_txq_obj * #endif txq_obj->qp = tmpl.qp; txq_obj->cq = tmpl.cq; - rte_atomic32_inc(&txq_obj->refcnt); txq_ctrl->bf_reg = qp.bf.reg; if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) { txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset; @@ -1397,64 +1395,30 @@ struct mlx5_txq_obj * } /** - * Get an Tx queue Verbs object. - * - * @param dev - * Pointer to Ethernet device. - * @param idx - * Queue index in DPDK Tx queue array. - * - * @return - * The Verbs object if it exists. - */ -struct mlx5_txq_obj * -mlx5_txq_obj_get(struct rte_eth_dev *dev, uint16_t idx) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_txq_ctrl *txq_ctrl; - - if (idx >= priv->txqs_n) - return NULL; - if (!(*priv->txqs)[idx]) - return NULL; - txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq); - if (txq_ctrl->obj) - rte_atomic32_inc(&txq_ctrl->obj->refcnt); - return txq_ctrl->obj; -} - -/** * Release an Tx verbs queue object. * * @param txq_obj - * Verbs Tx queue object. - * - * @return - * 1 while a reference on it exists, 0 when freed. + * Verbs Tx queue object.. 
*/ -int +void mlx5_txq_obj_release(struct mlx5_txq_obj *txq_obj) { MLX5_ASSERT(txq_obj); - if (rte_atomic32_dec_and_test(&txq_obj->refcnt)) { - if (txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN) { - if (txq_obj->tis) - claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis)); - } else if (txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_SQ) { - txq_release_sq_resources(txq_obj); - } else { - claim_zero(mlx5_glue->destroy_qp(txq_obj->qp)); - claim_zero(mlx5_glue->destroy_cq(txq_obj->cq)); - } - if (txq_obj->txq_ctrl->txq.fcqs) { - mlx5_free(txq_obj->txq_ctrl->txq.fcqs); - txq_obj->txq_ctrl->txq.fcqs = NULL; - } - LIST_REMOVE(txq_obj, next); - mlx5_free(txq_obj); - return 0; + if (txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN) { + if (txq_obj->tis) + claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis)); + } else if (txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_SQ) { + txq_release_sq_resources(txq_obj); + } else { + claim_zero(mlx5_glue->destroy_qp(txq_obj->qp)); + claim_zero(mlx5_glue->destroy_cq(txq_obj->cq)); } - return 1; + if (txq_obj->txq_ctrl->txq.fcqs) { + mlx5_free(txq_obj->txq_ctrl->txq.fcqs); + txq_obj->txq_ctrl->txq.fcqs = NULL; + } + LIST_REMOVE(txq_obj, next); + mlx5_free(txq_obj); } /** @@ -1967,12 +1931,11 @@ struct mlx5_txq_ctrl * mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx) { struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_txq_data *txq_data = (*priv->txqs)[idx]; struct mlx5_txq_ctrl *ctrl = NULL; - if ((*priv->txqs)[idx]) { - ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, - txq); - mlx5_txq_obj_get(dev, idx); + if (txq_data) { + ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq); rte_atomic32_inc(&ctrl->refcnt); } return ctrl; @@ -1998,18 +1961,19 @@ struct mlx5_txq_ctrl * if (!(*priv->txqs)[idx]) return 0; txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq); - if (txq->obj && !mlx5_txq_obj_release(txq->obj)) + if (!rte_atomic32_dec_and_test(&txq->refcnt)) + return 1; + if (txq->obj) { + 
mlx5_txq_obj_release(txq->obj); txq->obj = NULL; - if (rte_atomic32_dec_and_test(&txq->refcnt)) { - txq_free_elts(txq); - mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh); - LIST_REMOVE(txq, next); - mlx5_free(txq); - (*priv->txqs)[idx] = NULL; - dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED; - return 0; } - return 1; + txq_free_elts(txq); + mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh); + LIST_REMOVE(txq, next); + mlx5_free(txq); + (*priv->txqs)[idx] = NULL; + dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; } /** -- 1.8.3.1