From mboxrd@z Thu Jan  1 00:00:00 1970
From: Matan Azrad <matan@mellanox.com>
To: Ferruh Yigit, Shahaf Shuler, Yongseok Koh, Viacheslav Ovsiienko
Cc: dev@dpdk.org, Dekel Peled
Date: Mon, 22 Jul 2019 14:52:12 +0000
Message-Id: <1563807145-16577-16-git-send-email-matan@mellanox.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1563807145-16577-1-git-send-email-matan@mellanox.com>
References: <1563786795-14027-1-git-send-email-matan@mellanox.com>
 <1563807145-16577-1-git-send-email-matan@mellanox.com>
Subject: [dpdk-dev] [PATCH v2 15/28] net/mlx5: rename verbs indirection table to obj

From: Dekel Peled

Prepare for the introduction of the DevX RQT object.
The Rx indirection table object is currently created using Verbs only.
The next patches will add the option to create an RQT object using DevX.
This patch renames ind_table_ibv to ind_table_obj wherever relevant,
and adds the DevX items to the relevant structs.

Signed-off-by: Dekel Peled
Acked-by: Matan Azrad
Acked-by: Viacheslav Ovsiienko
---
 drivers/net/mlx5/mlx5.c      |  2 +-
 drivers/net/mlx5/mlx5.h      |  4 +--
 drivers/net/mlx5/mlx5_rxq.c  | 64 ++++++++++++++++++++++----------------------
 drivers/net/mlx5/mlx5_rxtx.h | 20 ++++++++++----
 4 files changed, 50 insertions(+), 40 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 946a22b..da1a47f 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -819,7 +819,7 @@ struct mlx5_dev_spawn_data {
 	if (ret)
 		DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
 			dev->data->port_id);
-	ret = mlx5_ind_table_ibv_verify(dev);
+	ret = mlx5_ind_table_obj_verify(dev);
 	if (ret)
 		DRV_LOG(WARNING, "port %u some indirection table still remain",
 			dev->data->port_id);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 697e6c4..b080064 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -617,8 +617,8 @@ struct mlx5_priv {
 	LIST_HEAD(hrxq, mlx5_hrxq) hrxqs; /* Verbs Hash Rx queues. */
 	LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
 	LIST_HEAD(txqibv, mlx5_txq_ibv) txqsibv; /* Verbs Tx queues. */
-	/* Verbs Indirection tables. */
-	LIST_HEAD(ind_tables, mlx5_ind_table_ibv) ind_tbls;
+	/* Indirection tables. */
+	LIST_HEAD(ind_tables, mlx5_ind_table_obj) ind_tbls;
 	/* Pointer to next element. */
 	rte_atomic32_t refcnt; /**< Reference counter. */
 	struct ibv_flow_action *verbs_action;
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 20a4695..507a1ab 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1576,14 +1576,14 @@ struct mlx5_rxq_ctrl *
  *   Number of queues in the array.
  *
  * @return
- *   The Verbs object initialised, NULL otherwise and rte_errno is set.
+ *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
  */
-static struct mlx5_ind_table_ibv *
-mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
+static struct mlx5_ind_table_obj *
+mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
 		       uint32_t queues_n)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ind_table_ibv *ind_tbl;
+	struct mlx5_ind_table_obj *ind_tbl;
 	const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
 				  log2above(queues_n) :
 				  log2above(priv->config.ind_table_max_size);
@@ -1642,12 +1642,12 @@ struct mlx5_rxq_ctrl *
  * @return
  *   An indirection table if found.
  */
-static struct mlx5_ind_table_ibv *
-mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
+static struct mlx5_ind_table_obj *
+mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
 		       uint32_t queues_n)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ind_table_ibv *ind_tbl;
+	struct mlx5_ind_table_obj *ind_tbl;
 
 	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
 		if ((ind_tbl->queues_n == queues_n) &&
@@ -1678,8 +1678,8 @@ struct mlx5_rxq_ctrl *
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
-mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
-			   struct mlx5_ind_table_ibv *ind_tbl)
+mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
+			   struct mlx5_ind_table_obj *ind_tbl)
 {
 	unsigned int i;
 
@@ -1706,15 +1706,15 @@ struct mlx5_rxq_ctrl *
  *   The number of object not released.
  */
 int
-mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
+mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ind_table_ibv *ind_tbl;
+	struct mlx5_ind_table_obj *ind_tbl;
 	int ret = 0;
 
 	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
 		DRV_LOG(DEBUG,
-			"port %u Verbs indirection table %p still referenced",
+			"port %u indirection table obj %p still referenced",
 			dev->data->port_id, (void *)ind_tbl);
 		++ret;
 	}
@@ -1752,7 +1752,7 @@ struct mlx5_hrxq *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq;
-	struct mlx5_ind_table_ibv *ind_tbl;
+	struct mlx5_ind_table_obj *ind_tbl;
 	struct ibv_qp *qp;
 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
 	struct mlx5dv_qp_init_attr qp_init_attr;
@@ -1760,9 +1760,9 @@ struct mlx5_hrxq *
 	int err;
 
 	queues_n = hash_fields ? queues_n : 1;
-	ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
+	ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
 	if (!ind_tbl)
-		ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n);
+		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
 	if (!ind_tbl) {
 		rte_errno = ENOMEM;
 		return NULL;
@@ -1844,7 +1844,7 @@ struct mlx5_hrxq *
 	return hrxq;
 error:
 	err = rte_errno; /* Save rte_errno before cleanup. */
-	mlx5_ind_table_ibv_release(dev, ind_tbl);
+	mlx5_ind_table_obj_release(dev, ind_tbl);
 	if (qp)
 		claim_zero(mlx5_glue->destroy_qp(qp));
 	rte_errno = err; /* Restore rte_errno. */
@@ -1878,7 +1878,7 @@ struct mlx5_hrxq *
 
 	queues_n = hash_fields ? queues_n : 1;
 	LIST_FOREACH(hrxq, &priv->hrxqs, next) {
-		struct mlx5_ind_table_ibv *ind_tbl;
+		struct mlx5_ind_table_obj *ind_tbl;
 
 		if (hrxq->rss_key_len != rss_key_len)
 			continue;
@@ -1886,11 +1886,11 @@ struct mlx5_hrxq *
 			continue;
 		if (hrxq->hash_fields != hash_fields)
 			continue;
-		ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
+		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
 		if (!ind_tbl)
 			continue;
 		if (ind_tbl != hrxq->ind_table) {
-			mlx5_ind_table_ibv_release(dev, ind_tbl);
+			mlx5_ind_table_obj_release(dev, ind_tbl);
 			continue;
 		}
 		rte_atomic32_inc(&hrxq->refcnt);
@@ -1918,12 +1918,12 @@ struct mlx5_hrxq *
 		mlx5_glue->destroy_flow_action(hrxq->action);
 #endif
 		claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
-		mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
+		mlx5_ind_table_obj_release(dev, hrxq->ind_table);
 		LIST_REMOVE(hrxq, next);
 		rte_free(hrxq);
 		return 0;
 	}
-	claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table));
+	claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
 	return 1;
 }
 
@@ -2042,15 +2042,15 @@ struct mlx5_hrxq *
  *   Pointer to Ethernet device.
  *
  * @return
- *   The Verbs object initialised, NULL otherwise and rte_errno is set.
+ *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
  */
-static struct mlx5_ind_table_ibv *
-mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev)
+static struct mlx5_ind_table_obj *
+mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ind_table_ibv *ind_tbl;
+	struct mlx5_ind_table_obj *ind_tbl;
 	struct mlx5_rxq_obj *rxq;
-	struct mlx5_ind_table_ibv tmpl;
+	struct mlx5_ind_table_obj tmpl;
 
 	rxq = mlx5_rxq_obj_drop_new(dev);
 	if (!rxq)
@@ -2088,10 +2088,10 @@ struct mlx5_hrxq *
  *   Pointer to Ethernet device.
  */
 static void
-mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev)
+mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ind_table_ibv *ind_tbl = priv->drop_queue.hrxq->ind_table;
+	struct mlx5_ind_table_obj *ind_tbl = priv->drop_queue.hrxq->ind_table;
 
 	claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
 	mlx5_rxq_obj_drop_release(dev);
@@ -2112,7 +2112,7 @@ struct mlx5_hrxq *
 mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ind_table_ibv *ind_tbl;
+	struct mlx5_ind_table_obj *ind_tbl;
 	struct ibv_qp *qp;
 	struct mlx5_hrxq *hrxq;
 
@@ -2120,7 +2120,7 @@ struct mlx5_hrxq *
 		rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
 		return priv->drop_queue.hrxq;
 	}
-	ind_tbl = mlx5_ind_table_ibv_drop_new(dev);
+	ind_tbl = mlx5_ind_table_obj_drop_new(dev);
 	if (!ind_tbl)
 		return NULL;
 	qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
@@ -2168,7 +2168,7 @@ struct mlx5_hrxq *
 	return hrxq;
 error:
 	if (ind_tbl)
-		mlx5_ind_table_ibv_drop_release(dev);
+		mlx5_ind_table_obj_drop_release(dev);
 	return NULL;
 }
 
@@ -2189,7 +2189,7 @@ struct mlx5_hrxq *
 		mlx5_glue->destroy_flow_action(hrxq->action);
 #endif
 	claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
-	mlx5_ind_table_ibv_drop_release(dev);
+	mlx5_ind_table_obj_drop_release(dev);
 	rte_free(hrxq);
 	priv->drop_queue.hrxq = NULL;
 }
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index eb20a07..0e7b428 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -176,11 +176,21 @@ struct mlx5_rxq_ctrl {
 	uint16_t dump_file_n; /* Number of dump files. */
 };
 
+enum mlx5_ind_tbl_type {
+	MLX5_IND_TBL_TYPE_IBV,
+	MLX5_IND_TBL_TYPE_DEVX,
+};
+
 /* Indirection table. */
-struct mlx5_ind_table_ibv {
-	LIST_ENTRY(mlx5_ind_table_ibv) next; /* Pointer to the next element. */
+struct mlx5_ind_table_obj {
+	LIST_ENTRY(mlx5_ind_table_obj) next; /* Pointer to the next element. */
 	rte_atomic32_t refcnt; /* Reference counter. */
-	struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
+	enum mlx5_ind_tbl_type type;
+	RTE_STD_C11
+	union {
+		struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
+		struct mlx5_devx_obj *rqt; /* DevX RQT object. */
+	};
 	uint32_t queues_n; /**< Number of queues in the list. */
 	uint16_t queues[]; /**< Queue list. */
 };
@@ -189,7 +199,7 @@ struct mlx5_ind_table_ibv {
 struct mlx5_hrxq {
 	LIST_ENTRY(mlx5_hrxq) next; /* Pointer to the next element. */
 	rte_atomic32_t refcnt; /* Reference counter. */
-	struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
+	struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
 	struct ibv_qp *qp; /* Verbs queue pair. */
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	void *action; /* DV QP action pointer. */
@@ -320,7 +330,7 @@ struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
 int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
 int mlx5_rxq_verify(struct rte_eth_dev *dev);
 int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
-int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev);
+int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
 struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
 				const uint8_t *rss_key, uint32_t rss_key_len,
 				uint64_t hash_fields,
-- 
1.8.3.1
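
Note for readers of the archive: the tagged union added to struct mlx5_ind_table_obj is what lets later patches in this series create the table through either Verbs or DevX. The following minimal sketch (not part of this patch) illustrates how release code can dispatch on the new type field; the DevX branch and its use of mlx5_devx_cmd_destroy() belong to later patches in the series and are assumptions here.

/*
 * Illustrative sketch only, not part of this patch. Assumes the DevX
 * RQT is destroyed with mlx5_devx_cmd_destroy() as added later in the
 * series.
 */
static void
ind_table_obj_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
	if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV)
		/* Verbs object: release through the glue layer. */
		claim_zero(mlx5_glue->destroy_rwq_ind_table
						(ind_tbl->ind_table));
	else /* MLX5_IND_TBL_TYPE_DEVX */
		/* DevX object: release through a DevX destroy command. */
		claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
}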