From: Michael Baum <michaelba@nvidia.com>
To: dev@dpdk.org
Cc: Matan Azrad <matan@nvidia.com>,
Raslan Darawsheh <rasland@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Subject: [dpdk-dev] [PATCH v1 15/18] net/mlx5: share Rx queue indirection table code
Date: Thu, 3 Sep 2020 10:13:46 +0000
Message-ID: <1599128029-2092-16-git-send-email-michaelba@nvidia.com>
In-Reply-To: <1599128029-2092-1-git-send-email-michaelba@nvidia.com>
Move the Rx indirection table object resource allocations that are similar
in the DevX and Verbs modules to a shared location.
Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
drivers/net/mlx5/linux/mlx5_verbs.c | 75 ++++++++++++++----------------------
drivers/net/mlx5/mlx5.h | 7 ++--
drivers/net/mlx5/mlx5_devx.c | 76 ++++++++++++++-----------------------
drivers/net/mlx5/mlx5_rxq.c | 56 ++++++++++++++++++++++++++-
drivers/net/mlx5/mlx5_rxtx.h | 3 ++
5 files changed, 117 insertions(+), 100 deletions(-)
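After this change, allocation, queue referencing and list registration happen
once in the shared mlx5_ind_table_obj_new(), and each backend callback
(ind_table_new) only creates its own hardware object. A minimal standalone
sketch of that split, using simplified hypothetical types in place of the
driver's real structures:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins for mlx5_ind_table_obj/mlx5_obj_ops. */
struct ind_tbl {
	uint32_t queues_n;
	uint16_t queues[];
};

struct obj_ops {
	/* Backend hook: build only the Verbs/DevX HW object. */
	int (*ind_table_new)(unsigned int log_n, struct ind_tbl *tbl);
};

/* Shared constructor: generic bookkeeping lives here, mirroring
 * what mlx5_ind_table_obj_new() now does in mlx5_rxq.c. */
static struct ind_tbl *
ind_tbl_new(const struct obj_ops *ops, const uint16_t *queues,
	    uint32_t queues_n, unsigned int log_n)
{
	struct ind_tbl *tbl = calloc(1, sizeof(*tbl) +
				     queues_n * sizeof(uint16_t));

	if (tbl == NULL)
		return NULL;
	tbl->queues_n = queues_n;
	memcpy(tbl->queues, queues, queues_n * sizeof(uint16_t));
	if (ops->ind_table_new(log_n, tbl) < 0) {
		free(tbl);
		return NULL;
	}
	return tbl;
}

With this shape, the queue-release error path exists once instead of being
duplicated in both backends.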
diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index 6eef85e..be810b1 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -441,67 +441,49 @@
}
/**
- * Create an indirection table.
+ * Create a Verbs indirection table as a field of the generic indirection
+ * table object.
*
* @param dev
* Pointer to Ethernet device.
- * @param queues
- * Queues entering in the indirection table.
- * @param queues_n
- * Number of queues in the array.
+ * @param log_n
+ * Log of the number of queues in the array.
+ * @param ind_tbl
+ * Verbs indirection table object.
*
* @return
- * The Verbs object initialized, NULL otherwise and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static struct mlx5_ind_table_obj *
-mlx5_ibv_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
- uint32_t queues_n)
+static int
+mlx5_ibv_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
+ struct mlx5_ind_table_obj *ind_tbl)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_obj *ind_tbl;
- const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
- log2above(queues_n) :
- log2above(priv->config.ind_table_max_size);
- struct ibv_wq *wq[1 << wq_n];
- unsigned int i = 0, j = 0, k = 0;
+ struct ibv_wq *wq[1 << log_n];
+ unsigned int i, j;
- ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
- queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
- if (!ind_tbl) {
- rte_errno = ENOMEM;
- return NULL;
- }
- for (i = 0; i != queues_n; ++i) {
- struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
- if (!rxq)
- goto error;
- wq[i] = rxq->obj->wq;
- ind_tbl->queues[i] = queues[i];
+ MLX5_ASSERT(ind_tbl);
+ for (i = 0; i != ind_tbl->queues_n; ++i) {
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[ind_tbl->queues[i]];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+
+ wq[i] = rxq_ctrl->obj->wq;
}
- ind_tbl->queues_n = queues_n;
+ MLX5_ASSERT(i > 0);
/* Finalise indirection table. */
- k = i; /* Retain value of i for use in error case. */
- for (j = 0; k != (unsigned int)(1 << wq_n); ++k, ++j)
- wq[k] = wq[j];
+ for (j = 0; i != (unsigned int)(1 << log_n); ++j, ++i)
+ wq[i] = wq[j];
ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table(priv->sh->ctx,
&(struct ibv_rwq_ind_table_init_attr){
- .log_ind_tbl_size = wq_n,
+ .log_ind_tbl_size = log_n,
.ind_tbl = wq,
.comp_mask = 0,
});
if (!ind_tbl->ind_table) {
rte_errno = errno;
- goto error;
+ return -rte_errno;
}
- rte_atomic32_inc(&ind_tbl->refcnt);
- LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
- return ind_tbl;
-error:
- for (j = 0; j < i; j++)
- mlx5_rxq_release(dev, ind_tbl->queues[j]);
- mlx5_free(ind_tbl);
- DEBUG("Port %u cannot create indirection table.", dev->data->port_id);
- return NULL;
+ return 0;
}
/**
@@ -511,7 +493,7 @@
* Indirection table to release.
*/
static void
-mlx5_ibv_ind_table_obj_destroy(struct mlx5_ind_table_obj *ind_tbl)
+mlx5_ibv_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
}
@@ -555,8 +537,7 @@
queues_n = hash_fields ? queues_n : 1;
ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
- ind_tbl = priv->obj_ops->ind_table_obj_new(dev, queues,
- queues_n);
+ ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
if (!ind_tbl) {
rte_errno = ENOMEM;
return 0;
@@ -672,8 +653,8 @@ struct mlx5_obj_ops ibv_obj_ops = {
.rxq_event_get = mlx5_rx_ibv_get_event,
.rxq_obj_modify = mlx5_ibv_modify_wq,
.rxq_obj_release = mlx5_rxq_ibv_obj_release,
- .ind_table_obj_new = mlx5_ibv_ind_table_obj_new,
- .ind_table_obj_destroy = mlx5_ibv_ind_table_obj_destroy,
+ .ind_table_new = mlx5_ibv_ind_table_new,
+ .ind_table_destroy = mlx5_ibv_ind_table_destroy,
.hrxq_new = mlx5_ibv_hrxq_new,
.hrxq_destroy = mlx5_ibv_qp_destroy,
};
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 9594856..12017e8 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -742,10 +742,9 @@ struct mlx5_obj_ops {
int (*rxq_event_get)(struct mlx5_rxq_obj *rxq_obj);
int (*rxq_obj_modify)(struct mlx5_rxq_obj *rxq_obj, bool is_start);
void (*rxq_obj_release)(struct mlx5_rxq_obj *rxq_obj);
- struct mlx5_ind_table_obj *(*ind_table_obj_new)(struct rte_eth_dev *dev,
- const uint16_t *queues,
- uint32_t queues_n);
- void (*ind_table_obj_destroy)(struct mlx5_ind_table_obj *ind_tbl);
+ int (*ind_table_new)(struct rte_eth_dev *dev, const unsigned int log_n,
+ struct mlx5_ind_table_obj *ind_tbl);
+ void (*ind_table_destroy)(struct mlx5_ind_table_obj *ind_tbl);
uint32_t (*hrxq_new)(struct rte_eth_dev *dev, const uint8_t *rss_key,
uint32_t rss_key_len, uint64_t hash_fields,
const uint16_t *queues, uint32_t queues_n,
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index 5fa41f1..ebc3929 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -609,76 +609,57 @@
}
/**
- * Create an indirection table.
+ * Create an RQT using the DevX API, as a field of the generic indirection
+ * table object.
*
* @param dev
* Pointer to Ethernet device.
- * @param queues
- * Queues entering in the indirection table.
- * @param queues_n
- * Number of queues in the array.
+ * @param log_n
+ * Log of the number of queues in the array.
+ * @param ind_tbl
+ * DevX indirection table object.
*
* @return
- * The DevX object initialized, NULL otherwise and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static struct mlx5_ind_table_obj *
-mlx5_devx_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
- uint32_t queues_n)
+static int
+mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
+ struct mlx5_ind_table_obj *ind_tbl)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_obj *ind_tbl;
struct mlx5_devx_rqt_attr *rqt_attr = NULL;
- const unsigned int rqt_n = 1 << (rte_is_power_of_2(queues_n) ?
- log2above(queues_n) :
- log2above(priv->config.ind_table_max_size));
- unsigned int i = 0, j = 0, k = 0;
+ const unsigned int rqt_n = 1 << log_n;
+ unsigned int i, j;
- ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
- queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
- if (!ind_tbl) {
- rte_errno = ENOMEM;
- return NULL;
- }
+ MLX5_ASSERT(ind_tbl);
rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
if (!rqt_attr) {
DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
dev->data->port_id);
rte_errno = ENOMEM;
- goto error;
+ return -rte_errno;
}
rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
rqt_attr->rqt_actual_size = rqt_n;
- for (i = 0; i != queues_n; ++i) {
- struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
- if (!rxq) {
- mlx5_free(rqt_attr);
- goto error;
- }
- rqt_attr->rq_list[i] = rxq->obj->rq->id;
- ind_tbl->queues[i] = queues[i];
+ for (i = 0; i != ind_tbl->queues_n; ++i) {
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[ind_tbl->queues[i]];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+
+ rqt_attr->rq_list[i] = rxq_ctrl->obj->rq->id;
}
- k = i; /* Retain value of i for use in error case. */
- for (j = 0; k != rqt_n; ++k, ++j)
- rqt_attr->rq_list[k] = rqt_attr->rq_list[j];
+ MLX5_ASSERT(i > 0);
+ for (j = 0; i != rqt_n; ++j, ++i)
+ rqt_attr->rq_list[i] = rqt_attr->rq_list[j];
ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr);
mlx5_free(rqt_attr);
if (!ind_tbl->rqt) {
DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
dev->data->port_id);
rte_errno = errno;
- goto error;
+ return -rte_errno;
}
- ind_tbl->queues_n = queues_n;
- rte_atomic32_inc(&ind_tbl->refcnt);
- LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
- return ind_tbl;
-error:
- for (j = 0; j < i; j++)
- mlx5_rxq_release(dev, ind_tbl->queues[j]);
- mlx5_free(ind_tbl);
- DEBUG("Port %u cannot create indirection table.", dev->data->port_id);
- return NULL;
+ return 0;
}
/**
@@ -688,7 +669,7 @@
* Indirection table to release.
*/
static void
-mlx5_devx_ind_table_obj_destroy(struct mlx5_ind_table_obj *ind_tbl)
+mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
}
@@ -738,8 +719,7 @@
queues_n = hash_fields ? queues_n : 1;
ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
- ind_tbl = priv->obj_ops->ind_table_obj_new(dev, queues,
- queues_n);
+ ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
if (!ind_tbl) {
rte_errno = ENOMEM;
return 0;
@@ -852,8 +832,8 @@ struct mlx5_obj_ops devx_obj_ops = {
.rxq_event_get = mlx5_rx_devx_get_event,
.rxq_obj_modify = mlx5_devx_modify_rq,
.rxq_obj_release = mlx5_rxq_devx_obj_release,
- .ind_table_obj_new = mlx5_devx_ind_table_obj_new,
- .ind_table_obj_destroy = mlx5_devx_ind_table_obj_destroy,
+ .ind_table_new = mlx5_devx_ind_table_new,
+ .ind_table_destroy = mlx5_devx_ind_table_destroy,
.hrxq_new = mlx5_devx_hrxq_new,
.hrxq_destroy = mlx5_devx_tir_destroy,
};
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index d84dfe1..c353139 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1762,7 +1762,7 @@ struct mlx5_ind_table_obj *
unsigned int i;
if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
- priv->obj_ops->ind_table_obj_destroy(ind_tbl);
+ priv->obj_ops->ind_table_destroy(ind_tbl);
for (i = 0; i != ind_tbl->queues_n; ++i)
claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
if (!rte_atomic32_read(&ind_tbl->refcnt)) {
@@ -1799,6 +1799,60 @@ struct mlx5_ind_table_obj *
}
/**
+ * Create an indirection table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param queues
+ * Queues entering in the indirection table.
+ * @param queues_n
+ * Number of queues in the array.
+ *
+ * @return
+ * The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_ind_table_obj *
+mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
+ uint32_t queues_n)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_ind_table_obj *ind_tbl;
+ const unsigned int n = rte_is_power_of_2(queues_n) ?
+ log2above(queues_n) :
+ log2above(priv->config.ind_table_max_size);
+ unsigned int i, j;
+ int ret;
+
+ ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
+ queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
+ if (!ind_tbl) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ ind_tbl->queues_n = queues_n;
+ for (i = 0; i != queues_n; ++i) {
+ struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
+ if (!rxq)
+ goto error;
+ ind_tbl->queues[i] = queues[i];
+ }
+ ret = priv->obj_ops->ind_table_new(dev, n, ind_tbl);
+ if (ret < 0)
+ goto error;
+ rte_atomic32_inc(&ind_tbl->refcnt);
+ LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
+ return ind_tbl;
+error:
+ ret = rte_errno;
+ for (j = 0; j < i; j++)
+ mlx5_rxq_release(dev, ind_tbl->queues[j]);
+ rte_errno = ret;
+ mlx5_free(ind_tbl);
+ DEBUG("Port %u cannot create indirection table.", dev->data->port_id);
+ return NULL;
+}
+
+/**
* Get an Rx Hash queue.
*
* @param dev
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 14a3535..237344f 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -365,6 +365,9 @@ struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
+struct mlx5_ind_table_obj *mlx5_ind_table_obj_new(struct rte_eth_dev *dev,
+ const uint16_t *queues,
+ uint32_t queues_n);
struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,
const uint16_t *queues,
uint32_t queues_n);
--
1.8.3.1
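A side note on the "Finalise indirection table" loops kept in both backends:
the table size must be a power of two, so the configured queues are
replicated round-robin until all (1 << log_n) entries are filled. A
standalone illustration of that behavior, with a hypothetical helper rather
than code from the patch:

#include <stdio.h>

/* Replicate the first queues_n entries round-robin until the table
 * holds 1 << log_n entries, as the backend "finalise" loops do. */
static void
fill_ind_table(unsigned int *tbl, unsigned int queues_n, unsigned int log_n)
{
	unsigned int i, j;

	for (i = queues_n, j = 0; i != (1u << log_n); ++i, ++j)
		tbl[i] = tbl[j];
}

int
main(void)
{
	unsigned int tbl[8] = { 10, 11, 12 }; /* 3 queues, table size 8. */
	unsigned int i;

	fill_ind_table(tbl, 3, 3);
	for (i = 0; i < 8; ++i)
		printf("%u ", tbl[i]); /* 10 11 12 10 11 12 10 11 */
	printf("\n");
	return 0;
}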