From: Xueming Li <xuemingl@nvidia.com>
To: <dev@dpdk.org>
Cc: <xuemingl@nvidia.com>, Lior Margalit <lmargalit@nvidia.com>,
Matan Azrad <matan@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Subject: [dpdk-dev] [PATCH v2 05/13] net/mlx5: split multi-packet RQ memory pool
Date: Sat, 16 Oct 2021 17:12:05 +0800
Message-ID: <20211016091214.1831902-6-xuemingl@nvidia.com>
In-Reply-To: <20211016091214.1831902-1-xuemingl@nvidia.com>
Port information is not visible from a shared Rx queue, so move the
Multi-Packet RQ (MPRQ) mempool from the device level to the Rx queue
level. The pool creation flag is also changed to single-consumer
(MEMPOOL_F_SC_GET), since each pool is now drained by a single queue.
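For context, a minimal sketch of the per-queue allocation pattern this
patch introduces (mprq_mp_create() is an illustrative helper, not part of
the patch; obj_num, obj_size and cache_size stand in for the values that
mlx5_mprq_alloc_mp() computes, and the buffer init callback is omitted):

    #include <stdio.h>
    #include <rte_mempool.h>

    /* Sketch only: create one MPRQ buffer pool owned by a single Rx queue. */
    static struct rte_mempool *
    mprq_mp_create(uint16_t port_id, uint16_t queue_id, unsigned int obj_num,
                   unsigned int obj_size, unsigned int cache_size,
                   int socket_id)
    {
            char name[RTE_MEMPOOL_NAMESIZE];

            /* The pool name now carries the queue index, not only the port. */
            snprintf(name, sizeof(name), "port-%u-queue-%hu-mprq",
                     port_id, queue_id);
            /* MEMPOOL_F_SC_GET: only this queue gets buffers from the pool. */
            return rte_mempool_create(name, obj_num, obj_size, cache_size, 0,
                                      NULL, NULL, NULL, NULL,
                                      socket_id, MEMPOOL_F_SC_GET);
    }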
Signed-off-by: Xueming Li <xuemingl@nvidia.com>
---
drivers/net/mlx5/mlx5.c | 1 -
drivers/net/mlx5/mlx5_rx.h | 4 +-
drivers/net/mlx5/mlx5_rxq.c | 109 ++++++++++++--------------------
drivers/net/mlx5/mlx5_trigger.c | 10 ++-
4 files changed, 47 insertions(+), 77 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 45ccfe27845..1033c29cb82 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1608,7 +1608,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
mlx5_drop_action_destroy(dev);
if (priv->mreg_cp_tbl)
mlx5_hlist_destroy(priv->mreg_cp_tbl);
- mlx5_mprq_free_mp(dev);
if (priv->sh->ct_mng)
mlx5_flow_aso_ct_mng_close(priv->sh);
mlx5_os_free_shared_dr(priv);
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index d44c8078dea..a8e0c3162b0 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -179,8 +179,8 @@ struct mlx5_rxq_ctrl {
extern uint8_t rss_hash_default_key[];
unsigned int mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data);
-int mlx5_mprq_free_mp(struct rte_eth_dev *dev);
-int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev);
+int mlx5_mprq_free_mp(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl);
+int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl);
int mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id);
int mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t queue_id);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 1cb99de1ae7..f29a8143967 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1087,7 +1087,7 @@ mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
}
/**
- * Free mempool of Multi-Packet RQ.
+ * Free the per-queue mempool of a Multi-Packet RQ.
*
* @param dev
* Pointer to Ethernet device.
@@ -1096,16 +1096,15 @@ mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
* 0 on success, negative errno value on failure.
*/
int
-mlx5_mprq_free_mp(struct rte_eth_dev *dev)
+mlx5_mprq_free_mp(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct rte_mempool *mp = priv->mprq_mp;
- unsigned int i;
+ struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
+ struct rte_mempool *mp = rxq->mprq_mp;
if (mp == NULL)
return 0;
- DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
- dev->data->port_id, mp->name);
+ DRV_LOG(DEBUG, "port %u queue %hu freeing mempool (%s) for Multi-Packet RQ",
+ dev->data->port_id, rxq->idx, mp->name);
/*
* If a buffer in the pool has been externally attached to a mbuf and it
* is still in use by application, destroying the Rx queue can spoil
@@ -1123,34 +1122,28 @@ mlx5_mprq_free_mp(struct rte_eth_dev *dev)
return -rte_errno;
}
rte_mempool_free(mp);
- /* Unset mempool for each Rx queue. */
- for (i = 0; i != priv->rxqs_n; ++i) {
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
-
- if (rxq == NULL)
- continue;
- rxq->mprq_mp = NULL;
- }
- priv->mprq_mp = NULL;
+ rxq->mprq_mp = NULL;
return 0;
}
/**
- * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
- * mempool. If already allocated, reuse it if there're enough elements.
+ * Allocate a per-queue mempool for Multi-Packet RQ.
+ * If one is already allocated, reuse it if it has enough elements.
* Otherwise, resize it.
*
* @param dev
* Pointer to Ethernet device.
+ * @param rxq_ctrl
+ * Pointer to the Rx queue control structure.
*
* @return
* 0 on success, negative errno value on failure.
*/
int
-mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
+mlx5_mprq_alloc_mp(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct rte_mempool *mp = priv->mprq_mp;
+ struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
+ struct rte_mempool *mp = rxq->mprq_mp;
char name[RTE_MEMPOOL_NAMESIZE];
unsigned int desc = 0;
unsigned int buf_len;
@@ -1158,28 +1151,15 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
unsigned int obj_size;
unsigned int strd_num_n = 0;
unsigned int strd_sz_n = 0;
- unsigned int i;
- unsigned int n_ibv = 0;
- if (!mlx5_mprq_enabled(dev))
+ if (rxq_ctrl == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
return 0;
- /* Count the total number of descriptors configured. */
- for (i = 0; i != priv->rxqs_n; ++i) {
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
- struct mlx5_rxq_ctrl *rxq_ctrl = container_of
- (rxq, struct mlx5_rxq_ctrl, rxq);
-
- if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
- continue;
- n_ibv++;
- desc += 1 << rxq->elts_n;
- /* Get the max number of strides. */
- if (strd_num_n < rxq->strd_num_n)
- strd_num_n = rxq->strd_num_n;
- /* Get the max size of a stride. */
- if (strd_sz_n < rxq->strd_sz_n)
- strd_sz_n = rxq->strd_sz_n;
- }
+ /* Number of descriptors configured. */
+ desc = 1 << rxq->elts_n;
+ /* Get the max number of strides. */
+ strd_num_n = rxq->strd_num_n;
+ /* Get the max size of a stride. */
+ strd_sz_n = rxq->strd_sz_n;
MLX5_ASSERT(strd_num_n && strd_sz_n);
buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
@@ -1196,7 +1176,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
* this Mempool gets available again.
*/
desc *= 4;
- obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
+ obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ;
/*
* rte_mempool_create_empty() has sanity check to refuse large cache
* size compared to the number of elements.
@@ -1209,50 +1189,41 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
DRV_LOG(DEBUG, "port %u mempool %s is being reused",
dev->data->port_id, mp->name);
/* Reuse. */
- goto exit;
- } else if (mp != NULL) {
- DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
- dev->data->port_id, mp->name);
+ return 0;
+ }
+ if (mp != NULL) {
+ DRV_LOG(DEBUG, "port %u queue %u mempool %s should be resized, freeing it",
+ dev->data->port_id, rxq->idx, mp->name);
/*
* If failed to free, which means it may be still in use, no way
* but to keep using the existing one. On buffer underrun,
* packets will be memcpy'd instead of external buffer
* attachment.
*/
- if (mlx5_mprq_free_mp(dev)) {
+ if (mlx5_mprq_free_mp(dev, rxq_ctrl) != 0) {
if (mp->elt_size >= obj_size)
- goto exit;
+ return 0;
else
return -rte_errno;
}
}
- snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
+ snprintf(name, sizeof(name), "port-%u-queue-%hu-mprq",
+ dev->data->port_id, rxq->idx);
mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
0, NULL, NULL, mlx5_mprq_buf_init,
- (void *)((uintptr_t)1 << strd_num_n),
- dev->device->numa_node, 0);
+ (void *)(((uintptr_t)1) << strd_num_n),
+ dev->device->numa_node, MEMPOOL_F_SC_GET);
if (mp == NULL) {
DRV_LOG(ERR,
- "port %u failed to allocate a mempool for"
+ "port %u queue %hu failed to allocate a mempool for"
" Multi-Packet RQ, count=%u, size=%u",
- dev->data->port_id, obj_num, obj_size);
+ dev->data->port_id, rxq->idx, obj_num, obj_size);
rte_errno = ENOMEM;
return -rte_errno;
}
- priv->mprq_mp = mp;
-exit:
- /* Set mempool for each Rx queue. */
- for (i = 0; i != priv->rxqs_n; ++i) {
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
- struct mlx5_rxq_ctrl *rxq_ctrl = container_of
- (rxq, struct mlx5_rxq_ctrl, rxq);
-
- if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
- continue;
- rxq->mprq_mp = mp;
- }
- DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
- dev->data->port_id);
+ rxq->mprq_mp = mp;
+ DRV_LOG(INFO, "port %u queue %hu Multi-Packet RQ is configured",
+ dev->data->port_id, rxq->idx);
return 0;
}
@@ -1717,8 +1688,10 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
}
if (!__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED)) {
- if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
+ if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
+ mlx5_mprq_free_mp(dev, rxq_ctrl);
+ }
LIST_REMOVE(rxq_ctrl, next);
mlx5_free(rxq_ctrl);
(*priv->rxqs)[idx] = NULL;
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index c3adf5082e6..0753dbad053 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -138,11 +138,6 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
unsigned int i;
int ret = 0;
- /* Allocate/reuse/resize mempool for Multi-Packet RQ. */
- if (mlx5_mprq_alloc_mp(dev)) {
- /* Should not release Rx queues but return immediately. */
- return -rte_errno;
- }
DRV_LOG(DEBUG, "Port %u device_attr.max_qp_wr is %d.",
dev->data->port_id, priv->sh->device_attr.max_qp_wr);
DRV_LOG(DEBUG, "Port %u device_attr.max_sge is %d.",
@@ -153,8 +148,11 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
if (!rxq_ctrl)
continue;
if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
- /* Pre-register Rx mempools. */
if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
+ /* Allocate/reuse/resize mempool for MPRQ. */
+ if (mlx5_mprq_alloc_mp(dev, rxq_ctrl) < 0)
+ goto error;
+ /* Pre-register Rx mempools. */
mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,
rxq_ctrl->rxq.mprq_mp);
} else {
--
2.33.0