From: Xueming Li <xuemingl@nvidia.com>
To: <dev@dpdk.org>
Cc: <xuemingl@nvidia.com>, Lior Margalit <lmargalit@nvidia.com>,
Matan Azrad <matan@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Subject: [dpdk-dev] [PATCH v2 06/13] net/mlx5: split Rx queue
Date: Sat, 16 Oct 2021 17:12:06 +0800
Message-ID: <20211016091214.1831902-7-xuemingl@nvidia.com>
In-Reply-To: <20211016091214.1831902-1-xuemingl@nvidia.com>

To prepare for shared Rx queues, split the rxq data into shareable and
private parts:
Struct mlx5_rxq_priv holds the per-queue private data.
Struct mlx5_rxq_ctrl holds the shareable queue resources and data.
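
For illustration only (not part of the patch): a minimal sketch of this
ownership model, using the same BSD <sys/queue.h> list macros the driver
uses. The rxq_ctrl_sketch/rxq_priv_sketch names and the main() harness are
hypothetical stand-ins for mlx5_rxq_ctrl/mlx5_rxq_priv, not driver code.

    /* Sketch: per-queue private data attached to a shared control
     * structure via an owners list, mirroring the fields added below.
     */
    #include <sys/queue.h>
    #include <stdint.h>
    #include <stdio.h>

    struct rxq_priv_sketch;

    struct rxq_ctrl_sketch {
            LIST_HEAD(, rxq_priv_sketch) owners; /* Queues sharing this ctrl. */
    };

    struct rxq_priv_sketch {
            uint16_t idx;                            /* Queue index. */
            struct rxq_ctrl_sketch *ctrl;            /* Shared control structure. */
            LIST_ENTRY(rxq_priv_sketch) owner_entry; /* Entry in ctrl->owners. */
    };

    int main(void)
    {
            struct rxq_ctrl_sketch ctrl;
            struct rxq_priv_sketch q0 = { .idx = 0 }, q1 = { .idx = 1 };
            struct rxq_priv_sketch *it;

            LIST_INIT(&ctrl.owners);
            q0.ctrl = &ctrl;
            LIST_INSERT_HEAD(&ctrl.owners, &q0, owner_entry);
            q1.ctrl = &ctrl;
            LIST_INSERT_HEAD(&ctrl.owners, &q1, owner_entry);
            LIST_FOREACH(it, &ctrl.owners, owner_entry)
                    printf("queue %u shares one ctrl\n", it->idx);
            return 0;
    }

With shared Rx queues, several private structs may sit on one ctrl's
owners list; in this patch each ctrl still has exactly one owner.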
Signed-off-by: Xueming Li <xuemingl@nvidia.com>
---
drivers/net/mlx5/mlx5.c | 4 +++
drivers/net/mlx5/mlx5.h | 5 ++-
drivers/net/mlx5/mlx5_ethdev.c | 10 ++++++
drivers/net/mlx5/mlx5_rx.h | 15 ++++++--
drivers/net/mlx5/mlx5_rxq.c | 66 ++++++++++++++++++++++++++++------
5 files changed, 86 insertions(+), 14 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 1033c29cb82..477ad8c1bc9 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1591,6 +1591,10 @@ mlx5_dev_close(struct rte_eth_dev *dev)
mlx5_free(dev->intr_handle);
dev->intr_handle = NULL;
}
+ if (priv->rxq_privs != NULL) {
+ mlx5_free(priv->rxq_privs);
+ priv->rxq_privs = NULL;
+ }
if (priv->txqs != NULL) {
/* XXX race condition if mlx5_tx_burst() is still running. */
rte_delay_us_sleep(1000);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 3581414b789..b18ddb0b0fa 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1335,6 +1335,8 @@ enum mlx5_txq_modify_type {
MLX5_TXQ_MOD_ERR2RDY, /* modify state from error to ready. */
};
+struct mlx5_rxq_priv;
+
/* HW objects operations structure. */
struct mlx5_obj_ops {
int (*rxq_obj_modify_vlan_strip)(struct mlx5_rxq_obj *rxq_obj, int on);
@@ -1404,7 +1406,8 @@ struct mlx5_priv {
/* RX/TX queues. */
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
- struct mlx5_rxq_data *(*rxqs)[]; /* RX queues. */
+ struct mlx5_rxq_priv *(*rxq_privs)[]; /* RX queue non-shared data. */
+ struct mlx5_rxq_data *(*rxqs)[]; /* (Shared) RX queues. */
struct mlx5_txq_data *(*txqs)[]; /* TX queues. */
struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
struct rte_eth_rss_conf rss_conf; /* RSS configuration. */
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 8ebfd0bccb3..ee1189b929d 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -104,6 +104,16 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
MLX5_RSS_HASH_KEY_LEN);
priv->rss_conf.rss_key_len = MLX5_RSS_HASH_KEY_LEN;
priv->rss_conf.rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
+ priv->rxq_privs = mlx5_realloc(priv->rxq_privs,
+ MLX5_MEM_ANY | MLX5_MEM_ZERO,
+ sizeof(void *) * rxqs_n, 0,
+ SOCKET_ID_ANY);
+ if (priv->rxq_privs == NULL) {
+ DRV_LOG(ERR, "port %u cannot allocate rxq private data",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
priv->rxqs = (void *)dev->data->rx_queues;
priv->txqs = (void *)dev->data->tx_queues;
if (txqs_n != priv->txqs_n) {
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index a8e0c3162b0..db6252e8e86 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -161,7 +161,9 @@ struct mlx5_rxq_ctrl {
struct mlx5_rxq_data rxq; /* Data path structure. */
LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
uint32_t refcnt; /* Reference counter. */
+ LIST_HEAD(priv, mlx5_rxq_priv) owners; /* Owner rxq list. */
struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
+ struct mlx5_dev_ctx_shared *sh; /* Shared context. */
struct mlx5_priv *priv; /* Back pointer to private data. */
enum mlx5_rxq_type type; /* Rxq type. */
unsigned int socket; /* CPU socket ID for allocations. */
@@ -174,6 +176,14 @@ struct mlx5_rxq_ctrl {
uint32_t hairpin_status; /* Hairpin binding status. */
};
+/* RX queue private data. */
+struct mlx5_rxq_priv {
+ uint16_t idx; /* Queue index. */
+ struct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */
+ LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
+ struct mlx5_priv *priv; /* Back pointer to private data. */
+};
+
/* mlx5_rxq.c */
extern uint8_t rss_hash_default_key[];
@@ -197,13 +207,14 @@ void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rxq_obj_verify(struct rte_eth_dev *dev);
-struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
+struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev,
+ struct mlx5_rxq_priv *rxq,
uint16_t desc, unsigned int socket,
const struct rte_eth_rxconf *conf,
const struct rte_eth_rxseg_split *rx_seg,
uint16_t n_seg);
struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
- (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ (struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, uint16_t desc,
const struct rte_eth_hairpin_conf *hairpin_conf);
struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index f29a8143967..acd77a7ecc1 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -674,6 +674,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
struct rte_mempool *mp)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_priv *rxq;
struct mlx5_rxq_ctrl *rxq_ctrl;
struct rte_eth_rxseg_split *rx_seg =
(struct rte_eth_rxseg_split *)conf->rx_seg;
@@ -708,10 +709,23 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
if (res)
return res;
- rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg, n_seg);
+ rxq = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, sizeof(*rxq), 0,
+ SOCKET_ID_ANY);
+ if (!rxq) {
+ DRV_LOG(ERR, "port %u unable to allocate rx queue index %u private data",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ rxq->priv = priv;
+ rxq->idx = idx;
+ (*priv->rxq_privs)[idx] = rxq;
+ rxq_ctrl = mlx5_rxq_new(dev, rxq, desc, socket, conf, rx_seg, n_seg);
if (!rxq_ctrl) {
- DRV_LOG(ERR, "port %u unable to allocate queue index %u",
+ DRV_LOG(ERR, "port %u unable to allocate rx queue index %u",
dev->data->port_id, idx);
+ mlx5_free(rxq);
+ (*priv->rxq_privs)[idx] = NULL;
rte_errno = ENOMEM;
return -rte_errno;
}
@@ -741,6 +755,7 @@ mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
const struct rte_eth_hairpin_conf *hairpin_conf)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_priv *rxq;
struct mlx5_rxq_ctrl *rxq_ctrl;
int res;
@@ -776,14 +791,27 @@ mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
return -rte_errno;
}
}
- rxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf);
+ rxq = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, sizeof(*rxq), 0,
+ SOCKET_ID_ANY);
+ if (!rxq) {
+ DRV_LOG(ERR, "port %u unable to allocate hairpin rx queue index %u private data",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ rxq->priv = priv;
+ rxq->idx = idx;
+ (*priv->rxq_privs)[idx] = rxq;
+ rxq_ctrl = mlx5_rxq_hairpin_new(dev, rxq, desc, hairpin_conf);
if (!rxq_ctrl) {
- DRV_LOG(ERR, "port %u unable to allocate queue index %u",
+ DRV_LOG(ERR, "port %u unable to allocate hairpin queue index %u",
dev->data->port_id, idx);
+ mlx5_free(rxq);
+ (*priv->rxq_privs)[idx] = NULL;
rte_errno = ENOMEM;
return -rte_errno;
}
- DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
+ DRV_LOG(DEBUG, "port %u adding hairpin Rx queue %u to list",
dev->data->port_id, idx);
(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
return 0;
@@ -1274,8 +1302,8 @@ mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
*
* @param dev
* Pointer to Ethernet device.
- * @param idx
- * RX queue index.
+ * @param rxq
+ * RX queue private data.
* @param desc
* Number of descriptors to configure in queue.
* @param socket
@@ -1285,10 +1313,12 @@ mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
* A DPDK queue object on success, NULL otherwise and rte_errno is set.
*/
struct mlx5_rxq_ctrl *
-mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
+ uint16_t desc,
unsigned int socket, const struct rte_eth_rxconf *conf,
const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
{
+ uint16_t idx = rxq->idx;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *tmpl;
unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
@@ -1331,6 +1361,9 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
rte_errno = ENOMEM;
return NULL;
}
+ LIST_INIT(&tmpl->owners);
+ rxq->ctrl = tmpl;
+ LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
/*
* Build the array of actual buffer offsets and lengths.
@@ -1564,6 +1597,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
(!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
tmpl->rxq.port_id = dev->data->port_id;
+ tmpl->sh = priv->sh;
tmpl->priv = priv;
tmpl->rxq.mp = rx_seg[0].mp;
tmpl->rxq.elts_n = log2above(desc);
@@ -1591,8 +1625,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
*
* @param dev
* Pointer to Ethernet device.
- * @param idx
- * RX queue index.
+ * @param rxq
+ * RX queue.
* @param desc
* Number of descriptors to configure in queue.
* @param hairpin_conf
@@ -1602,9 +1636,11 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
* A DPDK queue object on success, NULL otherwise and rte_errno is set.
*/
struct mlx5_rxq_ctrl *
-mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
+ uint16_t desc,
const struct rte_eth_hairpin_conf *hairpin_conf)
{
+ uint16_t idx = rxq->idx;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *tmpl;
@@ -1614,10 +1650,14 @@ mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
rte_errno = ENOMEM;
return NULL;
}
+ LIST_INIT(&tmpl->owners);
+ rxq->ctrl = tmpl;
+ LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
tmpl->socket = SOCKET_ID_ANY;
tmpl->rxq.rss_hash = 0;
tmpl->rxq.port_id = dev->data->port_id;
+ tmpl->sh = priv->sh;
tmpl->priv = priv;
tmpl->rxq.mp = NULL;
tmpl->rxq.elts_n = log2above(desc);
@@ -1671,6 +1711,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct mlx5_rxq_priv *rxq = (*priv->rxq_privs)[idx];
if (priv->rxqs == NULL || (*priv->rxqs)[idx] == NULL)
return 0;
@@ -1692,9 +1733,12 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
mlx5_mprq_free_mp(dev, rxq_ctrl);
}
+ LIST_REMOVE(rxq, owner_entry);
LIST_REMOVE(rxq_ctrl, next);
mlx5_free(rxq_ctrl);
(*priv->rxqs)[idx] = NULL;
+ mlx5_free(rxq);
+ (*priv->rxq_privs)[idx] = NULL;
}
return 0;
}
--
2.33.0