From: Dariusz Sosnowski <dsosnowski@nvidia.com>
To: Matan Azrad <matan@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Cc: <dev@dpdk.org>
Subject: [PATCH 4/7] net/mlx5: allow hairpin Tx queue in RTE memory
Date: Mon, 19 Sep 2022 16:37:27 +0000 [thread overview]
Message-ID: <20220919163731.1540454-5-dsosnowski@nvidia.com> (raw)
In-Reply-To: <20220919163731.1540454-1-dsosnowski@nvidia.com>
This patch adds a capability to place hairpin Tx queue in host memory
managed by DPDK. This capability is equivalent to storing hairpin SQ's
WQ buffer in host memory.
Hairpin Tx queue creation is extended with allocating a memory buffer of
proper size (calculated from required number of packets and WQE BB size
advertised in HCA capabilities).
force_memory flag of hairpin queue configuration is also supported.
If it is set and:
- allocation of memory buffer fails,
- or hairpin SQ creation fails,
then device start will fail. If it is unset, PMD will fall back to
creating the hairpin SQ with WQ buffer located in unlocked device
memory.
Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
---
drivers/net/mlx5/mlx5.h | 2 +
drivers/net/mlx5/mlx5_devx.c | 119 ++++++++++++++++++++++++++++++---
drivers/net/mlx5/mlx5_ethdev.c | 4 ++
3 files changed, 116 insertions(+), 9 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 8af84aef50..f564d4b771 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1384,6 +1384,8 @@ struct mlx5_txq_obj {
struct mlx5_devx_obj *sq;
/* DevX object for Sx queue. */
struct mlx5_devx_obj *tis; /* The TIS object. */
+ void *umem_buf_wq_buffer;
+ struct mlx5dv_devx_umem *umem_obj_wq_buffer;
};
struct {
struct rte_eth_dev *dev;
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index 6886ae1f22..a81b1bae47 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -1185,18 +1185,23 @@ static int
mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq_data, struct mlx5_txq_ctrl, txq);
- struct mlx5_devx_create_sq_attr attr = { 0 };
+ struct mlx5_devx_create_sq_attr dev_mem_attr = { 0 };
+ struct mlx5_devx_create_sq_attr host_mem_attr = { 0 };
struct mlx5_txq_obj *tmpl = txq_ctrl->obj;
+ struct mlx5dv_devx_umem *umem_obj = NULL;
+ void *umem_buf = NULL;
uint32_t max_wq_data;
MLX5_ASSERT(txq_data);
MLX5_ASSERT(tmpl);
tmpl->txq_ctrl = txq_ctrl;
- attr.hairpin = 1;
- attr.tis_lst_sz = 1;
+ dev_mem_attr.hairpin = 1;
+ dev_mem_attr.tis_lst_sz = 1;
+ dev_mem_attr.tis_num = mlx5_get_txq_tis_num(dev, idx);
max_wq_data =
priv->sh->cdev->config.hca_attr.log_max_hairpin_wq_data_sz;
/* Jumbo frames > 9KB should be supported, and more packets. */
@@ -1208,19 +1213,103 @@ mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
rte_errno = ERANGE;
return -rte_errno;
}
- attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
+ dev_mem_attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
} else {
- attr.wq_attr.log_hairpin_data_sz =
+ dev_mem_attr.wq_attr.log_hairpin_data_sz =
(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
}
/* Set the packets number to the maximum value for performance. */
- attr.wq_attr.log_hairpin_num_packets =
- attr.wq_attr.log_hairpin_data_sz -
+ dev_mem_attr.wq_attr.log_hairpin_num_packets =
+ dev_mem_attr.wq_attr.log_hairpin_data_sz -
MLX5_HAIRPIN_QUEUE_STRIDE;
+ dev_mem_attr.hairpin_wq_buffer_type = MLX5_SQC_HAIRPIN_WQ_BUFFER_TYPE_INTERNAL_BUFFER;
+ if (txq_ctrl->hairpin_conf.use_rte_memory) {
+ uint32_t umem_size;
+ uint32_t umem_dbrec;
+ size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
- attr.tis_num = mlx5_get_txq_tis_num(dev, idx);
- tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->cdev->ctx, &attr);
+ if (alignment == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get WQE buf alignment.");
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ /*
+ * It is assumed that configuration is verified against capabilities
+ * during queue setup.
+ */
+ MLX5_ASSERT(hca_attr->hairpin_sq_wq_in_host_mem);
+ MLX5_ASSERT(hca_attr->hairpin_sq_wqe_bb_size > 0);
+ rte_memcpy(&host_mem_attr, &dev_mem_attr, sizeof(host_mem_attr));
+ umem_size = MLX5_WQE_SIZE *
+ RTE_BIT32(host_mem_attr.wq_attr.log_hairpin_num_packets);
+ umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
+ umem_size += MLX5_DBR_SIZE;
+ umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
+ alignment, priv->sh->numa_node);
+ if (umem_buf == NULL && txq_ctrl->hairpin_conf.force_memory) {
+ DRV_LOG(ERR, "Failed to allocate memory for hairpin TX queue");
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ } else if (umem_buf == NULL && !txq_ctrl->hairpin_conf.force_memory) {
+ DRV_LOG(WARNING, "Failed to allocate memory for hairpin TX queue."
+ " Falling back to TX queue located on the device.");
+ goto create_sq_on_device;
+ }
+ umem_obj = mlx5_glue->devx_umem_reg(priv->sh->cdev->ctx,
+ (void *)(uintptr_t)umem_buf,
+ umem_size,
+ IBV_ACCESS_LOCAL_WRITE);
+ if (umem_obj == NULL && txq_ctrl->hairpin_conf.force_memory) {
+ DRV_LOG(ERR, "Failed to register UMEM for hairpin TX queue");
+ mlx5_free(umem_buf);
+ return -rte_errno;
+ } else if (umem_obj == NULL && !txq_ctrl->hairpin_conf.force_memory) {
+ DRV_LOG(WARNING, "Failed to register UMEM for hairpin TX queue."
+ " Falling back to TX queue located on the device.");
+ rte_errno = 0;
+ mlx5_free(umem_buf);
+ goto create_sq_on_device;
+ }
+ host_mem_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
+ host_mem_attr.wq_attr.wq_umem_valid = 1;
+ host_mem_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(umem_obj);
+ host_mem_attr.wq_attr.wq_umem_offset = 0;
+ host_mem_attr.wq_attr.dbr_umem_valid = 1;
+ host_mem_attr.wq_attr.dbr_umem_id = host_mem_attr.wq_attr.wq_umem_id;
+ host_mem_attr.wq_attr.dbr_addr = umem_dbrec;
+ host_mem_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
+ host_mem_attr.wq_attr.log_wq_sz =
+ host_mem_attr.wq_attr.log_hairpin_num_packets *
+ hca_attr->hairpin_sq_wqe_bb_size;
+ host_mem_attr.wq_attr.log_wq_pg_sz = MLX5_LOG_PAGE_SIZE;
+ host_mem_attr.hairpin_wq_buffer_type = MLX5_SQC_HAIRPIN_WQ_BUFFER_TYPE_HOST_MEMORY;
+ tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->cdev->ctx, &host_mem_attr);
+ if (!tmpl->sq && txq_ctrl->hairpin_conf.force_memory) {
+ DRV_LOG(ERR,
+ "Port %u tx hairpin queue %u can't create SQ object.",
+ dev->data->port_id, idx);
+ claim_zero(mlx5_glue->devx_umem_dereg(umem_obj));
+ mlx5_free(umem_buf);
+ return -rte_errno;
+ } else if (!tmpl->sq && !txq_ctrl->hairpin_conf.force_memory) {
+ DRV_LOG(WARNING,
+ "Port %u tx hairpin queue %u failed to allocate SQ object"
+ " using host memory. Falling back to TX queue located"
+ " on the device",
+ dev->data->port_id, idx);
+ rte_errno = 0;
+ claim_zero(mlx5_glue->devx_umem_dereg(umem_obj));
+ mlx5_free(umem_buf);
+ goto create_sq_on_device;
+ }
+ tmpl->umem_buf_wq_buffer = umem_buf;
+ tmpl->umem_obj_wq_buffer = umem_obj;
+ return 0;
+ }
+
+create_sq_on_device:
+ tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->cdev->ctx, &dev_mem_attr);
if (!tmpl->sq) {
DRV_LOG(ERR,
"Port %u tx hairpin queue %u can't create SQ object.",
@@ -1452,8 +1541,20 @@ mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj)
{
MLX5_ASSERT(txq_obj);
if (txq_obj->txq_ctrl->is_hairpin) {
+ if (txq_obj->sq) {
+ claim_zero(mlx5_devx_cmd_destroy(txq_obj->sq));
+ txq_obj->sq = NULL;
+ }
if (txq_obj->tis)
claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
+ if (txq_obj->umem_obj_wq_buffer) {
+ claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->umem_obj_wq_buffer));
+ txq_obj->umem_obj_wq_buffer = NULL;
+ }
+ if (txq_obj->umem_buf_wq_buffer) {
+ mlx5_free(txq_obj->umem_buf_wq_buffer);
+ txq_obj->umem_buf_wq_buffer = NULL;
+ }
#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
} else {
mlx5_txq_release_devx_resources(txq_obj);
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 05c919ed39..7f5b01ac74 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -729,6 +729,7 @@ int
mlx5_hairpin_cap_get(struct rte_eth_dev *dev, struct rte_eth_hairpin_cap *cap)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hca_attr *hca_attr;
if (!mlx5_devx_obj_ops_en(priv->sh)) {
rte_errno = ENOTSUP;
@@ -738,5 +739,8 @@ mlx5_hairpin_cap_get(struct rte_eth_dev *dev, struct rte_eth_hairpin_cap *cap)
cap->max_rx_2_tx = 1;
cap->max_tx_2_rx = 1;
cap->max_nb_desc = 8192;
+ hca_attr = &priv->sh->cdev->config.hca_attr;
+ cap->tx_cap.locked_device_memory = 0;
+ cap->tx_cap.rte_memory = hca_attr->hairpin_sq_wq_in_host_mem;
return 0;
}
--
2.25.1
next prev parent reply other threads:[~2022-09-19 16:39 UTC|newest]
Thread overview: 30+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-09-19 16:37 [PATCH 0/7] ethdev: introduce hairpin memory capabilities Dariusz Sosnowski
2022-09-19 16:37 ` [PATCH 1/7] " Dariusz Sosnowski
2022-10-04 16:50 ` Thomas Monjalon
2022-10-06 11:21 ` Dariusz Sosnowski
2022-09-19 16:37 ` [PATCH 2/7] common/mlx5: add hairpin SQ buffer type capabilities Dariusz Sosnowski
2022-09-27 13:03 ` Slava Ovsiienko
2022-09-19 16:37 ` [PATCH 3/7] common/mlx5: add hairpin RQ " Dariusz Sosnowski
2022-09-27 13:04 ` Slava Ovsiienko
2022-09-19 16:37 ` Dariusz Sosnowski [this message]
2022-09-27 13:05 ` [PATCH 4/7] net/mlx5: allow hairpin Tx queue in RTE memory Slava Ovsiienko
2022-09-19 16:37 ` [PATCH 5/7] net/mlx5: allow hairpin Rx queue in locked memory Dariusz Sosnowski
2022-09-27 13:04 ` Slava Ovsiienko
2022-11-25 14:06 ` Kenneth Klette Jonassen
2022-09-19 16:37 ` [PATCH 6/7] app/testpmd: add hairpin queues memory modes Dariusz Sosnowski
2022-09-19 16:37 ` [PATCH 7/7] app/flow-perf: add hairpin queue memory config Dariusz Sosnowski
2022-10-04 12:24 ` Wisam Monther
2022-10-06 11:06 ` Dariusz Sosnowski
2022-10-04 16:44 ` [PATCH 0/7] ethdev: introduce hairpin memory capabilities Thomas Monjalon
2022-10-06 11:08 ` Dariusz Sosnowski
2022-10-06 11:00 ` [PATCH v2 0/8] " Dariusz Sosnowski
2022-10-06 11:00 ` [PATCH v2 1/8] " Dariusz Sosnowski
2022-10-06 11:00 ` [PATCH v2 2/8] common/mlx5: add hairpin SQ buffer type capabilities Dariusz Sosnowski
2022-10-06 11:01 ` [PATCH v2 3/8] common/mlx5: add hairpin RQ " Dariusz Sosnowski
2022-10-06 11:01 ` [PATCH v2 4/8] net/mlx5: allow hairpin Tx queue in RTE memory Dariusz Sosnowski
2022-10-06 11:01 ` [PATCH v2 5/8] net/mlx5: allow hairpin Rx queue in locked memory Dariusz Sosnowski
2022-10-06 11:01 ` [PATCH v2 6/8] doc: add notes for hairpin to mlx5 documentation Dariusz Sosnowski
2022-10-06 11:01 ` [PATCH v2 7/8] app/testpmd: add hairpin queues memory modes Dariusz Sosnowski
2022-10-06 11:01 ` [PATCH v2 8/8] app/flow-perf: add hairpin queue memory config Dariusz Sosnowski
2022-10-15 16:30 ` Wisam Monther
2022-10-08 16:31 ` [PATCH v2 0/8] ethdev: introduce hairpin memory capabilities Thomas Monjalon
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220919163731.1540454-5-dsosnowski@nvidia.com \
--to=dsosnowski@nvidia.com \
--cc=dev@dpdk.org \
--cc=matan@nvidia.com \
--cc=viacheslavo@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).