DPDK patches and discussions
From: Alexander Kozyrev <akozyrev@nvidia.com>
To: <dev@dpdk.org>
Cc: <rasland@nvidia.com>, <viacheslavo@nvidia.com>, <matan@nvidia.com>
Subject: [PATCH v3] net/mlx5: handle MPRQ incompatibility with external buffers
Date: Sat, 12 Mar 2022 01:08:25 +0200
Message-ID: <20220311230825.3904165-1-akozyrev@nvidia.com>
In-Reply-To: <20220310234518.3823100-1-akozyrev@nvidia.com>

Multi-Packet Rx queue uses PMD-managed buffers to store packets.
These buffers are externally attached to user mbufs.
This conflicts with the feature that allows using user-managed
externally attached buffers in an application.
Fall back to SPRQ in case an external buffers mempool is configured.
Add the corresponding limitation to the MLX5 documentation: MPRQ and
external data buffers cannot be used together.
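
The fallback decision boils down to one check of the Rx mempool's
private flags. A minimal application-side sketch of the same check is
shown below (the helper name is illustrative; rte_pktmbuf_priv_flags()
and RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF are the real DPDK API), assuming
the pool was created with rte_pktmbuf_pool_create_extbuf():

#include <stdbool.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

/* True when the mempool carries pinned external data buffers, i.e. the
 * case in which the PMD now falls back from MPRQ to SPRQ. */
static bool
mempool_has_pinned_extbuf(struct rte_mempool *mp)
{
	return mp != NULL &&
	       (rte_pktmbuf_priv_flags(mp) &
		RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) != 0;
}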

Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
---
v3: fixed coverity issue with NULL pointer dereference
v2: fixed typo in warning message

 doc/guides/nics/mlx5.rst    |  4 +++-
 drivers/net/mlx5/mlx5_rx.h  |  2 +-
 drivers/net/mlx5/mlx5_rxq.c | 23 +++++++++++++++--------
 3 files changed, 19 insertions(+), 10 deletions(-)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 679481bed5..4799875263 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -260,7 +260,9 @@ Limitations
   ol_flags. As the mempool for the external buffer is managed by PMD, all the
   Rx mbufs must be freed before the device is closed. Otherwise, the mempool of
   the external buffers will be freed by PMD and the application which still
-  holds the external buffers may be corrupted.
+  holds the external buffers may be corrupted. User-managed mempools with
+  external pinned data buffers cannot be used in conjunction with MPRQ
+  since packets may already be attached to PMD-managed external buffers.
 
 - If Multi-Packet Rx queue is configured (``mprq_en``) and Rx CQE compression is
   enabled (``rxq_cqe_comp_en``) at the same time, RSS hash result is not fully
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index acebe3348c..5bf88b6181 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -209,7 +209,7 @@ struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
 				   uint16_t desc, unsigned int socket,
 				   const struct rte_eth_rxconf *conf,
 				   const struct rte_eth_rxseg_split *rx_seg,
-				   uint16_t n_seg);
+				   uint16_t n_seg, bool is_extmem);
 struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
 	(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, uint16_t desc,
 	 const struct rte_eth_hairpin_conf *hairpin_conf);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index f16795bac3..925544ae3d 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -840,6 +840,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	int res;
 	uint64_t offloads = conf->offloads |
 			    dev->data->dev_conf.rxmode.offloads;
+	bool is_extmem = false;
 
 	if (mp) {
 		/*
@@ -849,6 +850,8 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		 */
 		rx_seg = &rx_single;
 		n_seg = 1;
+		is_extmem = rte_pktmbuf_priv_flags(mp) &
+			    RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF;
 	}
 	if (n_seg > 1) {
 		/* The offloads should be checked on rte_eth_dev layer. */
@@ -912,7 +915,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	}
 	if (rxq_ctrl == NULL) {
 		rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg,
-					n_seg);
+					n_seg, is_extmem);
 		if (rxq_ctrl == NULL) {
 			DRV_LOG(ERR, "port %u unable to allocate rx queue index %u",
 				dev->data->port_id, idx);
@@ -1548,7 +1551,8 @@ mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
  *   Log number of strides to configure for this queue.
  * @param actual_log_stride_size
  *   Log stride size to configure for this queue.
- *
+ * @param is_extmem
+ *   Whether an external pinned memory pool is used.
  * @return
  *   0 if Multi-Packet RQ is supported, otherwise -1.
  */
@@ -1556,7 +1560,8 @@ static int
 mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		  bool rx_seg_en, uint32_t min_mbuf_size,
 		  uint32_t *actual_log_stride_num,
-		  uint32_t *actual_log_stride_size)
+		  uint32_t *actual_log_stride_size,
+		  bool is_extmem)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_port_config *config = &priv->config;
@@ -1575,7 +1580,7 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 				log_max_stride_size);
 	uint32_t log_stride_wqe_size;
 
-	if (mlx5_check_mprq_support(dev) != 1 || rx_seg_en)
+	if (mlx5_check_mprq_support(dev) != 1 || rx_seg_en || is_extmem)
 		goto unsupport;
 	/* Checks if chosen number of strides is in supported range. */
 	if (config->mprq.log_stride_num > log_max_stride_num ||
@@ -1641,7 +1646,7 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			" rxq_num = %u, stride_sz = %u, stride_num = %u\n"
 			"  supported: min_rxqs_num = %u, min_buf_wqe_sz = %u"
 			" min_stride_sz = %u, max_stride_sz = %u).\n"
-			"Rx segment is %senable.",
+			"Rx segment is %senabled. External mempool is %sused.",
 			dev->data->port_id, min_mbuf_size, desc, priv->rxqs_n,
 			RTE_BIT32(config->mprq.log_stride_size),
 			RTE_BIT32(config->mprq.log_stride_num),
@@ -1649,7 +1654,7 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			RTE_BIT32(dev_cap->mprq.log_min_stride_wqe_size),
 			RTE_BIT32(dev_cap->mprq.log_min_stride_size),
 			RTE_BIT32(dev_cap->mprq.log_max_stride_size),
-			rx_seg_en ? "" : "not ");
+			rx_seg_en ? "" : "not ", is_extmem ? "" : "not ");
 	return -1;
 }
 
@@ -1671,7 +1676,8 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 struct mlx5_rxq_ctrl *
 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	     unsigned int socket, const struct rte_eth_rxconf *conf,
-	     const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
+	     const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg,
+	     bool is_extmem)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rxq_ctrl *tmpl;
@@ -1694,7 +1700,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	const int mprq_en = !mlx5_mprq_prepare(dev, idx, desc, rx_seg_en,
 					       non_scatter_min_mbuf_size,
 					       &mprq_log_actual_stride_num,
-					       &mprq_log_actual_stride_size);
+					       &mprq_log_actual_stride_size,
+					       is_extmem);
 	/*
 	 * Always allocate extra slots, even if eventually
 	 * the vector Rx will not be used.
-- 
2.18.2
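
For context, below is an application-side sketch (not part of the patch)
of the configuration this change handles: a pktmbuf pool with pinned
external data buffers passed to Rx queue setup. With mlx5 and the
mprq_en=1 devarg, the PMD now falls back to SPRQ for such a pool. The
memzone/pool names, sizes, and the use of an IOVA-contiguous memzone as
the backing area are illustrative assumptions.

#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_memzone.h>

#define EXT_NB_MBUF  4096U
#define EXT_ELT_SIZE 2048U

/* Build a pktmbuf pool whose data buffers are pinned in a user-provided
 * external memory area. An IOVA-contiguous memzone is used here so the
 * whole area is already DMA-mapped; truly external (non-DPDK) memory
 * would also need rte_extmem_register()/rte_dev_dma_map(). */
static struct rte_mempool *
create_pinned_extbuf_pool(void)
{
	static struct rte_pktmbuf_extmem ext;
	const struct rte_memzone *mz;

	mz = rte_memzone_reserve_aligned("ext_rx_area",
					 (size_t)EXT_NB_MBUF * EXT_ELT_SIZE,
					 SOCKET_ID_ANY,
					 RTE_MEMZONE_IOVA_CONTIG,
					 RTE_PGSIZE_4K);
	if (mz == NULL)
		return NULL;
	ext.buf_ptr = mz->addr;
	ext.buf_iova = mz->iova;
	ext.buf_len = (size_t)EXT_NB_MBUF * EXT_ELT_SIZE;
	ext.elt_size = EXT_ELT_SIZE;
	return rte_pktmbuf_pool_create_extbuf("ext_rx_pool", EXT_NB_MBUF,
					      256, 0, EXT_ELT_SIZE,
					      SOCKET_ID_ANY, &ext, 1);
}

/* Rx queue setup with the pinned-extbuf pool; with this patch applied,
 * mlx5 configures an SPRQ here even if mprq_en=1 is set. */
static int
setup_rx_queue(uint16_t port_id, struct rte_mempool *mp)
{
	return rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
				      NULL, mp);
}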


Thread overview: 5+ messages
2022-03-10 23:40 [PATCH] " Alexander Kozyrev
2022-03-10 23:45 ` [PATCH v2] " Alexander Kozyrev
2022-03-11 23:08   ` Alexander Kozyrev [this message]
2022-03-14  8:50     ` [PATCH v3] " Slava Ovsiienko
2022-04-12  8:08     ` Raslan Darawsheh
