DPDK patches and discussions
* [PATCH] net/mlx5: handle MPRQ incompatibility with external buffers
From: Alexander Kozyrev @ 2022-03-10 23:40 UTC
  To: dev; +Cc: rasland, viacheslavo, matan

Multi-Packet Rx queue uses PMD-managed buffers to store packets.
These buffers are externally attached to user mbufs.
This conflicts with the feature that allows an application to use
its own user-managed, externally attached buffers.
Fall back to SPRQ if an external buffer mempool is configured.
Add the corresponding limitation to the MLX5 documentation: MPRQ
and external data buffers cannot be used together.

Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
---
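For illustration, a minimal application-side sketch of the setup this
change guards against: a mempool whose mbuf data buffers live in
user-managed pinned external memory. The names and sizes below are
hypothetical and error handling is trimmed.

	#include <rte_lcore.h>
	#include <rte_malloc.h>
	#include <rte_mbuf.h>

	static struct rte_mempool *
	create_pinned_extbuf_pool(void)
	{
		static struct rte_pktmbuf_extmem ext_mem;

		ext_mem.elt_size = 2048;        /* data buffer size per mbuf */
		ext_mem.buf_len = 4096 * 2048;  /* whole external region */
		ext_mem.buf_ptr = rte_malloc("extbuf", ext_mem.buf_len, 4096);
		if (ext_mem.buf_ptr == NULL)
			return NULL;
		ext_mem.buf_iova = rte_malloc_virt2iova(ext_mem.buf_ptr);
		/*
		 * Pools created this way carry
		 * RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF in their private flags;
		 * the PMD detects the flag via rte_pktmbuf_priv_flags() and
		 * falls back from MPRQ to SPRQ.
		 */
		return rte_pktmbuf_pool_create_extbuf("ext_pool", 4096, 256,
						      0, 2048, rte_socket_id(),
						      &ext_mem, 1);
	}
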
 doc/guides/nics/mlx5.rst    |  4 +++-
 drivers/net/mlx5/mlx5_rx.h  |  2 +-
 drivers/net/mlx5/mlx5_rxq.c | 22 ++++++++++++++--------
 3 files changed, 18 insertions(+), 10 deletions(-)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 679481bed5..4799875263 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -260,7 +260,9 @@ Limitations
   ol_flags. As the mempool for the external buffer is managed by PMD, all the
   Rx mbufs must be freed before the device is closed. Otherwise, the mempool of
   the external buffers will be freed by PMD and the application which still
-  holds the external buffers may be corrupted.
+  holds the external buffers may be corrupted. User-managed mempools with
+  external pinned data buffers cannot be used in conjunction with MPRQ
+  since packets may be already attached to PMD-managed external buffers.
 
 - If Multi-Packet Rx queue is configured (``mprq_en``) and Rx CQE compression is
   enabled (``rxq_cqe_comp_en``) at the same time, RSS hash result is not fully
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index acebe3348c..5bf88b6181 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -209,7 +209,7 @@ struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
 				   uint16_t desc, unsigned int socket,
 				   const struct rte_eth_rxconf *conf,
 				   const struct rte_eth_rxseg_split *rx_seg,
-				   uint16_t n_seg);
+				   uint16_t n_seg, bool is_extmem);
 struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
 	(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, uint16_t desc,
 	 const struct rte_eth_hairpin_conf *hairpin_conf);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index f16795bac3..4f58b90cfe 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -840,6 +840,8 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	int res;
 	uint64_t offloads = conf->offloads |
 			    dev->data->dev_conf.rxmode.offloads;
+	bool is_extmem = rte_pktmbuf_priv_flags(mp) &
+			 RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF;
 
 	if (mp) {
 		/*
@@ -912,7 +914,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	}
 	if (rxq_ctrl == NULL) {
 		rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg,
-					n_seg);
+					n_seg, is_extmem);
 		if (rxq_ctrl == NULL) {
 			DRV_LOG(ERR, "port %u unable to allocate rx queue index %u",
 				dev->data->port_id, idx);
@@ -1548,7 +1550,8 @@ mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
  *   Log number of strides to configure for this queue.
  * @param actual_log_stride_size
  *   Log stride size to configure for this queue.
- *
+ * @param is_extmem
+ *   Is external pinned memory pool used.
  * @return
  *   0 if Multi-Packet RQ is supported, otherwise -1.
  */
@@ -1556,7 +1559,8 @@ static int
 mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		  bool rx_seg_en, uint32_t min_mbuf_size,
 		  uint32_t *actual_log_stride_num,
-		  uint32_t *actual_log_stride_size)
+		  uint32_t *actual_log_stride_size,
+		  bool is_extmem)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_port_config *config = &priv->config;
@@ -1575,7 +1579,7 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 				log_max_stride_size);
 	uint32_t log_stride_wqe_size;
 
-	if (mlx5_check_mprq_support(dev) != 1 || rx_seg_en)
+	if (mlx5_check_mprq_support(dev) != 1 || rx_seg_en || is_extmem)
 		goto unsupport;
 	/* Checks if chosen number of strides is in supported range. */
 	if (config->mprq.log_stride_num > log_max_stride_num ||
@@ -1641,7 +1645,7 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			" rxq_num = %u, stride_sz = %u, stride_num = %u\n"
 			"  supported: min_rxqs_num = %u, min_buf_wqe_sz = %u"
 			" min_stride_sz = %u, max_stride_sz = %u).\n"
-			"Rx segment is %senable.",
+			"Rx segment is %senabled. Extenal mempool is %sused.",
 			dev->data->port_id, min_mbuf_size, desc, priv->rxqs_n,
 			RTE_BIT32(config->mprq.log_stride_size),
 			RTE_BIT32(config->mprq.log_stride_num),
@@ -1649,7 +1653,7 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			RTE_BIT32(dev_cap->mprq.log_min_stride_wqe_size),
 			RTE_BIT32(dev_cap->mprq.log_min_stride_size),
 			RTE_BIT32(dev_cap->mprq.log_max_stride_size),
-			rx_seg_en ? "" : "not ");
+			rx_seg_en ? "" : "not ", is_extmem ? "" : "not ");
 	return -1;
 }
 
@@ -1671,7 +1675,8 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 struct mlx5_rxq_ctrl *
 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	     unsigned int socket, const struct rte_eth_rxconf *conf,
-	     const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
+	     const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg,
+	     bool is_extmem)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rxq_ctrl *tmpl;
@@ -1694,7 +1699,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	const int mprq_en = !mlx5_mprq_prepare(dev, idx, desc, rx_seg_en,
 					       non_scatter_min_mbuf_size,
 					       &mprq_log_actual_stride_num,
-					       &mprq_log_actual_stride_size);
+					       &mprq_log_actual_stride_size,
+					       is_extmem);
 	/*
 	 * Always allocate extra slots, even if eventually
 	 * the vector Rx will not be used.
-- 
2.18.2



* [PATCH v2] net/mlx5: handle MPRQ incompatibility with external buffers
From: Alexander Kozyrev @ 2022-03-10 23:45 UTC
  To: dev; +Cc: rasland, viacheslavo, matan

Multi-Packet Rx queue uses PMD-managed buffers to store packets.
These buffers are externally attached to user mbufs.
This conflicts with the feature that allows an application to use
its own user-managed, externally attached buffers.
Fall back to SPRQ if an external buffer mempool is configured.
Add the corresponding limitation to the MLX5 documentation: MPRQ
and external data buffers cannot be used together.

Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
---
v2: fixed typo in warning message
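
For reference, an application can test for the condition that triggers
this fallback with the same flag the PMD checks. A sketch, assuming mp
is an already-created mbuf pool:

	#include <stdbool.h>
	#include <rte_mbuf.h>

	/* True when the pool's mbufs use pinned external data buffers,
	 * in which case mlx5 configures SPRQ instead of MPRQ. */
	static inline bool
	pool_forces_sprq(struct rte_mempool *mp)
	{
		return (rte_pktmbuf_priv_flags(mp) &
			RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) != 0;
	}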

 doc/guides/nics/mlx5.rst    |  4 +++-
 drivers/net/mlx5/mlx5_rx.h  |  2 +-
 drivers/net/mlx5/mlx5_rxq.c | 22 ++++++++++++++--------
 3 files changed, 18 insertions(+), 10 deletions(-)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 679481bed5..4799875263 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -260,7 +260,9 @@ Limitations
   ol_flags. As the mempool for the external buffer is managed by PMD, all the
   Rx mbufs must be freed before the device is closed. Otherwise, the mempool of
   the external buffers will be freed by PMD and the application which still
-  holds the external buffers may be corrupted.
+  holds the external buffers may be corrupted. User-managed mempools with
+  external pinned data buffers cannot be used in conjunction with MPRQ
+  since packets may be already attached to PMD-managed external buffers.
 
 - If Multi-Packet Rx queue is configured (``mprq_en``) and Rx CQE compression is
   enabled (``rxq_cqe_comp_en``) at the same time, RSS hash result is not fully
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index acebe3348c..5bf88b6181 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -209,7 +209,7 @@ struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
 				   uint16_t desc, unsigned int socket,
 				   const struct rte_eth_rxconf *conf,
 				   const struct rte_eth_rxseg_split *rx_seg,
-				   uint16_t n_seg);
+				   uint16_t n_seg, bool is_extmem);
 struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
 	(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, uint16_t desc,
 	 const struct rte_eth_hairpin_conf *hairpin_conf);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index f16795bac3..7a214dcca9 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -840,6 +840,8 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	int res;
 	uint64_t offloads = conf->offloads |
 			    dev->data->dev_conf.rxmode.offloads;
+	bool is_extmem = rte_pktmbuf_priv_flags(mp) &
+			 RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF;
 
 	if (mp) {
 		/*
@@ -912,7 +914,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	}
 	if (rxq_ctrl == NULL) {
 		rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg,
-					n_seg);
+					n_seg, is_extmem);
 		if (rxq_ctrl == NULL) {
 			DRV_LOG(ERR, "port %u unable to allocate rx queue index %u",
 				dev->data->port_id, idx);
@@ -1548,7 +1550,8 @@ mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
  *   Log number of strides to configure for this queue.
  * @param actual_log_stride_size
  *   Log stride size to configure for this queue.
- *
+ * @param is_extmem
+ *   Is external pinned memory pool used.
  * @return
  *   0 if Multi-Packet RQ is supported, otherwise -1.
  */
@@ -1556,7 +1559,8 @@ static int
 mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		  bool rx_seg_en, uint32_t min_mbuf_size,
 		  uint32_t *actual_log_stride_num,
-		  uint32_t *actual_log_stride_size)
+		  uint32_t *actual_log_stride_size,
+		  bool is_extmem)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_port_config *config = &priv->config;
@@ -1575,7 +1579,7 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 				log_max_stride_size);
 	uint32_t log_stride_wqe_size;
 
-	if (mlx5_check_mprq_support(dev) != 1 || rx_seg_en)
+	if (mlx5_check_mprq_support(dev) != 1 || rx_seg_en || is_extmem)
 		goto unsupport;
 	/* Checks if chosen number of strides is in supported range. */
 	if (config->mprq.log_stride_num > log_max_stride_num ||
@@ -1641,7 +1645,7 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			" rxq_num = %u, stride_sz = %u, stride_num = %u\n"
 			"  supported: min_rxqs_num = %u, min_buf_wqe_sz = %u"
 			" min_stride_sz = %u, max_stride_sz = %u).\n"
-			"Rx segment is %senable.",
+			"Rx segment is %senabled. External mempool is %sused.",
 			dev->data->port_id, min_mbuf_size, desc, priv->rxqs_n,
 			RTE_BIT32(config->mprq.log_stride_size),
 			RTE_BIT32(config->mprq.log_stride_num),
@@ -1649,7 +1653,7 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			RTE_BIT32(dev_cap->mprq.log_min_stride_wqe_size),
 			RTE_BIT32(dev_cap->mprq.log_min_stride_size),
 			RTE_BIT32(dev_cap->mprq.log_max_stride_size),
-			rx_seg_en ? "" : "not ");
+			rx_seg_en ? "" : "not ", is_extmem ? "" : "not ");
 	return -1;
 }
 
@@ -1671,7 +1675,8 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 struct mlx5_rxq_ctrl *
 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	     unsigned int socket, const struct rte_eth_rxconf *conf,
-	     const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
+	     const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg,
+	     bool is_extmem)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rxq_ctrl *tmpl;
@@ -1694,7 +1699,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	const int mprq_en = !mlx5_mprq_prepare(dev, idx, desc, rx_seg_en,
 					       non_scatter_min_mbuf_size,
 					       &mprq_log_actual_stride_num,
-					       &mprq_log_actual_stride_size);
+					       &mprq_log_actual_stride_size,
+					       is_extmem);
 	/*
 	 * Always allocate extra slots, even if eventually
 	 * the vector Rx will not be used.
-- 
2.18.2



* [PATCH v3] net/mlx5: handle MPRQ incompatibility with external buffers
From: Alexander Kozyrev @ 2022-03-11 23:08 UTC
  To: dev; +Cc: rasland, viacheslavo, matan

Multi-Packet Rx queue uses PMD-managed buffers to store packets.
These buffers are externally attached to user mbufs.
This conflicts with the feature that allows an application to use
its own user-managed, externally attached buffers.
Fall back to SPRQ if an external buffer mempool is configured.
Add the corresponding limitation to the MLX5 documentation: MPRQ
and external data buffers cannot be used together.

Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
---
v3: fixed Coverity issue with NULL pointer dereference
v2: fixed typo in warning message
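
Note on the Coverity fix: mp may be NULL when Rx buffer split is
configured through rx_seg, and v1/v2 evaluated rte_pktmbuf_priv_flags(mp)
before the NULL check. A minimal sketch of the pattern this revision
adopts instead:

	bool is_extmem = false;

	if (mp) {
		/* Only dereference the mempool when one was supplied. */
		is_extmem = (rte_pktmbuf_priv_flags(mp) &
			     RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) != 0;
	}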

 doc/guides/nics/mlx5.rst    |  4 +++-
 drivers/net/mlx5/mlx5_rx.h  |  2 +-
 drivers/net/mlx5/mlx5_rxq.c | 23 +++++++++++++++--------
 3 files changed, 19 insertions(+), 10 deletions(-)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 679481bed5..4799875263 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -260,7 +260,9 @@ Limitations
   ol_flags. As the mempool for the external buffer is managed by PMD, all the
   Rx mbufs must be freed before the device is closed. Otherwise, the mempool of
   the external buffers will be freed by PMD and the application which still
-  holds the external buffers may be corrupted.
+  holds the external buffers may be corrupted. User-managed mempools with
+  external pinned data buffers cannot be used in conjunction with MPRQ
+  since packets may be already attached to PMD-managed external buffers.
 
 - If Multi-Packet Rx queue is configured (``mprq_en``) and Rx CQE compression is
   enabled (``rxq_cqe_comp_en``) at the same time, RSS hash result is not fully
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index acebe3348c..5bf88b6181 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -209,7 +209,7 @@ struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
 				   uint16_t desc, unsigned int socket,
 				   const struct rte_eth_rxconf *conf,
 				   const struct rte_eth_rxseg_split *rx_seg,
-				   uint16_t n_seg);
+				   uint16_t n_seg, bool is_extmem);
 struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
 	(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, uint16_t desc,
 	 const struct rte_eth_hairpin_conf *hairpin_conf);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index f16795bac3..925544ae3d 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -840,6 +840,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	int res;
 	uint64_t offloads = conf->offloads |
 			    dev->data->dev_conf.rxmode.offloads;
+	bool is_extmem = false;
 
 	if (mp) {
 		/*
@@ -849,6 +850,8 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		 */
 		rx_seg = &rx_single;
 		n_seg = 1;
+		is_extmem = rte_pktmbuf_priv_flags(mp) &
+			    RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF;
 	}
 	if (n_seg > 1) {
 		/* The offloads should be checked on rte_eth_dev layer. */
@@ -912,7 +915,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	}
 	if (rxq_ctrl == NULL) {
 		rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg,
-					n_seg);
+					n_seg, is_extmem);
 		if (rxq_ctrl == NULL) {
 			DRV_LOG(ERR, "port %u unable to allocate rx queue index %u",
 				dev->data->port_id, idx);
@@ -1548,7 +1551,8 @@ mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
  *   Log number of strides to configure for this queue.
  * @param actual_log_stride_size
  *   Log stride size to configure for this queue.
- *
+ * @param is_extmem
+ *   Is external pinned memory pool used.
  * @return
  *   0 if Multi-Packet RQ is supported, otherwise -1.
  */
@@ -1556,7 +1560,8 @@ static int
 mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		  bool rx_seg_en, uint32_t min_mbuf_size,
 		  uint32_t *actual_log_stride_num,
-		  uint32_t *actual_log_stride_size)
+		  uint32_t *actual_log_stride_size,
+		  bool is_extmem)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_port_config *config = &priv->config;
@@ -1575,7 +1580,7 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 				log_max_stride_size);
 	uint32_t log_stride_wqe_size;
 
-	if (mlx5_check_mprq_support(dev) != 1 || rx_seg_en)
+	if (mlx5_check_mprq_support(dev) != 1 || rx_seg_en || is_extmem)
 		goto unsupport;
 	/* Checks if chosen number of strides is in supported range. */
 	if (config->mprq.log_stride_num > log_max_stride_num ||
@@ -1641,7 +1646,7 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			" rxq_num = %u, stride_sz = %u, stride_num = %u\n"
 			"  supported: min_rxqs_num = %u, min_buf_wqe_sz = %u"
 			" min_stride_sz = %u, max_stride_sz = %u).\n"
-			"Rx segment is %senable.",
+			"Rx segment is %senabled. External mempool is %sused.",
 			dev->data->port_id, min_mbuf_size, desc, priv->rxqs_n,
 			RTE_BIT32(config->mprq.log_stride_size),
 			RTE_BIT32(config->mprq.log_stride_num),
@@ -1649,7 +1654,7 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			RTE_BIT32(dev_cap->mprq.log_min_stride_wqe_size),
 			RTE_BIT32(dev_cap->mprq.log_min_stride_size),
 			RTE_BIT32(dev_cap->mprq.log_max_stride_size),
-			rx_seg_en ? "" : "not ");
+			rx_seg_en ? "" : "not ", is_extmem ? "" : "not ");
 	return -1;
 }
 
@@ -1671,7 +1676,8 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 struct mlx5_rxq_ctrl *
 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	     unsigned int socket, const struct rte_eth_rxconf *conf,
-	     const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
+	     const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg,
+	     bool is_extmem)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rxq_ctrl *tmpl;
@@ -1694,7 +1700,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	const int mprq_en = !mlx5_mprq_prepare(dev, idx, desc, rx_seg_en,
 					       non_scatter_min_mbuf_size,
 					       &mprq_log_actual_stride_num,
-					       &mprq_log_actual_stride_size);
+					       &mprq_log_actual_stride_size,
+					       is_extmem);
 	/*
 	 * Always allocate extra slots, even if eventually
 	 * the vector Rx will not be used.
-- 
2.18.2



* RE: [PATCH v3] net/mlx5: handle MPRQ incompatibility with external buffers
From: Slava Ovsiienko @ 2022-03-14  8:50 UTC
  To: Alexander Kozyrev, dev; +Cc: Raslan Darawsheh, Matan Azrad

> -----Original Message-----
> From: Alexander Kozyrev <akozyrev@nvidia.com>
> Sent: Saturday, March 12, 2022 1:08
> To: dev@dpdk.org
> Cc: Raslan Darawsheh <rasland@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>; Matan Azrad <matan@nvidia.com>
> Subject: [PATCH v3] net/mlx5: handle MPRQ incompatibility with external
> buffers
> 
> Multi-Packet Rx queue uses PMD-managed buffers to store packets.
> These buffers are externally attached to user mbufs.
> This conflicts with the feature that allows an application to use its
> own user-managed, externally attached buffers.
> Fall back to SPRQ if an external buffer mempool is configured.
> Add the corresponding limitation to the MLX5 documentation: MPRQ and
> external data buffers cannot be used together.
> 
> Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>



* RE: [PATCH v3] net/mlx5: handle MPRQ incompatibility with external buffers
From: Raslan Darawsheh @ 2022-04-12  8:08 UTC
  To: Alexander Kozyrev, dev; +Cc: Slava Ovsiienko, Matan Azrad

Hi,

> -----Original Message-----
> From: Alexander Kozyrev <akozyrev@nvidia.com>
> Sent: Saturday, March 12, 2022 1:08 AM
> To: dev@dpdk.org
> Cc: Raslan Darawsheh <rasland@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>; Matan Azrad <matan@nvidia.com>
> Subject: [PATCH v3] net/mlx5: handle MPRQ incompatibility with external
> buffers
> 
> Multi-Packet Rx queue uses PMD-managed buffers to store packets.
> These buffers are externally attached to user mbufs.
> This conflicts with the feature that allows an application to use its
> own user-managed, externally attached buffers.
> Fall back to SPRQ if an external buffer mempool is configured.
> Add the corresponding limitation to the MLX5 documentation: MPRQ and
> external data buffers cannot be used together.
> 
> Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
> ---
> v3: fixed Coverity issue with NULL pointer dereference
> v2: fixed typo in warning message
> 

Patch rebased and applied to next-net-mlx,

Kindest regards,
Raslan Darawsheh

