patches for DPDK stable branches
* [PATCH] net/mlx5: fix MPRQ pool registration
@ 2022-08-15  7:26 Dmitry Kozlyuk
  2022-08-15  7:51 ` Xueming(Steven) Li
  0 siblings, 1 reply; 2+ messages in thread
From: Dmitry Kozlyuk @ 2022-08-15  7:26 UTC (permalink / raw)
  To: stable; +Cc: Xueming Li, Luca Boccassi, Viacheslav Ovsiienko

mlx5_mr_update_mp() was checking pktmbuf pool private flags.
However, this function may be passed an MPRQ pool,
which is not a pktmbuf pool and has no private data.
Random data is accessed instead of pktmbuf flags, causing a crash.
Move the flags check to the RxQ start routine and perform it
only for pools that are known to be of pktmbuf type.
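
To illustrate why the old check was unsafe: rte_pktmbuf_priv_flags()
boils down to reading the mempool private area as
struct rte_pktmbuf_pool_private, roughly as in the sketch below
(a rough equivalent for illustration, not the DPDK source).
An MPRQ pool has no such private area, so the read returns
arbitrary memory.

#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Rough sketch of what the flags helper does; valid only for pools
 * created by the rte_pktmbuf_pool_create() family. */
static uint32_t
pktmbuf_priv_flags_sketch(struct rte_mempool *mp)
{
	const struct rte_pktmbuf_pool_private *priv;

	/* An MPRQ pool has no pktmbuf private area, so this read
	 * returns whatever happens to be in that memory. */
	priv = rte_mempool_get_priv(mp);
	return priv->flags;
}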

Fixes: 23b584d6cc85 ("net/mlx5: fix external buffer pool registration for Rx queue")

Signed-off-by: Dmitry Kozlyuk <dkozlyuk@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_mr.c      | 11 -----------
 drivers/net/mlx5/mlx5_trigger.c | 21 +++++++++++++++++++--
 2 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 1dd14ddfe5..2a7fac8ad3 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -444,18 +444,7 @@ mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
 		.mr_ctrl = mr_ctrl,
 		.ret = 0,
 	};
-	uint32_t flags = rte_pktmbuf_priv_flags(mp);
 
-	if (flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) {
-		/*
-		 * The pinned external buffer should be registered for DMA
-		 * operations by application. The mem_list of the pool contains
-		 * the list of chunks with mbuf structures w/o built-in data
-		 * buffers and DMA actually does not happen there, no need
-		 * to create MR for these chunks.
-		 */
-		return 0;
-	}
 	DRV_LOG(DEBUG, "Port %u Rx queue registering mp %s "
 		       "having %u chunks.", dev->data->port_id,
 		       mp->name, mp->nb_mem_chunks);
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index c8dc0398ea..9b82ee40fd 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -156,12 +156,29 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
 				mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,
 						  rxq_ctrl->rxq.mprq_mp);
 			} else {
+				struct rte_mempool *mp;
+				uint32_t flags;
 				uint32_t s;
 
-				for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++)
+				/*
+				 * The pinned external buffer should be
+				 * registered for DMA operations by application.
+				 * The mem_list of the pool contains
+				 * the list of chunks with mbuf structures
+				 * w/o built-in data buffers
+				 * and DMA actually does not happen there,
+				 * no need to create MR for these chunks.
+				 */
+				for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++) {
+					mp = rxq_ctrl->rxq.rxseg[s].mp;
+					flags = rte_pktmbuf_priv_flags(mp);
+					if (flags &
+					    RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
+						continue;
 					mlx5_mr_update_mp
 						(dev, &rxq_ctrl->rxq.mr_ctrl,
-						rxq_ctrl->rxq.rxseg[s].mp);
+						 mp);
+				}
 			}
 			ret = rxq_alloc_elts(rxq_ctrl);
 			if (ret)
-- 
2.25.1
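
For context on the pools that the new loop skips:
RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF is set for pools backed by
application-pinned external buffers, e.g. created with
rte_pktmbuf_pool_create_extbuf(); the application registers that
memory for DMA itself, which is why the PMD needs no MR for it.
A minimal sketch follows (ext_va, ext_iova, ext_len and elt_size
are illustrative assumptions, already prepared and DMA-mapped by
the application):

#include <rte_debug.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>

static struct rte_mempool *
pinned_pool_sketch(void *ext_va, rte_iova_t ext_iova, size_t ext_len,
		   uint16_t elt_size)
{
	struct rte_pktmbuf_extmem ext = {
		.buf_ptr = ext_va,
		.buf_iova = ext_iova,
		.buf_len = ext_len,	/* must hold all pool elements */
		.elt_size = elt_size,
	};
	struct rte_mempool *mp;

	mp = rte_pktmbuf_pool_create_extbuf("pinned_mp", 4096, 256, 0,
					    elt_size, rte_socket_id(),
					    &ext, 1);
	if (mp == NULL)
		return NULL;
	/* This is the flag that the RxQ start loop in the patch checks
	 * in order to skip MR registration for such pools. */
	RTE_ASSERT(rte_pktmbuf_priv_flags(mp) &
		   RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF);
	return mp;
}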



* RE: [PATCH] net/mlx5: fix MPRQ pool registration
  2022-08-15  7:26 [PATCH] net/mlx5: fix MPRQ pool registration Dmitry Kozlyuk
@ 2022-08-15  7:51 ` Xueming(Steven) Li
  0 siblings, 0 replies; 2+ messages in thread
From: Xueming(Steven) Li @ 2022-08-15  7:51 UTC (permalink / raw)
  To: Dmitry Kozlyuk, stable; +Cc: Luca Boccassi, Slava Ovsiienko

It's a 20.11 LTS patch, applied, thanks!

