patches for DPDK stable branches
From: "Xueming(Steven) Li" <xuemingl@nvidia.com>
To: Alexander Kozyrev <akozyrev@nvidia.com>,
	"stable@dpdk.org" <stable@dpdk.org>
Cc: Slava Ovsiienko <viacheslavo@nvidia.com>,
	"christian.ehrhardt@canonical.com"
	<christian.ehrhardt@canonical.com>,
	"ktraynor@redhat.com" <ktraynor@redhat.com>,
	"bluca@debian.org" <bluca@debian.org>
Subject: RE: [PATCH 20.11] net/mlx5: handle MPRQ incompatibility with external buffers
Date: Fri, 12 Aug 2022 07:27:02 +0000
Message-ID: <DM4PR12MB5373F268B162C794974B38D5A1679@DM4PR12MB5373.namprd12.prod.outlook.com>
In-Reply-To: <20220811003556.1081794-1-akozyrev@nvidia.com>

Applied, thanks!

> -----Original Message-----
> From: Alexander Kozyrev <akozyrev@nvidia.com>
> Sent: Thursday, August 11, 2022 8:36 AM
> To: stable@dpdk.org
> Cc: Slava Ovsiienko <viacheslavo@nvidia.com>; Xueming(Steven) Li <xuemingl@nvidia.com>; christian.ehrhardt@canonical.com;
> ktraynor@redhat.com; bluca@debian.org
> Subject: [PATCH 20.11] net/mlx5: handle MPRQ incompatibility with external buffers
> 
> [ upstream commit 3a29cb3a730ba0def6b088c969da379a9ffea988 ]
> 
> Multi-Packet Rx queue uses PMD-managed buffers to store packets.
> These buffers are externally attached to user mbufs.
> This conflicts with the feature that allows using user-managed externally attached buffers in an application.
> Fall back to SPRQ in case an external buffer mempool is configured.
> The limitation is already documented in the mlx5 guide.
> 
> Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
> Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> ---
>  drivers/net/mlx5/mlx5_rxq.c  | 23 ++++++++++++++++-------
>  drivers/net/mlx5/mlx5_rxtx.h |  2 +-
>  2 files changed, 17 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
> index 4a263a5803..80d9d2fe12 100644
> --- a/drivers/net/mlx5/mlx5_rxq.c
> +++ b/drivers/net/mlx5/mlx5_rxq.c
> @@ -754,6 +754,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
>  				(struct rte_eth_rxseg_split *)conf->rx_seg;
>  	struct rte_eth_rxseg_split rx_single = {.mp = mp};
>  	uint16_t n_seg = conf->rx_nseg;
> +	bool is_extmem = false;
>  	int res;
> 
>  	if (mp) {
> @@ -764,6 +765,8 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
>  		 */
>  		rx_seg = &rx_single;
>  		n_seg = 1;
> +		is_extmem = rte_pktmbuf_priv_flags(mp) &
> +				RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF;
>  	}
>  	if (n_seg > 1) {
>  		uint64_t offloads = conf->offloads |
> @@ -783,7 +786,8 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
>  	res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
>  	if (res)
>  		return res;
> -	rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg, n_seg);
> +	rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket,
> +				conf, rx_seg, n_seg, is_extmem);
>  	if (!rxq_ctrl) {
>  		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
>  			dev->data->port_id, idx);
> @@ -1397,6 +1401,8 @@ mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
>   *   Log number of strides to configure for this queue.
>   * @param actual_log_stride_size
>   *   Log stride size to configure for this queue.
> + * @param is_extmem
> + *   Is external pinned memory pool used.
>   *
>   * @return
>   *   0 if Multi-Packet RQ is supported, otherwise -1.
> @@ -1405,7 +1411,8 @@ static int
>  mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
>  		  bool rx_seg_en, uint32_t min_mbuf_size,
>  		  uint32_t *actual_log_stride_num,
> -		  uint32_t *actual_log_stride_size)
> +		  uint32_t *actual_log_stride_size,
> +		  bool is_extmem)
>  {
>  	struct mlx5_priv *priv = dev->data->dev_private;
>  	struct mlx5_dev_config *config = &priv->config;
> @@ -1423,7 +1430,7 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
>  				log_max_stride_size);
>  	uint32_t log_stride_wqe_size;
> 
> -	if (mlx5_check_mprq_support(dev) != 1 || rx_seg_en)
> +	if (mlx5_check_mprq_support(dev) != 1 || rx_seg_en || is_extmem)
>  		goto unsupport;
>  	/* Checks if chosen number of strides is in supported range. */
>  	if (config->mprq.log_stride_num > log_max_stride_num ||
> @@ -1489,7 +1496,7 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
>  			" rxq_num = %u, stride_sz = %u, stride_num = %u\n"
>  			"  supported: min_rxqs_num = %u, min_buf_wqe_sz = %u"
>  			" min_stride_sz = %u, max_stride_sz = %u).\n"
> -			"Rx segment is %senable.",
> +			"Rx segment is %senabled. External mempool is %sused.",
>  			dev->data->port_id, min_mbuf_size, desc, priv->rxqs_n,
>  			RTE_BIT32(config->mprq.log_stride_size),
>  			RTE_BIT32(config->mprq.log_stride_num),
> @@ -1497,7 +1504,7 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
>  			RTE_BIT32(config->mprq.log_min_stride_wqe_size),
>  			RTE_BIT32(config->mprq.log_min_stride_size),
>  			RTE_BIT32(config->mprq.log_max_stride_size),
> -			rx_seg_en ? "" : "not ");
> +			rx_seg_en ? "" : "not ", is_extmem ? "" : "not ");
>  	return -1;
>  }
> 
> @@ -1519,7 +1526,8 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
>  struct mlx5_rxq_ctrl *
>  mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
>  	     unsigned int socket, const struct rte_eth_rxconf *conf,
> -	     const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
> +	     const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg,
> +	     bool is_extmem)
>  {
>  	struct mlx5_priv *priv = dev->data->dev_private;
>  	struct mlx5_rxq_ctrl *tmpl;
> @@ -1541,7 +1549,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
>  	const int mprq_en = !mlx5_mprq_prepare(dev, idx, desc, rx_seg_en,
>  					       non_scatter_min_mbuf_size,
>  					       &mprq_log_actual_stride_num,
> -					       &mprq_log_actual_stride_size);
> +					       &mprq_log_actual_stride_size,
> +					       is_extmem);
>  	/*
>  	 * Always allocate extra slots, even if eventually
>  	 * the vector Rx will not be used.
> diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
> index 237a7faa5c..9e00031ed6 100644
> --- a/drivers/net/mlx5/mlx5_rxtx.h
> +++ b/drivers/net/mlx5/mlx5_rxtx.h
> @@ -336,7 +336,7 @@ struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
>  				   uint16_t desc, unsigned int socket,
>  				   const struct rte_eth_rxconf *conf,
>  				   const struct rte_eth_rxseg_split *rx_seg,
> -				   uint16_t n_seg);
> +				   uint16_t n_seg, bool is_extmem);
>  struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
>  	(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
>  	 const struct rte_eth_hairpin_conf *hairpin_conf);
> --
> 2.18.2
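
For context on the fallback in the quoted patch: it keys entirely off the mempool private flag RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF, which rte_pktmbuf_pool_create_extbuf() sets when it builds a pool on top of application-provided memory. Below is a minimal sketch of how an application ends up with such a pool; the buffer sizes, counts and names are illustrative assumptions, and rte_malloc() merely stands in for whatever pinned, IOVA-contiguous external memory the application really manages.

#include <rte_malloc.h>
#include <rte_mbuf.h>

#define NB_MBUFS 1024u
#define ELT_SIZE 2048u                        /* per-packet external buffer */
#define EXT_LEN  (NB_MBUFS * (size_t)ELT_SIZE)

static struct rte_mempool *
create_pinned_extbuf_pool(int socket_id)
{
	struct rte_pktmbuf_extmem ext_mem;
	void *buf;

	/* Stand-in only: a real application would provide its own pinned,
	 * IOVA-contiguous memory (e.g. GPU or device-mapped memory). */
	buf = rte_malloc_socket("extbuf", EXT_LEN, 4096, socket_id);
	if (buf == NULL)
		return NULL;
	ext_mem.buf_ptr = buf;
	ext_mem.buf_iova = rte_malloc_virt2iova(buf);
	ext_mem.buf_len = EXT_LEN;
	ext_mem.elt_size = ELT_SIZE;

	/* The pool created here carries RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF
	 * in rte_pktmbuf_priv_flags(), which is exactly the condition the
	 * patched mlx5_rx_queue_setup() tests before enabling MPRQ. */
	return rte_pktmbuf_pool_create_extbuf("extbuf_pool", NB_MBUFS, 256, 0,
					      ELT_SIZE, socket_id,
					      &ext_mem, 1);
}

With this patch, passing such a pool to rte_eth_rx_queue_setup() on a port configured with the mprq_en devarg makes mlx5_mprq_prepare() report MPRQ as unsupported, so the queue falls back to a Single-Packet RQ; the extended message in mlx5_mprq_prepare() now also reports whether an external mempool was the reason MPRQ was not enabled.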


Thread overview: 2+ messages
2022-08-11  0:35 Alexander Kozyrev
2022-08-12  7:27 ` Xueming(Steven) Li [this message]
