patches for DPDK stable branches
From: Alexander Kozyrev <akozyrev@mellanox.com>
To: dev@dpdk.org
Cc: stable@dpdk.org, rasland@mellanox.com, matan@mellanox.com
Subject: [dpdk-stable] [PATCH] net/mlx5: fix Rx/Tx descriptors number adjustment
Date: Thu, 11 Jun 2020 17:43:27 +0000	[thread overview]
Message-ID: <1591897407-15381-1-git-send-email-akozyrev@mellanox.com> (raw)

The number of descriptors to configure in an Rx/Tx queue is passed to
the mlx5_tx/rx_queue_pre_setup() functions by value. That means any
adjustment of this variable is local and cannot affect the actual
value used to allocate mbufs in the mlx5_txq/rxq_new() functions.
Pass the number by reference so the adjusted value is actually used.
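
For illustration only (not part of the patch; pre_setup_by_value(),
pre_setup_by_ref() and next_pow2() are simplified stand-ins for the mlx5
helpers and log2above()), a minimal standalone C sketch of why the
by-value parameter drops the adjustment while passing a pointer makes it
visible to the caller:

#include <stdint.h>
#include <stdio.h>

/* Round up to the next power of two (simplified stand-in for log2above()). */
static uint16_t next_pow2(uint16_t n)
{
	uint16_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

/* By value: the adjustment stays local, the caller keeps the old value. */
static void pre_setup_by_value(uint16_t desc)
{
	if (desc & (desc - 1))
		desc = next_pow2(desc);
}

/* By reference: the adjusted value reaches the caller and later allocation. */
static void pre_setup_by_ref(uint16_t *desc)
{
	if (*desc & (*desc - 1))
		*desc = next_pow2(*desc);
}

int main(void)
{
	uint16_t desc = 1000;

	pre_setup_by_value(desc);
	printf("by value:     %u descriptors\n", (unsigned)desc); /* still 1000 */
	pre_setup_by_ref(&desc);
	printf("by reference: %u descriptors\n", (unsigned)desc); /* 1024 */
	return 0;
}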

Fixes: 6218063b ("net/mlx5: refactor Rx data path")
Fixes: 1d88ba17 ("net/mlx5: refactor Tx data path")
Cc: stable@dpdk.org

Signed-off-by: Alexander Kozyrev <akozyrev@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
---
 drivers/net/mlx5/mlx5_rxq.c | 14 +++++++-------
 drivers/net/mlx5/mlx5_txq.c | 22 +++++++++++-----------
 2 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 78046fd..dda0073 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -453,19 +453,19 @@
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc)
+mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	if (!rte_is_power_of_2(desc)) {
-		desc = 1 << log2above(desc);
+	if (!rte_is_power_of_2(*desc)) {
+		*desc = 1 << log2above(*desc);
 		DRV_LOG(WARNING,
 			"port %u increased number of descriptors in Rx queue %u"
 			" to the next power of two (%d)",
-			dev->data->port_id, idx, desc);
+			dev->data->port_id, idx, *desc);
 	}
 	DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
-		dev->data->port_id, idx, desc);
+		dev->data->port_id, idx, *desc);
 	if (idx >= priv->rxqs_n) {
 		DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
 			dev->data->port_id, idx, priv->rxqs_n);
@@ -511,7 +511,7 @@
 		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 	int res;
 
-	res = mlx5_rx_queue_pre_setup(dev, idx, desc);
+	res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
 	if (res)
 		return res;
 	rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
@@ -552,7 +552,7 @@
 		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 	int res;
 
-	res = mlx5_rx_queue_pre_setup(dev, idx, desc);
+	res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
 	if (res)
 		return res;
 	if (hairpin_conf->peer_count != 1 ||
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index f7b548f..90f3296 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -150,27 +150,27 @@
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc)
+mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	if (desc <= MLX5_TX_COMP_THRESH) {
+	if (*desc <= MLX5_TX_COMP_THRESH) {
 		DRV_LOG(WARNING,
 			"port %u number of descriptors requested for Tx queue"
 			" %u must be higher than MLX5_TX_COMP_THRESH, using %u"
-			" instead of %u",
-			dev->data->port_id, idx, MLX5_TX_COMP_THRESH + 1, desc);
-		desc = MLX5_TX_COMP_THRESH + 1;
+			" instead of %u", dev->data->port_id, idx,
+			MLX5_TX_COMP_THRESH + 1, *desc);
+		*desc = MLX5_TX_COMP_THRESH + 1;
 	}
-	if (!rte_is_power_of_2(desc)) {
-		desc = 1 << log2above(desc);
+	if (!rte_is_power_of_2(*desc)) {
+		*desc = 1 << log2above(*desc);
 		DRV_LOG(WARNING,
 			"port %u increased number of descriptors in Tx queue"
 			" %u to the next power of two (%d)",
-			dev->data->port_id, idx, desc);
+			dev->data->port_id, idx, *desc);
 	}
 	DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
-		dev->data->port_id, idx, desc);
+		dev->data->port_id, idx, *desc);
 	if (idx >= priv->txqs_n) {
 		DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
 			dev->data->port_id, idx, priv->txqs_n);
@@ -213,7 +213,7 @@
 		container_of(txq, struct mlx5_txq_ctrl, txq);
 	int res;
 
-	res = mlx5_tx_queue_pre_setup(dev, idx, desc);
+	res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
 	if (res)
 		return res;
 	txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
@@ -254,7 +254,7 @@
 		container_of(txq, struct mlx5_txq_ctrl, txq);
 	int res;
 
-	res = mlx5_tx_queue_pre_setup(dev, idx, desc);
+	res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
 	if (res)
 		return res;
 	if (hairpin_conf->peer_count != 1 ||
-- 
1.8.3.1



Thread overview: 2+ messages
2020-06-11 17:43 Alexander Kozyrev [this message]
2020-06-15 10:51 ` Raslan Darawsheh
