patches for DPDK stable branches
From: Somnath Kotur <somnath.kotur@broadcom.com>
To: stable@dpdk.org
Cc: Somnath Kotur <somnath.kotur@broadcom.com>,
	Ajit Khaparde <ajit.khaparde@broadcom.com>
Subject: [dpdk-stable] [PATCH 18.11 2/2] net/bnxt: fix queue release
Date: Thu,  3 Dec 2020 10:02:59 +0530
Message-ID: <20201203043259.3423-2-somnath.kotur@broadcom.com>
In-Reply-To: <20201203043259.3423-1-somnath.kotur@broadcom.com>

[upstream commit 97c3271781bf1094b0ab0235f472fb5a468b02d3]

Some of the ring-related memory was not being freed in either the Rx or
the Tx queue release op. Fix this by freeing it now.
Add some more NULL pointer checks in the corresponding
queue_release_mbufs() and queue_release_op() functions.
Also call queue_release_op() in the error path of the corresponding
queue_setup_op().
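
For illustration only (not part of the patch): the release ordering the
fix applies is sketched below with simplified stand-in types, and plain
free() standing in for rte_free() and the bnxt helpers. Each ring pointer
is checked before use, the hardware descriptors are freed first, then the
ring struct, then the ring container itself; the setup error path just
calls the same release op. See the diff below for the actual changes.

	/* Simplified sketch of the release ordering; types are stand-ins. */
	#include <stdlib.h>

	struct ring { void *hw_desc; };
	struct rx_ring_info {
		struct ring *rx_ring_struct;
		struct ring *ag_ring_struct;
	};

	static void free_ring(struct ring *r)	/* stand-in for bnxt_free_ring() */
	{
		if (r)
			free(r->hw_desc);	/* free hardware descriptors */
	}

	static void release_rx_ring(struct rx_ring_info *rxr)
	{
		if (!rxr)			/* NULL check added by the fix */
			return;

		free_ring(rxr->rx_ring_struct);	/* HW descriptors first */
		free(rxr->rx_ring_struct);	/* then the ring struct */
		free_ring(rxr->ag_ring_struct);
		free(rxr->ag_ring_struct);
		free(rxr);			/* finally the ring container */
	}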

Fixes: 6133f207970c ("net/bnxt: add Rx queue create/destroy")
Fixes: 51c87ebafc7d ("net/bnxt: add Tx queue create/destroy")
Cc: stable@dpdk.org

Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
 drivers/net/bnxt/bnxt_rxq.c | 45 +++++++++++++++++++++++--------------
 drivers/net/bnxt/bnxt_txq.c | 32 +++++++++++++++-----------
 2 files changed, 47 insertions(+), 30 deletions(-)

diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index d1664dbc09..34dbb1dbdb 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -205,7 +205,7 @@ void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
 	struct bnxt_tpa_info *tpa_info;
 	uint16_t i;
 
-	if (!rxq)
+	if (!rxq || !rxq->rx_ring)
 		return;
 
 	rte_spinlock_lock(&rxq->lock);
@@ -265,12 +265,21 @@ void bnxt_rx_queue_release_op(void *rx_queue)
 		bnxt_rx_queue_release_mbufs(rxq);
 
 		/* Free RX ring hardware descriptors */
-		bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
-		/* Free RX Agg ring hardware descriptors */
-		bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
-
+		if (rxq->rx_ring) {
+			bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
+			rte_free(rxq->rx_ring->rx_ring_struct);
+			/* Free RX Agg ring hardware descriptors */
+			bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
+			rte_free(rxq->rx_ring->ag_ring_struct);
+
+			rte_free(rxq->rx_ring);
+		}
 		/* Free RX completion ring hardware descriptors */
-		bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
+		if (rxq->cp_ring) {
+			bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
+			rte_free(rxq->cp_ring->cp_ring_struct);
+			rte_free(rxq->cp_ring);
+		}
 
 		bnxt_free_rxq_stats(rxq);
 		rte_memzone_free(rxq->mz);
@@ -302,8 +311,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 
 	if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
 		PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
-		rc = -EINVAL;
-		goto out;
+		return -EINVAL;
 	}
 
 	if (eth_dev->data->rx_queues) {
@@ -315,8 +323,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 				 RTE_CACHE_LINE_SIZE, socket_id);
 	if (!rxq) {
 		PMD_DRV_LOG(ERR, "bnxt_rx_queue allocation failed!\n");
-		rc = -ENOMEM;
-		goto out;
+		return -ENOMEM;
 	}
 	rxq->bp = bp;
 	rxq->mb_pool = mp;
@@ -326,8 +333,11 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 	PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);
 
 	rc = bnxt_init_rx_ring_struct(rxq, socket_id);
-	if (rc)
-		goto out;
+	if (rc) {
+		PMD_DRV_LOG(ERR,
+			    "init_rx_ring_struct failed!\n");
+		goto err;
+	}
 
 	PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_size);
 	rxq->queue_id = queue_idx;
@@ -342,10 +352,8 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 	if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring,
 			"rxr")) {
 		PMD_DRV_LOG(ERR,
-			"ring_dma_zone_reserve for rx_ring failed!\n");
-		bnxt_rx_queue_release_op(rxq);
-		rc = -ENOMEM;
-		goto out;
+			    "ring_dma_zone_reserve for rx_ring failed!\n");
+		goto err;
 	}
 	rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);
 
@@ -369,7 +377,10 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 	/* Configure mtu if it is different from what was configured before */
 	if (!queue_idx)
 		bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
-out:
+
+	return 0;
+err:
+	bnxt_rx_queue_release_op(rxq);
 	return rc;
 }
 
diff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c
index cf6ddfb69b..707712729d 100644
--- a/drivers/net/bnxt/bnxt_txq.c
+++ b/drivers/net/bnxt/bnxt_txq.c
@@ -28,7 +28,7 @@ static void bnxt_tx_queue_release_mbufs(struct bnxt_tx_queue *txq)
 	struct bnxt_sw_tx_bd *sw_ring;
 	uint16_t i;
 
-	if (!txq)
+	if (!txq || !txq->tx_ring)
 		return;
 
 	sw_ring = txq->tx_ring->tx_buf_ring;
@@ -60,10 +60,18 @@ void bnxt_tx_queue_release_op(void *tx_queue)
 	if (txq) {
 		/* Free TX ring hardware descriptors */
 		bnxt_tx_queue_release_mbufs(txq);
-		bnxt_free_ring(txq->tx_ring->tx_ring_struct);
+		if (txq->tx_ring) {
+			bnxt_free_ring(txq->tx_ring->tx_ring_struct);
+			rte_free(txq->tx_ring->tx_ring_struct);
+			rte_free(txq->tx_ring);
+		}
 
 		/* Free TX completion ring hardware descriptors */
-		bnxt_free_ring(txq->cp_ring->cp_ring_struct);
+		if (txq->cp_ring) {
+			bnxt_free_ring(txq->cp_ring->cp_ring_struct);
+			rte_free(txq->cp_ring->cp_ring_struct);
+			rte_free(txq->cp_ring);
+		}
 
 		bnxt_free_txq_stats(txq);
 		rte_memzone_free(txq->mz);
@@ -92,8 +100,7 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 
 	if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) {
 		PMD_DRV_LOG(ERR, "nb_desc %d is invalid", nb_desc);
-		rc = -EINVAL;
-		goto out;
+		return -EINVAL;
 	}
 
 	if (eth_dev->data->tx_queues) {
@@ -107,8 +114,7 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 				 RTE_CACHE_LINE_SIZE, socket_id);
 	if (!txq) {
 		PMD_DRV_LOG(ERR, "bnxt_tx_queue allocation failed!");
-		rc = -ENOMEM;
-		goto out;
+		return -ENOMEM;
 	}
 	txq->bp = bp;
 	txq->nb_tx_desc = nb_desc;
@@ -117,7 +123,7 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 
 	rc = bnxt_init_tx_ring_struct(txq, socket_id);
 	if (rc)
-		goto out;
+		goto err;
 
 	txq->queue_id = queue_idx;
 	txq->port_id = eth_dev->data->port_id;
@@ -126,16 +132,14 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 	if (bnxt_alloc_rings(bp, queue_idx, txq, NULL, txq->cp_ring,
 			"txr")) {
 		PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!");
-		bnxt_tx_queue_release_op(txq);
 		rc = -ENOMEM;
-		goto out;
+		goto err;
 	}
 
 	if (bnxt_init_one_tx_ring(txq)) {
 		PMD_DRV_LOG(ERR, "bnxt_init_one_tx_ring failed!");
-		bnxt_tx_queue_release_op(txq);
 		rc = -ENOMEM;
-		goto out;
+		goto err;
 	}
 
 	eth_dev->data->tx_queues[queue_idx] = txq;
@@ -145,6 +149,8 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 	else
 		txq->tx_started = true;
 
-out:
+	return 0;
+err:
+	bnxt_tx_queue_release_op(txq);
 	return rc;
 }
-- 
2.28.0.497.g54e85e7



Thread overview: 3+ messages
2020-12-03  4:32 [dpdk-stable] [PATCH 18.11 1/2] net/bnxt: fix checking VNIC in shutdown path Somnath Kotur
2020-12-03  4:32 ` Somnath Kotur [this message]
2020-12-03 16:43 ` Kevin Traynor
