patches for DPDK stable branches
 help / color / Atom feed
* [dpdk-stable] [PATCH v2 1/9] net/bnxt: increase tqm entry allocation
       [not found] <20191004034903.85233-1-ajit.khaparde@broadcom.com>
@ 2019-10-04  3:48 ` Ajit Khaparde
  2019-10-04  3:48 ` [dpdk-stable] [PATCH v2 5/9] net/bnxt: use common receive transmit nq ring Ajit Khaparde
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 4+ messages in thread
From: Ajit Khaparde @ 2019-10-04  3:48 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit, Lance Richardson, stable, Kalesh Anakkur Purayil

From: Lance Richardson <lance.richardson@broadcom.com>

The current TQM backing store size isn't sufficient to allow 512
transmit rings. Fix by correcting TQM SP queue size calculation.

Fixes: f8168ca0e690 ("net/bnxt: support thor controller")
Cc: stable@dpdk.org
Signed-off-by: Lance Richardson <lance.richardson@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Reviewed-by: Kalesh Anakkur Purayil <kalesh-anakkur.purayil@broadcom.com>
---
 drivers/net/bnxt/bnxt_ethdev.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 02eacf7965..0e893cc956 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -4292,7 +4292,9 @@ int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	if (rc)
 		return rc;
 
-	entries = ctx->qp_max_l2_entries;
+	entries = ctx->qp_max_l2_entries +
+		  ctx->vnic_max_vnic_entries +
+		  ctx->tqm_min_entries_per_ring;
 	entries = bnxt_roundup(entries, ctx->tqm_entries_multiple);
 	entries = clamp_t(uint32_t, entries, ctx->tqm_min_entries_per_ring,
 			  ctx->tqm_max_entries_per_ring);
-- 
2.20.1 (Apple Git-117)


^ permalink raw reply	[flat|nested] 4+ messages in thread

* [dpdk-stable] [PATCH v2 5/9] net/bnxt: use common receive transmit nq ring
       [not found] <20191004034903.85233-1-ajit.khaparde@broadcom.com>
  2019-10-04  3:48 ` [dpdk-stable] [PATCH v2 1/9] net/bnxt: increase tqm entry allocation Ajit Khaparde
@ 2019-10-04  3:48 ` Ajit Khaparde
  2019-10-04  3:49 ` [dpdk-stable] [PATCH v2 6/9] net/bnxt: fix stats context calculation Ajit Khaparde
  2019-10-04  3:49 ` [dpdk-stable] [PATCH v2 8/9] net/bnxt: advertise scatter receive offload capability Ajit Khaparde
  3 siblings, 0 replies; 4+ messages in thread
From: Ajit Khaparde @ 2019-10-04  3:48 UTC (permalink / raw)
  To: dev
  Cc: ferruh.yigit, Lance Richardson, stable, Somnath Kotur,
	Kalesh Anakkur Purayil

From: Lance Richardson <lance.richardson@broadcom.com>

Thor queue scaling is currently limited by the number of NQs that
can be allocated. Fix by using a common NQ for all receive/transmit
rings instead of allocating a separate NQ for each ring.

Fixes: f8168ca0e690 ("net/bnxt: support thor controller")
Cc: stable@dpdk.org
Signed-off-by: Lance Richardson <lance.richardson@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Reviewed-by: Kalesh Anakkur Purayil <kalesh-anakkur.purayil@broadcom.com>
---
 drivers/net/bnxt/bnxt.h        |   1 +
 drivers/net/bnxt/bnxt_ethdev.c |   5 ++
 drivers/net/bnxt/bnxt_hwrm.c   |   7 +--
 drivers/net/bnxt/bnxt_ring.c   | 107 ++++++++++++++++++++++-----------
 drivers/net/bnxt/bnxt_ring.h   |   2 +
 drivers/net/bnxt/bnxt_rxq.c    |   4 +-
 drivers/net/bnxt/bnxt_rxq.h    |   1 -
 drivers/net/bnxt/bnxt_rxr.c    |  27 ---------
 drivers/net/bnxt/bnxt_txq.c    |   4 +-
 drivers/net/bnxt/bnxt_txq.h    |   1 -
 drivers/net/bnxt/bnxt_txr.c    |  25 --------
 11 files changed, 84 insertions(+), 100 deletions(-)

diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 5cfe5ee2c7..ad0b18dddd 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -497,6 +497,7 @@ struct bnxt {
 
 	/* Default completion ring */
 	struct bnxt_cp_ring_info	*async_cp_ring;
+	struct bnxt_cp_ring_info	*rxtx_nq_ring;
 	uint32_t		max_ring_grps;
 	struct bnxt_ring_grp_info	*grp_info;
 
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 9adcd94ff8..2845e9185a 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -223,6 +223,7 @@ static void bnxt_free_mem(struct bnxt *bp, bool reconfig)
 		bnxt_free_rx_rings(bp);
 	}
 	bnxt_free_async_cp_ring(bp);
+	bnxt_free_rxtx_nq_ring(bp);
 }
 
 static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig)
@@ -253,6 +254,10 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig)
 	if (rc)
 		goto alloc_mem_err;
 
+	rc = bnxt_alloc_rxtx_nq_ring(bp);
+	if (rc)
+		goto alloc_mem_err;
+
 	return 0;
 
 alloc_mem_err:
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 76ef004237..b5211aea75 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -2325,11 +2325,8 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
 			bp->grp_info[queue_index].ag_fw_ring_id =
 							INVALID_HW_RING_ID;
 	}
-	if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
+	if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
 		bnxt_free_cp_ring(bp, cpr);
-		if (rxq->nq_ring)
-			bnxt_free_nq_ring(bp, rxq->nq_ring);
-	}
 
 	if (BNXT_HAS_RING_GRPS(bp))
 		bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
@@ -2361,8 +2358,6 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
 		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
 			bnxt_free_cp_ring(bp, cpr);
 			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
-			if (txq->nq_ring)
-				bnxt_free_nq_ring(bp, txq->nq_ring);
 		}
 	}
 
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index cf0c24c9dc..19fc45395d 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -125,7 +125,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
 	int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);
 	cp_vmem_len = RTE_ALIGN(cp_vmem_len, 128);
 
-	int nq_vmem_len = BNXT_CHIP_THOR(bp) ?
+	int nq_vmem_len = nq_ring_info ?
 		RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size) : 0;
 	nq_vmem_len = RTE_ALIGN(nq_vmem_len, 128);
 
@@ -159,7 +159,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
 	nq_ring_start = cp_ring_start + cp_ring_len;
 	nq_ring_start = RTE_ALIGN(nq_ring_start, 4096);
 
-	int nq_ring_len = BNXT_CHIP_THOR(bp) ? cp_ring_len : 0;
+	int nq_ring_len = nq_ring_info ? cp_ring_len : 0;
 
 	int tx_ring_start = nq_ring_start + nq_ring_len;
 	tx_ring_start = RTE_ALIGN(tx_ring_start, 4096);
@@ -403,12 +403,12 @@ static void bnxt_set_db(struct bnxt *bp,
 }
 
 static int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
-				struct bnxt_cp_ring_info *cpr,
-				struct bnxt_cp_ring_info *nqr)
+				struct bnxt_cp_ring_info *cpr)
 {
 	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
 	uint32_t nq_ring_id = HWRM_NA_SIGNATURE;
 	int cp_ring_index = queue_index + BNXT_NUM_ASYNC_CPR(bp);
+	struct bnxt_cp_ring_info *nqr = bp->rxtx_nq_ring;
 	uint8_t ring_type;
 	int rc = 0;
 
@@ -436,31 +436,85 @@ static int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
 	return 0;
 }
 
-static int bnxt_alloc_nq_ring(struct bnxt *bp, int queue_index,
-			      struct bnxt_cp_ring_info *nqr)
+int bnxt_alloc_rxtx_nq_ring(struct bnxt *bp)
 {
-	struct bnxt_ring *nq_ring = nqr->cp_ring_struct;
-	int nq_ring_index = queue_index + BNXT_NUM_ASYNC_CPR(bp);
+	struct bnxt_cp_ring_info *nqr;
+	struct bnxt_ring *ring;
+	int ring_index = BNXT_NUM_ASYNC_CPR(bp);
+	unsigned int socket_id;
 	uint8_t ring_type;
 	int rc = 0;
 
-	if (!BNXT_HAS_NQ(bp))
-		return -EINVAL;
+	if (!BNXT_HAS_NQ(bp) || bp->rxtx_nq_ring)
+		return 0;
+
+	socket_id = rte_lcore_to_socket_id(rte_get_master_lcore());
+
+	nqr = rte_zmalloc_socket("nqr",
+				 sizeof(struct bnxt_cp_ring_info),
+				 RTE_CACHE_LINE_SIZE, socket_id);
+	if (nqr == NULL)
+		return -ENOMEM;
+
+	ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
+				  sizeof(struct bnxt_ring),
+				  RTE_CACHE_LINE_SIZE, socket_id);
+	if (ring == NULL) {
+		rte_free(nqr);
+		return -ENOMEM;
+	}
+
+	ring->bd = (void *)nqr->cp_desc_ring;
+	ring->bd_dma = nqr->cp_desc_mapping;
+	ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
+	ring->ring_mask = ring->ring_size - 1;
+	ring->vmem_size = 0;
+	ring->vmem = NULL;
+
+	nqr->cp_ring_struct = ring;
+	rc = bnxt_alloc_rings(bp, 0, NULL, NULL, nqr, NULL, "l2_nqr");
+	if (rc) {
+		rte_free(ring);
+		rte_free(nqr);
+		return -ENOMEM;
+	}
 
 	ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;
 
-	rc = bnxt_hwrm_ring_alloc(bp, nq_ring, ring_type, nq_ring_index,
+	rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type, ring_index,
 				  HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE, 0);
-	if (rc)
+	if (rc) {
+		rte_free(ring);
+		rte_free(nqr);
 		return rc;
+	}
 
-	bnxt_set_db(bp, &nqr->cp_db, ring_type, nq_ring_index,
-		    nq_ring->fw_ring_id);
+	bnxt_set_db(bp, &nqr->cp_db, ring_type, ring_index,
+		    ring->fw_ring_id);
 	bnxt_db_nq(nqr);
 
+	bp->rxtx_nq_ring = nqr;
+
 	return 0;
 }
 
+/* Free RX/TX NQ ring.  */
+void bnxt_free_rxtx_nq_ring(struct bnxt *bp)
+{
+	struct bnxt_cp_ring_info *nqr = bp->rxtx_nq_ring;
+
+	if (!nqr)
+		return;
+
+	bnxt_free_nq_ring(bp, nqr);
+
+	bnxt_free_ring(nqr->cp_ring_struct);
+	rte_free(nqr->cp_ring_struct);
+	nqr->cp_ring_struct = NULL;
+	rte_free(nqr);
+	bp->rxtx_nq_ring = NULL;
+}
+
 static int bnxt_alloc_rx_ring(struct bnxt *bp, int queue_index)
 {
 	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
@@ -529,17 +583,10 @@ int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
 	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
 	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
 	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
-	struct bnxt_cp_ring_info *nqr = rxq->nq_ring;
 	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
 	int rc;
 
-	if (BNXT_HAS_NQ(bp)) {
-		rc = bnxt_alloc_nq_ring(bp, queue_index, nqr);
-		if (rc)
-			goto err_out;
-	}
-
-	rc = bnxt_alloc_cmpl_ring(bp, queue_index, cpr, nqr);
+	rc = bnxt_alloc_cmpl_ring(bp, queue_index, cpr);
 	if (rc)
 		goto err_out;
 
@@ -644,16 +691,10 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
 	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
 		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
 		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
-		struct bnxt_cp_ring_info *nqr = rxq->nq_ring;
 		struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
 		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
 
-		if (BNXT_HAS_NQ(bp)) {
-			if (bnxt_alloc_nq_ring(bp, i, nqr))
-				goto err_out;
-		}
-
-		if (bnxt_alloc_cmpl_ring(bp, i, cpr, nqr))
+		if (bnxt_alloc_cmpl_ring(bp, i, cpr))
 			goto err_out;
 
 		if (BNXT_HAS_RING_GRPS(bp)) {
@@ -697,18 +738,12 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
 		struct bnxt_tx_queue *txq = bp->tx_queues[i];
 		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
 		struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
-		struct bnxt_cp_ring_info *nqr = txq->nq_ring;
 		struct bnxt_tx_ring_info *txr = txq->tx_ring;
 		struct bnxt_ring *ring = txr->tx_ring_struct;
 		unsigned int idx = i + bp->rx_cp_nr_rings;
 		uint16_t tx_cosq_id = 0;
 
-		if (BNXT_HAS_NQ(bp)) {
-			if (bnxt_alloc_nq_ring(bp, idx, nqr))
-				goto err_out;
-		}
-
-		if (bnxt_alloc_cmpl_ring(bp, idx, cpr, nqr))
+		if (bnxt_alloc_cmpl_ring(bp, idx, cpr))
 			goto err_out;
 
 		if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)
diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h
index a5d5106986..833118391b 100644
--- a/drivers/net/bnxt/bnxt_ring.h
+++ b/drivers/net/bnxt/bnxt_ring.h
@@ -78,6 +78,8 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp);
 int bnxt_alloc_async_cp_ring(struct bnxt *bp);
 void bnxt_free_async_cp_ring(struct bnxt *bp);
 int bnxt_alloc_async_ring_struct(struct bnxt *bp);
+int bnxt_alloc_rxtx_nq_ring(struct bnxt *bp);
+void bnxt_free_rxtx_nq_ring(struct bnxt *bp);
 
 static inline void bnxt_db_write(struct bnxt_db_info *db, uint32_t idx)
 {
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index 5d291cbafd..9439fcd1fb 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -341,8 +341,8 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 
 	eth_dev->data->rx_queues[queue_idx] = rxq;
 	/* Allocate RX ring hardware descriptors */
-	if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring,
-			rxq->nq_ring, "rxr")) {
+	if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring, NULL,
+			     "rxr")) {
 		PMD_DRV_LOG(ERR,
 			"ring_dma_zone_reserve for rx_ring failed!\n");
 		bnxt_rx_queue_release_op(rxq);
diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
index 3693d89a60..4f5182d9e9 100644
--- a/drivers/net/bnxt/bnxt_rxq.h
+++ b/drivers/net/bnxt/bnxt_rxq.h
@@ -39,7 +39,6 @@ struct bnxt_rx_queue {
 	uint32_t			rx_buf_size;
 	struct bnxt_rx_ring_info	*rx_ring;
 	struct bnxt_cp_ring_info	*cp_ring;
-	struct bnxt_cp_ring_info	*nq_ring;
 	rte_atomic64_t		rx_mbuf_alloc_fail;
 	const struct rte_memzone *mz;
 };
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 1a6fb7944b..bda4f4c1b9 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -742,7 +742,6 @@ void bnxt_free_rx_rings(struct bnxt *bp)
 int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
 {
 	struct bnxt_cp_ring_info *cpr;
-	struct bnxt_cp_ring_info *nqr;
 	struct bnxt_rx_ring_info *rxr;
 	struct bnxt_ring *ring;
 
@@ -789,32 +788,6 @@ int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
 	ring->vmem_size = 0;
 	ring->vmem = NULL;
 
-	if (BNXT_HAS_NQ(rxq->bp)) {
-		nqr = rte_zmalloc_socket("bnxt_rx_ring_cq",
-					 sizeof(struct bnxt_cp_ring_info),
-					 RTE_CACHE_LINE_SIZE, socket_id);
-		if (nqr == NULL)
-			return -ENOMEM;
-
-		rxq->nq_ring = nqr;
-
-		ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
-					  sizeof(struct bnxt_ring),
-					  RTE_CACHE_LINE_SIZE, socket_id);
-		if (ring == NULL)
-			return -ENOMEM;
-
-		nqr->cp_ring_struct = ring;
-		ring->ring_size =
-			rte_align32pow2(rxr->rx_ring_struct->ring_size *
-					(2 + AGG_RING_SIZE_FACTOR));
-		ring->ring_mask = ring->ring_size - 1;
-		ring->bd = (void *)nqr->cp_desc_ring;
-		ring->bd_dma = nqr->cp_desc_mapping;
-		ring->vmem_size = 0;
-		ring->vmem = NULL;
-	}
-
 	/* Allocate Aggregator rings */
 	ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
 				   sizeof(struct bnxt_ring),
diff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c
index ea20d737fe..5ad4ee155e 100644
--- a/drivers/net/bnxt/bnxt_txq.c
+++ b/drivers/net/bnxt/bnxt_txq.c
@@ -141,8 +141,8 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
 	txq->port_id = eth_dev->data->port_id;
 
 	/* Allocate TX ring hardware descriptors */
-	if (bnxt_alloc_rings(bp, queue_idx, txq, NULL, txq->cp_ring,
-			txq->nq_ring, "txr")) {
+	if (bnxt_alloc_rings(bp, queue_idx, txq, NULL, txq->cp_ring, NULL,
+			     "txr")) {
 		PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!");
 		bnxt_tx_queue_release_op(txq);
 		rc = -ENOMEM;
diff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h
index 7a442516d2..37a3f9539f 100644
--- a/drivers/net/bnxt/bnxt_txq.h
+++ b/drivers/net/bnxt/bnxt_txq.h
@@ -33,7 +33,6 @@ struct bnxt_tx_queue {
 
 	unsigned int		cp_nr_rings;
 	struct bnxt_cp_ring_info	*cp_ring;
-	struct bnxt_cp_ring_info        *nq_ring;
 	const struct rte_memzone *mz;
 	struct rte_mbuf **free;
 };
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 0ed6581bed..6e2ee86c05 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -57,7 +57,6 @@ int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq)
 int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
 {
 	struct bnxt_cp_ring_info *cpr;
-	struct bnxt_cp_ring_info *nqr;
 	struct bnxt_tx_ring_info *txr;
 	struct bnxt_ring *ring;
 
@@ -101,30 +100,6 @@ int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
 	ring->vmem_size = 0;
 	ring->vmem = NULL;
 
-	if (BNXT_HAS_NQ(txq->bp)) {
-		nqr = rte_zmalloc_socket("bnxt_tx_ring_nq",
-					 sizeof(struct bnxt_cp_ring_info),
-					 RTE_CACHE_LINE_SIZE, socket_id);
-		if (nqr == NULL)
-			return -ENOMEM;
-
-		txq->nq_ring = nqr;
-
-		ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
-					  sizeof(struct bnxt_ring),
-					  RTE_CACHE_LINE_SIZE, socket_id);
-		if (ring == NULL)
-			return -ENOMEM;
-
-		nqr->cp_ring_struct = ring;
-		ring->ring_size = txr->tx_ring_struct->ring_size;
-		ring->ring_mask = ring->ring_size - 1;
-		ring->bd = (void *)nqr->cp_desc_ring;
-		ring->bd_dma = nqr->cp_desc_mapping;
-		ring->vmem_size = 0;
-		ring->vmem = NULL;
-	}
-
 	return 0;
 }
 
-- 
2.20.1 (Apple Git-117)


^ permalink raw reply	[flat|nested] 4+ messages in thread

* [dpdk-stable] [PATCH v2 6/9] net/bnxt: fix stats context calculation
       [not found] <20191004034903.85233-1-ajit.khaparde@broadcom.com>
  2019-10-04  3:48 ` [dpdk-stable] [PATCH v2 1/9] net/bnxt: increase tqm entry allocation Ajit Khaparde
  2019-10-04  3:48 ` [dpdk-stable] [PATCH v2 5/9] net/bnxt: use common receive transmit nq ring Ajit Khaparde
@ 2019-10-04  3:49 ` Ajit Khaparde
  2019-10-04  3:49 ` [dpdk-stable] [PATCH v2 8/9] net/bnxt: advertise scatter receive offload capability Ajit Khaparde
  3 siblings, 0 replies; 4+ messages in thread
From: Ajit Khaparde @ 2019-10-04  3:49 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit, Lance Richardson, stable, Somnath Kotur

From: Lance Richardson <lance.richardson@broadcom.com>

The required number of statistics contexts is computed as the sum
of the number of receive and transmit rings plus one for the async
completion ring. A statistics context is not actually required for
the async completion ring, so remove it from the calculation.

Fixes: bd0a14c99f65 ("net/bnxt: use dedicated CPR for async events")
Cc: stable@dpdk.org

Signed-off-by: Lance Richardson <lance.richardson@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
 drivers/net/bnxt/bnxt_hwrm.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index b5211aea75..1e65c3b80b 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -851,9 +851,7 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
 	req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
 	req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
 					    AGG_RING_MULTIPLIER);
-	req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings +
-					     bp->tx_nr_rings +
-					     BNXT_NUM_ASYNC_CPR(bp));
+	req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
 	req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
 					      bp->tx_nr_rings +
 					      BNXT_NUM_ASYNC_CPR(bp));
-- 
2.20.1 (Apple Git-117)


^ permalink raw reply	[flat|nested] 4+ messages in thread

* [dpdk-stable] [PATCH v2 8/9] net/bnxt: advertise scatter receive offload capability
       [not found] <20191004034903.85233-1-ajit.khaparde@broadcom.com>
                   ` (2 preceding siblings ...)
  2019-10-04  3:49 ` [dpdk-stable] [PATCH v2 6/9] net/bnxt: fix stats context calculation Ajit Khaparde
@ 2019-10-04  3:49 ` Ajit Khaparde
  3 siblings, 0 replies; 4+ messages in thread
From: Ajit Khaparde @ 2019-10-04  3:49 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit, Lance Richardson, stable, Somnath Kotur

From: Lance Richardson <lance.richardson@broadcom.com>

Scattered receive is supported but not included in the receive offload
capabilities. Fix by adding it and including it in the scattered
receive calculation.

Fixes: 9c1507d96ab8 ("net/bnxt: switch to the new offload API")
Cc: stable@dpdk.org
Signed-off-by: Lance Richardson <lance.richardson@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
 drivers/net/bnxt/bnxt_ethdev.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 2845e9185a..5160ac002b 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -163,7 +163,8 @@ static const struct rte_pci_id bnxt_pci_id_map[] = {
 				     DEV_RX_OFFLOAD_JUMBO_FRAME | \
 				     DEV_RX_OFFLOAD_KEEP_CRC | \
 				     DEV_RX_OFFLOAD_VLAN_EXTEND | \
-				     DEV_RX_OFFLOAD_TCP_LRO)
+				     DEV_RX_OFFLOAD_TCP_LRO | \
+				     DEV_RX_OFFLOAD_SCATTER)
 
 static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
 static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
@@ -749,6 +750,9 @@ static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
 	uint16_t buf_size;
 	int i;
 
+	if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
+		return 1;
+
 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
 		struct bnxt_rx_queue *rxq = eth_dev->data->rx_queues[i];
 
-- 
2.20.1 (Apple Git-117)


^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, back to index

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <20191004034903.85233-1-ajit.khaparde@broadcom.com>
2019-10-04  3:48 ` [dpdk-stable] [PATCH v2 1/9] net/bnxt: increase tqm entry allocation Ajit Khaparde
2019-10-04  3:48 ` [dpdk-stable] [PATCH v2 5/9] net/bnxt: use common receive transmit nq ring Ajit Khaparde
2019-10-04  3:49 ` [dpdk-stable] [PATCH v2 6/9] net/bnxt: fix stats context calculation Ajit Khaparde
2019-10-04  3:49 ` [dpdk-stable] [PATCH v2 8/9] net/bnxt: advertise scatter receive offload capability Ajit Khaparde

patches for DPDK stable branches

Archives are clonable:
	git clone --mirror http://inbox.dpdk.org/stable/0 stable/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 stable stable/ http://inbox.dpdk.org/stable \
		stable@dpdk.org
	public-inbox-index stable


Newsgroup available over NNTP:
	nntp://inbox.dpdk.org/inbox.dpdk.stable


AGPL code for this site: git clone https://public-inbox.org/ public-inbox