From: Devendra Singh Rawat <dsinghrawat@marvell.com>
To: <dev@dpdk.org>, <thomas@monjalon.net>, <jerinj@marvell.com>,
	<ferruh.yigit@intel.com>, <rmody@marvell.com>
Cc: <palok@marvell.com>,
	Devendra Singh Rawat <dsinghrawat@marvell.com>, <stable@dpdk.org>
Subject: [PATCH 2/3] net/qede: fix Rx callback
Date: Fri, 4 Mar 2022 17:38:32 +0530
Message-ID: <20220304120833.312776-2-dsinghrawat@marvell.com>
In-Reply-To: <20220304120833.312776-1-dsinghrawat@marvell.com>

qede_alloc_rx_bulk_mbufs() was trimming the number of requested mbufs
down to QEDE_MAX_BULK_ALLOC_COUNT. The Rx callback was unaware of this
trimming and always reset the number of empty Rx BD ring slots to 0.
This left the Rx BD ring in an inconsistent state, and ultimately the
application failed to receive any traffic.

The fix trims the number of requested mbufs before calling
qede_alloc_rx_bulk_mbufs(). After qede_alloc_rx_bulk_mbufs() returns
successfully, the number of empty Rx BD ring slots is decremented by
the correct count.

Fixes: 8f2312474529 ("net/qede: fix performance bottleneck in Rx path")
Cc: stable@dpdk.org

Signed-off-by: Devendra Singh Rawat <dsinghrawat@marvell.com>
Signed-off-by: Rasesh Mody <rmody@marvell.com>
---
 drivers/net/qede/qede_rxtx.c | 68 ++++++++++++++++--------------------
 1 file changed, 31 insertions(+), 37 deletions(-)

diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 0c52568180..02fa1fcaa1 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -38,48 +38,40 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
 
 static inline int qede_alloc_rx_bulk_mbufs(struct qede_rx_queue *rxq, int count)
 {
+	void *obj_p[QEDE_MAX_BULK_ALLOC_COUNT] __rte_cache_aligned;
 	struct rte_mbuf *mbuf = NULL;
 	struct eth_rx_bd *rx_bd;
 	dma_addr_t mapping;
 	int i, ret = 0;
 	uint16_t idx;
-	uint16_t mask = NUM_RX_BDS(rxq);
-
-	if (count > QEDE_MAX_BULK_ALLOC_COUNT)
-		count = QEDE_MAX_BULK_ALLOC_COUNT;
 
 	idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
 
-	if (count > mask - idx + 1)
-		count = mask - idx + 1;
-
-	ret = rte_mempool_get_bulk(rxq->mb_pool, (void **)&rxq->sw_rx_ring[idx],
-				   count);
-
+	ret = rte_mempool_get_bulk(rxq->mb_pool, obj_p, count);
 	if (unlikely(ret)) {
 		PMD_RX_LOG(ERR, rxq,
 			   "Failed to allocate %d rx buffers "
 			    "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
-			    count,
-			    rxq->sw_rx_prod & NUM_RX_BDS(rxq),
-			    rxq->sw_rx_cons & NUM_RX_BDS(rxq),
+			    count, idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
 			    rte_mempool_avail_count(rxq->mb_pool),
 			    rte_mempool_in_use_count(rxq->mb_pool));
 		return -ENOMEM;
 	}
 
 	for (i = 0; i < count; i++) {
-		rte_prefetch0(rxq->sw_rx_ring[(idx + 1) & NUM_RX_BDS(rxq)]);
-		mbuf = rxq->sw_rx_ring[idx & NUM_RX_BDS(rxq)];
+		mbuf = obj_p[i];
+		if (likely(i < count - 1))
+			rte_prefetch0(obj_p[i + 1]);
 
+		idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
+		rxq->sw_rx_ring[idx] = mbuf;
 		mapping = rte_mbuf_data_iova_default(mbuf);
 		rx_bd = (struct eth_rx_bd *)
 			ecore_chain_produce(&rxq->rx_bd_ring);
 		rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
 		rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
-		idx++;
+		rxq->sw_rx_prod++;
 	}
-	rxq->sw_rx_prod = idx;
 
 	return 0;
 }
@@ -1544,25 +1536,26 @@ qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	uint8_t bitfield_val;
 #endif
 	uint8_t offset, flags, bd_num;
-
+	uint16_t count = 0;
 
 	/* Allocate buffers that we used in previous loop */
 	if (rxq->rx_alloc_count) {
-		if (unlikely(qede_alloc_rx_bulk_mbufs(rxq,
-			     rxq->rx_alloc_count))) {
+		count = rxq->rx_alloc_count > QEDE_MAX_BULK_ALLOC_COUNT ?
+			QEDE_MAX_BULK_ALLOC_COUNT : rxq->rx_alloc_count;
+
+		if (unlikely(qede_alloc_rx_bulk_mbufs(rxq, count))) {
 			struct rte_eth_dev *dev;
 
 			PMD_RX_LOG(ERR, rxq,
-				   "New buffer allocation failed,"
-				   "dropping incoming packetn");
+				   "New buffers allocation failed,"
+				   "dropping incoming packets\n");
 			dev = &rte_eth_devices[rxq->port_id];
-			dev->data->rx_mbuf_alloc_failed +=
-							rxq->rx_alloc_count;
-			rxq->rx_alloc_errors += rxq->rx_alloc_count;
+			dev->data->rx_mbuf_alloc_failed += count;
+			rxq->rx_alloc_errors += count;
 			return 0;
 		}
 		qede_update_rx_prod(qdev, rxq);
-		rxq->rx_alloc_count = 0;
+		rxq->rx_alloc_count -= count;
 	}
 
 	hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
@@ -1731,7 +1724,7 @@ qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	}
 
 	/* Request number of buffers to be allocated in next loop */
-	rxq->rx_alloc_count = rx_alloc_count;
+	rxq->rx_alloc_count += rx_alloc_count;
 
 	rxq->rcv_pkts += rx_pkt;
 	rxq->rx_segs += rx_pkt;
@@ -1771,25 +1764,26 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	struct qede_agg_info *tpa_info = NULL;
 	uint32_t rss_hash;
 	int rx_alloc_count = 0;
-
+	uint16_t count = 0;
 
 	/* Allocate buffers that we used in previous loop */
 	if (rxq->rx_alloc_count) {
-		if (unlikely(qede_alloc_rx_bulk_mbufs(rxq,
-			     rxq->rx_alloc_count))) {
+		count = rxq->rx_alloc_count > QEDE_MAX_BULK_ALLOC_COUNT ?
+			QEDE_MAX_BULK_ALLOC_COUNT : rxq->rx_alloc_count;
+
+		if (unlikely(qede_alloc_rx_bulk_mbufs(rxq, count))) {
 			struct rte_eth_dev *dev;
 
 			PMD_RX_LOG(ERR, rxq,
-				   "New buffer allocation failed,"
-				   "dropping incoming packetn");
+				   "New buffers allocation failed,"
+				   "dropping incoming packets\n");
 			dev = &rte_eth_devices[rxq->port_id];
-			dev->data->rx_mbuf_alloc_failed +=
-							rxq->rx_alloc_count;
-			rxq->rx_alloc_errors += rxq->rx_alloc_count;
+			dev->data->rx_mbuf_alloc_failed += count;
+			rxq->rx_alloc_errors += count;
 			return 0;
 		}
 		qede_update_rx_prod(qdev, rxq);
-		rxq->rx_alloc_count = 0;
+		rxq->rx_alloc_count -= count;
 	}
 
 	hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
@@ -2028,7 +2022,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	}
 
 	/* Request number of buffers to be allocated in next loop */
-	rxq->rx_alloc_count = rx_alloc_count;
+	rxq->rx_alloc_count += rx_alloc_count;
 
 	rxq->rcv_pkts += rx_pkt;
 
-- 
2.18.2

