DPDK patches and discussions
* [dpdk-dev] [PATCH] net/bnxt: fix non-vector fast mbuf free offload
@ 2020-10-09 16:36 Lance Richardson
  2020-10-09 19:49 ` Ajit Khaparde
  0 siblings, 1 reply; 2+ messages in thread
From: Lance Richardson @ 2020-10-09 16:36 UTC (permalink / raw)
  To: Ajit Khaparde, Somnath Kotur; +Cc: dev

The fast mbuf free offload for non-vector mode requires
additional checks in order to handle long Tx buffer
descriptors, so dedicated completion functions are needed
for vector and non-vector modes.

Fixes: 103169df2880 ("net/bnxt: support fast mbuf free")
Signed-off-by: Lance Richardson <lance.richardson@broadcom.com>
Reviewed-by: Ajit Kumar Khaparde <ajit.khaparde@broadcom.com>
---
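(Not part of the patch: a minimal sketch, for context, of how an
application might request this offload, assuming the usual
preconditions that all mbufs sent on the queue come from a single
mempool and are not reference-counted. Port/queue setup details are
illustrative only.)

	struct rte_eth_conf port_conf;
	struct rte_eth_dev_info dev_info;

	memset(&port_conf, 0, sizeof(port_conf));
	rte_eth_dev_info_get(port_id, &dev_info);

	/* Request fast mbuf free on Tx if the PMD advertises it. */
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	rte_eth_dev_configure(port_id, 1, 1, &port_conf);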
 drivers/net/bnxt/bnxt_rxtx_vec_common.h | 27 ++++++++++++++++++++
 drivers/net/bnxt/bnxt_rxtx_vec_neon.c   |  2 +-
 drivers/net/bnxt/bnxt_rxtx_vec_sse.c    |  2 +-
 drivers/net/bnxt/bnxt_txr.c             | 34 +++++++++++++++++++++++++
 drivers/net/bnxt/bnxt_txr.h             | 27 --------------------
 5 files changed, 63 insertions(+), 29 deletions(-)

diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_common.h b/drivers/net/bnxt/bnxt_rxtx_vec_common.h
index 33ac53568..d540e9eee 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_common.h
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_common.h
@@ -95,6 +95,33 @@ bnxt_rxq_rearm(struct bnxt_rx_queue *rxq, struct bnxt_rx_ring_info *rxr)
 	rxq->rxrearm_nb -= nb;
 }
 
+/*
+ * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ * is enabled.
+ */
+static inline void
+bnxt_tx_cmp_vec_fast(struct bnxt_tx_queue *txq, int nr_pkts)
+{
+	struct bnxt_tx_ring_info *txr = txq->tx_ring;
+	uint32_t ring_mask = txr->tx_ring_struct->ring_mask;
+	struct rte_mbuf **free = txq->free;
+	uint16_t cons = txr->tx_cons;
+	unsigned int blk = 0;
+
+	while (nr_pkts--) {
+		struct bnxt_sw_tx_bd *tx_buf;
+
+		tx_buf = &txr->tx_buf_ring[cons];
+		cons = (cons + 1) & ring_mask;
+		free[blk++] = tx_buf->mbuf;
+		tx_buf->mbuf = NULL;
+	}
+	if (blk)
+		rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);
+
+	txr->tx_cons = cons;
+}
+
 static inline void
 bnxt_tx_cmp_vec(struct bnxt_tx_queue *txq, int nr_pkts)
 {
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
index 4c04cc43a..f49e29ccb 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
@@ -369,7 +369,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
 	cpr->valid = !!(raw_cons & cp_ring_struct->ring_size);
 	if (nb_tx_pkts) {
 		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
-			bnxt_tx_cmp_fast(txq, nb_tx_pkts);
+			bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
 		else
 			bnxt_tx_cmp_vec(txq, nb_tx_pkts);
 		cpr->cp_raw_cons = raw_cons;
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_sse.c b/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
index f71f46985..e4ba63551 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
@@ -340,7 +340,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
 	cpr->valid = !!(raw_cons & cp_ring_struct->ring_size);
 	if (nb_tx_pkts) {
 		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
-			bnxt_tx_cmp_fast(txq, nb_tx_pkts);
+			bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
 		else
 			bnxt_tx_cmp_vec(txq, nb_tx_pkts);
 		cpr->cp_raw_cons = raw_cons;
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 125ac8341..fb358d6f1 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -337,6 +337,40 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 	return 0;
 }
 
+/*
+ * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ * is enabled.
+ */
+static void bnxt_tx_cmp_fast(struct bnxt_tx_queue *txq, int nr_pkts)
+{
+	struct bnxt_tx_ring_info *txr = txq->tx_ring;
+	struct rte_mbuf **free = txq->free;
+	uint16_t cons = txr->tx_cons;
+	unsigned int blk = 0;
+	int i, j;
+
+	for (i = 0; i < nr_pkts; i++) {
+		struct bnxt_sw_tx_bd *tx_buf;
+		unsigned short nr_bds;
+
+		tx_buf = &txr->tx_buf_ring[cons];
+		nr_bds = tx_buf->nr_bds;
+		for (j = 0; j < nr_bds; j++) {
+			if (tx_buf->mbuf) {
+				/* Add mbuf to the bulk free array */
+				free[blk++] = tx_buf->mbuf;
+				tx_buf->mbuf = NULL;
+			}
+			cons = RING_NEXT(txr->tx_ring_struct, cons);
+			tx_buf = &txr->tx_buf_ring[cons];
+		}
+	}
+	if (blk)
+		rte_mempool_put_bulk(free[0]->pool, (void *)free, blk);
+
+	txr->tx_cons = cons;
+}
+
 static void bnxt_tx_cmp(struct bnxt_tx_queue *txq, int nr_pkts)
 {
 	struct bnxt_tx_ring_info *txr = txq->tx_ring;
diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h
index c0b1fde22..d241227d4 100644
--- a/drivers/net/bnxt/bnxt_txr.h
+++ b/drivers/net/bnxt/bnxt_txr.h
@@ -44,33 +44,6 @@ static inline uint32_t bnxt_tx_avail(struct bnxt_tx_queue *txq)
 		 bnxt_tx_bds_in_hw(txq)) - 1);
 }
 
-/*
- * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
- * is enabled.
- */
-static inline void
-bnxt_tx_cmp_fast(struct bnxt_tx_queue *txq, int nr_pkts)
-{
-	struct bnxt_tx_ring_info *txr = txq->tx_ring;
-	uint32_t ring_mask = txr->tx_ring_struct->ring_mask;
-	struct rte_mbuf **free = txq->free;
-	uint16_t cons = txr->tx_cons;
-	unsigned int blk = 0;
-
-	while (nr_pkts--) {
-		struct bnxt_sw_tx_bd *tx_buf;
-
-		tx_buf = &txr->tx_buf_ring[cons];
-		cons = (cons + 1) & ring_mask;
-		free[blk++] = tx_buf->mbuf;
-		tx_buf->mbuf = NULL;
-	}
-	if (blk)
-		rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);
-
-	txr->tx_cons = cons;
-}
-
 void bnxt_free_tx_rings(struct bnxt *bp);
 int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq);
 int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id);
-- 
2.25.1

