DPDK patches and discussions
 help / color / mirror / Atom feed
* [PATCH 0/2] avoid using mempool fns directly
@ 2025-08-22 17:06 Bruce Richardson
  2025-08-22 17:06 ` [PATCH 1/2] net/intel: avoid allocating from mempool directly Bruce Richardson
  2025-08-22 17:06 ` [PATCH 2/2] net/intel: avoid accessing mempool directly on free Bruce Richardson
  0 siblings, 2 replies; 3+ messages in thread
From: Bruce Richardson @ 2025-08-22 17:06 UTC (permalink / raw)
  To: dev; +Cc: mb, Bruce Richardson

Use the new mbuf inline APIs to avoid using mempool functions directly
from drivers.

Bruce Richardson (2):
  net/intel: avoid allocating from mempool directly
  net/intel: avoid accessing mempool directly on free

 drivers/net/intel/common/rx_vec_arm.h            |  6 +++---
 drivers/net/intel/common/rx_vec_ppc.h            |  6 +++---
 drivers/net/intel/common/rx_vec_x86.h            |  6 +++---
 drivers/net/intel/common/tx.h                    |  4 ++--
 drivers/net/intel/fm10k/fm10k_ethdev.c           |  5 ++---
 drivers/net/intel/fm10k/fm10k_rxtx.c             | 13 ++++++-------
 drivers/net/intel/fm10k/fm10k_rxtx_vec.c         |  7 +++----
 drivers/net/intel/i40e/i40e_rxtx.c               |  7 +++----
 drivers/net/intel/iavf/iavf_rxtx.c               |  2 +-
 drivers/net/intel/ice/ice_rxtx.c                 |  6 +++---
 drivers/net/intel/idpf/idpf_common_rxtx_avx2.c   |  2 +-
 drivers/net/intel/idpf/idpf_common_rxtx_avx512.c |  4 ++--
 drivers/net/intel/ixgbe/ixgbe_rxtx.c             |  7 +++----
 drivers/net/intel/ixgbe/ixgbe_rxtx_vec_common.h  |  5 ++---
 14 files changed, 37 insertions(+), 43 deletions(-)

-- 
2.48.1


^ permalink raw reply	[flat|nested] 3+ messages in thread

* [PATCH 1/2] net/intel: avoid allocating from mempool directly
  2025-08-22 17:06 [PATCH 0/2] avoid using mempool fns directly Bruce Richardson
@ 2025-08-22 17:06 ` Bruce Richardson
  2025-08-22 17:06 ` [PATCH 2/2] net/intel: avoid accessing mempool directly on free Bruce Richardson
  1 sibling, 0 replies; 3+ messages in thread
From: Bruce Richardson @ 2025-08-22 17:06 UTC (permalink / raw)
  To: dev; +Cc: mb, Bruce Richardson

Rather than calling the mempool function rte_mempool_get_bulk, we update
the code to use the mbuf function rte_mbuf_raw_alloc_bulk(), which
properly supports debug flags and checks when they are enabled.

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 drivers/net/intel/common/rx_vec_arm.h            | 6 +++---
 drivers/net/intel/common/rx_vec_ppc.h            | 6 +++---
 drivers/net/intel/common/rx_vec_x86.h            | 6 +++---
 drivers/net/intel/fm10k/fm10k_ethdev.c           | 2 +-
 drivers/net/intel/fm10k/fm10k_rxtx.c             | 8 ++++----
 drivers/net/intel/fm10k/fm10k_rxtx_vec.c         | 2 +-
 drivers/net/intel/i40e/i40e_rxtx.c               | 2 +-
 drivers/net/intel/iavf/iavf_rxtx.c               | 2 +-
 drivers/net/intel/ice/ice_rxtx.c                 | 4 ++--
 drivers/net/intel/idpf/idpf_common_rxtx_avx2.c   | 2 +-
 drivers/net/intel/idpf/idpf_common_rxtx_avx512.c | 4 ++--
 drivers/net/intel/ixgbe/ixgbe_rxtx.c             | 2 +-
 12 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/drivers/net/intel/common/rx_vec_arm.h b/drivers/net/intel/common/rx_vec_arm.h
index 2e48d4b6c0..f7e7b8c396 100644
--- a/drivers/net/intel/common/rx_vec_arm.h
+++ b/drivers/net/intel/common/rx_vec_arm.h
@@ -16,19 +16,19 @@
 static inline int
 _ci_rxq_rearm_get_bufs(struct ci_rx_queue *rxq)
 {
-	struct ci_rx_entry *rxp = &rxq->sw_ring[rxq->rxrearm_start];
+	struct rte_mbuf **rxp = &rxq->sw_ring[rxq->rxrearm_start].mbuf;
 	const uint16_t rearm_thresh = CI_VPMD_RX_REARM_THRESH;
 	volatile union ci_rx_desc *rxdp;
 	int i;
 
 	rxdp = &rxq->rx_ring[rxq->rxrearm_start];
 
-	if (rte_mempool_get_bulk(rxq->mp, (void **)rxp, rearm_thresh) < 0) {
+	if (rte_mbuf_raw_alloc_bulk(rxq->mp, rxp, rearm_thresh) < 0) {
 		if (rxq->rxrearm_nb + rearm_thresh >= rxq->nb_rx_desc) {
 			uint64x2_t zero = vdupq_n_u64(0);
 
 			for (i = 0; i < CI_VPMD_DESCS_PER_LOOP; i++) {
-				rxp[i].mbuf = &rxq->fake_mbuf;
+				rxp[i] = &rxq->fake_mbuf;
 				vst1q_u64(RTE_CAST_PTR(uint64_t *, &rxdp[i]), zero);
 			}
 		}
diff --git a/drivers/net/intel/common/rx_vec_ppc.h b/drivers/net/intel/common/rx_vec_ppc.h
index 97affc34c2..7d02eb067b 100644
--- a/drivers/net/intel/common/rx_vec_ppc.h
+++ b/drivers/net/intel/common/rx_vec_ppc.h
@@ -16,19 +16,19 @@
 static inline int
 _ci_rxq_rearm_get_bufs(struct ci_rx_queue *rxq)
 {
-	struct ci_rx_entry *rxp = &rxq->sw_ring[rxq->rxrearm_start];
+	struct rte_mbuf **rxp = &rxq->sw_ring[rxq->rxrearm_start].mbuf;
 	const uint16_t rearm_thresh = CI_VPMD_RX_REARM_THRESH;
 	volatile union ci_rx_desc *rxdp;
 	int i;
 
 	rxdp = &rxq->rx_ring[rxq->rxrearm_start];
 
-	if (rte_mempool_get_bulk(rxq->mp, (void **)rxp, rearm_thresh) < 0) {
+	if (rte_mbuf_raw_alloc_bulk(rxq->mp, rxp, rearm_thresh) < 0) {
 		if (rxq->rxrearm_nb + rearm_thresh >= rxq->nb_rx_desc) {
 			__vector unsigned long dma_addr0 = (__vector unsigned long){};
 
 			for (i = 0; i < CI_VPMD_DESCS_PER_LOOP; i++) {
-				rxp[i].mbuf = &rxq->fake_mbuf;
+				rxp[i] = &rxq->fake_mbuf;
 				vec_st(dma_addr0, 0,
 					RTE_CAST_PTR(__vector unsigned long *, &rxdp[i]));
 			}
diff --git a/drivers/net/intel/common/rx_vec_x86.h b/drivers/net/intel/common/rx_vec_x86.h
index 3d7343b1ff..5160767385 100644
--- a/drivers/net/intel/common/rx_vec_x86.h
+++ b/drivers/net/intel/common/rx_vec_x86.h
@@ -21,19 +21,19 @@ enum ci_rx_vec_level {
 static inline int
 _ci_rxq_rearm_get_bufs(struct ci_rx_queue *rxq)
 {
-	struct ci_rx_entry *rxp = &rxq->sw_ring[rxq->rxrearm_start];
+	struct rte_mbuf **rxp = &rxq->sw_ring[rxq->rxrearm_start].mbuf;
 	const uint16_t rearm_thresh = CI_VPMD_RX_REARM_THRESH;
 	volatile union ci_rx_desc *rxdp;
 	int i;
 
 	rxdp = &rxq->rx_ring[rxq->rxrearm_start];
 
-	if (rte_mempool_get_bulk(rxq->mp, (void **)rxp, rearm_thresh) < 0) {
+	if (rte_mbuf_raw_alloc_bulk(rxq->mp, rxp, rearm_thresh) < 0) {
 		if (rxq->rxrearm_nb + rearm_thresh >= rxq->nb_rx_desc) {
 			const __m128i zero = _mm_setzero_si128();
 
 			for (i = 0; i < CI_VPMD_DESCS_PER_LOOP; i++) {
-				rxp[i].mbuf = &rxq->fake_mbuf;
+				rxp[i] = &rxq->fake_mbuf;
 				_mm_store_si128(RTE_CAST_PTR(__m128i *, &rxdp[i]), zero);
 			}
 		}
diff --git a/drivers/net/intel/fm10k/fm10k_ethdev.c b/drivers/net/intel/fm10k/fm10k_ethdev.c
index 75ce2e19cf..216165a9bd 100644
--- a/drivers/net/intel/fm10k/fm10k_ethdev.c
+++ b/drivers/net/intel/fm10k/fm10k_ethdev.c
@@ -205,7 +205,7 @@ rx_queue_reset(struct fm10k_rx_queue *q)
 	int i, diag;
 	PMD_INIT_FUNC_TRACE();
 
-	diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
+	diag = rte_mbuf_raw_alloc_bulk(q->mp, (void *)q->sw_ring, q->nb_desc);
 	if (diag != 0)
 		return -ENOMEM;
 
diff --git a/drivers/net/intel/fm10k/fm10k_rxtx.c b/drivers/net/intel/fm10k/fm10k_rxtx.c
index 690142b357..0557ee88df 100644
--- a/drivers/net/intel/fm10k/fm10k_rxtx.c
+++ b/drivers/net/intel/fm10k/fm10k_rxtx.c
@@ -164,8 +164,8 @@ fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	q->next_dd = next_dd;
 
 	if ((q->next_dd > q->next_trigger) || (alloc == 1)) {
-		ret = rte_mempool_get_bulk(q->mp,
-					(void **)&q->sw_ring[q->next_alloc],
+		ret = rte_mbuf_raw_alloc_bulk(q->mp,
+					(void *)&q->sw_ring[q->next_alloc],
 					q->alloc_thresh);
 
 		if (unlikely(ret != 0)) {
@@ -322,8 +322,8 @@ fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	q->next_dd = next_dd;
 
 	if ((q->next_dd > q->next_trigger) || (alloc == 1)) {
-		ret = rte_mempool_get_bulk(q->mp,
-					(void **)&q->sw_ring[q->next_alloc],
+		ret = rte_mbuf_raw_alloc_bulk(q->mp,
+					(void *)&q->sw_ring[q->next_alloc],
 					q->alloc_thresh);
 
 		if (unlikely(ret != 0)) {
diff --git a/drivers/net/intel/fm10k/fm10k_rxtx_vec.c b/drivers/net/intel/fm10k/fm10k_rxtx_vec.c
index 715c891c30..1269a34e76 100644
--- a/drivers/net/intel/fm10k/fm10k_rxtx_vec.c
+++ b/drivers/net/intel/fm10k/fm10k_rxtx_vec.c
@@ -259,7 +259,7 @@ fm10k_rxq_rearm(struct fm10k_rx_queue *rxq)
 	rxdp = rxq->hw_ring + rxq->rxrearm_start;
 
 	/* Pull 'n' more MBUFs into the software ring */
-	if (rte_mempool_get_bulk(rxq->mp,
+	if (rte_mbuf_raw_alloc_bulk(rxq->mp,
 				 (void *)mb_alloc,
 				 RTE_FM10K_RXQ_REARM_THRESH) < 0) {
 		dma_addr0 = _mm_setzero_si128();
diff --git a/drivers/net/intel/i40e/i40e_rxtx.c b/drivers/net/intel/i40e/i40e_rxtx.c
index aba3c11ee5..a3ca8254ff 100644
--- a/drivers/net/intel/i40e/i40e_rxtx.c
+++ b/drivers/net/intel/i40e/i40e_rxtx.c
@@ -590,7 +590,7 @@ i40e_rx_alloc_bufs(struct ci_rx_queue *rxq)
 	alloc_idx = (uint16_t)(rxq->rx_free_trigger -
 				(rxq->rx_free_thresh - 1));
 	rxep = &(rxq->sw_ring[alloc_idx]);
-	diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
+	diag = rte_mbuf_raw_alloc_bulk(rxq->mp, (void *)rxep,
 					rxq->rx_free_thresh);
 	if (unlikely(diag != 0)) {
 		PMD_DRV_LOG(ERR, "Failed to get mbufs in bulk");
diff --git a/drivers/net/intel/iavf/iavf_rxtx.c b/drivers/net/intel/iavf/iavf_rxtx.c
index 7033a74610..c0e3827066 100644
--- a/drivers/net/intel/iavf/iavf_rxtx.c
+++ b/drivers/net/intel/iavf/iavf_rxtx.c
@@ -2227,7 +2227,7 @@ iavf_rx_alloc_bufs(struct ci_rx_queue *rxq)
 	alloc_idx = (uint16_t)(rxq->rx_free_trigger -
 				(rxq->rx_free_thresh - 1));
 	rxep = &rxq->sw_ring[alloc_idx];
-	diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
+	diag = rte_mbuf_raw_alloc_bulk(rxq->mp, (void *)rxep,
 				    rxq->rx_free_thresh);
 	if (unlikely(diag != 0)) {
 		PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
diff --git a/drivers/net/intel/ice/ice_rxtx.c b/drivers/net/intel/ice/ice_rxtx.c
index da508592aa..0294794bed 100644
--- a/drivers/net/intel/ice/ice_rxtx.c
+++ b/drivers/net/intel/ice/ice_rxtx.c
@@ -2039,7 +2039,7 @@ ice_rx_alloc_bufs(struct ci_rx_queue *rxq)
 	alloc_idx = (uint16_t)(rxq->rx_free_trigger -
 			       (rxq->rx_free_thresh - 1));
 	rxep = &rxq->sw_ring[alloc_idx];
-	diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
+	diag = rte_mbuf_raw_alloc_bulk(rxq->mp, (void *)rxep,
 				    rxq->rx_free_thresh);
 	if (unlikely(diag != 0)) {
 		PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
@@ -2047,7 +2047,7 @@ ice_rx_alloc_bufs(struct ci_rx_queue *rxq)
 	}
 
 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
-		diag_pay = rte_mempool_get_bulk(rxq->rxseg[1].mp,
+		diag_pay = rte_mbuf_raw_alloc_bulk(rxq->rxseg[1].mp,
 				(void *)rxq->sw_split_buf, rxq->rx_free_thresh);
 		if (unlikely(diag_pay != 0)) {
 			rte_mempool_put_bulk(rxq->mp, (void *)rxep,
diff --git a/drivers/net/intel/idpf/idpf_common_rxtx_avx2.c b/drivers/net/intel/idpf/idpf_common_rxtx_avx2.c
index 1babc5114b..21c8f79254 100644
--- a/drivers/net/intel/idpf/idpf_common_rxtx_avx2.c
+++ b/drivers/net/intel/idpf/idpf_common_rxtx_avx2.c
@@ -20,7 +20,7 @@ idpf_singleq_rx_rearm(struct idpf_rx_queue *rxq)
 	rxdp += rxq->rxrearm_start;
 
 	/* Pull 'n' more MBUFs into the software ring */
-	if (rte_mempool_get_bulk(rxq->mp,
+	if (rte_mbuf_raw_alloc_bulk(rxq->mp,
 				 (void *)rxep,
 				 IDPF_RXQ_REARM_THRESH) < 0) {
 		if (rxq->rxrearm_nb + IDPF_RXQ_REARM_THRESH >=
diff --git a/drivers/net/intel/idpf/idpf_common_rxtx_avx512.c b/drivers/net/intel/idpf/idpf_common_rxtx_avx512.c
index 06e73c8725..bc2cadd738 100644
--- a/drivers/net/intel/idpf/idpf_common_rxtx_avx512.c
+++ b/drivers/net/intel/idpf/idpf_common_rxtx_avx512.c
@@ -22,7 +22,7 @@ idpf_singleq_rearm_common(struct idpf_rx_queue *rxq)
 	rxdp += rxq->rxrearm_start;
 
 	/* Pull 'n' more MBUFs into the software ring */
-	if (rte_mempool_get_bulk(rxq->mp,
+	if (rte_mbuf_raw_alloc_bulk(rxq->mp,
 				 (void *)rxp,
 				 IDPF_RXQ_REARM_THRESH) < 0) {
 		if (rxq->rxrearm_nb + IDPF_RXQ_REARM_THRESH >=
@@ -551,7 +551,7 @@ idpf_splitq_rearm_common(struct idpf_rx_queue *rx_bufq)
 	rxdp += rx_bufq->rxrearm_start;
 
 	/* Pull 'n' more MBUFs into the software ring */
-	if (rte_mempool_get_bulk(rx_bufq->mp,
+	if (rte_mbuf_raw_alloc_bulk(rx_bufq->mp,
 				 (void *)rxp,
 				 IDPF_RXQ_REARM_THRESH) < 0) {
 		if (rx_bufq->rxrearm_nb + IDPF_RXQ_REARM_THRESH >=
diff --git a/drivers/net/intel/ixgbe/ixgbe_rxtx.c b/drivers/net/intel/ixgbe/ixgbe_rxtx.c
index bbe665a6ff..6143d16377 100644
--- a/drivers/net/intel/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/intel/ixgbe/ixgbe_rxtx.c
@@ -1659,7 +1659,7 @@ ixgbe_rx_alloc_bufs(struct ci_rx_queue *rxq, bool reset_mbuf)
 	/* allocate buffers in bulk directly into the S/W ring */
 	alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
 	rxep = &rxq->sw_ring[alloc_idx];
-	diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
+	diag = rte_mbuf_raw_alloc_bulk(rxq->mp, (void *)rxep,
 				    rxq->rx_free_thresh);
 	if (unlikely(diag != 0))
 		return -ENOMEM;
-- 
2.48.1


^ permalink raw reply	[flat|nested] 3+ messages in thread

* [PATCH 2/2] net/intel: avoid accessing mempool directly on free
  2025-08-22 17:06 [PATCH 0/2] avoid using mempool fns directly Bruce Richardson
  2025-08-22 17:06 ` [PATCH 1/2] net/intel: avoid allocating from mempool directly Bruce Richardson
@ 2025-08-22 17:06 ` Bruce Richardson
  1 sibling, 0 replies; 3+ messages in thread
From: Bruce Richardson @ 2025-08-22 17:06 UTC (permalink / raw)
  To: dev; +Cc: mb, Bruce Richardson

Rather than calling the mempool function rte_mempool_put_bulk directly
in drivers, switch to using the rte_mbuf_raw_free_bulk() function which
properly supports debug flags and checks, when they are enabled.

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 drivers/net/intel/common/tx.h                   | 4 ++--
 drivers/net/intel/fm10k/fm10k_ethdev.c          | 3 +--
 drivers/net/intel/fm10k/fm10k_rxtx.c            | 5 ++---
 drivers/net/intel/fm10k/fm10k_rxtx_vec.c        | 5 ++---
 drivers/net/intel/i40e/i40e_rxtx.c              | 5 ++---
 drivers/net/intel/ice/ice_rxtx.c                | 2 +-
 drivers/net/intel/ixgbe/ixgbe_rxtx.c            | 5 ++---
 drivers/net/intel/ixgbe/ixgbe_rxtx_vec_common.h | 5 ++---
 8 files changed, 14 insertions(+), 20 deletions(-)

diff --git a/drivers/net/intel/common/tx.h b/drivers/net/intel/common/tx.h
index 7dc84da170..27a3dd388c 100644
--- a/drivers/net/intel/common/tx.h
+++ b/drivers/net/intel/common/tx.h
@@ -203,13 +203,13 @@ ci_tx_free_bufs_vec(struct ci_tx_queue *txq, ci_desc_done_fn desc_done, bool ctx
 				if (likely(m->pool == free[0]->pool)) {
 					free[nb_free++] = m;
 				} else {
-					rte_mempool_put_bulk(free[0]->pool, (void *)free, nb_free);
+					rte_mbuf_raw_free_bulk(free[0]->pool, free, nb_free);
 					free[0] = m;
 					nb_free = 1;
 				}
 			}
 		}
-		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+		rte_mbuf_raw_free_bulk(free[0]->pool, free, nb_free);
 	} else {
 		for (uint32_t i = 1; i < n; i++) {
 			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
diff --git a/drivers/net/intel/fm10k/fm10k_ethdev.c b/drivers/net/intel/fm10k/fm10k_ethdev.c
index 216165a9bd..57f09be88e 100644
--- a/drivers/net/intel/fm10k/fm10k_ethdev.c
+++ b/drivers/net/intel/fm10k/fm10k_ethdev.c
@@ -212,8 +212,7 @@ rx_queue_reset(struct fm10k_rx_queue *q)
 	for (i = 0; i < q->nb_desc; ++i) {
 		fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
 		if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
-			rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
-						q->nb_desc);
+			rte_mbuf_raw_free_bulk(q->mp, q->sw_ring, q->nb_desc);
 			return -EINVAL;
 		}
 		dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
diff --git a/drivers/net/intel/fm10k/fm10k_rxtx.c b/drivers/net/intel/fm10k/fm10k_rxtx.c
index 0557ee88df..a6ca38a6da 100644
--- a/drivers/net/intel/fm10k/fm10k_rxtx.c
+++ b/drivers/net/intel/fm10k/fm10k_rxtx.c
@@ -495,15 +495,14 @@ static inline void tx_free_bulk_mbuf(struct rte_mbuf **txep, int num)
 				if (likely(m->pool == free[0]->pool))
 					free[nb_free++] = m;
 				else {
-					rte_mempool_put_bulk(free[0]->pool,
-							(void *)free, nb_free);
+					rte_mbuf_raw_free_bulk(free[0]->pool, free, nb_free);
 					free[0] = m;
 					nb_free = 1;
 				}
 			}
 			txep[i] = NULL;
 		}
-		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+		rte_mbuf_raw_free_bulk(free[0]->pool, free, nb_free);
 	} else {
 		for (i = 1; i < num; i++) {
 			m = rte_pktmbuf_prefree_seg(txep[i]);
diff --git a/drivers/net/intel/fm10k/fm10k_rxtx_vec.c b/drivers/net/intel/fm10k/fm10k_rxtx_vec.c
index 1269a34e76..0eada7275e 100644
--- a/drivers/net/intel/fm10k/fm10k_rxtx_vec.c
+++ b/drivers/net/intel/fm10k/fm10k_rxtx_vec.c
@@ -775,14 +775,13 @@ fm10k_tx_free_bufs(struct fm10k_tx_queue *txq)
 				if (likely(m->pool == free[0]->pool))
 					free[nb_free++] = m;
 				else {
-					rte_mempool_put_bulk(free[0]->pool,
-							(void *)free, nb_free);
+					rte_mbuf_raw_free_bulk(free[0]->pool, free, nb_free);
 					free[0] = m;
 					nb_free = 1;
 				}
 			}
 		}
-		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+		rte_mbuf_raw_free_bulk(free[0]->pool, free, nb_free);
 	} else {
 		for (i = 1; i < n; i++) {
 			m = rte_pktmbuf_prefree_seg(txep[i]);
diff --git a/drivers/net/intel/i40e/i40e_rxtx.c b/drivers/net/intel/i40e/i40e_rxtx.c
index a3ca8254ff..ca439ebd8a 100644
--- a/drivers/net/intel/i40e/i40e_rxtx.c
+++ b/drivers/net/intel/i40e/i40e_rxtx.c
@@ -1355,8 +1355,7 @@ i40e_tx_free_bufs(struct ci_tx_queue *txq)
 					free[i] = txep->mbuf;
 					txep->mbuf = NULL;
 				}
-				rte_mempool_put_bulk(free[0]->pool, (void **)free,
-						I40E_TX_MAX_FREE_BUF_SZ);
+				rte_mbuf_raw_free_bulk(free[0]->pool, free, I40E_TX_MAX_FREE_BUF_SZ);
 			}
 		}
 
@@ -1365,7 +1364,7 @@ i40e_tx_free_bufs(struct ci_tx_queue *txq)
 				free[i] = txep->mbuf;
 				txep->mbuf = NULL;
 			}
-			rte_mempool_put_bulk(free[0]->pool, (void **)free, m);
+			rte_mbuf_raw_free_bulk(free[0]->pool, free, m);
 		}
 	} else {
 		for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
diff --git a/drivers/net/intel/ice/ice_rxtx.c b/drivers/net/intel/ice/ice_rxtx.c
index 0294794bed..5889d51618 100644
--- a/drivers/net/intel/ice/ice_rxtx.c
+++ b/drivers/net/intel/ice/ice_rxtx.c
@@ -2050,7 +2050,7 @@ ice_rx_alloc_bufs(struct ci_rx_queue *rxq)
 		diag_pay = rte_mbuf_raw_alloc_bulk(rxq->rxseg[1].mp,
 				(void *)rxq->sw_split_buf, rxq->rx_free_thresh);
 		if (unlikely(diag_pay != 0)) {
-			rte_mempool_put_bulk(rxq->mp, (void *)rxep,
+			rte_mbuf_raw_free_bulk(rxq->mp, (void *)rxep,
 				    rxq->rx_free_thresh);
 			PMD_RX_LOG(ERR, "Failed to get payload mbufs in bulk");
 			return -ENOMEM;
diff --git a/drivers/net/intel/ixgbe/ixgbe_rxtx.c b/drivers/net/intel/ixgbe/ixgbe_rxtx.c
index 6143d16377..0aa7b9bf2e 100644
--- a/drivers/net/intel/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/intel/ixgbe/ixgbe_rxtx.c
@@ -141,8 +141,7 @@ ixgbe_tx_free_bufs(struct ci_tx_queue *txq)
 
 		if (nb_free >= IXGBE_TX_MAX_FREE_BUF_SZ ||
 		    (nb_free > 0 && m->pool != free[0]->pool)) {
-			rte_mempool_put_bulk(free[0]->pool,
-					     (void **)free, nb_free);
+			rte_mbuf_raw_free_bulk(free[0]->pool, free, nb_free);
 			nb_free = 0;
 		}
 
@@ -150,7 +149,7 @@ ixgbe_tx_free_bufs(struct ci_tx_queue *txq)
 	}
 
 	if (nb_free > 0)
-		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+		rte_mbuf_raw_free_bulk(free[0]->pool, free, nb_free);
 
 	/* buffers were freed, update counters */
 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
diff --git a/drivers/net/intel/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/intel/ixgbe/ixgbe_rxtx_vec_common.h
index e54f532497..7fb3c3dc24 100644
--- a/drivers/net/intel/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/intel/ixgbe/ixgbe_rxtx_vec_common.h
@@ -55,14 +55,13 @@ ixgbe_tx_free_bufs_vec(struct ci_tx_queue *txq)
 				if (likely(m->pool == free[0]->pool))
 					free[nb_free++] = m;
 				else {
-					rte_mempool_put_bulk(free[0]->pool,
-							(void *)free, nb_free);
+					rte_mbuf_raw_free_bulk(free[0]->pool, free, nb_free);
 					free[0] = m;
 					nb_free = 1;
 				}
 			}
 		}
-		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+		rte_mbuf_raw_free_bulk(free[0]->pool, free, nb_free);
 	} else {
 		for (i = 1; i < n; i++) {
 			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
-- 
2.48.1


^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2025-08-22 17:07 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-08-22 17:06 [PATCH 0/2] avoid using mempool fns directly Bruce Richardson
2025-08-22 17:06 ` [PATCH 1/2] net/intel: avoid allocating from mempool directly Bruce Richardson
2025-08-22 17:06 ` [PATCH 2/2] net/intel: avoid accessing mempool directly on free Bruce Richardson

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).