From: Shaiq Wani <shaiq.wani@intel.com>
To: dev@dpdk.org, bruce.richardson@intel.com, aman.deep.singh@intel.com
Subject: [PATCH v2 4/4] net/idpf: use common Tx free fn in idpf
Date: Mon, 24 Mar 2025 18:10:01 +0530 [thread overview]
Message-ID: <20250324124001.1282624-5-shaiq.wani@intel.com> (raw)
In-Reply-To: <20250324124001.1282624-1-shaiq.wani@intel.com>
Switch the idpf driver to use the common Tx free function for the AVX2 and
AVX512 code paths, replacing the driver-local buffer-free routines with calls
to ci_tx_free_bufs_vec() plus a small idpf-specific descriptor-done callback.
Signed-off-by: Shaiq Wani <shaiq.wani@intel.com>
---
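Note for reviewers: the common helper only needs a driver-supplied callback
telling it whether the descriptor at a given ring index has completed, so the
driver keeps the small check below and drops its private free loops. A minimal
sketch of the resulting call pattern follows (the ci_tx_free_bufs_vec()
prototype is assumed to match drivers/net/intel/common/tx.h; the final boolean
is assumed here to indicate whether context descriptors are in use):

    /* idpf-specific "descriptor done" test handed to the common code */
    static inline int
    idpf_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
    {
            return (txq->idpf_tx_ring[idx].qw1 &
                    rte_cpu_to_le_64(IDPF_TXD_QW1_DTYPE_M)) ==
                    rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE);
    }

    /* in the burst Tx path, replacing the per-driver free routine */
    if (txq->nb_tx_free < txq->tx_free_thresh)
            ci_tx_free_bufs_vec(txq, idpf_tx_desc_done, false);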
.../net/intel/idpf/idpf_common_rxtx_avx2.c | 68 +----
.../net/intel/idpf/idpf_common_rxtx_avx512.c | 237 +-----------------
drivers/net/intel/idpf/idpf_rxtx_vec_common.h | 2 +-
3 files changed, 23 insertions(+), 284 deletions(-)
diff --git a/drivers/net/intel/idpf/idpf_common_rxtx_avx2.c b/drivers/net/intel/idpf/idpf_common_rxtx_avx2.c
index bce0257804..6399f357d3 100644
--- a/drivers/net/intel/idpf/idpf_common_rxtx_avx2.c
+++ b/drivers/net/intel/idpf/idpf_common_rxtx_avx2.c
@@ -79,6 +79,14 @@ idpf_singleq_rx_rearm(struct idpf_rx_queue *rxq)
IDPF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
}
+static inline int
+idpf_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
+{
+ return (txq->idpf_tx_ring[idx].qw1 &
+ rte_cpu_to_le_64(IDPF_TXD_QW1_DTYPE_M)) ==
+ rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE);
+}
+
static inline uint16_t
_idpf_singleq_recv_raw_pkts_vec_avx2(struct idpf_rx_queue *rxq, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
@@ -479,64 +487,6 @@ idpf_dp_singleq_recv_pkts_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, uint16
return _idpf_singleq_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts);
}
-static __rte_always_inline int
-idpf_singleq_tx_free_bufs_vec(struct ci_tx_queue *txq)
-{
- struct ci_tx_entry_vec *txep;
- uint32_t n;
- uint32_t i;
- int nb_free = 0;
- struct rte_mbuf *m;
- struct rte_mbuf **free = alloca(sizeof(struct rte_mbuf *) * txq->tx_rs_thresh);
-
- /* check DD bits on threshold descriptor */
- if ((txq->idpf_tx_ring[txq->tx_next_dd].qw1 &
- rte_cpu_to_le_64(IDPF_TXD_QW1_DTYPE_M)) !=
- rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE))
- return 0;
-
- n = txq->tx_rs_thresh;
-
- /* first buffer to free from S/W ring is at index
- * tx_next_dd - (tx_rs_thresh-1)
- */
- txep = &txq->sw_ring_vec[txq->tx_next_dd - (n - 1)];
- m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
- if (likely(m)) {
- free[0] = m;
- nb_free = 1;
- for (i = 1; i < n; i++) {
- m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
- if (likely(m)) {
- if (likely(m->pool == free[0]->pool)) {
- free[nb_free++] = m;
- } else {
- rte_mempool_put_bulk(free[0]->pool,
- (void *)free,
- nb_free);
- free[0] = m;
- nb_free = 1;
- }
- }
- }
- rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
- } else {
- for (i = 1; i < n; i++) {
- m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
- if (m)
- rte_mempool_put(m->pool, m);
- }
- }
-
- /* buffers were freed, update counters */
- txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
- txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
- if (txq->tx_next_dd >= txq->nb_tx_desc)
- txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
-
- return txq->tx_rs_thresh;
-}
-
static inline void
idpf_singleq_vtx1(volatile struct idpf_base_tx_desc *txdp,
struct rte_mbuf *pkt, uint64_t flags)
@@ -621,7 +571,7 @@ idpf_singleq_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts
nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
if (txq->nb_tx_free < txq->tx_free_thresh)
- idpf_singleq_tx_free_bufs_vec(txq);
+ ci_tx_free_bufs_vec(txq, idpf_tx_desc_done, false);
nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
if (unlikely(nb_pkts == 0))
diff --git a/drivers/net/intel/idpf/idpf_common_rxtx_avx512.c b/drivers/net/intel/idpf/idpf_common_rxtx_avx512.c
index 715be52046..cb83dd3601 100644
--- a/drivers/net/intel/idpf/idpf_common_rxtx_avx512.c
+++ b/drivers/net/intel/idpf/idpf_common_rxtx_avx512.c
@@ -121,6 +121,14 @@ idpf_singleq_rearm_common(struct idpf_rx_queue *rxq)
IDPF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
}
+static inline int
+idpf_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
+{
+ return (txq->idpf_tx_ring[idx].qw1 &
+ rte_cpu_to_le_64(IDPF_TXD_QW1_DTYPE_M)) ==
+ rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE);
+}
+
static __rte_always_inline void
idpf_singleq_rearm(struct idpf_rx_queue *rxq)
{
@@ -995,122 +1003,6 @@ idpf_dp_splitq_recv_pkts_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
nb_pkts);
}
-static __rte_always_inline int
-idpf_tx_singleq_free_bufs_avx512(struct ci_tx_queue *txq)
-{
- struct ci_tx_entry_vec *txep;
- uint32_t n;
- uint32_t i;
- int nb_free = 0;
- struct rte_mbuf *m;
- struct rte_mbuf **free = alloca(sizeof(struct rte_mbuf *) * txq->tx_rs_thresh);
-
- /* check DD bits on threshold descriptor */
- if ((txq->idpf_tx_ring[txq->tx_next_dd].qw1 &
- rte_cpu_to_le_64(IDPF_TXD_QW1_DTYPE_M)) !=
- rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE))
- return 0;
-
- n = txq->tx_rs_thresh;
-
- /* first buffer to free from S/W ring is at index
- * tx_next_dd - (tx_rs_thresh-1)
- */
- txep = (void *)txq->sw_ring;
- txep += txq->tx_next_dd - (n - 1);
-
- if (txq->offloads & IDPF_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
- struct rte_mempool *mp = txep[0].mbuf->pool;
- struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
- rte_lcore_id());
- void **cache_objs;
-
- if (cache == NULL || cache->len == 0)
- goto normal;
-
- cache_objs = &cache->objs[cache->len];
-
- if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
- rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n);
- goto done;
- }
-
- /* The cache follows the following algorithm
- * 1. Add the objects to the cache
- * 2. Anything greater than the cache min value (if it crosses the
- * cache flush threshold) is flushed to the ring.
- */
- /* Add elements back into the cache */
- uint32_t copied = 0;
- /* n is multiple of 32 */
- while (copied < n) {
-#ifdef RTE_ARCH_64
- const __m512i a = _mm512_loadu_si512(&txep[copied]);
- const __m512i b = _mm512_loadu_si512(&txep[copied + 8]);
- const __m512i c = _mm512_loadu_si512(&txep[copied + 16]);
- const __m512i d = _mm512_loadu_si512(&txep[copied + 24]);
-
- _mm512_storeu_si512(&cache_objs[copied], a);
- _mm512_storeu_si512(&cache_objs[copied + 8], b);
- _mm512_storeu_si512(&cache_objs[copied + 16], c);
- _mm512_storeu_si512(&cache_objs[copied + 24], d);
-#else
- const __m512i a = _mm512_loadu_si512(&txep[copied]);
- const __m512i b = _mm512_loadu_si512(&txep[copied + 16]);
- _mm512_storeu_si512(&cache_objs[copied], a);
- _mm512_storeu_si512(&cache_objs[copied + 16], b);
-#endif
- copied += 32;
- }
- cache->len += n;
-
- if (cache->len >= cache->flushthresh) {
- rte_mempool_ops_enqueue_bulk(mp,
- &cache->objs[cache->size],
- cache->len - cache->size);
- cache->len = cache->size;
- }
- goto done;
- }
-
-normal:
- m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
- if (likely(m != NULL)) {
- free[0] = m;
- nb_free = 1;
- for (i = 1; i < n; i++) {
- m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
- if (likely(m != NULL)) {
- if (likely(m->pool == free[0]->pool)) {
- free[nb_free++] = m;
- } else {
- rte_mempool_put_bulk(free[0]->pool,
- (void *)free,
- nb_free);
- free[0] = m;
- nb_free = 1;
- }
- }
- }
- rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
- } else {
- for (i = 1; i < n; i++) {
- m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
- if (m != NULL)
- rte_mempool_put(m->pool, m);
- }
- }
-
-done:
- /* buffers were freed, update counters */
- txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
- txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
- if (txq->tx_next_dd >= txq->nb_tx_desc)
- txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
-
- return txq->tx_rs_thresh;
-}
-
static __rte_always_inline void
idpf_singleq_vtx1(volatile struct idpf_base_tx_desc *txdp,
struct rte_mbuf *pkt, uint64_t flags)
@@ -1194,7 +1086,7 @@ idpf_singleq_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pk
nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
if (txq->nb_tx_free < txq->tx_free_thresh)
- idpf_tx_singleq_free_bufs_avx512(txq);
+ ci_tx_free_bufs_vec(txq, idpf_tx_desc_done, false);
nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
nb_commit = nb_pkts;
@@ -1310,110 +1202,6 @@ idpf_splitq_scan_cq_ring(struct ci_tx_queue *cq)
cq->tx_tail = cq_qid;
}
-static __rte_always_inline int
-idpf_tx_splitq_free_bufs_avx512(struct ci_tx_queue *txq)
-{
- struct ci_tx_entry_vec *txep;
- uint32_t n;
- uint32_t i;
- int nb_free = 0;
- struct rte_mbuf *m;
- struct rte_mbuf **free = alloca(sizeof(struct rte_mbuf *) * txq->tx_rs_thresh);
-
- n = txq->tx_rs_thresh;
-
- /* first buffer to free from S/W ring is at index
- * tx_next_dd - (tx_rs_thresh-1)
- */
- txep = (void *)txq->sw_ring;
- txep += txq->tx_next_dd - (n - 1);
-
- if (txq->offloads & IDPF_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
- struct rte_mempool *mp = txep[0].mbuf->pool;
- struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
- rte_lcore_id());
- void **cache_objs;
-
- if (!cache || cache->len == 0)
- goto normal;
-
- cache_objs = &cache->objs[cache->len];
-
- if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
- rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n);
- goto done;
- }
-
- /* The cache follows the following algorithm
- * 1. Add the objects to the cache
- * 2. Anything greater than the cache min value (if it crosses the
- * cache flush threshold) is flushed to the ring.
- */
- /* Add elements back into the cache */
- uint32_t copied = 0;
- /* n is multiple of 32 */
- while (copied < n) {
- const __m512i a = _mm512_loadu_si512(&txep[copied]);
- const __m512i b = _mm512_loadu_si512(&txep[copied + 8]);
- const __m512i c = _mm512_loadu_si512(&txep[copied + 16]);
- const __m512i d = _mm512_loadu_si512(&txep[copied + 24]);
-
- _mm512_storeu_si512(&cache_objs[copied], a);
- _mm512_storeu_si512(&cache_objs[copied + 8], b);
- _mm512_storeu_si512(&cache_objs[copied + 16], c);
- _mm512_storeu_si512(&cache_objs[copied + 24], d);
- copied += 32;
- }
- cache->len += n;
-
- if (cache->len >= cache->flushthresh) {
- rte_mempool_ops_enqueue_bulk(mp,
- &cache->objs[cache->size],
- cache->len - cache->size);
- cache->len = cache->size;
- }
- goto done;
- }
-
-normal:
- m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
- if (likely(m)) {
- free[0] = m;
- nb_free = 1;
- for (i = 1; i < n; i++) {
- m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
- if (likely(m)) {
- if (likely(m->pool == free[0]->pool)) {
- free[nb_free++] = m;
- } else {
- rte_mempool_put_bulk(free[0]->pool,
- (void *)free,
- nb_free);
- free[0] = m;
- nb_free = 1;
- }
- }
- }
- rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
- } else {
- for (i = 1; i < n; i++) {
- m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
- if (m)
- rte_mempool_put(m->pool, m);
- }
- }
-
-done:
- /* buffers were freed, update counters */
- txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
- txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
- if (txq->tx_next_dd >= txq->nb_tx_desc)
- txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
- txq->ctype[IDPF_TXD_COMPLT_RS] -= txq->tx_rs_thresh;
-
- return txq->tx_rs_thresh;
-}
-
#define IDPF_TXD_FLEX_QW1_TX_BUF_SZ_S 48
static __rte_always_inline void
@@ -1555,11 +1343,12 @@ idpf_splitq_xmit_pkts_vec_avx512_cmn(void *tx_queue, struct rte_mbuf **tx_pkts,
while (nb_pkts) {
uint16_t ret, num;
-
idpf_splitq_scan_cq_ring(txq->complq);
- if (txq->ctype[IDPF_TXD_COMPLT_RS] > txq->tx_free_thresh)
- idpf_tx_splitq_free_bufs_avx512(txq);
+ if (txq->ctype[IDPF_TXD_COMPLT_RS] > txq->tx_free_thresh) {
+ ci_tx_free_bufs_vec(txq, idpf_tx_desc_done, false);
+ txq->ctype[IDPF_TXD_COMPLT_RS] -= txq->tx_rs_thresh;
+ }
num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
ret = idpf_splitq_xmit_fixed_burst_vec_avx512(tx_queue,
diff --git a/drivers/net/intel/idpf/idpf_rxtx_vec_common.h b/drivers/net/intel/idpf/idpf_rxtx_vec_common.h
index f97a9a6fce..e444addf85 100644
--- a/drivers/net/intel/idpf/idpf_rxtx_vec_common.h
+++ b/drivers/net/intel/idpf/idpf_rxtx_vec_common.h
@@ -10,7 +10,7 @@
#include "idpf_ethdev.h"
#include "idpf_rxtx.h"
-#include "../common/tx.h"
+#include "../common/rx.h"
#define IDPF_SCALAR_PATH 0
#define IDPF_VECTOR_PATH 1
--
2.34.1
Thread overview: 31+ messages
2025-03-12 15:53 [PATCH] net/intel: using common functions in idpf driver Shaiq Wani
2025-03-12 16:38 ` Bruce Richardson
2025-03-24 12:39 ` [PATCH v2 0/4] Use common structures and fns in IDPF and Shaiq Wani
2025-03-24 12:39 ` [PATCH v2 1/4] net/intel: use common Tx queue structure Shaiq Wani
2025-03-24 12:49 ` [PATCH v3 0/4] using common functions in idpf driver Shaiq Wani
2025-03-24 12:49 ` [PATCH v3 1/4] net/intel: use common Tx queue structure Shaiq Wani
2025-03-27 10:44 ` [PATCH v4 0/4] net/intel: using common functions in idpf driver Shaiq Wani
2025-03-27 10:44 ` [PATCH v4 1/4] net/intel: align Tx queue struct field names Shaiq Wani
2025-03-27 16:04 ` [PATCH v5 0/4] net/intel: using common functions in idpf driver Shaiq Wani
2025-03-27 16:04 ` [PATCH v5 1/4] net/intel: align Tx queue struct field names Shaiq Wani
2025-03-28 16:57 ` Bruce Richardson
2025-03-27 16:04 ` [PATCH v5 2/4] net/intel: use common Tx queue structure Shaiq Wani
2025-03-28 17:22 ` Bruce Richardson
2025-03-28 17:55 ` Bruce Richardson
2025-03-27 16:04 ` [PATCH v5 3/4] net/intel: use common Tx entry structure Shaiq Wani
2025-03-28 17:17 ` Bruce Richardson
2025-03-27 16:04 ` [PATCH v5 4/4] net/idpf: use common Tx free fn in idpf Shaiq Wani
2025-03-28 17:25 ` Bruce Richardson
2025-03-28 15:29 ` [PATCH v5 0/4] net/intel: using common functions in idpf driver Bruce Richardson
2025-03-28 15:36 ` David Marchand
2025-03-28 17:58 ` Bruce Richardson
2025-03-27 10:45 ` [PATCH v4 2/4] net/intel: use common Tx queue structure Shaiq Wani
2025-03-27 10:45 ` [PATCH v4 3/4] net/intel: use common Tx entry structure Shaiq Wani
2025-03-27 10:45 ` [PATCH v4 4/4] net/idpf: use common Tx free fn in idpf Shaiq Wani
2025-03-24 12:49 ` [PATCH v3 2/4] net/intel: align Tx queue struct field names Shaiq Wani
2025-03-24 13:16 ` Bruce Richardson
2025-03-24 12:49 ` [PATCH v3 3/4] net/intel: use common Tx entry structure Shaiq Wani
2025-03-24 12:49 ` [PATCH v3 4/4] net/idpf: use common Tx free fn in idpf Shaiq Wani
2025-03-24 12:39 ` [PATCH v2 2/4] net/intel: align Tx queue struct field names Shaiq Wani
2025-03-24 12:40 ` [PATCH v2 3/4] net/intel: use common Tx entry structure Shaiq Wani
2025-03-24 12:40 ` Shaiq Wani [this message]