DPDK patches and discussions
From: Feifei Wang <feifei.wang2@arm.com>
To: Yuying Zhang <Yuying.Zhang@intel.com>,
	Beilei Xing <beilei.xing@intel.com>,
	Ruifeng Wang <ruifeng.wang@arm.com>
Cc: dev@dpdk.org, nd@arm.com, Feifei Wang <feifei.wang2@arm.com>,
	Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>,
	Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Subject: [RFC PATCH v1] net/i40e: put mempool cache out of API
Date: Mon, 13 Jun 2022 13:51:36 +0800
Message-ID: <20220613055136.1949784-1-feifei.wang2@arm.com>

Refer to "i40e_tx_free_bufs_avx512", this patch puts mempool cache
out of API to free buffers directly. There are two changes different
with previous version:
1. change txep from "i40e_entry" to "i40e_vec_entry"
2. put cache out of "mempool_bulk" API to copy buffers into it directly
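
For illustration only (not part of the diff below), here is a minimal,
standalone sketch of the direct-cache free pattern, assuming DPDK's
public mempool cache fields; "free_mbufs_to_cache" is a hypothetical
helper name used only for this example:

	#include <rte_lcore.h>
	#include <rte_mbuf.h>
	#include <rte_memcpy.h>
	#include <rte_mempool.h>

	/* Sketch only: free a burst of mbufs by writing them straight into
	 * the per-lcore mempool cache, falling back to the mempool backend
	 * when no cache is available or the burst is too large.
	 */
	static inline void
	free_mbufs_to_cache(struct rte_mbuf **mbufs, uint32_t n)
	{
		struct rte_mempool *mp = mbufs[0]->pool;
		struct rte_mempool_cache *cache =
			rte_mempool_default_cache(mp, rte_lcore_id());

		if (cache == NULL || n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
			/* No usable cache: enqueue to the backend directly. */
			rte_mempool_ops_enqueue_bulk(mp, (void **)mbufs, n);
			return;
		}

		/* Append the mbuf pointers to the cache object array. */
		rte_memcpy(&cache->objs[cache->len], mbufs, sizeof(void *) * n);
		cache->len += n;

		/* If the cache crosses its flush threshold, push the excess
		 * back to the backend and shrink it to its nominal size.
		 */
		if (cache->len >= cache->flushthresh) {
			rte_mempool_ops_enqueue_bulk(mp,
					&cache->objs[cache->size],
					cache->len - cache->size);
			cache->len = cache->size;
		}
	}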

Performance test with l3fwd neon path:
		with this patch
n1sdp:		no performance change
ampere-altra:	+4.0%

Suggested-by: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
Suggested-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Signed-off-by: Feifei Wang <feifei.wang2@arm.com>
---
 drivers/net/i40e/i40e_rxtx_vec_common.h | 36 ++++++++++++++++++++-----
 drivers/net/i40e/i40e_rxtx_vec_neon.c   | 10 ++++---
 2 files changed, 36 insertions(+), 10 deletions(-)

diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
index 959832ed6a..e418225b4e 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -81,7 +81,7 @@ reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
 static __rte_always_inline int
 i40e_tx_free_bufs(struct i40e_tx_queue *txq)
 {
-	struct i40e_tx_entry *txep;
+	struct i40e_vec_tx_entry *txep;
 	uint32_t n;
 	uint32_t i;
 	int nb_free = 0;
@@ -98,17 +98,39 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
 	 /* first buffer to free from S/W ring is at index
 	  * tx_next_dd - (tx_rs_thresh-1)
 	  */
-	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
+	txep = (void *)txq->sw_ring;
+	txep += txq->tx_next_dd - (n - 1);
 
 	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
-		for (i = 0; i < n; i++) {
-			free[i] = txep[i].mbuf;
-			/* no need to reset txep[i].mbuf in vector path */
+		struct rte_mempool *mp = txep[0].mbuf->pool;
+		void **cache_objs;
+		struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
+				rte_lcore_id());
+
+		if (!cache || cache->len == 0)
+			goto normal;
+
+		cache_objs = &cache->objs[cache->len];
+
+		if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
+			rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n);
+			goto done;
+		}
+
+		rte_memcpy(cache_objs, txep, sizeof(void *) * n);
+		/* no need to reset txep[i].mbuf in vector path */
+		cache->len += n;
+
+		if (cache->len >= cache->flushthresh) {
+			rte_mempool_ops_enqueue_bulk
+				(mp, &cache->objs[cache->size],
+				cache->len - cache->size);
+			cache->len = cache->size;
 		}
-		rte_mempool_put_bulk(free[0]->pool, (void **)free, n);
 		goto done;
 	}
 
+normal:
 	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
 	if (likely(m != NULL)) {
 		free[0] = m;
@@ -147,7 +169,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
 }
 
 static __rte_always_inline void
-tx_backlog_entry(struct i40e_tx_entry *txep,
+tx_backlog_entry(struct i40e_vec_tx_entry *txep,
 		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
 	int i;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
index 12e6f1cbcb..d2d61e8ef4 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -680,12 +680,15 @@ i40e_xmit_fixed_burst_vec(void *__rte_restrict tx_queue,
 {
 	struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
 	volatile struct i40e_tx_desc *txdp;
-	struct i40e_tx_entry *txep;
+	struct i40e_vec_tx_entry *txep;
 	uint16_t n, nb_commit, tx_id;
 	uint64_t flags = I40E_TD_CMD;
 	uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
 	int i;
 
+	/* cross rx_thresh boundary is not allowed */
+	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
 	if (txq->nb_tx_free < txq->tx_free_thresh)
 		i40e_tx_free_bufs(txq);
 
@@ -695,7 +698,8 @@ i40e_xmit_fixed_burst_vec(void *__rte_restrict tx_queue,
 
 	tx_id = txq->tx_tail;
 	txdp = &txq->tx_ring[tx_id];
-	txep = &txq->sw_ring[tx_id];
+	txep = (void *)txq->sw_ring;
+	txep += tx_id;
 
 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
 
@@ -715,7 +719,7 @@ i40e_xmit_fixed_burst_vec(void *__rte_restrict tx_queue,
 
 		/* avoid reach the end of ring */
 		txdp = &txq->tx_ring[tx_id];
-		txep = &txq->sw_ring[tx_id];
+		txep = (void *)txq->sw_ring;
 	}
 
 	tx_backlog_entry(txep, tx_pkts, nb_commit);
-- 
2.25.1



Thread overview: 12+ messages
2022-06-13  5:51 Feifei Wang [this message]
2022-06-13  9:58 ` Morten Brørup
2022-06-22 19:07   ` Honnappa Nagarahalli
2022-07-03 12:20 ` Konstantin Ananyev
2022-07-06  8:52   ` Re: " Feifei Wang
2022-07-06 11:35     ` Feifei Wang
2022-07-11  3:08       ` Feifei Wang
2022-07-11  6:19   ` Honnappa Nagarahalli
2022-07-13 18:48     ` Konstantin Ananyev
2022-07-15 21:52       ` Honnappa Nagarahalli
2022-07-21 10:02         ` Konstantin Ananyev
2022-07-15 22:06       ` Honnappa Nagarahalli
