From: Feifei Wang <feifei.wang2@arm.com>
To: Yuying Zhang <Yuying.Zhang@intel.com>,
	Beilei Xing <beilei.xing@intel.com>
Cc: dev@dpdk.org, mb@smartsharesystems.com,
	konstantin.v.ananyev@yandex.ru, nd@arm.com,
	Feifei Wang <feifei.wang2@arm.com>,
	Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>,
	Ruifeng Wang <ruifeng.wang@arm.com>
Subject: [PATCH v4 2/3] net/i40e: implement recycle buffer mode
Date: Thu, 23 Mar 2023 18:43:29 +0800	[thread overview]
Message-ID: <20230323104330.3823251-3-feifei.wang2@arm.com> (raw)
In-Reply-To: <20230323104330.3823251-1-feifei.wang2@arm.com>

Define the specific function implementations for the i40e driver.
Currently, recycle buffer mode supports the 128-bit vector path,
and it can be enabled in both fast-free and no-fast-free Tx modes.
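
An illustrative usage sketch (assumptions, not part of this patch):
the wrapper names below follow the generic API added in patch 1/3 and
should be treated as hypothetical here; only the
rte_eth_rxq_buf_recycle_info struct and the per-queue info query are
implemented for i40e by this patch.

	struct rte_eth_rxq_buf_recycle_info recycle_info;

	/* Query the Rx queue once at setup time; this patch provides
	 * the i40e callback behind this query.
	 */
	rte_eth_rx_queue_buf_recycle_info_get(rx_port, rx_queue_id,
			&recycle_info);

	/* In the forwarding loop, move freed Tx buffers straight into
	 * the Rx descriptor ring instead of bouncing them through the
	 * mempool put/get path.
	 */
	rte_eth_dev_buf_recycle(rx_port, rx_queue_id, tx_port,
			tx_queue_id, &recycle_info);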

Suggested-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Signed-off-by: Feifei Wang <feifei.wang2@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
---
 drivers/net/i40e/i40e_ethdev.c          |   1 +
 drivers/net/i40e/i40e_ethdev.h          |   2 +
 drivers/net/i40e/i40e_rxtx.c            |  24 +++++
 drivers/net/i40e/i40e_rxtx.h            |   4 +
 drivers/net/i40e/i40e_rxtx_vec_common.h | 128 ++++++++++++++++++++++++
 5 files changed, 159 insertions(+)
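
Note for reviewers: the generic recycle routine from patch 1/3 is
expected to drive the two callbacks added below back to back, roughly
as in this minimal sketch (error handling elided):

	uint16_t n;

	/* Free a burst of tx_rs_thresh Tx buffers and stash them
	 * directly into the Rx software ring at *refill_head;
	 * returns 0 if recycling is not possible for this burst.
	 */
	n = i40e_tx_buf_stash_vec(tx_queue, &recycle_info);

	/* Re-initialize that many Rx descriptors from the stashed
	 * buffers; this advances rxq->rxrearm_start by n and writes
	 * the queue tail register.
	 */
	if (n != 0)
		i40e_rx_descriptors_refill_vec(rx_queue, n);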

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index cb0070f94b..456fb256f5 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -496,6 +496,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
 	.flow_ops_get                 = i40e_dev_flow_ops_get,
 	.rxq_info_get                 = i40e_rxq_info_get,
 	.txq_info_get                 = i40e_txq_info_get,
+	.rxq_buf_recycle_info_get     = i40e_rxq_buf_recycle_info_get,
 	.rx_burst_mode_get            = i40e_rx_burst_mode_get,
 	.tx_burst_mode_get            = i40e_tx_burst_mode_get,
 	.timesync_enable              = i40e_timesync_enable,
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 9b806d130e..83c5ff5859 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -1355,6 +1355,8 @@ void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	struct rte_eth_rxq_info *qinfo);
 void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	struct rte_eth_txq_info *qinfo);
+void i40e_rxq_buf_recycle_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_buf_recycle_info *rxq_buf_recycle_info);
 int i40e_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id,
 			   struct rte_eth_burst_mode *mode);
 int i40e_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id,
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 788ffb51c2..479964c6c4 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -3197,6 +3197,28 @@ i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.offloads = txq->offloads;
 }
 
+void
+i40e_rxq_buf_recycle_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_buf_recycle_info *rxq_buf_recycle_info)
+{
+	struct i40e_rx_queue *rxq;
+
+	rxq = dev->data->rx_queues[queue_id];
+
+	rxq_buf_recycle_info->buf_ring = (void *)rxq->sw_ring;
+	rxq_buf_recycle_info->mp = rxq->mp;
+	rxq_buf_recycle_info->buf_ring_size = rxq->nb_rx_desc;
+	rxq_buf_recycle_info->refill_request = RTE_I40E_RXQ_REARM_THRESH;
+
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+	rxq_buf_recycle_info->refill_head = &rxq->rxrearm_start + 0xF;
+	rxq_buf_recycle_info->receive_tail = &rxq->rx_tail + 0xF;
+#else
+	rxq_buf_recycle_info->refill_head = &rxq->rxrearm_start;
+	rxq_buf_recycle_info->receive_tail = &rxq->rx_tail;
+#endif
+}
+
 #ifdef RTE_ARCH_X86
 static inline bool
 get_avx_supported(bool request_avx512)
@@ -3273,6 +3295,7 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
 
 	if (ad->rx_vec_allowed  &&
 	    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
+		dev->rx_descriptors_refill = i40e_rx_descriptors_refill_vec;
 #ifdef RTE_ARCH_X86
 		if (dev->data->scattered_rx) {
 			if (ad->rx_use_avx512) {
@@ -3465,6 +3488,7 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
 	if (ad->tx_simple_allowed) {
 		if (ad->tx_vec_allowed &&
 		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
+			dev->tx_buf_stash = i40e_tx_buf_stash_vec;
 #ifdef RTE_ARCH_X86
 			if (ad->tx_use_avx512) {
 #ifdef CC_AVX512_SUPPORT
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 5e6eecc501..0ad8f530f9 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -233,6 +233,10 @@ uint32_t i40e_dev_rx_queue_count(void *rx_queue);
 int i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
 
+uint16_t i40e_tx_buf_stash_vec(void *tx_queue,
+		struct rte_eth_rxq_buf_recycle_info *rxq_buf_recycle_info);
+uint16_t i40e_rx_descriptors_refill_vec(void *rx_queue, uint16_t nb);
+
 uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 			    uint16_t nb_pkts);
 uint16_t i40e_recv_scattered_pkts_vec(void *rx_queue,
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
index fe1a6ec75e..068ce694f2 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -156,6 +156,134 @@ tx_backlog_entry(struct i40e_tx_entry *txep,
 		txep[i].mbuf = tx_pkts[i];
 }
 
+uint16_t
+i40e_tx_buf_stash_vec(void *tx_queue,
+	struct rte_eth_rxq_buf_recycle_info *rxq_buf_recycle_info)
+{
+	struct i40e_tx_queue *txq = tx_queue;
+	struct i40e_tx_entry *txep;
+	struct rte_mbuf **rxep;
+	struct rte_mbuf *m[RTE_I40E_TX_MAX_FREE_BUF_SZ];
+	int i, j, n;
+	uint16_t avail = 0;
+	uint16_t buf_ring_size = rxq_buf_recycle_info->buf_ring_size;
+	uint16_t mask = rxq_buf_recycle_info->buf_ring_size - 1;
+	uint16_t refill_request = rxq_buf_recycle_info->refill_request;
+	uint16_t refill_head = *rxq_buf_recycle_info->refill_head;
+	uint16_t receive_tail = *rxq_buf_recycle_info->receive_tail;
+
+	/* Get available recycling Rx buffers. */
+	avail = (buf_ring_size - (refill_head - receive_tail)) & mask;
+
+	/* Check Tx free thresh and Rx available space. */
+	if (txq->nb_tx_free > txq->tx_free_thresh || avail <= txq->tx_rs_thresh)
+		return 0;
+
+	/* check DD bits on threshold descriptor */
+	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+				rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
+			rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+		return 0;
+
+	n = txq->tx_rs_thresh;
+
+	/* Buffer recycling cannot handle wraparound of the Rx buffer ring,
+	 * so two cases are supported:
+	 *
+	 * case 1 (refill_request != 0): the refill head of the Rx buffer
+	 * ring must stay aligned to the ring size, so the number of Tx
+	 * buffers freed per call has to equal refill_request. For example,
+	 * with refill_request = 32, only bursts of exactly 32 buffers are
+	 * accepted, and the refill head advances in aligned steps that
+	 * never cross the ring end.
+	 *
+	 * case 2 (refill_request == 0): the refill head need not be
+	 * aligned, but its update must not run past the end of the Rx
+	 * buffer ring.
+	 */
+	if ((refill_request && refill_request != n) ||
+		(!refill_request && (refill_head + n > buf_ring_size)))
+		return 0;
+
+	/* First buffer to free from S/W ring is at index
+	 * tx_next_dd - (tx_rs_thresh-1).
+	 */
+	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
+	rxep = rxq_buf_recycle_info->buf_ring;
+	rxep += refill_head;
+
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
+		/* Directly put mbufs from Tx to Rx. */
+		for (i = 0; i < n; i++, rxep++, txep++)
+			*rxep = txep[0].mbuf;
+	} else {
+		for (i = 0, j = 0; i < n; i++) {
+			/* Bail out if a Tx buffer does not come from
+			 * the expected mempool.
+			 */
+			if (unlikely(rxq_buf_recycle_info->mp
+						!= txep[i].mbuf->pool))
+				return 0;
+
+			m[j] = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+
+			/* In case 1, each Tx buffer must be the last
+			 * reference to its mbuf.
+			 */
+			if (unlikely(m[j] == NULL && refill_request))
+				return 0;
+			/* In case 2, the number of valid freed Tx
+			 * buffers is recorded.
+			 */
+			j++;
+		}
+		rte_memcpy(rxep, m, sizeof(void *) * j);
+	}
+
+	/* Update counters for Tx. */
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+	if (txq->tx_next_dd >= txq->nb_tx_desc)
+		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+	return n;
+}
+
+uint16_t
+i40e_rx_descriptors_refill_vec(void *rx_queue, uint16_t nb)
+{
+	struct i40e_rx_queue *rxq = rx_queue;
+	struct i40e_rx_entry *rxep;
+	volatile union i40e_rx_desc *rxdp;
+	uint16_t rx_id;
+	uint64_t paddr;
+	uint64_t dma_addr;
+	uint16_t i;
+
+	rxdp = rxq->rx_ring + rxq->rxrearm_start;
+	rxep = &rxq->sw_ring[rxq->rxrearm_start];
+
+	for (i = 0; i < nb; i++) {
+		/* Initialize rxdp descs. */
+		paddr = (rxep[i].mbuf)->buf_iova + RTE_PKTMBUF_HEADROOM;
+		dma_addr = rte_cpu_to_le_64(paddr);
+		/* flush desc with pa dma_addr */
+		rxdp[i].read.hdr_addr = 0;
+		rxdp[i].read.pkt_addr = dma_addr;
+	}
+
+	/* Update the descriptor initializer index */
+	rxq->rxrearm_start += nb;
+	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+		rxq->rxrearm_start = 0;
+
+	rxq->rxrearm_nb -= nb;
+
+	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+			(rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+	rte_io_wmb();
+	/* Update the tail pointer on the NIC */
+	I40E_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rx_id);
+
+	return nb;
+}
+
 static inline void
 _i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
 {
-- 
2.25.1

