From: Feifei Wang <feifei.wang2@arm.com>
To: Qiming Yang <qiming.yang@intel.com>, Wenjun Wu <wenjun1.wu@intel.com>
Cc: dev@dpdk.org, mb@smartsharesystems.com,
konstantin.v.ananyev@yandex.ru, nd@arm.com,
Feifei Wang <feifei.wang2@arm.com>,
Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>,
Ruifeng Wang <ruifeng.wang@arm.com>
Subject: [PATCH v4 3/3] net/ixgbe: implement recycle buffer mode
Date: Thu, 23 Mar 2023 18:43:30 +0800 [thread overview]
Message-ID: <20230323104330.3823251-4-feifei.wang2@arm.com> (raw)
In-Reply-To: <20230323104330.3823251-1-feifei.wang2@arm.com>
Define the specific function implementations for the ixgbe driver.
Currently, the recycle buffer mode supports the 128-bit
vector path, and it can be enabled in both fast-free and
no-fast-free mode.
Suggested-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Signed-off-by: Feifei Wang <feifei.wang2@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
---
drivers/net/ixgbe/ixgbe_ethdev.c | 1 +
drivers/net/ixgbe/ixgbe_ethdev.h | 3 +
drivers/net/ixgbe/ixgbe_rxtx.c | 25 +++++
drivers/net/ixgbe/ixgbe_rxtx.h | 4 +
drivers/net/ixgbe/ixgbe_rxtx_vec_common.h | 127 ++++++++++++++++++++++
5 files changed, 160 insertions(+)
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 88118bc305..3bada9abbd 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -543,6 +543,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
.rxq_info_get = ixgbe_rxq_info_get,
.txq_info_get = ixgbe_txq_info_get,
+ .rxq_buf_recycle_info_get = ixgbe_rxq_buf_recycle_info_get,
.timesync_enable = ixgbe_timesync_enable,
.timesync_disable = ixgbe_timesync_disable,
.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 48290af512..ca6aa0da64 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -625,6 +625,9 @@ void ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
void ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
+void ixgbe_rxq_buf_recycle_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_buf_recycle_info *rxq_buf_recycle_info);
+
int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);
void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index c9d6ca9efe..ad276cbf33 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2558,6 +2558,7 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
(rte_eal_process_type() != RTE_PROC_PRIMARY ||
ixgbe_txq_vec_setup(txq) == 0)) {
PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
+ dev->tx_buf_stash = ixgbe_tx_buf_stash_vec;
dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
} else
dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
@@ -4852,6 +4853,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
RTE_IXGBE_DESCS_PER_LOOP,
dev->data->port_id);
+ dev->rx_descriptors_refill = ixgbe_rx_descriptors_refill_vec;
dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
} else if (adapter->rx_bulk_alloc_allowed) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -5623,6 +5625,29 @@ ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
+void
+ixgbe_rxq_buf_recycle_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_buf_recycle_info *rxq_buf_recycle_info)
+{
+ struct ixgbe_rx_queue *rxq;
+ struct ixgbe_adapter *adapter = dev->data->dev_private;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ rxq_buf_recycle_info->buf_ring = (void *)rxq->sw_ring;
+ rxq_buf_recycle_info->mp = rxq->mb_pool;
+ rxq_buf_recycle_info->buf_ring_size = rxq->nb_rx_desc;
+ rxq_buf_recycle_info->receive_tail = &rxq->rx_tail;
+
+ if (adapter->rx_vec_allowed) {
+ rxq_buf_recycle_info->refill_request = RTE_IXGBE_RXQ_REARM_THRESH;
+ rxq_buf_recycle_info->refill_head = &rxq->rxrearm_start;
+ } else {
+ rxq_buf_recycle_info->refill_request = rxq->rx_free_thresh;
+ rxq_buf_recycle_info->refill_head = &rxq->rx_free_trigger;
+ }
+}
+
/*
* [VF] Initializes Receive Unit.
*/
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 668a5b9814..18f890f91a 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -295,6 +295,10 @@ int ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt);
extern const uint32_t ptype_table[IXGBE_PACKET_TYPE_MAX];
extern const uint32_t ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX];
+uint16_t ixgbe_tx_buf_stash_vec(void *tx_queue,
+ struct rte_eth_rxq_buf_recycle_info *rxq_buf_recycle_info);
+uint16_t ixgbe_rx_descriptors_refill_vec(void *rx_queue, uint16_t nb);
+
uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index a4d9ec9b08..e66a4a2d5b 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -139,6 +139,133 @@ tx_backlog_entry(struct ixgbe_tx_entry_v *txep,
txep[i].mbuf = tx_pkts[i];
}
+uint16_t
+ixgbe_tx_buf_stash_vec(void *tx_queue,
+ struct rte_eth_rxq_buf_recycle_info *rxq_buf_recycle_info)
+{
+ struct ixgbe_tx_queue *txq = tx_queue;
+ struct ixgbe_tx_entry *txep;
+ struct rte_mbuf **rxep;
+ struct rte_mbuf *m[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
+ int i, j, n;
+ uint32_t status;
+ uint16_t avail = 0;
+ uint16_t buf_ring_size = rxq_buf_recycle_info->buf_ring_size;
+ uint16_t mask = rxq_buf_recycle_info->buf_ring_size - 1;
+ uint16_t refill_request = rxq_buf_recycle_info->refill_request;
+ uint16_t refill_head = *rxq_buf_recycle_info->refill_head;
+ uint16_t receive_tail = *rxq_buf_recycle_info->receive_tail;
+
+ /* Get available recycling Rx buffers. */
+ avail = (buf_ring_size - (refill_head - receive_tail)) & mask;
+
+ /* Check Tx free thresh and Rx available space. */
+ if (txq->nb_tx_free > txq->tx_free_thresh || avail <= txq->tx_rs_thresh)
+ return 0;
+
+ /* check DD bits on threshold descriptor */
+ status = txq->tx_ring[txq->tx_next_dd].wb.status;
+ if (!(status & IXGBE_ADVTXD_STAT_DD))
+ return 0;
+
+ n = txq->tx_rs_thresh;
+
+ /* Buffer recycling is only supported when the ring buffer does
+ * not wrap around. There are two cases for this:
+ *
+ * case 1: The refill head of the Rx buffer ring needs to be aligned
+ * with the buffer ring size. In this case, the number of Tx buffers
+ * being freed should be equal to refill_request.
+ *
+ * case 2: The refill head of the Rx buffer ring does not need to be
+ * aligned with the buffer ring size. In this case, the update of the
+ * refill head cannot exceed the Rx buffer ring size.
+ */
+ if (refill_request != n ||
+ (!refill_request && (refill_head + n > buf_ring_size)))
+ return 0;
+
+ /* First buffer to free from S/W ring is at index
+ * tx_next_dd - (tx_rs_thresh-1).
+ */
+ txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
+ rxep = rxq_buf_recycle_info->buf_ring;
+ rxep += refill_head;
+
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
+ /* Directly put mbufs from Tx to Rx. */
+ for (i = 0; i < n; i++, rxep++, txep++)
+ *rxep = txep[0].mbuf;
+ } else {
+ for (i = 0, j = 0; i < n; i++) {
+ /* Bail out if txq contains buffers from an unexpected mempool. */
+ if (unlikely(rxq_buf_recycle_info->mp
+ != txep[i].mbuf->pool))
+ return 0;
+
+ m[j] = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+
+ /* In case 1, each of Tx buffers should be the
+ * last reference.
+ */
+ if (unlikely(m[j] == NULL && refill_request))
+ return 0;
+ /* In case 2, the number of valid Tx free
+ * buffers should be recorded.
+ */
+ j++;
+ }
+ rte_memcpy(rxep, m, sizeof(void *) * j);
+ }
+
+ /* Update counters for Tx. */
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ return n;
+}
+
+uint16_t
+ixgbe_rx_descriptors_refill_vec(void *rx_queue, uint16_t nb)
+{
+ struct ixgbe_rx_queue *rxq = rx_queue;
+ struct ixgbe_rx_entry *rxep;
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ uint16_t rx_id;
+ uint64_t paddr;
+ uint64_t dma_addr;
+ uint16_t i;
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+ rxep = &rxq->sw_ring[rxq->rxrearm_start];
+
+ for (i = 0; i < nb; i++) {
+ /* Initialize rxdp descs. */
+ paddr = (rxep[i].mbuf)->buf_iova + RTE_PKTMBUF_HEADROOM;
+ dma_addr = rte_cpu_to_le_64(paddr);
+ /* flush desc with pa dma_addr */
+ rxdp[i].read.hdr_addr = 0;
+ rxdp[i].read.pkt_addr = dma_addr;
+ }
+
+ /* Update the descriptor initializer index */
+ rxq->rxrearm_start += nb;
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= nb;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+ /* Update the tail pointer on the NIC */
+ IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+
+ return nb;
+}
+
static inline void
_ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{
--
2.25.1
next prev parent reply other threads:[~2023-03-23 10:43 UTC|newest]
Thread overview: 67+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-12-24 16:46 [RFC PATCH v1 0/4] Direct re-arming of buffers on receive side Feifei Wang
2021-12-24 16:46 ` [RFC PATCH v1 1/4] net/i40e: enable direct re-arm mode Feifei Wang
2021-12-24 16:46 ` [RFC PATCH v1 2/4] ethdev: add API for " Feifei Wang
2021-12-24 19:38 ` Stephen Hemminger
2021-12-26 9:49 ` 回复: " Feifei Wang
2021-12-26 10:31 ` Morten Brørup
2021-12-24 16:46 ` [RFC PATCH v1 3/4] net/i40e: add direct re-arm mode internal API Feifei Wang
2021-12-24 16:46 ` [RFC PATCH v1 4/4] examples/l3fwd: give an example for direct rearm mode Feifei Wang
2021-12-26 10:25 ` [RFC PATCH v1 0/4] Direct re-arming of buffers on receive side Morten Brørup
2021-12-28 6:55 ` 回复: " Feifei Wang
2022-01-18 15:51 ` Ferruh Yigit
2022-01-18 16:53 ` Thomas Monjalon
2022-01-18 17:27 ` Morten Brørup
2022-01-27 5:24 ` Honnappa Nagarahalli
2022-01-27 16:45 ` Ananyev, Konstantin
2022-02-02 19:46 ` Honnappa Nagarahalli
2022-01-27 5:16 ` Honnappa Nagarahalli
2023-02-28 6:43 ` 回复: " Feifei Wang
2023-02-28 6:52 ` Feifei Wang
2022-01-27 4:06 ` Honnappa Nagarahalli
2022-01-27 17:13 ` Morten Brørup
2022-01-28 11:29 ` Morten Brørup
2023-03-23 10:43 ` [PATCH v4 0/3] Recycle buffers from Tx to Rx Feifei Wang
2023-03-23 10:43 ` [PATCH v4 1/3] ethdev: add API for buffer recycle mode Feifei Wang
2023-03-23 11:41 ` Morten Brørup
2023-03-29 2:16 ` Feifei Wang
2023-03-23 10:43 ` [PATCH v4 2/3] net/i40e: implement recycle buffer mode Feifei Wang
2023-03-23 10:43 ` Feifei Wang [this message]
2023-03-30 6:29 ` [PATCH v5 0/3] Recycle buffers from Tx to Rx Feifei Wang
2023-03-30 6:29 ` [PATCH v5 1/3] ethdev: add API for buffer recycle mode Feifei Wang
2023-03-30 7:19 ` Morten Brørup
2023-03-30 9:31 ` Feifei Wang
2023-03-30 15:15 ` Morten Brørup
2023-03-30 15:58 ` Morten Brørup
2023-04-26 6:59 ` Feifei Wang
2023-04-19 14:46 ` Ferruh Yigit
2023-04-26 7:29 ` Feifei Wang
2023-03-30 6:29 ` [PATCH v5 2/3] net/i40e: implement recycle buffer mode Feifei Wang
2023-03-30 6:29 ` [PATCH v5 3/3] net/ixgbe: " Feifei Wang
2023-04-19 14:46 ` Ferruh Yigit
2023-04-26 7:36 ` Feifei Wang
2023-03-30 15:04 ` [PATCH v5 0/3] Recycle buffers from Tx to Rx Stephen Hemminger
2023-04-03 2:48 ` Feifei Wang
2023-04-19 14:56 ` Ferruh Yigit
2023-04-25 7:57 ` Feifei Wang
2023-05-25 9:45 ` [PATCH v6 0/4] Recycle mbufs from Tx queue to Rx queue Feifei Wang
2023-05-25 9:45 ` [PATCH v6 1/4] ethdev: add API for mbufs recycle mode Feifei Wang
2023-05-25 15:08 ` Morten Brørup
2023-05-31 6:10 ` Feifei Wang
2023-06-05 12:53 ` Константин Ананьев
2023-06-06 2:55 ` Feifei Wang
2023-06-06 7:10 ` Konstantin Ananyev
2023-06-06 7:31 ` Feifei Wang
2023-06-06 8:34 ` Konstantin Ananyev
2023-06-07 0:00 ` Ferruh Yigit
2023-06-12 3:25 ` Feifei Wang
2023-05-25 9:45 ` [PATCH v6 2/4] net/i40e: implement " Feifei Wang
2023-06-05 13:02 ` Константин Ананьев
2023-06-06 3:16 ` Feifei Wang
2023-06-06 7:18 ` Konstantin Ananyev
2023-06-06 7:58 ` Feifei Wang
2023-06-06 8:27 ` Konstantin Ananyev
2023-06-12 3:05 ` Feifei Wang
2023-05-25 9:45 ` [PATCH v6 3/4] net/ixgbe: " Feifei Wang
2023-05-25 9:45 ` [PATCH v6 4/4] app/testpmd: add recycle mbufs engine Feifei Wang
2023-06-05 13:08 ` Константин Ананьев
2023-06-06 6:32 ` Feifei Wang
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230323104330.3823251-4-feifei.wang2@arm.com \
--to=feifei.wang2@arm.com \
--cc=dev@dpdk.org \
--cc=honnappa.nagarahalli@arm.com \
--cc=konstantin.v.ananyev@yandex.ru \
--cc=mb@smartsharesystems.com \
--cc=nd@arm.com \
--cc=qiming.yang@intel.com \
--cc=ruifeng.wang@arm.com \
--cc=wenjun1.wu@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).