DPDK patches and discussions
 help / color / mirror / Atom feed
From: Wenbo Cao <caowenbo@mucse.com>
To: thomas@monjalon.net, Wenbo Cao <caowenbo@mucse.com>
Cc: stephen@networkplumber.org, dev@dpdk.org, ferruh.yigit@amd.com,
	andrew.rybchenko@oktetlabs.ru, yaojun@mucse.com
Subject: [PATCH v7 14/28] net/rnp: add Rx burst simple support
Date: Sat,  8 Feb 2025 10:43:51 +0800	[thread overview]
Message-ID: <1738982645-34550-15-git-send-email-caowenbo@mucse.com> (raw)
In-Reply-To: <1738982645-34550-1-git-send-email-caowenbo@mucse.com>

Add support for the simple receive path: burst Rx of single-segment packets, without offload handling.

Signed-off-by: Wenbo Cao <caowenbo@mucse.com>
---
 drivers/net/rnp/rnp_ethdev.c |   7 +++
 drivers/net/rnp/rnp_rxtx.c   | 129 +++++++++++++++++++++++++++++++++++++++++++
 drivers/net/rnp/rnp_rxtx.h   |   5 ++
 3 files changed, 141 insertions(+)

diff --git a/drivers/net/rnp/rnp_ethdev.c b/drivers/net/rnp/rnp_ethdev.c
index e229b2e..e5f984f 100644
--- a/drivers/net/rnp/rnp_ethdev.c
+++ b/drivers/net/rnp/rnp_ethdev.c
@@ -329,6 +329,8 @@ static int rnp_dev_start(struct rte_eth_dev *eth_dev)
 	rnp_dev_set_link_up(eth_dev);
 	/* enable eth rx flow */
 	RNP_RX_ETH_ENABLE(hw, lane);
+	rnp_rx_func_select(eth_dev);
+	rnp_tx_func_select(eth_dev);
 	port->port_stopped = 0;
 
 	return 0;
@@ -568,6 +570,11 @@ static int rnp_dev_infos_get(struct rte_eth_dev *eth_dev,
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_drop_en = 0,
+		.rx_thresh = {
+			.pthresh = RNP_RX_DESC_FETCH_TH,
+			.hthresh = RNP_RX_DESC_FETCH_BURST,
+		},
+		.rx_free_thresh = RNP_DEFAULT_RX_FREE_THRESH,
 		.offloads = 0,
 	};
 
diff --git a/drivers/net/rnp/rnp_rxtx.c b/drivers/net/rnp/rnp_rxtx.c
index 2b172c8..8553fbf 100644
--- a/drivers/net/rnp/rnp_rxtx.c
+++ b/drivers/net/rnp/rnp_rxtx.c
@@ -641,3 +641,132 @@ int rnp_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
 
 	return 0;
 }
+
+#define RNP_CACHE_FETCH_RX (4)
+/*
+ * Refill free Rx descriptors with fresh mbufs, starting at rxrearm_start.
+ * On success returns the number of descriptors rearmed (rx_free_thresh);
+ * on mempool exhaustion returns 0 after bumping rx_mbuf_alloc_failed.
+ */
+static __rte_always_inline int
+rnp_refill_rx_ring(struct rnp_rx_queue *rxq)
+{
+	volatile struct rnp_rx_desc *rxbd;
+	struct rnp_rxsw_entry *rx_swbd;
+	struct rte_mbuf *mb;
+	uint16_t j, i;
+	uint16_t rx_id;
+	int ret;
+
+	rxbd = rxq->rx_bdr + rxq->rxrearm_start;
+	rx_swbd = &rxq->sw_ring[rxq->rxrearm_start];
+	ret = rte_mempool_get_bulk(rxq->mb_pool, (void *)rx_swbd,
+			rxq->rx_free_thresh);
+	if (unlikely(ret != 0)) {
+		/* Pool empty: if the ring is about to run dry, park a small
+		 * batch of descriptors on the fake mbuf with a null DMA
+		 * address so the NIC cannot write into freed memory.
+		 * NOTE(review): only RNP_CACHE_FETCH_RX descriptors are
+		 * parked here, yet the stats below charge a full
+		 * rx_free_thresh worth of failures -- confirm intended.
+		 */
+		if (rxq->rxrearm_nb + rxq->rx_free_thresh >= rxq->attr.nb_desc) {
+			for (i = 0; i < RNP_CACHE_FETCH_RX; i++) {
+				rx_swbd[i].mbuf = &rxq->fake_mbuf;
+				rxbd[i].d.pkt_addr = 0;
+				rxbd[i].d.cmd = 0;
+			}
+		}
+		rte_eth_devices[rxq->attr.port_id].data->rx_mbuf_alloc_failed +=
+			rxq->rx_free_thresh;
+		return 0;
+	}
+	/* Attach each new mbuf to its descriptor and hand it to hardware. */
+	for (j = 0; j < rxq->rx_free_thresh; ++j) {
+		mb = rx_swbd[j].mbuf;
+		rte_mbuf_refcnt_set(mb, 1);
+		mb->data_off = RTE_PKTMBUF_HEADROOM;
+		mb->port = rxq->attr.port_id;
+
+		rxbd[j].d.pkt_addr = rnp_get_dma_addr(&rxq->attr, mb);
+		rxbd[j].d.cmd = 0;
+	}
+	/* NOTE(review): wrap test uses nb_desc - 1, not nb_desc -- confirm
+	 * the hardware reserves the last ring entry; otherwise one
+	 * descriptor is skipped per ring traversal.
+	 */
+	rxq->rxrearm_start += rxq->rx_free_thresh;
+	if (rxq->rxrearm_start >= rxq->attr.nb_desc - 1)
+		rxq->rxrearm_start = 0;
+	rxq->rxrearm_nb -= rxq->rx_free_thresh;
+
+	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+			(rxq->attr.nb_desc - 1) : (rxq->rxrearm_start - 1));
+	/* Make descriptor writes visible to the device before moving tail. */
+	rte_wmb();
+	RNP_REG_WR(rxq->rx_tailreg, 0, rx_id);
+
+	return j;
+}
+
+/*
+ * Simple Rx burst: single-segment packets only, no offload parsing.
+ * Descriptors are polled in groups of RNP_CACHE_FETCH_RX; nb_pkts is
+ * floored to that multiple, so a request for fewer than
+ * RNP_CACHE_FETCH_RX packets returns 0 by design.
+ */
+static __rte_always_inline uint16_t
+rnp_recv_pkts(void *_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	struct rnp_rx_queue *rxq = (struct rnp_rx_queue *)_rxq;
+	struct rnp_rxsw_entry *rx_swbd;
+	uint32_t state_cmd[RNP_CACHE_FETCH_RX];
+	uint32_t pkt_len[RNP_CACHE_FETCH_RX] = {0};
+	volatile struct rnp_rx_desc *rxbd;
+	struct rte_mbuf *nmb;
+	int nb_dd, nb_rx = 0;
+	int i, j;
+
+	/* Nothing to harvest if the queue is stopped or the link is down. */
+	if (unlikely(!rxq->rxq_started || !rxq->rx_link))
+		return 0;
+	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RNP_CACHE_FETCH_RX);
+	rxbd = &rxq->rx_bdr[rxq->rx_tail];
+	rte_prefetch0(rxbd);
+	/* Rearm first so the NIC does not starve while we harvest. */
+	if (rxq->rxrearm_nb > rxq->rx_free_thresh)
+		rnp_refill_rx_ring(rxq);
+
+	if (!(rxbd->wb.qword1.cmd & RNP_CMD_DD))
+		return 0;
+
+	rx_swbd = &rxq->sw_ring[rxq->rx_tail];
+	for (i = 0; i < nb_pkts;
+			i += RNP_CACHE_FETCH_RX, rxbd += RNP_CACHE_FETCH_RX,
+			rx_swbd += RNP_CACHE_FETCH_RX) {
+		for (j = 0; j < RNP_CACHE_FETCH_RX; j++)
+			state_cmd[j] = rxbd[j].wb.qword1.cmd;
+		/* Acquire fence: order the DD reads above against the
+		 * length/payload reads below.
+		 */
+		rte_atomic_thread_fence(rte_memory_order_acquire);
+
+		/* Count leading descriptors with DD set in this group.
+		 * NOTE(review): rte_cpu_to_le_16() on a 32-bit cmd word
+		 * assumes DD sits in the low 16 bits -- verify on
+		 * big-endian targets.
+		 */
+		for (nb_dd = 0; nb_dd < RNP_CACHE_FETCH_RX &&
+				(state_cmd[nb_dd] & rte_cpu_to_le_16(RNP_CMD_DD));
+				nb_dd++)
+			;
+		for (j = 0; j < nb_dd; j++)
+			pkt_len[j] = rxbd[j].wb.qword1.lens;
+
+		/* Finalize mbuf metadata for each completed descriptor. */
+		for (j = 0; j < nb_dd; ++j) {
+			nmb = rx_swbd[j].mbuf;
+
+			nmb->data_off = RTE_PKTMBUF_HEADROOM;
+			nmb->port = rxq->attr.port_id;
+			nmb->data_len = pkt_len[j];
+			nmb->pkt_len = pkt_len[j];
+			nmb->packet_type = 0;
+			nmb->ol_flags = 0;
+			nmb->nb_segs = 1;
+		}
+		for (j = 0; j < nb_dd; ++j) {
+			rx_pkts[i + j] = rx_swbd[j].mbuf;
+			rx_swbd[j].mbuf = NULL;
+		}
+
+		nb_rx += nb_dd;
+		rxq->nb_rx_free -= nb_dd;
+		/* Partial group means hardware has no more completions. */
+		if (nb_dd != RNP_CACHE_FETCH_RX)
+			break;
+	}
+	rxq->rx_tail = (rxq->rx_tail + nb_rx) & rxq->attr.nb_desc_mask;
+	rxq->rxrearm_nb = rxq->rxrearm_nb + nb_rx;
+
+	return nb_rx;
+}
+
+/* Select the Rx burst handler; only the simple path exists so far. */
+int rnp_rx_func_select(struct rte_eth_dev *dev)
+{
+	dev->rx_pkt_burst = rnp_recv_pkts;
+
+	return 0;
+}
+
+/* Tx burst is not implemented yet; plug in the ethdev dummy handlers
+ * so a stray call fails safely instead of dereferencing NULL.
+ */
+int rnp_tx_func_select(struct rte_eth_dev *dev)
+{
+	dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
+	dev->tx_pkt_prepare = rte_eth_pkt_burst_dummy;
+
+	return 0;
+}
diff --git a/drivers/net/rnp/rnp_rxtx.h b/drivers/net/rnp/rnp_rxtx.h
index 94e1f06..39e5184 100644
--- a/drivers/net/rnp/rnp_rxtx.h
+++ b/drivers/net/rnp/rnp_rxtx.h
@@ -63,6 +63,9 @@ struct rnp_rx_queue {
 	uint16_t rx_free_thresh; /* rx free desc desource thresh */
 	uint16_t rx_tail;
 
+	uint16_t rxrearm_start;
+	uint16_t rxrearm_nb;
+
 	uint32_t nodesc_tm_thresh; /* rx queue no desc timeout thresh */
 	uint8_t rx_deferred_start; /* do not start queue with dev_start(). */
 	uint8_t rxq_started; /* rx queue is started */
@@ -128,5 +131,7 @@ int rnp_tx_queue_setup(struct rte_eth_dev *dev,
 		       const struct rte_eth_txconf *tx_conf);
 int rnp_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx);
 int rnp_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx);
+int rnp_rx_func_select(struct rte_eth_dev *dev);
+int rnp_tx_func_select(struct rte_eth_dev *dev);
 
 #endif /* _RNP_RXTX_H_ */
-- 
1.8.3.1


  parent reply	other threads:[~2025-02-08  2:45 UTC|newest]

Thread overview: 29+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-02-08  2:43 [PATCH v7 00/28] [v6]drivers/net Add Support mucse N10 Pmd Driver Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 01/28] net/rnp: add skeleton Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 02/28] net/rnp: add ethdev probe and remove Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 03/28] net/rnp: add log Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 04/28] net/rnp: support mailbox basic operate Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 05/28] net/rnp: add device init and uninit Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 06/28] net/rnp: add get device information operation Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 07/28] net/rnp: add support mac promisc mode Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 08/28] net/rnp: add queue setup and release operations Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 09/28] net/rnp: add queue stop and start operations Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 10/28] net/rnp: add support device start stop operations Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 11/28] net/rnp: add RSS support operations Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 12/28] net/rnp: add support link update operations Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 13/28] net/rnp: add support link setup operations Wenbo Cao
2025-02-08  2:43 ` Wenbo Cao [this message]
2025-02-08  2:43 ` [PATCH v7 15/28] net/rnp: add Tx burst simple support Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 16/28] net/rnp: add MTU set operation Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 17/28] net/rnp: add Rx scatter segment version Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 18/28] net/rnp: add Tx multiple " Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 19/28] net/rnp: add support basic stats operation Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 20/28] net/rnp: add support xstats operation Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 21/28] net/rnp: add unicast MAC filter operation Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 22/28] net/rnp: add supported packet types Wenbo Cao
2025-02-08  2:44 ` [PATCH v7 23/28] net/rnp: add support Rx checksum offload Wenbo Cao
2025-02-08  2:44 ` [PATCH v7 24/28] net/rnp: add support Tx TSO offload Wenbo Cao
2025-02-08  2:44 ` [PATCH v7 25/28] net/rnp: support VLAN offloads Wenbo Cao
2025-02-08  2:44 ` [PATCH v7 26/28] net/rnp: add support VLAN filters operations Wenbo Cao
2025-02-08  2:44 ` [PATCH v7 27/28] net/rnp: add queue info operation Wenbo Cao
2025-02-08  2:44 ` [PATCH v7 28/28] net/rnp: support Rx/Tx burst mode info Wenbo Cao

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1738982645-34550-15-git-send-email-caowenbo@mucse.com \
    --to=caowenbo@mucse.com \
    --cc=andrew.rybchenko@oktetlabs.ru \
    --cc=dev@dpdk.org \
    --cc=ferruh.yigit@amd.com \
    --cc=stephen@networkplumber.org \
    --cc=thomas@monjalon.net \
    --cc=yaojun@mucse.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).