From: Wenbo Cao <caowenbo@mucse.com>
To: thomas@monjalon.net, Wenbo Cao <caowenbo@mucse.com>
Cc: stephen@networkplumber.org, dev@dpdk.org, ferruh.yigit@amd.com,
andrew.rybchenko@oktetlabs.ru, yaojun@mucse.com
Subject: [PATCH v7 17/28] net/rnp: add Rx scatter segment version
Date: Sat, 8 Feb 2025 10:43:54 +0800 [thread overview]
Message-ID: <1738982645-34550-18-git-send-email-caowenbo@mucse.com> (raw)
In-Reply-To: <1738982645-34550-1-git-send-email-caowenbo@mucse.com>
Add support for scattered (multi-segment) packet receive.
Signed-off-by: Wenbo Cao <caowenbo@mucse.com>
---
doc/guides/nics/features/rnp.ini | 2 +
doc/guides/nics/rnp.rst | 2 +
drivers/net/rnp/rnp_rxtx.c | 131 ++++++++++++++++++++++++++++++++++++++-
drivers/net/rnp/rnp_rxtx.h | 2 +
4 files changed, 135 insertions(+), 2 deletions(-)
diff --git a/doc/guides/nics/features/rnp.ini b/doc/guides/nics/features/rnp.ini
index 6d13370..c68d6fb 100644
--- a/doc/guides/nics/features/rnp.ini
+++ b/doc/guides/nics/features/rnp.ini
@@ -15,5 +15,7 @@ RSS hash = Y
RSS key update = Y
RSS reta update = Y
Inner RSS = Y
+Jumbo frame = Y
+Scattered Rx = Y
Linux = Y
x86-64 = Y
diff --git a/doc/guides/nics/rnp.rst b/doc/guides/nics/rnp.rst
index 9fa7ad9..db64104 100644
--- a/doc/guides/nics/rnp.rst
+++ b/doc/guides/nics/rnp.rst
@@ -17,6 +17,8 @@ Features
- Promiscuous mode
- Link state information
- MTU update
+- Jumbo frames
+- Scatter-Gather IO support
Prerequisites
-------------
diff --git a/drivers/net/rnp/rnp_rxtx.c b/drivers/net/rnp/rnp_rxtx.c
index e8c1444..c80cc8b 100644
--- a/drivers/net/rnp/rnp_rxtx.c
+++ b/drivers/net/rnp/rnp_rxtx.c
@@ -830,7 +830,6 @@ int rnp_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
if (txq->tx_next_rs > txq->attr.nb_desc)
txq->tx_next_rs = txq->tx_rs_thresh - 1;
}
-
txq->tx_tail = i;
rte_wmb();
@@ -839,9 +838,137 @@ int rnp_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
return start;
}
+/**
+ * Rearm one Rx descriptor with a fresh mbuf (slow, per-descriptor path)
+ * and, once enough descriptors have been consumed, advance the rearm
+ * window and ring the hardware tail doorbell.
+ *
+ * @param rxq  Rx queue owning the descriptor ring.
+ * @param rxbd Hardware descriptor to rearm.
+ * @param rxe  Software ring entry paired with @rxbd.
+ * @param bulk_alloc True when the caller refills mbufs in bulk elsewhere;
+ *                   then only the tail-update bookkeeping runs here.
+ * @return 0 on success, -ENOMEM when no replacement mbuf is available.
+ */
+static int
+rnp_rxq_bulk_alloc(struct rnp_rx_queue *rxq,
+		   volatile struct rnp_rx_desc *rxbd,
+		   struct rnp_rxsw_entry *rxe,
+		   bool bulk_alloc)
+{
+	struct rte_mbuf *nmb = NULL;
+	uint16_t update_tail;
+
+	if (!bulk_alloc) {
+		nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
+		if (unlikely(!nmb)) {
+			/* Count the failure; the old mbuf stays mapped. */
+			rte_eth_devices[rxq->attr.port_id].data->
+				rx_mbuf_alloc_failed++;
+			return -ENOMEM;
+		}
+		/* Clear the writeback status (DD) before handing the
+		 * descriptor back to hardware, then install the new buffer.
+		 * (The former `pkt_addr = 0` / `rxe->mbuf = NULL` stores were
+		 * dead — both immediately overwritten below — and removed.)
+		 */
+		rxbd->d.cmd = 0;
+		rxe->mbuf = nmb;
+		rxbd->d.pkt_addr = rnp_get_dma_addr(&rxq->attr, nmb);
+	}
+	if (rxq->rxrearm_nb > rxq->rx_free_thresh) {
+		rxq->rxrearm_nb -= rxq->rx_free_thresh;
+		rxq->rxrearm_start += rxq->rx_free_thresh;
+		if (rxq->rxrearm_start >= rxq->attr.nb_desc)
+			rxq->rxrearm_start = 0;
+		/* Tail points at the last valid descriptor, wrapping at 0. */
+		update_tail = (uint16_t)((rxq->rxrearm_start == 0) ?
+			(rxq->attr.nb_desc - 1) : (rxq->rxrearm_start - 1));
+		/* Ensure descriptor writes are visible before the doorbell. */
+		rte_io_wmb();
+		RNP_REG_WR(rxq->rx_tailreg, 0, update_tail);
+	}
+
+	return 0;
+}
+
+/**
+ * Scattered Rx burst: receives packets that may span multiple
+ * descriptors, chaining the segments into one mbuf list and delivering
+ * the head mbuf once the EOP descriptor is seen.  A partially received
+ * chain is carried over in rxq->pkt_first_seg / pkt_last_seg.
+ *
+ * @param rx_queue Rx queue handle (struct rnp_rx_queue *).
+ * @param rx_pkts  Output array for completed packets.
+ * @param nb_pkts  Maximum number of packets to return.
+ * @return Number of complete packets stored in @rx_pkts.
+ */
+static __rte_always_inline uint16_t
+rnp_scattered_rx(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rnp_rx_queue *rxq = (struct rnp_rx_queue *)rx_queue;
+ volatile struct rnp_rx_desc *bd_ring = rxq->rx_bdr;
+ struct rte_mbuf *first_seg = rxq->pkt_first_seg;
+ struct rte_mbuf *last_seg = rxq->pkt_last_seg;
+ struct rnp_rxsw_entry *sw_ring = rxq->sw_ring;
+ volatile struct rnp_rx_desc *rxbd;
+ volatile struct rnp_rx_desc rxd;
+ struct rnp_rxsw_entry *rxe;
+ struct rte_mbuf *rxm;
+ uint16_t rx_pkt_len;
+ uint16_t nb_rx = 0;
+ uint16_t rx_status;
+ uint16_t rx_id;
+
+ if (unlikely(!rxq->rxq_started || !rxq->rx_link))
+ return 0;
+ rx_id = rxq->rx_tail;
+ while (nb_rx < nb_pkts) {
+ rxbd = &bd_ring[rx_id];
+ rx_status = rxbd->wb.qword1.cmd;
+ /* Stop at the first descriptor hardware has not written back. */
+ if (!(rx_status & rte_cpu_to_le_16(RNP_CMD_DD)))
+ break;
+ /* Order the DD check before reading the descriptor payload. */
+ rte_atomic_thread_fence(rte_memory_order_acquire);
+ rxd = *rxbd;
+ rxe = &sw_ring[rx_id];
+ rxm = rxe->mbuf;
+ /* Rearm this slot with a fresh mbuf before consuming rxm. */
+ if (rnp_rxq_bulk_alloc(rxq, rxbd, rxe, false))
+ break;
+ rx_id = (rx_id + 1) & rxq->attr.nb_desc_mask;
+ rte_prefetch0(sw_ring[rx_id].mbuf);
+ if ((rx_id & 0x3) == 0) {
+ rte_prefetch0(&bd_ring[rx_id]);
+ rte_prefetch0(&sw_ring[rx_id]);
+ }
+ rx_pkt_len = rxd.wb.qword1.lens;
+ rxm->data_len = rx_pkt_len;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ if (!first_seg) {
+ /* First segment of a new packet starts the chain. */
+ first_seg = rxm;
+ first_seg->nb_segs = 1;
+ first_seg->pkt_len = rx_pkt_len;
+ } else {
+ /* Follow-up segment: extend the chain and total length. */
+ first_seg->pkt_len =
+ (uint16_t)(first_seg->pkt_len + rx_pkt_len);
+ first_seg->nb_segs++;
+ last_seg->next = rxm;
+ }
+ rxq->rxrearm_nb++;
+ if (!(rx_status & rte_cpu_to_le_16(RNP_CMD_EOP))) {
+ /* More segments follow; remember the tail of the chain. */
+ last_seg = rxm;
+ continue;
+ }
+ rxm->next = NULL;
+ first_seg->port = rxq->attr.port_id;
+ /* End of packet: the whole multi-segment packet is complete. */
+ rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
+ first_seg->data_off));
+ rx_pkts[nb_rx++] = first_seg;
+ first_seg = NULL;
+ }
+ if (!nb_rx)
+ return 0;
+ /* Persist ring position and any in-progress chain for next call. */
+ rxq->rx_tail = rx_id;
+ rxq->pkt_first_seg = first_seg;
+ rxq->pkt_last_seg = last_seg;
+
+ return nb_rx;
+}
+
+/* Return 0 when the simple (single-segment) Rx burst may be used,
+ * -ENOTSUP when scattered Rx is already active or has been requested
+ * via the RTE_ETH_RX_OFFLOAD_SCATTER offload.
+ */
+static int
+rnp_check_rx_simple_valid(struct rte_eth_dev *dev)
+{
+	bool need_scatter;
+
+	need_scatter = dev->data->scattered_rx ||
+		       (dev->data->dev_conf.rxmode.offloads &
+			RTE_ETH_RX_OFFLOAD_SCATTER);
+
+	return need_scatter ? -ENOTSUP : 0;
+}
+
int rnp_rx_func_select(struct rte_eth_dev *dev)
{
- dev->rx_pkt_burst = rnp_recv_pkts;
+ bool simple_allowed = false;
+
+ simple_allowed = rnp_check_rx_simple_valid(dev) == 0;
+ if (simple_allowed)
+ dev->rx_pkt_burst = rnp_recv_pkts;
+ else
+ dev->rx_pkt_burst = rnp_scattered_rx;
return 0;
}
diff --git a/drivers/net/rnp/rnp_rxtx.h b/drivers/net/rnp/rnp_rxtx.h
index a8fd8d0..973b667 100644
--- a/drivers/net/rnp/rnp_rxtx.h
+++ b/drivers/net/rnp/rnp_rxtx.h
@@ -76,6 +76,8 @@ struct rnp_rx_queue {
uint64_t rx_offloads; /* user set hw offload features */
struct rte_mbuf **free_mbufs; /* rx bulk alloc reserve of free mbufs */
struct rte_mbuf fake_mbuf; /* dummy mbuf */
+ struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
+ struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
};
struct rnp_txsw_entry {
--
1.8.3.1
next prev parent reply other threads:[~2025-02-08 2:46 UTC|newest]
Thread overview: 29+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-02-08 2:43 [PATCH v7 00/28] [v6]drivers/net Add Support mucse N10 Pmd Driver Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 01/28] net/rnp: add skeleton Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 02/28] net/rnp: add ethdev probe and remove Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 03/28] net/rnp: add log Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 04/28] net/rnp: support mailbox basic operate Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 05/28] net/rnp: add device init and uninit Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 06/28] net/rnp: add get device information operation Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 07/28] net/rnp: add support mac promisc mode Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 08/28] net/rnp: add queue setup and release operations Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 09/28] net/rnp: add queue stop and start operations Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 10/28] net/rnp: add support device start stop operations Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 11/28] net/rnp: add RSS support operations Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 12/28] net/rnp: add support link update operations Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 13/28] net/rnp: add support link setup operations Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 14/28] net/rnp: add Rx burst simple support Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 15/28] net/rnp: add Tx " Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 16/28] net/rnp: add MTU set operation Wenbo Cao
2025-02-08 2:43 ` Wenbo Cao [this message]
2025-02-08 2:43 ` [PATCH v7 18/28] net/rnp: add Tx multiple segment version Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 19/28] net/rnp: add support basic stats operation Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 20/28] net/rnp: add support xstats operation Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 21/28] net/rnp: add unicast MAC filter operation Wenbo Cao
2025-02-08 2:43 ` [PATCH v7 22/28] net/rnp: add supported packet types Wenbo Cao
2025-02-08 2:44 ` [PATCH v7 23/28] net/rnp: add support Rx checksum offload Wenbo Cao
2025-02-08 2:44 ` [PATCH v7 24/28] net/rnp: add support Tx TSO offload Wenbo Cao
2025-02-08 2:44 ` [PATCH v7 25/28] net/rnp: support VLAN offloads Wenbo Cao
2025-02-08 2:44 ` [PATCH v7 26/28] net/rnp: add support VLAN filters operations Wenbo Cao
2025-02-08 2:44 ` [PATCH v7 27/28] net/rnp: add queue info operation Wenbo Cao
2025-02-08 2:44 ` [PATCH v7 28/28] net/rnp: support Rx/Tx burst mode info Wenbo Cao
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1738982645-34550-18-git-send-email-caowenbo@mucse.com \
--to=caowenbo@mucse.com \
--cc=andrew.rybchenko@oktetlabs.ru \
--cc=dev@dpdk.org \
--cc=ferruh.yigit@amd.com \
--cc=stephen@networkplumber.org \
--cc=thomas@monjalon.net \
--cc=yaojun@mucse.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).