From: "WanRenyong" <wanry@yunsilicon.com>
To: <dev@dpdk.org>
Cc: <ferruh.yigit@amd.com>, <thomas@monjalon.net>,
<andrew.rybchenko@oktetlabs.ru>, <qianr@yunsilicon.com>,
<nana@yunsilicon.com>, <zhangxx@yunsilicon.com>, <xudw@yunsilicon.com>,
<jacky@yunsilicon.com>, <weihg@yunsilicon.com>
Subject: [PATCH v4 11/15] net/xsc: add ethdev Rx burst
Date: Fri, 03 Jan 2025 23:04:28 +0800 [thread overview]
Message-ID: <20250103150427.1529663-12-wanry@yunsilicon.com> (raw)
In-Reply-To: <20250103150404.1529663-1-wanry@yunsilicon.com>
Implement the xsc ethdev Rx burst function: poll the completion queue
for software-owned CQEs, convert CQE metadata into mbuf fields, refill
each consumed ring slot with a newly allocated mbuf, and ring the CQ
and RQ doorbells once the Rx free threshold is reached.
Signed-off-by: WanRenyong <wanry@yunsilicon.com>
Signed-off-by: Xiaoxiong Zhang <zhangxx@yunsilicon.com>
---
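Note for reviewers: with this patch dev->rx_pkt_burst is wired up at
device start, so received packets flow through the standard ethdev API.
As a minimal illustration (port_id and queue_id are hypothetical, not
part of this patch), an application polls the queue like this:

	struct rte_mbuf *pkts[32];
	uint16_t i, n;

	n = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
	for (i = 0; i < n; i++)
		rte_pktmbuf_free(pkts[i]); /* or hand off for processing */
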
drivers/net/xsc/xsc_ethdev.c | 2 +
drivers/net/xsc/xsc_rx.c | 174 +++++++++++++++++++++++++++++++++++
drivers/net/xsc/xsc_rx.h | 1 +
drivers/net/xsc/xsc_rxtx.h | 13 +++
4 files changed, 190 insertions(+)
diff --git a/drivers/net/xsc/xsc_ethdev.c b/drivers/net/xsc/xsc_ethdev.c
index c5ee079d4a..00bd617c3e 100644
--- a/drivers/net/xsc/xsc_ethdev.c
+++ b/drivers/net/xsc/xsc_ethdev.c
@@ -336,6 +336,8 @@ xsc_ethdev_start(struct rte_eth_dev *dev)
dev->data->dev_started = 1;
rte_wmb();
+ dev->rx_pkt_burst = xsc_rx_burst;
+
ret = xsc_ethdev_enable(dev);
return 0;
diff --git a/drivers/net/xsc/xsc_rx.c b/drivers/net/xsc/xsc_rx.c
index 2081f3b619..58a9cc2f26 100644
--- a/drivers/net/xsc/xsc_rx.c
+++ b/drivers/net/xsc/xsc_rx.c
@@ -13,6 +13,180 @@
#define XSC_MAX_RECV_LEN 9800
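+
+/*
+ * Translate CQE metadata into mbuf fields: the receiving port and, when
+ * RSS is enabled, the hash value carried in the CQE's vni field.
+ */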
+static inline void
+xsc_cq_to_mbuf(struct xsc_rxq_data *rxq, struct rte_mbuf *pkt,
+ volatile struct xsc_cqe *cqe)
+{
+ uint32_t rss_hash_res = 0;
+
+ pkt->port = rxq->port_id;
+ if (rxq->rss_hash) {
+ rss_hash_res = rte_be_to_cpu_32(cqe->vni);
+ if (rss_hash_res) {
+ pkt->hash.rss = rss_hash_res;
+ pkt->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
+ }
+ }
+}
+
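+/*
+ * Poll the CQ at the current index: return the message length of a
+ * software-owned CQE, or 0 while the CQE still belongs to hardware.
+ * Error CQEs are counted in rx_errors and then consumed.
+ */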
+static inline int
+xsc_rx_poll_len(struct xsc_rxq_data *rxq, volatile struct xsc_cqe *cqe)
+{
+ int ret;
+
+ ret = xsc_check_cqe_own(cqe, rxq->cqe_n, rxq->cq_ci);
+ if (unlikely(ret == XSC_CQE_OWNER_HW))
+ return 0;
+ if (unlikely(ret == XSC_CQE_OWNER_ERR))
+ ++rxq->stats.rx_errors;
+
+ rxq->cq_ci += 1;
+ return rte_le_to_cpu_32(cqe->msg_len);
+}
+
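+/* Propagate packet metadata from the consumed segment to its replacement. */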
+static __rte_always_inline void
+xsc_pkt_info_sync(struct rte_mbuf *rep, struct rte_mbuf *seg)
+{
+ if (rep != NULL && seg != NULL) {
+ rep->data_len = seg->data_len;
+ rep->pkt_len = seg->pkt_len;
+ rep->data_off = seg->data_off;
+ rep->port = seg->port;
+ }
+}
+
+uint16_t
+xsc_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct xsc_rxq_data *rxq = dpdk_rxq;
+ const uint32_t wqe_m = rxq->wqe_m;
+ const uint32_t cqe_m = rxq->cqe_m;
+ const uint32_t sge_n = rxq->sge_n;
+ struct rte_mbuf *pkt = NULL;
+ struct rte_mbuf *seg = NULL;
+ volatile struct xsc_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_m];
+ uint32_t nb_pkts = 0;
+ uint64_t nb_bytes = 0;
+ uint32_t rq_ci = rxq->rq_ci;
+ int len = 0;
+ uint32_t cq_ci_two = 0;
+ int valid_cqe_num = 0;
+ int cqe_msg_len = 0;
+ volatile struct xsc_cqe_u64 *cqe_u64 = NULL;
+ struct rte_mbuf *rep;
+
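+ /*
+ * One iteration per packet: consume a CQE, hand the filled mbuf to
+ * the caller and recycle a freshly allocated mbuf into the ring.
+ */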
+ while (pkts_n) {
+ uint32_t idx = rq_ci & wqe_m;
+ volatile struct xsc_wqe_data_seg *wqe =
+ &((volatile struct xsc_wqe_data_seg *)rxq->wqes)[idx << sge_n];
+
+ seg = (*rxq->elts)[idx];
+ rte_prefetch0(cqe);
+ rte_prefetch0(wqe);
+
+ rep = rte_mbuf_raw_alloc(seg->pool);
+ if (unlikely(rep == NULL)) {
+ ++rxq->stats.rx_nombuf;
+ break;
+ }
+
+ if (!pkt) {
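+ /*
+ * Start of a new packet: reuse the second CQE from the previous
+ * two-CQE fetch if one is pending; otherwise, on an even CQ index,
+ * fetch two CQEs at once, falling back to a single-CQE poll.
+ */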
+ if (valid_cqe_num) {
+ cqe = cqe + 1;
+ len = cqe_msg_len;
+ valid_cqe_num = 0;
+ } else if ((rxq->cq_ci % 2 == 0) && (pkts_n > 1)) {
+ cq_ci_two = (rxq->cq_ci & rxq->cqe_m) / 2;
+ cqe_u64 = &(*rxq->cqes_u64)[cq_ci_two];
+ cqe = (volatile struct xsc_cqe *)cqe_u64;
+ len = xsc_rx_poll_len(rxq, cqe);
+ if (len > 0) {
+ cqe_msg_len = xsc_rx_poll_len(rxq, cqe + 1);
+ if (cqe_msg_len > 0)
+ valid_cqe_num = 1;
+ }
+ } else {
+ cqe = &(*rxq->cqes)[rxq->cq_ci & rxq->cqe_m];
+ len = xsc_rx_poll_len(rxq, cqe);
+ }
+
+ if (len <= 0) {
+ rte_mbuf_raw_free(rep);
+ break;
+ }
+
+ if (len > rte_pktmbuf_data_len(seg)) {
+ rte_mbuf_raw_free(rep);
+ pkt = NULL;
+ ++rq_ci;
+ continue;
+ }
+
+ pkt = seg;
+ pkt->ol_flags &= RTE_MBUF_F_EXTERNAL;
+ xsc_cq_to_mbuf(rxq, pkt, cqe);
+
+ if (rxq->crc_present)
+ len -= RTE_ETHER_CRC_LEN;
+ rte_pktmbuf_pkt_len(pkt) = len;
+ }
+
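+ /* Swap the new mbuf into the ring slot the packet came from. */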
+ xsc_pkt_info_sync(rep, seg);
+ (*rxq->elts)[idx] = rep;
+
+ /* Fill wqe */
+ wqe->va = rte_cpu_to_le_64(rte_pktmbuf_iova(rep));
+ rte_pktmbuf_data_len(seg) = len;
+ nb_bytes += rte_pktmbuf_pkt_len(pkt);
+
+ *(pkts++) = pkt;
+ pkt = NULL;
+ --pkts_n;
+ ++nb_pkts;
+ ++rq_ci;
+ }
+
+ if (unlikely(nb_pkts == 0 && rq_ci == rxq->rq_ci))
+ return 0;
+
+ rxq->rq_ci = rq_ci;
+ rxq->nb_rx_hold += nb_pkts;
+
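+ /*
+ * Batch doorbell writes: ring the CQ and RQ doorbells only after
+ * rx_free_thresh packets have been consumed since the last ring.
+ */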
+ if (rxq->nb_rx_hold >= rxq->rx_free_thresh) {
+ union xsc_cq_doorbell cq_db = {
+ .cq_data = 0
+ };
+ cq_db.next_cid = rxq->cq_ci;
+ cq_db.cq_num = rxq->cqn;
+
+ union xsc_recv_doorbell rq_db = {
+ .recv_data = 0
+ };
+ rq_db.next_pid = (rxq->rq_ci << sge_n);
+ rq_db.qp_num = rxq->qpn;
+
+ rte_write32(rte_cpu_to_le_32(cq_db.cq_data), rxq->cq_db);
+ rte_write32(rte_cpu_to_le_32(rq_db.recv_data), rxq->rq_db);
+ rxq->nb_rx_hold = 0;
+ }
+
+ rxq->stats.rx_pkts += nb_pkts;
+ rxq->stats.rx_bytes += nb_bytes;
+
+ return nb_pkts;
+}
+
static void
xsc_rxq_initialize(struct xsc_dev *xdev, struct xsc_rxq_data *rxq_data)
{
diff --git a/drivers/net/xsc/xsc_rx.h b/drivers/net/xsc/xsc_rx.h
index e24b1a8829..90fbb89197 100644
--- a/drivers/net/xsc/xsc_rx.h
+++ b/drivers/net/xsc/xsc_rx.h
@@ -56,6 +56,7 @@ struct __rte_cache_aligned xsc_rxq_data {
uint16_t rsv1:11;
};
+uint16_t xsc_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
int xsc_rxq_elts_alloc(struct xsc_rxq_data *rxq_data);
int xsc_rxq_rss_obj_new(struct xsc_ethdev_priv *priv, uint16_t port_id);
void xsc_rxq_rss_obj_release(struct xsc_dev *xdev, struct xsc_rxq_data *rxq_data);
diff --git a/drivers/net/xsc/xsc_rxtx.h b/drivers/net/xsc/xsc_rxtx.h
index 2771efafc6..fa068f8b29 100644
--- a/drivers/net/xsc/xsc_rxtx.h
+++ b/drivers/net/xsc/xsc_rxtx.h
@@ -175,4 +175,17 @@ struct xsc_rx_cq_info {
uint16_t cqe_n;
};
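+/*
+ * Classify CQE ownership: hardware-owned while the owner bit does not
+ * match the phase derived from ci; a software-owned CQE with an error
+ * flag and non-positive length is reported as an error.
+ */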
+static __rte_always_inline int
+xsc_check_cqe_own(volatile struct xsc_cqe *cqe, const uint16_t cqe_n, const uint16_t ci)
+{
+ if (unlikely((cqe->owner & XSC_CQE_OWNER_MASK) != ((ci >> cqe_n) & XSC_CQE_OWNER_MASK)))
+ return XSC_CQE_OWNER_HW;
+
+ rte_io_rmb();
+ if (cqe->msg_len <= 0 && cqe->is_error)
+ return XSC_CQE_OWNER_ERR;
+
+ return XSC_CQE_OWNER_SW;
+}
+
#endif /* _XSC_RXTX_H_ */
--
2.25.1