From: "WanRenyong" <wanry@yunsilicon.com>
To: <dev@dpdk.org>
Cc: <ferruh.yigit@amd.com>, <thomas@monjalon.net>,
<andrew.rybchenko@oktetlabs.ru>, <qianr@yunsilicon.com>,
	<nana@yunsilicon.com>, <zhangxx@yunsilicon.com>, <xudw@yunsilicon.com>,
<jacky@yunsilicon.com>, <weihg@yunsilicon.com>
Subject: [PATCH v4 12/15] net/xsc: add ethdev Tx burst
Date: Fri, 03 Jan 2025 23:04:31 +0800
Message-ID: <20250103150429.1529663-13-wanry@yunsilicon.com>
In-Reply-To: <20250103150404.1529663-1-wanry@yunsilicon.com>
Implement the xsc ethdev Tx burst function: reap Tx completion queue
entries, build a WQE (control segment plus one data segment per mbuf
segment) for each packet, and ring the send queue doorbell. Also
document the L3/L4 and inner L3/L4 checksum offloads in the xsc
feature list.
Signed-off-by: WanRenyong <wanry@yunsilicon.com>
Signed-off-by: Dongwei Xu <xudw@yunsilicon.com>
---
doc/guides/nics/features/xsc.ini | 4 +
drivers/net/xsc/xsc_ethdev.c | 1 +
drivers/net/xsc/xsc_tx.c | 228 +++++++++++++++++++++++++++++++
drivers/net/xsc/xsc_tx.h | 1 +
4 files changed, 234 insertions(+)
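
The Tx rings in this patch (elts, WQEs, CQEs) are power-of-two sized and
indexed by free-running counters masked with *_m, so stores and frees are
split in two at the wrap point. Below is a minimal standalone sketch of
that two-chunk wraparound store, modeled on xsc_tx_elts_store(); the names
in it are illustrative only, not driver API:

/* Model of the power-of-two ring arithmetic used throughout this patch. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 8                     /* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

static void *ring[RING_SIZE];

/* Store pkts_n pointers starting at a free-running head index,
 * splitting the copy where the ring wraps (cf. xsc_tx_elts_store).
 */
static void ring_store(uint16_t head, void **pkts, uint32_t pkts_n)
{
	uint32_t part = RING_SIZE - (head & RING_MASK); /* slots before wrap */

	if (part > pkts_n)
		part = pkts_n;
	memcpy(&ring[head & RING_MASK], pkts, part * sizeof(void *));
	if (part < pkts_n)              /* wrapped: continue from slot 0 */
		memcpy(&ring[0], pkts + part, (pkts_n - part) * sizeof(void *));
}

int main(void)
{
	int vals[5] = {0, 1, 2, 3, 4};
	void *pkts[5];
	int i;

	for (i = 0; i < 5; i++)
		pkts[i] = &vals[i];
	ring_store(6, pkts, 5); /* head 6 wraps: fills slots 6,7 then 0,1,2 */
	for (i = 0; i < RING_SIZE; i++)
		printf("slot %d: %s\n", i, ring[i] ? "filled" : "empty");
	return 0;
}
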
diff --git a/doc/guides/nics/features/xsc.ini b/doc/guides/nics/features/xsc.ini
index bdeb7a984b..772c6418c4 100644
--- a/doc/guides/nics/features/xsc.ini
+++ b/doc/guides/nics/features/xsc.ini
@@ -7,6 +7,10 @@
RSS hash = Y
RSS key update = Y
RSS reta update = Y
+L3 checksum offload = Y
+L4 checksum offload = Y
+Inner L3 checksum = Y
+Inner L4 checksum = Y
Linux = Y
ARMv8 = Y
x86-64 = Y
diff --git a/drivers/net/xsc/xsc_ethdev.c b/drivers/net/xsc/xsc_ethdev.c
index 00bd617c3e..0c49170313 100644
--- a/drivers/net/xsc/xsc_ethdev.c
+++ b/drivers/net/xsc/xsc_ethdev.c
@@ -337,6 +337,7 @@ xsc_ethdev_start(struct rte_eth_dev *dev)
rte_wmb();
dev->rx_pkt_burst = xsc_rx_burst;
+ dev->tx_pkt_burst = xsc_tx_burst;
ret = xsc_ethdev_enable(dev);
diff --git a/drivers/net/xsc/xsc_tx.c b/drivers/net/xsc/xsc_tx.c
index 56daf6b4c6..406fa95381 100644
--- a/drivers/net/xsc/xsc_tx.c
+++ b/drivers/net/xsc/xsc_tx.c
@@ -124,3 +124,231 @@ xsc_txq_elts_free(struct xsc_txq_data *txq_data)
}
PMD_DRV_LOG(DEBUG, "Port %u txq %u free elts", txq_data->port_id, txq_data->idx);
}
+
+static __rte_always_inline void
+xsc_tx_elts_flush(struct xsc_txq_data *__rte_restrict txq, uint16_t tail)
+{
+ uint16_t elts_n = tail - txq->elts_tail;
+ uint32_t free_n;
+
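+	/* Free in at most two chunks so each bulk free stays within the ring */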
+ do {
+ free_n = txq->elts_s - (txq->elts_tail & txq->elts_m);
+ free_n = RTE_MIN(free_n, elts_n);
+ rte_pktmbuf_free_bulk(&txq->elts[txq->elts_tail & txq->elts_m], free_n);
+ txq->elts_tail += free_n;
+ elts_n -= free_n;
+ } while (elts_n > 0);
+}
+
+static void
+xsc_tx_cqes_handle(struct xsc_txq_data *__rte_restrict txq)
+{
+ uint32_t count = XSC_TX_COMP_CQE_HANDLE_MAX;
+ volatile struct xsc_cqe *last_cqe = NULL;
+ volatile struct xsc_cqe *cqe;
+ bool doorbell = false;
+ int ret;
+ uint16_t tail;
+
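+	/* Poll up to XSC_TX_COMP_CQE_HANDLE_MAX CQEs, stopping at the first one still owned by HW */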
+ do {
+ cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
+ ret = xsc_check_cqe_own(cqe, txq->cqe_n, txq->cq_ci);
+ if (unlikely(ret != XSC_CQE_OWNER_SW)) {
+ if (likely(ret != XSC_CQE_OWNER_ERR))
+ /* No new CQEs in completion queue. */
+ break;
+ doorbell = true;
+ ++txq->cq_ci;
+ txq->cq_pi = txq->cq_ci;
+ last_cqe = NULL;
+ ++txq->stats.tx_errors;
+ continue;
+ }
+
+ doorbell = true;
+ ++txq->cq_ci;
+ last_cqe = cqe;
+ } while (--count > 0);
+
+ if (likely(doorbell)) {
+ union xsc_cq_doorbell cq_db = {
+ .cq_data = 0
+ };
+ cq_db.next_cid = txq->cq_ci;
+ cq_db.cq_num = txq->cqn;
+
+ /* Ring doorbell */
+ rte_write32(rte_cpu_to_le_32(cq_db.cq_data), txq->cq_db);
+
+ /* Release completed elts */
+ if (likely(last_cqe != NULL)) {
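+			/* fcqs[] holds the elts_head snapshot taken when this completion was requested */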
+ txq->wqe_pi = rte_le_to_cpu_16(last_cqe->wqe_id) >> txq->wqe_ds_n;
+ tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m];
+ if (likely(tail != txq->elts_tail))
+ xsc_tx_elts_flush(txq, tail);
+ }
+ }
+}
+
+static __rte_always_inline void
+xsc_tx_wqe_ctrl_seg_init(struct xsc_txq_data *__rte_restrict txq,
+ struct rte_mbuf *__rte_restrict mbuf,
+ struct xsc_wqe *__rte_restrict wqe)
+{
+ struct xsc_send_wqe_ctrl_seg *cs = &wqe->cseg;
+ int i = 0;
+ int ds_max = (1 << txq->wqe_ds_n) - 1;
+
+ cs->msg_opcode = XSC_OPCODE_RAW;
+ cs->wqe_id = rte_cpu_to_le_16(txq->wqe_ci << txq->wqe_ds_n);
+ cs->has_pph = 0;
+	/* Clear stale dseg seg_len values left by the previous use of this WQE */
+ if (cs->ds_data_num > 1 && cs->ds_data_num <= ds_max) {
+ for (i = 1; i < cs->ds_data_num; i++)
+ wqe->dseg[i].seg_len = 0;
+ }
+
+ cs->ds_data_num = mbuf->nb_segs;
+ if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
+ cs->csum_en = 0x2;
+ else
+ cs->csum_en = 0;
+
+ if (txq->tso_en == 1 && (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
+ cs->has_pph = 0;
+ cs->so_type = 1;
+ cs->so_hdr_len = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
+ cs->so_data_size = rte_cpu_to_le_16(mbuf->tso_segsz);
+ }
+
+ cs->msg_len = rte_cpu_to_le_32(rte_pktmbuf_pkt_len(mbuf));
+ if (unlikely(cs->msg_len == 0))
+ cs->msg_len = rte_cpu_to_le_32(rte_pktmbuf_data_len(mbuf));
+
+	/* Do not generate a CQE for every packet */
+ cs->ce = 0;
+}
+
+static __rte_always_inline void
+xsc_tx_wqe_data_seg_init(struct rte_mbuf *mbuf, struct xsc_wqe *wqe)
+{
+ uint16_t i, nb_segs = mbuf->nb_segs;
+ uint32_t data_len;
+ rte_iova_t iova;
+ struct xsc_wqe_data_seg *dseg;
+
+ for (i = 0; i < nb_segs; ++i) {
+ dseg = &wqe->dseg[i];
+ iova = rte_pktmbuf_iova(mbuf);
+ data_len = rte_pktmbuf_data_len(mbuf);
+
+ dseg->in_line = 0;
+ dseg->seg_len = rte_cpu_to_le_32(data_len);
+ dseg->lkey = 0;
+ dseg->va = rte_cpu_to_le_64(iova);
+ mbuf = mbuf->next;
+ }
+}
+
+static __rte_always_inline struct xsc_wqe *
+xsc_tx_wqes_fill(struct xsc_txq_data *__rte_restrict txq,
+ struct rte_mbuf **__rte_restrict pkts,
+ uint32_t pkts_n)
+{
+ uint32_t i;
+ struct xsc_wqe *wqe = NULL;
+ struct rte_mbuf *mbuf;
+
+ for (i = 0; i < pkts_n; i++) {
+ rte_prefetch0(pkts[i]);
+ mbuf = pkts[i];
+ wqe = (struct xsc_wqe *)((struct xsc_send_wqe_ctrl_seg *)txq->wqes +
+ (txq->wqe_ci & txq->wqe_m) * (1 << txq->wqe_ds_n));
+
+ /* Init wqe ctrl seg */
+ xsc_tx_wqe_ctrl_seg_init(txq, mbuf, wqe);
+ /* Init wqe data segs */
+ xsc_tx_wqe_data_seg_init(mbuf, wqe);
+ ++txq->wqe_ci;
+ txq->stats.tx_bytes += rte_pktmbuf_pkt_len(mbuf);
+ }
+
+ return wqe;
+}
+
+static __rte_always_inline void
+xsc_tx_doorbell_ring(volatile uint32_t *db, uint32_t index,
+ uint32_t qpn, uint16_t ds_n)
+{
+ union xsc_send_doorbell tx_db;
+
+ tx_db.next_pid = index << ds_n;
+ tx_db.qp_num = qpn;
+
+ rte_write32(rte_cpu_to_le_32(tx_db.send_data), db);
+}
+
+static __rte_always_inline void
+xsc_tx_elts_store(struct xsc_txq_data *__rte_restrict txq,
+ struct rte_mbuf **__rte_restrict pkts,
+ uint32_t pkts_n)
+{
+ uint32_t part;
+ struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
+
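+	/* Copy in up to two chunks so the write never crosses the ring wrap */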
+ part = txq->elts_s - (txq->elts_head & txq->elts_m);
+ rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
+ (void *)pkts,
+ RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
+
+ if (unlikely(part < pkts_n))
+ rte_memcpy((void *)elts, (void *)(pkts + part),
+ (pkts_n - part) * sizeof(struct rte_mbuf *));
+}
+
+uint16_t
+xsc_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct xsc_txq_data *txq = dpdk_txq;
+ uint32_t tx_n, remain_n = pkts_n;
+ uint16_t idx, elts_free, wqe_free;
+ uint16_t elts_head;
+ struct xsc_wqe *last_wqe;
+
+ if (unlikely(!pkts_n))
+ return 0;
+
+ do {
+ xsc_tx_cqes_handle(txq);
+
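+		/* Each packet consumes one elts entry and one WQE */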
+ elts_free = txq->elts_s - (uint16_t)(txq->elts_head - txq->elts_tail);
+ wqe_free = txq->wqe_s - ((uint16_t)((txq->wqe_ci << txq->wqe_ds_n) -
+ (txq->wqe_pi << txq->wqe_ds_n)) >> txq->wqe_ds_n);
+ if (unlikely(elts_free == 0 || wqe_free == 0))
+ break;
+
+ /* Fill in WQEs */
+ tx_n = RTE_MIN(remain_n, wqe_free);
+ idx = pkts_n - remain_n;
+ last_wqe = xsc_tx_wqes_fill(txq, &pkts[idx], tx_n);
+ remain_n -= tx_n;
+ last_wqe->cseg.ce = 1;
+
+ /* Update free-cqs, elts_comp */
+ elts_head = txq->elts_head;
+ elts_head += tx_n;
+ if ((uint16_t)(elts_head - txq->elts_comp) > 0) {
+ txq->elts_comp = elts_head;
+ txq->fcqs[txq->cq_pi++ & txq->cqe_m] = elts_head;
+ }
+
+ /* Ring tx doorbell */
+ xsc_tx_doorbell_ring(txq->qp_db, txq->wqe_ci, txq->qpn, txq->wqe_ds_n);
+
+ xsc_tx_elts_store(txq, &pkts[idx], tx_n);
+ txq->elts_head += tx_n;
+ } while (remain_n > 0);
+
+ txq->stats.tx_pkts += (pkts_n - remain_n);
+ return pkts_n - remain_n;
+}
diff --git a/drivers/net/xsc/xsc_tx.h b/drivers/net/xsc/xsc_tx.h
index 208f1c8490..88419dd3a0 100644
--- a/drivers/net/xsc/xsc_tx.h
+++ b/drivers/net/xsc/xsc_tx.h
@@ -52,6 +52,7 @@ struct __rte_cache_aligned xsc_txq_data {
struct rte_mbuf *elts[]; /* Storage for queued packets, for free */
};
+uint16_t xsc_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n);
int xsc_txq_obj_new(struct xsc_dev *xdev, struct xsc_txq_data *txq_data,
uint64_t offloads, uint16_t idx);
void xsc_txq_elts_alloc(struct xsc_txq_data *txq_data);
--
2.25.1