From: "WanRenyong" <wanry@yunsilicon.com>
To: <dev@dpdk.org>
Subject: [PATCH v4 11/15] net/xsc: add ethdev Rx burst
Date: Fri, 03 Jan 2025 23:04:28 +0800
Message-Id: <20250103150427.1529663-12-wanry@yunsilicon.com>
In-Reply-To: <20250103150404.1529663-1-wanry@yunsilicon.com>
References: <20250103150404.1529663-1-wanry@yunsilicon.com>

Implement the xsc ethdev Rx burst function: poll the completion queue
for new CQEs, translate them into mbufs, replenish the receive ring
with freshly allocated mbufs, and ring the CQ and RQ doorbells once
enough packets are held.

Signed-off-by: WanRenyong
Signed-off-by: Xiaoxiong Zhang
---
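Notes:
    For context, a minimal polling-loop sketch (not part of this patch;
    the function name poll_rx_queue and BURST_SIZE are illustrative
    only) showing how an application reaches xsc_rx_burst() once
    dev->rx_pkt_burst is assigned: rte_eth_rx_burst() dispatches to the
    PMD's burst hook for the given port and queue.

        #include <rte_ethdev.h>
        #include <rte_mbuf.h>

        #define BURST_SIZE 32 /* illustrative burst size */

        static void
        poll_rx_queue(uint16_t port_id, uint16_t queue_id)
        {
                struct rte_mbuf *bufs[BURST_SIZE];
                uint16_t nb_rx, i;

                /* Dispatches to the PMD's rx_pkt_burst hook, i.e.
                 * xsc_rx_burst() for this driver's queues. */
                nb_rx = rte_eth_rx_burst(port_id, queue_id, bufs,
                                         BURST_SIZE);

                /* Consume the packets; here they are simply freed. */
                for (i = 0; i < nb_rx; i++)
                        rte_pktmbuf_free(bufs[i]);
        }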
 drivers/net/xsc/xsc_ethdev.c |   2 +
 drivers/net/xsc/xsc_rx.c     | 174 +++++++++++++++++++++++++++++++++++
 drivers/net/xsc/xsc_rx.h     |   1 +
 drivers/net/xsc/xsc_rxtx.h   |  13 +++
 4 files changed, 190 insertions(+)

diff --git a/drivers/net/xsc/xsc_ethdev.c b/drivers/net/xsc/xsc_ethdev.c
index c5ee079d4a..00bd617c3e 100644
--- a/drivers/net/xsc/xsc_ethdev.c
+++ b/drivers/net/xsc/xsc_ethdev.c
@@ -336,6 +336,8 @@ xsc_ethdev_start(struct rte_eth_dev *dev)
 	dev->data->dev_started = 1;
 	rte_wmb();
 
+	dev->rx_pkt_burst = xsc_rx_burst;
+
 	ret = xsc_ethdev_enable(dev);
 
 	return 0;
diff --git a/drivers/net/xsc/xsc_rx.c b/drivers/net/xsc/xsc_rx.c
index 2081f3b619..58a9cc2f26 100644
--- a/drivers/net/xsc/xsc_rx.c
+++ b/drivers/net/xsc/xsc_rx.c
@@ -13,6 +13,180 @@
 
 #define XSC_MAX_RECV_LEN 9800
 
+static inline void
+xsc_cq_to_mbuf(struct xsc_rxq_data *rxq, struct rte_mbuf *pkt,
+	       volatile struct xsc_cqe *cqe)
+{
+	uint32_t rss_hash_res = 0;
+
+	pkt->port = rxq->port_id;
+	if (rxq->rss_hash) {
+		rss_hash_res = rte_be_to_cpu_32(cqe->vni);
+		if (rss_hash_res) {
+			pkt->hash.rss = rss_hash_res;
+			pkt->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
+		}
+	}
+}
+
+static inline int
+xsc_rx_poll_len(struct xsc_rxq_data *rxq, volatile struct xsc_cqe *cqe)
+{
+	int len;
+
+	do {
+		len = 0;
+		int ret;
+
+		ret = xsc_check_cqe_own(cqe, rxq->cqe_n, rxq->cq_ci);
+		if (unlikely(ret != XSC_CQE_OWNER_SW)) {
+			if (unlikely(ret == XSC_CQE_OWNER_ERR)) {
+				++rxq->stats.rx_errors;
+				if (ret == XSC_CQE_OWNER_HW || ret == -1)
+					return 0;
+			} else {
+				return 0;
+			}
+		}
+
+		rxq->cq_ci += 1;
+		len = rte_le_to_cpu_32(cqe->msg_len);
+		return len;
+	} while (1);
+}
+
+static __rte_always_inline void
+xsc_pkt_info_sync(struct rte_mbuf *rep, struct rte_mbuf *seg)
+{
+	if (rep != NULL && seg != NULL) {
+		rep->data_len = seg->data_len;
+		rep->pkt_len = seg->pkt_len;
+		rep->data_off = seg->data_off;
+		rep->port = seg->port;
+	}
+}
+
+uint16_t
+xsc_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+	struct xsc_rxq_data *rxq = dpdk_rxq;
+	const uint32_t wqe_m = rxq->wqe_m;
+	const uint32_t cqe_m = rxq->cqe_m;
+	const uint32_t sge_n = rxq->sge_n;
+	struct rte_mbuf *pkt = NULL;
+	struct rte_mbuf *seg = NULL;
+	volatile struct xsc_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_m];
+	uint32_t nb_pkts = 0;
+	uint64_t nb_bytes = 0;
+	uint32_t rq_ci = rxq->rq_ci;
+	int len = 0;
+	uint32_t cq_ci_two = 0;
+	int valid_cqe_num = 0;
+	int cqe_msg_len = 0;
+	volatile struct xsc_cqe_u64 *cqe_u64 = NULL;
+	struct rte_mbuf *rep;
+
+	while (pkts_n) {
+		uint32_t idx = rq_ci & wqe_m;
+		volatile struct xsc_wqe_data_seg *wqe =
+			&((volatile struct xsc_wqe_data_seg *)rxq->wqes)[idx << sge_n];
+
+		seg = (*rxq->elts)[idx];
+		rte_prefetch0(cqe);
+		rte_prefetch0(wqe);
+
+		rep = rte_mbuf_raw_alloc(seg->pool);
+		if (unlikely(rep == NULL)) {
+			++rxq->stats.rx_nombuf;
+			break;
+		}
+
+		if (!pkt) {
+			if (valid_cqe_num) {
+				cqe = cqe + 1;
+				len = cqe_msg_len;
+				valid_cqe_num = 0;
+			} else if ((rxq->cq_ci % 2 == 0) && (pkts_n > 1)) {
+				cq_ci_two = (rxq->cq_ci & rxq->cqe_m) / 2;
+				cqe_u64 = &(*rxq->cqes_u64)[cq_ci_two];
+				cqe = (volatile struct xsc_cqe *)cqe_u64;
+				len = xsc_rx_poll_len(rxq, cqe);
+				if (len > 0) {
+					cqe_msg_len = xsc_rx_poll_len(rxq, cqe + 1);
+					if (cqe_msg_len > 0)
+						valid_cqe_num = 1;
+				}
+			} else {
+				cqe = &(*rxq->cqes)[rxq->cq_ci & rxq->cqe_m];
+				len = xsc_rx_poll_len(rxq, cqe);
+			}
+
+			if (!len) {
+				rte_mbuf_raw_free(rep);
+				break;
+			}
+
+			if (len > rte_pktmbuf_data_len(seg)) {
+				rte_mbuf_raw_free(rep);
+				pkt = NULL;
+				++rq_ci;
+				continue;
+			}
+
+			pkt = seg;
+			pkt->ol_flags &= RTE_MBUF_F_EXTERNAL;
+			xsc_cq_to_mbuf(rxq, pkt, cqe);
+
+			if (rxq->crc_present)
+				len -= RTE_ETHER_CRC_LEN;
+			rte_pktmbuf_pkt_len(pkt) = len;
+		}
+
+		xsc_pkt_info_sync(rep, seg);
+		(*rxq->elts)[idx] = rep;
+
+		/* Fill wqe */
+		wqe->va = rte_cpu_to_le_64(rte_pktmbuf_iova(rep));
+		rte_pktmbuf_data_len(seg) = len;
+		nb_bytes += rte_pktmbuf_pkt_len(pkt);
+
+		*(pkts++) = pkt;
+		pkt = NULL;
+		--pkts_n;
+		++nb_pkts;
+		++rq_ci;
+	}
+
+	if (unlikely(nb_pkts == 0 && rq_ci == rxq->rq_ci))
+		return 0;
+
+	rxq->rq_ci = rq_ci;
+	rxq->nb_rx_hold += nb_pkts;
+
+	if (rxq->nb_rx_hold >= rxq->rx_free_thresh) {
+		union xsc_cq_doorbell cq_db = {
+			.cq_data = 0
+		};
+		cq_db.next_cid = rxq->cq_ci;
+		cq_db.cq_num = rxq->cqn;
+
+		union xsc_recv_doorbell rq_db = {
+			.recv_data = 0
+		};
+		rq_db.next_pid = (rxq->rq_ci << sge_n);
+		rq_db.qp_num = rxq->qpn;
+
+		rte_write32(rte_cpu_to_le_32(cq_db.cq_data), rxq->cq_db);
+		rte_write32(rte_cpu_to_le_32(rq_db.recv_data), rxq->rq_db);
+		rxq->nb_rx_hold = 0;
+	}
+
+	rxq->stats.rx_pkts += nb_pkts;
+	rxq->stats.rx_bytes += nb_bytes;
+
+	return nb_pkts;
+}
+
 static void
 xsc_rxq_initialize(struct xsc_dev *xdev, struct xsc_rxq_data *rxq_data)
 {
diff --git a/drivers/net/xsc/xsc_rx.h b/drivers/net/xsc/xsc_rx.h
index e24b1a8829..90fbb89197 100644
--- a/drivers/net/xsc/xsc_rx.h
+++ b/drivers/net/xsc/xsc_rx.h
@@ -56,6 +56,7 @@ struct __rte_cache_aligned xsc_rxq_data {
 	uint16_t rsv1:11;
 };
 
+uint16_t xsc_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
 int xsc_rxq_elts_alloc(struct xsc_rxq_data *rxq_data);
 int xsc_rxq_rss_obj_new(struct xsc_ethdev_priv *priv, uint16_t port_id);
 void xsc_rxq_rss_obj_release(struct xsc_dev *xdev, struct xsc_rxq_data *rxq_data);
diff --git a/drivers/net/xsc/xsc_rxtx.h b/drivers/net/xsc/xsc_rxtx.h
index 2771efafc6..fa068f8b29 100644
--- a/drivers/net/xsc/xsc_rxtx.h
+++ b/drivers/net/xsc/xsc_rxtx.h
@@ -175,4 +175,17 @@ struct xsc_rx_cq_info {
 	uint16_t cqe_n;
 };
 
+static __rte_always_inline int
+xsc_check_cqe_own(volatile struct xsc_cqe *cqe, const uint16_t cqe_n, const uint16_t ci)
+{
+	if (unlikely(((cqe->owner & XSC_CQE_OWNER_MASK) != ((ci >> cqe_n) & XSC_CQE_OWNER_MASK))))
+		return XSC_CQE_OWNER_HW;
+
+	rte_io_rmb();
+	if (cqe->msg_len <= 0 && cqe->is_error)
+		return XSC_CQE_OWNER_ERR;
+
+	return XSC_CQE_OWNER_SW;
+}
+
 #endif /* _XSC_RXTX_H_ */
-- 
2.25.1