From mboxrd@z Thu Jan 1 00:00:00 1970
From: longli@linuxonhyperv.com
To: Ferruh Yigit, Andrew Rybchenko
Cc: dev@dpdk.org, Long Li <longli@microsoft.com>
Subject: [Patch v2] net/mana: use rte_pktmbuf_alloc_bulk for allocating RX WQEs
Date: Mon, 29 Jan 2024 17:13:01 -0800
Message-Id: <1706577181-27842-1-git-send-email-longli@linuxonhyperv.com>
In-Reply-To: <1706150562-23248-1-git-send-email-longli@linuxonhyperv.com>
References: <1706150562-23248-1-git-send-email-longli@linuxonhyperv.com>
Reply-To: longli@microsoft.com

From: Long Li <longli@microsoft.com>

Instead of allocating mbufs one by one during RX, use
rte_pktmbuf_alloc_bulk() to allocate them in a batch.

Signed-off-by: Long Li <longli@microsoft.com>
---
Change in v2:
use rte_calloc_socket() in place of rte_calloc()

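For reference (illustrative only, not part of the diff below): the change
replaces per-descriptor rte_pktmbuf_alloc() calls with a single
rte_pktmbuf_alloc_bulk() call per replenish cycle. A minimal, self-contained
sketch of that pattern follows; the helper name replenish_rx_bulk() and the
post_one() callback are made up for illustration, while
rte_pktmbuf_alloc_bulk(), rte_calloc_socket(), rte_pktmbuf_free() and
rte_free() are standard DPDK APIs.

#include <errno.h>
#include <stdint.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Replenish 'count' RX buffers with one bulk allocation instead of
 * 'count' individual rte_pktmbuf_alloc() calls.
 */
static int
replenish_rx_bulk(struct rte_mempool *mp, uint32_t count,
		  int (*post_one)(struct rte_mbuf *m))
{
	struct rte_mbuf **mbufs;
	uint32_t i;
	int ret;

	/* Scratch pointer array on the same NUMA socket as the mempool. */
	mbufs = rte_calloc_socket("rx_mbufs", count, sizeof(struct rte_mbuf *),
				  0, mp->socket_id);
	if (!mbufs)
		return -ENOMEM;

	/* One mempool operation for the whole batch; returns 0 on success. */
	ret = rte_pktmbuf_alloc_bulk(mp, mbufs, count);
	if (ret)
		goto out;

	for (i = 0; i < count; i++) {
		ret = post_one(mbufs[i]);
		if (ret) {
			/* Free the buffers that were allocated but not posted. */
			for (; i < count; i++)
				rte_pktmbuf_free(mbufs[i]);
			break;
		}
	}

out:
	rte_free(mbufs);
	return ret;
}

rte_pktmbuf_alloc_bulk() pulls all 'count' buffers from the mempool in one
call instead of once per descriptor; the diff below applies the same idea
inside mana_alloc_and_post_rx_wqes().
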
 drivers/net/mana/rx.c | 68 ++++++++++++++++++++++++++++---------------
 1 file changed, 44 insertions(+), 24 deletions(-)

diff --git a/drivers/net/mana/rx.c b/drivers/net/mana/rx.c
index acad5e26cd..b011bf3ea1 100644
--- a/drivers/net/mana/rx.c
+++ b/drivers/net/mana/rx.c
@@ -2,6 +2,7 @@
  * Copyright 2022 Microsoft Corporation
  */
 #include <ethdev_driver.h>
+#include <rte_malloc.h>

 #include <infiniband/verbs.h>
 #include <infiniband/manadv.h>
@@ -59,9 +60,8 @@ mana_rq_ring_doorbell(struct mana_rxq *rxq)
 }

 static int
-mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq)
+mana_post_rx_wqe(struct mana_rxq *rxq, struct rte_mbuf *mbuf)
 {
-	struct rte_mbuf *mbuf = NULL;
 	struct gdma_sgl_element sgl[1];
 	struct gdma_work_request request;
 	uint32_t wqe_size_in_bu;
@@ -69,12 +69,6 @@
 	int ret;
 	struct mana_mr_cache *mr;

-	mbuf = rte_pktmbuf_alloc(rxq->mp);
-	if (!mbuf) {
-		rxq->stats.nombuf++;
-		return -ENOMEM;
-	}
-
 	mr = mana_alloc_pmd_mr(&rxq->mr_btree, priv, mbuf);
 	if (!mr) {
 		DP_LOG(ERR, "failed to register RX MR");
@@ -121,19 +115,32 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq)
  * Post work requests for a Rx queue.
  */
 static int
-mana_alloc_and_post_rx_wqes(struct mana_rxq *rxq)
+mana_alloc_and_post_rx_wqes(struct mana_rxq *rxq, uint32_t count)
 {
 	int ret;
 	uint32_t i;
+	struct rte_mbuf **mbufs;
+
+	mbufs = rte_calloc_socket("mana_rx_mbufs", count, sizeof(struct rte_mbuf *),
+				  0, rxq->mp->socket_id);
+	if (!mbufs)
+		return -ENOMEM;
+
+	ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, count);
+	if (ret) {
+		DP_LOG(ERR, "failed to allocate mbufs for RX");
+		rxq->stats.nombuf += count;
+		goto fail;
+	}

 #ifdef RTE_ARCH_32
 	rxq->wqe_cnt_to_short_db = 0;
 #endif
-	for (i = 0; i < rxq->num_desc; i++) {
-		ret = mana_alloc_and_post_rx_wqe(rxq);
+	for (i = 0; i < count; i++) {
+		ret = mana_post_rx_wqe(rxq, mbufs[i]);
 		if (ret) {
 			DP_LOG(ERR, "failed to post RX ret = %d", ret);
-			return ret;
+			goto fail;
 		}

 #ifdef RTE_ARCH_32
@@ -146,6 +153,8 @@ mana_alloc_and_post_rx_wqes(struct mana_rxq *rxq)

 	mana_rq_ring_doorbell(rxq);

+fail:
+	rte_free(mbufs);
 	return ret;
 }

@@ -404,7 +413,9 @@ mana_start_rx_queues(struct rte_eth_dev *dev)
 	}

 	for (i = 0; i < priv->num_queues; i++) {
-		ret = mana_alloc_and_post_rx_wqes(dev->data->rx_queues[i]);
+		struct mana_rxq *rxq = dev->data->rx_queues[i];
+
+		ret = mana_alloc_and_post_rx_wqes(rxq, rxq->num_desc);
 		if (ret)
 			goto fail;
 	}
@@ -423,7 +434,7 @@ uint16_t
 mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
 	uint16_t pkt_received = 0;
-	uint16_t wqe_posted = 0;
+	uint16_t wqe_consumed = 0;
 	struct mana_rxq *rxq = dpdk_rxq;
 	struct mana_priv *priv = rxq->priv;
 	struct rte_mbuf *mbuf;
@@ -535,18 +546,23 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)

 		rxq->gdma_rq.tail += desc->wqe_size_in_bu;

-		/* Consume this request and post another request */
-		ret = mana_alloc_and_post_rx_wqe(rxq);
-		if (ret) {
-			DP_LOG(ERR, "failed to post rx wqe ret=%d", ret);
-			break;
-		}
-
-		wqe_posted++;
+		/* Record the number of the RX WQE we need to post to replenish
+		 * consumed RX requests
+		 */
+		wqe_consumed++;
 		if (pkt_received == pkts_n)
 			break;

 #ifdef RTE_ARCH_32
+		/* Always post WQE as soon as it's consumed for short DB */
+		ret = mana_alloc_and_post_rx_wqes(rxq, wqe_consumed);
+		if (ret) {
+			DRV_LOG(ERR, "failed to post %d WQEs, ret %d",
+				wqe_consumed, ret);
+			return pkt_received;
+		}
+		wqe_consumed = 0;
+
 		/* Ring short doorbell if approaching the wqe increment
 		 * limit.
 		 */
@@ -569,8 +585,12 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			goto repoll;
 	}

-	if (wqe_posted)
-		mana_rq_ring_doorbell(rxq);
+	if (wqe_consumed) {
+		ret = mana_alloc_and_post_rx_wqes(rxq, wqe_consumed);
+		if (ret)
+			DRV_LOG(ERR, "failed to post %d WQEs, ret %d",
+				wqe_consumed, ret);
+	}

 	return pkt_received;
 }
-- 
2.17.1