From mboxrd@z Thu Jan  1 00:00:00 1970
From: Li RongQing <lirongqing@baidu.com>
To: dev@dpdk.org
Date: Thu, 17 Sep 2020 13:11:02 +0800
Message-Id: <1600319462-2053-2-git-send-email-lirongqing@baidu.com>
X-Mailer: git-send-email 1.7.1
In-Reply-To: <1600319462-2053-1-git-send-email-lirongqing@baidu.com>
References: <1600319462-2053-1-git-send-email-lirongqing@baidu.com>
Subject: [dpdk-dev] [PATCH 2/2] af_xdp: avoid unnecessary allocation and free of mbufs

Optimize Rx performance by allocating mbufs based on the result of
xsk_ring_cons__peek(). This avoids redundantly allocating, and later
freeing, mbufs when fewer packets are received than requested.

Signed-off-by: Li RongQing <lirongqing@baidu.com>
Signed-off-by: Dongsheng Rong
---
 drivers/net/af_xdp/rte_eth_af_xdp.c | 64 ++++++++++++++++---------------------
 1 file changed, 27 insertions(+), 37 deletions(-)

diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 7ce4ad04a..48824050e 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -229,28 +229,29 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct xsk_umem_info *umem = rxq->umem;
 	uint32_t idx_rx = 0;
 	unsigned long rx_bytes = 0;
-	int rcvd, i;
+	int i;
 	struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE];
 
-	/* allocate bufs for fill queue replenishment after rx */
-	if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
-		AF_XDP_LOG(DEBUG,
-			"Failed to get enough buffers for fq.\n");
-		return 0;
-	}
-
-	rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
+	nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
 
-	if (rcvd == 0) {
+	if (nb_pkts == 0) {
 #if defined(XDP_USE_NEED_WAKEUP)
 		if (xsk_ring_prod__needs_wakeup(&umem->fq))
 			(void)poll(rxq->fds, 1, 1000);
 #endif
 
-		goto out;
+		return 0;
 	}
 
-	for (i = 0; i < rcvd; i++) {
+	/* allocate bufs for fill queue replenishment after rx */
+	if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
+		AF_XDP_LOG(DEBUG,
+			"Failed to get enough buffers for fq.\n");
+		return 0;
+	}
+
+	for (i = 0; i < nb_pkts; i++) {
 		const struct xdp_desc *desc;
 		uint64_t addr;
 		uint32_t len;
@@ -275,20 +276,15 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		rx_bytes += len;
 	}
 
-	xsk_ring_cons__release(rx, rcvd);
+	xsk_ring_cons__release(rx, nb_pkts);
 
-	(void)reserve_fill_queue(umem, rcvd, fq_bufs);
+	(void)reserve_fill_queue(umem, nb_pkts, fq_bufs);
 
 	/* statistics */
-	rxq->stats.rx_pkts += rcvd;
+	rxq->stats.rx_pkts += nb_pkts;
 	rxq->stats.rx_bytes += rx_bytes;
 
-out:
-	if (rcvd != nb_pkts)
-		rte_mempool_put_bulk(umem->mb_pool, (void **)&fq_bufs[rcvd],
-				     nb_pkts - rcvd);
-
-	return rcvd;
+	return nb_pkts;
 }
 #else
 static uint16_t
@@ -300,27 +296,26 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct xsk_ring_prod *fq = &umem->fq;
 	uint32_t idx_rx = 0;
 	unsigned long rx_bytes = 0;
-	int rcvd, i;
+	int i;
 	uint32_t free_thresh = fq->size >> 1;
 	struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];
 
-	if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts) != 0))
-		return 0;
-
-	rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
-	if (rcvd == 0) {
+	nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
+	if (nb_pkts == 0) {
 #if defined(XDP_USE_NEED_WAKEUP)
 		if (xsk_ring_prod__needs_wakeup(fq))
 			(void)poll(rxq->fds, 1, 1000);
 #endif
-
-		goto out;
+		return 0;
 	}
 
+	if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts) != 0))
+		return 0;
+
 	if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
 		(void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE, NULL);
 
-	for (i = 0; i < rcvd; i++) {
+	for (i = 0; i < nb_pkts; i++) {
 		const struct xdp_desc *desc;
 		uint64_t addr;
 		uint32_t len;
@@ -339,18 +334,13 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		bufs[i] = mbufs[i];
 	}
 
-	xsk_ring_cons__release(rx, rcvd);
+	xsk_ring_cons__release(rx, nb_pkts);
 
 	/* statistics */
-	rxq->stats.rx_pkts += rcvd;
+	rxq->stats.rx_pkts += nb_pkts;
 	rxq->stats.rx_bytes += rx_bytes;
 
-out:
-	if (rcvd != nb_pkts)
-		rte_mempool_put_bulk(rxq->mb_pool, (void **)&mbufs[rcvd],
-				     nb_pkts - rcvd);
-
-	return rcvd;
+	return nb_pkts;
 }
 #endif
-- 
2.16.2
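
The point of the reordering is that the mempool is only touched once the Rx
ring has reported how many descriptors are actually ready, so the old
"allocate nb_pkts up front, give the surplus back with rte_mempool_put_bulk()"
path goes away. The sketch below is only an illustration of that ordering,
not the driver code: ring_peek(), pool_alloc_bulk() and struct buf are
hypothetical stand-ins for xsk_ring_cons__peek(), rte_pktmbuf_alloc_bulk()
and struct rte_mbuf.

/*
 * Sketch of the "peek first, then allocate" Rx flow used by this patch.
 * All helpers are hypothetical stand-ins; only the step ordering matters.
 */
#include <stdint.h>
#include <stdio.h>

#define RX_BATCH_SIZE 32

struct buf {
	uint8_t data[2048];
	uint32_t len;
};

/* Pretend Rx ring: report how many descriptors are ready, capped at the
 * caller's budget (fixed at 7 here to keep the example deterministic). */
static uint16_t ring_peek(uint16_t budget)
{
	uint16_t ready = 7;

	return ready < budget ? ready : budget;
}

/* Pretend mempool: hand out n buffers, return 0 on success. */
static int pool_alloc_bulk(struct buf **bufs, uint16_t n)
{
	static struct buf pool[RX_BATCH_SIZE];
	uint16_t i;

	for (i = 0; i < n; i++)
		bufs[i] = &pool[i];
	return 0;
}

static uint16_t rx_burst(struct buf **bufs, uint16_t nb_pkts)
{
	struct buf *fresh[RX_BATCH_SIZE];
	uint16_t i;

	/* 1. Peek first: learn how many packets actually arrived. */
	nb_pkts = ring_peek(nb_pkts);
	if (nb_pkts == 0)
		return 0;	/* nothing arrived, and nothing was allocated */

	/* 2. Allocate exactly nb_pkts buffers: no surplus to put back. */
	if (pool_alloc_bulk(fresh, nb_pkts) != 0)
		return 0;

	/* 3. Hand the received packets to the caller. */
	for (i = 0; i < nb_pkts; i++) {
		fresh[i]->len = 64;	/* stand-in for the descriptor length */
		bufs[i] = fresh[i];
	}

	return nb_pkts;
}

int main(void)
{
	struct buf *bufs[RX_BATCH_SIZE];
	uint16_t n = rx_burst(bufs, RX_BATCH_SIZE);

	printf("received %u packets, allocated exactly %u buffers\n",
	       (unsigned)n, (unsigned)n);
	return 0;
}

Before the patch, step 2 ran before step 1, so whenever the ring reported
fewer packets than the budget the surplus mbufs had to be handed back via
rte_mempool_put_bulk() (the removed "out:" path); that per-burst overhead is
what this change eliminates.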