From: Li RongQing <lirongqing@baidu.com>
To: dev@dpdk.org
Date: Fri, 20 Nov 2020 14:14:28 +0800
Message-Id: <1605852868-25682-1-git-send-email-lirongqing@baidu.com>
X-Mailer: git-send-email 1.7.1
Subject: [dpdk-dev] [PATCH][v3] net/af_xdp: optimize RX path by removing the unneeded mbuf allocation

When receiving packets, the maximum burst of mbufs is allocated up front. If the hardware delivers fewer packets than that, the surplus mbufs have to be freed again, which hurts performance.

Optimize the RX path by calling xsk_ring_cons__peek() first and allocating only as many mbufs as descriptors were actually received, avoiding the redundant allocation and free.

The RX ring's cached_cons must be rolled back if the mbuf allocation fails, as found by Ciara Loftus.

Signed-off-by: Li RongQing
Signed-off-by: Dongsheng Rong
---
V2: roll back rx cached_cons if the mbufs fail to be allocated
V3: add a comment where rx cached_cons is rolled back.
    As suggested by Ciara Loftus, the rollback should go into a dedicated
    function such as xsk_ring_cons__cancel(), but that function belongs in
    the kernel, so it will be sent to the kernel separately (a sketch of
    such a helper is appended after the patch).

 drivers/net/af_xdp/rte_eth_af_xdp.c | 73 ++++++++++++++---------------
 1 file changed, 35 insertions(+), 38 deletions(-)

diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 2c7892bd7..69a4d54a3 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -255,28 +255,32 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct xsk_umem_info *umem = rxq->umem;
 	uint32_t idx_rx = 0;
 	unsigned long rx_bytes = 0;
-	int rcvd, i;
+	int i;
 	struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE];
 
-	/* allocate bufs for fill queue replenishment after rx */
-	if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
-		AF_XDP_LOG(DEBUG,
-			"Failed to get enough buffers for fq.\n");
-		return 0;
-	}
+	nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
 
-	rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
-
-	if (rcvd == 0) {
+	if (nb_pkts == 0) {
 #if defined(XDP_USE_NEED_WAKEUP)
 		if (xsk_ring_prod__needs_wakeup(fq))
 			(void)poll(rxq->fds, 1, 1000);
 #endif
 
-		goto out;
+		return 0;
+	}
+
+	/* allocate bufs for fill queue replenishment after rx */
+	if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
+		AF_XDP_LOG(DEBUG,
+			"Failed to get enough buffers for fq.\n");
+		/* rollback cached_cons which is added by
+		 * xsk_ring_cons__peek
+		 */
+		rx->cached_cons -= nb_pkts;
+		return 0;
 	}
 
-	for (i = 0; i < rcvd; i++) {
+	for (i = 0; i < nb_pkts; i++) {
 		const struct xdp_desc *desc;
 		uint64_t addr;
 		uint32_t len;
@@ -301,20 +305,14 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		rx_bytes += len;
 	}
 
-	xsk_ring_cons__release(rx, rcvd);
-
-	(void)reserve_fill_queue(umem, rcvd, fq_bufs, fq);
+	xsk_ring_cons__release(rx, nb_pkts);
+	(void)reserve_fill_queue(umem, nb_pkts, fq_bufs, fq);
 
 	/* statistics */
-	rxq->stats.rx_pkts += rcvd;
+	rxq->stats.rx_pkts += nb_pkts;
 	rxq->stats.rx_bytes += rx_bytes;
 
-out:
-	if (rcvd != nb_pkts)
-		rte_mempool_put_bulk(umem->mb_pool, (void **)&fq_bufs[rcvd],
-				nb_pkts - rcvd);
-
-	return rcvd;
+	return nb_pkts;
 }
 #else
 static uint16_t
@@ -326,7 +324,7 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct xsk_ring_prod *fq = &rxq->fq;
 	uint32_t idx_rx = 0;
 	unsigned long rx_bytes = 0;
-	int rcvd, i;
+	int i;
 	uint32_t free_thresh = fq->size >> 1;
 	struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];
 
@@ -334,20 +332,24 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		(void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE,
 					 NULL, fq);
 
-	if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts) != 0))
-		return 0;
-
-	rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
-	if (rcvd == 0) {
+	nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
+	if (nb_pkts == 0) {
 #if defined(XDP_USE_NEED_WAKEUP)
 		if (xsk_ring_prod__needs_wakeup(fq))
 			(void)poll(rxq->fds, 1, 1000);
 #endif
+		return 0;
+	}
 
-		goto out;
+	if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts))) {
+		/* rollback cached_cons which is added by
+		 * xsk_ring_cons__peek
+		 */
+		rx->cached_cons -= nb_pkts;
+		return 0;
 	}
 
-	for (i = 0; i < rcvd; i++) {
+	for (i = 0; i < nb_pkts; i++) {
 		const struct xdp_desc *desc;
 		uint64_t addr;
 		uint32_t len;
@@ -366,18 +368,13 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		bufs[i] = mbufs[i];
 	}
 
-	xsk_ring_cons__release(rx, rcvd);
+	xsk_ring_cons__release(rx, nb_pkts);
 
 	/* statistics */
-	rxq->stats.rx_pkts += rcvd;
+	rxq->stats.rx_pkts += nb_pkts;
 	rxq->stats.rx_bytes += rx_bytes;
 
-out:
-	if (rcvd != nb_pkts)
-		rte_mempool_put_bulk(rxq->mb_pool, (void **)&mbufs[rcvd],
-				nb_pkts - rcvd);
-
-	return rcvd;
+	return nb_pkts;
 }
 #endif
-- 
2.17.3
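
For illustration only, not part of this patch: the rollback helper suggested in the V3 note could look roughly like the sketch below. The name xsk_ring_cons__cancel() and its placement next to xsk_ring_cons__peek() in <bpf/xsk.h> are assumptions taken from that suggestion, and the description of xsk_ring_cons__peek()'s behaviour is paraphrased from libbpf and may differ between versions.

/*
 * Sketch only. xsk_ring_cons__peek() advances cons->cached_cons by the
 * number of entries it returns, so if the driver then fails to allocate
 * mbufs for those entries it must undo that advance before returning 0;
 * otherwise the descriptors would be skipped and never released.
 */
#include <bpf/xsk.h>

/* hypothetical helper, to live next to xsk_ring_cons__peek() in xsk.h */
static inline void xsk_ring_cons__cancel(struct xsk_ring_cons *cons, __u32 nb)
{
	cons->cached_cons -= nb;	/* undo the advance made by __peek */
}

With such a helper, both error paths above would call xsk_ring_cons__cancel(rx, nb_pkts) instead of open-coding rx->cached_cons -= nb_pkts.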