From: "Loftus, Ciara"
To: Li RongQing
CC: "dev@dpdk.org"
Date: Fri, 18 Sep 2020 09:38:32 +0000
Subject: Re: [dpdk-dev] [PATCH 2/2] af_xdp: avoid to unnecessary allocation and free mbuf
In-Reply-To: <1600319462-2053-2-git-send-email-lirongqing@baidu.com>
References: <1600319462-2053-1-git-send-email-lirongqing@baidu.com>
 <1600319462-2053-2-git-send-email-lirongqing@baidu.com>
List-Id: DPDK patches and discussions

>
> Optimize rx performance by allocating mbufs based on the result of
> xsk_ring_cons__peek, which avoids redundant allocation and freeing
> of mbufs when receiving packets.
>
> Signed-off-by: Li RongQing
> Signed-off-by: Dongsheng Rong
> ---
>  drivers/net/af_xdp/rte_eth_af_xdp.c | 64 ++++++++++++++++----------------
>  1 file changed, 27 insertions(+), 37 deletions(-)
>
> diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
> index 7ce4ad04a..48824050e 100644
> --- a/drivers/net/af_xdp/rte_eth_af_xdp.c
> +++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
> @@ -229,28 +229,29 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
>  	struct xsk_umem_info *umem = rxq->umem;
>  	uint32_t idx_rx = 0;
>  	unsigned long rx_bytes = 0;
> -	int rcvd, i;
> +	int i;
>  	struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE];
>
> -	/* allocate bufs for fill queue replenishment after rx */
> -	if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
> -		AF_XDP_LOG(DEBUG,
> -			"Failed to get enough buffers for fq.\n");
> -		return 0;
> -	}
>
> -	rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
> +	nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
>
> -	if (rcvd == 0) {
> +	if (nb_pkts == 0) {
>  #if defined(XDP_USE_NEED_WAKEUP)
>  		if (xsk_ring_prod__needs_wakeup(&umem->fq))
>  			(void)poll(rxq->fds, 1, 1000);
>  #endif
>
> -		goto out;
> +		return 0;
>  	}
>
> -	for (i = 0; i < rcvd; i++) {
> +	/* allocate bufs for fill queue replenishment after rx */
> +	if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
> +		AF_XDP_LOG(DEBUG,
> +			"Failed to get enough buffers for fq.\n");

Thanks for this patch. I've considered this approach in the past.

There is a problem if we hit this condition: we have already advanced the
consumer index on the rx ring with xsk_ring_cons__peek, so if we have no
mbufs to hold the rx data, it is lost. That's why we allocate the mbufs
up front now. I agree the up-front allocation can be wasteful and is not
the most optimal, but it means we don't drop packets due to failed mbuf
allocs. A standalone sketch of the trade-off is appended after the diff.

> +		return 0;
> +	}
> +
> +	for (i = 0; i < nb_pkts; i++) {
>  		const struct xdp_desc *desc;
>  		uint64_t addr;
>  		uint32_t len;
> @@ -275,20 +276,15 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
>  		rx_bytes += len;
>  	}
>
> -	xsk_ring_cons__release(rx, rcvd);
> +	xsk_ring_cons__release(rx, nb_pkts);
>
> -	(void)reserve_fill_queue(umem, rcvd, fq_bufs);
> +	(void)reserve_fill_queue(umem, nb_pkts, fq_bufs);
>
>  	/* statistics */
> -	rxq->stats.rx_pkts += rcvd;
> +	rxq->stats.rx_pkts += nb_pkts;
>  	rxq->stats.rx_bytes += rx_bytes;
>
> -out:
> -	if (rcvd != nb_pkts)
> -		rte_mempool_put_bulk(umem->mb_pool, (void **)&fq_bufs[rcvd],
> -				     nb_pkts - rcvd);
> -
> -	return rcvd;
> +	return nb_pkts;
>  }
>  #else
>  static uint16_t
> @@ -300,27 +296,26 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
>  	struct xsk_ring_prod *fq = &umem->fq;
>  	uint32_t idx_rx = 0;
>  	unsigned long rx_bytes = 0;
> -	int rcvd, i;
> +	int i;
>  	uint32_t free_thresh = fq->size >> 1;
>  	struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];
>
> -	if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts) != 0))
> -		return 0;
> -
> -	rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
> -	if (rcvd == 0) {
> +	nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
> +	if (nb_pkts == 0) {
>  #if defined(XDP_USE_NEED_WAKEUP)
>  		if (xsk_ring_prod__needs_wakeup(fq))
>  			(void)poll(rxq->fds, 1, 1000);
>  #endif
> -
> -		goto out;
> +		return 0;
>  	}
>
> +	if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts) != 0))
> +		return 0;
> +
>  	if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
>  		(void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE, NULL);
>
> -	for (i = 0; i < rcvd; i++) {
> +	for (i = 0; i < nb_pkts; i++) {
>  		const struct xdp_desc *desc;
>  		uint64_t addr;
>  		uint32_t len;
> @@ -339,18 +334,13 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
>  		bufs[i] = mbufs[i];
>  	}
>
> -	xsk_ring_cons__release(rx, rcvd);
> +	xsk_ring_cons__release(rx, nb_pkts);
>
>  	/* statistics */
> -	rxq->stats.rx_pkts += rcvd;
> +	rxq->stats.rx_pkts += nb_pkts;
>  	rxq->stats.rx_bytes += rx_bytes;
>
> -out:
> -	if (rcvd != nb_pkts)
> -		rte_mempool_put_bulk(rxq->mb_pool, (void **)&mbufs[rcvd],
> -				     nb_pkts - rcvd);
> -
> -	return rcvd;
> +	return nb_pkts;
>  }
>  #endif
>
> --
> 2.16.2
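
For illustration, here is a minimal standalone model of the ordering
trade-off discussed above. It is a sketch, not the real driver code:
peek(), alloc_mbufs(), BATCH and the two rx_*() functions are invented
stand-ins for xsk_ring_cons__peek() and rte_pktmbuf_alloc_bulk(), and
only the order of operations mirrors the driver.

#include <stdio.h>
#include <stdbool.h>

#define BATCH 4

static int ring_pkts = BATCH;	/* descriptors waiting in the rx ring */
static bool pool_empty = true;	/* simulate an exhausted mempool */

/* Stand-in for xsk_ring_cons__peek(); consuming descriptors is
 * destructive in this model, there is no way to hand them back. */
static int peek(int want)
{
	int got = want < ring_pkts ? want : ring_pkts;

	ring_pkts -= got;
	return got;
}

/* Stand-in for rte_pktmbuf_alloc_bulk(): 0 on success, -1 on failure */
static int alloc_mbufs(int n)
{
	(void)n;
	return pool_empty ? -1 : 0;
}

/* Ordering proposed by the patch: peek first, allocate afterwards */
static int rx_peek_then_alloc(void)
{
	int rcvd = peek(BATCH);

	if (rcvd == 0)
		return 0;
	if (alloc_mbufs(rcvd) != 0)
		return 0;	/* descriptors already consumed: rcvd packets lost */
	return rcvd;
}

/* Current ordering: allocate up front, peek afterwards */
static int rx_alloc_then_peek(void)
{
	if (alloc_mbufs(BATCH) != 0)
		return 0;	/* nothing consumed yet: packets stay in the ring */
	return peek(BATCH);
}

int main(void)
{
	printf("peek-then-alloc: rx %d, %d left in ring (packets dropped)\n",
	       rx_peek_then_alloc(), ring_pkts);
	ring_pkts = BATCH;
	printf("alloc-then-peek: rx %d, %d left in ring (packets kept)\n",
	       rx_alloc_then_peek(), ring_pkts);
	return 0;
}

With the pool exhausted, peek-then-alloc returns 0 and has emptied the
model ring, while alloc-then-peek returns 0 and leaves every descriptor
in place. One possible middle ground, if the libbpf version in use
provides it, is xsk_ring_cons__cancel(), which hands peeked-but-unused
descriptors back to the ring; that would let the allocation follow the
peek without risking packet loss.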