From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path: 
Received: from mx1.redhat.com (mx1.redhat.com [209.132.183.28])
 by dpdk.org (Postfix) with ESMTP id 6AF231B600
 for ; Fri, 22 Mar 2019 15:51:07 +0100 (CET)
Received: from smtp.corp.redhat.com
 (int-mx03.intmail.prod.int.phx2.redhat.com [10.5.11.13])
 (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))
 (No client certificate requested)
 by mx1.redhat.com (Postfix) with ESMTPS id 75CDC308FBA6;
 Fri, 22 Mar 2019 14:51:06 +0000 (UTC)
Received: from [10.36.112.59] (ovpn-112-59.ams2.redhat.com [10.36.112.59])
 by smtp.corp.redhat.com (Postfix) with ESMTPS id DB92F60857;
 Fri, 22 Mar 2019 14:51:04 +0000 (UTC)
To: Xiaolong Ye , dev@dpdk.org
Cc: Qi Zhang , Karlsson Magnus , Topel Bjorn 
References: <20190301080947.91086-1-xiaolong.ye@intel.com>
 <20190322130129.109964-1-xiaolong.ye@intel.com>
 <20190322130129.109964-5-xiaolong.ye@intel.com>
From: Maxime Coquelin 
Message-ID: <0dd90e3f-8412-4508-8056-35ad0a3e5e8d@redhat.com>
Date: Fri, 22 Mar 2019 15:51:03 +0100
User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101
 Thunderbird/60.5.1
MIME-Version: 1.0
In-Reply-To: <20190322130129.109964-5-xiaolong.ye@intel.com>
Content-Type: text/plain; charset=utf-8; format=flowed
Content-Language: en-US
Content-Transfer-Encoding: 7bit
X-Scanned-By: MIMEDefang 2.79 on 10.5.11.13
X-Greylist: Sender IP whitelisted, not delayed by milter-greylist-4.5.16
 (mx1.redhat.com [10.5.110.43]); Fri, 22 Mar 2019 14:51:06 +0000 (UTC)
Subject: Re: [dpdk-dev] [PATCH v4 4/5] net/af_xdp: use mbuf mempool for
 buffer management
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions
List-Unsubscribe: ,
List-Archive: 
List-Post: 
List-Help: 
List-Subscribe: ,
X-List-Received-Date: Fri, 22 Mar 2019 14:51:07 -0000

On 3/22/19 2:01 PM, Xiaolong Ye wrote:
> Now, af_xdp registered memory buffer is managed by rte_mempool. mbuf be

s/mbuf be allocated/mbuf allocated/

> allocated from rte_mempool can be convert to xdp_desc's address and vice

s/convert/converted/

> versa.
> 
> Signed-off-by: Xiaolong Ye 
> ---
>  drivers/net/af_xdp/rte_eth_af_xdp.c | 117 +++++++++++++++++-----------
>  1 file changed, 72 insertions(+), 45 deletions(-)
> 
> diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
> index 9f0012347..6b1bc462a 100644
> --- a/drivers/net/af_xdp/rte_eth_af_xdp.c
> +++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
> @@ -48,7 +48,11 @@ static int af_xdp_logtype;
> 
>  #define ETH_AF_XDP_FRAME_SIZE		XSK_UMEM__DEFAULT_FRAME_SIZE
>  #define ETH_AF_XDP_NUM_BUFFERS		4096
> -#define ETH_AF_XDP_DATA_HEADROOM	0
> +/* mempool hdrobj size (64 bytes) + sizeof(struct rte_mbuf) (128 bytes) */
> +#define ETH_AF_XDP_MBUF_OVERHEAD	192
> +/* data start from offset 320 (192 + 128) bytes */
> +#define ETH_AF_XDP_DATA_HEADROOM \
> +	(ETH_AF_XDP_MBUF_OVERHEAD + RTE_PKTMBUF_HEADROOM)
>  #define ETH_AF_XDP_DFLT_NUM_DESCS	XSK_RING_CONS__DEFAULT_NUM_DESCS
>  #define ETH_AF_XDP_DFLT_QUEUE_IDX	0
> 
> @@ -61,7 +65,7 @@ struct xsk_umem_info {
>  	struct xsk_ring_prod fq;
>  	struct xsk_ring_cons cq;
>  	struct xsk_umem *umem;
> -	struct rte_ring *buf_ring;
> +	struct rte_mempool *mb_pool;
>  	void *buffer;
>  };
> 
> @@ -123,12 +127,32 @@ static struct rte_eth_link pmd_link = {
>  	.link_autoneg = ETH_LINK_AUTONEG
>  };
> 
> +static inline struct rte_mbuf *
> +addr_to_mbuf(struct xsk_umem_info *umem, uint64_t addr)
> +{
> +	uint64_t offset = (addr / ETH_AF_XDP_FRAME_SIZE *
> +			   ETH_AF_XDP_FRAME_SIZE);
> +	struct rte_mbuf *mbuf = (struct rte_mbuf *)((uint64_t)umem->buffer +
> +			offset + ETH_AF_XDP_MBUF_OVERHEAD -
> +			sizeof(struct rte_mbuf));
> +	mbuf->data_off = addr - offset - ETH_AF_XDP_MBUF_OVERHEAD;
> +	return mbuf;
> +}
> +
> +static inline uint64_t
> +mbuf_to_addr(struct xsk_umem_info *umem, struct rte_mbuf *mbuf)
> +{
> +	return (uint64_t)mbuf->buf_addr + mbuf->data_off -
> +		(uint64_t)umem->buffer;
> +}
> +
>  static inline int
>  reserve_fill_queue(struct xsk_umem_info *umem, int reserve_size)
>  {
>  	struct xsk_ring_prod *fq = &umem->fq;
> +	struct rte_mbuf *mbuf;
>  	uint32_t idx;
> -	void *addr = NULL;
> +	uint64_t addr;
>  	int i, ret;
> 
>  	ret = xsk_ring_prod__reserve(fq, reserve_size, &idx);
> @@ -139,12 +163,14 @@ reserve_fill_queue(struct xsk_umem_info *umem, int reserve_size)
> 
>  	for (i = 0; i < reserve_size; i++) {
>  		__u64 *fq_addr;
> -		if (rte_ring_dequeue(umem->buf_ring, &addr)) {
> +		mbuf = rte_pktmbuf_alloc(umem->mb_pool);
> +		if (mbuf == NULL) {

unlikely()
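Allocation failure is the cold path here, so the check deserves the branch
hint (unlikely() from rte_branch_prediction.h). Untested sketch of what I
mean:

		mbuf = rte_pktmbuf_alloc(umem->mb_pool);
		if (unlikely(mbuf == NULL)) {
			i--;
			break;
		}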
>  			i--;
>  			break;
>  		}
> +		addr = mbuf_to_addr(umem, mbuf);
>  		fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
> -		*fq_addr = (uint64_t)addr;
> +		*fq_addr = addr;
>  	}
> 
>  	xsk_ring_prod__submit(fq, i);
> @@ -196,7 +222,7 @@ eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
>  		rx_bytes += len;
>  		bufs[count++] = mbufs[i];
> 
> -		rte_ring_enqueue(umem->buf_ring, (void *)addr);
> +		rte_pktmbuf_free(addr_to_mbuf(umem, addr));
>  	}
> 
>  	xsk_ring_cons__release(rx, rcvd);
> @@ -219,7 +245,7 @@ static void pull_umem_cq(struct xsk_umem_info *umem, int size)
>  	for (i = 0; i < n; i++) {
>  		uint64_t addr;
>  		addr = *xsk_ring_cons__comp_addr(cq, idx_cq++);
> -		rte_ring_enqueue(umem->buf_ring, (void *)addr);
> +		rte_pktmbuf_free(addr_to_mbuf(umem, addr));
>  	}
> 
>  	xsk_ring_cons__release(cq, n);
> @@ -248,7 +274,7 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
>  	struct pkt_tx_queue *txq = queue;
>  	struct xsk_umem_info *umem = txq->pair->umem;
>  	struct rte_mbuf *mbuf;
> -	void *addrs[ETH_AF_XDP_TX_BATCH_SIZE];
> +	struct rte_mbuf *mbuf_to_tx;
>  	unsigned long tx_bytes = 0;
>  	int i, valid = 0;
>  	uint32_t idx_tx;
> 
> @@ -257,11 +283,6 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
> 
>  	pull_umem_cq(umem, nb_pkts);
> 
> -	nb_pkts = rte_ring_dequeue_bulk(umem->buf_ring, addrs,
> -					nb_pkts, NULL);
> -	if (nb_pkts == 0)
> -		return 0;
> -
>  	if (xsk_ring_prod__reserve(&txq->tx, nb_pkts, &idx_tx) != nb_pkts) {
>  		kick_tx(txq);
>  		return 0;
> @@ -275,7 +296,12 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
>  		desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx + i);
>  		mbuf = bufs[i];
>  		if (mbuf->pkt_len <= buf_len) {
> -			desc->addr = (uint64_t)addrs[valid];
> +			mbuf_to_tx = rte_pktmbuf_alloc(umem->mb_pool);
> +			if (mbuf_to_tx == NULL) {

unlikely()
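Same remark as above, e.g. (sketch):

			if (unlikely(mbuf_to_tx == NULL)) {
				rte_pktmbuf_free(mbuf);
				continue;
			}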
> +				rte_pktmbuf_free(mbuf);
> +				continue;
> +			}
> +			desc->addr = mbuf_to_addr(umem, mbuf_to_tx);
>  			desc->len = mbuf->pkt_len;
>  			pkt = xsk_umem__get_data(umem->buffer,
>  						 desc->addr);
> @@ -291,10 +317,6 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
> 
>  	kick_tx(txq);
> 
> -	if (valid < nb_pkts)
> -		rte_ring_enqueue_bulk(umem->buf_ring, &addrs[valid],
> -				      nb_pkts - valid, NULL);
> -
>  	txq->stats.err_pkts += nb_pkts - valid;
>  	txq->stats.tx_pkts += valid;
>  	txq->stats.tx_bytes += tx_bytes;
> @@ -443,16 +465,29 @@ eth_link_update(struct rte_eth_dev *dev __rte_unused,
> 
>  static void xdp_umem_destroy(struct xsk_umem_info *umem)
>  {
> -	free(umem->buffer);
> -	umem->buffer = NULL;
> -
> -	rte_ring_free(umem->buf_ring);
> -	umem->buf_ring = NULL;
> +	rte_mempool_free(umem->mb_pool);
> +	umem->mb_pool = NULL;
> 
>  	rte_free(umem);
>  	umem = NULL;
>  }
> 
> +static inline uint64_t get_base_addr(struct rte_mempool *mp)
> +{
> +	struct rte_mempool_memhdr *memhdr;
> +
> +	memhdr = STAILQ_FIRST(&mp->mem_list);
> +	return (uint64_t)(memhdr->addr);
> +}
> +
> +static inline uint64_t get_len(struct rte_mempool *mp)
> +{
> +	struct rte_mempool_memhdr *memhdr;
> +
> +	memhdr = STAILQ_FIRST(&mp->mem_list);
> +	return (uint64_t)(memhdr->len);
> +}
> +
>  static struct xsk_umem_info *xdp_umem_configure(void)
>  {
>  	struct xsk_umem_info *umem;
> @@ -461,9 +496,8 @@ static struct xsk_umem_info *xdp_umem_configure(void)
>  		.comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
>  		.frame_size = ETH_AF_XDP_FRAME_SIZE,
>  		.frame_headroom = ETH_AF_XDP_DATA_HEADROOM };
> -	void *bufs = NULL;
> +	void *base_addr = NULL;
>  	int ret;
> -	uint64_t i;
> 
>  	umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id());
>  	if (umem == NULL) {
> @@ -471,26 +505,20 @@ static struct xsk_umem_info *xdp_umem_configure(void)
>  		return NULL;
>  	}
> 
> -	umem->buf_ring = rte_ring_create("af_xdp_ring",
> -					 ETH_AF_XDP_NUM_BUFFERS,
> -					 SOCKET_ID_ANY,
> -					 0x0);
> -	if (umem->buf_ring == NULL) {
> -		AF_XDP_LOG(ERR, "Failed to create rte_ring\n");
> +	umem->mb_pool = rte_pktmbuf_pool_create_with_flags("af_xdp_mempool",
> +			ETH_AF_XDP_NUM_BUFFERS,
> +			250, 0,
> +			ETH_AF_XDP_FRAME_SIZE -
> +			ETH_AF_XDP_MBUF_OVERHEAD,
> +			MEMPOOL_F_NO_SPREAD | MEMPOOL_F_PAGE_ALIGN,
> +			SOCKET_ID_ANY);
> +	if (umem->mb_pool == NULL || umem->mb_pool->nb_mem_chunks != 1) {
> +		AF_XDP_LOG(ERR, "Failed to create mempool\n");
>  		goto err;
>  	}
> +	base_addr = (void *)get_base_addr(umem->mb_pool);
> 
> -	for (i = 0; i < ETH_AF_XDP_NUM_BUFFERS; i++)
> -		rte_ring_enqueue(umem->buf_ring,
> -				 (void *)(i * ETH_AF_XDP_FRAME_SIZE +
> -					  ETH_AF_XDP_DATA_HEADROOM));
> -
> -	if (posix_memalign(&bufs, getpagesize(),
> -			   ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE)) {
> -		AF_XDP_LOG(ERR, "Failed to allocate memory pool.\n");
> -		goto err;
> -	}
> -	ret = xsk_umem__create(&umem->umem, bufs,
> +	ret = xsk_umem__create(&umem->umem, base_addr,
>  			       ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
>  			       &umem->fq, &umem->cq,
>  			       &usr_config);
> @@ -499,7 +527,7 @@ static struct xsk_umem_info *xdp_umem_configure(void)
>  		AF_XDP_LOG(ERR, "Failed to create umem");
>  		goto err;

You need to destroy mb_pool if xsk_umem__create() fails.
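One way would be to route the err label through xdp_umem_destroy(), which
already frees both the mempool and the umem struct (rte_mempool_free() is a
no-op on NULL), e.g. (untested sketch):

	err:
		xdp_umem_destroy(umem);
		return NULL;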
>  	}
> -	umem->buffer = bufs;
> +	umem->buffer = base_addr;
> 
>  	return umem;
> 
> @@ -912,10 +940,9 @@ rte_pmd_af_xdp_remove(struct rte_vdev_device *dev)
> 
>  	internals = eth_dev->data->dev_private;
> 
> -	rte_ring_free(internals->umem->buf_ring);
> -	rte_free(internals->umem->buffer);
>  	rte_free(internals->umem);
> 
> +	rte_mempool_free(internals->umem->mb_pool);
>  	rte_eth_dev_release_port(eth_dev);
> 
> 