From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path:
Received: from mga06.intel.com (mga06.intel.com [134.134.136.31])
 by dpdk.org (Postfix) with ESMTP id 26D99493D
 for ; Tue, 19 Mar 2019 08:17:00 +0100 (CET)
X-Amp-Result: SKIPPED(no attachment in message)
X-Amp-File-Uploaded: False
Received: from fmsmga005.fm.intel.com ([10.253.24.32])
 by orsmga104.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;
 19 Mar 2019 00:16:59 -0700
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.58,496,1544515200"; d="scan'208";a="329889964"
Received: from yexl-server.sh.intel.com ([10.67.110.206])
 by fmsmga005.fm.intel.com with ESMTP; 19 Mar 2019 00:16:58 -0700
From: Xiaolong Ye <xiaolong.ye@intel.com>
To: dev@dpdk.org
Cc: Qi Zhang , Karlsson Magnus , Topel Bjorn , Xiaolong Ye
Date: Tue, 19 Mar 2019 15:12:54 +0800
Message-Id: <20190319071256.26302-5-xiaolong.ye@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20190319071256.26302-1-xiaolong.ye@intel.com>
References: <20190301080947.91086-1-xiaolong.ye@intel.com> <20190319071256.26302-1-xiaolong.ye@intel.com>
Subject: [dpdk-dev] [PATCH v2 4/6] net/af_xdp: use mbuf mempool for buffer management
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
X-List-Received-Date: Tue, 19 Mar 2019 07:17:00 -0000

Now the af_xdp registered memory buffer is managed by rte_mempool. An mbuf
allocated from rte_mempool can be converted to an xdp_desc's address and
vice versa.

Signed-off-by: Xiaolong Ye <xiaolong.ye@intel.com>
---
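Note (after the cut, so not part of the commit message): the new
addr_to_mbuf()/mbuf_to_addr() helpers assume a fixed per-frame layout --
a 64-byte mempool header plus a 128-byte struct rte_mbuf (together the
192-byte ETH_AF_XDP_MBUF_OVERHEAD), then RTE_PKTMBUF_HEADROOM (128 bytes),
so packet data starts at offset 320 of every 2048-byte frame. Below is a
minimal standalone sketch of that arithmetic; the constants and names are
illustrative stand-ins, not the driver's own code:

/*
 * Sketch of the umem address <-> mbuf math in addr_to_mbuf() and
 * mbuf_to_addr(), using the per-frame layout assumed by the patch.
 * Compiles and runs as-is; nothing here touches DPDK.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FRAME_SIZE    2048  /* XSK_UMEM__DEFAULT_FRAME_SIZE */
#define MBUF_OVERHEAD  192  /* 64B mempool header + 128B struct rte_mbuf */
#define PKT_HEADROOM   128  /* RTE_PKTMBUF_HEADROOM */

int main(void)
{
	/* umem-relative address the kernel would report in an xdp_desc
	 * for a packet sitting in frame 5 */
	uint64_t addr = 5 * FRAME_SIZE + MBUF_OVERHEAD + PKT_HEADROOM;

	/* addr_to_mbuf(): round down to the owning frame ... */
	uint64_t frame = addr / FRAME_SIZE * FRAME_SIZE;
	/* ... the rte_mbuf header ends where the data room begins ... */
	uint64_t mbuf = frame + MBUF_OVERHEAD - 128; /* 128 = assumed sizeof(struct rte_mbuf) */
	/* ... and the remainder becomes mbuf->data_off */
	uint64_t data_off = addr - frame - MBUF_OVERHEAD;

	/* mbuf_to_addr(): buf_addr (frame base + overhead) + data_off */
	uint64_t addr_again = frame + MBUF_OVERHEAD + data_off;

	assert(addr_again == addr);       /* the conversion round-trips */
	assert(data_off == PKT_HEADROOM);
	printf("frame=%lu mbuf=%lu data_off=%lu\n", (unsigned long)frame,
	       (unsigned long)mbuf, (unsigned long)data_off);
	return 0;
}

The same layout is why the mempool below is created with a data room of
ETH_AF_XDP_FRAME_SIZE - ETH_AF_XDP_MBUF_OVERHEAD and MEMPOOL_F_NO_SPREAD:
64 + 128 + 1856 tiles each 2048-byte frame exactly, one mbuf per frame.
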
 drivers/net/af_xdp/rte_eth_af_xdp.c | 127 +++++++++++++++++-----------
 1 file changed, 78 insertions(+), 49 deletions(-)

diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 96dedc0c4..fc60cb5c5 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -43,7 +43,11 @@
 #define ETH_AF_XDP_FRAME_SIZE		XSK_UMEM__DEFAULT_FRAME_SIZE
 #define ETH_AF_XDP_NUM_BUFFERS		4096
-#define ETH_AF_XDP_DATA_HEADROOM	0
+/* mempool hdrobj size (64 bytes) + sizeof(struct rte_mbuf) (128 bytes) */
+#define ETH_AF_XDP_MBUF_OVERHEAD	192
+/* data start from offset 320 (192 + 128) bytes */
+#define ETH_AF_XDP_DATA_HEADROOM \
+	(ETH_AF_XDP_MBUF_OVERHEAD + RTE_PKTMBUF_HEADROOM)
 #define ETH_AF_XDP_DFLT_NUM_DESCS	XSK_RING_CONS__DEFAULT_NUM_DESCS
 #define ETH_AF_XDP_DFLT_QUEUE_IDX	0
@@ -56,7 +60,7 @@ struct xsk_umem_info {
 	struct xsk_ring_prod fq;
 	struct xsk_ring_cons cq;
 	struct xsk_umem *umem;
-	struct rte_ring *buf_ring;
+	struct rte_mempool *mb_pool;
 	void *buffer;
 };
@@ -110,12 +114,32 @@ static struct rte_eth_link pmd_link = {
 	.link_autoneg = ETH_LINK_AUTONEG
 };
 
+static inline struct rte_mbuf *
+addr_to_mbuf(struct xsk_umem_info *umem, uint64_t addr)
+{
+	uint64_t offset = (addr / ETH_AF_XDP_FRAME_SIZE *
+			   ETH_AF_XDP_FRAME_SIZE);
+	struct rte_mbuf *mbuf = (struct rte_mbuf *)((uint64_t)umem->buffer +
+					offset + ETH_AF_XDP_MBUF_OVERHEAD -
+					sizeof(struct rte_mbuf));
+	mbuf->data_off = addr - offset - ETH_AF_XDP_MBUF_OVERHEAD;
+	return mbuf;
+}
+
+static inline uint64_t
+mbuf_to_addr(struct xsk_umem_info *umem, struct rte_mbuf *mbuf)
+{
+	return (uint64_t)mbuf->buf_addr + mbuf->data_off -
+		(uint64_t)umem->buffer;
+}
+
 static inline int
 reserve_fill_queue(struct xsk_umem_info *umem, int reserve_size)
 {
 	struct xsk_ring_prod *fq = &umem->fq;
+	struct rte_mbuf *mbuf;
 	uint32_t idx;
-	void *addr = NULL;
+	uint64_t addr;
 	int i, ret = 0;
 
 	ret = xsk_ring_prod__reserve(fq, reserve_size, &idx);
@@ -125,11 +149,14 @@ reserve_fill_queue(struct xsk_umem_info *umem, int reserve_size)
 	}
 
 	for (i = 0; i < reserve_size; i++) {
-		rte_ring_dequeue(umem->buf_ring, &addr);
-		*xsk_ring_prod__fill_addr(fq, idx++) = (uint64_t)addr;
+		mbuf = rte_pktmbuf_alloc(umem->mb_pool);
+		if (!mbuf)
+			break;
+		addr = mbuf_to_addr(umem, mbuf);
+		*xsk_ring_prod__fill_addr(fq, idx++) = addr;
 	}
 
-	xsk_ring_prod__submit(fq, reserve_size);
+	xsk_ring_prod__submit(fq, i);
 
 	return 0;
 }
@@ -174,7 +201,7 @@ eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		} else {
 			dropped++;
 		}
-		rte_ring_enqueue(umem->buf_ring, (void *)addr);
+		rte_pktmbuf_free(addr_to_mbuf(umem, addr));
 	}
 
 	xsk_ring_cons__release(rx, rcvd);
@@ -197,9 +224,8 @@ static void pull_umem_cq(struct xsk_umem_info *umem, int size)
 	n = xsk_ring_cons__peek(cq, size, &idx_cq);
 	if (n > 0) {
 		for (i = 0; i < n; i++) {
-			addr = *xsk_ring_cons__comp_addr(cq,
-					idx_cq++);
-			rte_ring_enqueue(umem->buf_ring, (void *)addr);
+			addr = *xsk_ring_cons__comp_addr(cq, idx_cq++);
+			rte_pktmbuf_free(addr_to_mbuf(umem, addr));
 		}
 
 		xsk_ring_cons__release(cq, n);
@@ -236,7 +262,7 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct pkt_tx_queue *txq = queue;
 	struct xsk_umem_info *umem = txq->pair->umem;
 	struct rte_mbuf *mbuf;
-	void *addrs[ETH_AF_XDP_TX_BATCH_SIZE];
+	struct rte_mbuf *mbuf_to_tx;
 	unsigned long tx_bytes = 0;
 	int i, valid = 0;
 	uint32_t idx_tx;
@@ -246,10 +272,6 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
 	pull_umem_cq(umem, nb_pkts);
 
-	nb_pkts = rte_ring_dequeue_bulk(umem->buf_ring, addrs,
-					nb_pkts, NULL);
-	if (!nb_pkts)
-		return 0;
 
 	if (xsk_ring_prod__reserve(&txq->tx, nb_pkts, &idx_tx) != nb_pkts) {
 		kick_tx(txq);
@@ -264,7 +286,12 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx + i);
 		mbuf = bufs[i];
 		if (mbuf->pkt_len <= buf_len) {
-			desc->addr = (uint64_t)addrs[valid];
+			mbuf_to_tx = rte_pktmbuf_alloc(umem->mb_pool);
+			if (!mbuf_to_tx) {
+				rte_pktmbuf_free(mbuf);
+				continue;
+			}
+			desc->addr = mbuf_to_addr(umem, mbuf_to_tx);
 			desc->len = mbuf->pkt_len;
 			pkt = xsk_umem__get_data(umem->buffer,
 						 desc->addr);
@@ -280,10 +307,6 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
 	kick_tx(txq);
 
-	if (valid < nb_pkts)
-		rte_ring_enqueue_bulk(umem->buf_ring, &addrs[valid],
-				      nb_pkts - valid, NULL);
-
 	txq->err_pkts += nb_pkts - valid;
 	txq->tx_pkts += valid;
 	txq->tx_bytes += tx_bytes;
@@ -433,16 +456,29 @@ eth_link_update(struct rte_eth_dev *dev __rte_unused,
 static void xdp_umem_destroy(struct xsk_umem_info *umem)
 {
-	free(umem->buffer);
-	umem->buffer = NULL;
-
-	rte_ring_free(umem->buf_ring);
-	umem->buf_ring = NULL;
+	rte_mempool_free(umem->mb_pool);
+	umem->mb_pool = NULL;
 
 	free(umem);
 	umem = NULL;
 }
 
+static inline uint64_t get_base_addr(struct rte_mempool *mp)
+{
+	struct rte_mempool_memhdr *memhdr;
+
+	memhdr = STAILQ_FIRST(&mp->mem_list);
+	return (uint64_t)(memhdr->addr);
+}
+
+static inline uint64_t get_len(struct rte_mempool *mp)
+{
+	struct rte_mempool_memhdr *memhdr;
+
+	memhdr = STAILQ_FIRST(&mp->mem_list);
+	return (uint64_t)(memhdr->len);
+}
+
 static struct xsk_umem_info *xdp_umem_configure(void)
 {
 	struct xsk_umem_info *umem;
@@ -451,10 +487,9 @@ static struct xsk_umem_info *xdp_umem_configure(void)
 		.comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
 		.frame_size = ETH_AF_XDP_FRAME_SIZE,
 		.frame_headroom = ETH_AF_XDP_DATA_HEADROOM };
-	void *bufs = NULL;
-	char ring_name[0x100];
+	void *base_addr = NULL;
+	char pool_name[0x100];
 	int ret;
-	uint64_t i;
 
 	umem = calloc(1, sizeof(*umem));
 	if (!umem) {
@@ -462,28 +497,23 @@ static struct xsk_umem_info *xdp_umem_configure(void)
 		return NULL;
 	}
 
-	snprintf(ring_name, 0x100, "af_xdp_ring");
-	umem->buf_ring = rte_ring_create(ring_name,
-					 ETH_AF_XDP_NUM_BUFFERS,
-					 SOCKET_ID_ANY,
-					 0x0);
-	if (!umem->buf_ring) {
+	snprintf(pool_name, 0x100, "af_xdp_ring");
+	umem->mb_pool = rte_pktmbuf_pool_create_with_flags(pool_name,
+			ETH_AF_XDP_NUM_BUFFERS,
+			250, 0,
+			ETH_AF_XDP_FRAME_SIZE -
+			ETH_AF_XDP_MBUF_OVERHEAD,
+			MEMPOOL_F_NO_SPREAD | MEMPOOL_F_PAGE_ALIGN,
+			SOCKET_ID_ANY);
+
+	if (!umem->mb_pool || umem->mb_pool->nb_mem_chunks != 1) {
 		RTE_LOG(ERR, AF_XDP,
-			"Failed to create rte_ring\n");
+			"Failed to create rte_mempool\n");
 		goto err;
 	}
+	base_addr = (void *)get_base_addr(umem->mb_pool);
 
-	for (i = 0; i < ETH_AF_XDP_NUM_BUFFERS; i++)
-		rte_ring_enqueue(umem->buf_ring,
-				 (void *)(i * ETH_AF_XDP_FRAME_SIZE +
-					  ETH_AF_XDP_DATA_HEADROOM));
-
-	if (posix_memalign(&bufs, getpagesize(),
-			   ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE)) {
-		RTE_LOG(ERR, AF_XDP, "Failed to allocate memory pool.\n");
-		goto err;
-	}
-	ret = xsk_umem__create(&umem->umem, bufs,
+	ret = xsk_umem__create(&umem->umem, base_addr,
 			       ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
 			       &umem->fq, &umem->cq,
 			       &usr_config);
@@ -492,7 +522,7 @@ static struct xsk_umem_info *xdp_umem_configure(void)
 		RTE_LOG(ERR, AF_XDP, "Failed to create umem");
 		goto err;
 	}
-	umem->buffer = bufs;
+	umem->buffer = base_addr;
 
 	return umem;
@@ -909,8 +939,7 @@ rte_pmd_af_xdp_remove(struct rte_vdev_device *dev)
 
 	internals = eth_dev->data->dev_private;
 
-	rte_ring_free(internals->umem->buf_ring);
-	rte_free(internals->umem->buffer);
+	rte_mempool_free(internals->umem->mb_pool);
 	rte_free(internals->umem);
 
 	rte_eth_dev_release_port(eth_dev);
-- 
2.17.1