From: Xiaolong Ye
To: dev@dpdk.org
Cc: Qi Zhang, Karlsson Magnus, Topel Bjorn, Xiaolong Ye
Date: Thu, 21 Mar 2019 17:18:44 +0800
Message-Id: <20190321091845.78495-5-xiaolong.ye@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20190321091845.78495-1-xiaolong.ye@intel.com>
References: <20190301080947.91086-1-xiaolong.ye@intel.com>
 <20190321091845.78495-1-xiaolong.ye@intel.com>
Subject: [dpdk-dev] [PATCH v3 4/5] net/af_xdp: use mbuf mempool for buffer
 management

Now the af_xdp registered memory buffer is managed by rte_mempool. An mbuf
allocated from the rte_mempool can be converted to an xdp_desc's address and
vice versa.
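As background for the conversion helpers introduced below: each
ETH_AF_XDP_FRAME_SIZE (2048 byte) umem frame now begins with the 64 byte
mempool object header plus the 128 byte struct rte_mbuf (together
ETH_AF_XDP_MBUF_OVERHEAD = 192 bytes), followed by RTE_PKTMBUF_HEADROOM,
so packet data starts 320 bytes into the frame with the default 128 byte
headroom. The following standalone sketch walks through the arithmetic;
the constants are hard-coded here purely for illustration and are not
part of the patch:

    /* Illustrative sketch of the frame-layout arithmetic behind
     * addr_to_mbuf() / mbuf_to_addr(); sizes assumed, not taken
     * from DPDK headers. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FRAME_SIZE    2048  /* XSK_UMEM__DEFAULT_FRAME_SIZE */
    #define MBUF_OVERHEAD  192  /* 64 B obj header + 128 B rte_mbuf */
    #define MBUF_SIZE      128  /* assumed sizeof(struct rte_mbuf) */
    #define HEADROOM       128  /* assumed RTE_PKTMBUF_HEADROOM */

    int main(void)
    {
            /* umem addresses handed to the kernel point at packet data */
            uint64_t addr  = 3 * FRAME_SIZE + MBUF_OVERHEAD + HEADROOM;
            /* round down to the owning frame, as addr_to_mbuf() does */
            uint64_t frame = addr / FRAME_SIZE * FRAME_SIZE;
            /* the rte_mbuf struct ends MBUF_OVERHEAD bytes into the frame */
            uint64_t mbuf  = frame + MBUF_OVERHEAD - MBUF_SIZE;
            /* the remainder is what gets stored in mbuf->data_off */
            uint64_t data_off = addr - frame - MBUF_OVERHEAD;

            printf("frame=%" PRIu64 " mbuf=%" PRIu64 " data_off=%" PRIu64 "\n",
                   frame, mbuf, data_off); /* 6144, 6208, 128 */
            return 0;
    }

Because any descriptor address can be mapped back to its owning frame this
way, an address taken from the completion queue is enough to
rte_pktmbuf_free() the backing mbuf, which is what pull_umem_cq() does in
the diff below.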
Signed-off-by: Xiaolong Ye
---
 drivers/net/af_xdp/rte_eth_af_xdp.c | 123 +++++++++++++++++-----------
 1 file changed, 77 insertions(+), 46 deletions(-)

diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 5e671670a..76783c33d 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -43,7 +43,11 @@
 #define ETH_AF_XDP_FRAME_SIZE		XSK_UMEM__DEFAULT_FRAME_SIZE
 #define ETH_AF_XDP_NUM_BUFFERS		4096
-#define ETH_AF_XDP_DATA_HEADROOM	0
+/* mempool hdrobj size (64 bytes) + sizeof(struct rte_mbuf) (128 bytes) */
+#define ETH_AF_XDP_MBUF_OVERHEAD	192
+/* data start from offset 320 (192 + 128) bytes */
+#define ETH_AF_XDP_DATA_HEADROOM \
+	(ETH_AF_XDP_MBUF_OVERHEAD + RTE_PKTMBUF_HEADROOM)
 #define ETH_AF_XDP_DFLT_NUM_DESCS	XSK_RING_CONS__DEFAULT_NUM_DESCS
 #define ETH_AF_XDP_DFLT_QUEUE_IDX	0
 
@@ -56,7 +60,7 @@ struct xsk_umem_info {
 	struct xsk_ring_prod fq;
 	struct xsk_ring_cons cq;
 	struct xsk_umem *umem;
-	struct rte_ring *buf_ring;
+	struct rte_mempool *mb_pool;
 	void *buffer;
 };
 
@@ -118,12 +122,32 @@ static struct rte_eth_link pmd_link = {
 	.link_autoneg = ETH_LINK_AUTONEG
 };
 
+static inline struct rte_mbuf *
+addr_to_mbuf(struct xsk_umem_info *umem, uint64_t addr)
+{
+	uint64_t offset = (addr / ETH_AF_XDP_FRAME_SIZE *
+			   ETH_AF_XDP_FRAME_SIZE);
+	struct rte_mbuf *mbuf = (struct rte_mbuf *)((uint64_t)umem->buffer +
+				offset + ETH_AF_XDP_MBUF_OVERHEAD -
+				sizeof(struct rte_mbuf));
+	mbuf->data_off = addr - offset - ETH_AF_XDP_MBUF_OVERHEAD;
+	return mbuf;
+}
+
+static inline uint64_t
+mbuf_to_addr(struct xsk_umem_info *umem, struct rte_mbuf *mbuf)
+{
+	return (uint64_t)mbuf->buf_addr + mbuf->data_off -
+		(uint64_t)umem->buffer;
+}
+
 static inline int
 reserve_fill_queue(struct xsk_umem_info *umem, int reserve_size)
 {
 	struct xsk_ring_prod *fq = &umem->fq;
+	struct rte_mbuf *mbuf;
 	uint32_t idx;
-	void *addr = NULL;
+	uint64_t addr;
 	int i, ret;
 
 	ret = xsk_ring_prod__reserve(fq, reserve_size, &idx);
@@ -134,12 +158,17 @@ reserve_fill_queue(struct xsk_umem_info *umem, int reserve_size)
 	for (i = 0; i < reserve_size; i++) {
 		__u64 *fq_addr;
 
-		rte_ring_dequeue(umem->buf_ring, &addr);
+		mbuf = rte_pktmbuf_alloc(umem->mb_pool);
+		if (mbuf == NULL) {
+			i--;
+			break;
+		}
+		addr = mbuf_to_addr(umem, mbuf);
 		fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
-		*fq_addr = (uint64_t)addr;
+		*fq_addr = addr;
 	}
 
-	xsk_ring_prod__submit(fq, reserve_size);
+	xsk_ring_prod__submit(fq, i);
 
 	return 0;
 }
@@ -189,7 +218,7 @@ eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		} else {
 			dropped++;
 		}
-		rte_ring_enqueue(umem->buf_ring, (void *)addr);
+		rte_pktmbuf_free(addr_to_mbuf(umem, addr));
 	}
 
 	xsk_ring_cons__release(rx, rcvd);
@@ -213,7 +242,7 @@ static void pull_umem_cq(struct xsk_umem_info *umem, int size)
 	for (i = 0; i < n; i++) {
 		uint64_t addr;
 		addr = *xsk_ring_cons__comp_addr(cq, idx_cq++);
-		rte_ring_enqueue(umem->buf_ring, (void *)addr);
+		rte_pktmbuf_free(addr_to_mbuf(umem, addr));
 	}
 
 	xsk_ring_cons__release(cq, n);
@@ -242,7 +271,7 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct pkt_tx_queue *txq = queue;
 	struct xsk_umem_info *umem = txq->pair->umem;
 	struct rte_mbuf *mbuf;
-	void *addrs[ETH_AF_XDP_TX_BATCH_SIZE];
+	struct rte_mbuf *mbuf_to_tx;
 	unsigned long tx_bytes = 0;
 	int i, valid = 0;
 	uint32_t idx_tx;
@@ -251,11 +280,6 @@
 	pull_umem_cq(umem, nb_pkts);
 
-	nb_pkts = rte_ring_dequeue_bulk(umem->buf_ring, addrs,
-					nb_pkts, NULL);
-	if (nb_pkts == 0)
-		return 0;
-
 	if (xsk_ring_prod__reserve(&txq->tx, nb_pkts, &idx_tx) != nb_pkts) {
 		kick_tx(txq);
 		return 0;
@@ -269,7 +293,12 @@
 		desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx + i);
 		mbuf = bufs[i];
 		if (mbuf->pkt_len <= buf_len) {
-			desc->addr = (uint64_t)addrs[valid];
+			mbuf_to_tx = rte_pktmbuf_alloc(umem->mb_pool);
+			if (mbuf_to_tx == NULL) {
+				rte_pktmbuf_free(mbuf);
+				continue;
+			}
+			desc->addr = mbuf_to_addr(umem, mbuf_to_tx);
 			desc->len = mbuf->pkt_len;
 			pkt = xsk_umem__get_data(umem->buffer,
 						 desc->addr);
@@ -285,10 +314,6 @@
 	kick_tx(txq);
 
-	if (valid < nb_pkts)
-		rte_ring_enqueue_bulk(umem->buf_ring, &addrs[valid],
-				      nb_pkts - valid, NULL);
-
 	txq->stats.err_pkts += nb_pkts - valid;
 	txq->stats.tx_pkts += valid;
 	txq->stats.tx_bytes += tx_bytes;
@@ -437,16 +462,29 @@ eth_link_update(struct rte_eth_dev *dev __rte_unused,
 static void xdp_umem_destroy(struct xsk_umem_info *umem)
 {
-	free(umem->buffer);
-	umem->buffer = NULL;
-
-	rte_ring_free(umem->buf_ring);
-	umem->buf_ring = NULL;
+	rte_mempool_free(umem->mb_pool);
+	umem->mb_pool = NULL;
 
 	free(umem);
 	umem = NULL;
 }
 
+static inline uint64_t get_base_addr(struct rte_mempool *mp)
+{
+	struct rte_mempool_memhdr *memhdr;
+
+	memhdr = STAILQ_FIRST(&mp->mem_list);
+	return (uint64_t)(memhdr->addr);
+}
+
+static inline uint64_t get_len(struct rte_mempool *mp)
+{
+	struct rte_mempool_memhdr *memhdr;
+
+	memhdr = STAILQ_FIRST(&mp->mem_list);
+	return (uint64_t)(memhdr->len);
+}
+
 static struct xsk_umem_info *xdp_umem_configure(void)
 {
 	struct xsk_umem_info *umem;
@@ -455,9 +493,8 @@ static struct xsk_umem_info *xdp_umem_configure(void)
 		.comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
 		.frame_size = ETH_AF_XDP_FRAME_SIZE,
 		.frame_headroom = ETH_AF_XDP_DATA_HEADROOM };
-	void *bufs = NULL;
+	void *base_addr = NULL;
 	int ret;
-	uint64_t i;
 
 	umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id());
 	if (umem == NULL) {
@@ -465,27 +502,22 @@
 		return NULL;
 	}
 
-	umem->buf_ring = rte_ring_create("af_xdp_ring",
-					 ETH_AF_XDP_NUM_BUFFERS,
-					 SOCKET_ID_ANY,
-					 0x0);
-	if (umem->buf_ring == NULL) {
+	umem->mb_pool = rte_pktmbuf_pool_create_with_flags("af_xdp_mempool",
+			ETH_AF_XDP_NUM_BUFFERS,
+			250, 0,
+			ETH_AF_XDP_FRAME_SIZE -
+			ETH_AF_XDP_MBUF_OVERHEAD,
+			MEMPOOL_F_NO_SPREAD | MEMPOOL_F_PAGE_ALIGN,
+			SOCKET_ID_ANY);
+
+	if (umem->mb_pool == NULL || umem->mb_pool->nb_mem_chunks != 1) {
 		RTE_LOG(ERR, AF_XDP,
-			"Failed to create rte_ring\n");
+			"Failed to create rte_mempool\n");
 		goto err;
 	}
+	base_addr = (void *)get_base_addr(umem->mb_pool);
 
-	for (i = 0; i < ETH_AF_XDP_NUM_BUFFERS; i++)
-		rte_ring_enqueue(umem->buf_ring,
-				 (void *)(i * ETH_AF_XDP_FRAME_SIZE +
-					  ETH_AF_XDP_DATA_HEADROOM));
-
-	if (posix_memalign(&bufs, getpagesize(),
-			   ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE)) {
-		RTE_LOG(ERR, AF_XDP, "Failed to allocate memory pool.\n");
-		goto err;
-	}
-	ret = xsk_umem__create(&umem->umem, bufs,
+	ret = xsk_umem__create(&umem->umem, base_addr,
 			       ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
 			       &umem->fq, &umem->cq,
 			       &usr_config);
@@ -494,7 +526,7 @@
 		RTE_LOG(ERR, AF_XDP, "Failed to create umem");
 		goto err;
 	}
-	umem->buffer = bufs;
+	umem->buffer = base_addr;
 
 	return umem;
 
@@ -911,8 +943,7 @@ rte_pmd_af_xdp_remove(struct rte_vdev_device *dev)
 	internals = eth_dev->data->dev_private;
 
-	rte_ring_free(internals->umem->buf_ring);
-	rte_free(internals->umem->buffer);
+	rte_mempool_free(internals->umem->mb_pool);
 	rte_free(internals->umem);
 
 	rte_eth_dev_release_port(eth_dev);
-- 
2.17.1