From mboxrd@z Thu Jan  1 00:00:00 1970
From: Olivier Matz
To: Vamsi Krishna Attunuru, dev@dpdk.org
Cc: Andrew Rybchenko, Thomas Monjalon, Anatoly Burakov, Jerin Jacob Kollanukkaran, Kokkilagadda, Ferruh Yigit
Date: Fri, 19 Jul 2019 15:38:44 +0200
Message-Id: <20190719133845.32432-4-olivier.matz@6wind.com>
X-Mailer: git-send-email 2.11.0
In-Reply-To: <20190719133845.32432-1-olivier.matz@6wind.com>
References: <20190719133845.32432-1-olivier.matz@6wind.com>
Subject: [dpdk-dev] [RFC 3/4] mempool: introduce function to get mempool page size

In rte_mempool_populate_default(), we determine the page size, which is
needed for calc_size and the allocation of memory. Move this logic into
a function and export it; it will be used in the next commit.

Signed-off-by: Olivier Matz
---
 lib/librte_mempool/rte_mempool.c | 50 +++++++++++++++++++++++++---------------
 lib/librte_mempool/rte_mempool.h |  6 +++++
 2 files changed, 37 insertions(+), 19 deletions(-)

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 335032dc8..7def0ba68 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -414,6 +414,32 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
 	return ret;
 }
 
+int
+rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz)
+{
+	bool need_iova_contig_obj;
+	bool alloc_in_ext_mem;
+	int ret;
+
+	/* check if we can retrieve a valid socket ID */
+	ret = rte_malloc_heap_socket_is_external(mp->socket_id);
+	if (ret < 0)
+		return -EINVAL;
+	alloc_in_ext_mem = (ret == 1);
+	need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
+
+	if (!need_iova_contig_obj)
+		*pg_sz = 0;
+	else if (!alloc_in_ext_mem && rte_eal_iova_mode() == RTE_IOVA_VA)
+		*pg_sz = get_min_page_size(mp->socket_id);
+	else if (rte_eal_has_hugepages() || alloc_in_ext_mem)
+		*pg_sz = get_min_page_size(mp->socket_id);
+	else
+		*pg_sz = getpagesize();
+
+	return 0;
+}
+
 /* Default function to populate the mempool: allocate memory in memzones,
  * and populate them. Return the number of objects added, or a negative
  * value on error.
@@ -425,12 +451,11 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 	char mz_name[RTE_MEMZONE_NAMESIZE];
 	const struct rte_memzone *mz;
 	ssize_t mem_size;
-	size_t align, pg_sz, pg_shift;
+	size_t align, pg_sz, pg_shift = 0;
 	rte_iova_t iova;
 	unsigned mz_id, n;
 	int ret;
 	bool need_iova_contig_obj;
-	bool alloc_in_ext_mem;
 
 	ret = mempool_ops_alloc_once(mp);
 	if (ret != 0)
@@ -482,26 +507,13 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 	 * synonymous with IOVA contiguousness will not hold.
 	 */
 
-	/* check if we can retrieve a valid socket ID */
-	ret = rte_malloc_heap_socket_is_external(mp->socket_id);
-	if (ret < 0)
-		return -EINVAL;
-	alloc_in_ext_mem = (ret == 1);
 	need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
+	ret = rte_mempool_get_page_size(mp, &pg_sz);
+	if (ret < 0)
+		return ret;
 
-	if (!need_iova_contig_obj) {
-		pg_sz = 0;
-		pg_shift = 0;
-	} else if (!alloc_in_ext_mem && rte_eal_iova_mode() == RTE_IOVA_VA) {
-		pg_sz = 0;
-		pg_shift = 0;
-	} else if (rte_eal_has_hugepages() || alloc_in_ext_mem) {
-		pg_sz = get_min_page_size(mp->socket_id);
-		pg_shift = rte_bsf32(pg_sz);
-	} else {
-		pg_sz = getpagesize();
+	if (pg_sz != 0)
 		pg_shift = rte_bsf32(pg_sz);
-	}
 
 	for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
 		size_t min_chunk_size;
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 7bc10e699..00b927989 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -1692,6 +1692,12 @@ uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
 void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
 		      void *arg);
 
+/**
+ * @internal Get page size used for mempool object allocation.
+ */
+int
+rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz);
+
 #ifdef __cplusplus
 }
 #endif
-- 
2.11.0
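
For reference, a minimal usage sketch (not part of the patch) of how a caller, such as the reworked populate code in the next commit, might combine the new helper with rte_bsf32() to derive both the page size and the page shift. The wrapper function name and its variables below are hypothetical.

/*
 * Illustrative sketch only: wrap rte_mempool_get_page_size() to also
 * compute the page shift, mirroring what rte_mempool_populate_default()
 * does in this patch.
 */
#include <rte_common.h>
#include <rte_mempool.h>

static int
example_pg_size_and_shift(struct rte_mempool *mp, size_t *pg_sz,
		size_t *pg_shift)
{
	int ret;

	/* A page size of 0 means there is no page-size constraint to
	 * apply when sizing and populating the mempool. */
	ret = rte_mempool_get_page_size(mp, pg_sz);
	if (ret < 0)
		return ret;

	*pg_shift = (*pg_sz == 0) ? 0 : rte_bsf32(*pg_sz);

	return 0;
}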