From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 0A514A2F6B for ; Tue, 8 Oct 2019 11:35:41 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id C4D9D1C0D4; Tue, 8 Oct 2019 11:35:40 +0200 (CEST) Received: from proxy.6wind.com (host.76.145.23.62.rev.coltfrance.com [62.23.145.76]) by dpdk.org (Postfix) with ESMTP id F2EFB1C0AA for ; Tue, 8 Oct 2019 11:35:38 +0200 (CEST) Received: from glumotte.dev.6wind.com. (unknown [10.16.0.195]) by proxy.6wind.com (Postfix) with ESMTP id BD539329977; Tue, 8 Oct 2019 11:35:38 +0200 (CEST) From: Olivier Matz To: dev@dpdk.org Cc: Andrew Rybchenko Date: Tue, 8 Oct 2019 11:34:06 +0200 Message-Id: <20191008093405.23533-1-olivier.matz@6wind.com> X-Mailer: git-send-email 2.20.1 MIME-Version: 1.0 Content-Transfer-Encoding: 8bit Subject: [dpdk-dev] [PATCH] mempool: clarify default populate function X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" No functional change. Clarify the populate function to make the next commit easier to understand. Rename the variables: - to avoid negation in the name - to have more understandable names Remove useless variable (no_pageshift is equivalent to pg_sz == 0). Remove duplicate assignment of "external" variable. 
Signed-off-by: Olivier Matz Reviewed-by: Andrew Rybchenko --- This patch comes from this series: http://patchwork.dpdk.org/project/dpdk/list/?series=5624 lib/librte_mempool/rte_mempool.c | 50 +++++++++++++++++--------------- 1 file changed, 26 insertions(+), 24 deletions(-) diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c index 7260ce0be..0f29e8712 100644 --- a/lib/librte_mempool/rte_mempool.c +++ b/lib/librte_mempool/rte_mempool.c @@ -429,24 +429,18 @@ rte_mempool_populate_default(struct rte_mempool *mp) rte_iova_t iova; unsigned mz_id, n; int ret; - bool no_contig, try_contig, no_pageshift, external; + bool need_iova_contig_obj; + bool try_iova_contig_mempool; + bool alloc_in_ext_mem; ret = mempool_ops_alloc_once(mp); if (ret != 0) return ret; - /* check if we can retrieve a valid socket ID */ - ret = rte_malloc_heap_socket_is_external(mp->socket_id); - if (ret < 0) - return -EINVAL; - external = ret; - /* mempool must not be populated */ if (mp->nb_mem_chunks != 0) return -EEXIST; - no_contig = mp->flags & MEMPOOL_F_NO_IOVA_CONTIG; - /* * the following section calculates page shift and page size values. * @@ -496,16 +490,23 @@ rte_mempool_populate_default(struct rte_mempool *mp) * to go for contiguous memory even if we're in no-huge mode, because * external memory may in fact be IOVA-contiguous. 
*/ - external = rte_malloc_heap_socket_is_external(mp->socket_id) == 1; - no_pageshift = no_contig || - (!external && rte_eal_iova_mode() == RTE_IOVA_VA); - try_contig = !no_contig && !no_pageshift && - (rte_eal_has_hugepages() || external); - if (no_pageshift) { + /* check if we can retrieve a valid socket ID */ + ret = rte_malloc_heap_socket_is_external(mp->socket_id); + if (ret < 0) + return -EINVAL; + alloc_in_ext_mem = (ret == 1); + need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG); + try_iova_contig_mempool = false; + + if (!need_iova_contig_obj) { + pg_sz = 0; + pg_shift = 0; + } else if (!alloc_in_ext_mem && rte_eal_iova_mode() == RTE_IOVA_VA) { pg_sz = 0; pg_shift = 0; - } else if (try_contig) { + } else if (rte_eal_has_hugepages() || alloc_in_ext_mem) { + try_iova_contig_mempool = true; pg_sz = get_min_page_size(mp->socket_id); pg_shift = rte_bsf32(pg_sz); } else { @@ -517,7 +518,7 @@ rte_mempool_populate_default(struct rte_mempool *mp) size_t min_chunk_size; unsigned int flags; - if (try_contig || no_pageshift) + if (try_iova_contig_mempool || pg_sz == 0) mem_size = rte_mempool_ops_calc_mem_size(mp, n, 0, &min_chunk_size, &align); else @@ -541,7 +542,7 @@ rte_mempool_populate_default(struct rte_mempool *mp) /* if we're trying to reserve contiguous memory, add appropriate * memzone flag. */ - if (try_contig) + if (try_iova_contig_mempool) flags |= RTE_MEMZONE_IOVA_CONTIG; mz = rte_memzone_reserve_aligned(mz_name, mem_size, @@ -551,8 +552,9 @@ rte_mempool_populate_default(struct rte_mempool *mp) * minimum required contiguous chunk fits minimum page, adjust * memzone size to the page size, and try again. 
*/ - if (mz == NULL && try_contig && min_chunk_size <= pg_sz) { - try_contig = false; + if (mz == NULL && try_iova_contig_mempool && + min_chunk_size <= pg_sz) { + try_iova_contig_mempool = false; flags &= ~RTE_MEMZONE_IOVA_CONTIG; mem_size = rte_mempool_ops_calc_mem_size(mp, n, @@ -587,12 +589,12 @@ rte_mempool_populate_default(struct rte_mempool *mp) goto fail; } - if (no_contig) - iova = RTE_BAD_IOVA; - else + if (need_iova_contig_obj) iova = mz->iova; + else + iova = RTE_BAD_IOVA; - if (no_pageshift || try_contig) + if (try_iova_contig_mempool || pg_sz == 0) ret = rte_mempool_populate_iova(mp, mz->addr, iova, mz->len, rte_mempool_memchunk_mz_free, -- 2.20.1