From: Olivier Matz <olivier.matz@6wind.com>
To: dev@dpdk.org
Cc: bruce.richardson@intel.com, stephen@networkplumber.org, keith.wiles@intel.com
Date: Wed, 18 May 2016 13:04:43 +0200
Message-Id: <1463569496-31086-23-git-send-email-olivier.matz@6wind.com>
X-Mailer: git-send-email 2.8.0.rc3
In-Reply-To: <1463569496-31086-1-git-send-email-olivier.matz@6wind.com>
References: <1460629199-32489-1-git-send-email-olivier.matz@6wind.com>
 <1463569496-31086-1-git-send-email-olivier.matz@6wind.com>
Subject: [dpdk-dev] [PATCH v3 22/35] mempool: support no hugepage mode

Introduce a new function rte_mempool_populate_virt() that is now called
by default when hugepages are not supported. This function populates the
mempool with several physically contiguous chunks whose minimum size is
the page size of the system.

Thanks to this, rte_mempool_create() will work properly without
hugepages (as long as the object size is smaller than a page), and two
specific workarounds can be removed:

- trailer_size was artificially extended to a page size
- rte_mempool_virt2phy() did not rely on the object physical address

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
---
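Note (illustration only, not part of the commit): with this patch, a
plain rte_mempool_create() is expected to succeed when EAL runs without
hugepages (e.g. started with --no-huge), provided one object (header +
elt + trailer) fits in a page. A minimal sketch; the pool name, element
count, element size and cache size below are arbitrary:

    #include <rte_memory.h>
    #include <rte_mempool.h>

    static struct rte_mempool *
    create_pool_no_huge(void)
    {
            /* 2048-byte elements: header + elt + trailer stays below a
             * 4 KB page, so rte_mempool_populate_virt() can back the
             * pool with page-sized physically contiguous chunks. */
            return rte_mempool_create("test_pool", 4096, 2048,
                    32,             /* per-lcore cache size */
                    0,              /* no private data */
                    NULL, NULL,     /* no pool constructor */
                    NULL, NULL,     /* no object constructor */
                    SOCKET_ID_ANY, 0 /* flags */);
    }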
 lib/librte_mempool/rte_mempool.c | 107 ++++++++++++++++++++++++++++++---------
 lib/librte_mempool/rte_mempool.h |  17 ++-----
 2 files changed, 86 insertions(+), 38 deletions(-)

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index c3abf51..88c5780 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -223,23 +223,6 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
 		sz->trailer_size = new_size - sz->header_size - sz->elt_size;
 	}
 
-	if (! rte_eal_has_hugepages()) {
-		/*
-		 * compute trailer size so that pool elements fit exactly in
-		 * a standard page
-		 */
-		int page_size = getpagesize();
-		int new_size = page_size - sz->header_size - sz->elt_size;
-		if (new_size < 0 || (unsigned int)new_size < sz->trailer_size) {
-			printf("When hugepages are disabled, pool objects "
-				"can't exceed PAGE_SIZE: %d + %d + %d > %d\n",
-				sz->header_size, sz->elt_size, sz->trailer_size,
-				page_size);
-			return 0;
-		}
-		sz->trailer_size = new_size;
-	}
-
 	/* this is the size of an object, including header and trailer */
 	sz->total_size = sz->header_size + sz->elt_size + sz->trailer_size;
 
@@ -511,16 +494,74 @@ rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
 	return cnt;
 }
 
-/* Default function to populate the mempool: allocate memory in mezones,
+/* Populate the mempool with a virtual area. Return the number of
+ * objects added, or a negative value on error.
+ */
+static int
+rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
+	size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
+	void *opaque)
+{
+	phys_addr_t paddr;
+	size_t off, phys_len;
+	int ret, cnt = 0;
+
+	/* mempool must not be populated */
+	if (mp->nb_mem_chunks != 0)
+		return -EEXIST;
+	/* address and len must be page-aligned */
+	if (RTE_PTR_ALIGN_CEIL(addr, pg_sz) != addr)
+		return -EINVAL;
+	if (RTE_ALIGN_CEIL(len, pg_sz) != len)
+		return -EINVAL;
+
+	for (off = 0; off + pg_sz <= len &&
+		mp->populated_size < mp->size; off += phys_len) {
+
+		paddr = rte_mem_virt2phy(addr + off);
+		if (paddr == RTE_BAD_PHYS_ADDR) {
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		/* populate with the largest group of contiguous pages */
+		for (phys_len = pg_sz; off + phys_len < len; phys_len += pg_sz) {
+			phys_addr_t paddr_tmp;
+
+			paddr_tmp = rte_mem_virt2phy(addr + off + phys_len);
+			paddr_tmp = rte_mem_phy2mch(-1, paddr_tmp);
+
+			if (paddr_tmp != paddr + phys_len)
+				break;
+		}
+
+		ret = rte_mempool_populate_phys(mp, addr + off, paddr,
+			phys_len, free_cb, opaque);
+		if (ret < 0)
+			goto fail;
+		/* no need to call the free callback for next chunks */
+		free_cb = NULL;
+		cnt += ret;
+	}
+
+	return cnt;
+
+ fail:
+	rte_mempool_free_memchunks(mp);
+	return ret;
+}
+
+/* Default function to populate the mempool: allocate memory in memzones,
  * and populate them. Return the number of objects added, or a negative
  * value on error.
  */
-static int rte_mempool_populate_default(struct rte_mempool *mp)
+static int
+rte_mempool_populate_default(struct rte_mempool *mp)
 {
 	int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
 	char mz_name[RTE_MEMZONE_NAMESIZE];
 	const struct rte_memzone *mz;
-	size_t size, total_elt_sz, align;
+	size_t size, total_elt_sz, align, pg_sz, pg_shift;
 	unsigned mz_id, n;
 	int ret;
 
@@ -528,10 +569,19 @@ static int rte_mempool_populate_default(struct rte_mempool *mp)
 	if (mp->nb_mem_chunks != 0)
 		return -EEXIST;
 
-	align = RTE_CACHE_LINE_SIZE;
+	if (rte_eal_has_hugepages()) {
+		pg_shift = 0; /* not needed, zone is physically contiguous */
+		pg_sz = 0;
+		align = RTE_CACHE_LINE_SIZE;
+	} else {
+		pg_sz = getpagesize();
+		pg_shift = rte_bsf32(pg_sz);
+		align = pg_sz;
+	}
+
 	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
 	for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
-		size = rte_mempool_xmem_size(n, total_elt_sz, 0);
+		size = rte_mempool_xmem_size(n, total_elt_sz, pg_shift);
 
 		ret = snprintf(mz_name, sizeof(mz_name),
 			RTE_MEMPOOL_MZ_FORMAT "_%d", mp->name, mz_id);
@@ -551,9 +601,16 @@ static int rte_mempool_populate_default(struct rte_mempool *mp)
 			goto fail;
 		}
 
-		ret = rte_mempool_populate_phys(mp, mz->addr, mz->phys_addr,
-			mz->len, rte_mempool_memchunk_mz_free,
-			(void *)(uintptr_t)mz);
+		if (rte_eal_has_hugepages())
+			ret = rte_mempool_populate_phys(mp, mz->addr,
+				mz->phys_addr, mz->len,
+				rte_mempool_memchunk_mz_free,
+				(void *)(uintptr_t)mz);
+		else
+			ret = rte_mempool_populate_virt(mp, mz->addr,
+				mz->len, pg_sz,
+				rte_mempool_memchunk_mz_free,
+				(void *)(uintptr_t)mz);
 		if (ret < 0)
 			goto fail;
 	}
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 3e458b8..e0aa698 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -1150,19 +1150,10 @@ rte_mempool_empty(const struct rte_mempool *mp)
 static inline phys_addr_t
 rte_mempool_virt2phy(__rte_unused const struct rte_mempool *mp, const void *elt)
 {
-	if (rte_eal_has_hugepages()) {
-		const struct rte_mempool_objhdr *hdr;
-		hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
-			sizeof(*hdr));
-		return hdr->physaddr;
-	} else {
-		/*
-		 * If huge pages are disabled, we cannot assume the
-		 * memory region to be physically contiguous.
-		 * Lookup for each element.
-		 */
-		return rte_mem_virt2phy(elt);
-	}
+	const struct rte_mempool_objhdr *hdr;
+	hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
+		sizeof(*hdr));
+	return hdr->physaddr;
 }
 
 /**
-- 
2.8.0.rc3