From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from dpdk.org (dpdk.org [92.243.14.124])
	by inbox.dpdk.org (Postfix) with ESMTP id 6A0A8A04EF;
	Mon, 25 May 2020 02:38:18 +0200 (CEST)
Received: from [92.243.14.124] (localhost [127.0.0.1])
	by dpdk.org (Postfix) with ESMTP id EA9D61D633;
	Mon, 25 May 2020 02:37:47 +0200 (CEST)
Received: from mail-lj1-f195.google.com (mail-lj1-f195.google.com
 [209.85.208.195]) by dpdk.org (Postfix) with ESMTP id 3FD941D60A
 for <dev@dpdk.org>; Mon, 25 May 2020 02:37:43 +0200 (CEST)
Received: by mail-lj1-f195.google.com with SMTP id z18so18904680lji.12
 for <dev@dpdk.org>; Sun, 24 May 2020 17:37:43 -0700 (PDT)
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20161025;
 h=from:to:cc:subject:date:message-id:in-reply-to:references
 :mime-version:content-transfer-encoding;
 bh=OuLlsLKlboyWCvIV+UHTjW0BA+4VqjsuG/i+G2iCjws=;
 b=mR7qusZOaZQ8di+22TRMIP89XjANN+JiML5P1sbFSK+3xz8UlB/vFTcBeBqAfre40E
 8QyJl14m2p01banQynCOSyKssMQMeZW5+PA8CBovQU62VzrJIQ8pfmOS0drusb5Sr7ok
 VjfZXAbbkg04i9NCgFreQ6uofKONn4ek8x9hlfgsxlTEzX2tw0g1/Vs2BeqDDQdSjE0t
 r/jmTSYkH9XIJjU0p3rEeG/kxaDaSPV+vwPu0iSJ887Ccjt1m88CuvfCRLqPnimfaPqP
 XqoBWN9PLgFV9N6dQtcvN3zSKDXr7j028vsq7SRjLonkguaESNhb9gnWqFsvzqYUufpE
 3I3g==
X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed;
 d=1e100.net; s=20161025;
 h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to
 :references:mime-version:content-transfer-encoding;
 bh=OuLlsLKlboyWCvIV+UHTjW0BA+4VqjsuG/i+G2iCjws=;
 b=MCMw05Otc3zDPTo9eIMkjngQCuZW2DnTHmNyVdfDPSbTS69c/VMj9egdP5fObkzBPI
 thi20XvgUGMejlveYZEioXv/NlSSxtF2Rh1isHpDcG0q6SUeSu+9h8LAIhL9a4KbYJc8
 vKozbFof136xe32hCvtNYMZEwV3kCZEUAr4YMtwOu4wo76R4xtIOIO5vslnltebu6mtF
 uyucxjs/m97IxYS1So7SVC0F/LwO8vpeQBJSOIwEIVHHaAzkKM0iGFQWv+SqVCtEAfFu
 QS3GvPV4haIWJC1lXIcgjODdtxgb1J3uDLBY+bG74sf8bMEG9ksa6SIjqrIvNViUNLIB
 hKGQ==
X-Gm-Message-State: AOAM531Z0EbARlGN0cUnbKviHvB9U/HbslZg6uk/HZ8w1duR6s9t645g
 I0QZ1Bo+FILtRRcaARxqM+WuZxXR1qg3ow==
X-Google-Smtp-Source: ABdhPJzwWfbh+xuILt++Rdu6Vs88priwcm4ZSq19GsfGxYx9yot5HlFGkITjl80oBXMGFn9bl5SNSQ==
X-Received: by 2002:a2e:854d:: with SMTP id u13mr9967449ljj.295.1590367062342; 
 Sun, 24 May 2020 17:37:42 -0700 (PDT)
Received: from localhost.localdomain (broadband-37-110-65-23.ip.moscow.rt.ru.
 [37.110.65.23])
 by smtp.gmail.com with ESMTPSA id n8sm4279526lfb.20.2020.05.24.17.37.41
 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256);
 Sun, 24 May 2020 17:37:41 -0700 (PDT)
From: Dmitry Kozlyuk <dmitry.kozliuk@gmail.com>
To: dev@dpdk.org
Cc: Dmitry Malloy <dmitrym@microsoft.com>,
 Narcisa Ana Maria Vasile <Narcisa.Vasile@microsoft.com>,
 Fady Bader <fady@mellanox.com>, Tal Shnaiderman <talshn@mellanox.com>,
 Dmitry Kozlyuk <dmitry.kozliuk@gmail.com>,
 Anatoly Burakov <anatoly.burakov@intel.com>,
 Bruce Richardson <bruce.richardson@intel.com>
Date: Mon, 25 May 2020 03:37:13 +0300
Message-Id: <20200525003720.6410-5-dmitry.kozliuk@gmail.com>
X-Mailer: git-send-email 2.25.4
In-Reply-To: <20200525003720.6410-1-dmitry.kozliuk@gmail.com>
References: <20200428235015.2820677-1-dmitry.kozliuk@gmail.com>
 <20200525003720.6410-1-dmitry.kozliuk@gmail.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Subject: [dpdk-dev] [PATCH v5 04/11] eal/mem: extract common code for memseg
	list initialization
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
Errors-To: dev-bounces@dpdk.org
Sender: "dev" <dev-bounces@dpdk.org>

All supported OSes create memory segment lists (MSL) and reserve VA space
for them in a nearly identical way. Move the common code into EAL private
functions to reduce duplication.
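
For illustration, the "no-huge" setup that the OS-specific paths now share
boils down to the flow below (a sketch only; example_nohuge_init() is a
hypothetical wrapper, the real call sites are in the diff that follows;
hugepage paths use eal_memseg_list_alloc() to reserve VA space instead of
mapping anonymous memory directly):

    static int
    example_nohuge_init(struct rte_memseg_list *msl, size_t mem_sz)
    {
        uint64_t page_sz = RTE_PGSIZE_4K;
        int n_segs = mem_sz / page_sz;
        void *addr;

        /* create the fbarray backing store and fill MSL metadata */
        if (eal_memseg_list_init_named(msl, "nohugemem", page_sz,
                n_segs, 0, true))
            return -1;

        /* back the list with anonymous memory */
        addr = mmap(NULL, mem_sz, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (addr == MAP_FAILED)
            return -1;

        msl->base_va = addr;
        msl->len = mem_sz;

        /* mark each page-sized segment of the list as used */
        eal_memseg_list_populate(msl, addr, n_segs);

        return 0;
    }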

Signed-off-by: Dmitry Kozlyuk <dmitry.kozliuk@gmail.com>
---
 lib/librte_eal/common/eal_common_memory.c |  92 +++++++++++++++++
 lib/librte_eal/common/eal_private.h       |  62 ++++++++++++
 lib/librte_eal/freebsd/eal_memory.c       |  94 ++++--------------
 lib/librte_eal/linux/eal_memory.c         | 115 +++++-----------------
 4 files changed, 195 insertions(+), 168 deletions(-)

diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index c6243aca1..0ecadd817 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -24,6 +24,7 @@
 #include "eal_private.h"
 #include "eal_internal_cfg.h"
 #include "eal_memcfg.h"
+#include "eal_options.h"
 #include "malloc_heap.h"
 
 /*
@@ -181,6 +182,97 @@ eal_get_virtual_area(void *requested_addr, size_t *size,
 	return aligned_addr;
 }
 
+int
+eal_memseg_list_init_named(struct rte_memseg_list *msl, const char *name,
+		uint64_t page_sz, int n_segs, int socket_id, bool heap)
+{
+	if (rte_fbarray_init(&msl->memseg_arr, name, n_segs,
+			sizeof(struct rte_memseg))) {
+		RTE_LOG(ERR, EAL, "Cannot allocate memseg list: %s\n",
+			rte_strerror(rte_errno));
+		return -1;
+	}
+
+	msl->page_sz = page_sz;
+	msl->socket_id = socket_id;
+	msl->base_va = NULL;
+	msl->heap = heap;
+
+	RTE_LOG(DEBUG, EAL,
+		"Memseg list allocated at socket %i, page size 0x%zxkB\n",
+		socket_id, (size_t)page_sz >> 10);
+
+	return 0;
+}
+
+int
+eal_memseg_list_init(struct rte_memseg_list *msl, uint64_t page_sz,
+		int n_segs, int socket_id, int type_msl_idx, bool heap)
+{
+	char name[RTE_FBARRAY_NAME_LEN];
+
+	snprintf(name, sizeof(name), MEMSEG_LIST_FMT, page_sz >> 10, socket_id,
+		 type_msl_idx);
+
+	return eal_memseg_list_init_named(
+		msl, name, page_sz, n_segs, socket_id, heap);
+}
+
+int
+eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags)
+{
+	uint64_t page_sz;
+	size_t mem_sz;
+	void *addr;
+
+	page_sz = msl->page_sz;
+	mem_sz = page_sz * msl->memseg_arr.len;
+
+	addr = eal_get_virtual_area(
+		msl->base_va, &mem_sz, page_sz, 0, reserve_flags);
+	if (addr == NULL) {
+		if (rte_errno == EADDRNOTAVAIL)
+			RTE_LOG(ERR, EAL, "Cannot reserve %llu bytes at [%p] - "
+				"please use '--" OPT_BASE_VIRTADDR "' option\n",
+				(unsigned long long)mem_sz, msl->base_va);
+		else
+			RTE_LOG(ERR, EAL, "Cannot reserve memory\n");
+		return -1;
+	}
+	msl->base_va = addr;
+	msl->len = mem_sz;
+
+	RTE_LOG(DEBUG, EAL, "VA reserved for memseg list at %p, size %zx\n",
+			addr, mem_sz);
+
+	return 0;
+}
+
+void
+eal_memseg_list_populate(struct rte_memseg_list *msl, void *addr, int n_segs)
+{
+	uint64_t page_sz = msl->page_sz;
+	int i;
+
+	for (i = 0; i < n_segs; i++) {
+		struct rte_fbarray *arr = &msl->memseg_arr;
+		struct rte_memseg *ms = rte_fbarray_get(arr, i);
+
+		if (rte_eal_iova_mode() == RTE_IOVA_VA)
+			ms->iova = (uintptr_t)addr;
+		else
+			ms->iova = RTE_BAD_IOVA;
+		ms->addr = addr;
+		ms->hugepage_sz = page_sz;
+		ms->socket_id = 0;
+		ms->len = page_sz;
+
+		rte_fbarray_set_used(arr, i);
+
+		addr = RTE_PTR_ADD(addr, page_sz);
+	}
+}
+
 static struct rte_memseg *
 virt2memseg(const void *addr, const struct rte_memseg_list *msl)
 {
diff --git a/lib/librte_eal/common/eal_private.h b/lib/librte_eal/common/eal_private.h
index a93850c09..705a60e9c 100644
--- a/lib/librte_eal/common/eal_private.h
+++ b/lib/librte_eal/common/eal_private.h
@@ -254,6 +254,68 @@ void *
 eal_get_virtual_area(void *requested_addr, size_t *size,
 		size_t page_sz, int flags, int reserve_flags);
 
+/**
+ * Initialize a memory segment list and create its backing storage.
+ *
+ * @param msl
+ *  Memory segment list to be filled.
+ * @param name
+ *  Name for the backing storage.
+ * @param page_sz
+ *  Size of segment pages in the MSL.
+ * @param n_segs
+ *  Number of segments.
+ * @param socket_id
+ *  Socket ID. Must not be SOCKET_ID_ANY.
+ * @param heap
+ *  Mark MSL as pointing to a heap.
+ * @return
+ *  0 on success, (-1) on failure and rte_errno is set.
+ */
+int
+eal_memseg_list_init_named(struct rte_memseg_list *msl, const char *name,
+	uint64_t page_sz, int n_segs, int socket_id, bool heap);
+
+/**
+ * Initialize a memory segment list and create its backing storage
+ * with a name corresponding to MSL parameters.
+ *
+ * @param type_msl_idx
+ *  Index of the MSL among other MSLs of the same socket and page size.
+ *
+ * @see eal_memseg_list_init_named for a description of the remaining parameters.
+ */
+int
+eal_memseg_list_init(struct rte_memseg_list *msl, uint64_t page_sz,
+	int n_segs, int socket_id, int type_msl_idx, bool heap);
+
+/**
+ * Reserve VA space for a memory segment list
+ * previously initialized with eal_memseg_list_init().
+ *
+ * @param msl
+ *  Initialized memory segment list with page size defined.
+ * @param reserve_flags
+ *  Extra memory reservation flags. Can be 0 if unnecessary.
+ * @return
+ *  0 on success, (-1) on failure and rte_errno is set.
+ */
+int
+eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags);
+
+/**
+ * Populate the MSL; each segment is one page long.
+ *
+ * @param msl
+ *  Initialized memory segment list with page size defined.
+ * @param addr
+ *  Starting address of list segments.
+ * @param n_segs
+ *  Number of segments to populate.
+ */
+void
+eal_memseg_list_populate(struct rte_memseg_list *msl, void *addr, int n_segs);
+
 /**
  * Get cpu core_id.
  *
diff --git a/lib/librte_eal/freebsd/eal_memory.c b/lib/librte_eal/freebsd/eal_memory.c
index 5bc2da160..29c3ed5a9 100644
--- a/lib/librte_eal/freebsd/eal_memory.c
+++ b/lib/librte_eal/freebsd/eal_memory.c
@@ -66,53 +66,34 @@ rte_eal_hugepage_init(void)
 		struct rte_memseg_list *msl;
 		struct rte_fbarray *arr;
 		struct rte_memseg *ms;
-		uint64_t page_sz;
+		uint64_t mem_sz, page_sz;
 		int n_segs, cur_seg;
 
 		/* create a memseg list */
 		msl = &mcfg->memsegs[0];
 
+		mem_sz = internal_config.memory;
 		page_sz = RTE_PGSIZE_4K;
-		n_segs = internal_config.memory / page_sz;
+		n_segs = mem_sz / page_sz;
 
-		if (rte_fbarray_init(&msl->memseg_arr, "nohugemem", n_segs,
-				sizeof(struct rte_memseg))) {
-			RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
+		if (eal_memseg_list_init_named(
+				msl, "nohugemem", page_sz, n_segs, 0, true)) {
 			return -1;
 		}
 
-		addr = mmap(NULL, internal_config.memory,
-				PROT_READ | PROT_WRITE,
+		addr = mmap(NULL, mem_sz, PROT_READ | PROT_WRITE,
 				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 		if (addr == MAP_FAILED) {
 			RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
 					strerror(errno));
 			return -1;
 		}
-		msl->base_va = addr;
-		msl->page_sz = page_sz;
-		msl->len = internal_config.memory;
-		msl->socket_id = 0;
-		msl->heap = 1;
-
-		/* populate memsegs. each memseg is 1 page long */
-		for (cur_seg = 0; cur_seg < n_segs; cur_seg++) {
-			arr = &msl->memseg_arr;
 
-			ms = rte_fbarray_get(arr, cur_seg);
-			if (rte_eal_iova_mode() == RTE_IOVA_VA)
-				ms->iova = (uintptr_t)addr;
-			else
-				ms->iova = RTE_BAD_IOVA;
-			ms->addr = addr;
-			ms->hugepage_sz = page_sz;
-			ms->len = page_sz;
-			ms->socket_id = 0;
+		msl->base_va = addr;
+		msl->len = mem_sz;
 
-			rte_fbarray_set_used(arr, cur_seg);
+		eal_memseg_list_populate(msl, addr, n_segs);
 
-			addr = RTE_PTR_ADD(addr, page_sz);
-		}
 		return 0;
 	}
 
@@ -336,64 +317,25 @@ get_mem_amount(uint64_t page_sz, uint64_t max_mem)
 	return RTE_ALIGN(area_sz, page_sz);
 }
 
-#define MEMSEG_LIST_FMT "memseg-%" PRIu64 "k-%i-%i"
 static int
-alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,
+memseg_list_init(struct rte_memseg_list *msl, uint64_t page_sz,
 		int n_segs, int socket_id, int type_msl_idx)
 {
-	char name[RTE_FBARRAY_NAME_LEN];
-
-	snprintf(name, sizeof(name), MEMSEG_LIST_FMT, page_sz >> 10, socket_id,
-		 type_msl_idx);
-	if (rte_fbarray_init(&msl->memseg_arr, name, n_segs,
-			sizeof(struct rte_memseg))) {
-		RTE_LOG(ERR, EAL, "Cannot allocate memseg list: %s\n",
-			rte_strerror(rte_errno));
-		return -1;
-	}
-
-	msl->page_sz = page_sz;
-	msl->socket_id = socket_id;
-	msl->base_va = NULL;
-
-	RTE_LOG(DEBUG, EAL, "Memseg list allocated: 0x%zxkB at socket %i\n",
-			(size_t)page_sz >> 10, socket_id);
-
-	return 0;
+	return eal_memseg_list_init(
+		msl, page_sz, n_segs, socket_id, type_msl_idx, false);
 }
 
 static int
-alloc_va_space(struct rte_memseg_list *msl)
+memseg_list_alloc(struct rte_memseg_list *msl)
 {
-	uint64_t page_sz;
-	size_t mem_sz;
-	void *addr;
 	int flags = 0;
 
 #ifdef RTE_ARCH_PPC_64
-	flags |= MAP_HUGETLB;
+	flags |= EAL_RESERVE_HUGEPAGES;
 #endif
-
-	page_sz = msl->page_sz;
-	mem_sz = page_sz * msl->memseg_arr.len;
-
-	addr = eal_get_virtual_area(msl->base_va, &mem_sz, page_sz, 0, flags);
-	if (addr == NULL) {
-		if (rte_errno == EADDRNOTAVAIL)
-			RTE_LOG(ERR, EAL, "Could not mmap %llu bytes at [%p] - "
-				"please use '--" OPT_BASE_VIRTADDR "' option\n",
-				(unsigned long long)mem_sz, msl->base_va);
-		else
-			RTE_LOG(ERR, EAL, "Cannot reserve memory\n");
-		return -1;
-	}
-	msl->base_va = addr;
-	msl->len = mem_sz;
-
-	return 0;
+	return eal_memseg_list_alloc(msl, flags);
 }
 
-
 static int
 memseg_primary_init(void)
 {
@@ -479,7 +421,7 @@ memseg_primary_init(void)
 					cur_max_mem);
 			n_segs = cur_mem / hugepage_sz;
 
-			if (alloc_memseg_list(msl, hugepage_sz, n_segs,
+			if (memseg_list_init(msl, hugepage_sz, n_segs,
 					0, type_msl_idx))
 				return -1;
 
@@ -487,7 +429,7 @@ memseg_primary_init(void)
 			total_type_mem = total_segs * hugepage_sz;
 			type_msl_idx++;
 
-			if (alloc_va_space(msl)) {
+			if (memseg_list_alloc(msl)) {
 				RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list\n");
 				return -1;
 			}
@@ -518,7 +460,7 @@ memseg_secondary_init(void)
 		}
 
 		/* preallocate VA space */
-		if (alloc_va_space(msl)) {
+		if (memseg_list_alloc(msl)) {
 			RTE_LOG(ERR, EAL, "Cannot preallocate VA space for hugepage memory\n");
 			return -1;
 		}
diff --git a/lib/librte_eal/linux/eal_memory.c b/lib/librte_eal/linux/eal_memory.c
index 7a9c97ff8..8b5fe613e 100644
--- a/lib/librte_eal/linux/eal_memory.c
+++ b/lib/librte_eal/linux/eal_memory.c
@@ -802,7 +802,7 @@ get_mem_amount(uint64_t page_sz, uint64_t max_mem)
 }
 
 static int
-free_memseg_list(struct rte_memseg_list *msl)
+memseg_list_free(struct rte_memseg_list *msl)
 {
 	if (rte_fbarray_destroy(&msl->memseg_arr)) {
 		RTE_LOG(ERR, EAL, "Cannot destroy memseg list\n");
@@ -812,58 +812,18 @@ free_memseg_list(struct rte_memseg_list *msl)
 	return 0;
 }
 
-#define MEMSEG_LIST_FMT "memseg-%" PRIu64 "k-%i-%i"
 static int
-alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,
+memseg_list_init(struct rte_memseg_list *msl, uint64_t page_sz,
 		int n_segs, int socket_id, int type_msl_idx)
 {
-	char name[RTE_FBARRAY_NAME_LEN];
-
-	snprintf(name, sizeof(name), MEMSEG_LIST_FMT, page_sz >> 10, socket_id,
-		 type_msl_idx);
-	if (rte_fbarray_init(&msl->memseg_arr, name, n_segs,
-			sizeof(struct rte_memseg))) {
-		RTE_LOG(ERR, EAL, "Cannot allocate memseg list: %s\n",
-			rte_strerror(rte_errno));
-		return -1;
-	}
-
-	msl->page_sz = page_sz;
-	msl->socket_id = socket_id;
-	msl->base_va = NULL;
-	msl->heap = 1; /* mark it as a heap segment */
-
-	RTE_LOG(DEBUG, EAL, "Memseg list allocated: 0x%zxkB at socket %i\n",
-			(size_t)page_sz >> 10, socket_id);
-
-	return 0;
+	return eal_memseg_list_init(
+		msl, page_sz, n_segs, socket_id, type_msl_idx, true);
 }
 
 static int
-alloc_va_space(struct rte_memseg_list *msl)
+memseg_list_alloc(struct rte_memseg_list *msl)
 {
-	uint64_t page_sz;
-	size_t mem_sz;
-	void *addr;
-	int flags = 0;
-
-	page_sz = msl->page_sz;
-	mem_sz = page_sz * msl->memseg_arr.len;
-
-	addr = eal_get_virtual_area(msl->base_va, &mem_sz, page_sz, 0, flags);
-	if (addr == NULL) {
-		if (rte_errno == EADDRNOTAVAIL)
-			RTE_LOG(ERR, EAL, "Could not mmap %llu bytes at [%p] - "
-				"please use '--" OPT_BASE_VIRTADDR "' option\n",
-				(unsigned long long)mem_sz, msl->base_va);
-		else
-			RTE_LOG(ERR, EAL, "Cannot reserve memory\n");
-		return -1;
-	}
-	msl->base_va = addr;
-	msl->len = mem_sz;
-
-	return 0;
+	return eal_memseg_list_alloc(msl, 0);
 }
 
 /*
@@ -1009,12 +969,12 @@ prealloc_segments(struct hugepage_file *hugepages, int n_pages)
 			}
 
 			/* now, allocate fbarray itself */
-			if (alloc_memseg_list(msl, page_sz, n_segs, socket,
+			if (memseg_list_init(msl, page_sz, n_segs, socket,
 						msl_idx) < 0)
 				return -1;
 
 			/* finally, allocate VA space */
-			if (alloc_va_space(msl) < 0)
+			if (memseg_list_alloc(msl) < 0)
 				return -1;
 		}
 	}
@@ -1323,8 +1283,6 @@ eal_legacy_hugepage_init(void)
 	struct rte_mem_config *mcfg;
 	struct hugepage_file *hugepage = NULL, *tmp_hp = NULL;
 	struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
-	struct rte_fbarray *arr;
-	struct rte_memseg *ms;
 
 	uint64_t memory[RTE_MAX_NUMA_NODES];
 
@@ -1343,7 +1301,7 @@ eal_legacy_hugepage_init(void)
 		void *prealloc_addr;
 		size_t mem_sz;
 		struct rte_memseg_list *msl;
-		int n_segs, cur_seg, fd, flags;
+		int n_segs, fd, flags;
 #ifdef MEMFD_SUPPORTED
 		int memfd;
 #endif
@@ -1358,12 +1316,12 @@ eal_legacy_hugepage_init(void)
 		/* create a memseg list */
 		msl = &mcfg->memsegs[0];
 
+		mem_sz = internal_config.memory;
 		page_sz = RTE_PGSIZE_4K;
-		n_segs = internal_config.memory / page_sz;
+		n_segs = mem_sz / page_sz;
 
-		if (rte_fbarray_init(&msl->memseg_arr, "nohugemem", n_segs,
-					sizeof(struct rte_memseg))) {
-			RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
+		if (eal_memseg_list_init_named(
+				msl, "nohugemem", page_sz, n_segs, 0, true)) {
 			return -1;
 		}
 
@@ -1400,16 +1358,10 @@ eal_legacy_hugepage_init(void)
 		/* preallocate address space for the memory, so that it can be
 		 * fit into the DMA mask.
 		 */
-		mem_sz = internal_config.memory;
-		prealloc_addr = eal_get_virtual_area(
-				NULL, &mem_sz, page_sz, 0, 0);
-		if (prealloc_addr == NULL) {
-			RTE_LOG(ERR, EAL,
-					"%s: reserving memory area failed: "
-					"%s\n",
-					__func__, strerror(errno));
+		if (eal_memseg_list_alloc(msl, 0))
 			return -1;
-		}
+
+		prealloc_addr = msl->base_va;
 		addr = mmap(prealloc_addr, mem_sz, PROT_READ | PROT_WRITE,
 				flags | MAP_FIXED, fd, 0);
 		if (addr == MAP_FAILED || addr != prealloc_addr) {
@@ -1418,11 +1370,6 @@ eal_legacy_hugepage_init(void)
 			munmap(prealloc_addr, mem_sz);
 			return -1;
 		}
-		msl->base_va = addr;
-		msl->page_sz = page_sz;
-		msl->socket_id = 0;
-		msl->len = mem_sz;
-		msl->heap = 1;
 
 		/* we're in single-file segments mode, so only the segment list
 		 * fd needs to be set up.
@@ -1434,24 +1381,8 @@ eal_legacy_hugepage_init(void)
 			}
 		}
 
-		/* populate memsegs. each memseg is one page long */
-		for (cur_seg = 0; cur_seg < n_segs; cur_seg++) {
-			arr = &msl->memseg_arr;
+		eal_memseg_list_populate(msl, addr, n_segs);
 
-			ms = rte_fbarray_get(arr, cur_seg);
-			if (rte_eal_iova_mode() == RTE_IOVA_VA)
-				ms->iova = (uintptr_t)addr;
-			else
-				ms->iova = RTE_BAD_IOVA;
-			ms->addr = addr;
-			ms->hugepage_sz = page_sz;
-			ms->socket_id = 0;
-			ms->len = page_sz;
-
-			rte_fbarray_set_used(arr, cur_seg);
-
-			addr = RTE_PTR_ADD(addr, (size_t)page_sz);
-		}
 		if (mcfg->dma_maskbits &&
 		    rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
 			RTE_LOG(ERR, EAL,
@@ -2191,7 +2122,7 @@ memseg_primary_init_32(void)
 						max_pagesz_mem);
 				n_segs = cur_mem / hugepage_sz;
 
-				if (alloc_memseg_list(msl, hugepage_sz, n_segs,
+				if (memseg_list_init(msl, hugepage_sz, n_segs,
 						socket_id, type_msl_idx)) {
 					/* failing to allocate a memseg list is
 					 * a serious error.
@@ -2200,13 +2131,13 @@ memseg_primary_init_32(void)
 					return -1;
 				}
 
-				if (alloc_va_space(msl)) {
+				if (memseg_list_alloc(msl)) {
 					/* if we couldn't allocate VA space, we
 					 * can try with smaller page sizes.
 					 */
 					RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list, retrying with different page size\n");
 					/* deallocate memseg list */
-					if (free_memseg_list(msl))
+					if (memseg_list_free(msl))
 						return -1;
 					break;
 				}
@@ -2395,11 +2326,11 @@ memseg_primary_init(void)
 			}
 			msl = &mcfg->memsegs[msl_idx++];
 
-			if (alloc_memseg_list(msl, pagesz, n_segs,
+			if (memseg_list_init(msl, pagesz, n_segs,
 					socket_id, cur_seglist))
 				goto out;
 
-			if (alloc_va_space(msl)) {
+			if (memseg_list_alloc(msl)) {
 				RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list\n");
 				goto out;
 			}
@@ -2433,7 +2364,7 @@ memseg_secondary_init(void)
 		}
 
 		/* preallocate VA space */
-		if (alloc_va_space(msl)) {
+		if (memseg_list_alloc(msl)) {
 			RTE_LOG(ERR, EAL, "Cannot preallocate VA space for hugepage memory\n");
 			return -1;
 		}
-- 
2.25.4