* [dpdk-dev] [PATCH 2/3] mem: improve memory preallocation on 32-bit
2018-04-20 14:41 [dpdk-dev] [PATCH 1/3] mem: fix 32-bit memory upper limit for non-legacy mode Anatoly Burakov
@ 2018-04-20 14:41 ` Anatoly Burakov
2018-04-20 14:41 ` [dpdk-dev] [PATCH 3/3] mem: improve autodetection of hugepage counts " Anatoly Burakov
` (3 subsequent siblings)
4 siblings, 0 replies; 14+ messages in thread
From: Anatoly Burakov @ 2018-04-20 14:41 UTC (permalink / raw)
To: dev
Previously, if we couldn't preallocate VA space on 32-bit for
one page size, we simply bailed out, even though we could've
tried allocating VA space with other page sizes.
For example, if the user had both 1G and 2M pages enabled, and
had asked DPDK to allocate memory on both sockets, DPDK
would've tried to allocate VA space for 1x1G page on both
sockets, failed and never tried again, even though it
could've allocated the same 1G of VA space for 512x2M pages.
Fix this by retrying with different page sizes if VA space
reservation failed.
Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---
lib/librte_eal/common/eal_common_memory.c | 42 +++++++++++++++++++++++++------
1 file changed, 35 insertions(+), 7 deletions(-)
diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index c0d4673..d819abe 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -142,6 +142,17 @@ get_mem_amount(uint64_t page_sz, uint64_t max_mem)
}
static int
+free_memseg_list(struct rte_memseg_list *msl)
+{
+ if (rte_fbarray_destroy(&msl->memseg_arr)) {
+ RTE_LOG(ERR, EAL, "Cannot destroy memseg list\n");
+ return -1;
+ }
+ memset(msl, 0, sizeof(*msl));
+ return 0;
+}
+
+static int
alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,
uint64_t max_mem, int socket_id, int type_msl_idx)
{
@@ -339,24 +350,41 @@ memseg_primary_init_32(void)
return -1;
}
- msl = &mcfg->memsegs[msl_idx++];
+ msl = &mcfg->memsegs[msl_idx];
if (alloc_memseg_list(msl, hugepage_sz,
max_pagesz_mem, socket_id,
- type_msl_idx))
+ type_msl_idx)) {
+ /* failing to allocate a memseg list is
+ * a serious error.
+ */
+ RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
return -1;
+ }
+
+ if (alloc_va_space(msl)) {
+ /* if we couldn't allocate VA space, we
+ * can try with smaller page sizes.
+ */
+ RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list, retrying with different page size\n");
+ /* deallocate memseg list */
+ if (free_memseg_list(msl))
+ return -1;
+ break;
+ }
total_segs += msl->memseg_arr.len;
cur_pagesz_mem = total_segs * hugepage_sz;
type_msl_idx++;
-
- if (alloc_va_space(msl)) {
- RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list\n");
- return -1;
- }
+ msl_idx++;
}
cur_socket_mem += cur_pagesz_mem;
}
+ if (cur_socket_mem == 0) {
+ RTE_LOG(ERR, EAL, "Cannot allocate VA space on socket %u\n",
+ socket_id);
+ return -1;
+ }
}
return 0;
--
2.7.4
^ permalink raw reply [flat|nested] 14+ messages in thread
* [dpdk-dev] [PATCH 3/3] mem: improve autodetection of hugepage counts on 32-bit
2018-04-20 14:41 [dpdk-dev] [PATCH 1/3] mem: fix 32-bit memory upper limit for non-legacy mode Anatoly Burakov
2018-04-20 14:41 ` [dpdk-dev] [PATCH 2/3] mem: improve memory preallocation on 32-bit Anatoly Burakov
@ 2018-04-20 14:41 ` Anatoly Burakov
2018-04-20 15:12 ` Burakov, Anatoly
2018-04-20 15:25 ` [dpdk-dev] [PATCH v2 1/3] mem: fix 32-bit memory upper limit for non-legacy mode Anatoly Burakov
` (2 subsequent siblings)
4 siblings, 1 reply; 14+ messages in thread
From: Anatoly Burakov @ 2018-04-20 14:41 UTC (permalink / raw)
To: dev
For non-legacy mode, we are preallocating space for hugepages, so
we know in advance which pages we will be able to allocate, and
which we won't. However, the init procedure was using hugepage
counts gathered from sysfs and paid no attention to hugepage
sizes that were actually available for reservation, and failed
on attempts to reserve unavailable pages.
Fix this by limiting total page counts by number of pages
actually preallocated.
Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---
lib/librte_eal/linuxapp/eal/eal_memory.c | 31 +++++++++++++++++++++++++++++++
1 file changed, 31 insertions(+)
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index fadc1de..8eb60cb 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -1603,6 +1603,18 @@ eal_legacy_hugepage_init(void)
return -1;
}
+static int __rte_unused
+hugepage_count_walk(const struct rte_memseg_list *msl, void *arg)
+{
+ struct hugepage_info *hpi = arg;
+
+ if (msl->page_sz != hpi->hugepage_sz)
+ return 0;
+
+ hpi->num_pages[msl->socket_id] += msl->memseg_arr.len;
+ return 0;
+}
+
static int
eal_hugepage_init(void)
{
@@ -1617,10 +1629,29 @@ eal_hugepage_init(void)
for (hp_sz_idx = 0;
hp_sz_idx < (int) internal_config.num_hugepage_sizes;
hp_sz_idx++) {
+#ifndef RTE_ARCH_64
+ struct hugepage_info dummy;
+ unsigned int i;
+#endif
/* also initialize used_hp hugepage sizes in used_hp */
struct hugepage_info *hpi;
hpi = &internal_config.hugepage_info[hp_sz_idx];
used_hp[hp_sz_idx].hugepage_sz = hpi->hugepage_sz;
+
+#ifndef RTE_ARCH_64
+ /* for 32-bit, limit number of pages on socket to whatever we've
+ * preallocated, as we cannot allocate more.
+ */
+ memset(&dummy, 0, sizeof(dummy));
+ dummy.hugepage_sz = hpi->hugepage_sz;
+ if (rte_memseg_list_walk(hugepage_count_walk, &dummy) < 0)
+ return -1;
+
+ for (i = 0; i < RTE_DIM(dummy.num_pages); i++) {
+ hpi->num_pages[i] = RTE_MIN(hpi->num_pages[i],
+ dummy.num_pages[i]);
+ }
+#endif
}
/* make a copy of socket_mem, needed for balanced allocation. */
--
2.7.4
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [dpdk-dev] [PATCH 3/3] mem: improve autodetection of hugepage counts on 32-bit
2018-04-20 14:41 ` [dpdk-dev] [PATCH 3/3] mem: improve autodetection of hugepage counts " Anatoly Burakov
@ 2018-04-20 15:12 ` Burakov, Anatoly
0 siblings, 0 replies; 14+ messages in thread
From: Burakov, Anatoly @ 2018-04-20 15:12 UTC (permalink / raw)
To: dev
On 20-Apr-18 3:41 PM, Anatoly Burakov wrote:
> For non-legacy mode, we are preallocating space for hugepages, so
> we know in advance which pages we will be able to allocate, and
> which we won't. However, the init procedure was using hugepage
> counts gathered from sysfs and paid no attention to hugepage
> sizes that were actually available for reservation, and failed
> on attempts to reserve unavailable pages.
>
> Fix this by limiting total page counts by number of pages
> actually preallocated.
>
> Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
> ---
Oops, didn't update the patch after fixing build.
--
Thanks,
Anatoly
^ permalink raw reply [flat|nested] 14+ messages in thread
* [dpdk-dev] [PATCH v2 1/3] mem: fix 32-bit memory upper limit for non-legacy mode
2018-04-20 14:41 [dpdk-dev] [PATCH 1/3] mem: fix 32-bit memory upper limit for non-legacy mode Anatoly Burakov
2018-04-20 14:41 ` [dpdk-dev] [PATCH 2/3] mem: improve memory preallocation on 32-bit Anatoly Burakov
2018-04-20 14:41 ` [dpdk-dev] [PATCH 3/3] mem: improve autodetection of hugepage counts " Anatoly Burakov
@ 2018-04-20 15:25 ` Anatoly Burakov
2018-04-24 10:19 ` [dpdk-dev] [PATCH v3 " Anatoly Burakov
` (2 more replies)
2018-04-20 15:25 ` [dpdk-dev] [PATCH v2 2/3] mem: improve memory preallocation " Anatoly Burakov
2018-04-20 15:25 ` [dpdk-dev] [PATCH v2 3/3] mem: improve autodetection of hugepage counts " Anatoly Burakov
4 siblings, 3 replies; 14+ messages in thread
From: Anatoly Burakov @ 2018-04-20 15:25 UTC (permalink / raw)
To: dev; +Cc: anatoly.burakov
32-bit mode has an upper limit on amount of VA space it can preallocate,
but the original implementation used the wrong constant, resulting in
failure to initialize due to integer overflow. Fix it by using the
correct constant.
Fixes: 66cc45e293ed ("mem: replace memseg with memseg lists")
Cc: anatoly.burakov@intel.com
Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---
lib/librte_eal/common/eal_common_memory.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index 24a9ed5..c0d4673 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -249,7 +249,7 @@ memseg_primary_init_32(void)
else
total_requested_mem = internal_config.memory;
- max_mem = (uint64_t) RTE_MAX_MEM_MB_PER_TYPE << 20;
+ max_mem = (uint64_t)RTE_MAX_MEM_MB << 20;
if (total_requested_mem > max_mem) {
RTE_LOG(ERR, EAL, "Invalid parameters: 32-bit process can at most use %uM of memory\n",
(unsigned int)(max_mem >> 20));
--
2.7.4
^ permalink raw reply [flat|nested] 14+ messages in thread
* [dpdk-dev] [PATCH v3 1/3] mem: fix 32-bit memory upper limit for non-legacy mode
2018-04-20 15:25 ` [dpdk-dev] [PATCH v2 1/3] mem: fix 32-bit memory upper limit for non-legacy mode Anatoly Burakov
@ 2018-04-24 10:19 ` Anatoly Burakov
2018-04-25 13:26 ` Pattan, Reshma
2018-04-24 10:19 ` [dpdk-dev] [PATCH v3 2/3] mem: improve memory preallocation on 32-bit Anatoly Burakov
2018-04-24 10:19 ` [dpdk-dev] [PATCH v3 3/3] mem: improve autodetection of hugepage counts " Anatoly Burakov
2 siblings, 1 reply; 14+ messages in thread
From: Anatoly Burakov @ 2018-04-24 10:19 UTC (permalink / raw)
To: dev; +Cc: reshma.pattan, anatoly.burakov
32-bit mode has an upper limit on amount of VA space it can preallocate,
but the original implementation used the wrong constant, resulting in
failure to initialize due to integer overflow. Fix it by using the
correct constant.
Fixes: 66cc45e293ed ("mem: replace memseg with memseg lists")
Cc: anatoly.burakov@intel.com
Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---
lib/librte_eal/common/eal_common_memory.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index 24a9ed5..c0d4673 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -249,7 +249,7 @@ memseg_primary_init_32(void)
else
total_requested_mem = internal_config.memory;
- max_mem = (uint64_t) RTE_MAX_MEM_MB_PER_TYPE << 20;
+ max_mem = (uint64_t)RTE_MAX_MEM_MB << 20;
if (total_requested_mem > max_mem) {
RTE_LOG(ERR, EAL, "Invalid parameters: 32-bit process can at most use %uM of memory\n",
(unsigned int)(max_mem >> 20));
--
2.7.4
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [dpdk-dev] [PATCH v3 1/3] mem: fix 32-bit memory upper limit for non-legacy mode
2018-04-24 10:19 ` [dpdk-dev] [PATCH v3 " Anatoly Burakov
@ 2018-04-25 13:26 ` Pattan, Reshma
0 siblings, 0 replies; 14+ messages in thread
From: Pattan, Reshma @ 2018-04-25 13:26 UTC (permalink / raw)
To: Burakov, Anatoly, dev; +Cc: Parthasarathy, JananeeX M
> -----Original Message-----
> From: Burakov, Anatoly
> Sent: Tuesday, April 24, 2018 11:19 AM
> To: dev@dpdk.org
> Cc: Pattan, Reshma <reshma.pattan@intel.com>; Burakov, Anatoly
> <anatoly.burakov@intel.com>
> Subject: [PATCH v3 1/3] mem: fix 32-bit memory upper limit for non-legacy
> mode
>
> 32-bit mode has an upper limit on amount of VA space it can preallocate, but
> the original implementation used the wrong constant, resulting in failure to
> initialize due to integer overflow. Fix it by using the correct constant.
>
> Fixes: 66cc45e293ed ("mem: replace memseg with memseg lists")
> Cc: anatoly.burakov@intel.com
>
> Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
> ---
> lib/librte_eal/common/eal_common_memory.c | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
Tested-by: Jananee Parthasarathy <jananeex.m.parthasarathy@intel.com>
^ permalink raw reply [flat|nested] 14+ messages in thread
* [dpdk-dev] [PATCH v3 2/3] mem: improve memory preallocation on 32-bit
2018-04-20 15:25 ` [dpdk-dev] [PATCH v2 1/3] mem: fix 32-bit memory upper limit for non-legacy mode Anatoly Burakov
2018-04-24 10:19 ` [dpdk-dev] [PATCH v3 " Anatoly Burakov
@ 2018-04-24 10:19 ` Anatoly Burakov
2018-04-25 13:27 ` Pattan, Reshma
2018-04-24 10:19 ` [dpdk-dev] [PATCH v3 3/3] mem: improve autodetection of hugepage counts " Anatoly Burakov
2 siblings, 1 reply; 14+ messages in thread
From: Anatoly Burakov @ 2018-04-24 10:19 UTC (permalink / raw)
To: dev; +Cc: reshma.pattan
Previously, if we couldn't preallocate VA space on 32-bit for
one page size, we simply bailed out, even though we could've
tried allocating VA space with other page sizes.
For example, if the user had both 1G and 2M pages enabled, and
had asked DPDK to allocate memory on both sockets, DPDK
would've tried to allocate VA space for 1x1G page on both
sockets, failed and never tried again, even though it
could've allocated the same 1G of VA space for 512x2M pages.
Fix this by retrying with different page sizes if VA space
reservation failed.
Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---
lib/librte_eal/common/eal_common_memory.c | 42 +++++++++++++++++++++++++------
1 file changed, 35 insertions(+), 7 deletions(-)
diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index c0d4673..d819abe 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -142,6 +142,17 @@ get_mem_amount(uint64_t page_sz, uint64_t max_mem)
}
static int
+free_memseg_list(struct rte_memseg_list *msl)
+{
+ if (rte_fbarray_destroy(&msl->memseg_arr)) {
+ RTE_LOG(ERR, EAL, "Cannot destroy memseg list\n");
+ return -1;
+ }
+ memset(msl, 0, sizeof(*msl));
+ return 0;
+}
+
+static int
alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,
uint64_t max_mem, int socket_id, int type_msl_idx)
{
@@ -339,24 +350,41 @@ memseg_primary_init_32(void)
return -1;
}
- msl = &mcfg->memsegs[msl_idx++];
+ msl = &mcfg->memsegs[msl_idx];
if (alloc_memseg_list(msl, hugepage_sz,
max_pagesz_mem, socket_id,
- type_msl_idx))
+ type_msl_idx)) {
+ /* failing to allocate a memseg list is
+ * a serious error.
+ */
+ RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
return -1;
+ }
+
+ if (alloc_va_space(msl)) {
+ /* if we couldn't allocate VA space, we
+ * can try with smaller page sizes.
+ */
+ RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list, retrying with different page size\n");
+ /* deallocate memseg list */
+ if (free_memseg_list(msl))
+ return -1;
+ break;
+ }
total_segs += msl->memseg_arr.len;
cur_pagesz_mem = total_segs * hugepage_sz;
type_msl_idx++;
-
- if (alloc_va_space(msl)) {
- RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list\n");
- return -1;
- }
+ msl_idx++;
}
cur_socket_mem += cur_pagesz_mem;
}
+ if (cur_socket_mem == 0) {
+ RTE_LOG(ERR, EAL, "Cannot allocate VA space on socket %u\n",
+ socket_id);
+ return -1;
+ }
}
return 0;
--
2.7.4
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [dpdk-dev] [PATCH v3 2/3] mem: improve memory preallocation on 32-bit
2018-04-24 10:19 ` [dpdk-dev] [PATCH v3 2/3] mem: improve memory preallocation on 32-bit Anatoly Burakov
@ 2018-04-25 13:27 ` Pattan, Reshma
0 siblings, 0 replies; 14+ messages in thread
From: Pattan, Reshma @ 2018-04-25 13:27 UTC (permalink / raw)
To: Burakov, Anatoly, dev; +Cc: Parthasarathy, JananeeX M
> -----Original Message-----
> From: Burakov, Anatoly
> Sent: Tuesday, April 24, 2018 11:19 AM
> To: dev@dpdk.org
> Cc: Pattan, Reshma <reshma.pattan@intel.com>
> Subject: [PATCH v3 2/3] mem: improve memory preallocation on 32-bit
>
> Previously, if we couldn't preallocate VA space on 32-bit for one page size, we
> simply bailed out, even though we could've tried allocating VA space with
> other page sizes.
>
> For example, if user had both 1G and 2M pages enabled, and has asked DPDK
> to allocate memory on both sockets, DPDK would've tried to allocate VA
> space for 1x1G page on both sockets, failed and never tried again, even
> though it could've allocated the same 1G of VA space for 512x2M pages.
>
> Fix this by retrying with different page sizes if VA space reservation failed.
>
> Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
> ---
> lib/librte_eal/common/eal_common_memory.c | 42
> +++++++++++++++++++++++++------
> 1 file changed, 35 insertions(+), 7 deletions(-)
>
Tested-by: Jananee Parthasarathy <jananeex.m.parthasarathy@intel.com>
^ permalink raw reply [flat|nested] 14+ messages in thread
* [dpdk-dev] [PATCH v3 3/3] mem: improve autodetection of hugepage counts on 32-bit
2018-04-20 15:25 ` [dpdk-dev] [PATCH v2 1/3] mem: fix 32-bit memory upper limit for non-legacy mode Anatoly Burakov
2018-04-24 10:19 ` [dpdk-dev] [PATCH v3 " Anatoly Burakov
2018-04-24 10:19 ` [dpdk-dev] [PATCH v3 2/3] mem: improve memory preallocation on 32-bit Anatoly Burakov
@ 2018-04-24 10:19 ` Anatoly Burakov
2018-04-25 13:28 ` Pattan, Reshma
2 siblings, 1 reply; 14+ messages in thread
From: Anatoly Burakov @ 2018-04-24 10:19 UTC (permalink / raw)
To: dev; +Cc: reshma.pattan
For non-legacy mode, we are preallocating space for hugepages, so
we know in advance which pages we will be able to allocate, and
which we won't. However, the init procedure was using hugepage
counts gathered from sysfs and paid no attention to hugepage
sizes that were actually available for reservation, and failed
on attempts to reserve unavailable pages.
Fix this by limiting total page counts by number of pages
actually preallocated.
Also, VA preallocate procedure only looks at mountpoints that are
available, and expects pages to exist if a mountpoint exists. That
might not necessarily be the case, so also check if there are
hugepages available for a particular page size on a particular
NUMA node.
Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---
Notes:
v3:
- Added a check to see if pages are actually available before
reserving VA space for them. Only applies to 32-bit non-legacy.
lib/librte_eal/common/eal_common_memory.c | 4 ++++
lib/librte_eal/linuxapp/eal/eal_memory.c | 31 +++++++++++++++++++++++++++++++
2 files changed, 35 insertions(+)
diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index d819abe..991979c 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -329,6 +329,10 @@ memseg_primary_init_32(void)
hpi = &internal_config.hugepage_info[hpi_idx];
hugepage_sz = hpi->hugepage_sz;
+ /* check if pages are actually available */
+ if (hpi->num_pages[socket_id] == 0)
+ continue;
+
max_segs = RTE_MAX_MEMSEG_PER_TYPE;
max_pagesz_mem = max_socket_mem - cur_socket_mem;
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index fadc1de..6f2cdf8 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -1603,6 +1603,18 @@ eal_legacy_hugepage_init(void)
return -1;
}
+static int __rte_unused
+hugepage_count_walk(const struct rte_memseg_list *msl, void *arg)
+{
+ struct hugepage_info *hpi = arg;
+
+ if (msl->page_sz != hpi->hugepage_sz)
+ return 0;
+
+ hpi->num_pages[msl->socket_id] += msl->memseg_arr.len;
+ return 0;
+}
+
static int
eal_hugepage_init(void)
{
@@ -1617,10 +1629,29 @@ eal_hugepage_init(void)
for (hp_sz_idx = 0;
hp_sz_idx < (int) internal_config.num_hugepage_sizes;
hp_sz_idx++) {
+#ifndef RTE_ARCH_64
+ struct hugepage_info dummy;
+ unsigned int i;
+#endif
/* also initialize used_hp hugepage sizes in used_hp */
struct hugepage_info *hpi;
hpi = &internal_config.hugepage_info[hp_sz_idx];
used_hp[hp_sz_idx].hugepage_sz = hpi->hugepage_sz;
+
+#ifndef RTE_ARCH_64
+ /* for 32-bit, limit number of pages on socket to whatever we've
+ * preallocated, as we cannot allocate more.
+ */
+ memset(&dummy, 0, sizeof(dummy));
+ dummy.hugepage_sz = hpi->hugepage_sz;
+ if (rte_memseg_list_walk(hugepage_count_walk, &dummy) < 0)
+ return -1;
+
+ for (i = 0; i < RTE_DIM(dummy.num_pages); i++) {
+ hpi->num_pages[i] = RTE_MIN(hpi->num_pages[i],
+ dummy.num_pages[i]);
+ }
+#endif
}
/* make a copy of socket_mem, needed for balanced allocation. */
--
2.7.4
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [dpdk-dev] [PATCH v3 3/3] mem: improve autodetection of hugepage counts on 32-bit
2018-04-24 10:19 ` [dpdk-dev] [PATCH v3 3/3] mem: improve autodetection of hugepage counts " Anatoly Burakov
@ 2018-04-25 13:28 ` Pattan, Reshma
2018-04-27 21:47 ` Thomas Monjalon
0 siblings, 1 reply; 14+ messages in thread
From: Pattan, Reshma @ 2018-04-25 13:28 UTC (permalink / raw)
To: Burakov, Anatoly, dev; +Cc: Parthasarathy, JananeeX M
> -----Original Message-----
> From: Burakov, Anatoly
> Sent: Tuesday, April 24, 2018 11:19 AM
> To: dev@dpdk.org
> Cc: Pattan, Reshma <reshma.pattan@intel.com>
> Subject: [PATCH v3 3/3] mem: improve autodetection of hugepage counts on
> 32-bit
>
> For non-legacy mode, we are preallocating space for hugepages, so we know
> in advance which pages we will be able to allocate, and which we won't.
> However, the init procedure was using hugepage counts gathered from sysfs
> and paid no attention to hugepage sizes that were actually available for
> reservation, and failed on attempts to reserve unavailable pages.
>
> Fix this by limiting total page counts by number of pages actually
> preallocated.
>
> Also, VA preallocate procedure only looks at mountpoints that are available,
> and expects pages to exist if a mountpoint exists. That might not necessarily
> be the case, so also check if there are hugepages available for a particular
> page size on a particular NUMA node.
>
> Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
> ---
>
Tested-by: Jananee Parthasarathy <jananeex.m.parthasarathy@intel.com>
^ permalink raw reply [flat|nested] 14+ messages in thread
* Re: [dpdk-dev] [PATCH v3 3/3] mem: improve autodetection of hugepage counts on 32-bit
2018-04-25 13:28 ` Pattan, Reshma
@ 2018-04-27 21:47 ` Thomas Monjalon
0 siblings, 0 replies; 14+ messages in thread
From: Thomas Monjalon @ 2018-04-27 21:47 UTC (permalink / raw)
To: Burakov, Anatoly; +Cc: dev, Pattan, Reshma, Parthasarathy, JananeeX M
> > For non-legacy mode, we are preallocating space for hugepages, so we know
> > in advance which pages we will be able to allocate, and which we won't.
> > However, the init procedure was using hugepage counts gathered from sysfs
> > and paid no attention to hugepage sizes that were actually available for
> > reservation, and failed on attempts to reserve unavailable pages.
> >
> > Fix this by limiting total page counts by number of pages actually
> > preallocated.
> >
> > Also, VA preallocate procedure only looks at mountpoints that are available,
> > and expects pages to exist if a mountpoint exists. That might not necessarily
> > be the case, so also check if there are hugepages available for a particular
> > page size on a particular NUMA node.
> >
> > Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
>
> Tested-by: Jananee Parthasarathy <jananeex.m.parthasarathy@intel.com>
Series applied, thanks
^ permalink raw reply [flat|nested] 14+ messages in thread
* [dpdk-dev] [PATCH v2 2/3] mem: improve memory preallocation on 32-bit
2018-04-20 14:41 [dpdk-dev] [PATCH 1/3] mem: fix 32-bit memory upper limit for non-legacy mode Anatoly Burakov
` (2 preceding siblings ...)
2018-04-20 15:25 ` [dpdk-dev] [PATCH v2 1/3] mem: fix 32-bit memory upper limit for non-legacy mode Anatoly Burakov
@ 2018-04-20 15:25 ` Anatoly Burakov
2018-04-20 15:25 ` [dpdk-dev] [PATCH v2 3/3] mem: improve autodetection of hugepage counts " Anatoly Burakov
4 siblings, 0 replies; 14+ messages in thread
From: Anatoly Burakov @ 2018-04-20 15:25 UTC (permalink / raw)
To: dev
Previously, if we couldn't preallocate VA space on 32-bit for
one page size, we simply bailed out, even though we could've
tried allocating VA space with other page sizes.
For example, if the user had both 1G and 2M pages enabled, and
had asked DPDK to allocate memory on both sockets, DPDK
would've tried to allocate VA space for 1x1G page on both
sockets, failed and never tried again, even though it
could've allocated the same 1G of VA space for 512x2M pages.
Fix this by retrying with different page sizes if VA space
reservation failed.
Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---
lib/librte_eal/common/eal_common_memory.c | 42 +++++++++++++++++++++++++------
1 file changed, 35 insertions(+), 7 deletions(-)
diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index c0d4673..d819abe 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -142,6 +142,17 @@ get_mem_amount(uint64_t page_sz, uint64_t max_mem)
}
static int
+free_memseg_list(struct rte_memseg_list *msl)
+{
+ if (rte_fbarray_destroy(&msl->memseg_arr)) {
+ RTE_LOG(ERR, EAL, "Cannot destroy memseg list\n");
+ return -1;
+ }
+ memset(msl, 0, sizeof(*msl));
+ return 0;
+}
+
+static int
alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,
uint64_t max_mem, int socket_id, int type_msl_idx)
{
@@ -339,24 +350,41 @@ memseg_primary_init_32(void)
return -1;
}
- msl = &mcfg->memsegs[msl_idx++];
+ msl = &mcfg->memsegs[msl_idx];
if (alloc_memseg_list(msl, hugepage_sz,
max_pagesz_mem, socket_id,
- type_msl_idx))
+ type_msl_idx)) {
+ /* failing to allocate a memseg list is
+ * a serious error.
+ */
+ RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
return -1;
+ }
+
+ if (alloc_va_space(msl)) {
+ /* if we couldn't allocate VA space, we
+ * can try with smaller page sizes.
+ */
+ RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list, retrying with different page size\n");
+ /* deallocate memseg list */
+ if (free_memseg_list(msl))
+ return -1;
+ break;
+ }
total_segs += msl->memseg_arr.len;
cur_pagesz_mem = total_segs * hugepage_sz;
type_msl_idx++;
-
- if (alloc_va_space(msl)) {
- RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list\n");
- return -1;
- }
+ msl_idx++;
}
cur_socket_mem += cur_pagesz_mem;
}
+ if (cur_socket_mem == 0) {
+ RTE_LOG(ERR, EAL, "Cannot allocate VA space on socket %u\n",
+ socket_id);
+ return -1;
+ }
}
return 0;
--
2.7.4
^ permalink raw reply [flat|nested] 14+ messages in thread
* [dpdk-dev] [PATCH v2 3/3] mem: improve autodetection of hugepage counts on 32-bit
2018-04-20 14:41 [dpdk-dev] [PATCH 1/3] mem: fix 32-bit memory upper limit for non-legacy mode Anatoly Burakov
` (3 preceding siblings ...)
2018-04-20 15:25 ` [dpdk-dev] [PATCH v2 2/3] mem: improve memory preallocation " Anatoly Burakov
@ 2018-04-20 15:25 ` Anatoly Burakov
4 siblings, 0 replies; 14+ messages in thread
From: Anatoly Burakov @ 2018-04-20 15:25 UTC (permalink / raw)
To: dev
For non-legacy mode, we are preallocating space for hugepages, so
we know in advance which pages we will be able to allocate, and
which we won't. However, the init procedure was using hugepage
counts gathered from sysfs and paid no attention to hugepage
sizes that were actually available for reservation, and failed
on attempts to reserve unavailable pages.
Fix this by limiting total page counts by number of pages
actually preallocated.
Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---
lib/librte_eal/linuxapp/eal/eal_memory.c | 31 +++++++++++++++++++++++++++++++
1 file changed, 31 insertions(+)
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index fadc1de..6f2cdf8 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -1603,6 +1603,18 @@ eal_legacy_hugepage_init(void)
return -1;
}
+static int __rte_unused
+hugepage_count_walk(const struct rte_memseg_list *msl, void *arg)
+{
+ struct hugepage_info *hpi = arg;
+
+ if (msl->page_sz != hpi->hugepage_sz)
+ return 0;
+
+ hpi->num_pages[msl->socket_id] += msl->memseg_arr.len;
+ return 0;
+}
+
static int
eal_hugepage_init(void)
{
@@ -1617,10 +1629,29 @@ eal_hugepage_init(void)
for (hp_sz_idx = 0;
hp_sz_idx < (int) internal_config.num_hugepage_sizes;
hp_sz_idx++) {
+#ifndef RTE_ARCH_64
+ struct hugepage_info dummy;
+ unsigned int i;
+#endif
/* also initialize used_hp hugepage sizes in used_hp */
struct hugepage_info *hpi;
hpi = &internal_config.hugepage_info[hp_sz_idx];
used_hp[hp_sz_idx].hugepage_sz = hpi->hugepage_sz;
+
+#ifndef RTE_ARCH_64
+ /* for 32-bit, limit number of pages on socket to whatever we've
+ * preallocated, as we cannot allocate more.
+ */
+ memset(&dummy, 0, sizeof(dummy));
+ dummy.hugepage_sz = hpi->hugepage_sz;
+ if (rte_memseg_list_walk(hugepage_count_walk, &dummy) < 0)
+ return -1;
+
+ for (i = 0; i < RTE_DIM(dummy.num_pages); i++) {
+ hpi->num_pages[i] = RTE_MIN(hpi->num_pages[i],
+ dummy.num_pages[i]);
+ }
+#endif
}
/* make a copy of socket_mem, needed for balanced allocation. */
--
2.7.4
^ permalink raw reply [flat|nested] 14+ messages in thread