* [dpdk-stable] [PATCH] mem: balanced allocation of hugepages
[not found] <CGME20170216130139eucas1p2512567d6f5db9eaac5ee840b56bf920a@eucas1p2.samsung.com>
@ 2017-02-16 13:01 ` Ilya Maximets
2017-02-16 13:26 ` Tan, Jianfeng
` (2 more replies)
0 siblings, 3 replies; 15+ messages in thread
From: Ilya Maximets @ 2017-02-16 13:01 UTC (permalink / raw)
To: dev, David Marchand, Sergio Gonzalez Monroy
Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
Ilya Maximets, stable
Currently, EAL allocates hugepages one by one without paying
attention to which NUMA node the allocation came from.
Such behaviour leads to allocation failures when the number of
hugepages available to the application is limited by cgroups
or hugetlbfs and memory is requested from more than just the
first socket.
Example:
# 90 x 1GB hugepages available in the system
cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
PANIC in rte_eal_init():
Cannot init memory
This happens because all allocated pages are
on socket 0.
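The per-node counters in sysfs show this directly (1GB page size
assumed; the exact hugepages-* directory name depends on the page size):
grep . /sys/devices/system/node/node*/hugepages/hugepages-1048576kB/free_hugepages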
Fix this issue by setting the MPOL_PREFERRED mempolicy for each
hugepage to one of the requested nodes in a round-robin fashion.
This way all allocated pages are fairly distributed between all
requested nodes.
A new config option, RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES, is
introduced and disabled by default because of the external
dependency on libnuma.
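A quick way to try the option (standard make-based build assumed; the
target name below is only an example) is to flip it in the generated
config before compiling:
make config T=x86_64-native-linuxapp-gcc
sed -i 's/NUMA_AWARE_HUGEPAGES=n/NUMA_AWARE_HUGEPAGES=y/' build/.config
make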
Cc: <stable@dpdk.org>
Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---
config/common_base | 1 +
lib/librte_eal/Makefile | 4 ++
lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
mk/rte.app.mk | 3 ++
4 files changed, 74 insertions(+)
diff --git a/config/common_base b/config/common_base
index 71a4fcb..fbcebbd 100644
--- a/config/common_base
+++ b/config/common_base
@@ -97,6 +97,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_EAL_VFIO=n
CONFIG_RTE_MALLOC_DEBUG=n
+CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n
# Default driver path (or "" to disable)
CONFIG_RTE_EAL_PMD_PATH=""
diff --git a/lib/librte_eal/Makefile b/lib/librte_eal/Makefile
index cf11a09..5ae3846 100644
--- a/lib/librte_eal/Makefile
+++ b/lib/librte_eal/Makefile
@@ -35,4 +35,8 @@ DIRS-y += common
DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += linuxapp
DIRS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += bsdapp
+ifeq ($(CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+LDLIBS += -lnuma
+endif
+
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index a956bb2..8536a36 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -82,6 +82,9 @@
#include <sys/time.h>
#include <signal.h>
#include <setjmp.h>
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numaif.h>
+#endif
#include <rte_log.h>
#include <rte_memory.h>
@@ -359,6 +362,21 @@ static int huge_wrap_sigsetjmp(void)
return sigsetjmp(huge_jmpenv, 1);
}
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+#ifndef ULONG_SIZE
+#define ULONG_SIZE sizeof(unsigned long)
+#endif
+#ifndef ULONG_BITS
+#define ULONG_BITS (ULONG_SIZE * CHAR_BIT)
+#endif
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, ULONG_SIZE)
+#endif
+#endif
+
/*
* Mmap all hugepages of hugepage table: it first open a file in
* hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -375,10 +393,48 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
void *virtaddr;
void *vma_addr = NULL;
size_t vma_len = 0;
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+ unsigned long nodemask[BITS_TO_LONGS(RTE_MAX_NUMA_NODES)] = {0UL};
+ unsigned long maxnode = 0;
+ int node_id = -1;
+
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ if (internal_config.socket_mem[i])
+ maxnode = i + 1;
+#endif
for (i = 0; i < hpi->num_pages[0]; i++) {
uint64_t hugepage_sz = hpi->hugepage_sz;
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode) {
+ node_id = (node_id + 1) % RTE_MAX_NUMA_NODES;
+ while (!internal_config.socket_mem[node_id])
+ node_id = (node_id + 1) % RTE_MAX_NUMA_NODES;
+
+ nodemask[node_id / ULONG_BITS] =
+ 1UL << (node_id % ULONG_BITS);
+
+ RTE_LOG(DEBUG, EAL,
+ "Setting policy MPOL_PREFERRED for socket %d\n",
+ node_id);
+ /*
+ * Due to old linux kernel bug (feature?) we have to
+ * increase maxnode by 1. It will be unconditionally
+ * decreased back to normal value inside the syscall
+ * handler.
+ */
+ if (set_mempolicy(MPOL_PREFERRED,
+ nodemask, maxnode + 1) < 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to set policy MPOL_PREFERRED: "
+ "%s\n", strerror(errno));
+ return i;
+ }
+
+ nodemask[node_id / ULONG_BITS] = 0UL;
+ }
+#endif
if (orig) {
hugepg_tbl[i].file_id = i;
hugepg_tbl[i].size = hugepage_sz;
@@ -489,6 +545,10 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
vma_len -= hugepage_sz;
}
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode && set_mempolicy(MPOL_DEFAULT, NULL, 0) < 0)
+ RTE_LOG(ERR, EAL, "Failed to set mempolicy MPOL_DEFAULT\n");
+#endif
return i;
}
@@ -573,6 +634,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
if (hugepg_tbl[i].orig_va == va) {
hugepg_tbl[i].socket_id = socket_id;
hp_count++;
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+ RTE_LOG(DEBUG, EAL,
+ "Hugepage %s is on socket %d\n",
+ hugepg_tbl[i].filepath, socket_id);
+#endif
}
}
}
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 92f3635..c2153b9 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -159,6 +159,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
# The static libraries do not know their dependencies.
# So linking with static library requires explicit dependencies.
_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrt
+ifeq ($(CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lnuma
+endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt
_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lm
--
2.7.4
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [dpdk-stable] [PATCH] mem: balanced allocation of hugepages
2017-02-16 13:01 ` [dpdk-stable] [PATCH] mem: balanced allocation of hugepages Ilya Maximets
@ 2017-02-16 13:26 ` Tan, Jianfeng
2017-02-16 13:55 ` Ilya Maximets
2017-02-16 13:31 ` [dpdk-stable] [dpdk-dev] " Bruce Richardson
2017-03-06 9:34 ` [dpdk-stable] " Ilya Maximets
2 siblings, 1 reply; 15+ messages in thread
From: Tan, Jianfeng @ 2017-02-16 13:26 UTC (permalink / raw)
To: Ilya Maximets, dev, David Marchand, Gonzalez Monroy, Sergio
Cc: Heetae Ahn, Yuanhan Liu, Neil Horman, Pei, Yulong, stable
Hi,
> -----Original Message-----
> From: Ilya Maximets [mailto:i.maximets@samsung.com]
> Sent: Thursday, February 16, 2017 9:01 PM
> To: dev@dpdk.org; David Marchand; Gonzalez Monroy, Sergio
> Cc: Heetae Ahn; Yuanhan Liu; Tan, Jianfeng; Neil Horman; Pei, Yulong; Ilya
> Maximets; stable@dpdk.org
> Subject: [PATCH] mem: balanced allocation of hugepages
>
> Currently EAL allocates hugepages one by one not paying
> attention from which NUMA node allocation was done.
>
> Such behaviour leads to allocation failure if number of
> available hugepages for application limited by cgroups
> or hugetlbfs and memory requested not only from the first
> socket.
>
> Example:
> # 90 x 1GB hugepages availavle in a system
>
> cgcreate -g hugetlb:/test
> # Limit to 32GB of hugepages
> cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
> # Request 4GB from each of 2 sockets
> cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>
> EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
> EAL: 32 not 90 hugepages of size 1024 MB allocated
> EAL: Not enough memory available on socket 1!
> Requested: 4096MB, available: 0MB
> PANIC in rte_eal_init():
> Cannot init memory
>
> This happens beacause all allocated pages are
> on socket 0.
For such a use case, why not just use "numactl --interleave=0,1 <DPDK app> xxx"?
Do you see a use case like --socket-mem 2048,1024 where only three 1GB hugepages are allowed?
Thanks,
Jianfeng
>
> Fix this issue by setting mempolicy MPOL_PREFERRED for each
> hugepage to one of requested nodes in a round-robin fashion.
> In this case all allocated pages will be fairly distributed
> between all requested nodes.
>
> New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> introduced and disabled by default because of external
> dependency from libnuma.
>
> Cc: <stable@dpdk.org>
> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
>
> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
> ---
> config/common_base | 1 +
> lib/librte_eal/Makefile | 4 ++
> lib/librte_eal/linuxapp/eal/eal_memory.c | 66
> ++++++++++++++++++++++++++++++++
> mk/rte.app.mk | 3 ++
> 4 files changed, 74 insertions(+)
>
> diff --git a/config/common_base b/config/common_base
> index 71a4fcb..fbcebbd 100644
> --- a/config/common_base
> +++ b/config/common_base
> @@ -97,6 +97,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
> CONFIG_RTE_EAL_IGB_UIO=n
> CONFIG_RTE_EAL_VFIO=n
> CONFIG_RTE_MALLOC_DEBUG=n
> +CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n
>
> # Default driver path (or "" to disable)
> CONFIG_RTE_EAL_PMD_PATH=""
> diff --git a/lib/librte_eal/Makefile b/lib/librte_eal/Makefile
> index cf11a09..5ae3846 100644
> --- a/lib/librte_eal/Makefile
> +++ b/lib/librte_eal/Makefile
> @@ -35,4 +35,8 @@ DIRS-y += common
> DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += linuxapp
> DIRS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += bsdapp
>
> +ifeq ($(CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES),y)
> +LDLIBS += -lnuma
> +endif
> +
> include $(RTE_SDK)/mk/rte.subdir.mk
> diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c
> b/lib/librte_eal/linuxapp/eal/eal_memory.c
> index a956bb2..8536a36 100644
> --- a/lib/librte_eal/linuxapp/eal/eal_memory.c
> +++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
> @@ -82,6 +82,9 @@
> #include <sys/time.h>
> #include <signal.h>
> #include <setjmp.h>
> +#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> +#include <numaif.h>
> +#endif
>
> #include <rte_log.h>
> #include <rte_memory.h>
> @@ -359,6 +362,21 @@ static int huge_wrap_sigsetjmp(void)
> return sigsetjmp(huge_jmpenv, 1);
> }
>
> +#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> +#ifndef ULONG_SIZE
> +#define ULONG_SIZE sizeof(unsigned long)
> +#endif
> +#ifndef ULONG_BITS
> +#define ULONG_BITS (ULONG_SIZE * CHAR_BIT)
> +#endif
> +#ifndef DIV_ROUND_UP
> +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
> +#endif
> +#ifndef BITS_TO_LONGS
> +#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, ULONG_SIZE)
> +#endif
> +#endif
> +
> /*
> * Mmap all hugepages of hugepage table: it first open a file in
> * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
> @@ -375,10 +393,48 @@ map_all_hugepages(struct hugepage_file
> *hugepg_tbl,
> void *virtaddr;
> void *vma_addr = NULL;
> size_t vma_len = 0;
> +#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> + unsigned long
> nodemask[BITS_TO_LONGS(RTE_MAX_NUMA_NODES)] = {0UL};
> + unsigned long maxnode = 0;
> + int node_id = -1;
> +
> + for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
> + if (internal_config.socket_mem[i])
> + maxnode = i + 1;
> +#endif
>
> for (i = 0; i < hpi->num_pages[0]; i++) {
> uint64_t hugepage_sz = hpi->hugepage_sz;
>
> +#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> + if (maxnode) {
> + node_id = (node_id + 1) % RTE_MAX_NUMA_NODES;
> + while (!internal_config.socket_mem[node_id])
> + node_id = (node_id + 1) %
> RTE_MAX_NUMA_NODES;
> +
> + nodemask[node_id / ULONG_BITS] =
> + 1UL << (node_id %
> ULONG_BITS);
> +
> + RTE_LOG(DEBUG, EAL,
> + "Setting policy MPOL_PREFERRED for
> socket %d\n",
> + node_id);
> + /*
> + * Due to old linux kernel bug (feature?) we have to
> + * increase maxnode by 1. It will be unconditionally
> + * decreased back to normal value inside the syscall
> + * handler.
> + */
> + if (set_mempolicy(MPOL_PREFERRED,
> + nodemask, maxnode + 1) < 0) {
> + RTE_LOG(ERR, EAL,
> + "Failed to set policy
> MPOL_PREFERRED: "
> + "%s\n", strerror(errno));
> + return i;
> + }
> +
> + nodemask[node_id / ULONG_BITS] = 0UL;
> + }
> +#endif
> if (orig) {
> hugepg_tbl[i].file_id = i;
> hugepg_tbl[i].size = hugepage_sz;
> @@ -489,6 +545,10 @@ map_all_hugepages(struct hugepage_file
> *hugepg_tbl,
> vma_len -= hugepage_sz;
> }
>
> +#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> + if (maxnode && set_mempolicy(MPOL_DEFAULT, NULL, 0) < 0)
> + RTE_LOG(ERR, EAL, "Failed to set mempolicy
> MPOL_DEFAULT\n");
> +#endif
> return i;
> }
>
> @@ -573,6 +634,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl,
> struct hugepage_info *hpi)
> if (hugepg_tbl[i].orig_va == va) {
> hugepg_tbl[i].socket_id = socket_id;
> hp_count++;
> +#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> + RTE_LOG(DEBUG, EAL,
> + "Hugepage %s is on socket %d\n",
> + hugepg_tbl[i].filepath, socket_id);
> +#endif
> }
> }
> }
> diff --git a/mk/rte.app.mk b/mk/rte.app.mk
> index 92f3635..c2153b9 100644
> --- a/mk/rte.app.mk
> +++ b/mk/rte.app.mk
> @@ -159,6 +159,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
> # The static libraries do not know their dependencies.
> # So linking with static library requires explicit dependencies.
> _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrt
> +ifeq ($(CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES),y)
> +_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lnuma
> +endif
> _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm
> _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt
> _LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lm
> --
> 2.7.4
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [dpdk-stable] [dpdk-dev] [PATCH] mem: balanced allocation of hugepages
2017-02-16 13:01 ` [dpdk-stable] [PATCH] mem: balanced allocation of hugepages Ilya Maximets
2017-02-16 13:26 ` Tan, Jianfeng
@ 2017-02-16 13:31 ` Bruce Richardson
2017-03-06 9:34 ` [dpdk-stable] " Ilya Maximets
2 siblings, 0 replies; 15+ messages in thread
From: Bruce Richardson @ 2017-02-16 13:31 UTC (permalink / raw)
To: Ilya Maximets
Cc: dev, David Marchand, Sergio Gonzalez Monroy, Heetae Ahn,
Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei, stable
On Thu, Feb 16, 2017 at 04:01:10PM +0300, Ilya Maximets wrote:
> Currently EAL allocates hugepages one by one not paying
> attention from which NUMA node allocation was done.
>
> Such behaviour leads to allocation failure if number of
> available hugepages for application limited by cgroups
> or hugetlbfs and memory requested not only from the first
> socket.
>
> Example:
> # 90 x 1GB hugepages availavle in a system
>
> cgcreate -g hugetlb:/test
> # Limit to 32GB of hugepages
> cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
> # Request 4GB from each of 2 sockets
> cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>
> EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
> EAL: 32 not 90 hugepages of size 1024 MB allocated
> EAL: Not enough memory available on socket 1!
> Requested: 4096MB, available: 0MB
> PANIC in rte_eal_init():
> Cannot init memory
>
> This happens beacause all allocated pages are
> on socket 0.
>
> Fix this issue by setting mempolicy MPOL_PREFERRED for each
> hugepage to one of requested nodes in a round-robin fashion.
> In this case all allocated pages will be fairly distributed
> between all requested nodes.
>
> New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> introduced and disabled by default because of external
> dependency from libnuma.
>
I think this highlights a general technical problem we need to resolve
in DPDK. If we want to add support for a new feature in DPDK by
leveraging functionality in an existing library, we are caught in a sort
of catch-22:
* If we want to leverage the existing library, we have to have the
feature off-by-default, as we don't want to increase the minimum
requirements for DPDK.
* If we want the feature enabled by default we need to avoid the
dependency, and so reimplement some or all of the functionality inside
DPDK itself. That will be rejected on the basis that it duplicates
existing library functionality.
I suspect the solution to this is more dynamic build-time configuration
to start enabling things based on installed dependencies, but I'm open
to other opinions. I see a gap here, however.
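Just to make that concrete, a build-time probe could look roughly like
the following (illustrative only: the variable name and hard-coded
header path are assumptions, not existing DPDK build infrastructure):
# Illustrative sketch: enable the NUMA-aware hugepage code only when
# libnuma development files are found on the build machine.
NUMAIF_HDR := $(wildcard /usr/include/numaif.h)
ifneq ($(NUMAIF_HDR),)
CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES := y
LDLIBS += -lnuma
endif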
/Bruce
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [dpdk-stable] [PATCH] mem: balanced allocation of hugepages
2017-02-16 13:26 ` Tan, Jianfeng
@ 2017-02-16 13:55 ` Ilya Maximets
2017-02-16 13:57 ` Ilya Maximets
0 siblings, 1 reply; 15+ messages in thread
From: Ilya Maximets @ 2017-02-16 13:55 UTC (permalink / raw)
To: Tan, Jianfeng, dev, David Marchand, Gonzalez Monroy, Sergio
Cc: Heetae Ahn, Yuanhan Liu, Neil Horman, Pei, Yulong, stable
Hi,
On 16.02.2017 16:26, Tan, Jianfeng wrote:
> Hi,
>
>> -----Original Message-----
>> From: Ilya Maximets [mailto:i.maximets@samsung.com]
>> Sent: Thursday, February 16, 2017 9:01 PM
>> To: dev@dpdk.org; David Marchand; Gonzalez Monroy, Sergio
>> Cc: Heetae Ahn; Yuanhan Liu; Tan, Jianfeng; Neil Horman; Pei, Yulong; Ilya
>> Maximets; stable@dpdk.org
>> Subject: [PATCH] mem: balanced allocation of hugepages
>>
>> Currently EAL allocates hugepages one by one not paying
>> attention from which NUMA node allocation was done.
>>
>> Such behaviour leads to allocation failure if number of
>> available hugepages for application limited by cgroups
>> or hugetlbfs and memory requested not only from the first
>> socket.
>>
>> Example:
>> # 90 x 1GB hugepages availavle in a system
>>
>> cgcreate -g hugetlb:/test
>> # Limit to 32GB of hugepages
>> cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
>> # Request 4GB from each of 2 sockets
>> cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>>
>> EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
>> EAL: 32 not 90 hugepages of size 1024 MB allocated
>> EAL: Not enough memory available on socket 1!
>> Requested: 4096MB, available: 0MB
>> PANIC in rte_eal_init():
>> Cannot init memory
>>
>> This happens beacause all allocated pages are
>> on socket 0.
>
> For such an use case, why not just use "numactl --interleave=0,1 <DPDK app> xxx"?
Unfortunately, the interleave policy doesn't work for me. I suspect the kernel
configuration blocks this, or I don't understand something in the kernel internals.
I'm using the 3.10 RT kernel from RHEL 7.
I tried to set up MPOL_INTERLEAVE in code and it doesn't work for me. Your example
with numactl doesn't work either:
# Limited to 8GB of hugepages
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096
EAL: Setting up physically contiguous memory...
EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 8 not 90 hugepages of size 1024 MB allocated
EAL: Hugepage /dev/hugepages/rtemap_0 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_1 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_2 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_3 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_4 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_5 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_6 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_7 is on socket 0
EAL: Not enough memory available on socket 1! Requested: 4096MB, available: 0MB
PANIC in rte_eal_init():
Cannot init memory
Also, using numactl will affect all the allocations in the application. This may
cause additional unexpected issues.
>
> Do you see use case like --socket-mem 2048,1024 and only three 1GB-hugepage are allowed?
This case will work with my patch.
But the opposite one, '--socket-mem=1024,2048', will fail.
To be clear, we would need to allocate all required memory first
from each NUMA node and then allocate all other available pages
in a round-robin fashion. But such a solution looks a little ugly.
What do you think?
Best regards, Ilya Maximets.
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [dpdk-stable] [PATCH] mem: balanced allocation of hugepages
2017-02-16 13:55 ` Ilya Maximets
@ 2017-02-16 13:57 ` Ilya Maximets
0 siblings, 0 replies; 15+ messages in thread
From: Ilya Maximets @ 2017-02-16 13:57 UTC (permalink / raw)
To: Tan, Jianfeng, dev, David Marchand, Gonzalez Monroy, Sergio
Cc: Heetae Ahn, Yuanhan Liu, Neil Horman, Pei, Yulong, stable
On 16.02.2017 16:55, Ilya Maximets wrote:
> Hi,
>
> On 16.02.2017 16:26, Tan, Jianfeng wrote:
>> Hi,
>>
>>> -----Original Message-----
>>> From: Ilya Maximets [mailto:i.maximets@samsung.com]
>>> Sent: Thursday, February 16, 2017 9:01 PM
>>> To: dev@dpdk.org; David Marchand; Gonzalez Monroy, Sergio
>>> Cc: Heetae Ahn; Yuanhan Liu; Tan, Jianfeng; Neil Horman; Pei, Yulong; Ilya
>>> Maximets; stable@dpdk.org
>>> Subject: [PATCH] mem: balanced allocation of hugepages
>>>
>>> Currently EAL allocates hugepages one by one not paying
>>> attention from which NUMA node allocation was done.
>>>
>>> Such behaviour leads to allocation failure if number of
>>> available hugepages for application limited by cgroups
>>> or hugetlbfs and memory requested not only from the first
>>> socket.
>>>
>>> Example:
>>> # 90 x 1GB hugepages availavle in a system
>>>
>>> cgcreate -g hugetlb:/test
>>> # Limit to 32GB of hugepages
>>> cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
>>> # Request 4GB from each of 2 sockets
>>> cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>>>
>>> EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
>>> EAL: 32 not 90 hugepages of size 1024 MB allocated
>>> EAL: Not enough memory available on socket 1!
>>> Requested: 4096MB, available: 0MB
>>> PANIC in rte_eal_init():
>>> Cannot init memory
>>>
>>> This happens beacause all allocated pages are
>>> on socket 0.
>>
>> For such an use case, why not just use "numactl --interleave=0,1 <DPDK app> xxx"?
>
> Unfortunately, interleave policy doesn't work for me. I suspect kernel configuration
> blocks this or I don't understand something in kernel internals.
> I'm using 3.10 rt kernel from rhel7.
>
> I tried to set up MPOL_INTERLEAVE in code and it doesn't work for me. Your example
> with numactl doesn't work too:
>
> # Limited to 8GB of hugepages
> cgexec -g hugetlb:test testpmd --socket-mem=4096,4096
Sorry,
cgexec -g hugetlb:test numactl --interleave=0,1 ./testpmd --socket-mem=4096,4096 ..
>
> EAL: Setting up physically contiguous memory...
> EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
> EAL: 8 not 90 hugepages of size 1024 MB allocated
> EAL: Hugepage /dev/hugepages/rtemap_0 is on socket 0
> EAL: Hugepage /dev/hugepages/rtemap_1 is on socket 0
> EAL: Hugepage /dev/hugepages/rtemap_2 is on socket 0
> EAL: Hugepage /dev/hugepages/rtemap_3 is on socket 0
> EAL: Hugepage /dev/hugepages/rtemap_4 is on socket 0
> EAL: Hugepage /dev/hugepages/rtemap_5 is on socket 0
> EAL: Hugepage /dev/hugepages/rtemap_6 is on socket 0
> EAL: Hugepage /dev/hugepages/rtemap_7 is on socket 0
> EAL: Not enough memory available on socket 1! Requested: 4096MB, available: 0MB
> PANIC in rte_eal_init():
> Cannot init memory
>
> Also, using numactl will affect all the allocations in application. This may
> cause additional unexpected issues.
>
>>
>> Do you see use case like --socket-mem 2048,1024 and only three 1GB-hugepage are allowed?
>
> This case will work with my patch.
> But the opposite one '--socket-mem=1024,2048' will fail.
> To be clear, we need to allocate all required memory at first
> from each numa node and then allocate all other available pages
> in round-robin fashion. But such solution looks a little ugly.
>
> What do you think?
>
> Best regards, Ilya Maximets.
>
>
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [dpdk-stable] [PATCH] mem: balanced allocation of hugepages
2017-02-16 13:01 ` [dpdk-stable] [PATCH] mem: balanced allocation of hugepages Ilya Maximets
2017-02-16 13:26 ` Tan, Jianfeng
2017-02-16 13:31 ` [dpdk-stable] [dpdk-dev] " Bruce Richardson
@ 2017-03-06 9:34 ` Ilya Maximets
2017-03-08 13:46 ` Sergio Gonzalez Monroy
2 siblings, 1 reply; 15+ messages in thread
From: Ilya Maximets @ 2017-03-06 9:34 UTC (permalink / raw)
To: dev, David Marchand, Sergio Gonzalez Monroy
Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
stable, Thomas Monjalon, Bruce Richardson
Hi all.
So, what about this change?
Best regards, Ilya Maximets.
On 16.02.2017 16:01, Ilya Maximets wrote:
> Currently EAL allocates hugepages one by one not paying
> attention from which NUMA node allocation was done.
>
> Such behaviour leads to allocation failure if number of
> available hugepages for application limited by cgroups
> or hugetlbfs and memory requested not only from the first
> socket.
>
> Example:
> # 90 x 1GB hugepages availavle in a system
>
> cgcreate -g hugetlb:/test
> # Limit to 32GB of hugepages
> cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
> # Request 4GB from each of 2 sockets
> cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>
> EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
> EAL: 32 not 90 hugepages of size 1024 MB allocated
> EAL: Not enough memory available on socket 1!
> Requested: 4096MB, available: 0MB
> PANIC in rte_eal_init():
> Cannot init memory
>
> This happens beacause all allocated pages are
> on socket 0.
>
> Fix this issue by setting mempolicy MPOL_PREFERRED for each
> hugepage to one of requested nodes in a round-robin fashion.
> In this case all allocated pages will be fairly distributed
> between all requested nodes.
>
> New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> introduced and disabled by default because of external
> dependency from libnuma.
>
> Cc: <stable@dpdk.org>
> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
>
> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
> ---
> config/common_base | 1 +
> lib/librte_eal/Makefile | 4 ++
> lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
> mk/rte.app.mk | 3 ++
> 4 files changed, 74 insertions(+)
>
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [dpdk-stable] [PATCH] mem: balanced allocation of hugepages
2017-03-06 9:34 ` [dpdk-stable] " Ilya Maximets
@ 2017-03-08 13:46 ` Sergio Gonzalez Monroy
2017-03-09 12:57 ` Ilya Maximets
0 siblings, 1 reply; 15+ messages in thread
From: Sergio Gonzalez Monroy @ 2017-03-08 13:46 UTC (permalink / raw)
To: Ilya Maximets, dev, David Marchand
Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
stable, Thomas Monjalon, Bruce Richardson
Hi Ilya,
I have done similar tests and as you already pointed out, 'numactl
--interleave' does not seem to work as expected.
I have also checked that the issue can be reproduced with quota limit on
hugetlbfs mount point.
I would be inclined towards *adding libnuma as a dependency* to DPDK to
make memory allocation a bit more reliable.
Currently at a high level regarding hugepages per numa node:
1) Try to map all free hugepages. The total number of mapped hugepages
depends on whether there were any limits, such as cgroups or a quota on the mount point.
2) Find out numa node of each hugepage.
3) Check if we have enough hugepages for requested memory in each numa
socket/node.
Using libnuma we could try to allocate hugepages per NUMA node (a rough sketch follows the list):
1) Try to map as many hugepages from numa 0.
2) Check if we have enough hugepages for requested memory in numa 0.
3) Try to map as many hugepages from numa 1.
4) Check if we have enough hugepages for requested memory in numa 1.
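A minimal sketch of that per-node loop, using libnuma's preferred-node
policy around the existing per-page mmap; map_one_hugepage() and
pages_needed_on_node() are hypothetical placeholders, not existing EAL
functions:
#include <numa.h>  /* numa_available, numa_max_node, numa_set_preferred, numa_set_localalloc */
/*
 * Hypothetical sketch: allocate only as many hugepages as each NUMA
 * node needs, one node at a time, before grabbing any extra pages.
 */
static int
alloc_pages_per_node(void)
{
	int node;
	if (numa_available() < 0)
		return -1;  /* no NUMA support, keep the current behaviour */
	for (node = 0; node <= numa_max_node(); node++) {
		unsigned int need = pages_needed_on_node(node);  /* hypothetical */
		numa_set_preferred(node);  /* hint: take pages from this node */
		while (need > 0 && map_one_hugepage(node) == 0)  /* hypothetical */
			need--;
		if (need > 0) {
			numa_set_localalloc();
			return -1;  /* not enough pages on this node */
		}
	}
	numa_set_localalloc();  /* restore the default allocation policy */
	return 0;
}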
This approach would improve failing scenarios caused by limits but it
would still not fix issues regarding non-contiguous hugepages (in the worst
case each hugepage is a memseg).
The non-contiguous hugepages issues are not as critical now that
mempools can span over multiple memsegs/hugepages, but it is still a
problem for any other library requiring big chunks of memory.
Potentially, if we were to add an option such as 'iommu-only' for when all
devices are bound to vfio-pci, we could have a reliable way to allocate
hugepages by just requesting the number of pages from each NUMA node.
Thoughts?
Sergio
On 06/03/2017 09:34, Ilya Maximets wrote:
> Hi all.
>
> So, what about this change?
>
> Best regards, Ilya Maximets.
>
> On 16.02.2017 16:01, Ilya Maximets wrote:
>> Currently EAL allocates hugepages one by one not paying
>> attention from which NUMA node allocation was done.
>>
>> Such behaviour leads to allocation failure if number of
>> available hugepages for application limited by cgroups
>> or hugetlbfs and memory requested not only from the first
>> socket.
>>
>> Example:
>> # 90 x 1GB hugepages availavle in a system
>>
>> cgcreate -g hugetlb:/test
>> # Limit to 32GB of hugepages
>> cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
>> # Request 4GB from each of 2 sockets
>> cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>>
>> EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
>> EAL: 32 not 90 hugepages of size 1024 MB allocated
>> EAL: Not enough memory available on socket 1!
>> Requested: 4096MB, available: 0MB
>> PANIC in rte_eal_init():
>> Cannot init memory
>>
>> This happens beacause all allocated pages are
>> on socket 0.
>>
>> Fix this issue by setting mempolicy MPOL_PREFERRED for each
>> hugepage to one of requested nodes in a round-robin fashion.
>> In this case all allocated pages will be fairly distributed
>> between all requested nodes.
>>
>> New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>> introduced and disabled by default because of external
>> dependency from libnuma.
>>
>> Cc: <stable@dpdk.org>
>> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
>>
>> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
>> ---
>> config/common_base | 1 +
>> lib/librte_eal/Makefile | 4 ++
>> lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
>> mk/rte.app.mk | 3 ++
>> 4 files changed, 74 insertions(+)
>>
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [dpdk-stable] [PATCH] mem: balanced allocation of hugepages
2017-03-08 13:46 ` Sergio Gonzalez Monroy
@ 2017-03-09 12:57 ` Ilya Maximets
2017-03-27 13:01 ` Sergio Gonzalez Monroy
0 siblings, 1 reply; 15+ messages in thread
From: Ilya Maximets @ 2017-03-09 12:57 UTC (permalink / raw)
To: Sergio Gonzalez Monroy, dev, David Marchand
Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
stable, Thomas Monjalon, Bruce Richardson
On 08.03.2017 16:46, Sergio Gonzalez Monroy wrote:
> Hi Ilya,
>
> I have done similar tests and as you already pointed out, 'numactl --interleave' does not seem to work as expected.
> I have also checked that the issue can be reproduced with quota limit on hugetlbfs mount point.
>
> I would be inclined towards *adding libnuma as dependency* to DPDK to make memory allocation a bit more reliable.
>
> Currently at a high level regarding hugepages per numa node:
> 1) Try to map all free hugepages. The total number of mapped hugepages depends if there were any limits, such as cgroups or quota in mount point.
> 2) Find out numa node of each hugepage.
> 3) Check if we have enough hugepages for requested memory in each numa socket/node.
>
> Using libnuma we could try to allocate hugepages per numa:
> 1) Try to map as many hugepages from numa 0.
> 2) Check if we have enough hugepages for requested memory in numa 0.
> 3) Try to map as many hugepages from numa 1.
> 4) Check if we have enough hugepages for requested memory in numa 1.
>
> This approach would improve failing scenarios caused by limits but It would still not fix issues regarding non-contiguous hugepages (worst case each hugepage is a memseg).
> The non-contiguous hugepages issues are not as critical now that mempools can span over multiple memsegs/hugepages, but it is still a problem for any other library requiring big chunks of memory.
>
> Potentially if we were to add an option such as 'iommu-only' when all devices are bound to vfio-pci, we could have a reliable way to allocate hugepages by just requesting the number of pages from each numa.
>
> Thoughts?
Hi Sergio,
Thanks for your attention to this.
For now, as we have some issues with non-contiguous
hugepages, I'm thinking about the following hybrid schema:
1) Allocate essential hugepages:
1.1) Allocate only as many hugepages from NUMA node N as
are needed to fit the memory requested for this node.
1.2) Repeat 1.1 for all NUMA nodes.
2) Try to map all remaining free hugepages in a round-robin
fashion like in this patch.
3) Sort the pages and choose the most suitable ones.
This solution should decrease the number of issues connected with
non-contiguous memory; a rough outline is sketched below.
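Outline with hypothetical helpers (none of these functions exist in EAL
today; numa_max_node() comes from libnuma):
/* Hypothetical outline of the hybrid schema. */
static int
hybrid_hugepage_alloc(void)
{
	int node;
	/* 1) essential pages: exactly what each NUMA node was asked for */
	for (node = 0; node <= numa_max_node(); node++)
		if (map_pages_on_node(node, requested_pages(node)) < 0)
			return -1;
	/* 2) map all remaining free pages, spread round-robin over nodes */
	map_remaining_pages_round_robin();
	/* 3) sort the pages and keep only the most suitable ones */
	sort_and_trim_pages();
	return 0;
}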
Best regards, Ilya Maximets.
>
> On 06/03/2017 09:34, Ilya Maximets wrote:
>> Hi all.
>>
>> So, what about this change?
>>
>> Best regards, Ilya Maximets.
>>
>> On 16.02.2017 16:01, Ilya Maximets wrote:
>>> Currently EAL allocates hugepages one by one not paying
>>> attention from which NUMA node allocation was done.
>>>
>>> Such behaviour leads to allocation failure if number of
>>> available hugepages for application limited by cgroups
>>> or hugetlbfs and memory requested not only from the first
>>> socket.
>>>
>>> Example:
>>> # 90 x 1GB hugepages availavle in a system
>>>
>>> cgcreate -g hugetlb:/test
>>> # Limit to 32GB of hugepages
>>> cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
>>> # Request 4GB from each of 2 sockets
>>> cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>>>
>>> EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
>>> EAL: 32 not 90 hugepages of size 1024 MB allocated
>>> EAL: Not enough memory available on socket 1!
>>> Requested: 4096MB, available: 0MB
>>> PANIC in rte_eal_init():
>>> Cannot init memory
>>>
>>> This happens beacause all allocated pages are
>>> on socket 0.
>>>
>>> Fix this issue by setting mempolicy MPOL_PREFERRED for each
>>> hugepage to one of requested nodes in a round-robin fashion.
>>> In this case all allocated pages will be fairly distributed
>>> between all requested nodes.
>>>
>>> New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>> introduced and disabled by default because of external
>>> dependency from libnuma.
>>>
>>> Cc: <stable@dpdk.org>
>>> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
>>>
>>> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
>>> ---
>>> config/common_base | 1 +
>>> lib/librte_eal/Makefile | 4 ++
>>> lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
>>> mk/rte.app.mk | 3 ++
>>> 4 files changed, 74 insertions(+)
>>>
>
>
>
>
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [dpdk-stable] [PATCH] mem: balanced allocation of hugepages
2017-03-09 12:57 ` Ilya Maximets
@ 2017-03-27 13:01 ` Sergio Gonzalez Monroy
2017-03-27 14:43 ` Ilya Maximets
0 siblings, 1 reply; 15+ messages in thread
From: Sergio Gonzalez Monroy @ 2017-03-27 13:01 UTC (permalink / raw)
To: Ilya Maximets, dev, David Marchand
Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
stable, Thomas Monjalon, Bruce Richardson
On 09/03/2017 12:57, Ilya Maximets wrote:
> On 08.03.2017 16:46, Sergio Gonzalez Monroy wrote:
>> Hi Ilya,
>>
>> I have done similar tests and as you already pointed out, 'numactl --interleave' does not seem to work as expected.
>> I have also checked that the issue can be reproduced with quota limit on hugetlbfs mount point.
>>
>> I would be inclined towards *adding libnuma as dependency* to DPDK to make memory allocation a bit more reliable.
>>
>> Currently at a high level regarding hugepages per numa node:
>> 1) Try to map all free hugepages. The total number of mapped hugepages depends if there were any limits, such as cgroups or quota in mount point.
>> 2) Find out numa node of each hugepage.
>> 3) Check if we have enough hugepages for requested memory in each numa socket/node.
>>
>> Using libnuma we could try to allocate hugepages per numa:
>> 1) Try to map as many hugepages from numa 0.
>> 2) Check if we have enough hugepages for requested memory in numa 0.
>> 3) Try to map as many hugepages from numa 1.
>> 4) Check if we have enough hugepages for requested memory in numa 1.
>>
>> This approach would improve failing scenarios caused by limits but It would still not fix issues regarding non-contiguous hugepages (worst case each hugepage is a memseg).
>> The non-contiguous hugepages issues are not as critical now that mempools can span over multiple memsegs/hugepages, but it is still a problem for any other library requiring big chunks of memory.
>>
>> Potentially if we were to add an option such as 'iommu-only' when all devices are bound to vfio-pci, we could have a reliable way to allocate hugepages by just requesting the number of pages from each numa.
>>
>> Thoughts?
> Hi Sergio,
>
> Thanks for your attention to this.
>
> For now, as we have some issues with non-contiguous
> hugepages, I'm thinking about following hybrid schema:
> 1) Allocate essential hugepages:
> 1.1) Allocate as many hugepages from numa N to
> only fit requested memory for this numa.
> 1.2) repeat 1.1 for all numa nodes.
> 2) Try to map all remaining free hugepages in a round-robin
> fashion like in this patch.
> 3) Sort pages and choose the most suitable.
>
> This solution should decrease number of issues connected with
> non-contiguous memory.
Sorry for the late reply, I was hoping for more comments from the community.
IMHO this should be the default behavior, which means no config option and
libnuma as an EAL dependency.
I think your proposal is good; could you consider implementing such an
approach for the next release?
Regards.
> Best regards, Ilya Maximets.
>
>> On 06/03/2017 09:34, Ilya Maximets wrote:
>>> Hi all.
>>>
>>> So, what about this change?
>>>
>>> Best regards, Ilya Maximets.
>>>
>>> On 16.02.2017 16:01, Ilya Maximets wrote:
>>>> Currently EAL allocates hugepages one by one not paying
>>>> attention from which NUMA node allocation was done.
>>>>
>>>> Such behaviour leads to allocation failure if number of
>>>> available hugepages for application limited by cgroups
>>>> or hugetlbfs and memory requested not only from the first
>>>> socket.
>>>>
>>>> Example:
>>>> # 90 x 1GB hugepages availavle in a system
>>>>
>>>> cgcreate -g hugetlb:/test
>>>> # Limit to 32GB of hugepages
>>>> cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
>>>> # Request 4GB from each of 2 sockets
>>>> cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>>>>
>>>> EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
>>>> EAL: 32 not 90 hugepages of size 1024 MB allocated
>>>> EAL: Not enough memory available on socket 1!
>>>> Requested: 4096MB, available: 0MB
>>>> PANIC in rte_eal_init():
>>>> Cannot init memory
>>>>
>>>> This happens beacause all allocated pages are
>>>> on socket 0.
>>>>
>>>> Fix this issue by setting mempolicy MPOL_PREFERRED for each
>>>> hugepage to one of requested nodes in a round-robin fashion.
>>>> In this case all allocated pages will be fairly distributed
>>>> between all requested nodes.
>>>>
>>>> New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>> introduced and disabled by default because of external
>>>> dependency from libnuma.
>>>>
>>>> Cc:<stable@dpdk.org>
>>>> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
>>>>
>>>> Signed-off-by: Ilya Maximets<i.maximets@samsung.com>
>>>> ---
>>>> config/common_base | 1 +
>>>> lib/librte_eal/Makefile | 4 ++
>>>> lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
>>>> mk/rte.app.mk | 3 ++
>>>> 4 files changed, 74 insertions(+)
Acked-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [dpdk-stable] [PATCH] mem: balanced allocation of hugepages
2017-03-27 13:01 ` Sergio Gonzalez Monroy
@ 2017-03-27 14:43 ` Ilya Maximets
2017-04-07 15:14 ` Ilya Maximets
0 siblings, 1 reply; 15+ messages in thread
From: Ilya Maximets @ 2017-03-27 14:43 UTC (permalink / raw)
To: Sergio Gonzalez Monroy, dev, David Marchand
Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
stable, Thomas Monjalon, Bruce Richardson
On 27.03.2017 16:01, Sergio Gonzalez Monroy wrote:
> On 09/03/2017 12:57, Ilya Maximets wrote:
>> On 08.03.2017 16:46, Sergio Gonzalez Monroy wrote:
>>> Hi Ilya,
>>>
>>> I have done similar tests and as you already pointed out, 'numactl --interleave' does not seem to work as expected.
>>> I have also checked that the issue can be reproduced with quota limit on hugetlbfs mount point.
>>>
>>> I would be inclined towards *adding libnuma as dependency* to DPDK to make memory allocation a bit more reliable.
>>>
>>> Currently at a high level regarding hugepages per numa node:
>>> 1) Try to map all free hugepages. The total number of mapped hugepages depends if there were any limits, such as cgroups or quota in mount point.
>>> 2) Find out numa node of each hugepage.
>>> 3) Check if we have enough hugepages for requested memory in each numa socket/node.
>>>
>>> Using libnuma we could try to allocate hugepages per numa:
>>> 1) Try to map as many hugepages from numa 0.
>>> 2) Check if we have enough hugepages for requested memory in numa 0.
>>> 3) Try to map as many hugepages from numa 1.
>>> 4) Check if we have enough hugepages for requested memory in numa 1.
>>>
>>> This approach would improve failing scenarios caused by limits but It would still not fix issues regarding non-contiguous hugepages (worst case each hugepage is a memseg).
>>> The non-contiguous hugepages issues are not as critical now that mempools can span over multiple memsegs/hugepages, but it is still a problem for any other library requiring big chunks of memory.
>>>
>>> Potentially if we were to add an option such as 'iommu-only' when all devices are bound to vfio-pci, we could have a reliable way to allocate hugepages by just requesting the number of pages from each numa.
>>>
>>> Thoughts?
>> Hi Sergio,
>>
>> Thanks for your attention to this.
>>
>> For now, as we have some issues with non-contiguous
>> hugepages, I'm thinking about following hybrid schema:
>> 1) Allocate essential hugepages:
>> 1.1) Allocate as many hugepages from numa N to
>> only fit requested memory for this numa.
>> 1.2) repeat 1.1 for all numa nodes.
>> 2) Try to map all remaining free hugepages in a round-robin
>> fashion like in this patch.
>> 3) Sort pages and choose the most suitable.
>>
>> This solution should decrease number of issues connected with
>> non-contiguous memory.
>
> Sorry for late reply, I was hoping for more comments from the community.
>
> IMHO this should be default behavior, which means no config option and libnuma as EAL dependency.
> I think your proposal is good, could you consider implementing such approach on next release?
Sure, I can implement this for the 17.08 release.
>>
>>> On 06/03/2017 09:34, Ilya Maximets wrote:
>>>> Hi all.
>>>>
>>>> So, what about this change?
>>>>
>>>> Best regards, Ilya Maximets.
>>>>
>>>> On 16.02.2017 16:01, Ilya Maximets wrote:
>>>>> Currently EAL allocates hugepages one by one not paying
>>>>> attention from which NUMA node allocation was done.
>>>>>
>>>>> Such behaviour leads to allocation failure if number of
>>>>> available hugepages for application limited by cgroups
>>>>> or hugetlbfs and memory requested not only from the first
>>>>> socket.
>>>>>
>>>>> Example:
>>>>> # 90 x 1GB hugepages availavle in a system
>>>>>
>>>>> cgcreate -g hugetlb:/test
>>>>> # Limit to 32GB of hugepages
>>>>> cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
>>>>> # Request 4GB from each of 2 sockets
>>>>> cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>>>>>
>>>>> EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
>>>>> EAL: 32 not 90 hugepages of size 1024 MB allocated
>>>>> EAL: Not enough memory available on socket 1!
>>>>> Requested: 4096MB, available: 0MB
>>>>> PANIC in rte_eal_init():
>>>>> Cannot init memory
>>>>>
>>>>> This happens beacause all allocated pages are
>>>>> on socket 0.
>>>>>
>>>>> Fix this issue by setting mempolicy MPOL_PREFERRED for each
>>>>> hugepage to one of requested nodes in a round-robin fashion.
>>>>> In this case all allocated pages will be fairly distributed
>>>>> between all requested nodes.
>>>>>
>>>>> New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>>> introduced and disabled by default because of external
>>>>> dependency from libnuma.
>>>>>
>>>>> Cc:<stable@dpdk.org>
>>>>> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
>>>>>
>>>>> Signed-off-by: Ilya Maximets<i.maximets@samsung.com>
>>>>> ---
>>>>> config/common_base | 1 +
>>>>> lib/librte_eal/Makefile | 4 ++
>>>>> lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
>>>>> mk/rte.app.mk | 3 ++
>>>>> 4 files changed, 74 insertions(+)
>
> Acked-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
Thanks.
Best regards, Ilya Maximets.
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [dpdk-stable] [PATCH] mem: balanced allocation of hugepages
2017-03-27 14:43 ` Ilya Maximets
@ 2017-04-07 15:14 ` Ilya Maximets
2017-04-07 15:44 ` Thomas Monjalon
0 siblings, 1 reply; 15+ messages in thread
From: Ilya Maximets @ 2017-04-07 15:14 UTC (permalink / raw)
To: Sergio Gonzalez Monroy, dev, David Marchand, Thomas Monjalon
Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
stable, Bruce Richardson
Hi All.
I wanted to ask (just to clarify the current status):
Will this patch be included in the current release (acked by the maintainer),
so that I can then upgrade it to the hybrid logic, or should I just
prepare a v3 with the hybrid logic for 17.08?
Best regards, Ilya Maximets.
On 27.03.2017 17:43, Ilya Maximets wrote:
> On 27.03.2017 16:01, Sergio Gonzalez Monroy wrote:
>> On 09/03/2017 12:57, Ilya Maximets wrote:
>>> On 08.03.2017 16:46, Sergio Gonzalez Monroy wrote:
>>>> Hi Ilya,
>>>>
>>>> I have done similar tests and as you already pointed out, 'numactl --interleave' does not seem to work as expected.
>>>> I have also checked that the issue can be reproduced with quota limit on hugetlbfs mount point.
>>>>
>>>> I would be inclined towards *adding libnuma as dependency* to DPDK to make memory allocation a bit more reliable.
>>>>
>>>> Currently at a high level regarding hugepages per numa node:
>>>> 1) Try to map all free hugepages. The total number of mapped hugepages depends if there were any limits, such as cgroups or quota in mount point.
>>>> 2) Find out numa node of each hugepage.
>>>> 3) Check if we have enough hugepages for requested memory in each numa socket/node.
>>>>
>>>> Using libnuma we could try to allocate hugepages per numa:
>>>> 1) Try to map as many hugepages from numa 0.
>>>> 2) Check if we have enough hugepages for requested memory in numa 0.
>>>> 3) Try to map as many hugepages from numa 1.
>>>> 4) Check if we have enough hugepages for requested memory in numa 1.
>>>>
>>>> This approach would improve failing scenarios caused by limits but It would still not fix issues regarding non-contiguous hugepages (worst case each hugepage is a memseg).
>>>> The non-contiguous hugepages issues are not as critical now that mempools can span over multiple memsegs/hugepages, but it is still a problem for any other library requiring big chunks of memory.
>>>>
>>>> Potentially if we were to add an option such as 'iommu-only' when all devices are bound to vfio-pci, we could have a reliable way to allocate hugepages by just requesting the number of pages from each numa.
>>>>
>>>> Thoughts?
>>> Hi Sergio,
>>>
>>> Thanks for your attention to this.
>>>
>>> For now, as we have some issues with non-contiguous
>>> hugepages, I'm thinking about following hybrid schema:
>>> 1) Allocate essential hugepages:
>>> 1.1) Allocate as many hugepages from numa N to
>>> only fit requested memory for this numa.
>>> 1.2) repeat 1.1 for all numa nodes.
>>> 2) Try to map all remaining free hugepages in a round-robin
>>> fashion like in this patch.
>>> 3) Sort pages and choose the most suitable.
>>>
>>> This solution should decrease number of issues connected with
>>> non-contiguous memory.
>>
>> Sorry for late reply, I was hoping for more comments from the community.
>>
>> IMHO this should be default behavior, which means no config option and libnuma as EAL dependency.
>> I think your proposal is good, could you consider implementing such approach on next release?
>
> Sure, I can implement this for 17.08 release.
>
>>>
>>>> On 06/03/2017 09:34, Ilya Maximets wrote:
>>>>> Hi all.
>>>>>
>>>>> So, what about this change?
>>>>>
>>>>> Best regards, Ilya Maximets.
>>>>>
>>>>> On 16.02.2017 16:01, Ilya Maximets wrote:
>>>>>> Currently EAL allocates hugepages one by one not paying
>>>>>> attention from which NUMA node allocation was done.
>>>>>>
>>>>>> Such behaviour leads to allocation failure if number of
>>>>>> available hugepages for application limited by cgroups
>>>>>> or hugetlbfs and memory requested not only from the first
>>>>>> socket.
>>>>>>
>>>>>> Example:
>>>>>> # 90 x 1GB hugepages availavle in a system
>>>>>>
>>>>>> cgcreate -g hugetlb:/test
>>>>>> # Limit to 32GB of hugepages
>>>>>> cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
>>>>>> # Request 4GB from each of 2 sockets
>>>>>> cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>>>>>>
>>>>>> EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
>>>>>> EAL: 32 not 90 hugepages of size 1024 MB allocated
>>>>>> EAL: Not enough memory available on socket 1!
>>>>>> Requested: 4096MB, available: 0MB
>>>>>> PANIC in rte_eal_init():
>>>>>> Cannot init memory
>>>>>>
>>>>>> This happens beacause all allocated pages are
>>>>>> on socket 0.
>>>>>>
>>>>>> Fix this issue by setting mempolicy MPOL_PREFERRED for each
>>>>>> hugepage to one of requested nodes in a round-robin fashion.
>>>>>> In this case all allocated pages will be fairly distributed
>>>>>> between all requested nodes.
>>>>>>
>>>>>> New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>>>> introduced and disabled by default because of external
>>>>>> dependency from libnuma.
>>>>>>
>>>>>> Cc:<stable@dpdk.org>
>>>>>> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
>>>>>>
>>>>>> Signed-off-by: Ilya Maximets<i.maximets@samsung.com>
>>>>>> ---
>>>>>> config/common_base | 1 +
>>>>>> lib/librte_eal/Makefile | 4 ++
>>>>>> lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
>>>>>> mk/rte.app.mk | 3 ++
>>>>>> 4 files changed, 74 insertions(+)
>>
>> Acked-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
>
> Thanks.
>
> Best regards, Ilya Maximets.
>
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [dpdk-stable] [PATCH] mem: balanced allocation of hugepages
2017-04-07 15:14 ` Ilya Maximets
@ 2017-04-07 15:44 ` Thomas Monjalon
2017-04-10 7:11 ` Ilya Maximets
0 siblings, 1 reply; 15+ messages in thread
From: Thomas Monjalon @ 2017-04-07 15:44 UTC (permalink / raw)
To: Ilya Maximets, Sergio Gonzalez Monroy
Cc: dev, David Marchand, Heetae Ahn, Yuanhan Liu, Jianfeng Tan,
Neil Horman, Yulong Pei, stable, Bruce Richardson
2017-04-07 18:14, Ilya Maximets:
> Hi All.
>
> I wanted to ask (just to clarify current status):
> Will this patch be included in current release (acked by maintainer)
> and then I will upgrade it to hybrid logic or I will just prepare v3
> with hybrid logic for 17.08 ?
What is your preferred option, Ilya?
Sergio?
> On 27.03.2017 17:43, Ilya Maximets wrote:
> > On 27.03.2017 16:01, Sergio Gonzalez Monroy wrote:
> >> On 09/03/2017 12:57, Ilya Maximets wrote:
> >>> On 08.03.2017 16:46, Sergio Gonzalez Monroy wrote:
> >>>> Hi Ilya,
> >>>>
> >>>> I have done similar tests and as you already pointed out, 'numactl --interleave' does not seem to work as expected.
> >>>> I have also checked that the issue can be reproduced with quota limit on hugetlbfs mount point.
> >>>>
> >>>> I would be inclined towards *adding libnuma as dependency* to DPDK to make memory allocation a bit more reliable.
> >>>>
> >>>> Currently at a high level regarding hugepages per numa node:
> >>>> 1) Try to map all free hugepages. The total number of mapped hugepages depends if there were any limits, such as cgroups or quota in mount point.
> >>>> 2) Find out numa node of each hugepage.
> >>>> 3) Check if we have enough hugepages for requested memory in each numa socket/node.
> >>>>
> >>>> Using libnuma we could try to allocate hugepages per numa:
> >>>> 1) Try to map as many hugepages from numa 0.
> >>>> 2) Check if we have enough hugepages for requested memory in numa 0.
> >>>> 3) Try to map as many hugepages from numa 1.
> >>>> 4) Check if we have enough hugepages for requested memory in numa 1.
> >>>>
> >>>> This approach would improve failing scenarios caused by limits but It would still not fix issues regarding non-contiguous hugepages (worst case each hugepage is a memseg).
> >>>> The non-contiguous hugepages issues are not as critical now that mempools can span over multiple memsegs/hugepages, but it is still a problem for any other library requiring big chunks of memory.
> >>>>
> >>>> Potentially if we were to add an option such as 'iommu-only' when all devices are bound to vfio-pci, we could have a reliable way to allocate hugepages by just requesting the number of pages from each numa.
> >>>>
> >>>> Thoughts?
> >>> Hi Sergio,
> >>>
> >>> Thanks for your attention to this.
> >>>
> >>> For now, as we have some issues with non-contiguous
> >>> hugepages, I'm thinking about following hybrid schema:
> >>> 1) Allocate essential hugepages:
> >>> 1.1) Allocate as many hugepages from numa N to
> >>> only fit requested memory for this numa.
> >>> 1.2) repeat 1.1 for all numa nodes.
> >>> 2) Try to map all remaining free hugepages in a round-robin
> >>> fashion like in this patch.
> >>> 3) Sort pages and choose the most suitable.
> >>>
> >>> This solution should decrease number of issues connected with
> >>> non-contiguous memory.
> >>
> >> Sorry for late reply, I was hoping for more comments from the community.
> >>
> >> IMHO this should be default behavior, which means no config option and libnuma as EAL dependency.
> >> I think your proposal is good, could you consider implementing such approach on next release?
> >
> > Sure, I can implement this for 17.08 release.
> >
> >>>
> >>>> On 06/03/2017 09:34, Ilya Maximets wrote:
> >>>>> Hi all.
> >>>>>
> >>>>> So, what about this change?
> >>>>>
> >>>>> Best regards, Ilya Maximets.
> >>>>>
> >>>>> On 16.02.2017 16:01, Ilya Maximets wrote:
> >>>>>> Currently EAL allocates hugepages one by one not paying
> >>>>>> attention from which NUMA node allocation was done.
> >>>>>>
> >>>>>> Such behaviour leads to allocation failure if number of
> >>>>>> available hugepages for application limited by cgroups
> >>>>>> or hugetlbfs and memory requested not only from the first
> >>>>>> socket.
> >>>>>>
> >>>>>> Example:
> >>>>>> # 90 x 1GB hugepages availavle in a system
> >>>>>>
> >>>>>> cgcreate -g hugetlb:/test
> >>>>>> # Limit to 32GB of hugepages
> >>>>>> cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
> >>>>>> # Request 4GB from each of 2 sockets
> >>>>>> cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
> >>>>>>
> >>>>>> EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
> >>>>>> EAL: 32 not 90 hugepages of size 1024 MB allocated
> >>>>>> EAL: Not enough memory available on socket 1!
> >>>>>> Requested: 4096MB, available: 0MB
> >>>>>> PANIC in rte_eal_init():
> >>>>>> Cannot init memory
> >>>>>>
> >>>>>> This happens beacause all allocated pages are
> >>>>>> on socket 0.
> >>>>>>
> >>>>>> Fix this issue by setting mempolicy MPOL_PREFERRED for each
> >>>>>> hugepage to one of requested nodes in a round-robin fashion.
> >>>>>> In this case all allocated pages will be fairly distributed
> >>>>>> between all requested nodes.
> >>>>>>
> >>>>>> New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> >>>>>> introduced and disabled by default because of external
> >>>>>> dependency from libnuma.
> >>>>>>
> >>>>>> Cc:<stable@dpdk.org>
> >>>>>> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
> >>>>>>
> >>>>>> Signed-off-by: Ilya Maximets<i.maximets@samsung.com>
> >>>>>> ---
> >>>>>> config/common_base | 1 +
> >>>>>> lib/librte_eal/Makefile | 4 ++
> >>>>>> lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
> >>>>>> mk/rte.app.mk | 3 ++
> >>>>>> 4 files changed, 74 insertions(+)
> >>
> >> Acked-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
> >
> > Thanks.
> >
> > Best regards, Ilya Maximets.
> >
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [dpdk-stable] [PATCH] mem: balanced allocation of hugepages
2017-04-07 15:44 ` Thomas Monjalon
@ 2017-04-10 7:11 ` Ilya Maximets
2017-04-10 7:51 ` Sergio Gonzalez Monroy
0 siblings, 1 reply; 15+ messages in thread
From: Ilya Maximets @ 2017-04-10 7:11 UTC (permalink / raw)
To: Thomas Monjalon, Sergio Gonzalez Monroy
Cc: dev, David Marchand, Heetae Ahn, Yuanhan Liu, Jianfeng Tan,
Neil Horman, Yulong Pei, stable, Bruce Richardson
On 07.04.2017 18:44, Thomas Monjalon wrote:
> 2017-04-07 18:14, Ilya Maximets:
>> Hi All.
>>
>> I wanted to ask (just to clarify current status):
>> Will this patch be included in current release (acked by maintainer)
>> and then I will upgrade it to hybrid logic or I will just prepare v3
>> with hybrid logic for 17.08 ?
>
> What is your preferred option Ilya?
I have no strong opinion on this. One thought is that it would be
nice if someone else could test this functionality with the current
release before enabling it by default in 17.08.
Tomorrow I'm going on vacation, so I'll post a rebased version today
(the patch applies with some fuzz on current master) and you and
Sergio can decide what to do.
Best regards, Ilya Maximets.
> Sergio?
>
>
>> On 27.03.2017 17:43, Ilya Maximets wrote:
>>> On 27.03.2017 16:01, Sergio Gonzalez Monroy wrote:
>>>> On 09/03/2017 12:57, Ilya Maximets wrote:
>>>>> On 08.03.2017 16:46, Sergio Gonzalez Monroy wrote:
>>>>>> Hi Ilya,
>>>>>>
>>>>>> I have done similar tests and as you already pointed out, 'numactl --interleave' does not seem to work as expected.
>>>>>> I have also checked that the issue can be reproduced with quota limit on hugetlbfs mount point.
>>>>>>
>>>>>> I would be inclined towards *adding libnuma as dependency* to DPDK to make memory allocation a bit more reliable.
>>>>>>
>>>>>> Currently at a high level regarding hugepages per numa node:
>>>>>> 1) Try to map all free hugepages. The total number of mapped hugepages depends if there were any limits, such as cgroups or quota in mount point.
>>>>>> 2) Find out numa node of each hugepage.
>>>>>> 3) Check if we have enough hugepages for requested memory in each numa socket/node.
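
(Step 2 above can be done, for example, with get_mempolicy() on a page that has
already been touched; a minimal sketch, not necessarily the lookup EAL actually
uses internally:)

#include <numaif.h>   /* get_mempolicy(), MPOL_F_NODE, MPOL_F_ADDR; link with -lnuma */

/* Return the NUMA node backing the already faulted-in page at 'va', or -1. */
static int
page_numa_node(const void *va)
{
    int node = -1;

    if (get_mempolicy(&node, NULL, 0, (void *)va, MPOL_F_NODE | MPOL_F_ADDR) < 0)
        return -1;
    return node;
}
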
>>>>>>
>>>>>> Using libnuma we could try to allocate hugepages per numa:
>>>>>> 1) Try to map as many hugepages from numa 0.
>>>>>> 2) Check if we have enough hugepages for requested memory in numa 0.
>>>>>> 3) Try to map as many hugepages from numa 1.
>>>>>> 4) Check if we have enough hugepages for requested memory in numa 1.
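
A rough sketch of such a per-node pass using the libnuma API (hypothetical
helper; the anonymous MAP_HUGETLB mapping of the default hugepage size is only
to keep the example short, EAL maps files from hugetlbfs instead):

#define _GNU_SOURCE   /* MAP_ANONYMOUS / MAP_HUGETLB on some libcs */
#include <numa.h>     /* numa_available(), numa_set_preferred(), numa_set_localalloc(); -lnuma */
#include <sys/mman.h>

/* Fault in up to 'pages_needed' hugepages while preferring 'node'. */
static unsigned int
map_hugepages_from_node(int node, unsigned int pages_needed, size_t hugepage_sz)
{
    unsigned int mapped = 0;

    if (numa_available() < 0)
        return 0;                 /* no NUMA support: caller falls back to the current path */

    numa_set_preferred(node);     /* hint: place the following faults on 'node' */

    while (mapped < pages_needed) {
        void *va = mmap(NULL, hugepage_sz, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
        if (va == MAP_FAILED)
            break;                /* out of free hugepages or hit a limit */
        *(volatile char *)va = 0; /* touch it so the page is placed now */
        mapped++;
    }

    numa_set_localalloc();        /* restore the default policy */
    return mapped;
}
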
>>>>>>
>>>>>> This approach would improve failing scenarios caused by limits, but it would still not fix issues regarding non-contiguous hugepages (worst case, each hugepage is a memseg).
>>>>>> The non-contiguous hugepages issues are not as critical now that mempools can span over multiple memsegs/hugepages, but it is still a problem for any other library requiring big chunks of memory.
>>>>>>
>>>>>> Potentially if we were to add an option such as 'iommu-only' when all devices are bound to vfio-pci, we could have a reliable way to allocate hugepages by just requesting the number of pages from each numa.
>>>>>>
>>>>>> Thoughts?
>>>>> Hi Sergio,
>>>>>
>>>>> Thanks for your attention to this.
>>>>>
>>>>> For now, as we have some issues with non-contiguous
>>>>> hugepages, I'm thinking about following hybrid schema:
>>>>> 1) Allocate essential hugepages:
>>>>> 1.1) Allocate as many hugepages from numa N to
>>>>> only fit requested memory for this numa.
>>>>> 1.2) repeat 1.1 for all numa nodes.
>>>>> 2) Try to map all remaining free hugepages in a round-robin
>>>>> fashion like in this patch.
>>>>> 3) Sort pages and choose the most suitable.
>>>>>
>>>>> This solution should decrease number of issues connected with
>>>>> non-contiguous memory.
>>>>
>>>> Sorry for late reply, I was hoping for more comments from the community.
>>>>
>>>> IMHO this should be default behavior, which means no config option and libnuma as EAL dependency.
>>>> I think your proposal is good, could you consider implementing such approach on next release?
>>>
>>> Sure, I can implement this for 17.08 release.
>>>
>>>>>
>>>>>> On 06/03/2017 09:34, Ilya Maximets wrote:
>>>>>>> Hi all.
>>>>>>>
>>>>>>> So, what about this change?
>>>>>>>
>>>>>>> Best regards, Ilya Maximets.
>>>>>>>
>>>>>>> On 16.02.2017 16:01, Ilya Maximets wrote:
>>>>>>>> Currently EAL allocates hugepages one by one not paying
>>>>>>>> attention from which NUMA node allocation was done.
>>>>>>>>
>>>>>>>> Such behaviour leads to allocation failure if number of
>>>>>>>> available hugepages for application limited by cgroups
>>>>>>>> or hugetlbfs and memory requested not only from the first
>>>>>>>> socket.
>>>>>>>>
>>>>>>>> Example:
>>>>>>>> # 90 x 1GB hugepages available in a system
>>>>>>>>
>>>>>>>> cgcreate -g hugetlb:/test
>>>>>>>> # Limit to 32GB of hugepages
>>>>>>>> cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
>>>>>>>> # Request 4GB from each of 2 sockets
>>>>>>>> cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>>>>>>>>
>>>>>>>> EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
>>>>>>>> EAL: 32 not 90 hugepages of size 1024 MB allocated
>>>>>>>> EAL: Not enough memory available on socket 1!
>>>>>>>> Requested: 4096MB, available: 0MB
>>>>>>>> PANIC in rte_eal_init():
>>>>>>>> Cannot init memory
>>>>>>>>
>>>>>>>> This happens because all allocated pages are
>>>>>>>> on socket 0.
>>>>>>>>
>>>>>>>> Fix this issue by setting mempolicy MPOL_PREFERRED for each
>>>>>>>> hugepage to one of requested nodes in a round-robin fashion.
>>>>>>>> In this case all allocated pages will be fairly distributed
>>>>>>>> between all requested nodes.
>>>>>>>>
>>>>>>>> New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>>>>>> introduced and disabled by default because of external
>>>>>>>> dependency from libnuma.
>>>>>>>>
>>>>>>>> Cc: <stable@dpdk.org>
>>>>>>>> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
>>>>>>>>
>>>>>>>> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
>>>>>>>> ---
>>>>>>>> config/common_base | 1 +
>>>>>>>> lib/librte_eal/Makefile | 4 ++
>>>>>>>> lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
>>>>>>>> mk/rte.app.mk | 3 ++
>>>>>>>> 4 files changed, 74 insertions(+)
>>>>
>>>> Acked-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
>>>
>>> Thanks.
>>>
>>> Best regards, Ilya Maximets.
>>>
>
>
>
>
>
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [dpdk-stable] [PATCH] mem: balanced allocation of hugepages
2017-04-10 7:11 ` Ilya Maximets
@ 2017-04-10 7:51 ` Sergio Gonzalez Monroy
2017-04-10 8:05 ` Ilya Maximets
0 siblings, 1 reply; 15+ messages in thread
From: Sergio Gonzalez Monroy @ 2017-04-10 7:51 UTC (permalink / raw)
To: Ilya Maximets, Thomas Monjalon
Cc: dev, David Marchand, Heetae Ahn, Yuanhan Liu, Jianfeng Tan,
Neil Horman, Yulong Pei, stable, Bruce Richardson
On 10/04/2017 08:11, Ilya Maximets wrote:
> On 07.04.2017 18:44, Thomas Monjalon wrote:
>> 2017-04-07 18:14, Ilya Maximets:
>>> Hi All.
>>>
>>> I wanted to ask (just to clarify current status):
>>> Will this patch be included in current release (acked by maintainer)
>>> and then I will upgrade it to hybrid logic or I will just prepare v3
>>> with hybrid logic for 17.08 ?
>> What is your preferred option Ilya?
> I have no strong opinion on this. One thought is that it would be
> nice if someone else could test this functionality with the current
> release before enabling it by default in 17.08.
>
> Tomorrow I'm going on vacation, so I'll post a rebased version today
> (the patch applies with some fuzz on current master) and you and
> Sergio can decide what to do.
>
> Best regards, Ilya Maximets.
>
>> Sergio?
I would be inclined towards v3 targeting v17.08. IMHO it would be
cleaner this way.
Sergio
>>
>>> On 27.03.2017 17:43, Ilya Maximets wrote:
>>>> On 27.03.2017 16:01, Sergio Gonzalez Monroy wrote:
>>>>> On 09/03/2017 12:57, Ilya Maximets wrote:
>>>>>> On 08.03.2017 16:46, Sergio Gonzalez Monroy wrote:
>>>>>>> Hi Ilya,
>>>>>>>
>>>>>>> I have done similar tests and as you already pointed out, 'numactl --interleave' does not seem to work as expected.
>>>>>>> I have also checked that the issue can be reproduced with quota limit on hugetlbfs mount point.
>>>>>>>
>>>>>>> I would be inclined towards *adding libnuma as dependency* to DPDK to make memory allocation a bit more reliable.
>>>>>>>
>>>>>>> Currently at a high level regarding hugepages per numa node:
>>>>>>> 1) Try to map all free hugepages. The total number of mapped hugepages depends if there were any limits, such as cgroups or quota in mount point.
>>>>>>> 2) Find out numa node of each hugepage.
>>>>>>> 3) Check if we have enough hugepages for requested memory in each numa socket/node.
>>>>>>>
>>>>>>> Using libnuma we could try to allocate hugepages per numa:
>>>>>>> 1) Try to map as many hugepages from numa 0.
>>>>>>> 2) Check if we have enough hugepages for requested memory in numa 0.
>>>>>>> 3) Try to map as many hugepages from numa 1.
>>>>>>> 4) Check if we have enough hugepages for requested memory in numa 1.
>>>>>>>
>>>>>>> This approach would improve failing scenarios caused by limits, but it would still not fix issues regarding non-contiguous hugepages (worst case, each hugepage is a memseg).
>>>>>>> The non-contiguous hugepages issues are not as critical now that mempools can span over multiple memsegs/hugepages, but it is still a problem for any other library requiring big chunks of memory.
>>>>>>>
>>>>>>> Potentially if we were to add an option such as 'iommu-only' when all devices are bound to vfio-pci, we could have a reliable way to allocate hugepages by just requesting the number of pages from each numa.
>>>>>>>
>>>>>>> Thoughts?
>>>>>> Hi Sergio,
>>>>>>
>>>>>> Thanks for your attention to this.
>>>>>>
>>>>>> For now, as we have some issues with non-contiguous
>>>>>> hugepages, I'm thinking about following hybrid schema:
>>>>>> 1) Allocate essential hugepages:
>>>>>> 1.1) Allocate as many hugepages from numa N to
>>>>>> only fit requested memory for this numa.
>>>>>> 1.2) repeat 1.1 for all numa nodes.
>>>>>> 2) Try to map all remaining free hugepages in a round-robin
>>>>>> fashion like in this patch.
>>>>>> 3) Sort pages and choose the most suitable.
>>>>>>
>>>>>> This solution should decrease number of issues connected with
>>>>>> non-contiguous memory.
>>>>> Sorry for late reply, I was hoping for more comments from the community.
>>>>>
>>>>> IMHO this should be default behavior, which means no config option and libnuma as EAL dependency.
>>>>> I think your proposal is good, could you consider implementing such approach on next release?
>>>> Sure, I can implement this for 17.08 release.
>>>>
>>>>>>> On 06/03/2017 09:34, Ilya Maximets wrote:
>>>>>>>> Hi all.
>>>>>>>>
>>>>>>>> So, what about this change?
>>>>>>>>
>>>>>>>> Best regards, Ilya Maximets.
>>>>>>>>
>>>>>>>> On 16.02.2017 16:01, Ilya Maximets wrote:
>>>>>>>>> Currently EAL allocates hugepages one by one not paying
>>>>>>>>> attention from which NUMA node allocation was done.
>>>>>>>>>
>>>>>>>>> Such behaviour leads to allocation failure if number of
>>>>>>>>> available hugepages for application limited by cgroups
>>>>>>>>> or hugetlbfs and memory requested not only from the first
>>>>>>>>> socket.
>>>>>>>>>
>>>>>>>>> Example:
>>>>>>>>> # 90 x 1GB hugepages available in a system
>>>>>>>>>
>>>>>>>>> cgcreate -g hugetlb:/test
>>>>>>>>> # Limit to 32GB of hugepages
>>>>>>>>> cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
>>>>>>>>> # Request 4GB from each of 2 sockets
>>>>>>>>> cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>>>>>>>>>
>>>>>>>>> EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
>>>>>>>>> EAL: 32 not 90 hugepages of size 1024 MB allocated
>>>>>>>>> EAL: Not enough memory available on socket 1!
>>>>>>>>> Requested: 4096MB, available: 0MB
>>>>>>>>> PANIC in rte_eal_init():
>>>>>>>>> Cannot init memory
>>>>>>>>>
>>>>>>>>> This happens because all allocated pages are
>>>>>>>>> on socket 0.
>>>>>>>>>
>>>>>>>>> Fix this issue by setting mempolicy MPOL_PREFERRED for each
>>>>>>>>> hugepage to one of requested nodes in a round-robin fashion.
>>>>>>>>> In this case all allocated pages will be fairly distributed
>>>>>>>>> between all requested nodes.
>>>>>>>>>
>>>>>>>>> New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>>>>>>> introduced and disabled by default because of external
>>>>>>>>> dependency from libnuma.
>>>>>>>>>
>>>>>>>>> Cc: <stable@dpdk.org>
>>>>>>>>> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
>>>>>>>>>
>>>>>>>>> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
>>>>>>>>> ---
>>>>>>>>> config/common_base | 1 +
>>>>>>>>> lib/librte_eal/Makefile | 4 ++
>>>>>>>>> lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
>>>>>>>>> mk/rte.app.mk | 3 ++
>>>>>>>>> 4 files changed, 74 insertions(+)
>>>>> Acked-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
>>>> Thanks.
>>>>
>>>> Best regards, Ilya Maximets.
>>>>
>>
>>
>>
>>
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [dpdk-stable] [PATCH] mem: balanced allocation of hugepages
2017-04-10 7:51 ` Sergio Gonzalez Monroy
@ 2017-04-10 8:05 ` Ilya Maximets
0 siblings, 0 replies; 15+ messages in thread
From: Ilya Maximets @ 2017-04-10 8:05 UTC (permalink / raw)
To: Sergio Gonzalez Monroy, Thomas Monjalon
Cc: dev, David Marchand, Heetae Ahn, Yuanhan Liu, Jianfeng Tan,
Neil Horman, Yulong Pei, stable, Bruce Richardson
On 10.04.2017 10:51, Sergio Gonzalez Monroy wrote:
> On 10/04/2017 08:11, Ilya Maximets wrote:
>> On 07.04.2017 18:44, Thomas Monjalon wrote:
>>> 2017-04-07 18:14, Ilya Maximets:
>>>> Hi All.
>>>>
>>>> I wanted to ask (just to clarify current status):
>>>> Will this patch be included in current release (acked by maintainer)
>>>> and then I will upgrade it to hybrid logic or I will just prepare v3
>>>> with hybrid logic for 17.08 ?
>>> What is your preferred option Ilya?
>> I have no strong opinion on this. One thought is that it would be
>> nice if someone else could test this functionality with the current
>> release before enabling it by default in 17.08.
>>
>> Tomorrow I'm going on vacation, so I'll post a rebased version today
>> (the patch applies with some fuzz on current master) and you and
>> Sergio can decide what to do.
>>
>> Best regards, Ilya Maximets.
>>
>>> Sergio?
>
> I would be inclined towards v3 targeting v17.08. IMHO it would be cleaner this way.
OK.
I've sent a rebased version just in case.
>
> Sergio
>
>>>
>>>> On 27.03.2017 17:43, Ilya Maximets wrote:
>>>>> On 27.03.2017 16:01, Sergio Gonzalez Monroy wrote:
>>>>>> On 09/03/2017 12:57, Ilya Maximets wrote:
>>>>>>> On 08.03.2017 16:46, Sergio Gonzalez Monroy wrote:
>>>>>>>> Hi Ilya,
>>>>>>>>
>>>>>>>> I have done similar tests and as you already pointed out, 'numactl --interleave' does not seem to work as expected.
>>>>>>>> I have also checked that the issue can be reproduced with quota limit on hugetlbfs mount point.
>>>>>>>>
>>>>>>>> I would be inclined towards *adding libnuma as dependency* to DPDK to make memory allocation a bit more reliable.
>>>>>>>>
>>>>>>>> Currently at a high level regarding hugepages per numa node:
>>>>>>>> 1) Try to map all free hugepages. The total number of mapped hugepages depends if there were any limits, such as cgroups or quota in mount point.
>>>>>>>> 2) Find out numa node of each hugepage.
>>>>>>>> 3) Check if we have enough hugepages for requested memory in each numa socket/node.
>>>>>>>>
>>>>>>>> Using libnuma we could try to allocate hugepages per numa:
>>>>>>>> 1) Try to map as many hugepages from numa 0.
>>>>>>>> 2) Check if we have enough hugepages for requested memory in numa 0.
>>>>>>>> 3) Try to map as many hugepages from numa 1.
>>>>>>>> 4) Check if we have enough hugepages for requested memory in numa 1.
>>>>>>>>
>>>>>>>> This approach would improve failing scenarios caused by limits, but it would still not fix issues regarding non-contiguous hugepages (worst case, each hugepage is a memseg).
>>>>>>>> The non-contiguous hugepages issues are not as critical now that mempools can span over multiple memsegs/hugepages, but it is still a problem for any other library requiring big chunks of memory.
>>>>>>>>
>>>>>>>> Potentially if we were to add an option such as 'iommu-only' when all devices are bound to vfio-pci, we could have a reliable way to allocate hugepages by just requesting the number of pages from each numa.
>>>>>>>>
>>>>>>>> Thoughts?
>>>>>>> Hi Sergio,
>>>>>>>
>>>>>>> Thanks for your attention to this.
>>>>>>>
>>>>>>> For now, as we have some issues with non-contiguous
>>>>>>> hugepages, I'm thinking about following hybrid schema:
>>>>>>> 1) Allocate essential hugepages:
>>>>>>> 1.1) Allocate as many hugepages from numa N to
>>>>>>> only fit requested memory for this numa.
>>>>>>> 1.2) repeat 1.1 for all numa nodes.
>>>>>>> 2) Try to map all remaining free hugepages in a round-robin
>>>>>>> fashion like in this patch.
>>>>>>> 3) Sort pages and choose the most suitable.
>>>>>>>
>>>>>>> This solution should decrease number of issues connected with
>>>>>>> non-contiguous memory.
>>>>>> Sorry for late reply, I was hoping for more comments from the community.
>>>>>>
>>>>>> IMHO this should be default behavior, which means no config option and libnuma as EAL dependency.
>>>>>> I think your proposal is good, could you consider implementing such approach on next release?
>>>>> Sure, I can implement this for 17.08 release.
>>>>>
>>>>>>>> On 06/03/2017 09:34, Ilya Maximets wrote:
>>>>>>>>> Hi all.
>>>>>>>>>
>>>>>>>>> So, what about this change?
>>>>>>>>>
>>>>>>>>> Best regards, Ilya Maximets.
>>>>>>>>>
>>>>>>>>> On 16.02.2017 16:01, Ilya Maximets wrote:
>>>>>>>>>> Currently EAL allocates hugepages one by one not paying
>>>>>>>>>> attention from which NUMA node allocation was done.
>>>>>>>>>>
>>>>>>>>>> Such behaviour leads to allocation failure if number of
>>>>>>>>>> available hugepages for application limited by cgroups
>>>>>>>>>> or hugetlbfs and memory requested not only from the first
>>>>>>>>>> socket.
>>>>>>>>>>
>>>>>>>>>> Example:
>>>>>>>>>> # 90 x 1GB hugepages available in a system
>>>>>>>>>>
>>>>>>>>>> cgcreate -g hugetlb:/test
>>>>>>>>>> # Limit to 32GB of hugepages
>>>>>>>>>> cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
>>>>>>>>>> # Request 4GB from each of 2 sockets
>>>>>>>>>> cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>>>>>>>>>>
>>>>>>>>>> EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
>>>>>>>>>> EAL: 32 not 90 hugepages of size 1024 MB allocated
>>>>>>>>>> EAL: Not enough memory available on socket 1!
>>>>>>>>>> Requested: 4096MB, available: 0MB
>>>>>>>>>> PANIC in rte_eal_init():
>>>>>>>>>> Cannot init memory
>>>>>>>>>>
>>>>>>>>>> This happens because all allocated pages are
>>>>>>>>>> on socket 0.
>>>>>>>>>>
>>>>>>>>>> Fix this issue by setting mempolicy MPOL_PREFERRED for each
>>>>>>>>>> hugepage to one of requested nodes in a round-robin fashion.
>>>>>>>>>> In this case all allocated pages will be fairly distributed
>>>>>>>>>> between all requested nodes.
>>>>>>>>>>
>>>>>>>>>> New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>>>>>>>> introduced and disabled by default because of external
>>>>>>>>>> dependency from libnuma.
>>>>>>>>>>
>>>>>>>>>> Cc: <stable@dpdk.org>
>>>>>>>>>> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
>>>>>>>>>>
>>>>>>>>>> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
>>>>>>>>>> ---
>>>>>>>>>> config/common_base | 1 +
>>>>>>>>>> lib/librte_eal/Makefile | 4 ++
>>>>>>>>>> lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
>>>>>>>>>> mk/rte.app.mk | 3 ++
>>>>>>>>>> 4 files changed, 74 insertions(+)
>>>>>> Acked-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
>>>>> Thanks.
>>>>>
>>>>> Best regards, Ilya Maximets.
>>>>>
>>>
>>>
>>>
>>>
>
>
>
>
^ permalink raw reply [flat|nested] 15+ messages in thread
end of thread
Thread overview: 15+ messages
[not found] <CGME20170216130139eucas1p2512567d6f5db9eaac5ee840b56bf920a@eucas1p2.samsung.com>
2017-02-16 13:01 ` [dpdk-stable] [PATCH] mem: balanced allocation of hugepages Ilya Maximets
2017-02-16 13:26 ` Tan, Jianfeng
2017-02-16 13:55 ` Ilya Maximets
2017-02-16 13:57 ` Ilya Maximets
2017-02-16 13:31 ` [dpdk-stable] [dpdk-dev] " Bruce Richardson
2017-03-06 9:34 ` [dpdk-stable] " Ilya Maximets
2017-03-08 13:46 ` Sergio Gonzalez Monroy
2017-03-09 12:57 ` Ilya Maximets
2017-03-27 13:01 ` Sergio Gonzalez Monroy
2017-03-27 14:43 ` Ilya Maximets
2017-04-07 15:14 ` Ilya Maximets
2017-04-07 15:44 ` Thomas Monjalon
2017-04-10 7:11 ` Ilya Maximets
2017-04-10 7:51 ` Sergio Gonzalez Monroy
2017-04-10 8:05 ` Ilya Maximets