DPDK patches and discussions
 help / color / mirror / Atom feed
From: David Marchand <david.marchand@6wind.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH RFC] eal: change default per socket memory allocation
Date: Wed, 30 Apr 2014 16:15:04 +0200	[thread overview]
Message-ID: <1398867304-21171-1-git-send-email-david.marchand@6wind.com> (raw)

From: Didier Pallard <didier.pallard@6wind.com>

Currently, if there is more memory in hugepages than the amount
requested by the dpdk application, the memory is allocated by taking as
much memory as possible from each socket, starting from the first one.
For example, if a system is configured with 8 GB across 2 sockets (4 GB
per socket), and dpdk requests only 4 GB of memory, all memory will be
taken from socket 0 (which has exactly 4 GB of free hugepages) even if
some cores are configured on socket 1 and there are free hugepages on
socket 1...

Change this behaviour to allocate memory on all sockets where some cores
are configured, spreading the memory amongst sockets using the following
ratio per socket:
(number of cores configured on the socket / total number of configured
cores) * requested memory

This algorithm is used when the memory amount is specified globally
using the -m option. Per-socket memory allocation can always be done
using the --socket-mem option.

Signed-off-by: Didier Pallard <didier.pallard@6wind.com>
---
 lib/librte_eal/bsdapp/eal/eal.c          |   24 ++++++++++++++++++++++++
 lib/librte_eal/linuxapp/eal/eal.c        |   24 ++++++++++++++++++++++++
 lib/librte_eal/linuxapp/eal/eal_memory.c |    8 ++++----
 3 files changed, 52 insertions(+), 4 deletions(-)

diff --git a/lib/librte_eal/bsdapp/eal/eal.c b/lib/librte_eal/bsdapp/eal/eal.c
index 5c181b3..b6b5f20 100644
--- a/lib/librte_eal/bsdapp/eal/eal.c
+++ b/lib/librte_eal/bsdapp/eal/eal.c
@@ -869,6 +869,30 @@ rte_eal_init(int argc, char **argv)
 			internal_config.memory = eal_get_hugepage_mem_size();
 	}
 
+	/* Automatically spread requested memory amongst detected sockets according */
+	/* to number of cores from cpu mask present on each socket */
+	if (internal_config.no_hugetlbfs == 0 &&
+	    internal_config.process_type != RTE_PROC_SECONDARY &&
+	    internal_config.xen_dom0_support == 0 &&
+	    internal_config.force_sockets == 0) {
+		int cpu_per_socket[RTE_MAX_NUMA_NODES];
+		unsigned lcore_id, socket_id;
+
+		/* Compute number of cores per socket */
+		memset(cpu_per_socket, 0, sizeof(cpu_per_socket));
+		RTE_LCORE_FOREACH(lcore_id) {
+			cpu_per_socket[rte_lcore_to_socket_id(lcore_id)]++;
+		}
+
+		/* Set memory amount per socket; round up to be sure that sum of all */
+		/* sockets allocation is greater than requested memory size */
+		for (socket_id=0 ; socket_id<RTE_MAX_NUMA_NODES ; socket_id++) {
+			internal_config.socket_mem[socket_id] = (internal_config.memory *
+			    cpu_per_socket[socket_id] + rte_lcore_count() - 1) /
+			    rte_lcore_count();
+		}
+	}
+
 	if (internal_config.vmware_tsc_map == 1) {
 #ifdef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT
 		rte_cycles_vmware_tsc_map = 1;
diff --git a/lib/librte_eal/linuxapp/eal/eal.c b/lib/librte_eal/linuxapp/eal/eal.c
index de082ab..37701ec 100644
--- a/lib/librte_eal/linuxapp/eal/eal.c
+++ b/lib/librte_eal/linuxapp/eal/eal.c
@@ -1035,6 +1035,30 @@ rte_eal_init(int argc, char **argv)
 			internal_config.memory = eal_get_hugepage_mem_size();
 	}
 
+	/* Automatically spread requested memory amongst detected sockets according */
+	/* to number of cores from cpu mask present on each socket */
+	if (internal_config.no_hugetlbfs == 0 &&
+	    internal_config.process_type != RTE_PROC_SECONDARY &&
+	    internal_config.xen_dom0_support == 0 &&
+	    internal_config.force_sockets == 0) {
+		int cpu_per_socket[RTE_MAX_NUMA_NODES];
+		unsigned lcore_id, socket_id;
+
+		/* Compute number of cores per socket */
+		memset(cpu_per_socket, 0, sizeof(cpu_per_socket));
+		RTE_LCORE_FOREACH(lcore_id) {
+			cpu_per_socket[rte_lcore_to_socket_id(lcore_id)]++;
+		}
+
+		/* Set memory amount per socket; round up to be sure that sum of all */
+		/* sockets allocation is greater than requested memory size */
+		for (socket_id=0 ; socket_id<RTE_MAX_NUMA_NODES ; socket_id++) {
+			internal_config.socket_mem[socket_id] = (internal_config.memory *
+			    cpu_per_socket[socket_id] + rte_lcore_count() - 1) /
+			    rte_lcore_count();
+		}
+	}
+
 	if (internal_config.vmware_tsc_map == 1) {
 #ifdef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT
 		rte_cycles_vmware_tsc_map = 1;
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 73a6394..a9e6fa1 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -876,16 +876,16 @@ calc_num_pages_per_socket(uint64_t * memory,
 	unsigned requested, available;
 	int total_num_pages = 0;
 	uint64_t remaining_mem, cur_mem;
-	uint64_t total_mem = internal_config.memory;
+	int64_t total_mem = (int64_t) internal_config.memory;
 
 	if (num_hp_info == 0)
 		return -1;
 
-	for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_mem != 0; socket++) {
+	for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_mem > 0; socket++) {
 		/* if specific memory amounts per socket weren't requested */
-		if (internal_config.force_sockets == 0) {
+		if (internal_config.force_sockets == 0 && memory[socket] == 0) {
 			/* take whatever is available */
-			memory[socket] = RTE_MIN(get_socket_mem_size(socket),
+			memory[socket] = RTE_MIN((int64_t) get_socket_mem_size(socket),
 					total_mem);
 		}
 		/* skips if the memory on specific socket wasn't requested */
-- 
1.7.10.4

             reply	other threads:[~2014-04-30 14:15 UTC|newest]

Thread overview: 9+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2014-04-30 14:15 David Marchand [this message]
2014-05-01 16:06 ` Burakov, Anatoly
2014-05-02  8:54   ` Burakov, Anatoly
2014-05-02  9:05     ` Venkatesan, Venky
2014-05-05  9:26       ` David Marchand
2014-05-06 10:05         ` Burakov, Anatoly
2014-05-06 15:18           ` Thomas Monjalon
2014-05-06 15:56             ` Burakov, Anatoly
2014-05-07 14:56         ` Venkatesan, Venky

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1398867304-21171-1-git-send-email-david.marchand@6wind.com \
    --to=david.marchand@6wind.com \
    --cc=dev@dpdk.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).