DPDK patches and discussions
* [dpdk-dev] [PATCH 1/3] fbarray: add API to find biggest used or free chunks
@ 2019-02-22 16:14 Anatoly Burakov
  2019-02-22 16:14 ` [dpdk-dev] [PATCH 2/3] memalloc: improve best-effort allocation Anatoly Burakov
  2019-02-22 16:14 ` [dpdk-dev] [PATCH 3/3] eal: attempt multiple hugepage allocations at init Anatoly Burakov
  0 siblings, 2 replies; 5+ messages in thread
From: Anatoly Burakov @ 2019-02-22 16:14 UTC (permalink / raw)
  To: dev; +Cc: maxime.leroy

Currently, while there is a way to find the total amount of used/free
space in an fbarray, there is no way to find the biggest contiguous
chunk. Add such an API, along with unit tests covering it.
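
As an illustration (not part of the patch), a minimal sketch of how the
new call pairs with the existing rte_fbarray_find_contig_free(); the
helper name and the array setup are assumed:

#include <stdio.h>

#include <rte_errno.h>
#include <rte_fbarray.h>

/* usage sketch only: "arr" is assumed to have been set up and populated
 * elsewhere (rte_fbarray_init() + rte_fbarray_set_used()).
 */
static void
report_biggest_free(struct rte_fbarray *arr)
{
	int idx, len;

	/* index of the first element of the biggest free chunk */
	idx = rte_fbarray_find_biggest_free(arr, 0);
	if (idx < 0) {
		/* rte_errno is ENOSPC when the array has no free space */
		printf("no free space: %s\n", rte_strerror(rte_errno));
		return;
	}
	/* length of the contiguous free run starting at that index */
	len = rte_fbarray_find_contig_free(arr, idx);
	printf("biggest free chunk: start %d, len %d\n", idx, len);
}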

Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---
 lib/librte_eal/common/eal_common_fbarray.c  | 109 ++++++++++++++
 lib/librte_eal/common/include/rte_fbarray.h |  70 +++++++++
 lib/librte_eal/rte_eal_version.map          |   4 +
 test/test/test_fbarray.c                    | 159 ++++++++++++++++++++
 4 files changed, 342 insertions(+)

diff --git a/lib/librte_eal/common/eal_common_fbarray.c b/lib/librte_eal/common/eal_common_fbarray.c
index ea0735cb8..5665282cd 100644
--- a/lib/librte_eal/common/eal_common_fbarray.c
+++ b/lib/librte_eal/common/eal_common_fbarray.c
@@ -1162,6 +1162,115 @@ fbarray_find_contig(struct rte_fbarray *arr, unsigned int start, bool next,
 	return ret;
 }
 
+static int
+fbarray_find_biggest(struct rte_fbarray *arr, unsigned int start, bool used,
+		bool rev)
+{
+	int cur_idx, next_idx, cur_len, biggest_idx, biggest_len;
+	/* don't stack if conditions, use function pointers instead */
+	int (*find_func)(struct rte_fbarray *, unsigned int);
+	int (*find_contig_func)(struct rte_fbarray *, unsigned int);
+
+	if (arr == NULL || start >= arr->len) {
+		rte_errno = EINVAL;
+		return -1;
+	}
+	/* the other API calls already do their fair share of cheap checks, so
+	 * no need to do them here.
+	 */
+
+	/* the APIs called are thread-safe, but something may still happen
+	 * in between the API calls, so lock the fbarray. all other APIs
+	 * read-lock the fbarray, so a read lock here is OK.
+	 */
+	rte_rwlock_read_lock(&arr->rwlock);
+
+	/* pick out appropriate functions */
+	if (used) {
+		if (rev) {
+			find_func = rte_fbarray_find_prev_used;
+			find_contig_func = rte_fbarray_find_rev_contig_used;
+		} else {
+			find_func = rte_fbarray_find_next_used;
+			find_contig_func = rte_fbarray_find_contig_used;
+		}
+	} else {
+		if (rev) {
+			find_func = rte_fbarray_find_prev_free;
+			find_contig_func = rte_fbarray_find_rev_contig_free;
+		} else {
+			find_func = rte_fbarray_find_next_free;
+			find_contig_func = rte_fbarray_find_contig_free;
+		}
+	}
+
+	cur_idx = start;
+	biggest_idx = -1; /* default is error */
+	biggest_len = 0;
+	for (;;) {
+		cur_idx = find_func(arr, cur_idx);
+
+		/* block found, check its length */
+		if (cur_idx >= 0) {
+			cur_len = find_contig_func(arr, cur_idx);
+			/* decide where we go next */
+			next_idx = rev ? cur_idx - cur_len : cur_idx + cur_len;
+			/* move current index to start of chunk */
+			cur_idx = rev ? next_idx + 1 : cur_idx;
+
+			if (cur_len > biggest_len) {
+				biggest_idx = cur_idx;
+				biggest_len = cur_len;
+			}
+			cur_idx = next_idx;
+			/* in reverse mode, next_idx may be -1 if chunk started
+			 * at array beginning. this means there's no more work
+			 * to do.
+			 */
+			if (cur_idx < 0)
+				break;
+		} else {
+			/* nothing more to find, stop. however, a failed API
+			 * call has set rte_errno, which we want to ignore, as
+			 * reaching the end of fbarray is not an error.
+			 */
+			rte_errno = 0;
+			break;
+		}
+	}
+	/* if we didn't find anything at all, set rte_errno */
+	if (biggest_idx < 0)
+		rte_errno = used ? ENOENT : ENOSPC;
+
+	rte_rwlock_read_unlock(&arr->rwlock);
+	return biggest_idx;
+}
+
+int __rte_experimental
+rte_fbarray_find_biggest_free(struct rte_fbarray *arr, unsigned int start)
+{
+	return fbarray_find_biggest(arr, start, false, false);
+}
+
+int __rte_experimental
+rte_fbarray_find_biggest_used(struct rte_fbarray *arr, unsigned int start)
+{
+	return fbarray_find_biggest(arr, start, true, false);
+}
+
+int __rte_experimental
+rte_fbarray_find_rev_biggest_free(struct rte_fbarray *arr, unsigned int start)
+{
+	return fbarray_find_biggest(arr, start, false, true);
+}
+
+int __rte_experimental
+rte_fbarray_find_rev_biggest_used(struct rte_fbarray *arr, unsigned int start)
+{
+	return fbarray_find_biggest(arr, start, true, true);
+}
+
+
 int __rte_experimental
 rte_fbarray_find_contig_free(struct rte_fbarray *arr, unsigned int start)
 {
diff --git a/lib/librte_eal/common/include/rte_fbarray.h b/lib/librte_eal/common/include/rte_fbarray.h
index 5d8805515..33841ca9a 100644
--- a/lib/librte_eal/common/include/rte_fbarray.h
+++ b/lib/librte_eal/common/include/rte_fbarray.h
@@ -451,6 +451,76 @@ int __rte_experimental
 rte_fbarray_find_rev_contig_used(struct rte_fbarray *arr, unsigned int start);
 
 
+/**
+ * Find index of biggest chunk of free elements, starting at specified index.
+ *
+ * @param arr
+ *   Valid pointer to allocated and correctly set up ``rte_fbarray`` structure.
+ *
+ * @param start
+ *   Element index to start search from.
+ *
+ * @return
+ *  - non-negative integer on success.
+ *  - -1 on failure, with ``rte_errno`` indicating reason for failure.
+ */
+int __rte_experimental
+rte_fbarray_find_biggest_free(struct rte_fbarray *arr, unsigned int start);
+
+
+/**
+ * Find index of biggest chunk of used elements, starting at specified index.
+ *
+ * @param arr
+ *   Valid pointer to allocated and correctly set up ``rte_fbarray`` structure.
+ *
+ * @param start
+ *   Element index to start search from.
+ *
+ * @return
+ *  - non-negative integer on success.
+ *  - -1 on failure, with ``rte_errno`` indicating reason for failure.
+ */
+int __rte_experimental
+rte_fbarray_find_biggest_used(struct rte_fbarray *arr, unsigned int start);
+
+
+/**
+ * Find index of biggest chunk of free elements before a specified index (like
+ * ``rte_fbarray_find_biggest_free``, but going in reverse).
+ *
+ * @param arr
+ *   Valid pointer to allocated and correctly set up ``rte_fbarray`` structure.
+ *
+ * @param start
+ *   Element index to start search from.
+ *
+ * @return
+ *  - non-negative integer on success.
+ *  - -1 on failure, with ``rte_errno`` indicating reason for failure.
+ */
+int __rte_experimental
+rte_fbarray_find_rev_biggest_free(struct rte_fbarray *arr, unsigned int start);
+
+
+/**
+ * Find index of biggest chunk of used elements before a specified index (like
+ * ``rte_fbarray_find_biggest_used``, but going in reverse).
+ *
+ * @param arr
+ *   Valid pointer to allocated and correctly set up ``rte_fbarray`` structure.
+ *
+ * @param start
+ *   Element index to start search from.
+ *
+ * @return
+ *  - non-negative integer on success.
+ *  - -1 on failure, with ``rte_errno`` indicating reason for failure.
+ */
+int __rte_experimental
+rte_fbarray_find_rev_biggest_used(struct rte_fbarray *arr, unsigned int start);
+
+
 /**
  * Dump ``rte_fbarray`` metadata.
  *
diff --git a/lib/librte_eal/rte_eal_version.map b/lib/librte_eal/rte_eal_version.map
index eb5f7b9cb..d78deaa8a 100644
--- a/lib/librte_eal/rte_eal_version.map
+++ b/lib/librte_eal/rte_eal_version.map
@@ -305,6 +305,8 @@ EXPERIMENTAL {
 	rte_fbarray_detach;
 	rte_fbarray_dump_metadata;
 	rte_fbarray_find_idx;
+	rte_fbarray_find_biggest_free;
+	rte_fbarray_find_biggest_used;
 	rte_fbarray_find_next_free;
 	rte_fbarray_find_next_used;
 	rte_fbarray_find_next_n_free;
@@ -315,6 +317,8 @@ EXPERIMENTAL {
 	rte_fbarray_find_prev_n_used;
 	rte_fbarray_find_contig_free;
 	rte_fbarray_find_contig_used;
+	rte_fbarray_find_rev_biggest_free;
+	rte_fbarray_find_rev_biggest_used;
 	rte_fbarray_find_rev_contig_free;
 	rte_fbarray_find_rev_contig_used;
 	rte_fbarray_get;
diff --git a/test/test/test_fbarray.c b/test/test/test_fbarray.c
index 8c44e2c7e..d2b041887 100644
--- a/test/test/test_fbarray.c
+++ b/test/test/test_fbarray.c
@@ -97,6 +97,8 @@ static int empty_msk_test_setup(void)
 {
 	/* do not fill anything in */
 	reset_array();
+	param.start = -1;
+	param.end = -1;
 	return 0;
 }
 
@@ -456,6 +458,157 @@ static int test_basic(void)
 	return TEST_SUCCESS;
 }
 
+static int test_biggest(struct rte_fbarray *arr, int first, int last)
+{
+	int lo_free_space_first, lo_free_space_last, lo_free_space_len;
+	int hi_free_space_first, hi_free_space_last, hi_free_space_len;
+	int max_free_space_first, max_free_space_last, max_free_space_len;
+	int len = last - first + 1;
+
+	/* first and last must either be both -1, or both not -1 */
+	TEST_ASSERT((first == -1) == (last == -1),
+			"Invalid arguments provided\n");
+
+	/* figure out what we expect from the low chunk of free space */
+	if (first == -1) {
+		/* special case: if there are no occupied elements at all,
+		 * consider both free spaces to consume the entire array.
+		 */
+		lo_free_space_first = 0;
+		lo_free_space_last = arr->len - 1;
+		lo_free_space_len = arr->len;
+		/* if there's no used space, length should be invalid */
+		len = -1;
+	} else if (first == 0) {
+		/* if occupied items start at 0, there's no free space */
+		lo_free_space_first = -1;
+		lo_free_space_last = -1;
+		lo_free_space_len = 0;
+	} else {
+		lo_free_space_first = 0;
+		lo_free_space_last = first - 1;
+		lo_free_space_len = lo_free_space_last -
+				lo_free_space_first + 1;
+	}
+
+	/* figure out what we expect from the high chunk of free space */
+	if (last == -1) {
+		/* special case: if there are no occupied elements at all,
+		 * consider both free spaces to consume the entire array.
+		 */
+		hi_free_space_first = 0;
+		hi_free_space_last = arr->len - 1;
+		hi_free_space_len = arr->len;
+		/* if there's no used space, length should be invalid */
+		len = -1;
+	} else if (last == ((int)arr->len - 1)) {
+		/* if occupied items end at array len, there's no free space */
+		hi_free_space_first = -1;
+		hi_free_space_last = -1;
+		hi_free_space_len = 0;
+	} else {
+		hi_free_space_first = last + 1;
+		hi_free_space_last = arr->len - 1;
+		hi_free_space_len = hi_free_space_last -
+				hi_free_space_first + 1;
+	}
+
+	/* find which one will be biggest */
+	if (lo_free_space_len > hi_free_space_len) {
+		max_free_space_first = lo_free_space_first;
+		max_free_space_last = lo_free_space_last;
+		max_free_space_len = lo_free_space_len;
+	} else {
+		/* if they are equal, we'll just use the high chunk */
+		max_free_space_first = hi_free_space_first;
+		max_free_space_last = hi_free_space_last;
+		max_free_space_len = hi_free_space_len;
+	}
+
+	/* check used regions - these should produce identical results */
+	TEST_ASSERT_EQUAL(rte_fbarray_find_biggest_used(arr, 0), first,
+			"Used space index is wrong\n");
+	TEST_ASSERT_EQUAL(rte_fbarray_find_rev_biggest_used(arr, arr->len - 1),
+			first,
+			"Used space index is wrong\n");
+	/* len may be -1, but function will return error anyway */
+	TEST_ASSERT_EQUAL(rte_fbarray_find_contig_used(arr, first), len,
+			"Used space length is wrong\n");
+
+	/* check if biggest free region is the one we expect to find. It can be
+	 * -1 if there's no free space - we've made sure we use one or the
+	 * other, even if both are invalid.
+	 */
+	TEST_ASSERT_EQUAL(rte_fbarray_find_biggest_free(arr, 0),
+			max_free_space_first,
+			"Biggest free space index is wrong\n");
+	TEST_ASSERT_EQUAL(rte_fbarray_find_rev_biggest_free(arr, arr->len - 1),
+			max_free_space_first,
+			"Biggest free space index is wrong\n");
+
+	/* if biggest region exists, check its length */
+	if (max_free_space_first != -1) {
+		TEST_ASSERT_EQUAL(rte_fbarray_find_contig_free(arr,
+					max_free_space_first),
+				max_free_space_len,
+				"Biggest free space length is wrong\n");
+		TEST_ASSERT_EQUAL(rte_fbarray_find_rev_contig_free(arr,
+					max_free_space_last),
+				max_free_space_len,
+				"Biggest free space length is wrong\n");
+	}
+
+	/* find if we see what we expect to see in the low region. if there is
+	 * no free space, the function should still match expected value, as
+	 * we've set it to -1. we're scanning backwards to avoid accidentally
+	 * hitting the high free space region. if there is no occupied space,
+	 * there's nothing to do.
+	 */
+	if (last != -1) {
+		TEST_ASSERT_EQUAL(rte_fbarray_find_rev_biggest_free(arr, last),
+				lo_free_space_first,
+				"Low free space index is wrong\n");
+	}
+
+	if (lo_free_space_first != -1) {
+		/* if low free region exists, check its length */
+		TEST_ASSERT_EQUAL(rte_fbarray_find_contig_free(arr,
+					lo_free_space_first),
+				lo_free_space_len,
+				"Low free space length is wrong\n");
+		TEST_ASSERT_EQUAL(rte_fbarray_find_rev_contig_free(arr,
+					lo_free_space_last),
+				lo_free_space_len,
+				"Low free space length is wrong\n");
+	}
+
+	/* find if we see what we expect to see in the high region. if there is
+	 * no free space, the function should still match expected value, as
+	 * we've set it to -1. we're scanning forwards to avoid accidentally
+	 * hitting the low free space region. if there is no occupied space,
+	 * there's nothing to do.
+	 */
+	if (first != -1) {
+		TEST_ASSERT_EQUAL(rte_fbarray_find_biggest_free(arr, first),
+				hi_free_space_first,
+				"High free space index is wrong\n");
+	}
+
+	/* if high free region exists, check its length */
+	if (hi_free_space_first != -1) {
+		TEST_ASSERT_EQUAL(rte_fbarray_find_contig_free(arr,
+					hi_free_space_first),
+				hi_free_space_len,
+				"High free space length is wrong\n");
+		TEST_ASSERT_EQUAL(rte_fbarray_find_rev_contig_free(arr,
+					hi_free_space_last),
+				hi_free_space_len,
+				"High free space length is wrong\n");
+	}
+
+	return 0;
+}
+
 static int ensure_correct(struct rte_fbarray *arr, int first, int last,
 		bool used)
 {
@@ -537,6 +690,9 @@ static int test_find(void)
 	if (ensure_correct(&param.arr, param.end + 1, FBARRAY_TEST_LEN - 1,
 			false))
 		return TEST_FAILED;
+	/* test that the find_biggest APIs work correctly */
+	if (test_biggest(&param.arr, param.start, param.end))
+		return TEST_FAILED;
 	return TEST_SUCCESS;
 }
 
@@ -546,6 +702,9 @@ static int test_empty(void)
 	/* ensure space is free */
 	if (ensure_correct(&param.arr, 0, FBARRAY_TEST_LEN - 1, false))
 		return TEST_FAILED;
+	/* test that the find_biggest APIs work correctly */
+	if (test_biggest(&param.arr, param.start, param.end))
+		return TEST_FAILED;
 	return TEST_SUCCESS;
 }
 
-- 
2.17.1

^ permalink raw reply	[flat|nested] 5+ messages in thread

* [dpdk-dev] [PATCH 2/3] memalloc: improve best-effort allocation
  2019-02-22 16:14 [dpdk-dev] [PATCH 1/3] fbarray: add API to find biggest used or free chunks Anatoly Burakov
@ 2019-02-22 16:14 ` Anatoly Burakov
  2019-02-22 16:14 ` [dpdk-dev] [PATCH 3/3] eal: attempt multiple hugepage allocations at init Anatoly Burakov
  1 sibling, 0 replies; 5+ messages in thread
From: Anatoly Burakov @ 2019-02-22 16:14 UTC (permalink / raw)
  To: dev; +Cc: maxime.leroy

Previously, when using non-exact allocation, we were requesting
N pages to be allocated, but allowed the memory subsystem to
allocate fewer than requested. However, we were still expecting
to see N contiguous free pages in the memseg list.

This is a problem, because there is no way to try to allocate as
many pages as possible when there are not enough contiguous free
entries in the list.

To address this, use the new "find biggest" fbarray APIs when
allocating a non-exact number of pages. This way, we first check
how many contiguous entries in the list are actually available,
and then try to allocate up to that number.
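
Condensed, the lookup added to alloc_seg_walk() below boils down to the
following sketch (variable names as in the patch; surrounding context
omitted):

/* best-effort: take the biggest free run, cap the request to its size */
cur_idx = rte_fbarray_find_biggest_free(&cur_msl->memseg_arr, 0);
if (cur_idx < 0)
	return 0; /* nothing free in this list, move on to the next one */
cur_len = rte_fbarray_find_contig_free(&cur_msl->memseg_arr, cur_idx);
need = RTE_MIN(need, (unsigned int)cur_len);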

Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---
 lib/librte_eal/linuxapp/eal/eal_memalloc.c | 33 ++++++++++++++++++----
 1 file changed, 28 insertions(+), 5 deletions(-)

diff --git a/lib/librte_eal/linuxapp/eal/eal_memalloc.c b/lib/librte_eal/linuxapp/eal/eal_memalloc.c
index b6fb183db..14c3ea838 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memalloc.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memalloc.c
@@ -874,10 +874,32 @@ alloc_seg_walk(const struct rte_memseg_list *msl, void *arg)
 	need = wa->n_segs;
 
 	/* try finding space in memseg list */
-	cur_idx = rte_fbarray_find_next_n_free(&cur_msl->memseg_arr, 0, need);
-	if (cur_idx < 0)
-		return 0;
-	start_idx = cur_idx;
+	if (wa->exact) {
+		/* if we require exact number of pages in a list, find them */
+		cur_idx = rte_fbarray_find_next_n_free(&cur_msl->memseg_arr, 0,
+				need);
+		if (cur_idx < 0)
+			return 0;
+		start_idx = cur_idx;
+	} else {
+		int cur_len;
+
+		/* we don't require exact number of pages, so we're going to go
+		 * for best-effort allocation. that means finding the biggest
+		 * unused block, and going with that.
+		 */
+		cur_idx = rte_fbarray_find_biggest_free(&cur_msl->memseg_arr,
+				0);
+		if (cur_idx < 0)
+			return 0;
+		start_idx = cur_idx;
+		/* adjust the size to possibly be smaller than original
+		 * request, but do not allow it to be bigger.
+		 */
+		cur_len = rte_fbarray_find_contig_free(&cur_msl->memseg_arr,
+				cur_idx);
+		need = RTE_MIN(need, (unsigned int)cur_len);
+	}
 
 	/* do not allow any page allocations during the time we're allocating,
 	 * because file creation and locking operations are not atomic,
@@ -954,7 +976,8 @@ alloc_seg_walk(const struct rte_memseg_list *msl, void *arg)
 		cur_msl->version++;
 	if (dir_fd >= 0)
 		close(dir_fd);
-	return 1;
+	/* if we didn't allocate any segments, move on to the next list */
+	return i > 0;
 }
 
 struct free_walk_param {
-- 
2.17.1

^ permalink raw reply	[flat|nested] 5+ messages in thread

* [dpdk-dev] [PATCH 3/3] eal: attempt multiple hugepage allocations at init
  2019-02-22 16:14 [dpdk-dev] [PATCH 1/3] fbarray: add API to find biggest used or free chunks Anatoly Burakov
  2019-02-22 16:14 ` [dpdk-dev] [PATCH 2/3] memalloc: improve best-effort allocation Anatoly Burakov
@ 2019-02-22 16:14 ` Anatoly Burakov
  2019-03-28 22:22   ` Thomas Monjalon
  1 sibling, 1 reply; 5+ messages in thread
From: Anatoly Burakov @ 2019-02-22 16:14 UTC (permalink / raw)
  To: dev; +Cc: maxime.leroy

When requesting memory with the ``-m`` or ``--socket-mem`` flags,
init will currently fail if the requested amount of memory is
bigger than any one memseg list, even if the total amount of
available memory is sufficient.

Fix this by making EAL attempt to allocate pages multiple times,
until we either fulfill our memory requirements or run out of
hugepages to allocate.
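
In outline, the loop introduced below works as follows (simplified
sketch; the DO_NOT_FREE marking and the per-iteration malloc()/free()
of the "pages" array are omitted):

/* keep requesting the remainder until the full amount is allocated */
num_pages_alloc = 0;
do {
	needed = num_pages - num_pages_alloc;
	/* last argument "false" == non-exact: fewer pages may come back */
	cur_pages = eal_memalloc_alloc_seg_bulk(pages, needed,
			hpi->hugepage_sz, socket_id, false);
	if (cur_pages <= 0)
		return -1; /* ran out of hugepages before meeting the request */
	num_pages_alloc += cur_pages;
} while (num_pages_alloc != num_pages);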

Bugzilla ID: 95

Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---
 lib/librte_eal/linuxapp/eal/eal_memory.c | 47 ++++++++++++++++--------
 1 file changed, 32 insertions(+), 15 deletions(-)

diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 1b96b576e..361109eb3 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -1784,30 +1784,47 @@ eal_hugepage_init(void)
 			struct rte_memseg **pages;
 			struct hugepage_info *hpi = &used_hp[hp_sz_idx];
 			unsigned int num_pages = hpi->num_pages[socket_id];
-			int num_pages_alloc, i;
+			unsigned int num_pages_alloc;
 
 			if (num_pages == 0)
 				continue;
 
-			pages = malloc(sizeof(*pages) * num_pages);
-
 			RTE_LOG(DEBUG, EAL, "Allocating %u pages of size %" PRIu64 "M on socket %i\n",
 				num_pages, hpi->hugepage_sz >> 20, socket_id);
 
-			num_pages_alloc = eal_memalloc_alloc_seg_bulk(pages,
-					num_pages, hpi->hugepage_sz,
-					socket_id, true);
-			if (num_pages_alloc < 0) {
+			/* we may not be able to allocate all pages in one go,
+			 * because we break up our memory map into multiple
+			 * memseg lists. therefore, try allocating multiple
+			 * times and see if we can get the desired number of
+			 * pages from multiple allocations.
+			 */
+
+			num_pages_alloc = 0;
+			do {
+				int i, cur_pages, needed;
+
+				needed = num_pages - num_pages_alloc;
+
+				pages = malloc(sizeof(*pages) * needed);
+
+				/* do not request exact number of pages */
+				cur_pages = eal_memalloc_alloc_seg_bulk(pages,
+						needed, hpi->hugepage_sz,
+						socket_id, false);
+				if (cur_pages <= 0) {
+					free(pages);
+					return -1;
+				}
+
+				/* mark preallocated pages as unfreeable */
+				for (i = 0; i < cur_pages; i++) {
+					struct rte_memseg *ms = pages[i];
+					ms->flags |= RTE_MEMSEG_FLAG_DO_NOT_FREE;
+				}
 				free(pages);
-				return -1;
-			}
 
-			/* mark preallocated pages as unfreeable */
-			for (i = 0; i < num_pages_alloc; i++) {
-				struct rte_memseg *ms = pages[i];
-				ms->flags |= RTE_MEMSEG_FLAG_DO_NOT_FREE;
-			}
-			free(pages);
+				num_pages_alloc += cur_pages;
+			} while (num_pages_alloc != num_pages);
 		}
 	}
 	/* if socket limits were specified, set them */
-- 
2.17.1

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [dpdk-dev] [PATCH 3/3] eal: attempt multiple hugepage allocations at init
  2019-02-22 16:14 ` [dpdk-dev] [PATCH 3/3] eal: attempt multiple hugepage allocations at init Anatoly Burakov
@ 2019-03-28 22:22   ` Thomas Monjalon
  2019-03-28 22:22     ` Thomas Monjalon
  0 siblings, 1 reply; 5+ messages in thread
From: Thomas Monjalon @ 2019-03-28 22:22 UTC (permalink / raw)
  To: Anatoly Burakov; +Cc: dev, maxime.leroy

22/02/2019 17:14, Anatoly Burakov:
> When requesting memory with the ``-m`` or ``--socket-mem`` flags,
> init will currently fail if the requested amount of memory is
> bigger than any one memseg list, even if the total amount of
> available memory is sufficient.
>
> Fix this by making EAL attempt to allocate pages multiple times,
> until we either fulfill our memory requirements or run out of
> hugepages to allocate.
> 
> Bugzilla ID: 95
> 
> Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>

Series applied, thanks

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [dpdk-dev] [PATCH 3/3] eal: attempt multiple hugepage allocations at init
  2019-03-28 22:22   ` Thomas Monjalon
@ 2019-03-28 22:22     ` Thomas Monjalon
  0 siblings, 0 replies; 5+ messages in thread
From: Thomas Monjalon @ 2019-03-28 22:22 UTC (permalink / raw)
  To: Anatoly Burakov; +Cc: dev, maxime.leroy

22/02/2019 17:14, Anatoly Burakov:
> When requesting memory with the ``-m`` or ``--socket-mem`` flags,
> init will currently fail if the requested amount of memory is
> bigger than any one memseg list, even if the total amount of
> available memory is sufficient.
>
> Fix this by making EAL attempt to allocate pages multiple times,
> until we either fulfill our memory requirements or run out of
> hugepages to allocate.
> 
> Bugzilla ID: 95
> 
> Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>

Series applied, thanks




^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2019-03-28 22:22 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-02-22 16:14 [dpdk-dev] [PATCH 1/3] fbarray: add API to find biggest used or free chunks Anatoly Burakov
2019-02-22 16:14 ` [dpdk-dev] [PATCH 2/3] memalloc: improve best-effort allocation Anatoly Burakov
2019-02-22 16:14 ` [dpdk-dev] [PATCH 3/3] eal: attempt multiple hugepage allocations at init Anatoly Burakov
2019-03-28 22:22   ` Thomas Monjalon
2019-03-28 22:22     ` Thomas Monjalon
