From: Chao Zhu <chaozhu@linux.vnet.ibm.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v5 11/14] Add huge page size define for IBM Power architecture
Date: Tue, 25 Nov 2014 17:17:14 -0500 [thread overview]
Message-ID: <1416953837-15894-12-git-send-email-chaozhu@linux.vnet.ibm.com> (raw)
In-Reply-To: <1416953837-15894-1-git-send-email-chaozhu@linux.vnet.ibm.com>
The IBM Power architecture supports different huge page sizes (16MB and
16GB) than x86. This patch defines RTE_PGSIZE_16M and RTE_PGSIZE_16G in
the rte_page_sizes enum and adds huge page size support in DPDK for the
IBM Power architecture.
Signed-off-by: Chao Zhu <chaozhu@linux.vnet.ibm.com>
---
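[Not part of the patch itself: a minimal usage sketch for reviewers showing
how an application could request the new page sizes. The memzone name and
length below are illustrative; only the existing memzone API plus the flags
added by this patch are used.]

/* Illustrative only -- assumes EAL has been initialised and huge pages
 * are mounted (e.g. 16MB pages on IBM Power). */
#include <stdio.h>
#include <rte_memory.h>
#include <rte_memzone.h>

static int
reserve_from_16mb_pages(void)
{
	/* Prefer 16MB pages, but let EAL fall back to another available
	 * page size if 16MB pages are not present. */
	const struct rte_memzone *mz = rte_memzone_reserve(
			"example_zone", 1024, SOCKET_ID_ANY,
			RTE_MEMZONE_16MB | RTE_MEMZONE_SIZE_HINT_ONLY);

	if (mz == NULL)
		return -1;

	printf("reserved %zu bytes from %llu-byte pages\n",
			mz->len, (unsigned long long)mz->hugepage_sz);
	return 0;
}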
app/test/test_memzone.c | 123 ++++++++++++++++++++++++++-
lib/librte_eal/common/eal_common_memzone.c | 19 +++-
lib/librte_eal/common/include/rte_memory.h | 9 ++-
lib/librte_eal/common/include/rte_memzone.h | 8 ++
lib/librte_eal/linuxapp/eal/eal.c | 9 ++-
5 files changed, 156 insertions(+), 12 deletions(-)
diff --git a/app/test/test_memzone.c b/app/test/test_memzone.c
index 381f643..387dbbc 100644
--- a/app/test/test_memzone.c
+++ b/app/test/test_memzone.c
@@ -133,6 +133,8 @@ test_memzone_reserve_flags(void)
const struct rte_memseg *ms;
int hugepage_2MB_avail = 0;
int hugepage_1GB_avail = 0;
+ int hugepage_16MB_avail = 0;
+ int hugepage_16GB_avail = 0;
const size_t size = 100;
int i = 0;
ms = rte_eal_get_physmem_layout();
@@ -141,12 +143,20 @@ test_memzone_reserve_flags(void)
hugepage_2MB_avail = 1;
if (ms[i].hugepage_sz == RTE_PGSIZE_1G)
hugepage_1GB_avail = 1;
+ if (ms[i].hugepage_sz == RTE_PGSIZE_16M)
+ hugepage_16MB_avail = 1;
+ if (ms[i].hugepage_sz == RTE_PGSIZE_16G)
+ hugepage_16GB_avail = 1;
}
- /* Display the availability of 2MB and 1GB pages */
+ /* Display the availability of 2MB, 1GB, 16MB and 16GB pages */
if (hugepage_2MB_avail)
printf("2MB Huge pages available\n");
if (hugepage_1GB_avail)
printf("1GB Huge pages available\n");
+ if (hugepage_16MB_avail)
+ printf("16MB Huge pages available\n");
+ if (hugepage_16GB_avail)
+ printf("16GB Huge pages available\n");
/*
* If 2MB pages available, check that a small memzone is correctly
* reserved from 2MB huge pages when requested by the RTE_MEMZONE_2MB flag.
@@ -255,6 +265,117 @@ test_memzone_reserve_flags(void)
}
}
}
+ /*
+ * These checks are specific to IBM Power. If 16MB pages are available,
+ * check that a small memzone is correctly reserved from 16MB huge pages
+ * when requested by the RTE_MEMZONE_16MB flag. Also check that the
+ * RTE_MEMZONE_SIZE_HINT_ONLY flag only falls back to an available
+ * page size (i.e. 16GB) when 16MB pages are unavailable.
+ */
+ if (hugepage_16MB_avail) {
+ mz = rte_memzone_reserve("flag_zone_16M", size, SOCKET_ID_ANY,
+ RTE_MEMZONE_16MB);
+ if (mz == NULL) {
+ printf("MEMZONE FLAG 16MB\n");
+ return -1;
+ }
+ if (mz->hugepage_sz != RTE_PGSIZE_16M) {
+ printf("hugepage_sz not equal 16M\n");
+ return -1;
+ }
+
+ mz = rte_memzone_reserve("flag_zone_16M_HINT", size,
+ SOCKET_ID_ANY, RTE_MEMZONE_16MB|RTE_MEMZONE_SIZE_HINT_ONLY);
+ if (mz == NULL) {
+ printf("MEMZONE FLAG 2MB\n");
+ return -1;
+ }
+ if (mz->hugepage_sz != RTE_PGSIZE_16M) {
+ printf("hugepage_sz not equal 16M\n");
+ return -1;
+ }
+
+ /* Check that if 16GB huge pages are unavailable, the reservation
+ * fails unless the HINT flag is given
+ */
+ if (!hugepage_16GB_avail) {
+ mz = rte_memzone_reserve("flag_zone_16G_HINT", size,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_16GB|RTE_MEMZONE_SIZE_HINT_ONLY);
+ if (mz == NULL) {
+ printf("MEMZONE FLAG 16GB & HINT\n");
+ return -1;
+ }
+ if (mz->hugepage_sz != RTE_PGSIZE_16M) {
+ printf("hugepage_sz not equal 16M\n");
+ return -1;
+ }
+
+ mz = rte_memzone_reserve("flag_zone_16G", size,
+ SOCKET_ID_ANY, RTE_MEMZONE_16GB);
+ if (mz != NULL) {
+ printf("MEMZONE FLAG 16GB\n");
+ return -1;
+ }
+ }
+ }
+ /* As with the 16MB tests above, but for 16GB huge page requests */
+ if (hugepage_16GB_avail) {
+ mz = rte_memzone_reserve("flag_zone_16G", size, SOCKET_ID_ANY,
+ RTE_MEMZONE_16GB);
+ if (mz == NULL) {
+ printf("MEMZONE FLAG 16GB\n");
+ return -1;
+ }
+ if (mz->hugepage_sz != RTE_PGSIZE_16G) {
+ printf("hugepage_sz not equal 16G\n");
+ return -1;
+ }
+
+ mz = rte_memzone_reserve("flag_zone_16G_HINT", size,
+ SOCKET_ID_ANY, RTE_MEMZONE_16GB|RTE_MEMZONE_SIZE_HINT_ONLY);
+ if (mz == NULL) {
+ printf("MEMZONE FLAG 16GB\n");
+ return -1;
+ }
+ if (mz->hugepage_sz != RTE_PGSIZE_16G) {
+ printf("hugepage_sz not equal 16G\n");
+ return -1;
+ }
+
+ /* Check that if 16MB huge pages are unavailable, the reservation
+ * fails unless the HINT flag is given
+ */
+ if (!hugepage_16MB_avail) {
+ mz = rte_memzone_reserve("flag_zone_16M_HINT", size,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_16MB|RTE_MEMZONE_SIZE_HINT_ONLY);
+ if (mz == NULL) {
+ printf("MEMZONE FLAG 16MB & HINT\n");
+ return -1;
+ }
+ if (mz->hugepage_sz != RTE_PGSIZE_16G) {
+ printf("hugepage_sz not equal 16G\n");
+ return -1;
+ }
+ mz = rte_memzone_reserve("flag_zone_16M", size,
+ SOCKET_ID_ANY, RTE_MEMZONE_16MB);
+ if (mz != NULL) {
+ printf("MEMZONE FLAG 16MB\n");
+ return -1;
+ }
+ }
+
+ if (hugepage_16MB_avail && hugepage_16GB_avail) {
+ mz = rte_memzone_reserve("flag_zone_16M_HINT", size,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_16MB|RTE_MEMZONE_16GB);
+ if (mz != NULL) {
+ printf("BOTH SIZES SET\n");
+ return -1;
+ }
+ }
+ }
return 0;
}
diff --git a/lib/librte_eal/common/eal_common_memzone.c b/lib/librte_eal/common/eal_common_memzone.c
index 5acd9ce..f1fc4a7 100644
--- a/lib/librte_eal/common/eal_common_memzone.c
+++ b/lib/librte_eal/common/eal_common_memzone.c
@@ -216,10 +216,16 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
/* check flags for hugepage sizes */
if ((flags & RTE_MEMZONE_2MB) &&
- free_memseg[i].hugepage_sz == RTE_PGSIZE_1G )
+ free_memseg[i].hugepage_sz == RTE_PGSIZE_1G)
continue;
if ((flags & RTE_MEMZONE_1GB) &&
- free_memseg[i].hugepage_sz == RTE_PGSIZE_2M )
+ free_memseg[i].hugepage_sz == RTE_PGSIZE_2M)
+ continue;
+ if ((flags & RTE_MEMZONE_16MB) &&
+ free_memseg[i].hugepage_sz == RTE_PGSIZE_16G)
+ continue;
+ if ((flags & RTE_MEMZONE_16GB) &&
+ free_memseg[i].hugepage_sz == RTE_PGSIZE_16M)
continue;
/* this segment is the best until now */
@@ -256,7 +262,8 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
* try allocating again without the size parameter otherwise -fail.
*/
if ((flags & RTE_MEMZONE_SIZE_HINT_ONLY) &&
- ((flags & RTE_MEMZONE_1GB) || (flags & RTE_MEMZONE_2MB)))
+ ((flags & RTE_MEMZONE_1GB) || (flags & RTE_MEMZONE_2MB)
+ || (flags & RTE_MEMZONE_16MB) || (flags & RTE_MEMZONE_16GB)))
return memzone_reserve_aligned_thread_unsafe(name,
len, socket_id, 0, align, bound);
@@ -313,7 +320,8 @@ rte_memzone_reserve_aligned(const char *name, size_t len,
const struct rte_memzone *mz = NULL;
/* both sizes cannot be explicitly called for */
- if ((flags & RTE_MEMZONE_1GB) && (flags & RTE_MEMZONE_2MB)) {
+ if (((flags & RTE_MEMZONE_1GB) && (flags & RTE_MEMZONE_2MB))
+ || ((flags & RTE_MEMZONE_16MB) && (flags & RTE_MEMZONE_16GB))) {
rte_errno = EINVAL;
return NULL;
}
@@ -344,7 +352,8 @@ rte_memzone_reserve_bounded(const char *name, size_t len,
const struct rte_memzone *mz = NULL;
/* both sizes cannot be explicitly called for */
- if ((flags & RTE_MEMZONE_1GB) && (flags & RTE_MEMZONE_2MB)) {
+ if (((flags & RTE_MEMZONE_1GB) && (flags & RTE_MEMZONE_2MB))
+ || ((flags & RTE_MEMZONE_16MB) && (flags & RTE_MEMZONE_16GB))) {
rte_errno = EINVAL;
return NULL;
}
diff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h
index 4cf8ea9..2ed2637 100644
--- a/lib/librte_eal/common/include/rte_memory.h
+++ b/lib/librte_eal/common/include/rte_memory.h
@@ -53,9 +53,12 @@ extern "C" {
#endif
enum rte_page_sizes {
- RTE_PGSIZE_4K = 1 << 12,
- RTE_PGSIZE_2M = RTE_PGSIZE_4K << 9,
- RTE_PGSIZE_1G = RTE_PGSIZE_2M <<9
+ RTE_PGSIZE_4K = 1ULL << 12,
+ RTE_PGSIZE_2M = 1ULL << 21,
+ RTE_PGSIZE_1G = 1ULL << 30,
+ RTE_PGSIZE_64K = 1ULL << 16,
+ RTE_PGSIZE_16M = 1ULL << 24,
+ RTE_PGSIZE_16G = 1ULL << 34
};
#define SOCKET_ID_ANY -1 /**< Any NUMA socket. */
diff --git a/lib/librte_eal/common/include/rte_memzone.h b/lib/librte_eal/common/include/rte_memzone.h
index 5014409..7d47bff 100644
--- a/lib/librte_eal/common/include/rte_memzone.h
+++ b/lib/librte_eal/common/include/rte_memzone.h
@@ -60,6 +60,8 @@ extern "C" {
#define RTE_MEMZONE_2MB 0x00000001 /**< Use 2MB pages. */
#define RTE_MEMZONE_1GB 0x00000002 /**< Use 1GB pages. */
+#define RTE_MEMZONE_16MB 0x00000100 /**< Use 16MB pages. */
+#define RTE_MEMZONE_16GB 0x00000200 /**< Use 16GB pages. */
#define RTE_MEMZONE_SIZE_HINT_ONLY 0x00000004 /**< Use available page size */
/**
@@ -111,6 +113,8 @@ struct rte_memzone {
* taken from 1GB or 2MB hugepages.
* - RTE_MEMZONE_2MB - Reserve from 2MB pages
* - RTE_MEMZONE_1GB - Reserve from 1GB pages
+ * - RTE_MEMZONE_16MB - Reserve from 16MB pages
+ * - RTE_MEMZONE_16GB - Reserve from 16GB pages
* - RTE_MEMZONE_SIZE_HINT_ONLY - Allow alternative page size to be used if
* the requested page size is unavailable.
* If this flag is not set, the function
@@ -156,6 +160,8 @@ const struct rte_memzone *rte_memzone_reserve(const char *name,
* taken from 1GB or 2MB hugepages.
* - RTE_MEMZONE_2MB - Reserve from 2MB pages
* - RTE_MEMZONE_1GB - Reserve from 1GB pages
+ * - RTE_MEMZONE_16MB - Reserve from 16MB pages
+ * - RTE_MEMZONE_16GB - Reserve from 16GB pages
* - RTE_MEMZONE_SIZE_HINT_ONLY - Allow alternative page size to be used if
* the requested page size is unavailable.
* If this flag is not set, the function
@@ -206,6 +212,8 @@ const struct rte_memzone *rte_memzone_reserve_aligned(const char *name,
* taken from 1GB or 2MB hugepages.
* - RTE_MEMZONE_2MB - Reserve from 2MB pages
* - RTE_MEMZONE_1GB - Reserve from 1GB pages
+ * - RTE_MEMZONE_16MB - Reserve from 16MB pages
+ * - RTE_MEMZONE_16GB - Reserve from 16GB pages
* - RTE_MEMZONE_SIZE_HINT_ONLY - Allow alternative page size to be used if
* the requested page size is unavailable.
* If this flag is not set, the function
diff --git a/lib/librte_eal/linuxapp/eal/eal.c b/lib/librte_eal/linuxapp/eal/eal.c
index 8c0223f..8d2a546 100644
--- a/lib/librte_eal/linuxapp/eal/eal.c
+++ b/lib/librte_eal/linuxapp/eal/eal.c
@@ -455,9 +455,12 @@ eal_parse_base_virtaddr(const char *arg)
return -1;
#endif
- /* align the addr on 2M boundary */
- internal_config.base_virtaddr = RTE_PTR_ALIGN_CEIL((uintptr_t)addr,
- RTE_PGSIZE_2M);
+ /* Align the address on a 16MB boundary; 16MB is the minimum huge page
+ * size on the IBM Power architecture. An address aligned to 16MB is
+ * also aligned to 2MB, so this alignment can be used on x86 as well. */
+ internal_config.base_virtaddr =
+ RTE_PTR_ALIGN_CEIL((uintptr_t)addr, RTE_PGSIZE_16M);
return 0;
}
--
1.7.1
Thread overview: 17+ messages
2014-11-25 22:17 [dpdk-dev] [PATCH v5 00/14] Patches for DPDK to support " Chao Zhu
2014-11-25 22:17 ` [dpdk-dev] [PATCH v5 01/14] Add compiling definations for IBM " Chao Zhu
2014-11-25 22:17 ` [dpdk-dev] [PATCH v5 02/14] Add atomic operations " Chao Zhu
2014-11-25 22:17 ` [dpdk-dev] [PATCH v5 03/14] Add byte order " Chao Zhu
2014-11-25 22:17 ` [dpdk-dev] [PATCH v5 04/14] Add CPU cycle " Chao Zhu
2014-11-25 22:17 ` [dpdk-dev] [PATCH v5 05/14] Add prefetch operation " Chao Zhu
2014-11-25 22:17 ` [dpdk-dev] [PATCH v5 06/14] Add spinlock " Chao Zhu
2014-11-25 22:17 ` [dpdk-dev] [PATCH v5 07/14] Add vector memcpy " Chao Zhu
2014-11-25 22:17 ` [dpdk-dev] [PATCH v5 08/14] Add CPU flag checking " Chao Zhu
2014-11-25 22:17 ` [dpdk-dev] [PATCH v5 09/14] Remove iopl operation " Chao Zhu
2014-11-25 22:17 ` [dpdk-dev] [PATCH v5 10/14] Add cache size define for IBM Power Architecture Chao Zhu
2014-11-25 22:17 ` Chao Zhu [this message]
2014-11-25 22:17 ` [dpdk-dev] [PATCH v5 12/14] Add eal memory support " Chao Zhu
2014-11-25 22:17 ` [dpdk-dev] [PATCH v5 13/14] test_memzone:fix finding the second smallest segment Chao Zhu
2014-11-25 22:17 ` [dpdk-dev] [PATCH v5 14/14] Fix the compiling of test-pmd on IBM Power Architecture Chao Zhu
2014-11-26 9:32 ` [dpdk-dev] [PATCH v5 00/14] Patches for DPDK to support Power architecture David Marchand
2014-11-26 20:57 ` Thomas Monjalon