From: Stephen Hemminger <stephen@networkplumber.org>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH 1/6] xen: allow determining DOM0 at runtime
Date: Thu, 22 Oct 2015 23:34:45 -0700
Message-ID: <1445582090-5927-2-git-send-email-stephen@networkplumber.org>
In-Reply-To: <1445582090-5927-1-git-send-email-stephen@networkplumber.org>
Add the memory infrastructure needed to support Xen DOM0 at runtime rather than at build time.

The DOM0 test becomes a runtime call, is_xen_dom0_supported(), backed by internal_config, and rte_mem_phy2mch() becomes an inline wrapper that only calls the Xen translation (renamed to rte_xen_mem_phy2mch) when DOM0 mode is active. A stub of rte_dom0_mempool_create() is provided for builds without RTE_LIBRTE_XEN_DOM0, so mempool creation can dispatch at runtime as well.
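To illustrate the effect on callers, here is a minimal sketch of the pattern this enables (illustrative only, not part of the change; the helper name get_dma_addr is made up, and rte_eal_init() is assumed to have run so that internal_config is populated):

	#include <rte_memory.h>

	/*
	 * Hypothetical helper: translate a segment-relative physical
	 * address for DMA. Previously this required a compile-time
	 * switch on RTE_LIBRTE_XEN_DOM0; now rte_mem_phy2mch() exists
	 * in both configurations and performs the runtime check itself.
	 */
	static phys_addr_t
	get_dma_addr(uint32_t memseg_id, phys_addr_t phys)
	{
		/* MFN lookup under Xen DOM0, identity mapping otherwise. */
		return rte_mem_phy2mch(memseg_id, phys);
	}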
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
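Note for reviewers: with this series a single build can be exercised both ways. Hypothetical invocations (the application name is made up; the DOM0 case assumes the rte_dom0_mm kernel module is loaded):

	# Xen DOM0 path, selected at runtime
	./app -c 0x3 -n 4 --xen-dom0

	# bare-metal path, same binary, no rebuild
	./app -c 0x3 -n 4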
lib/librte_eal/common/include/rte_memory.h | 30 ++++++++++++++++-
lib/librte_eal/linuxapp/eal/eal_memory.c | 7 ++++
lib/librte_eal/linuxapp/eal/eal_xen_memory.c | 2 +-
lib/librte_mempool/rte_mempool.c | 48 ++++++++++++++++++++--------
lib/librte_mempool/rte_mempool.h | 3 +-
5 files changed, 72 insertions(+), 18 deletions(-)
diff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h
index 1bed415..067be10 100644
--- a/lib/librte_eal/common/include/rte_memory.h
+++ b/lib/librte_eal/common/include/rte_memory.h
@@ -52,6 +52,8 @@
extern "C" {
#endif
+#include <rte_common.h>
+
enum rte_page_sizes {
RTE_PGSIZE_4K = 1ULL << 12,
RTE_PGSIZE_64K = 1ULL << 16,
@@ -180,6 +182,13 @@ unsigned rte_memory_get_nchannel(void);
unsigned rte_memory_get_nrank(void);
#ifdef RTE_LIBRTE_XEN_DOM0
+
+/** Internal use only: report whether DOM0 memory mapping is in use. */
+int is_xen_dom0_supported(void);
+
+/** Internal use only: physical to machine address translation for Xen. */
+phys_addr_t rte_xen_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr);
+
/**
* Return the machine address that corresponds to the given physical address.
*
@@ -191,7 +200,14 @@ unsigned rte_memory_get_nrank(void);
* @return
* The machine address, or the unchanged physical address when DOM0 mapping is not in use.
*/
-phys_addr_t rte_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr);
+static inline phys_addr_t
+rte_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr)
+{
+ if (is_xen_dom0_supported())
+ return rte_xen_mem_phy2mch(memseg_id, phy_addr);
+ else
+ return phy_addr;
+}
/**
* Memory init to support applications running on Xen Dom0.
@@ -214,7 +230,19 @@ int rte_xen_dom0_memory_init(void);
* negative: error
*/
int rte_xen_dom0_memory_attach(void);
+#else
+static inline int is_xen_dom0_supported(void)
+{
+ return 0;
+}
+
+static inline phys_addr_t
+rte_mem_phy2mch(uint32_t memseg_id __rte_unused, const phys_addr_t phy_addr)
+{
+ return phy_addr;
+}
#endif
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index ac2745e..f36cabd 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -97,6 +97,13 @@
#include "eal_filesystem.h"
#include "eal_hugepages.h"
+#ifdef RTE_LIBRTE_XEN_DOM0
+int is_xen_dom0_supported(void)
+{
+ return internal_config.xen_dom0_support; /* set by the --xen-dom0 EAL option */
+}
+#endif
+
/**
* @file
* Huge page mapping under linux
diff --git a/lib/librte_eal/linuxapp/eal/eal_xen_memory.c b/lib/librte_eal/linuxapp/eal/eal_xen_memory.c
index d228a9d..7fd9e83 100644
--- a/lib/librte_eal/linuxapp/eal/eal_xen_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_xen_memory.c
@@ -156,7 +156,7 @@ get_xen_memory_size(void)
* Calculate the MFN in Xen Dom0 from the given physical address.
*/
phys_addr_t
-rte_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr)
+rte_xen_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr)
{
int mfn_id;
uint64_t mfn, mfn_offset;
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 8e185c5..d063268 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -375,6 +375,26 @@ rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
return usz;
}
+#ifndef RTE_LIBRTE_XEN_DOM0
+/* Stub used when DOM0 support is not compiled in; it fails with EINVAL. */
+struct rte_mempool *
+rte_dom0_mempool_create(const char *name __rte_unused,
+ unsigned n __rte_unused,
+ unsigned elt_size __rte_unused,
+ unsigned cache_size __rte_unused,
+ unsigned private_data_size __rte_unused,
+ rte_mempool_ctor_t *mp_init __rte_unused,
+ void *mp_init_arg __rte_unused,
+ rte_mempool_obj_ctor_t *obj_init __rte_unused,
+ void *obj_init_arg __rte_unused,
+ int socket_id __rte_unused,
+ unsigned flags __rte_unused)
+{
+ rte_errno = EINVAL;
+ return NULL;
+}
+#endif
+
/* create the mempool */
struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
@@ -383,20 +403,20 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
int socket_id, unsigned flags)
{
-#ifdef RTE_LIBRTE_XEN_DOM0
- return rte_dom0_mempool_create(name, n, elt_size,
- cache_size, private_data_size,
- mp_init, mp_init_arg,
- obj_init, obj_init_arg,
- socket_id, flags);
-#else
- return rte_mempool_xmem_create(name, n, elt_size,
- cache_size, private_data_size,
- mp_init, mp_init_arg,
- obj_init, obj_init_arg,
- socket_id, flags,
- NULL, NULL, MEMPOOL_PG_NUM_DEFAULT, MEMPOOL_PG_SHIFT_MAX);
-#endif
+ if (is_xen_dom0_supported())
+ return rte_dom0_mempool_create(name, n, elt_size,
+ cache_size, private_data_size,
+ mp_init, mp_init_arg,
+ obj_init, obj_init_arg,
+ socket_id, flags);
+ else
+ return rte_mempool_xmem_create(name, n, elt_size,
+ cache_size, private_data_size,
+ mp_init, mp_init_arg,
+ obj_init, obj_init_arg,
+ socket_id, flags,
+ NULL, NULL, MEMPOOL_PG_NUM_DEFAULT,
+ MEMPOOL_PG_SHIFT_MAX);
}
/*
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 8abeca9..6e2390a 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -640,7 +640,6 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
int socket_id, unsigned flags, void *vaddr,
const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
-#ifdef RTE_LIBRTE_XEN_DOM0
/**
* Create a new mempool named *name* in memory on Xen Dom0.
*
@@ -728,7 +727,7 @@ rte_dom0_mempool_create(const char *name, unsigned n, unsigned elt_size,
rte_mempool_ctor_t *mp_init, void *mp_init_arg,
rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
int socket_id, unsigned flags);
-#endif
+
/**
* Dump the status of the mempool to the console.
--
2.1.4