DPDK patches and discussions
From: Pallavi Kadam <pallavi.kadam@intel.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH] helloworld: build the Windows DPDK sample application with the eal and kvargs libraries to add Windows support to the mainline repository
Date: Wed, 28 Nov 2018 21:05:04 -0800
Message-ID: <20181129050504.26996-1-pallavi.kadam@intel.com>

Signed-off-by: Pallavi Kadam <pallavi.kadam@intel.com>
---
[RFC] This is a large patch containing the changes needed to build the
HelloWorld sample application on Windows. It also contains the changes needed
to build the librte_eal and librte_kvargs libraries, which the sample
application requires. Before merging, this patch will be split into multiple
smaller patches and sent for review.
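
For reference, the HelloWorld sample being built is essentially the upstream
examples/helloworld application, which needs little beyond EAL initialization
and lcore launch; a trimmed sketch (based on the standard example, not part
of this diff) is:

#include <stdio.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_debug.h>

/* function run on every launched lcore */
static int
lcore_hello(void *arg)
{
	(void)arg;
	printf("hello from core %u\n", rte_lcore_id());
	return 0;
}

int
main(int argc, char **argv)
{
	unsigned int lcore_id;

	/* initialize the EAL; on Windows this is the code added by this patch */
	if (rte_eal_init(argc, argv) < 0)
		rte_panic("Cannot init EAL\n");

	/* launch lcore_hello() on every slave lcore, then run it on the master */
	RTE_LCORE_FOREACH_SLAVE(lcore_id)
		rte_eal_remote_launch(lcore_hello, NULL, lcore_id);
	lcore_hello(NULL);

	/* wait for all slave lcores to finish */
	rte_eal_mp_wait_lcore();
	return 0;
}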

 lib/librte_eal/common/eal_common_errno.c      |    9 +
 lib/librte_eal/common/eal_common_log.c        |    2 +
 lib/librte_eal/common/eal_common_options.c    |    2 +
 lib/librte_eal/common/eal_common_timer.c      |    2 +
 .../common/include/arch/x86/rte_byteorder.h   |   14 +
 .../common/include/arch/x86/rte_rtm.h         |   22 +-
 lib/librte_eal/common/include/rte_common.h    |   11 +
 .../common/include/rte_malloc_heap.h          |    3 +
 lib/librte_eal/common/include/rte_random.h    |    4 +
 .../common/include/rte_string_fns.h           |    2 +
 lib/librte_eal/common/malloc_elem.h           |    3 +
 lib/librte_eal/common/malloc_heap.c           |    2 +
 lib/librte_eal/common/malloc_heap.h           |    4 +
 lib/librte_eal/windows/eal/eal.c              |  697 +++++++++
 lib/librte_eal/windows/eal/eal_alarm.c        |   29 +
 lib/librte_eal/windows/eal/eal_debug.c        |  102 ++
 lib/librte_eal/windows/eal/eal_fbarray.c      | 1273 +++++++++++++++++
 lib/librte_eal/windows/eal/eal_filesystem.h   |   97 ++
 .../windows/eal/eal_hugepage_info.c           |   20 +
 lib/librte_eal/windows/eal/eal_interrupts.c   |   90 ++
 lib/librte_eal/windows/eal/eal_lcore.c        |   83 ++
 lib/librte_eal/windows/eal/eal_log.c          |  415 ++++++
 lib/librte_eal/windows/eal/eal_memalloc.c     |  995 +++++++++++++
 lib/librte_eal/windows/eal/eal_memory.c       |  140 ++
 lib/librte_eal/windows/eal/eal_proc.c         | 1003 +++++++++++++
 lib/librte_eal/windows/eal/eal_thread.c       |  167 +++
 lib/librte_eal/windows/eal/eal_timer.c        |   40 +
 .../windows/eal/linux-emu/_rand48.c           |   46 +
 .../windows/eal/linux-emu/drand48.c           |   62 +
 lib/librte_eal/windows/eal/linux-emu/fork.c   |  111 ++
 lib/librte_eal/windows/eal/linux-emu/getopt.c |  407 ++++++
 .../windows/eal/linux-emu/lrand48.c           |   23 +
 lib/librte_eal/windows/eal/linux-emu/mman.c   |  179 +++
 lib/librte_eal/windows/eal/linux-emu/setenv.c |   26 +
 .../windows/eal/linux-emu/srand48.c           |   30 +
 .../windows/eal/linux-emu/termios.c           |   11 +
 lib/librte_eal/windows/eal/linux-emu/unistd.c |   21 +
 lib/librte_eal/windows/eal/malloc_heap.c      | 1068 ++++++++++++++
 lib/librte_eal/windows/eal/malloc_mp.c        |  645 +++++++++
 .../windows/include_override/dirent.h         |  950 ++++++++++++
 .../windows/include_override/getopt.h         |  252 ++++
 .../windows/include_override/net/ethernet.h   |  405 ++++++
 .../windows/include_override/netinet/in.h     |   48 +
 .../windows/include_override/netinet/tcp.h    |    4 +
 .../windows/include_override/pthread.h        |   65 +
 .../windows/include_override/rand48.h         |   32 +
 .../windows/include_override/sched.h          |   21 +
 .../windows/include_override/sys/_iovec.h     |   48 +
 .../include_override/sys/_sockaddr_storage.h  |   54 +
 .../windows/include_override/sys/_termios.h   |  222 +++
 .../windows/include_override/sys/_types.h     |  105 ++
 .../windows/include_override/sys/cdefs.h      |    3 +
 .../windows/include_override/sys/mman.h       |   63 +
 .../include_override/sys/netbsd/queue.h       |  846 +++++++++++
 .../windows/include_override/sys/queue.h      |   11 +
 .../windows/include_override/syslog.h         |  217 +++
 .../windows/include_override/termios.h        |    1 +
 .../windows/include_override/unistd.h         |   30 +
 .../windows/include_override/x86intrin.h      |    1 +
 .../rte_override/exec-env/rte_interrupts.h    |    3 +
 lib/librte_eal/windows/rte_override/rte_acl.h |    7 +
 .../windows/rte_override/rte_atomic.h         |  744 ++++++++++
 .../windows/rte_override/rte_bus_pci.h        |   25 +
 .../windows/rte_override/rte_byteorder.h      |   10 +
 .../windows/rte_override/rte_common.h         |   56 +
 .../windows/rte_override/rte_common.h.sav     |  372 +++++
 .../windows/rte_override/rte_config.h         |  328 +++++
 .../windows/rte_override/rte_cpuflags.h       |    3 +
 .../windows/rte_override/rte_cycles.h         |   26 +
 .../windows/rte_override/rte_debug.h          |   22 +
 lib/librte_eal/windows/rte_override/rte_io.h  |    8 +
 .../windows/rte_override/rte_lcore.h          |   15 +
 .../windows/rte_override/rte_log.h.sav        |    6 +
 .../windows/rte_override/rte_memcpy.h         |    3 +
 .../windows/rte_override/rte_memory.h         |   20 +
 .../windows/rte_override/rte_pause.h          |   10 +
 lib/librte_eal/windows/rte_override/rte_pci.h |    7 +
 .../windows/rte_override/rte_per_lcore.h      |   29 +
 .../windows/rte_override/rte_prefetch.h       |   29 +
 lib/librte_eal/windows/rte_override/rte_rtm.h |    8 +
 .../windows/rte_override/rte_rwlock.h         |   40 +
 .../windows/rte_override/rte_spinlock.h       |  271 ++++
 .../windows/rte_override/rte_vect.h           |    5 +
 .../windows/rte_override/rte_wincompat.h      |  347 +++++
 .../windows/rte_override/rte_windows.h        |  497 +++++++
 mk/exec-env/windows/DpdkRteLib.props          |   46 +
 mk/exec-env/windows/dpdk.sln                  |   43 +
 .../windows/helloworld/helloworld.vcxproj     |   98 ++
 .../helloworld/helloworld.vcxproj.filters     |   22 +
 .../helloworld/helloworld.vcxproj.user        |    4 +
 .../windows/librte_eal/librte_eal.vcxproj     |  187 +++
 .../librte_eal/librte_eal.vcxproj.filters     |  297 ++++
 .../librte_eal/librte_eal.vcxproj.user        |    4 +
 .../librte_kvargs/librte_kvargs.vcxproj       |   91 ++
 .../librte_kvargs.vcxproj.filters             |   33 +
 .../librte_kvargs/librte_kvargs.vcxproj.user  |    4 +
 96 files changed, 14955 insertions(+), 3 deletions(-)
 create mode 100644 lib/librte_eal/windows/eal/eal.c
 create mode 100644 lib/librte_eal/windows/eal/eal_alarm.c
 create mode 100644 lib/librte_eal/windows/eal/eal_debug.c
 create mode 100644 lib/librte_eal/windows/eal/eal_fbarray.c
 create mode 100644 lib/librte_eal/windows/eal/eal_filesystem.h
 create mode 100644 lib/librte_eal/windows/eal/eal_hugepage_info.c
 create mode 100644 lib/librte_eal/windows/eal/eal_interrupts.c
 create mode 100644 lib/librte_eal/windows/eal/eal_lcore.c
 create mode 100644 lib/librte_eal/windows/eal/eal_log.c
 create mode 100644 lib/librte_eal/windows/eal/eal_memalloc.c
 create mode 100644 lib/librte_eal/windows/eal/eal_memory.c
 create mode 100644 lib/librte_eal/windows/eal/eal_proc.c
 create mode 100644 lib/librte_eal/windows/eal/eal_thread.c
 create mode 100644 lib/librte_eal/windows/eal/eal_timer.c
 create mode 100644 lib/librte_eal/windows/eal/linux-emu/_rand48.c
 create mode 100644 lib/librte_eal/windows/eal/linux-emu/drand48.c
 create mode 100644 lib/librte_eal/windows/eal/linux-emu/fork.c
 create mode 100644 lib/librte_eal/windows/eal/linux-emu/getopt.c
 create mode 100644 lib/librte_eal/windows/eal/linux-emu/lrand48.c
 create mode 100644 lib/librte_eal/windows/eal/linux-emu/mman.c
 create mode 100644 lib/librte_eal/windows/eal/linux-emu/setenv.c
 create mode 100644 lib/librte_eal/windows/eal/linux-emu/srand48.c
 create mode 100644 lib/librte_eal/windows/eal/linux-emu/termios.c
 create mode 100644 lib/librte_eal/windows/eal/linux-emu/unistd.c
 create mode 100644 lib/librte_eal/windows/eal/malloc_heap.c
 create mode 100644 lib/librte_eal/windows/eal/malloc_mp.c
 create mode 100644 lib/librte_eal/windows/include_override/dirent.h
 create mode 100644 lib/librte_eal/windows/include_override/getopt.h
 create mode 100644 lib/librte_eal/windows/include_override/net/ethernet.h
 create mode 100644 lib/librte_eal/windows/include_override/netinet/in.h
 create mode 100644 lib/librte_eal/windows/include_override/netinet/tcp.h
 create mode 100644 lib/librte_eal/windows/include_override/pthread.h
 create mode 100644 lib/librte_eal/windows/include_override/rand48.h
 create mode 100644 lib/librte_eal/windows/include_override/sched.h
 create mode 100644 lib/librte_eal/windows/include_override/sys/_iovec.h
 create mode 100644 lib/librte_eal/windows/include_override/sys/_sockaddr_storage.h
 create mode 100644 lib/librte_eal/windows/include_override/sys/_termios.h
 create mode 100644 lib/librte_eal/windows/include_override/sys/_types.h
 create mode 100644 lib/librte_eal/windows/include_override/sys/cdefs.h
 create mode 100644 lib/librte_eal/windows/include_override/sys/mman.h
 create mode 100644 lib/librte_eal/windows/include_override/sys/netbsd/queue.h
 create mode 100644 lib/librte_eal/windows/include_override/sys/queue.h
 create mode 100644 lib/librte_eal/windows/include_override/syslog.h
 create mode 100644 lib/librte_eal/windows/include_override/termios.h
 create mode 100644 lib/librte_eal/windows/include_override/unistd.h
 create mode 100644 lib/librte_eal/windows/include_override/x86intrin.h
 create mode 100644 lib/librte_eal/windows/rte_override/exec-env/rte_interrupts.h
 create mode 100644 lib/librte_eal/windows/rte_override/rte_acl.h
 create mode 100644 lib/librte_eal/windows/rte_override/rte_atomic.h
 create mode 100644 lib/librte_eal/windows/rte_override/rte_bus_pci.h
 create mode 100644 lib/librte_eal/windows/rte_override/rte_byteorder.h
 create mode 100644 lib/librte_eal/windows/rte_override/rte_common.h
 create mode 100644 lib/librte_eal/windows/rte_override/rte_common.h.sav
 create mode 100644 lib/librte_eal/windows/rte_override/rte_config.h
 create mode 100644 lib/librte_eal/windows/rte_override/rte_cpuflags.h
 create mode 100644 lib/librte_eal/windows/rte_override/rte_cycles.h
 create mode 100644 lib/librte_eal/windows/rte_override/rte_debug.h
 create mode 100644 lib/librte_eal/windows/rte_override/rte_io.h
 create mode 100644 lib/librte_eal/windows/rte_override/rte_lcore.h
 create mode 100644 lib/librte_eal/windows/rte_override/rte_log.h.sav
 create mode 100644 lib/librte_eal/windows/rte_override/rte_memcpy.h
 create mode 100644 lib/librte_eal/windows/rte_override/rte_memory.h
 create mode 100644 lib/librte_eal/windows/rte_override/rte_pause.h
 create mode 100644 lib/librte_eal/windows/rte_override/rte_pci.h
 create mode 100644 lib/librte_eal/windows/rte_override/rte_per_lcore.h
 create mode 100644 lib/librte_eal/windows/rte_override/rte_prefetch.h
 create mode 100644 lib/librte_eal/windows/rte_override/rte_rtm.h
 create mode 100644 lib/librte_eal/windows/rte_override/rte_rwlock.h
 create mode 100644 lib/librte_eal/windows/rte_override/rte_spinlock.h
 create mode 100644 lib/librte_eal/windows/rte_override/rte_vect.h
 create mode 100644 lib/librte_eal/windows/rte_override/rte_wincompat.h
 create mode 100644 lib/librte_eal/windows/rte_override/rte_windows.h
 create mode 100644 mk/exec-env/windows/DpdkRteLib.props
 create mode 100644 mk/exec-env/windows/dpdk.sln
 create mode 100644 mk/exec-env/windows/helloworld/helloworld.vcxproj
 create mode 100644 mk/exec-env/windows/helloworld/helloworld.vcxproj.filters
 create mode 100644 mk/exec-env/windows/helloworld/helloworld.vcxproj.user
 create mode 100644 mk/exec-env/windows/librte_eal/librte_eal.vcxproj
 create mode 100644 mk/exec-env/windows/librte_eal/librte_eal.vcxproj.filters
 create mode 100644 mk/exec-env/windows/librte_eal/librte_eal.vcxproj.user
 create mode 100644 mk/exec-env/windows/librte_kvargs/librte_kvargs.vcxproj
 create mode 100644 mk/exec-env/windows/librte_kvargs/librte_kvargs.vcxproj.filters
 create mode 100644 mk/exec-env/windows/librte_kvargs/librte_kvargs.vcxproj.user

diff --git a/lib/librte_eal/common/eal_common_errno.c b/lib/librte_eal/common/eal_common_errno.c
index c63a943b3..b6fec8cd5 100644
--- a/lib/librte_eal/common/eal_common_errno.c
+++ b/lib/librte_eal/common/eal_common_errno.c
@@ -27,7 +27,12 @@ rte_strerror(int errnum)
 	static const char *sep = "";
 #endif
 #define RETVAL_SZ 256
+#ifdef _WIN64
+	typedef char c_retval[RETVAL_SZ];
+	RTE_DEFINE_PER_LCORE(static c_retval, retval);
+#else
 	static RTE_DEFINE_PER_LCORE(char[RETVAL_SZ], retval);
+#endif
 	char *ret = RTE_PER_LCORE(retval);
 
 	/* since some implementations of strerror_r throw an error
@@ -41,9 +46,13 @@ rte_strerror(int errnum)
 		case E_RTE_NO_CONFIG:
 			return "Missing rte_config structure";
 		default:
+#ifdef _WIN64
+		strerror_s(RTE_PER_LCORE(retval), RETVAL_SZ, errnum);
+#else
 			if (strerror_r(errnum, ret, RETVAL_SZ) != 0)
 				snprintf(ret, RETVAL_SZ, "Unknown error%s %d",
 						sep, errnum);
+#endif
 		}
 
 	return ret;
diff --git a/lib/librte_eal/common/eal_common_log.c b/lib/librte_eal/common/eal_common_log.c
index c714a4bd2..28407ce77 100644
--- a/lib/librte_eal/common/eal_common_log.c
+++ b/lib/librte_eal/common/eal_common_log.c
@@ -28,8 +28,10 @@ struct rte_eal_opt_loglevel {
 	/** Next list entry */
 	TAILQ_ENTRY(rte_eal_opt_loglevel) next;
 	/** Compiled regular expression obtained from the option */
+#ifndef _WIN64
 	regex_t re_match;
 	/** Glob match string option */
+#endif /* !_WIN64 */
 	char *pattern;
 	/** Log level value obtained from the option */
 	uint32_t level;
diff --git a/lib/librte_eal/common/eal_common_options.c b/lib/librte_eal/common/eal_common_options.c
index e31eca5c0..17f2ae6a2 100644
--- a/lib/librte_eal/common/eal_common_options.c
+++ b/lib/librte_eal/common/eal_common_options.c
@@ -11,7 +11,9 @@
 #include <limits.h>
 #include <errno.h>
 #include <getopt.h>
+#ifndef _WIN64
 #include <dlfcn.h>
+#endif
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <dirent.h>
diff --git a/lib/librte_eal/common/eal_common_timer.c b/lib/librte_eal/common/eal_common_timer.c
index dcf26bfea..f6c965b64 100644
--- a/lib/librte_eal/common/eal_common_timer.c
+++ b/lib/librte_eal/common/eal_common_timer.c
@@ -33,6 +33,7 @@ rte_delay_us_block(unsigned int us)
 		rte_pause();
 }
 
+#ifndef _WIN64
 void __rte_experimental
 rte_delay_us_sleep(unsigned int us)
 {
@@ -54,6 +55,7 @@ rte_delay_us_sleep(unsigned int us)
 		ind = 1 - ind;
 	}
 }
+#endif
 
 uint64_t
 rte_get_tsc_hz(void)
diff --git a/lib/librte_eal/common/include/arch/x86/rte_byteorder.h b/lib/librte_eal/common/include/arch/x86/rte_byteorder.h
index a2dfecc1f..0f39af5c2 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_byteorder.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_byteorder.h
@@ -26,10 +26,16 @@ extern "C" {
 static inline uint16_t rte_arch_bswap16(uint16_t _x)
 {
 	uint16_t x = _x;
+#ifndef _WIN64
 	asm volatile ("xchgb %b[x1],%h[x2]"
 		      : [x1] "=Q" (x)
 		      : [x2] "0" (x)
 		      );
+#else
+	__asm {
+	    /* Add appropriate __asm here */
+	}
+#endif
 	return x;
 }
 
@@ -41,9 +47,17 @@ static inline uint16_t rte_arch_bswap16(uint16_t _x)
 static inline uint32_t rte_arch_bswap32(uint32_t _x)
 {
 	uint32_t x = _x;
+#ifndef _WIN64
 	asm volatile ("bswap %[x]"
 		      : [x] "+r" (x)
 		      );
+#else
+	__asm {
+	    mov     eax, x
+	    bswap   eax
+	    mov     x, eax
+	}
+#endif
 	return x;
 }
 
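Note: 64-bit MSVC does not accept inline __asm blocks, so the _WIN64 branches
above are effectively placeholders. One possible way to fill them, sketched
here with the MSVC byte-swap intrinsics from <stdlib.h> (not part of this
diff), would be:

#ifdef _WIN64
#include <stdlib.h>	/* _byteswap_ushort() / _byteswap_ulong() */

static inline uint16_t rte_arch_bswap16(uint16_t _x)
{
	return _byteswap_ushort(_x);
}

static inline uint32_t rte_arch_bswap32(uint32_t _x)
{
	return _byteswap_ulong(_x);
}
#endif
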
diff --git a/lib/librte_eal/common/include/arch/x86/rte_rtm.h b/lib/librte_eal/common/include/arch/x86/rte_rtm.h
index eb0f8e81e..8056b25de 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_rtm.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_rtm.h
@@ -29,29 +29,45 @@ static __attribute__((__always_inline__)) inline
 unsigned int rte_xbegin(void)
 {
 	unsigned int ret = RTE_XBEGIN_STARTED;
-
+#ifndef _WIN64
 	asm volatile(".byte 0xc7,0xf8 ; .long 0" : "+a" (ret) :: "memory");
+#else
+	/* Add appropriate asm here for Windows compilers */
+#endif
 	return ret;
 }
 
 static __attribute__((__always_inline__)) inline
 void rte_xend(void)
 {
-	 asm volatile(".byte 0x0f,0x01,0xd5" ::: "memory");
+#ifndef _WIN64
+	asm volatile(".byte 0x0f,0x01,0xd5" ::: "memory");
+#else
+	/* Add appropriate asm here for Windows compilers */
+#endif
 }
 
 /* not an inline function to workaround a clang bug with -O0 */
+#ifndef _WIN64
 #define rte_xabort(status) do { \
 	asm volatile(".byte 0xc6,0xf8,%P0" :: "i" (status) : "memory"); \
 } while (0)
+#else
+#define rte_xabort(status) do { \
+	/* Add appropriate asm here for Windows compilers */ \
+} while (0)
+#endif
 
 static __attribute__((__always_inline__)) inline
 int rte_xtest(void)
 {
 	unsigned char out;
-
+#ifndef _WIN64
 	asm volatile(".byte 0x0f,0x01,0xd6 ; setnz %0" :
 		"=r" (out) :: "memory");
+#else
+	/* Add appropriate asm here for Windows compilers */
+#endif
 	return out;
 }
 
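Note: the _WIN64 branches in this file are placeholders as well (and
rte_xtest() would return an uninitialized value on that path). A sketch of
how they might be filled using the RTM intrinsics from <immintrin.h>, which
MSVC and recent GCC/clang both provide (not part of this diff):

#ifdef _WIN64
#include <immintrin.h>	/* _xbegin() / _xend() / _xabort() / _xtest() */

static __forceinline unsigned int rte_xbegin(void)
{
	/* returns _XBEGIN_STARTED (~0u) when the transaction starts */
	return _xbegin();
}

static __forceinline void rte_xend(void)
{
	_xend();
}

/* status must be a compile-time constant for _xabort() */
#define rte_xabort(status) _xabort(status)

static __forceinline int rte_xtest(void)
{
	return _xtest();
}
#endif
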
diff --git a/lib/librte_eal/common/include/rte_common.h b/lib/librte_eal/common/include/rte_common.h
index 66cdf60b2..3e24463be 100644
--- a/lib/librte_eal/common/include/rte_common.h
+++ b/lib/librte_eal/common/include/rte_common.h
@@ -103,8 +103,15 @@ typedef uint16_t unaligned_uint16_t;
  *   Priority number must be above 100.
  *   Lowest number is the first to run.
  */
+#ifndef _WIN64
 #define RTE_INIT_PRIO(func, prio) \
 static void __attribute__((constructor(RTE_PRIO(prio)), used)) func(void)
+#else
+/* Re-define this without the __attribute__ and static declarator */
+#define RTE_INIT_PRIO(func, prio) \
+void func(void)
+#endif
+
 
 /**
  * Run function before main() with low priority.
@@ -262,7 +269,11 @@ static void __attribute__((destructor(RTE_PRIO(prio)), used)) func(void)
 static inline int
 rte_is_aligned(void *ptr, unsigned align)
 {
+#ifndef _WIN64
 	return RTE_PTR_ALIGN(ptr, align) == ptr;
+#else
+	return (((uintptr_t)ptr % align) == 0);
+#endif
 }
 
 /*********** Macros for compile type checks ********/
diff --git a/lib/librte_eal/common/include/rte_malloc_heap.h b/lib/librte_eal/common/include/rte_malloc_heap.h
index 4a7e0eb1d..3a23fcd59 100644
--- a/lib/librte_eal/common/include/rte_malloc_heap.h
+++ b/lib/librte_eal/common/include/rte_malloc_heap.h
@@ -20,6 +20,9 @@ struct malloc_elem;
 /**
  * Structure to hold malloc heap
  */
+#ifdef _WIN64
+RTE_CACHE_ALIGN
+#endif
 struct malloc_heap {
 	rte_spinlock_t lock;
 	LIST_HEAD(, malloc_elem) free_head[RTE_HEAP_NUM_FREELISTS];
diff --git a/lib/librte_eal/common/include/rte_random.h b/lib/librte_eal/common/include/rte_random.h
index b2ca1c209..680d0774c 100644
--- a/lib/librte_eal/common/include/rte_random.h
+++ b/lib/librte_eal/common/include/rte_random.h
@@ -18,6 +18,10 @@ extern "C" {
 #include <stdint.h>
 #include <stdlib.h>
 
+#ifdef _WIN64
+#include "rand48.h"
+#endif
+
 /**
  * Seed the pseudo-random generator.
  *
diff --git a/lib/librte_eal/common/include/rte_string_fns.h b/lib/librte_eal/common/include/rte_string_fns.h
index 9a2a1ff90..5615d1f3a 100644
--- a/lib/librte_eal/common/include/rte_string_fns.h
+++ b/lib/librte_eal/common/include/rte_string_fns.h
@@ -66,6 +66,7 @@ rte_strlcpy(char *dst, const char *src, size_t size)
 #endif
 
 #else /* non-BSD platforms */
+#ifndef _WIN64
 #ifdef RTE_USE_LIBBSD
 #include <bsd/string.h>
 
@@ -73,6 +74,7 @@ rte_strlcpy(char *dst, const char *src, size_t size)
 #define strlcpy(dst, src, size) rte_strlcpy(dst, src, size)
 
 #endif /* RTE_USE_LIBBSD */
+#endif /* _WIN64 */
 #endif /* BSDAPP */
 
 /**
diff --git a/lib/librte_eal/common/malloc_elem.h b/lib/librte_eal/common/malloc_elem.h
index e2bda4c02..697b1b074 100644
--- a/lib/librte_eal/common/malloc_elem.h
+++ b/lib/librte_eal/common/malloc_elem.h
@@ -20,6 +20,9 @@ enum elem_state {
 	ELEM_PAD  /* element is a padding-only header */
 };
 
+#ifdef _WIN64
+RTE_CACHE_ALIGN
+#endif
 struct malloc_elem {
 	struct malloc_heap *heap;
 	struct malloc_elem *volatile prev;
diff --git a/lib/librte_eal/common/malloc_heap.c b/lib/librte_eal/common/malloc_heap.c
index c6a6d4f6b..5314ebd20 100644
--- a/lib/librte_eal/common/malloc_heap.c
+++ b/lib/librte_eal/common/malloc_heap.c
@@ -60,11 +60,13 @@ check_hugepage_sz(unsigned flags, uint64_t hugepage_sz)
 	case RTE_PGSIZE_1G:
 		check_flag = RTE_MEMZONE_1GB;
 		break;
+#ifndef _WIN64
 	case RTE_PGSIZE_4G:
 		check_flag = RTE_MEMZONE_4GB;
 		break;
 	case RTE_PGSIZE_16G:
 		check_flag = RTE_MEMZONE_16GB;
+#endif
 	}
 
 	return check_flag & flags;
diff --git a/lib/librte_eal/common/malloc_heap.h b/lib/librte_eal/common/malloc_heap.h
index e48996d52..df60ab62c 100644
--- a/lib/librte_eal/common/malloc_heap.h
+++ b/lib/librte_eal/common/malloc_heap.h
@@ -10,6 +10,10 @@
 #include <rte_malloc.h>
 #include <rte_malloc_heap.h>
 
+#ifdef _WIN64
+#include <rte_lcore.h>
+#endif
+
 #ifdef __cplusplus
 extern "C" {
 #endif
diff --git a/lib/librte_eal/windows/eal/eal.c b/lib/librte_eal/windows/eal/eal.c
new file mode 100644
index 000000000..b060bbb92
--- /dev/null
+++ b/lib/librte_eal/windows/eal/eal.c
@@ -0,0 +1,697 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+#include <unistd.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <direct.h>
+
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_log.h>
+#include <rte_random.h>
+#include <rte_cycles.h>
+#include <rte_errno.h>
+#include <rte_windows.h>
+
+#include "eal_private.h"
+#include "eal_thread.h"
+#include "eal_internal_cfg.h"
+#include "eal_filesystem.h"
+#include "eal_hugepages.h"
+#include "eal_options.h"
+#include "malloc_heap.h"
+#include "private.h"
+
+#define MEMSIZE_IF_NO_HUGE_PAGE (64ULL * 1024ULL * 1024ULL)
+
+/* Allow the application to print its usage message too if set */
+static rte_usage_hook_t	rte_application_usage_hook = NULL;
+
+/* define fd variable here, because file needs to be kept open for the
+ * duration of the program, as we hold a write lock on it in the primary proc */
+
+static int mem_cfg_fd = -1; // INVALID_HANDLE_VALUE;
+/*
+static struct flock wr_lock = {
+		.l_type = F_WRLCK,
+		.l_whence = SEEK_SET,
+		.l_start = offsetof(struct rte_mem_config, memseg),
+		.l_len = sizeof(early_mem_config.memseg),
+};
+*/
+/* early configuration structure, when memory config is not mmapped */
+static struct rte_mem_config early_mem_config;
+
+/* Address of global and public configuration */
+static struct rte_config rte_config = {
+		.mem_config = &early_mem_config,
+};
+
+/* internal configuration (per-core) */
+struct lcore_config lcore_config[RTE_MAX_LCORE];
+
+/* internal configuration */
+struct internal_config internal_config;
+
+/* external function prototypes */
+/* these functions are created by the RTE_REGISTER_BUS macro */
+extern void businitfn_pci(void);
+extern void businitfn_vdev(void);
+
+/* these functions are created by the MEMPOOL_REGISTER_OPS macro */
+extern void mp_hdlr_init_ops_mp_mc(void);
+extern void mp_hdlr_init_ops_sp_sc(void);
+extern void mp_hdlr_init_ops_mp_sc(void);
+extern void mp_hdlr_init_ops_sp_mc(void);
+
+/* these functions are created by the EAL_REGISTER_TAILQ macro */
+extern void init_rte_mempool_tailq(void);
+extern void init_rte_ring_tailq(void);
+extern void init_rte_hash_tailq(void);
+extern void init_rte_fbk_hash_tailq(void);
+extern void init_rte_distributor_tailq(void);
+extern void init_rte_dist_burst_tailq(void);
+extern void init_rte_uio_tailq(void);
+extern void init_rte_lpm_tailq(void);
+extern void init_rte_lpm6_tailq(void);
+
+/* these functions are created by the RTE_PMD_REGISTER_PCI macro */
+extern void pciinitfn_net_i40e(void);
+
+/* these are more constructor-like functions that we'll need to call at the start */
+extern void rte_timer_init(void);
+extern void rte_log_init(void);
+extern void i40e_init_log(void);
+
+/* Return a pointer to the configuration structure */
+struct rte_config *
+rte_eal_get_configuration(void)
+{
+	return &rte_config;
+}
+
+/* Return mbuf pool ops name */
+const char *
+rte_eal_mbuf_user_pool_ops(void)
+{
+	return internal_config.user_mbuf_pool_ops_name;
+}
+
+enum rte_iova_mode
+rte_eal_iova_mode(void)
+{
+	return rte_eal_get_configuration()->iova_mode;
+}
+
+/* platform-specific runtime dir */
+static char runtime_dir[PATH_MAX];
+
+/* create memory configuration in shared/mmap memory. Take out
+ * a write lock on the memsegs, so we can auto-detect primary/secondary.
+ * This means we never close the file while running (auto-close on exit).
+ * We also don't lock the whole file, so that in future we can use read-locks
+ * on other parts, e.g. memzones, to detect if there are running secondary
+ * processes. */
+static void
+rte_eal_config_create(void)
+{
+	void *rte_mem_cfg_addr;
+	BOOL retval;
+
+	const char *pathname = eal_runtime_config_path();
+
+	if (internal_config.no_shconf)
+		return;
+
+	if (mem_cfg_fd < 0) {
+	    mem_cfg_fd = _open(pathname, _O_CREAT | _O_RDWR | _O_TRUNC, _S_IREAD | _S_IWRITE);
+	    if (mem_cfg_fd < 0)
+		rte_panic("Cannot open '%s' for rte_mem_config...Error: %d\n", pathname, errno);
+	}
+
+	/* Lock file for exclusive access */
+	OVERLAPPED sOverlapped = {0};
+	sOverlapped.Offset = sizeof(*rte_config.mem_config);
+	sOverlapped.OffsetHigh = 0;
+
+	HANDLE hWinFileHandle = (HANDLE)_get_osfhandle(mem_cfg_fd);
+	retval = LockFileEx(hWinFileHandle,
+			    LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY, 0,
+			    sizeof(*rte_config.mem_config), 0, &sOverlapped);
+	if (!retval) {
+	    _close(mem_cfg_fd);
+	    rte_exit(EXIT_FAILURE, "Cannot create lock on '%s'. Is another primary process running?\n", pathname);
+	}
+
+	rte_mem_cfg_addr = mmap(NULL, sizeof(*rte_config.mem_config),
+				PROT_READ | PROT_WRITE, MAP_SHARED, (int)mem_cfg_fd, 0);
+
+	if (rte_mem_cfg_addr == MAP_FAILED) {
+	    _close(mem_cfg_fd);
+	    rte_exit(EXIT_FAILURE, "Cannot mmap memory for rte_config\n");
+	}
+
+	memcpy(rte_mem_cfg_addr, &early_mem_config, sizeof(early_mem_config));
+	rte_config.mem_config = (struct rte_mem_config *) rte_mem_cfg_addr;
+}
+
+/* attach to an existing shared memory config */
+static void
+rte_eal_config_attach(void)
+{
+	void *rte_mem_cfg_addr;
+	const char *pathname = eal_runtime_config_path();
+
+	if (internal_config.no_shconf)
+		return;
+
+	if (mem_cfg_fd < 0) {
+	    mem_cfg_fd = _open(pathname, O_RDWR);
+	    if (mem_cfg_fd < 0)
+		rte_panic("Cannot open '%s' for rte_mem_config\n", pathname);
+	}
+
+	rte_mem_cfg_addr = mmap(NULL, sizeof(*rte_config.mem_config), PROT_READ | PROT_WRITE, MAP_SHARED, mem_cfg_fd, 0);
+	_close(mem_cfg_fd);
+
+	if (rte_mem_cfg_addr == MAP_FAILED)
+		rte_panic("Cannot mmap memory for rte_config\n");
+
+	rte_config.mem_config = (struct rte_mem_config *) rte_mem_cfg_addr;
+}
+
+/* Detect if we are a primary or a secondary process */
+enum rte_proc_type_t
+eal_proc_type_detect(void)
+{
+	enum rte_proc_type_t ptype = RTE_PROC_PRIMARY;
+	const char *pathname = eal_runtime_config_path();
+
+	/* if we can open the file but not get a write-lock we are a secondary
+	 * process. NOTE: if we get a file handle back, we keep that open
+	 * and don't close it to prevent a race condition between multiple opens */
+	if ((mem_cfg_fd = _open(pathname, O_RDWR)) >= 0) {
+	    OVERLAPPED sOverlapped = { 0 };
+	    sOverlapped.Offset = sizeof(*rte_config.mem_config);
+	    sOverlapped.OffsetHigh = 0;
+
+	    HANDLE hWinFileHandle = (HANDLE)_get_osfhandle(mem_cfg_fd);
+
+	    if (!LockFileEx(hWinFileHandle,
+			    LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY, 0,
+			    sizeof(*rte_config.mem_config), 0, &sOverlapped))
+		ptype = RTE_PROC_SECONDARY;
+	}
+
+	RTE_LOG(INFO, EAL, "Auto-detected process type: %s\n",
+			    ptype == RTE_PROC_PRIMARY ? "PRIMARY" : "SECONDARY");
+
+	return ptype;
+}
+
+/* Sets up rte_config structure with the pointer to shared memory config.*/
+static void
+rte_config_init(void)
+{
+	rte_config.process_type = internal_config.process_type;
+
+	switch (rte_config.process_type){
+	case RTE_PROC_PRIMARY:
+		rte_eal_config_create();
+		break;
+	case RTE_PROC_SECONDARY:
+		rte_eal_config_attach();
+		rte_eal_mcfg_wait_complete(rte_config.mem_config);
+		break;
+	case RTE_PROC_AUTO:
+	case RTE_PROC_INVALID:
+		rte_panic("Invalid process type\n");
+	}
+}
+
+/* display usage */
+static void
+eal_usage(const char *prgname)
+{
+	printf("\nUsage: %s ", prgname);
+	eal_common_usage();
+	/* Allow the application to print its usage message too if hook is set */
+	if ( rte_application_usage_hook ) {
+		printf("===== Application Usage =====\n\n");
+		rte_application_usage_hook(prgname);
+	}
+}
+
+/* Set a per-application usage message */
+rte_usage_hook_t
+rte_set_application_usage_hook( rte_usage_hook_t usage_func )
+{
+	rte_usage_hook_t	old_func;
+
+	/* Will be NULL on the first call to denote the last usage routine. */
+	old_func			= rte_application_usage_hook;
+	rte_application_usage_hook	= usage_func;
+
+	return old_func;
+}
+
+static inline size_t
+eal_get_hugepage_mem_size(void)
+{
+	uint64_t size = 0;
+	size = (uint64_t)GetLargePageMinimum();
+
+	return (size < SIZE_MAX) ? (size_t)(size) : SIZE_MAX;
+}
+
+/* Parse the arguments for --log-level only */
+static void
+eal_log_level_parse(int argc, char **argv)
+{
+	int opt;
+	char **argvopt;
+	int option_index;
+
+	argvopt = argv;
+
+	eal_reset_internal_config(&internal_config);
+
+	while ((opt = getopt_long(argc, argvopt, eal_short_options,
+				  eal_long_options, &option_index)) != EOF) {
+
+		int ret;
+
+		/* getopt is not happy, stop right now */
+		if (opt == '?')
+			break;
+
+		ret = (opt == OPT_LOG_LEVEL_NUM) ?
+			eal_parse_common_option(opt, optarg, &internal_config) : 0;
+
+		/* common parser is not happy */
+		if (ret < 0)
+			break;
+	}
+
+	optind = 0; /* reset getopt lib */
+}
+
+/* Parse the argument given in the command line of the application */
+static int
+eal_parse_args(int argc, char **argv)
+{
+	int opt, ret;
+	char **argvopt;
+	int option_index;
+	char *prgname = argv[0];
+
+	argvopt = argv;
+
+	while ((opt = getopt_long(argc, argvopt, eal_short_options,
+				  eal_long_options, &option_index)) != EOF) {
+
+		int ret;
+
+		/* getopt is not happy, stop right now */
+		if (opt == '?') {
+			eal_usage(prgname);
+			return -1;
+		}
+
+		ret = eal_parse_common_option(opt, optarg, &internal_config);
+		/* common parser is not happy */
+		if (ret < 0) {
+			eal_usage(prgname);
+			return -1;
+		}
+		/* common parser handled this option */
+		if (ret == 0)
+			continue;
+
+		switch (opt) {
+		case 'h':
+			eal_usage(prgname);
+			exit(EXIT_SUCCESS);
+		default:
+			if (opt < OPT_LONG_MIN_NUM && isprint(opt)) {
+				RTE_LOG(ERR, EAL, "Option %c is not supported "
+					"on Windows\n", opt);
+			} else if (opt >= OPT_LONG_MIN_NUM &&
+				   opt < OPT_LONG_MAX_NUM) {
+				RTE_LOG(ERR, EAL, "Option %s is not supported "
+					"on Windows\n",
+					eal_long_options[option_index].name);
+			} else {
+				RTE_LOG(ERR, EAL, "Option %d is not supported "
+					"on Windows\n", opt);
+			}
+			eal_usage(prgname);
+			return -1;
+		}
+	}
+
+	/* create runtime data directory */
+	if (internal_config.no_shconf == 0 &&
+		eal_create_runtime_dir() < 0) {
+		RTE_LOG(ERR, EAL, "Cannot create runtime directory\n");
+		return -1;
+	}
+
+	if (eal_adjust_config(&internal_config) != 0)
+		return -1;
+
+	/* sanity checks */
+	if (eal_check_common_options(&internal_config) != 0) {
+		eal_usage(prgname);
+		return -1;
+	}
+
+	if (optind >= 0)
+		argv[optind-1] = prgname;
+	ret = optind-1;
+	optind = 0; /* reset getopt lib */
+	return ret;
+}
+
+static int
+check_socket(const struct rte_memseg_list *msl, void *arg)
+{
+	int *socket_id = arg;
+
+	return *socket_id == msl->socket_id;
+}
+
+static void
+eal_check_mem_on_local_socket(void)
+{
+	const struct rte_memseg *ms;
+	int i, socket_id;
+
+	socket_id = rte_lcore_to_socket_id(rte_config.master_lcore);
+
+	if (rte_memseg_list_walk(check_socket, &socket_id) == 0)
+		RTE_LOG(WARNING, EAL, "WARNING: Master core has no memory on local socket!\n");
+}
+
+static int
+sync_func(__attribute__((unused)) void *arg)
+{
+	return 0;
+}
+
+inline static void
+rte_eal_mcfg_complete(void)
+{
+	/* ALL shared mem_config related INIT DONE */
+	if (rte_config.process_type == RTE_PROC_PRIMARY)
+		rte_config.mem_config->magic = RTE_MAGIC;
+}
+
+/* return non-zero if hugepages are enabled. */
+int rte_eal_has_hugepages(void)
+{
+	return !internal_config.no_hugetlbfs;
+}
+
+/* Abstraction for port I/O privilege */
+int
+rte_eal_iopl_init(void)
+{
+	// Not required on modern processors?
+	return -1;
+}
+
+static void rte_eal_init_alert(const char *msg)
+{
+	fprintf(stderr, "EAL: FATAL: %s\n", msg);
+	RTE_LOG(ERR, EAL, "%s\n", msg);
+}
+
+/* Register and initialize all buses */
+/* (This is a workaround for Windows in lieu of a constructor-like function) */
+static void
+eal_register_and_init_buses()
+{
+	businitfn_pci();
+	/* businitfn_vdev(); Not presently supported! */
+}
+
+/* Register and initialize all mempools */
+/* (This is a workaround for Windows in lieu of a constructor-like function) */
+static void
+eal_register_and_init_mempools()
+{
+	/* these functions are created by the MEMPOOL_REGISTER_OPS macro */
+	mp_hdlr_init_ops_mp_mc();
+	mp_hdlr_init_ops_sp_sc();
+	mp_hdlr_init_ops_mp_sc();
+	mp_hdlr_init_ops_sp_mc();
+}
+
+/* Register and initialize tailqs */
+/* (This is a workaround for Windows in lieu of a constructor-like function) */
+static void
+eal_register_and_init_tailq()
+{
+	/* these functions are created by the EAL_REGISTER_TAILQ macro */
+	init_rte_mempool_tailq();
+	init_rte_ring_tailq();
+	init_rte_hash_tailq();
+	init_rte_fbk_hash_tailq();
+	init_rte_distributor_tailq();
+	init_rte_dist_burst_tailq();
+	init_rte_uio_tailq();
+	init_rte_lpm_tailq();
+	init_rte_lpm6_tailq();
+}
+
+/* Register and initialize all supported PMDs */
+/* (This is a workaround for Windows in lieu of a constructor-like function) */
+static void
+eal_register_and_init_pmd()
+{
+	/* these functions are created by the RTE_PMD_REGISTER_PCI macro */
+	pciinitfn_net_i40e();  /* init the Intel 40GbE PMD */
+}
+
+/* Launch threads, called at application init(). */
+int
+rte_eal_init(int argc, char **argv)
+{
+	int i, fctret, ret;
+	pthread_t thread_id;
+	static rte_atomic32_t run_once = RTE_ATOMIC32_INIT(0);
+	char cpuset[RTE_CPU_AFFINITY_STR_LEN];
+
+	rte_timer_init(); /* Initialize timer function */
+
+	if (!rte_atomic32_test_and_set(&run_once))
+		return -1;
+
+	thread_id = pthread_self();
+
+	/* initialize all logs */
+	rte_eal_log_init(NULL, 0);
+	rte_log_init();
+
+	eal_log_level_parse(argc, argv);
+
+	/* set log level as early as possible */
+	rte_log_set_global_level(RTE_LOG_LEVEL);
+
+	/* create a map of all processors in the system */
+	eal_create_cpu_map();
+
+	if (rte_eal_cpu_init() < 0)
+		rte_panic("Cannot detect lcores\n");
+
+	fctret = eal_parse_args(argc, argv);
+	if (fctret < 0)
+		exit(1);
+
+	rte_config_init();
+
+	if (internal_config.no_hugetlbfs == 0 &&
+			internal_config.process_type != RTE_PROC_SECONDARY &&
+			eal_hugepage_info_init() < 0)
+		rte_panic("Cannot get hugepage information\n");
+
+	if (internal_config.memory == 0 && internal_config.force_sockets == 0) {
+		if (internal_config.no_hugetlbfs)
+			internal_config.memory = MEMSIZE_IF_NO_HUGE_PAGE;
+		else
+			internal_config.memory = eal_get_hugepage_mem_size();
+	}
+
+	if (internal_config.vmware_tsc_map == 1) {
+#ifdef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT
+		rte_cycles_vmware_tsc_map = 1;
+		RTE_LOG (DEBUG, EAL, "Using VMWARE TSC MAP, "
+				"you must have monitor_control.pseudo_perfctr = TRUE\n");
+#else
+		RTE_LOG (WARNING, EAL, "Ignoring --vmware-tsc-map because "
+				"RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT is not set\n");
+#endif
+	}
+
+	rte_srand(rte_rdtsc());
+
+	/* in secondary processes, memory init may allocate additional fbarrays
+	* not present in primary processes, so to avoid any potential issues,
+	* initialize memzones first.
+	*/
+	if (rte_eal_memzone_init() < 0)
+		rte_panic("Cannot init memzone\n");
+
+	if (rte_eal_memory_init() < 0)
+		rte_panic("Cannot init memory\n");
+
+	if (rte_eal_malloc_heap_init() < 0)
+		rte_panic("Cannot init malloc heap\n");
+
+	if (rte_eal_tailqs_init() < 0)
+		rte_panic("Cannot init tail queues for objects\n");
+
+	if (rte_eal_alarm_init() < 0)
+		rte_panic("Cannot init interrupt-handling thread\n");
+
+	if (rte_eal_intr_init() < 0)
+		rte_panic("Cannot init interrupt-handling thread\n");
+
+	if (rte_eal_timer_init() < 0)
+		rte_panic("Cannot init HPET or TSC timers\n");
+
+	eal_check_mem_on_local_socket();
+
+	eal_thread_init_master(rte_config.master_lcore);
+
+	ret = eal_thread_dump_affinity(cpuset, RTE_CPU_AFFINITY_STR_LEN);
+
+	RTE_LOG(DEBUG, EAL, "Master lcore %u is ready (tid=%p;cpuset=[%s%s])\n",
+		rte_config.master_lcore, thread_id, cpuset,
+		ret == 0 ? "" : "...");
+
+	RTE_LCORE_FOREACH_SLAVE(i) {
+
+		/*
+		 * create communication pipes between master thread
+		 * and children
+		 */
+		if (pipe(lcore_config[i].pipe_master2slave) < 0)
+			rte_panic("Cannot create pipe\n");
+		if (pipe(lcore_config[i].pipe_slave2master) < 0)
+			rte_panic("Cannot create pipe\n");
+
+		lcore_config[i].state = WAIT;
+
+		/* create a thread for each lcore */
+		ret = pthread_create(&lcore_config[i].thread_id, NULL,
+				     eal_thread_loop, NULL);
+		if (ret != 0)
+			rte_panic("Cannot create thread\n");
+	}
+
+	/*
+	 * Launch a dummy function on all slave lcores, so that master lcore
+	 * knows they are all ready when this function returns.
+	 */
+	rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MASTER);
+	rte_eal_mp_wait_lcore();
+
+#ifdef EAL_SERVICES_SUPPORT
+	/* Not supported on Windows, presently */
+	/* initialize services so vdevs register service during bus_probe. */
+	ret = rte_service_init();
+	if (ret) {
+		rte_eal_init_alert("rte_service_init() failed\n");
+		rte_errno = ENOEXEC;
+		return -1;
+	}
+#endif
+
+	/* Probe & Initialize PCI devices */
+	if (rte_bus_probe())
+		rte_panic("Cannot probe PCI\n");
+
+#ifdef EAL_SERVICES_SUPPORT
+	/* initialize default service/lcore mappings and start running. Ignore
+	* -ENOTSUP, as it indicates no service coremask passed to EAL.
+	*/
+	ret = rte_service_start_with_defaults();
+	if (ret < 0 && ret != -ENOTSUP) {
+		rte_errno = ENOEXEC;
+		return -1;
+	}
+#endif
+	rte_eal_mcfg_complete();
+
+	return fctret;
+}
+
+/* get core role */
+enum rte_lcore_role_t
+rte_eal_lcore_role(unsigned lcore_id)
+{
+	return rte_config.lcore_role[lcore_id];
+}
+
+enum rte_proc_type_t
+rte_eal_process_type(void)
+{
+	return rte_config.process_type;
+}
+
+int
+eal_create_runtime_dir(void)
+{
+	char  Directory[PATH_MAX];
+
+	GetTempPathA(sizeof(Directory), Directory);
+
+	char tmp[PATH_MAX];
+	int ret;
+
+
+	/* create DPDK subdirectory under runtime dir */
+	ret = snprintf(tmp, sizeof(tmp), "%s\\dpdk", Directory);
+	if (ret < 0 || ret == sizeof(tmp)) {
+		RTE_LOG(ERR, EAL, "Error creating DPDK runtime path name\n");
+		return -1;
+	}
+
+	/* create prefix-specific subdirectory under DPDK runtime dir */
+	ret = snprintf(runtime_dir, sizeof(runtime_dir), "%s\\%s",
+		tmp, internal_config.hugefile_prefix);
+	if (ret < 0 || ret == sizeof(runtime_dir)) {
+		RTE_LOG(ERR, EAL, "Error creating prefix-specific runtime path name\n");
+		return -1;
+	}
+
+	/* create the path if it doesn't exist. no "mkdir -p" here, so do it
+	* step by step.
+	*/
+	ret = mkdir(tmp);
+	if (ret < 0 && errno != EEXIST) {
+		RTE_LOG(ERR, EAL, "Error creating '%s': %s\n",
+			tmp, strerror(errno));
+		return -1;
+	}
+
+	ret = mkdir(runtime_dir);
+	if (ret < 0 && errno != EEXIST) {
+		RTE_LOG(ERR, EAL, "Error creating '%s': %s\n",
+			runtime_dir, strerror(errno));
+		return -1;
+	}
+
+	return 0;
+}
+
+const char *
+eal_get_runtime_dir(void)
+{
+	return runtime_dir;
+}
diff --git a/lib/librte_eal/windows/eal/eal_alarm.c b/lib/librte_eal/windows/eal/eal_alarm.c
new file mode 100644
index 000000000..d881cc5dd
--- /dev/null
+++ b/lib/librte_eal/windows/eal/eal_alarm.c
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+#include <rte_alarm.h>
+#include <rte_common.h>
+#include "eal_private.h"
+
+int
+rte_eal_alarm_init(void)
+{
+	return 0;
+}
+
+
+int
+rte_eal_alarm_set(uint64_t us __rte_unused,
+		rte_eal_alarm_callback cb_fn __rte_unused,
+		void *cb_arg __rte_unused)
+{
+	return -ENOTSUP;
+}
+
+int
+rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn __rte_unused,
+		void *cb_arg __rte_unused)
+{
+	return -ENOTSUP;
+}
diff --git a/lib/librte_eal/windows/eal/eal_debug.c b/lib/librte_eal/windows/eal/eal_debug.c
new file mode 100644
index 000000000..c1cb810c5
--- /dev/null
+++ b/lib/librte_eal/windows/eal/eal_debug.c
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+#include <rte_windows.h>
+#include <DbgHelp.h>
+
+#include <rte_log.h>
+#include <rte_debug.h>
+
+#define MAX_TRACE_STACK_FRAMES	1024
+
+/* dump the stack of the calling core */
+void rte_dump_stack(void)
+{
+	void *pCallingStack[MAX_TRACE_STACK_FRAMES];
+	WORD  numFrames;
+	DWORD dwError;
+	HANDLE hProcess = GetCurrentProcess();
+
+	SymInitialize(hProcess, NULL, TRUE);
+	numFrames = RtlCaptureStackBackTrace(0, MAX_TRACE_STACK_FRAMES, pCallingStack, NULL);
+
+	for (int i = 0; i < numFrames; i++) {
+	    DWORD64 dwAddress = (DWORD64)(pCallingStack[i]);
+	    DWORD   dwDisplacement;
+
+	    char buffer[sizeof(SYMBOL_INFO) + MAX_SYM_NAME * sizeof(TCHAR)];
+	    PSYMBOL_INFO pSymbol = (PSYMBOL_INFO)buffer;
+
+	    pSymbol->SizeOfStruct = sizeof(SYMBOL_INFO);
+	    pSymbol->MaxNameLen = MAX_SYM_NAME;
+
+	    // Get the symbol information from the address
+	    if (SymFromAddr(hProcess, dwAddress, NULL, pSymbol)) {
+		// Get the line number from the same address
+		IMAGEHLP_LINE64 line;
+
+		SymSetOptions(SYMOPT_LOAD_LINES);
+		line.SizeOfStruct = sizeof(IMAGEHLP_LINE64);
+
+		if (SymGetLineFromAddr64(hProcess, dwAddress, &dwDisplacement, &line))
+		    printf("Currently at %s in %s: line: %lu: address: 0x%llX\n", pSymbol->Name, line.FileName, line.LineNumber, pSymbol->Address);
+		else
+		    goto error;
+	    }
+	    else
+		goto error;
+
+	    continue;
+error:
+	    dwError = GetLastError();
+	    printf("SymFromAddr()/SymGetLineFromAddr64() failed: Error: %lu\n", dwError);
+	}
+
+	return;
+}
+
+/* not implemented in this environment */
+void rte_dump_registers(void)
+{
+	return;
+}
+
+/* call abort(), it will generate a coredump if enabled */
+void __rte_panic(const char *funcname, const char *format, ...)
+{
+	va_list ap;
+
+	rte_log(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, "PANIC in %s():\n", funcname);
+	va_start(ap, format);
+	rte_vlog(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, format, ap);
+	va_end(ap);
+	rte_dump_stack();
+	rte_dump_registers();
+	abort();
+}
+
+/*
+ * Like rte_panic this terminates the application. However, no traceback is
+ * provided and no core-dump is generated.
+ */
+void
+rte_exit(int exit_code, const char *format, ...)
+{
+	va_list ap;
+
+	if (exit_code != 0)
+		RTE_LOG(CRIT, EAL, "Error - exiting with code: %d\n  Cause: ", exit_code);
+
+	va_start(ap, format);
+	rte_vlog(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, format, ap);
+	va_end(ap);
+
+#ifndef RTE_EAL_ALWAYS_PANIC_ON_ERROR
+	exit(exit_code);
+#else
+	rte_dump_stack();
+	rte_dump_registers();
+	abort();
+#endif
+}
diff --git a/lib/librte_eal/windows/eal/eal_fbarray.c b/lib/librte_eal/windows/eal/eal_fbarray.c
new file mode 100644
index 000000000..8e3932b3f
--- /dev/null
+++ b/lib/librte_eal/windows/eal/eal_fbarray.c
@@ -0,0 +1,1273 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#include <inttypes.h>
+#include <limits.h>
+#include <assert.h>
+#include <sys/mman.h>
+#include <stdint.h>
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_errno.h>
+#include <rte_spinlock.h>
+#include <rte_tailq.h>
+
+#include "eal_filesystem.h"
+#include "eal_private.h"
+
+#include "rte_fbarray.h"
+
+#define MASK_SHIFT 6ULL
+#define MASK_ALIGN (1ULL << MASK_SHIFT)
+#define MASK_LEN_TO_IDX(x) ((x) >> MASK_SHIFT)
+#define MASK_LEN_TO_MOD(x) ((x) - RTE_ALIGN_FLOOR(x, MASK_ALIGN))
+#define MASK_GET_IDX(idx, mod) ((idx << MASK_SHIFT) + mod)
+
+/*
+ * This is a mask that is always stored at the end of array, to provide fast
+ * way of finding free/used spots without looping through each element.
+ */
+
+struct used_mask {
+	unsigned int n_masks;
+	uint64_t data[];
+};
+
+static size_t
+calc_mask_size(unsigned int len)
+{
+	/* mask must be multiple of MASK_ALIGN, even though length of array
+	 * itself may not be aligned on that boundary.
+	 */
+	len = RTE_ALIGN_CEIL(len, MASK_ALIGN);
+	return sizeof(struct used_mask) +
+			sizeof(uint64_t) * MASK_LEN_TO_IDX(len);
+}
+
+static size_t
+calc_data_size(size_t page_sz, unsigned int elt_sz, unsigned int len)
+{
+	size_t data_sz = elt_sz * len;
+	size_t msk_sz = calc_mask_size(len);
+	return RTE_ALIGN_CEIL(data_sz + msk_sz, page_sz);
+}
+
+static struct used_mask *
+get_used_mask(void *data, unsigned int elt_sz, unsigned int len)
+{
+	return (struct used_mask *) RTE_PTR_ADD(data, elt_sz * len);
+}
+
+static void *
+resize_and_map(int fd, size_t len)
+{
+	// Map the file to the process virtual space and return the address
+	void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
+			MAP_SHARED, fd, 0);
+	if (addr == MAP_FAILED) {
+		RTE_LOG(ERR, EAL, "mmap() failed: %s\n", strerror(errno));
+		/* pass errno up the chain */
+		rte_errno = errno;
+		return NULL;
+	}
+
+	return addr;
+}
+
+static int
+find_next_n(const struct rte_fbarray *arr, unsigned int start, unsigned int n,
+	    bool used)
+{
+	const struct used_mask *msk = get_used_mask(arr->data, arr->elt_sz,
+			arr->len);
+	unsigned int msk_idx, lookahead_idx, first, first_mod;
+	unsigned int last, last_mod;
+	uint64_t last_msk, ignore_msk;
+
+	/*
+	 * mask only has granularity of MASK_ALIGN, but start may not be aligned
+	 * on that boundary, so construct a special mask to exclude anything we
+	 * don't want to see to avoid confusing ctz.
+	 */
+	first = MASK_LEN_TO_IDX(start);
+	first_mod = MASK_LEN_TO_MOD(start);
+	ignore_msk = ~((1ULL << first_mod) - 1);
+
+	/* array length may not be aligned, so calculate ignore mask for last
+	 * mask index.
+	 */
+	last = MASK_LEN_TO_IDX(arr->len);
+	last_mod = MASK_LEN_TO_MOD(arr->len);
+	last_msk = ~(-1ULL << last_mod);
+
+	for (msk_idx = first; msk_idx < msk->n_masks; msk_idx++) {
+		uint64_t cur_msk, lookahead_msk;
+		unsigned int run_start, clz, left;
+		bool found = false;
+		/*
+		 * The process of getting n consecutive bits for arbitrary n is
+		 * a bit involved, but here it is in a nutshell:
+		 *
+		 *  1. let n be the number of consecutive bits we're looking for
+		 *  2. check if n can fit in one mask, and if so, do n-1
+		 *     rshift-ands to see if there is an appropriate run inside
+		 *     our current mask
+		 *    2a. if we found a run, bail out early
+		 *    2b. if we didn't find a run, proceed
+		 *  3. invert the mask and count leading zeroes (that is, count
+		 *     how many consecutive set bits we had starting from the
+		 *     end of current mask) as k
+		 *    3a. if k is 0, continue to next mask
+		 *    3b. if k is not 0, we have a potential run
+		 *  4. to satisfy our requirements, next mask must have n-k
+		 *     consecutive set bits right at the start, so we will do
+		 *     (n-k-1) rshift-ands and check if first bit is set.
+		 *
+		 * Step 4 will need to be repeated if (n-k) > MASK_ALIGN until
+		 * we either run out of masks, lose the run, or find what we
+		 * were looking for.
+		 */
+		cur_msk = msk->data[msk_idx];
+		left = n;
+
+		/* if we're looking for free spaces, invert the mask */
+		if (!used)
+			cur_msk = ~cur_msk;
+
+		/* combine current ignore mask with last index ignore mask */
+		if (msk_idx == last)
+			ignore_msk |= last_msk;
+
+		/* if we have an ignore mask, ignore once */
+		if (ignore_msk) {
+			cur_msk &= ignore_msk;
+			ignore_msk = 0;
+		}
+
+		/* if n can fit in within a single mask, do a search */
+		if (n <= MASK_ALIGN) {
+			uint64_t tmp_msk = cur_msk;
+			unsigned int s_idx;
+			for (s_idx = 0; s_idx < n - 1; s_idx++)
+				tmp_msk &= tmp_msk >> 1ULL;
+			/* we found what we were looking for */
+			if (tmp_msk != 0) {
+				run_start = __builtin_ctzll(tmp_msk);
+				return MASK_GET_IDX(msk_idx, run_start);
+			}
+		}
+
+		/*
+		 * we didn't find our run within the mask, or n > MASK_ALIGN,
+		 * so we're going for plan B.
+		 */
+
+		/* count leading zeroes on inverted mask */
+		if (~cur_msk == 0)
+			clz = sizeof(cur_msk) * 8;
+		else
+			clz = __builtin_clzll(~cur_msk);
+
+		/* if there aren't any runs at the end either, just continue */
+		if (clz == 0)
+			continue;
+
+		/* we have a partial run at the end, so try looking ahead */
+		run_start = MASK_ALIGN - clz;
+		left -= clz;
+
+		for (lookahead_idx = msk_idx + 1; lookahead_idx < msk->n_masks;
+				lookahead_idx++) {
+			unsigned int s_idx, need;
+			lookahead_msk = msk->data[lookahead_idx];
+
+			/* if we're looking for free space, invert the mask */
+			if (!used)
+				lookahead_msk = ~lookahead_msk;
+
+			/* figure out how many consecutive bits we need here */
+			need = RTE_MIN(left, MASK_ALIGN);
+
+			for (s_idx = 0; s_idx < need - 1; s_idx++)
+				lookahead_msk &= lookahead_msk >> 1ULL;
+
+			/* if first bit is not set, we've lost the run */
+			if ((lookahead_msk & 1) == 0) {
+				/*
+				 * we've scanned this far, so we know there are
+				 * no runs in the space we've lookahead-scanned
+				 * as well, so skip that on next iteration.
+				 */
+				ignore_msk = ~((1ULL << need) - 1);
+				msk_idx = lookahead_idx;
+				break;
+			}
+
+			left -= need;
+
+			/* check if we've found what we were looking for */
+			if (left == 0) {
+				found = true;
+				break;
+			}
+		}
+
+		/* we didn't find anything, so continue */
+		if (!found)
+			continue;
+
+		return MASK_GET_IDX(msk_idx, run_start);
+	}
+	/* we didn't find anything */
+	rte_errno = used ? ENOENT : ENOSPC;
+	return -1;
+}
+
+static int
+find_next(const struct rte_fbarray *arr, unsigned int start, bool used)
+{
+	const struct used_mask *msk = get_used_mask(arr->data, arr->elt_sz,
+			arr->len);
+	unsigned int idx, first, first_mod;
+	unsigned int last, last_mod;
+	uint64_t last_msk, ignore_msk;
+
+	/*
+	 * mask only has granularity of MASK_ALIGN, but start may not be aligned
+	 * on that boundary, so construct a special mask to exclude anything we
+	 * don't want to see to avoid confusing ctz.
+	 */
+	first = MASK_LEN_TO_IDX(start);
+	first_mod = MASK_LEN_TO_MOD(start);
+	ignore_msk = ~((1ULL << first_mod) - 1ULL);
+
+	/* array length may not be aligned, so calculate ignore mask for last
+	 * mask index.
+	 */
+	last = MASK_LEN_TO_IDX(arr->len);
+	last_mod = MASK_LEN_TO_MOD(arr->len);
+	last_msk = ~(-(1ULL) << last_mod);
+
+	for (idx = first; idx < msk->n_masks; idx++) {
+		uint64_t cur = msk->data[idx];
+		int found;
+
+		/* if we're looking for free entries, invert mask */
+		if (!used)
+			cur = ~cur;
+
+		if (idx == last)
+			cur &= last_msk;
+
+		/* ignore everything before start on first iteration */
+		if (idx == first)
+			cur &= ignore_msk;
+
+		/* check if we have any entries */
+		if (cur == 0)
+			continue;
+
+		/*
+		 * find first set bit - that will correspond to whatever it is
+		 * that we're looking for.
+		 */
+		found = __builtin_ctzll(cur);
+		return MASK_GET_IDX(idx, found);
+	}
+	/* we didn't find anything */
+	rte_errno = used ? ENOENT : ENOSPC;
+	return -1;
+}
+
+static int
+find_contig(const struct rte_fbarray *arr, unsigned int start, bool used)
+{
+	const struct used_mask *msk = get_used_mask(arr->data, arr->elt_sz,
+			arr->len);
+	unsigned int idx, first, first_mod;
+	unsigned int last, last_mod;
+	uint64_t last_msk;
+	unsigned int need_len, result = 0;
+
+	/* array length may not be aligned, so calculate ignore mask for last
+	 * mask index.
+	 */
+	last = MASK_LEN_TO_IDX(arr->len);
+	last_mod = MASK_LEN_TO_MOD(arr->len);
+	last_msk = ~(-(1ULL) << last_mod);
+
+	first = MASK_LEN_TO_IDX(start);
+	first_mod = MASK_LEN_TO_MOD(start);
+	for (idx = first; idx < msk->n_masks; idx++, result += need_len) {
+		uint64_t cur = msk->data[idx];
+		unsigned int run_len;
+
+		need_len = MASK_ALIGN;
+
+		/* if we're looking for free entries, invert mask */
+		if (!used)
+			cur = ~cur;
+
+		/* if this is last mask, ignore everything after last bit */
+		if (idx == last)
+			cur &= last_msk;
+
+		/* ignore everything before start on first iteration */
+		if (idx == first) {
+			cur >>= first_mod;
+			/* at the start, we don't need the full mask len */
+			need_len -= first_mod;
+		}
+
+		/* we will be looking for zeroes, so invert the mask */
+		cur = ~cur;
+
+		/* if mask is zero, we have a complete run */
+		if (cur == 0)
+			continue;
+
+		/*
+		 * see if current run ends before mask end.
+		 */
+		run_len = __builtin_ctzll(cur);
+
+		/* add however many zeroes we've had in the last run and quit */
+		if (run_len < need_len) {
+			result += run_len;
+			break;
+		}
+	}
+	return result;
+}
+
+static int
+find_prev_n(const struct rte_fbarray *arr, unsigned int start, unsigned int n,
+		bool used)
+{
+	const struct used_mask *msk = get_used_mask(arr->data, arr->elt_sz,
+			arr->len);
+	unsigned int msk_idx, lookbehind_idx, first, first_mod;
+	uint64_t ignore_msk;
+
+	/*
+	 * mask only has granularity of MASK_ALIGN, but start may not be aligned
+	 * on that boundary, so construct a special mask to exclude anything we
+	 * don't want to see to avoid confusing ctz.
+	 */
+	first = MASK_LEN_TO_IDX(start);
+	first_mod = MASK_LEN_TO_MOD(start);
+	/* we're going backwards, so mask must start from the top */
+	ignore_msk = first_mod == MASK_ALIGN - 1 ?
+				-1ULL : /* prevent overflow */
+				~(-1ULL << (first_mod + 1));
+
+	/* go backwards, include zero */
+	msk_idx = first;
+	do {
+		uint64_t cur_msk, lookbehind_msk;
+		unsigned int run_start, run_end, ctz, left;
+		bool found = false;
+		/*
+		 * The process of getting n consecutive bits from the top for
+		 * arbitrary n is a bit involved, but here it is in a nutshell:
+		 *
+		 *  1. let n be the number of consecutive bits we're looking for
+		 *  2. check if n can fit in one mask, and if so, do n-1
+		 *     lshift-ands to see if there is an appropriate run inside
+		 *     our current mask
+		 *    2a. if we found a run, bail out early
+		 *    2b. if we didn't find a run, proceed
+		 *  3. invert the mask and count trailing zeroes (that is, count
+		 *     how many consecutive set bits we had starting from the
+		 *     start of current mask) as k
+		 *    3a. if k is 0, continue to next mask
+		 *    3b. if k is not 0, we have a potential run
+		 *  4. to satisfy our requirements, next mask must have n-k
+		 *     consecutive set bits at the end, so we will do (n-k-1)
+		 *     lshift-ands and check if last bit is set.
+		 *
+		 * Step 4 will need to be repeated if (n-k) > MASK_ALIGN until
+		 * we either run out of masks, lose the run, or find what we
+		 * were looking for.
+		 */
+		cur_msk = msk->data[msk_idx];
+		left = n;
+
+		/* if we're looking for free spaces, invert the mask */
+		if (!used)
+			cur_msk = ~cur_msk;
+
+		/* if we have an ignore mask, ignore once */
+		if (ignore_msk) {
+			cur_msk &= ignore_msk;
+			ignore_msk = 0;
+		}
+
+		/* if n can fit in within a single mask, do a search */
+		if (n <= MASK_ALIGN) {
+			uint64_t tmp_msk = cur_msk;
+			unsigned int s_idx;
+			for (s_idx = 0; s_idx < n - 1; s_idx++)
+				tmp_msk &= tmp_msk << 1ULL;
+			/* we found what we were looking for */
+			if (tmp_msk != 0) {
+				/* clz will give us offset from end of mask, and
+				 * we only get the end of our run, not start,
+				 * so adjust result to point to where start
+				 * would have been.
+				 */
+				run_start = MASK_ALIGN -
+						__builtin_clzll(tmp_msk) - n;
+				return MASK_GET_IDX(msk_idx, run_start);
+			}
+		}
+
+		/*
+		 * we didn't find our run within the mask, or n > MASK_ALIGN,
+		 * so we're going for plan B.
+		 */
+
+		/* count trailing zeroes on inverted mask */
+		if (~cur_msk == 0)
+			ctz = sizeof(cur_msk) * 8;
+		else
+			ctz = __builtin_ctzll(~cur_msk);
+
+		/* if there aren't any runs at the start either, just
+		 * continue
+		 */
+		if (ctz == 0)
+			continue;
+
+		/* we have a partial run at the start, so try looking behind */
+		run_end = MASK_GET_IDX(msk_idx, ctz);
+		left -= ctz;
+
+		/* go backwards, include zero */
+		lookbehind_idx = msk_idx - 1;
+
+		/* we can't lookbehind as we've run out of masks, so stop */
+		if (msk_idx == 0)
+			break;
+
+		do {
+			const uint64_t last_bit = 1ULL << (MASK_ALIGN - 1);
+			unsigned int s_idx, need;
+
+			lookbehind_msk = msk->data[lookbehind_idx];
+
+			/* if we're looking for free space, invert the mask */
+			if (!used)
+				lookbehind_msk = ~lookbehind_msk;
+
+			/* figure out how many consecutive bits we need here */
+			need = RTE_MIN(left, MASK_ALIGN);
+
+			for (s_idx = 0; s_idx < need - 1; s_idx++)
+				lookbehind_msk &= lookbehind_msk << 1ULL;
+
+			/* if last bit is not set, we've lost the run */
+			if ((lookbehind_msk & last_bit) == 0) {
+				/*
+				 * we've scanned this far, so we know there are
+				 * no runs in the space we've lookbehind-scanned
+				 * as well, so skip that on next iteration.
+				 */
+				ignore_msk = -1ULL << need;
+				msk_idx = lookbehind_idx;
+				break;
+			}
+
+			left -= need;
+
+			/* check if we've found what we were looking for */
+			if (left == 0) {
+				found = true;
+				break;
+			}
+		} while ((lookbehind_idx--) != 0); /* decrement after check to
+						    * include zero
+						    */
+
+		/* we didn't find anything, so continue */
+		if (!found)
+			continue;
+
+		/* we've found what we were looking for, but we only know where
+		 * the run ended, so calculate start position.
+		 */
+		return run_end - n;
+	} while (msk_idx-- != 0); /* decrement after check to include zero */
+	/* we didn't find anything */
+	rte_errno = used ? ENOENT : ENOSPC;
+	return -1;
+}
+
+static int
+find_prev(const struct rte_fbarray *arr, unsigned int start, bool used)
+{
+	const struct used_mask *msk = get_used_mask(arr->data, arr->elt_sz,
+			arr->len);
+	unsigned int idx, first, first_mod;
+	uint64_t ignore_msk;
+
+	/*
+	 * mask only has granularity of MASK_ALIGN, but start may not be aligned
+	 * on that boundary, so construct a special mask to exclude anything we
+	 * don't want to see to avoid confusing clz.
+	 */
+	first = MASK_LEN_TO_IDX(start);
+	first_mod = MASK_LEN_TO_MOD(start);
+	/* we're going backwards, so mask must start from the top */
+	ignore_msk = first_mod == MASK_ALIGN - 1 ?
+				-1ULL : /* prevent overflow */
+				~(-1ULL << (first_mod + 1));
+
+	/* go backwards, include zero */
+	idx = first;
+	do {
+		uint64_t cur = msk->data[idx];
+		int found;
+
+		/* if we're looking for free entries, invert mask */
+		if (!used)
+			cur = ~cur;
+
+		/* ignore everything before start on first iteration */
+		if (idx == first)
+			cur &= ignore_msk;
+
+		/* check if we have any entries */
+		if (cur == 0)
+			continue;
+
+		/*
+		 * find last set bit - that will correspond to whatever it is
+		 * that we're looking for. we're counting leading zeroes here,
+		 * so the value we get is an offset from the top of the mask;
+		 * convert it to a bit position counted from the bottom.
+		 */
+		found = MASK_ALIGN - __builtin_clzll(cur) - 1;
+
+		return MASK_GET_IDX(idx, found);
+	} while (idx-- != 0); /* decrement after check to include zero */
+
+	/* we didn't find anything */
+	rte_errno = used ? ENOENT : ENOSPC;
+	return -1;
+}
+
+static int
+find_rev_contig(const struct rte_fbarray *arr, unsigned int start, bool used)
+{
+	const struct used_mask *msk = get_used_mask(arr->data, arr->elt_sz,
+			arr->len);
+	unsigned int idx, first, first_mod;
+	unsigned int need_len, result = 0;
+
+	first = MASK_LEN_TO_IDX(start);
+	first_mod = MASK_LEN_TO_MOD(start);
+
+	/* go backwards, include zero */
+	idx = first;
+	do {
+		uint64_t cur = msk->data[idx];
+		unsigned int run_len;
+
+		need_len = MASK_ALIGN;
+
+		/* if we're looking for free entries, invert mask */
+		if (!used)
+			cur = ~cur;
+
+		/* ignore everything after start on first iteration */
+		if (idx == first) {
+			unsigned int end_len = MASK_ALIGN - first_mod - 1;
+			cur <<= end_len;
+			/* at the start, we don't need the full mask len */
+			need_len -= end_len;
+		}
+
+		/* we will be looking for zeroes, so invert the mask */
+		cur = ~cur;
+
+		/* if mask is zero, we have a complete run */
+		if (cur == 0)
+			goto endloop;
+
+		/*
+		 * see where run ends, starting from the end.
+		 */
+		run_len = __builtin_clzll(cur);
+
+		/* add however many zeroes we've had in the last run and quit */
+		if (run_len < need_len) {
+			result += run_len;
+			break;
+		}
+endloop:
+		result += need_len;
+	} while (idx-- != 0); /* decrement after check to include zero */
+	return result;
+}
+
+static int
+set_used(struct rte_fbarray *arr, unsigned int idx, bool used)
+{
+	struct used_mask *msk;
+	uint64_t msk_bit = 1ULL << MASK_LEN_TO_MOD(idx);
+	unsigned int msk_idx = MASK_LEN_TO_IDX(idx);
+	bool already_used;
+	int ret = -1;
+
+	if (arr == NULL || idx >= arr->len) {
+		rte_errno = EINVAL;
+		return -1;
+	}
+	msk = get_used_mask(arr->data, arr->elt_sz, arr->len);
+	ret = 0;
+
+	/* prevent array from changing under us */
+	rte_rwlock_write_lock(&arr->rwlock);
+
+	already_used = (msk->data[msk_idx] & msk_bit) != 0;
+
+	/* nothing to be done */
+	if (used == already_used)
+		goto out;
+
+	if (used) {
+		msk->data[msk_idx] |= msk_bit;
+		arr->count++;
+	} else {
+		msk->data[msk_idx] &= ~msk_bit;
+		arr->count--;
+	}
+out:
+	rte_rwlock_write_unlock(&arr->rwlock);
+
+	return ret;
+}
+
+static int
+fully_validate(const char *name, unsigned int elt_sz, unsigned int len)
+{
+	if (name == NULL || elt_sz == 0 || len == 0 || len > INT_MAX) {
+		rte_errno = EINVAL;
+		return -1;
+	}
+
+	if (strnlen(name, RTE_FBARRAY_NAME_LEN) == RTE_FBARRAY_NAME_LEN) {
+		rte_errno = ENAMETOOLONG;
+		return -1;
+	}
+	return 0;
+}
+
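+/*
+ * Illustrative usage of the fbarray API (names below are hypothetical):
+ *
+ *	struct rte_fbarray arr;
+ *	if (rte_fbarray_init(&arr, "example", 256, sizeof(struct elem)) == 0) {
+ *		int idx = rte_fbarray_find_next_free(&arr, 0);
+ *		if (idx >= 0)
+ *			rte_fbarray_set_used(&arr, idx);
+ *	}
+ */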
+int __rte_experimental
+rte_fbarray_init(struct rte_fbarray *arr, const char *name, unsigned int len,
+		unsigned int elt_sz)
+{
+	size_t page_sz, mmap_len;
+	char path[PATH_MAX];
+	struct used_mask *msk;
+	void *data = NULL;
+	int fd = -1;
+	BOOL retval;
+	HANDLE hWinFileHandle;
+
+	if (arr == NULL) {
+		rte_errno = EINVAL;
+		return -1;
+	}
+
+	if (fully_validate(name, elt_sz, len))
+		return -1;
+
+	page_sz = sysconf(_SC_PAGESIZE);
+	if (page_sz == (size_t)-1)
+		goto fail;
+
+	/* calculate our memory limits */
+	mmap_len = calc_data_size(page_sz, elt_sz, len);
+
+	if (internal_config.no_shconf) {
+		/* no shared config files, so create a private anonymous
+		 * writable mapping and use it directly
+		 */
+		data = mmap(NULL, mmap_len, PROT_READ | PROT_WRITE,
+				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+		if (data == MAP_FAILED) {
+			RTE_LOG(DEBUG, EAL, "%s(): couldn't map anonymous memory: %s\n",
+					__func__, strerror(errno));
+			data = NULL;
+			goto fail;
+		}
+	} else {
+		eal_get_fbarray_path(path, sizeof(path), name);
+
+		/*
+		 * Each fbarray is unique to process namespace, i.e. the
+		 * filename depends on process prefix. Try to take out a lock
+		 * and see if we succeed. If we don't, someone else is using it
+		 * already.
+		 */
+		fd = _open(path, O_CREAT | O_RDWR | _O_TRUNC, _S_IREAD | _S_IWRITE);
+		if (fd < 0) {
+			RTE_LOG(DEBUG, EAL, "%s(): couldn't open %s: %s\n",
+					__func__, path, strerror(errno));
+			rte_errno = errno;
+			goto fail;
+		}
+
+		/* Lock file for exclusive access */
+		OVERLAPPED sOverlapped = { 0 };
+		sOverlapped.Offset = 0;
+		sOverlapped.OffsetHigh = 0;
+
+		hWinFileHandle = (HANDLE)_get_osfhandle(fd);
+		retval = LockFileEx(hWinFileHandle,
+			LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY, 0,
+			mmap_len, 0, &sOverlapped);
+		if (!retval) {
+			RTE_LOG(DEBUG, EAL, "%s(): couldn't lock %s: %s\n",
+				__func__, path, strerror(errno));
+			rte_errno = EBUSY;
+			goto fail;
+		}
+
+		/* take out a non-exclusive lock, so that other processes could
+		* still attach to it, but no other process could reinitialize
+		* it.
+		*/
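+		/* without LOCKFILE_EXCLUSIVE_LOCK, LockFileEx below takes a
+		 * shared (read) lock, which is what lets other processes
+		 * attach while preventing reinitialization.
+		 */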
+		retval = UnlockFileEx(hWinFileHandle, 0, mmap_len, 0, &sOverlapped);
+		if (!retval) {
+			RTE_LOG(DEBUG, EAL, "%s(): couldn't unlock %s: %s\n",
+				__func__, path, strerror(errno));
+			rte_errno = EBUSY;
+			goto fail;
+		} else {
+			retval = LockFileEx(hWinFileHandle,
+				LOCKFILE_FAIL_IMMEDIATELY, 0,
+				mmap_len, 0, &sOverlapped);
+			if (!retval) {
+				rte_errno = errno;
+				goto fail;
+			}
+		}
+		data = resize_and_map(fd, mmap_len);
+		if (data == NULL)
+			goto fail;
+
+		/* we've mmap'ed the file, we can now close the fd */
+		_close(fd);
+	}
+
+	/* initialize the data */
+	memset(data, 0, mmap_len);
+
+	/* populate data structure */
+	strlcpy(arr->name, name, sizeof(arr->name));
+	arr->data = data;
+	arr->len = len;
+	arr->elt_sz = elt_sz;
+	arr->count = 0;
+
+	msk = get_used_mask(data, elt_sz, len);
+	msk->n_masks = MASK_LEN_TO_IDX(RTE_ALIGN_CEIL(len, MASK_ALIGN));
+
+	rte_rwlock_init(&arr->rwlock);
+
+	return 0;
+fail:
+	if (data)
+		munmap(data, mmap_len);
+	if (fd >= 0)
+		_close(fd);
+	return -1;
+}
+
+int __rte_experimental
+rte_fbarray_attach(struct rte_fbarray *arr)
+{
+	size_t page_sz, mmap_len;
+	char path[PATH_MAX];
+	void *data = NULL;
+	int fd = -1;
+	BOOL retval;
+
+	if (arr == NULL) {
+		rte_errno = EINVAL;
+		return -1;
+	}
+
+	/*
+	 * we don't need to synchronize attach as two values we need (element
+	 * size and array length) are constant for the duration of life of
+	 * the array, so the parts we care about will not race.
+	 */
+
+	if (fully_validate(arr->name, arr->elt_sz, arr->len))
+		return -1;
+
+	page_sz = sysconf(_SC_PAGESIZE);
+	if (page_sz == (size_t)-1)
+		goto fail;
+
+	mmap_len = calc_data_size(page_sz, arr->elt_sz, arr->len);
+
+	data = eal_get_virtual_area(arr->data, &mmap_len, page_sz, 0, 0);
+	if (data == NULL)
+		goto fail;
+
+	eal_get_fbarray_path(path, sizeof(path), arr->name);
+
+	fd = open(path, O_RDWR);
+	if (fd < 0) {
+		rte_errno = errno;
+		goto fail;
+	}
+
+	/* lock the file, to let others know we're using it */
+	/* Lock file for exclusive access */
+	OVERLAPPED sOverlapped = { 0 };
+	sOverlapped.Offset = 0;
+	sOverlapped.OffsetHigh = 0;
+
+	HANDLE hWinFileHandle = (HANDLE)_get_osfhandle(fd);
+	retval = LockFileEx(hWinFileHandle, LOCKFILE_FAIL_IMMEDIATELY, 0,
+			mmap_len, 0, &sOverlapped);
+	if (!retval) {
+		rte_errno = errno;
+		goto fail;
+	}
+	data = resize_and_map(fd, mmap_len);
+	if (data == NULL)
+		goto fail;
+
+	close(fd);
+
+	/* we're done */
+
+	return 0;
+fail:
+	if (data)
+		munmap(data, mmap_len);
+	if (fd >= 0)
+		close(fd);
+	return -1;
+}
+
+int __rte_experimental
+rte_fbarray_detach(struct rte_fbarray *arr)
+{
+	if (arr == NULL) {
+		rte_errno = EINVAL;
+		return -1;
+	}
+
+	/*
+	 * we don't need to synchronize detach as two values we need (element
+	 * size and total capacity) are constant for the duration of life of
+	 * the array, so the parts we care about will not race. if the user is
+	 * detaching while doing something else in the same process, we can't
+	 * really do anything about it, things will blow up either way.
+	 */
+
+	size_t page_sz = sysconf(_SC_PAGESIZE);
+
+	if (page_sz == (size_t)-1)
+		return -1;
+
+	/* this may already be unmapped (e.g. repeated call from a previously
+	 * failed destroy()), but that is on the user; we can't (easily) know
+	 * whether this is still mapped.
+	 */
+	munmap(arr->data, calc_data_size(page_sz, arr->elt_sz, arr->len));
+
+	return 0;
+}
+
+int __rte_experimental
+rte_fbarray_destroy(struct rte_fbarray *arr)
+{
+	int fd, ret;
+	char path[PATH_MAX];
+	BOOL retval;
+
+	ret = rte_fbarray_detach(arr);
+	if (ret)
+		return ret;
+
+	/* try deleting the file */
+	eal_get_fbarray_path(path, sizeof(path), arr->name);
+
+	fd = open(path, O_RDONLY);
+	if (fd < 0) {
+		RTE_LOG(ERR, EAL, "Could not open fbarray file: %s\n",
+			strerror(errno));
+		return -1;
+	}
+
+	OVERLAPPED sOverLapped = { 0 };
+	sOverLapped.Offset = 0;
+	sOverLapped.OffsetHigh = 0;
+
+	HANDLE hWinFileHandle = (HANDLE)_get_osfhandle(fd);
+
+	retval = LockFileEx(hWinFileHandle,
+			LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY, 0,
+			sizeof(*arr), 0, &sOverLapped);
+
+	if (!retval) {
+		RTE_LOG(DEBUG, EAL, "Cannot destroy fbarray - another process is using it\n");
+		rte_errno = EBUSY;
+		ret = -1;
+	} else {
+		ret = 0;
+		unlink(path);
+		memset(arr, 0, sizeof(*arr));
+	}
+	close(fd);
+
+	return ret;
+}
+
+void * __rte_experimental
+rte_fbarray_get(const struct rte_fbarray *arr, unsigned int idx)
+{
+	void *ret = NULL;
+	if (arr == NULL) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	if (idx >= arr->len) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	ret = RTE_PTR_ADD(arr->data, idx * arr->elt_sz);
+
+	return ret;
+}
+
+int __rte_experimental
+rte_fbarray_set_used(struct rte_fbarray *arr, unsigned int idx)
+{
+	return set_used(arr, idx, true);
+}
+
+int __rte_experimental
+rte_fbarray_set_free(struct rte_fbarray *arr, unsigned int idx)
+{
+	return set_used(arr, idx, false);
+}
+
+int __rte_experimental
+rte_fbarray_is_used(struct rte_fbarray *arr, unsigned int idx)
+{
+	struct used_mask *msk;
+	int msk_idx;
+	uint64_t msk_bit;
+	int ret = -1;
+
+	if (arr == NULL || idx >= arr->len) {
+		rte_errno = EINVAL;
+		return -1;
+	}
+
+	/* prevent array from changing under us */
+	rte_rwlock_read_lock(&arr->rwlock);
+
+	msk = get_used_mask(arr->data, arr->elt_sz, arr->len);
+	msk_idx = MASK_LEN_TO_IDX(idx);
+	msk_bit = 1ULL << MASK_LEN_TO_MOD(idx);
+
+	ret = (msk->data[msk_idx] & msk_bit) != 0;
+
+	rte_rwlock_read_unlock(&arr->rwlock);
+
+	return ret;
+}
+
+static int
+fbarray_find(struct rte_fbarray *arr, unsigned int start, bool next, bool used)
+{
+	int ret = -1;
+
+	if (arr == NULL || start >= arr->len) {
+		rte_errno = EINVAL;
+		return -1;
+	}
+
+	/* prevent array from changing under us */
+	rte_rwlock_read_lock(&arr->rwlock);
+
+	/* cheap checks to prevent doing useless work */
+	if (!used) {
+		if (arr->len == arr->count) {
+			rte_errno = ENOSPC;
+			goto out;
+		}
+		if (arr->count == 0) {
+			ret = start;
+			goto out;
+		}
+	} else {
+		if (arr->count == 0) {
+			rte_errno = ENOENT;
+			goto out;
+		}
+		if (arr->len == arr->count) {
+			ret = start;
+			goto out;
+		}
+	}
+	if (next)
+		ret = find_next(arr, start, used);
+	else
+		ret = find_prev(arr, start, used);
+out:
+	rte_rwlock_read_unlock(&arr->rwlock);
+	return ret;
+}
+
+int __rte_experimental
+rte_fbarray_find_next_free(struct rte_fbarray *arr, unsigned int start)
+{
+	return fbarray_find(arr, start, true, false);
+}
+
+int __rte_experimental
+rte_fbarray_find_next_used(struct rte_fbarray *arr, unsigned int start)
+{
+	return fbarray_find(arr, start, true, true);
+}
+
+int __rte_experimental
+rte_fbarray_find_prev_free(struct rte_fbarray *arr, unsigned int start)
+{
+	return fbarray_find(arr, start, false, false);
+}
+
+int __rte_experimental
+rte_fbarray_find_prev_used(struct rte_fbarray *arr, unsigned int start)
+{
+	return fbarray_find(arr, start, false, true);
+}
+
+static int
+fbarray_find_n(struct rte_fbarray *arr, unsigned int start, unsigned int n,
+		bool next, bool used)
+{
+	int ret = -1;
+
+	if (arr == NULL || start >= arr->len || n > arr->len || n == 0) {
+		rte_errno = EINVAL;
+		return -1;
+	}
+	if (next && (arr->len - start) < n) {
+		rte_errno = used ? ENOENT : ENOSPC;
+		return -1;
+	}
+	if (!next && start < (n - 1)) {
+		rte_errno = used ? ENOENT : ENOSPC;
+		return -1;
+	}
+
+	/* prevent array from changing under us */
+	rte_rwlock_read_lock(&arr->rwlock);
+
+	/* cheap checks to prevent doing useless work */
+	if (!used) {
+		if (arr->len == arr->count || arr->len - arr->count < n) {
+			rte_errno = ENOSPC;
+			goto out;
+		}
+		if (arr->count == 0) {
+			ret = next ? start : start - n + 1;
+			goto out;
+		}
+	} else {
+		if (arr->count < n) {
+			rte_errno = ENOENT;
+			goto out;
+		}
+		if (arr->count == arr->len) {
+			ret = next ? start : start - n + 1;
+			goto out;
+		}
+	}
+
+	if (next)
+		ret = find_next_n(arr, start, n, used);
+	else
+		ret = find_prev_n(arr, start, n, used);
+out:
+	rte_rwlock_read_unlock(&arr->rwlock);
+	return ret;
+}
+
+int __rte_experimental
+rte_fbarray_find_next_n_free(struct rte_fbarray *arr, unsigned int start,
+		unsigned int n)
+{
+	return fbarray_find_n(arr, start, n, true, false);
+}
+
+int __rte_experimental
+rte_fbarray_find_next_n_used(struct rte_fbarray *arr, unsigned int start,
+		unsigned int n)
+{
+	return fbarray_find_n(arr, start, n, true, true);
+}
+
+int __rte_experimental
+rte_fbarray_find_prev_n_free(struct rte_fbarray *arr, unsigned int start,
+		unsigned int n)
+{
+	return fbarray_find_n(arr, start, n, false, false);
+}
+
+int __rte_experimental
+rte_fbarray_find_prev_n_used(struct rte_fbarray *arr, unsigned int start,
+		unsigned int n)
+{
+	return fbarray_find_n(arr, start, n, false, true);
+}
+
+static int
+fbarray_find_contig(struct rte_fbarray *arr, unsigned int start, bool next,
+		bool used)
+{
+	int ret = -1;
+
+	if (arr == NULL || start >= arr->len) {
+		rte_errno = EINVAL;
+		return -1;
+	}
+
+	/* prevent array from changing under us */
+	rte_rwlock_read_lock(&arr->rwlock);
+
+	/* cheap checks to prevent doing useless work */
+	if (used) {
+		if (arr->count == 0) {
+			ret = 0;
+			goto out;
+		}
+		if (next && arr->count == arr->len) {
+			ret = arr->len - start;
+			goto out;
+		}
+		if (!next && arr->count == arr->len) {
+			ret = start + 1;
+			goto out;
+		}
+	} else {
+		if (arr->len == arr->count) {
+			ret = 0;
+			goto out;
+		}
+		if (next && arr->count == 0) {
+			ret = arr->len - start;
+			goto out;
+		}
+		if (!next && arr->count == 0) {
+			ret = start + 1;
+			goto out;
+		}
+	}
+
+	if (next)
+		ret = find_contig(arr, start, used);
+	else
+		ret = find_rev_contig(arr, start, used);
+out:
+	rte_rwlock_read_unlock(&arr->rwlock);
+	return ret;
+}
+
+int __rte_experimental
+rte_fbarray_find_contig_free(struct rte_fbarray *arr, unsigned int start)
+{
+	return fbarray_find_contig(arr, start, true, false);
+}
+
+int __rte_experimental
+rte_fbarray_find_contig_used(struct rte_fbarray *arr, unsigned int start)
+{
+	return fbarray_find_contig(arr, start, true, true);
+}
+
+int __rte_experimental
+rte_fbarray_find_rev_contig_free(struct rte_fbarray *arr, unsigned int start)
+{
+	return fbarray_find_contig(arr, start, false, false);
+}
+
+int __rte_experimental
+rte_fbarray_find_rev_contig_used(struct rte_fbarray *arr, unsigned int start)
+{
+	return fbarray_find_contig(arr, start, false, true);
+}
+
+int __rte_experimental
+rte_fbarray_find_idx(const struct rte_fbarray *arr, const void *elt)
+{
+	void *end;
+	int ret = -1;
+
+	/*
+	 * no need to synchronize as it doesn't matter if underlying data
+	 * changes - we're doing pointer arithmetic here.
+	 */
+
+	if (arr == NULL || elt == NULL) {
+		rte_errno = EINVAL;
+		return -1;
+	}
+	end = RTE_PTR_ADD(arr->data, arr->elt_sz * arr->len);
+	if (elt < arr->data || elt >= end) {
+		rte_errno = EINVAL;
+		return -1;
+	}
+
+	ret = RTE_PTR_DIFF(elt, arr->data) / arr->elt_sz;
+
+	return ret;
+}
+
+void __rte_experimental
+rte_fbarray_dump_metadata(struct rte_fbarray *arr, FILE *f)
+{
+	struct used_mask *msk;
+	unsigned int i;
+
+	if (arr == NULL || f == NULL) {
+		rte_errno = EINVAL;
+		return;
+	}
+
+	if (fully_validate(arr->name, arr->elt_sz, arr->len)) {
+		fprintf(f, "Invalid file-backed array\n");
+		goto out;
+	}
+
+	/* prevent array from changing under us */
+	rte_rwlock_read_lock(&arr->rwlock);
+
+	fprintf(f, "File-backed array: %s\n", arr->name);
+	fprintf(f, "size: %i occupied: %i elt_sz: %i\n",
+			arr->len, arr->count, arr->elt_sz);
+
+	msk = get_used_mask(arr->data, arr->elt_sz, arr->len);
+
+	for (i = 0; i < msk->n_masks; i++)
+		fprintf(f, "msk idx %i: 0x%016" PRIx64 "\n", i, msk->data[i]);
+out:
+	rte_rwlock_read_unlock(&arr->rwlock);
+}
diff --git a/lib/librte_eal/windows/eal/eal_filesystem.h b/lib/librte_eal/windows/eal/eal_filesystem.h
new file mode 100644
index 000000000..2bfb8b9a1
--- /dev/null
+++ b/lib/librte_eal/windows/eal/eal_filesystem.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+/**
+ * @file
+ * Stores functions and path defines for files and directories
+ * on the filesystem for Windows, that are used by the Windows EAL.
+ */
+
+#ifndef EAL_FILESYSTEM_H
+#define EAL_FILESYSTEM_H
+
+#include <rte_windows.h>
+#include "eal_internal_cfg.h"
+
+
+ /* sets up platform-specific runtime data dir */
+int
+eal_create_runtime_dir(void);
+
+/* returns runtime dir */
+const char *
+eal_get_runtime_dir(void);
+
+ /* define the default filename prefix for the %s values below */
+#define HUGEFILE_PREFIX_DEFAULT "rte"
+
+/* Path of rte config file */
+#define RUNTIME_CONFIG_FMT "%s\\%s.config"
+
+static inline const char *
+eal_runtime_config_path(void)
+{
+	static char buffer[PATH_MAX];  /* static so auto-zeroed */
+	char Directory[PATH_MAX];
+
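+	/* GetTempPathA() returns the temp directory with a trailing
+	 * backslash, e.g. "C:\Users\<user>\AppData\Local\Temp\".
+	 */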
+	GetTempPathA(sizeof(Directory), Directory);
+	snprintf(buffer, sizeof(buffer) - 1, RUNTIME_CONFIG_FMT, Directory,
+			internal_config.hugefile_prefix);
+
+	return buffer;
+}
+
+/* Path of file backed array*/
+#define FBARRAY_NAME_FMT "%s\\fbarray_%s"
+
+static inline const char *
+eal_get_fbarray_path(char *buffer, size_t buflen, const char *name)
+{
+	snprintf(buffer, buflen, FBARRAY_NAME_FMT, eal_get_runtime_dir(), name);
+	return buffer;
+}
+
+/** Path of primary/secondary communication unix socket file. */
+#define MP_SOCKET_FNAME "mp_socket"
+
+static inline const char *
+eal_mp_socket_path(void)
+{
+	static char buffer[PATH_MAX]; /* static so auto-zeroed */
+
+	snprintf(buffer, sizeof(buffer) - 1, "%s/%s", eal_get_runtime_dir(),
+		MP_SOCKET_FNAME);
+	return buffer;
+}
+
+/* Path of hugepage info file */
+#define HUGEPAGE_INFO_FMT "%s\\.%s_hugepage_info"
+
+static inline const char *
+eal_hugepage_info_path(void)
+{
+	static char buffer[PATH_MAX];  /* static so auto-zeroed */
+	char Directory[PATH_MAX];
+
+	GetSystemDirectoryA(Directory, sizeof(Directory));
+	snprintf(buffer, sizeof(buffer) - 1, HUGEPAGE_INFO_FMT, Directory,
+			internal_config.hugefile_prefix);
+	return buffer;
+}
+
+/* String format for hugepage map files */
+#define HUGEFILE_FMT "%s/%smap_%d"
+#define TEMP_HUGEFILE_FMT "%s/%smap_temp_%d"
+
+static inline const char *
+eal_get_hugefile_path(char *buffer, size_t buflen, const char *hugedir, int f_id)
+{
+	snprintf(buffer, buflen, HUGEFILE_FMT, hugedir,
+			internal_config.hugefile_prefix, f_id);
+	buffer[buflen - 1] = '\0';
+	return buffer;
+}
+
+/** Function to read a single numeric value from a file on the filesystem.
+ * Used to read information from files on /sys */
+int eal_parse_sysfs_value(const char *filename, unsigned long *val);
+
+#endif /* EAL_FILESYSTEM_H */
diff --git a/lib/librte_eal/windows/eal/eal_hugepage_info.c b/lib/librte_eal/windows/eal/eal_hugepage_info.c
new file mode 100644
index 000000000..d417da6da
--- /dev/null
+++ b/lib/librte_eal/windows/eal/eal_hugepage_info.c
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#include <rte_log.h>
+#include "eal_internal_cfg.h"
+#include "eal_hugepages.h"
+
+
+/*
+ * Need to complete hugepage support on Windows
+ */
+int
+eal_hugepage_info_init(void)
+{
+	internal_config.num_hugepage_sizes = 1;
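+	/* GetLargePageMinimum() reports the minimum large-page size supported
+	 * by the processor (typically 2MB on x86_64).
+	 */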
+	internal_config.hugepage_info[0].hugepage_sz = (uint64_t)GetLargePageMinimum();
+
+	return 0;
+}
diff --git a/lib/librte_eal/windows/eal/eal_interrupts.c b/lib/librte_eal/windows/eal/eal_interrupts.c
new file mode 100644
index 000000000..6be47d14e
--- /dev/null
+++ b/lib/librte_eal/windows/eal/eal_interrupts.c
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include "eal_private.h"
+
+int
+rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
+				rte_intr_callback_fn cb,
+				void *cb_arg)
+{
+	return -ENOTSUP;
+}
+
+int
+rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
+				rte_intr_callback_fn cb_fn,
+				void *cb_arg)
+{
+	return -ENOTSUP;
+}
+
+int
+rte_intr_enable(const struct rte_intr_handle *intr_handle)
+{
+	return -ENOTSUP;
+}
+
+int
+rte_intr_disable(const struct rte_intr_handle *intr_handle)
+{
+	return -ENOTSUP;
+}
+
+int
+rte_eal_intr_init(void)
+{
+	return 0;
+}
+
+int
+rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
+		int epfd, int op, unsigned int vec, void *data)
+{
+	RTE_SET_USED(intr_handle);
+	RTE_SET_USED(epfd);
+	RTE_SET_USED(op);
+	RTE_SET_USED(vec);
+	RTE_SET_USED(data);
+
+	return -ENOTSUP;
+}
+
+int
+rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
+{
+	RTE_SET_USED(intr_handle);
+	RTE_SET_USED(nb_efd);
+
+	return 0;
+}
+
+void
+rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
+{
+	RTE_SET_USED(intr_handle);
+}
+
+int
+rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
+{
+	RTE_SET_USED(intr_handle);
+	return 0;
+}
+
+int
+rte_intr_allow_others(struct rte_intr_handle *intr_handle)
+{
+	RTE_SET_USED(intr_handle);
+	return 1;
+}
+
+int
+rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
+{
+	RTE_SET_USED(intr_handle);
+	return 0;
+}
diff --git a/lib/librte_eal/windows/eal/eal_lcore.c b/lib/librte_eal/windows/eal/eal_lcore.c
new file mode 100644
index 000000000..e8222e896
--- /dev/null
+++ b/lib/librte_eal/windows/eal/eal_lcore.c
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#include <rte_windows.h>
+
+/* global data structure that contains the CPU map */
+static struct _win_cpu_map {
+	unsigned numTotalProcessors;
+	unsigned numProcessorSockets;
+	unsigned numProcessorCores;
+	unsigned reserved;
+	struct _win_lcore_map {
+		uint8_t    socketid;
+		uint8_t    coreid;
+	} win_lcore_map[RTE_MAX_LCORE];
+} win_cpu_map = { 0 };
+
+
+void
+eal_create_cpu_map(void)
+{
+	win_cpu_map.numTotalProcessors = GetActiveProcessorCount(ALL_PROCESSOR_GROUPS);
+
+	LOGICAL_PROCESSOR_RELATIONSHIP lprocRel;
+	DWORD  lprocInfoSize = 0;
+	BOOL bHyperThreadingEnabled = FALSE;
+
+	/* First get the processor package information */
+	lprocRel = RelationProcessorPackage;
+	/* Determine the size of buffer we need (pass NULL) */
+	GetLogicalProcessorInformationEx(lprocRel, NULL, &lprocInfoSize);
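+	/* NOTE: 48 is assumed here (and below) to be the per-entry size of the
+	 * SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX records returned for this
+	 * relationship; entry sizes can vary, so this count is approximate.
+	 */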
+	win_cpu_map.numProcessorSockets = lprocInfoSize / 48;
+
+	lprocInfoSize = 0;
+	/* Next get the processor core information */
+	lprocRel = RelationProcessorCore;
+	GetLogicalProcessorInformationEx(lprocRel, NULL, &lprocInfoSize);
+	win_cpu_map.numProcessorCores = lprocInfoSize / 48;
+
+	if (win_cpu_map.numTotalProcessors > win_cpu_map.numProcessorCores)
+		bHyperThreadingEnabled = TRUE;
+
+	/* Distribute the socket and core ids appropriately across the logical cores */
+	/* For now, split the cores equally across the sockets - might need to revisit this later */
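+	/* Illustrative example: with 2 sockets, 8 physical cores and
+	 * hyper-threading enabled, lcores 0-1 map to socket 0/core 0,
+	 * lcores 2-3 to socket 0/core 1, and so on.
+	 */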
+	unsigned lcore = 0;
+	for (unsigned int socket = 0;
+			socket < win_cpu_map.numProcessorSockets; ++socket) {
+		unsigned int cores_per_socket = win_cpu_map.numProcessorCores /
+				win_cpu_map.numProcessorSockets;
+		for (unsigned int core = 0; core < cores_per_socket; ++core) {
+			win_cpu_map.win_lcore_map[lcore].socketid = socket;
+			win_cpu_map.win_lcore_map[lcore].coreid = core;
+
+			lcore++;
+
+			if (bHyperThreadingEnabled) {
+				win_cpu_map.win_lcore_map[lcore].socketid =
+						socket;
+				win_cpu_map.win_lcore_map[lcore].coreid = core;
+				lcore++;
+			}
+		}
+	}
+
+	return;
+}
+
+/* Check if a cpu is present by the presence of the cpu information for it */
+int
+eal_cpu_detected(unsigned lcore_id)
+{
+	return (lcore_id < win_cpu_map.numTotalProcessors);
+}
+
+/* Get CPU socket id (NUMA node) for a logical core */
+unsigned
+eal_cpu_socket_id(unsigned lcore_id)
+{
+	return win_cpu_map.win_lcore_map[lcore_id].socketid;
+}
+
+/* Get the cpu core id value */
+unsigned
+eal_cpu_core_id(unsigned lcore_id)
+{
+	return win_cpu_map.win_lcore_map[lcore_id].coreid;
+}
diff --git a/lib/librte_eal/windows/eal/eal_log.c b/lib/librte_eal/windows/eal/eal_log.c
new file mode 100644
index 000000000..0f5ae54a9
--- /dev/null
+++ b/lib/librte_eal/windows/eal/eal_log.c
@@ -0,0 +1,415 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include <rte_eal.h>
+#include <rte_log.h>
+#include <rte_per_lcore.h>
+
+#include "eal_private.h"
+
+/* global log structure */
+struct rte_logs rte_logs = {
+	.type = ~0,
+	.level = RTE_LOG_DEBUG,
+	.file = NULL,
+};
+
+struct rte_eal_opt_loglevel {
+	/** Next list entry */
+	TAILQ_ENTRY(rte_eal_opt_loglevel) next;
+	char *pattern;
+	/** Log level value obtained from the option */
+	uint32_t level;
+};
+
+TAILQ_HEAD(rte_eal_opt_loglevel_list, rte_eal_opt_loglevel);
+
+/** List of valid EAL log level options */
+static struct rte_eal_opt_loglevel_list opt_loglevel_list =
+TAILQ_HEAD_INITIALIZER(opt_loglevel_list);
+
+/* Stream to use for logging if rte_logs.file is NULL */
+static FILE *default_log_stream;
+
+/**
+ * This global structure stores some information about the message
+ * that is currently being processed by one lcore.
+ */
+struct log_cur_msg {
+	uint32_t loglevel; /**< log level - see rte_log.h */
+	uint32_t logtype;  /**< log type  - see rte_log.h */
+};
+
+struct rte_log_dynamic_type {
+	const char *name;
+	uint32_t loglevel;
+};
+
+/* per core log */
+static RTE_DEFINE_PER_LCORE(struct log_cur_msg, log_cur_msg);
+
+/* default logs */
+
+/* Change the stream that will be used by logging system */
+int
+rte_openlog_stream(FILE *f)
+{
+	rte_logs.file = f;
+	return 0;
+}
+
+/* Set global log level */
+void
+rte_log_set_global_level(uint32_t level)
+{
+	rte_logs.level = (uint32_t)level;
+}
+
+/* Get global log level */
+uint32_t
+rte_log_get_global_level(void)
+{
+	return rte_logs.level;
+}
+
+int
+rte_log_get_level(uint32_t type)
+{
+	if (type >= rte_logs.dynamic_types_len)
+		return -1;
+
+	return rte_logs.dynamic_types[type].loglevel;
+}
+
+int
+rte_log_set_level(uint32_t type, uint32_t level)
+{
+	if (type >= rte_logs.dynamic_types_len)
+		return -1;
+	if (level > RTE_LOG_DEBUG)
+		return -1;
+
+	rte_logs.dynamic_types[type].loglevel = level;
+
+	return 0;
+}
+
+/* set log level by regular expression */
+int
+rte_log_set_level_regexp(const char *regex, uint32_t level)
+{
+	/* Not implemented in Windows */
+	return 0;
+}
+
+/*
+ * Save the type string and the log level for dynamic logtypes
+ * which may register later.
+ */
+static int rte_log_save_level(int priority,
+	const char *regex, const char *pattern)
+{
+	/* Not implemented in Windows */
+	return 0;
+}
+
+int rte_log_save_regexp(const char *regex, int tmp)
+{
+	return rte_log_save_level(tmp, regex, NULL);
+}
+
+/* set log level based on glob (file match) pattern */
+int
+rte_log_set_level_pattern(const char *pattern, uint32_t level)
+{
+	size_t i;
+
+	if (level > RTE_LOG_DEBUG)
+		return -1;
+
+	for (i = 0; i < rte_logs.dynamic_types_len; i++) {
+		if (rte_logs.dynamic_types[i].name == NULL)
+			continue;
+	}
+
+	return 0;
+}
+
+int rte_log_save_pattern(const char *pattern, int priority)
+{
+	return rte_log_save_level(priority, NULL, pattern);
+}
+
+/* get the current loglevel for the message being processed */
+int rte_log_cur_msg_loglevel(void)
+{
+	return RTE_PER_LCORE(log_cur_msg).loglevel;
+}
+
+/* get the current logtype for the message being processed */
+int rte_log_cur_msg_logtype(void)
+{
+	return RTE_PER_LCORE(log_cur_msg).logtype;
+}
+
+static int
+rte_log_lookup(const char *name)
+{
+	size_t i;
+
+	for (i = 0; i < rte_logs.dynamic_types_len; i++) {
+		if (rte_logs.dynamic_types[i].name == NULL)
+			continue;
+		if (strcmp(name, rte_logs.dynamic_types[i].name) == 0)
+			return i;
+	}
+
+	return -1;
+}
+
+/* register an extended log type, assuming table is large enough, and id
+ * is not yet registered.
+ */
+static int
+__rte_log_register(const char *name, int id)
+{
+	char *dup_name = strdup(name);
+
+	if (dup_name == NULL)
+		return -ENOMEM;
+
+	rte_logs.dynamic_types[id].name = dup_name;
+	rte_logs.dynamic_types[id].loglevel = RTE_LOG_DEBUG/*RTE_LOG_INFO*/;
+
+	return id;
+}
+
+/* register an extended log type */
+int
+rte_log_register(const char *name)
+{
+	struct rte_log_dynamic_type *new_dynamic_types;
+	int id, ret;
+
+	id = rte_log_lookup(name);
+	if (id >= 0)
+		return id;
+
+	new_dynamic_types = realloc(rte_logs.dynamic_types,
+		sizeof(struct rte_log_dynamic_type) *
+		(rte_logs.dynamic_types_len + 1));
+	if (new_dynamic_types == NULL)
+		return -ENOMEM;
+	rte_logs.dynamic_types = new_dynamic_types;
+
+	ret = __rte_log_register(name, rte_logs.dynamic_types_len);
+	if (ret < 0)
+		return ret;
+
+	rte_logs.dynamic_types_len++;
+
+	return ret;
+}
+
+/* Register an extended log type and try to pick its level from EAL options */
+int __rte_experimental
+rte_log_register_type_and_pick_level(const char *name, uint32_t level_def)
+{
+	struct rte_eal_opt_loglevel *opt_ll;
+	uint32_t level = level_def;
+	int type;
+
+	type = rte_log_register(name);
+	if (type < 0)
+		return type;
+
+	TAILQ_FOREACH(opt_ll, &opt_loglevel_list, next) {
+		if (opt_ll->level > RTE_LOG_DEBUG)
+			continue;
+	}
+	rte_logs.dynamic_types[type].loglevel = level;
+
+	return type;
+}
+
+struct logtype {
+	uint32_t log_id;
+	const char *logtype;
+};
+
+static const struct logtype logtype_strings[] = {
+	{ RTE_LOGTYPE_EAL,        "lib.eal" },
+	{ RTE_LOGTYPE_MALLOC,     "lib.malloc" },
+	{ RTE_LOGTYPE_RING,       "lib.ring" },
+	{ RTE_LOGTYPE_MEMPOOL,    "lib.mempool" },
+	{ RTE_LOGTYPE_TIMER,      "lib.timer" },
+	{ RTE_LOGTYPE_PMD,        "pmd" },
+	{ RTE_LOGTYPE_HASH,       "lib.hash" },
+	{ RTE_LOGTYPE_LPM,        "lib.lpm" },
+	{ RTE_LOGTYPE_KNI,        "lib.kni" },
+	{ RTE_LOGTYPE_ACL,        "lib.acl" },
+	{ RTE_LOGTYPE_POWER,      "lib.power" },
+	{ RTE_LOGTYPE_METER,      "lib.meter" },
+	{ RTE_LOGTYPE_SCHED,      "lib.sched" },
+	{ RTE_LOGTYPE_PORT,       "lib.port" },
+	{ RTE_LOGTYPE_TABLE,      "lib.table" },
+	{ RTE_LOGTYPE_PIPELINE,   "lib.pipeline" },
+	{ RTE_LOGTYPE_MBUF,       "lib.mbuf" },
+	{ RTE_LOGTYPE_CRYPTODEV,  "lib.cryptodev" },
+	{ RTE_LOGTYPE_EFD,        "lib.efd" },
+	{ RTE_LOGTYPE_EVENTDEV,   "lib.eventdev" },
+	{ RTE_LOGTYPE_GSO,        "lib.gso" },
+	{ RTE_LOGTYPE_USER1,      "user1" },
+	{ RTE_LOGTYPE_USER2,      "user2" },
+	{ RTE_LOGTYPE_USER3,      "user3" },
+	{ RTE_LOGTYPE_USER4,      "user4" },
+	{ RTE_LOGTYPE_USER5,      "user5" },
+	{ RTE_LOGTYPE_USER6,      "user6" },
+	{ RTE_LOGTYPE_USER7,      "user7" },
+	{ RTE_LOGTYPE_USER8,      "user8" }
+};
+
+
+void
+rte_log_init(void)
+{
+	uint32_t i;
+
+	rte_log_set_global_level(RTE_LOG_DEBUG);
+
+	rte_logs.dynamic_types = calloc(RTE_LOGTYPE_FIRST_EXT_ID,
+		sizeof(struct rte_log_dynamic_type));
+	if (rte_logs.dynamic_types == NULL)
+		return;
+
+	/* register legacy log types */
+	for (i = 0; i < RTE_DIM(logtype_strings); i++)
+		__rte_log_register(logtype_strings[i].logtype,
+			logtype_strings[i].log_id);
+
+	rte_logs.dynamic_types_len = RTE_LOGTYPE_FIRST_EXT_ID;
+}
+
+static const char *
+loglevel_to_string(uint32_t level)
+{
+	switch (level) {
+	case 0: return "disabled";
+	case RTE_LOG_EMERG: return "emerg";
+	case RTE_LOG_ALERT: return "alert";
+	case RTE_LOG_CRIT: return "critical";
+	case RTE_LOG_ERR: return "error";
+	case RTE_LOG_WARNING: return "warning";
+	case RTE_LOG_NOTICE: return "notice";
+	case RTE_LOG_INFO: return "info";
+	case RTE_LOG_DEBUG: return "debug";
+	default: return "unknown";
+	}
+}
+
+/* dump global level and registered log types */
+void
+rte_log_dump(FILE *f)
+{
+	size_t i;
+
+	fprintf(f, "global log level is %s\n",
+		loglevel_to_string(rte_log_get_global_level()));
+
+	for (i = 0; i < rte_logs.dynamic_types_len; i++) {
+		if (rte_logs.dynamic_types[i].name == NULL)
+			continue;
+		fprintf(f, "id %zu: %s, level is %s\n",
+			i, rte_logs.dynamic_types[i].name,
+			loglevel_to_string(rte_logs.dynamic_types[i].loglevel));
+	}
+}
+
+/*
+ * Generates a log message. The message will be sent to the stream
+ * defined by the previous call to rte_openlog_stream().
+ */
+int
+rte_vlog(uint32_t level, uint32_t logtype, const char *format, va_list ap)
+{
+	int ret;
+	FILE *f = rte_logs.file;
+	if (f == NULL) {
+		f = default_log_stream;
+		if (f == NULL) {
+			/*
+			 * Grab the current value of stderr here, rather than
+			 * just initializing default_log_stream to stderr. This
+			 * ensures that we will always use the current value
+			 * of stderr, even if the application closes and
+			 * reopens it.
+			 */
+			f = stderr;
+		}
+	}
+
+	if (level > rte_logs.level)
+		return 0;
+	if (logtype >= rte_logs.dynamic_types_len)
+		return -1;
+	if (level > rte_logs.dynamic_types[logtype].loglevel)
+		return 0;
+
+	/* save loglevel and logtype in a global per-lcore variable */
+	RTE_PER_LCORE(log_cur_msg).loglevel = level;
+	RTE_PER_LCORE(log_cur_msg).logtype = logtype;
+
+	ret = vfprintf(f, format, ap);
+	fflush(f);
+	return ret;
+}
+
+/*
+ * Generates a log message. The message will be sent to the stream
+ * defined by the previous call to rte_openlog_stream().
+ * No need to check the level here; that is done by rte_vlog().
+ */
+int
+rte_log(uint32_t level, uint32_t logtype, const char *format, ...)
+{
+	va_list ap;
+	int ret;
+
+	va_start(ap, format);
+	ret = rte_vlog(level, logtype, format, ap);
+	va_end(ap);
+	return ret;
+}
+
+/*
+ * Called by environment-specific initialization functions.
+ */
+void
+eal_log_set_default(FILE *default_log)
+{
+	default_log_stream = default_log;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	RTE_LOG(NOTICE, EAL,
+		"Debug dataplane logs available - lower performance\n");
+#endif
+}
+
+/*
+ * Set the log output to the default function. Called during EAL init,
+ * once memzones are available.
+ */
+int
+rte_eal_log_init(const char *id, int facility)
+{
+	eal_log_set_default(stderr);
+	return 0;
+}
diff --git a/lib/librte_eal/windows/eal/eal_memalloc.c b/lib/librte_eal/windows/eal/eal_memalloc.c
new file mode 100644
index 000000000..66c0d1ca7
--- /dev/null
+++ b/lib/librte_eal/windows/eal/eal_memalloc.c
@@ -0,0 +1,995 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#define _FILE_OFFSET_BITS 64
+#include <errno.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/queue.h>
+#include <unistd.h>
+#include <limits.h>
+#include <fcntl.h>
+#include <Intsafe.h>
+#include <process.h>
+
+#include <signal.h>
+#include <setjmp.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_eal_memconfig.h>
+#include <rte_eal.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+
+#include "eal_filesystem.h"
+#include "eal_internal_cfg.h"
+#include "eal_memalloc.h"
+#include "eal_private.h"
+
+const int anonymous_hugepages_supported =
+#ifdef MAP_HUGE_SHIFT
+		1;
+#define RTE_MAP_HUGE_SHIFT MAP_HUGE_SHIFT
+#else
+		0;
+#define RTE_MAP_HUGE_SHIFT 26
+#endif
+
+/*
+ * not all kernel version support fallocate on hugetlbfs, so fall back to
+ * ftruncate and disallow deallocation if fallocate is not supported.
+ */
+static int fallocate_supported = -1; /* unknown */
+
+/* for single-file segments, we need some kind of mechanism to keep track of
+ * which hugepages can be freed back to the system, and which cannot. we cannot
+ * use flock() because they don't allow locking parts of a file, and we cannot
+ * use fcntl() due to issues with their semantics, so we will have to rely on a
+ * bunch of lockfiles for each page.
+ *
+ * we cannot know how many pages a system will have in advance, but we do know
+ * that they come in lists, and we know lengths of these lists. so, simply store
+ * a malloc'd array of fd's indexed by list and segment index.
+ *
+ * they will be initialized at startup, and filled as we allocate/deallocate
+ * segments. also, use this to track memseg list proper fd.
+ */
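+/*
+ * For example, fd_list[msl_idx].fds[seg_idx] holds the lock fd for segment
+ * seg_idx of memseg list msl_idx, and fd_list[msl_idx].memseg_list_fd holds
+ * the fd of the list itself in single-file-segments mode.
+ */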
+static struct {
+	int *fds; /**< dynamically allocated array of segment lock fd's */
+	int memseg_list_fd; /**< memseg list fd */
+	int len; /**< total length of the array */
+	int count; /**< entries used in an array */
+} fd_list[RTE_MAX_MEMSEG_LISTS];
+
+/** local copy of a memory map, used to synchronize memory hotplug in MP */
+static struct rte_memseg_list local_memsegs[RTE_MAX_MEMSEG_LISTS];
+
+static sigjmp_buf huge_jmpenv;
+
+/* Put setjmp into a wrap method to avoid compiling error. Any non-volatile,
+ * non-static local variable in the stack frame calling sigsetjmp might be
+ * clobbered by a call to longjmp.
+ */
+static int __rte_unused huge_wrap_sigsetjmp(void)
+{
+	return sigsetjmp(huge_jmpenv, 1);
+}
+
+static int
+get_seg_fd(char *path, int buflen, struct hugepage_info *hi,
+		unsigned int list_idx, unsigned int seg_idx)
+{
+	int fd;
+	BOOL ret;
+
+	if (internal_config.single_file_segments) {
+		/* create a hugepage file path */
+		eal_get_hugefile_path(path, buflen, hi->hugedir, list_idx);
+
+		fd = fd_list[list_idx].memseg_list_fd;
+
+		if (fd < 0) {
+			fd = _open(path, O_CREAT | O_RDWR, _S_IREAD | _S_IWRITE);
+			if (fd < 0) {
+				RTE_LOG(ERR, EAL, "%s(): open failed: %s\n",
+					__func__, strerror(errno));
+				return -1;
+			}
+
+			/* TODO dpdk-1808 take out a read lock and keep it indefinitely */
+
+			fd_list[list_idx].memseg_list_fd = fd;
+		}
+	} else {
+		/* create a hugepage file path */
+		eal_get_hugefile_path(path, buflen, hi->hugedir,
+				list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx);
+		fd = _open(path, O_CREAT | O_RDWR, _S_IREAD | _S_IWRITE);
+		if (fd < 0) {
+			RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
+					strerror(errno));
+			return -1;
+		}
+		/* TODO dpdk-1808 take out a read lock */
+
+	}
+	return fd;
+}
+
+static int
+resize_hugefile(int fd, char *path, int list_idx, int seg_idx,
+		uint64_t fa_offset, uint64_t page_sz, bool grow)
+{
+	/* dpdk-1808 Not implemented in Windows*/
+	return 0;
+}
+
+static int
+alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
+		struct hugepage_info *hi, unsigned int list_idx,
+		unsigned int seg_idx)
+{
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+	int cur_socket_id = 0;
+#endif
+	uint64_t map_offset;
+	rte_iova_t iova;
+	void *va;
+	char path[PATH_MAX];
+	int ret = 0;
+	int fd;
+	size_t alloc_sz;
+	int flags;
+	void *new_addr;
+
+	alloc_sz = hi->hugepage_sz;
+	if (!internal_config.single_file_segments &&
+			internal_config.in_memory &&
+			anonymous_hugepages_supported) {
+		int log2, flags;
+
+		log2 = rte_log2_u32(alloc_sz);
+		/* as per mmap() manpage, all page sizes are log2 of page size
+		 * shifted by MAP_HUGE_SHIFT
+		 */
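+		/* e.g. a 2MB page gives log2 == 21, matching Linux's
+		 * MAP_HUGE_2MB == (21 << MAP_HUGE_SHIFT)
+		 */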
+		flags = (log2 << RTE_MAP_HUGE_SHIFT) | MAP_FIXED |
+				MAP_PRIVATE | MAP_ANONYMOUS;
+		fd = -1;
+		va = mmap(addr, alloc_sz, PROT_READ | PROT_WRITE, flags, -1, 0);
+
+		/* single-file segments codepath will never be active because
+		 * in-memory mode is incompatible with it and it's stopped at
+		 * EAL initialization stage, however the compiler doesn't know
+		 * that and complains about map_offset being used uninitialized
+		 * on failure codepaths while having in-memory mode enabled. so,
+		 * assign a value here.
+		 */
+		map_offset = 0;
+	} else {
+		/* takes out a read lock on segment or segment list */
+		fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
+		if (fd < 0) {
+			RTE_LOG(ERR, EAL, "Couldn't get fd on hugepage file\n");
+			return -1;
+		}
+
+		if (internal_config.single_file_segments) {
+			map_offset = seg_idx * alloc_sz;
+			ret = resize_hugefile(fd, path, list_idx, seg_idx,
+					map_offset, alloc_sz, true);
+			if (ret < 0)
+				goto resized;
+		} else {
+			map_offset = 0;
+			if (ftruncate(fd, alloc_sz) < 0) {
+				RTE_LOG(DEBUG, EAL, "%s(): ftruncate() failed: %s\n",
+					__func__, strerror(errno));
+				goto resized;
+			}
+			if (internal_config.hugepage_unlink) {
+				if (unlink(path)) {
+					RTE_LOG(DEBUG, EAL, "%s(): unlink() failed: %s\n",
+						__func__, strerror(errno));
+					goto resized;
+				}
+			}
+		}
+
+		/*
+		 * map the segment, and populate page tables, the kernel fills
+		 * this segment with zeros if it's a new page.
+		 */
+		va = mmap(addr, alloc_sz, PROT_READ | PROT_WRITE,
+				MAP_SHARED | MAP_FIXED, fd,
+				map_offset);
+	}
+
+	if (va == MAP_FAILED) {
+		RTE_LOG(DEBUG, EAL, "%s(): mmap() failed: %s\n", __func__,
+			strerror(errno));
+		/* mmap failed, but the previous region might have been
+		 * unmapped anyway. try to remap it
+		 */
+		goto unmapped;
+	}
+	if (va != addr) {
+		RTE_LOG(DEBUG, EAL, "%s(): wrong mmap() address\n", __func__);
+		munmap(va, alloc_sz);
+		goto resized;
+	}
+
+	/* On Linux, hugetlb limitations, like cgroups, are
+	 * enforced at fault time instead of mmap(), even
+	 * with the option of MAP_POPULATE. Kernel will send
+	 * a SIGBUS signal. To avoid to be killed, save stack
+	 * environment here, if SIGBUS happens, we can jump
+	 * back here.
+	 */
+	if (huge_wrap_sigsetjmp()) {
+		RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more hugepages of size %uMB\n",
+			(unsigned int)(alloc_sz >> 20));
+		goto mapped;
+	}
+
+	/* we need to trigger a write to the page to enforce page fault and
+	 * ensure that page is accessible to us, but we can't overwrite value
+	 * that is already there, so read the old value and write it back.
+	 * kernel populates the page with zeroes initially.
+	 */
+	*(volatile int *)addr = *(volatile int *)addr;
+
+	iova = rte_mem_virt2iova(addr);
+	if (iova == RTE_BAD_PHYS_ADDR) {
+		RTE_LOG(DEBUG, EAL, "%s(): can't get IOVA addr\n",
+			__func__);
+		goto mapped;
+	}
+
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+	move_pages(getpid(), 1, &addr, NULL, &cur_socket_id, 0);
+
+	if (cur_socket_id != socket_id) {
+		RTE_LOG(DEBUG, EAL,
+				"%s(): allocation happened on wrong socket (wanted %d, got %d)\n",
+			__func__, socket_id, cur_socket_id);
+		goto mapped;
+	}
+#endif
+	/* for non-single file segments that aren't in-memory, we can close fd
+	 * here */
+	if (!internal_config.single_file_segments && !internal_config.in_memory)
+		close(fd);
+
+	ms->addr = addr;
+	ms->hugepage_sz = alloc_sz;
+	ms->len = alloc_sz;
+	ms->nchannel = rte_memory_get_nchannel();
+	ms->nrank = rte_memory_get_nrank();
+	ms->iova = iova;
+	ms->socket_id = socket_id;
+
+	return 0;
+
+mapped:
+	munmap(addr, alloc_sz);
+unmapped:
+	flags = MAP_FIXED;
+	new_addr = eal_get_virtual_area(addr, &alloc_sz, alloc_sz, 0, flags);
+	if (new_addr != addr) {
+		if (new_addr != NULL)
+			munmap(new_addr, alloc_sz);
+		/* we're leaving a hole in our virtual address space. if
+		 * somebody else maps this hole now, we could accidentally
+		 * override it in the future.
+		 */
+		RTE_LOG(CRIT, EAL, "Can't mmap holes in our virtual address space\n");
+	}
+resized:
+	/* in-memory mode will never be single-file-segments mode */
+	if (internal_config.single_file_segments) {
+		resize_hugefile(fd, path, list_idx, seg_idx, map_offset,
+				alloc_sz, false);
+		/* ignore failure, can't make it any worse */
+	} else {
+
+		/* TODO dpdk-1808 only remove file if we can take out a write lock */
+		if (internal_config.hugepage_unlink == 0 &&
+				internal_config.in_memory == 0 )
+			unlink(path);
+		close(fd);
+	}
+	return -1;
+}
+
+static int
+free_seg(struct rte_memseg *ms, struct hugepage_info *hi,
+		unsigned int list_idx, unsigned int seg_idx)
+{
+	char path[PATH_MAX];
+	int fd;
+	BOOL ret;
+
+	/* erase page data */
+	memset(ms->addr, 0, ms->len);
+
+	/* if we've already unlinked the page, nothing needs to be done */
+	if (internal_config.hugepage_unlink) {
+		memset(ms, 0, sizeof(*ms));
+		return 0;
+	}
+
+	/* Avoid unmapping already-mapped memory to boost performance */
+
+	return 0;
+}
+
+struct alloc_walk_param {
+	struct hugepage_info *hi;
+	struct rte_memseg **ms;
+	size_t page_sz;
+	unsigned int segs_allocated;
+	unsigned int n_segs;
+	int socket;
+	bool exact;
+};
+static int
+alloc_seg_walk(const struct rte_memseg_list *msl, void *arg)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	struct alloc_walk_param *wa = arg;
+	struct rte_memseg_list *cur_msl;
+	size_t page_sz;
+	BOOL ret;
+	int cur_idx, start_idx, j, dir_fd = -1;
+	unsigned int msl_idx, need, i;
+
+	if (msl->page_sz != wa->page_sz)
+		return 0;
+	if (msl->socket_id != wa->socket)
+		return 0;
+
+	page_sz = (size_t)msl->page_sz;
+
+	msl_idx = msl - mcfg->memsegs;
+	cur_msl = &mcfg->memsegs[msl_idx];
+
+	need = wa->n_segs;
+
+	/* try finding space in memseg list */
+	cur_idx = rte_fbarray_find_next_n_free(&cur_msl->memseg_arr, 0, need);
+	if (cur_idx < 0)
+		return 0;
+	start_idx = cur_idx;
+
+	/*
+	 * TODO dpdk-1808
+	 * do not allow any page allocations during the time we're allocating,
+	 * because file creation and locking operations are not atomic,
+	 * and we might be the first or the last ones to use a particular page,
+	 * so we need to ensure atomicity of every operation.
+	 *
+	 * during init, we already hold a write lock, so don't try to take out
+	 * another one.
+	 */
+
+	for (i = 0; i < need; i++, cur_idx++) {
+		struct rte_memseg *cur;
+		void *map_addr;
+
+		cur = rte_fbarray_get(&cur_msl->memseg_arr, cur_idx);
+		map_addr = RTE_PTR_ADD(cur_msl->base_va,
+				cur_idx * page_sz);
+
+		if (alloc_seg(cur, map_addr, wa->socket, wa->hi,
+				msl_idx, cur_idx)) {
+			RTE_LOG(DEBUG, EAL, "attempted to allocate %i segments, but only %i were allocated\n",
+				need, i);
+
+			/* if exact number wasn't requested, stop */
+			if (!wa->exact)
+				goto out;
+
+			/* clean up */
+			for (j = start_idx; j < cur_idx; j++) {
+				struct rte_memseg *tmp;
+				struct rte_fbarray *arr =
+						&cur_msl->memseg_arr;
+
+				tmp = rte_fbarray_get(arr, j);
+				rte_fbarray_set_free(arr, j);
+
+				/* free_seg may attempt to create a file, which
+				 * may fail.
+				 */
+				if (free_seg(tmp, wa->hi, msl_idx, j))
+					RTE_LOG(DEBUG, EAL, "Cannot free page\n");
+			}
+			/* clear the list */
+			if (wa->ms)
+				memset(wa->ms, 0, sizeof(*wa->ms) * wa->n_segs);
+
+			if (dir_fd >= 0)
+				close(dir_fd);
+			return -1;
+		}
+		if (wa->ms)
+			wa->ms[i] = cur;
+
+		rte_fbarray_set_used(&cur_msl->memseg_arr, cur_idx);
+	}
+out:
+	wa->segs_allocated = i;
+	if (i > 0)
+		cur_msl->version++;
+	if (dir_fd >= 0)
+		close(dir_fd);
+	return 1;
+}
+
+struct free_walk_param {
+	struct hugepage_info *hi;
+	struct rte_memseg *ms;
+};
+static int
+free_seg_walk(const struct rte_memseg_list *msl, void *arg)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	struct rte_memseg_list *found_msl;
+	struct free_walk_param *wa = arg;
+	uintptr_t start_addr, end_addr;
+	int msl_idx, seg_idx, ret, dir_fd = -1;
+	BOOL retval;
+
+	start_addr = (uintptr_t) msl->base_va;
+	end_addr = start_addr + msl->memseg_arr.len * (size_t)msl->page_sz;
+
+	if ((uintptr_t)wa->ms->addr < start_addr ||
+			(uintptr_t)wa->ms->addr >= end_addr)
+		return 0;
+
+	msl_idx = msl - mcfg->memsegs;
+	seg_idx = RTE_PTR_DIFF(wa->ms->addr, start_addr) / msl->page_sz;
+
+	/* msl is const */
+	found_msl = &mcfg->memsegs[msl_idx];
+
+	/* Removing clean up and synchronization code*/
+	/* TODO dpdk-1808
+	* do not allow any page allocations during the time we're freeing,
+	* because file creation and locking operations are not atomic,
+	* and we might be the first or the last ones to use a particular page,
+	* so we need to ensure atomicity of every operation.
+	*
+	* during init, we already hold a write lock, so don't try to take out
+	* another one.
+	*/
+
+	found_msl->version++;
+
+	rte_fbarray_set_free(&found_msl->memseg_arr, seg_idx);
+
+	ret = free_seg(wa->ms, wa->hi, msl_idx, seg_idx);
+
+	if (ret < 0)
+		return -1;
+
+	return 1;
+}
+
+int
+eal_memalloc_alloc_seg_bulk(struct rte_memseg **ms, int n_segs, size_t page_sz,
+		int socket, bool exact)
+{
+	int i, ret = -1;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+	bool have_numa = false;
+	int oldpolicy;
+	struct bitmask *oldmask;
+#endif
+	struct alloc_walk_param wa;
+	struct hugepage_info *hi = NULL;
+
+	memset(&wa, 0, sizeof(wa));
+
+	/* dynamic allocation not supported in legacy mode */
+	if (internal_config.legacy_mem)
+		return -1;
+
+	for (i = 0; i < (int) RTE_DIM(internal_config.hugepage_info); i++) {
+		if (page_sz ==
+				internal_config.hugepage_info[i].hugepage_sz) {
+			hi = &internal_config.hugepage_info[i];
+			break;
+		}
+	}
+	if (!hi) {
+		RTE_LOG(ERR, EAL, "%s(): can't find relevant hugepage_info entry\n",
+			__func__);
+		return -1;
+	}
+
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+	if (check_numa()) {
+		oldmask = numa_allocate_nodemask();
+		prepare_numa(&oldpolicy, oldmask, socket);
+		have_numa = true;
+	}
+#endif
+
+	wa.exact = exact;
+	wa.hi = hi;
+	wa.ms = ms;
+	wa.n_segs = n_segs;
+	wa.page_sz = page_sz;
+	wa.socket = socket;
+	wa.segs_allocated = 0;
+
+	/* memalloc is locked, so it's safe to use thread-unsafe version */
+	ret = rte_memseg_list_walk_thread_unsafe(alloc_seg_walk, &wa);
+	if (ret == 0) {
+		RTE_LOG(ERR, EAL, "%s(): couldn't find suitable memseg_list\n",
+			__func__);
+		ret = -1;
+	} else if (ret > 0) {
+		ret = (int)wa.segs_allocated;
+	}
+
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+	if (have_numa)
+		restore_numa(&oldpolicy, oldmask);
+#endif
+	return ret;
+}
+
+struct rte_memseg *
+eal_memalloc_alloc_seg(size_t page_sz, int socket)
+{
+	struct rte_memseg *ms;
+	if (eal_memalloc_alloc_seg_bulk(&ms, 1, page_sz, socket, true) < 0)
+		return NULL;
+	/* return pointer to newly allocated memseg */
+	return ms;
+}
+
+int
+eal_memalloc_free_seg_bulk(struct rte_memseg **ms, int n_segs)
+{
+	int seg, ret = 0;
+
+	/* dynamic free not supported in legacy mode */
+	if (internal_config.legacy_mem)
+		return -1;
+
+	for (seg = 0; seg < n_segs; seg++) {
+		struct rte_memseg *cur = ms[seg];
+		struct hugepage_info *hi = NULL;
+		struct free_walk_param wa;
+		int i, walk_res;
+
+		/* if this page is marked as unfreeable, fail */
+		if (cur->flags & RTE_MEMSEG_FLAG_DO_NOT_FREE) {
+			RTE_LOG(DEBUG, EAL, "Page is not allowed to be freed\n");
+			ret = -1;
+			continue;
+		}
+
+		memset(&wa, 0, sizeof(wa));
+
+		wa.ms = cur;
+		wa.hi = hi;
+
+		/* memalloc is locked, so it's safe to use thread-unsafe version
+		 */
+		walk_res = rte_memseg_list_walk_thread_unsafe(free_seg_walk,
+				&wa);
+		if (walk_res == 1)
+			continue;
+		if (walk_res == 0)
+			RTE_LOG(ERR, EAL, "Couldn't find memseg list\n");
+		ret = -1;
+	}
+	return ret;
+}
+
+int
+eal_memalloc_free_seg(struct rte_memseg *ms)
+{
+	/* dynamic free not supported in legacy mode */
+	if (internal_config.legacy_mem)
+		return -1;
+
+	return eal_memalloc_free_seg_bulk(&ms, 1);
+}
+
+static int
+sync_chunk(struct rte_memseg_list *primary_msl,
+		struct rte_memseg_list *local_msl, struct hugepage_info *hi,
+		unsigned int msl_idx, bool used, int start, int end)
+{
+	struct rte_fbarray *l_arr, *p_arr;
+	int i, ret, chunk_len, diff_len;
+
+	l_arr = &local_msl->memseg_arr;
+	p_arr = &primary_msl->memseg_arr;
+
+	/* we need to aggregate allocations/deallocations into bigger chunks,
+	 * as we don't want to spam the user with per-page callbacks.
+	 *
+	 * to avoid any potential issues, we also want to trigger
+	 * deallocation callbacks *before* we actually deallocate
+	 * memory, so that the user application could wrap up its use
+	 * before it goes away.
+	 */
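+	/* e.g. freeing 512 contiguous 2MB pages results in a single
+	 * RTE_MEM_EVENT_FREE callback covering 1GB, rather than 512
+	 * per-page callbacks.
+	 */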
+
+	chunk_len = end - start;
+
+	/* find how many contiguous pages we can map/unmap for this chunk */
+	diff_len = used ?
+			rte_fbarray_find_contig_free(l_arr, start) :
+			rte_fbarray_find_contig_used(l_arr, start);
+
+	/* has to be at least one page */
+	if (diff_len < 1)
+		return -1;
+
+	diff_len = RTE_MIN(chunk_len, diff_len);
+
+	/* if we are freeing memory, notify the application */
+	if (!used) {
+		struct rte_memseg *ms;
+		void *start_va;
+		size_t len, page_sz;
+
+		ms = rte_fbarray_get(l_arr, start);
+		start_va = ms->addr;
+		page_sz = (size_t)primary_msl->page_sz;
+		len = page_sz * diff_len;
+
+		eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
+				start_va, len);
+	}
+
+	for (i = 0; i < diff_len; i++) {
+		struct rte_memseg *p_ms, *l_ms;
+		int seg_idx = start + i;
+
+		l_ms = rte_fbarray_get(l_arr, seg_idx);
+		p_ms = rte_fbarray_get(p_arr, seg_idx);
+
+		if (l_ms == NULL || p_ms == NULL)
+			return -1;
+
+		if (used) {
+			ret = alloc_seg(l_ms, p_ms->addr,
+					p_ms->socket_id, hi,
+					msl_idx, seg_idx);
+			if (ret < 0)
+				return -1;
+			rte_fbarray_set_used(l_arr, seg_idx);
+		} else {
+			ret = free_seg(l_ms, hi, msl_idx, seg_idx);
+			rte_fbarray_set_free(l_arr, seg_idx);
+			if (ret < 0)
+				return -1;
+		}
+	}
+
+	/* if we just allocated memory, notify the application */
+	if (used) {
+		struct rte_memseg *ms;
+		void *start_va;
+		size_t len, page_sz;
+
+		ms = rte_fbarray_get(l_arr, start);
+		start_va = ms->addr;
+		page_sz = (size_t)primary_msl->page_sz;
+		len = page_sz * diff_len;
+
+		eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC,
+				start_va, len);
+	}
+
+	/* calculate how much we can advance until next chunk */
+	diff_len = used ?
+			rte_fbarray_find_contig_used(l_arr, start) :
+			rte_fbarray_find_contig_free(l_arr, start);
+	ret = RTE_MIN(chunk_len, diff_len);
+
+	return ret;
+}
+
+static int
+sync_status(struct rte_memseg_list *primary_msl,
+		struct rte_memseg_list *local_msl, struct hugepage_info *hi,
+		unsigned int msl_idx, bool used)
+{
+	struct rte_fbarray *l_arr, *p_arr;
+	int p_idx, l_chunk_len, p_chunk_len, ret;
+	int start, end;
+
+	/* this is a little bit tricky, but the basic idea is - walk both lists
+	 * and spot any places where there are discrepancies. walking both lists
+	 * and noting discrepancies in a single go is a hard problem, so we do
+	 * it in two passes - first we spot any places where allocated segments
+	 * mismatch (i.e. ensure that everything that's allocated in the primary
+	 * is also allocated in the secondary), and then we do it by looking at
+	 * free segments instead.
+	 *
+	 * we also need to aggregate changes into chunks, as we have to call
+	 * callbacks per allocation, not per page.
+	 */
+	l_arr = &local_msl->memseg_arr;
+	p_arr = &primary_msl->memseg_arr;
+
+	if (used)
+		p_idx = rte_fbarray_find_next_used(p_arr, 0);
+	else
+		p_idx = rte_fbarray_find_next_free(p_arr, 0);
+
+	while (p_idx >= 0) {
+		int next_chunk_search_idx;
+
+		if (used) {
+			p_chunk_len = rte_fbarray_find_contig_used(p_arr,
+					p_idx);
+			l_chunk_len = rte_fbarray_find_contig_used(l_arr,
+					p_idx);
+		} else {
+			p_chunk_len = rte_fbarray_find_contig_free(p_arr,
+					p_idx);
+			l_chunk_len = rte_fbarray_find_contig_free(l_arr,
+					p_idx);
+		}
+		/* best case scenario - no differences (or bigger, which will be
+		 * fixed during next iteration), look for next chunk
+		 */
+		if (l_chunk_len >= p_chunk_len) {
+			next_chunk_search_idx = p_idx + p_chunk_len;
+			goto next_chunk;
+		}
+
+		/* if both chunks start at the same point, skip parts we know
+		 * are identical, and sync the rest. each call to sync_chunk
+		 * will only sync contiguous segments, so we need to call this
+		 * until we are sure there are no more differences in this
+		 * chunk.
+		 */
+		start = p_idx + l_chunk_len;
+		end = p_idx + p_chunk_len;
+		do {
+			ret = sync_chunk(primary_msl, local_msl, hi, msl_idx,
+					used, start, end);
+			start += ret;
+		} while (start < end && ret >= 0);
+		/* if ret is negative, something went wrong */
+		if (ret < 0)
+			return -1;
+
+		next_chunk_search_idx = p_idx + p_chunk_len;
+next_chunk:
+		/* skip to end of this chunk */
+		if (used) {
+			p_idx = rte_fbarray_find_next_used(p_arr,
+					next_chunk_search_idx);
+		} else {
+			p_idx = rte_fbarray_find_next_free(p_arr,
+					next_chunk_search_idx);
+		}
+	}
+	return 0;
+}
+
+static int
+sync_existing(struct rte_memseg_list *primary_msl,
+		struct rte_memseg_list *local_msl, struct hugepage_info *hi,
+		unsigned int msl_idx)
+{
+	int ret, dir_fd;
+
+	/* TODO dpdk-1808
+	 * do not allow any page allocations during the time we're allocating,
+	 * because file creation and locking operations are not atomic,
+	 * and we might be the first or the last ones to use a particular page,
+	 * so we need to ensure atomicity of every operation.
+	 */
+
+
+	/* ensure all allocated space is the same in both lists */
+	ret = sync_status(primary_msl, local_msl, hi, msl_idx, true);
+	if (ret < 0)
+		goto fail;
+
+	/* ensure all unallocated space is the same in both lists */
+	ret = sync_status(primary_msl, local_msl, hi, msl_idx, false);
+	if (ret < 0)
+		goto fail;
+
+	/* update version number */
+	local_msl->version = primary_msl->version;
+
+	/* TODO dpdk-1808 Unlock and close the directory*/
+
+	return 0;
+fail:
+	/* TODO dpdk-1808 Unlock and close the directory*/
+	return -1;
+}
+
+static int
+sync_walk(const struct rte_memseg_list *msl, void *arg __rte_unused)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	struct rte_memseg_list *primary_msl, *local_msl;
+	struct hugepage_info *hi = NULL;
+	unsigned int i;
+	int msl_idx;
+
+	msl_idx = msl - mcfg->memsegs;
+	primary_msl = &mcfg->memsegs[msl_idx];
+	local_msl = &local_memsegs[msl_idx];
+
+	for (i = 0; i < RTE_DIM(internal_config.hugepage_info); i++) {
+		uint64_t cur_sz =
+			internal_config.hugepage_info[i].hugepage_sz;
+		uint64_t msl_sz = primary_msl->page_sz;
+		if (msl_sz == cur_sz) {
+			hi = &internal_config.hugepage_info[i];
+			break;
+		}
+	}
+	if (!hi) {
+		RTE_LOG(ERR, EAL, "Can't find relevant hugepage_info entry\n");
+		return -1;
+	}
+
+	/* if versions don't match, synchronize everything */
+	if (local_msl->version != primary_msl->version &&
+			sync_existing(primary_msl, local_msl, hi, msl_idx))
+		return -1;
+	return 0;
+}
+
+
+int
+eal_memalloc_sync_with_primary(void)
+{
+	/* nothing to be done in primary */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		return 0;
+
+	/* memalloc is locked, so it's safe to call thread-unsafe version */
+	if (rte_memseg_list_walk_thread_unsafe(sync_walk, NULL))
+		return -1;
+	return 0;
+}
+
+static int
+secondary_msl_create_walk(const struct rte_memseg_list *msl,
+		void *arg __rte_unused)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	struct rte_memseg_list *primary_msl, *local_msl;
+	char name[PATH_MAX];
+	int msl_idx, ret;
+
+	msl_idx = msl - mcfg->memsegs;
+	primary_msl = &mcfg->memsegs[msl_idx];
+	local_msl = &local_memsegs[msl_idx];
+
+	/* create distinct fbarrays for each secondary */
+	snprintf(name, RTE_FBARRAY_NAME_LEN, "%s_%i",
+		primary_msl->memseg_arr.name, _getpid());
+
+	ret = rte_fbarray_init(&local_msl->memseg_arr, name,
+		primary_msl->memseg_arr.len,
+		primary_msl->memseg_arr.elt_sz);
+	if (ret < 0) {
+		RTE_LOG(ERR, EAL, "Cannot initialize local memory map\n");
+		return -1;
+	}
+	local_msl->base_va = primary_msl->base_va;
+
+	return 0;
+}
+
+static int
+secondary_lock_list_create_walk(const struct rte_memseg_list *msl,
+		void *arg __rte_unused)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	unsigned int i, len;
+	int msl_idx;
+	int *data;
+
+	msl_idx = msl - mcfg->memsegs;
+	len = msl->memseg_arr.len;
+
+	/* ensure we have space to store lock fd per each possible segment */
+	data = malloc(sizeof(int) * len);
+	if (data == NULL) {
+		RTE_LOG(ERR, EAL, "Unable to allocate space for lock descriptors\n");
+		return -1;
+	}
+	/* set all fd's as invalid */
+	for (i = 0; i < len; i++)
+		data[i] = -1;
+
+	fd_list[msl_idx].fds = data;
+	fd_list[msl_idx].len = len;
+	fd_list[msl_idx].count = 0;
+	fd_list[msl_idx].memseg_list_fd = -1;
+
+	return 0;
+}
+
+int
+eal_memalloc_init(void)
+{
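+	/* secondary processes mirror the primary's memseg lists into local
+	 * fbarrays (see secondary_msl_create_walk())
+	 */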
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		if (rte_memseg_list_walk(secondary_msl_create_walk, NULL) < 0)
+			return -1;
+
+	/* initialize all of the lock fd lists */
+	if (internal_config.single_file_segments)
+		if (rte_memseg_list_walk(secondary_lock_list_create_walk, NULL))
+			return -1;
+	return 0;
+}
+
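+/* return the fd backing a segment, or -ENODEV if the fd list is not
+ * initialized or no fd has been recorded for this segment
+ */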
+int
+eal_memalloc_get_seg_fd(int list_idx, int seg_idx)
+{
+	int fd;
+	if (internal_config.single_file_segments) {
+		fd = fd_list[list_idx].memseg_list_fd;
+	} else if (fd_list[list_idx].len == 0) {
+		/* list not initialized */
+		fd = -1;
+	} else {
+		fd = fd_list[list_idx].fds[seg_idx];
+	}
+	if (fd < 0)
+		return -ENODEV;
+	return fd;
+}
+
+int
+eal_memalloc_get_seg_fd_offset(int list_idx, int seg_idx, size_t *offset)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+
+	/* fd_list not initialized? */
+	if (fd_list[list_idx].len == 0)
+		return -ENODEV;
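+
+	/* in single-file-segments mode every segment of the list shares one
+	 * fd, so the offset is page_sz * seg_idx; otherwise each segment has
+	 * its own fd and the offset within it is always zero.
+	 */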
+	if (internal_config.single_file_segments) {
+		size_t pgsz = mcfg->memsegs[list_idx].page_sz;
+
+		/* segment not active? */
+		if (fd_list[list_idx].memseg_list_fd < 0)
+			return -ENOENT;
+		*offset = pgsz * seg_idx;
+	} else {
+		/* segment not active? */
+		if (fd_list[list_idx].fds[seg_idx] < 0)
+			return -ENOENT;
+		*offset = 0;
+	}
+	return 0;
+}
\ No newline at end of file
diff --git a/lib/librte_eal/windows/eal/eal_memory.c b/lib/librte_eal/windows/eal/eal_memory.c
new file mode 100644
index 000000000..9077f5e9e
--- /dev/null
+++ b/lib/librte_eal/windows/eal/eal_memory.c
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+#include <sys/mman.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <inttypes.h>
+#include <fcntl.h>
+
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_log.h>
+#include <rte_string_fns.h>
+#include <rte_fbarray.h>
+#include <rte_errno.h>
+#include "eal_private.h"
+#include "eal_internal_cfg.h"
+#include "eal_filesystem.h"
+
+#define EAL_PAGE_SIZE	    (getpagesize())
+
+/*
+ * Get physical address of any mapped virtual address in the current process.
+ */
+phys_addr_t
+rte_mem_virt2iova(const void *virtaddr)
+{
+	/* This function is only used by rte_mempool_virt2phy() when hugepages
+	 * are disabled: get the global configuration and derive the physical
+	 * address from the offset within the first memseg list.
+	 */
+	phys_addr_t physaddr;
+	struct rte_memseg *ms;
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+
+	if (mcfg == NULL)
+		return RTE_BAD_PHYS_ADDR;
+
+	ms = rte_fbarray_get(&mcfg->memsegs[0].memseg_arr, 0);
+	if (ms == NULL)
+		return RTE_BAD_PHYS_ADDR;
+
+	physaddr = (phys_addr_t)((uintptr_t)ms->phys_addr +
+			RTE_PTR_DIFF(virtaddr, mcfg->memsegs[0].base_va));
+	return physaddr;
+}
+
+int
+rte_eal_hugepage_init(void)
+{
+	/* We have already initialized our memory whilst in the
+	 * rte_pci_scan() call. Simply return here.
+	*/
+	return 0;
+}
+
+int
+rte_eal_hugepage_attach(void)
+{
+	/* This function is called if our process is a secondary process
+	 * and we need to attach to existing memory that has already
+	 * been allocated.
+	 * It has not been implemented on Windows
+	 */
+	return 0;
+}
+
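+/* reserve the full virtual address range covered by a memseg list, so a
+ * secondary process can attach segments at the primary's addresses
+ */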
+static int
+alloc_va_space(struct rte_memseg_list *msl)
+{
+	uint64_t page_sz;
+	size_t mem_sz;
+	void *addr;
+	int flags = 0;
+
+#ifdef RTE_ARCH_PPC_64
+	flags |= MAP_HUGETLB;
+#endif
+
+	page_sz = msl->page_sz;
+	mem_sz = page_sz * msl->memseg_arr.len;
+
+	addr = eal_get_virtual_area(msl->base_va, &mem_sz, page_sz, 0, flags);
+	if (addr == NULL) {
+		if (rte_errno == EADDRNOTAVAIL)
+			RTE_LOG(ERR, EAL, "Could not mmap %llu bytes at [%p] - please use '--base-virtaddr' option\n",
+			(unsigned long long)mem_sz, msl->base_va);
+		else
+			RTE_LOG(ERR, EAL, "Cannot reserve memory\n");
+		return -1;
+	}
+	msl->base_va = addr;
+
+	return 0;
+}
+
+static int
+memseg_primary_init(void)
+{
+	/*
+	 * Primary memory has already been initialized in store_memseg_info().
+	 * Keeping the stub function for integration with common code.
+	 */
+
+	return 0;
+}
+
+static int
+memseg_secondary_init(void)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	int msl_idx = 0;
+	struct rte_memseg_list *msl;
+
+	for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
+
+		msl = &mcfg->memsegs[msl_idx];
+
+		/* skip empty memseg lists */
+		if (msl->memseg_arr.len == 0)
+			continue;
+
+		if (rte_fbarray_attach(&msl->memseg_arr)) {
+			RTE_LOG(ERR, EAL, "Cannot attach to primary process memseg lists\n");
+			return -1;
+		}
+
+		/* preallocate VA space */
+		if (alloc_va_space(msl)) {
+			RTE_LOG(ERR, EAL, "Cannot preallocate VA space for hugepage memory\n");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+
+int
+rte_eal_memseg_init(void)
+{
+	return rte_eal_process_type() == RTE_PROC_PRIMARY ?
+			memseg_primary_init() : memseg_secondary_init();
+}
diff --git a/lib/librte_eal/windows/eal/eal_proc.c b/lib/librte_eal/windows/eal/eal_proc.c
new file mode 100644
index 000000000..773bef46d
--- /dev/null
+++ b/lib/librte_eal/windows/eal/eal_proc.c
@@ -0,0 +1,1003 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation
+ */
+
+#include <winsock2.h>
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <Intsafe.h>
+#include <unistd.h>
+
+#include <rte_alarm.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_eal.h>
+#include <rte_errno.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_tailq.h>
+
+#include "eal_private.h"
+#include "eal_filesystem.h"
+#include "eal_internal_cfg.h"
+
+static SOCKET mp_fd = INVALID_SOCKET;
+static char mp_filter[PATH_MAX];   /* Filter for secondary process sockets */
+static char mp_dir_path[PATH_MAX]; /* The directory path for all mp sockets */
+static INIT_ONCE initOnce_Lock = INIT_ONCE_STATIC_INIT;
+static INIT_ONCE initOnce_mp_mutex_action = INIT_ONCE_STATIC_INIT;
+static pthread_mutex_t mp_mutex_action;
+
+struct action_entry {
+	TAILQ_ENTRY(action_entry) next;
+	char action_name[RTE_MP_MAX_NAME_LEN];
+	rte_mp_t action;
+};
+
+/** Double linked list of actions. */
+TAILQ_HEAD(action_entry_list, action_entry);
+
+static struct action_entry_list action_entry_list =
+	TAILQ_HEAD_INITIALIZER(action_entry_list);
+
+enum mp_type {
+	MP_MSG, /* Share message with peers, will not block */
+	MP_REQ, /* Request for information, Will block for a reply */
+	MP_REP, /* Response to previously-received request */
+	MP_IGN, /* Response telling requester to ignore this response */
+};
+
+struct mp_msg_internal {
+	int type;
+	struct rte_mp_msg msg;
+};
+
+struct async_request_param {
+	rte_mp_async_reply_t clb;
+	struct rte_mp_reply user_reply;
+	struct timespec end;
+	int n_responses_processed;
+};
+
+struct pending_request {
+	TAILQ_ENTRY(pending_request) next;
+	enum {
+		REQUEST_TYPE_SYNC,
+		REQUEST_TYPE_ASYNC
+	} type;
+	char dst[PATH_MAX];
+	struct rte_mp_msg *request;
+	struct rte_mp_msg *reply;
+	int reply_received;
+	RTE_STD_C11
+	union {
+		struct {
+			struct async_request_param *param;
+		} async;
+		struct {
+			CONDITION_VARIABLE cond;
+		} sync;
+	};
+};
+
+TAILQ_HEAD(pending_request_list, pending_request);
+
+static struct {
+	struct pending_request_list requests;
+	HANDLE  lock;
+} pending_requests = {
+	.requests = TAILQ_HEAD_INITIALIZER(pending_requests.requests)
+	/* .lock is used in async requests only */
+};
+
+/* forward declarations */
+static int
+mp_send(struct rte_mp_msg *msg, const char *peer, int type);
+
+/* for use with alarm callback */
+static void
+async_reply_handle(void *arg);
+
+/* for use with process_msg */
+static struct pending_request *
+async_reply_handle_thread_unsafe(void *arg);
+
+static void
+trigger_async_action(struct pending_request *req);
+
+static struct pending_request *
+find_pending_request(const char *dst, const char *act_name)
+{
+	struct pending_request *r;
+
+	TAILQ_FOREACH(r, &pending_requests.requests, next) {
+		if (!strcmp(r->dst, dst) &&
+		    !strcmp(r->request->name, act_name))
+			break;
+	}
+
+	return r;
+}
+
+static void
+create_socket_path(const char *name, char *buf, int len)
+{
+	const char *prefix = eal_mp_socket_path();
+
+	if (strlen(name) > 0)
+		snprintf(buf, len, "%s_%s", prefix, name);
+	else
+		strlcpy(buf, prefix, len);
+}
+
+int
+rte_eal_primary_proc_alive(const char *config_file_path)
+{
+	int config_fd;
+
+	if (config_file_path)
+		config_fd = _open(config_file_path, O_RDONLY);
+	else {
+		const char *path;
+
+		path = eal_runtime_config_path();
+		config_fd = open(path, O_RDONLY);
+	}
+	if (config_fd < 0)
+		return 0;
+
+	int ret = 0;
+	/*lockf(config_fd, F_TEST, 0);*/
+	_close(config_fd);
+
+	return !!ret;
+}
+
+static struct action_entry *
+find_action_entry_by_name(const char *name)
+{
+	struct action_entry *entry;
+
+	TAILQ_FOREACH(entry, &action_entry_list, next) {
+		if (strncmp(entry->action_name, name, RTE_MP_MAX_NAME_LEN) == 0)
+			break;
+	}
+
+	return entry;
+}
+
+static int
+validate_action_name(const char *name)
+{
+	if (name == NULL) {
+		RTE_LOG(ERR, EAL, "Action name cannot be NULL\n");
+		rte_errno = EINVAL;
+		return -1;
+	}
+	if (strnlen(name, RTE_MP_MAX_NAME_LEN) == 0) {
+		RTE_LOG(ERR, EAL, "Length of action name is zero\n");
+		rte_errno = EINVAL;
+		return -1;
+	}
+	if (strnlen(name, RTE_MP_MAX_NAME_LEN) == RTE_MP_MAX_NAME_LEN) {
+		rte_errno = E2BIG;
+		return -1;
+	}
+	return 0;
+}
+
+int __rte_experimental
+rte_mp_action_register(const char *name, rte_mp_t action)
+{
+	struct action_entry *entry;
+
+	if (validate_action_name(name))
+		return -1;
+
+	entry = malloc(sizeof(struct action_entry));
+	if (entry == NULL) {
+		rte_errno = ENOMEM;
+		return -1;
+	}
+	strlcpy(entry->action_name, name, sizeof(entry->action_name));
+	entry->action = action;
+
+	pthread_mutex_lock(&mp_mutex_action);
+	if (find_action_entry_by_name(name) != NULL) {
+		pthread_mutex_unlock(&mp_mutex_action);
+		rte_errno = EEXIST;
+		free(entry);
+		return -1;
+	}
+	TAILQ_INSERT_TAIL(&action_entry_list, entry, next);
+	pthread_mutex_unlock(&mp_mutex_action);
+	return 0;
+}
+
+void __rte_experimental
+rte_mp_action_unregister(const char *name)
+{
+	struct action_entry *entry;
+
+	if (validate_action_name(name))
+		return;
+
+	pthread_mutex_lock(&mp_mutex_action);
+	entry = find_action_entry_by_name(name);
+	if (entry == NULL) {
+		pthread_mutex_unlock(&mp_mutex_action);
+		return;
+	}
+	TAILQ_REMOVE(&action_entry_list, entry, next);
+	pthread_mutex_unlock(&mp_mutex_action);
+	free(entry);
+}
+
+static int
+read_msg(struct mp_msg_internal *m, struct sockaddr *s)
+{
+	/* Multi-process workflow is not supported in the Windows implementation */
+	return 0;
+}
+
+static void
+process_msg(struct mp_msg_internal *m, struct sockaddr_in *s)
+{
+	struct pending_request *pending_req;
+	struct action_entry *entry;
+	struct rte_mp_msg *msg = &m->msg;
+	rte_mp_t action = NULL;
+
+	RTE_LOG(DEBUG, EAL, "msg: %s\n", msg->name);
+
+	if (m->type == MP_REP || m->type == MP_IGN) {
+		struct pending_request *req = NULL;
+
+		pending_requests.lock = WinCreateAndLockStaticMutex(
+				pending_requests.lock, &initOnce_Lock);
+		pending_req = find_pending_request(s->sun_path, msg->name);
+		if (pending_req) {
+			memcpy(pending_req->reply, msg, sizeof(*msg));
+			/* -1 indicates that we've been asked to ignore */
+			pending_req->reply_received =
+				m->type == MP_REP ? 1 : -1;
+
+			if (pending_req->type == REQUEST_TYPE_SYNC)
+				pthread_cond_signal(&pending_req->sync.cond);
+			else if (pending_req->type == REQUEST_TYPE_ASYNC)
+				req = async_reply_handle_thread_unsafe(
+						pending_req);
+		} else
+			RTE_LOG(ERR, EAL, "Drop mp reply: %s\n", msg->name);
+		ReleaseMutex(pending_requests.lock);
+
+		if (req != NULL)
+			trigger_async_action(req);
+		return;
+	}
+
+	pthread_mutex_lock(&mp_mutex_action);
+	entry = find_action_entry_by_name(msg->name);
+	if (entry != NULL)
+		action = entry->action;
+	pthread_mutex_unlock(&mp_mutex_action);
+
+	if (!action) {
+		if (m->type == MP_REQ && !internal_config.init_complete) {
+			/* if this is a request, and init is not yet complete,
+			 * and callback wasn't registered, we should tell the
+			 * requester to ignore our existence because we're not
+			 * yet ready to process this request.
+			 */
+			struct rte_mp_msg dummy;
+
+			memset(&dummy, 0, sizeof(dummy));
+			strlcpy(dummy.name, msg->name, sizeof(dummy.name));
+			/*mp_send(&dummy, s->sun_path, MP_IGN);*/
+		} else {
+			RTE_LOG(ERR, EAL, "Cannot find action: %s\n",
+				msg->name);
+		}
+	} /*else if (action(msg, s->sun_path) < 0) {
+		RTE_LOG(ERR, EAL, "Fail to handle message: %s\n", msg->name);
+	}*/
+}
+
+static void *
+mp_handle(void *arg __rte_unused)
+{
+	struct mp_msg_internal msg;
+	struct sockaddr sa;
+
+	while (1) {
+		if (read_msg(&msg, &sa) == 0)
+			process_msg(&msg, &sa);
+	}
+
+	return NULL;
+}
+
+static int
+timespec_cmp(const struct timespec *a, const struct timespec *b)
+{
+	if (a->tv_sec < b->tv_sec)
+		return -1;
+	if (a->tv_sec > b->tv_sec)
+		return 1;
+	if (a->tv_nsec < b->tv_nsec)
+		return -1;
+	if (a->tv_nsec > b->tv_nsec)
+		return 1;
+	return 0;
+}
+
+enum async_action {
+	ACTION_FREE, /**< free the action entry, but don't trigger callback */
+	ACTION_TRIGGER /**< trigger callback, then free action entry */
+};
+
+static enum async_action
+process_async_request(struct pending_request *sr, const struct timespec *now)
+{
+	struct async_request_param *param;
+	struct rte_mp_reply *reply;
+	bool timeout, last_msg;
+
+	param = sr->async.param;
+	reply = &param->user_reply;
+
+	/* did we timeout? */
+	timeout = timespec_cmp(&param->end, now) <= 0;
+
+	/* if we received a response, adjust relevant data and copy message. */
+	if (sr->reply_received == 1 && sr->reply) {
+		struct rte_mp_msg *msg, *user_msgs, *tmp;
+
+		msg = sr->reply;
+		user_msgs = reply->msgs;
+
+		tmp = realloc(user_msgs, sizeof(*msg) *
+				(reply->nb_received + 1));
+		if (!tmp) {
+			RTE_LOG(ERR, EAL, "Fail to alloc reply for request %s:%s\n",
+				sr->dst, sr->request->name);
+			/* this entry is going to be removed and its message
+			 * dropped, but we don't want to leak memory, so
+			 * continue.
+			 */
+		} else {
+			user_msgs = tmp;
+			reply->msgs = user_msgs;
+			memcpy(&user_msgs[reply->nb_received],
+					msg, sizeof(*msg));
+			reply->nb_received++;
+		}
+
+		/* mark this request as processed */
+		param->n_responses_processed++;
+	} else if (sr->reply_received == -1) {
+		/* we were asked to ignore this process */
+		reply->nb_sent--;
+	} else if (timeout) {
+		/* count it as processed response, but don't increment
+		 * nb_received.
+		 */
+		param->n_responses_processed++;
+	}
+
+	free(sr->reply);
+
+	last_msg = param->n_responses_processed == reply->nb_sent;
+
+	return last_msg ? ACTION_TRIGGER : ACTION_FREE;
+}
+
+static void
+trigger_async_action(struct pending_request *sr)
+{
+	struct async_request_param *param;
+	struct rte_mp_reply *reply;
+
+	param = sr->async.param;
+	reply = &param->user_reply;
+
+	param->clb(sr->request, reply);
+
+	/* clean up */
+	free(sr->async.param->user_reply.msgs);
+	free(sr->async.param);
+	free(sr->request);
+	free(sr);
+}
+
+static struct pending_request *
+async_reply_handle_thread_unsafe(void *arg)
+{
+	struct pending_request *req = (struct pending_request *)arg;
+	enum async_action action;
+	struct timespec ts_now;
+
+	ts_now.tv_nsec = 0;
+	ts_now.tv_sec = 0;
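+	/* TODO dpdk-1808: use the real current time; both this value and
+	 * param->end are currently zero, so the timeout comparison in
+	 * process_async_request() is not meaningful.
+	 */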
+
+	action = process_async_request(req, &ts_now);
+
+	TAILQ_REMOVE(&pending_requests.requests, req, next);
+
+	if (rte_eal_alarm_cancel(async_reply_handle, req) < 0) {
+		/* if we failed to cancel the alarm because it's already in
+		 * progress, don't proceed because otherwise we will end up
+		 * handling the same message twice.
+		 */
+		if (rte_errno == EINPROGRESS) {
+			RTE_LOG(DEBUG, EAL, "Request handling is already in progress\n");
+			goto no_trigger;
+		}
+		RTE_LOG(ERR, EAL, "Failed to cancel alarm\n");
+	}
+
+	if (action == ACTION_TRIGGER)
+		return req;
+no_trigger:
+	free(req);
+	return NULL;
+}
+
+static void
+async_reply_handle(void *arg)
+{
+	struct pending_request *req;
+
+	pending_requests.lock = WinCreateAndLockStaticMutex(pending_requests.lock, &initOnce_Lock);
+	req = async_reply_handle_thread_unsafe(arg);
+	ReleaseMutex(pending_requests.lock);
+
+	if (req != NULL)
+		trigger_async_action(req);
+}
+
+static int
+open_socket_fd(void)
+{
+	char peer_name[PATH_MAX] = {0};
+	struct sockaddr_in un;
+
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		snprintf(peer_name, sizeof(peer_name),
+				"%d_%"PRIx64, _getpid(), rte_rdtsc());
+
+	mp_fd = socket(AF_UNIX, SOCK_DGRAM, 0);
+	if (mp_fd == INVALID_SOCKET) {
+		RTE_LOG(ERR, EAL, "failed to create unix socket\n");
+		return -1;
+	}
+
+	memset(&un, 0, sizeof(un));
+	un.sin_family = AF_UNIX;
+
+	create_socket_path(peer_name, un.sin_path, sizeof(un.sin_path));
+
+	unlink(un.sun_path); /* May still exist since last run */
+
+	if (bind(mp_fd, (struct sockaddr *)&un, sizeof(un)) < 0) {
+		RTE_LOG(ERR, EAL, "failed to bind %s: %s\n",
+			un.sun_path, strerror(errno));
+		close(mp_fd);
+		return -1;
+	}
+
+	RTE_LOG(INFO, EAL, "Multi-process socket %u\n", un.sin_port);
+	return mp_fd;
+}
+
+static int
+unlink_sockets(const char *filter)
+{
+	int dir_fd;
+	DIR *mp_dir;
+	struct dirent *ent;
+
+	mp_dir = opendir(mp_dir_path);
+	if (!mp_dir) {
+		RTE_LOG(ERR, EAL, "Unable to open directory %s\n", mp_dir_path);
+		return -1;
+	}
+	dir_fd = dirfd(mp_dir);
+
+	while ((ent = readdir(mp_dir))) {
+		if (fnmatch(filter, ent->d_name, 0) == 0)
+			unlinkat(dir_fd, ent->d_name, 0);
+	}
+
+	closedir(mp_dir);
+	return 0;
+}
+
+int
+rte_mp_channel_init(void)
+{
+	char path[PATH_MAX];
+	int dir_fd = -1;
+	pthread_t mp_handle_tid;
+
+	/* in no shared files mode, we do not have secondary processes support,
+	 * so no need to initialize IPC.
+	 */
+	if (internal_config.no_shconf) {
+		RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC will be disabled\n");
+		return 0;
+	}
+
+	/* create filter path */
+	create_socket_path("*", path, sizeof(path));
+	strlcpy(mp_filter, basename(path), sizeof(mp_filter));
+
+	/* path may have been modified, so recreate it */
+	create_socket_path("*", path, sizeof(path));
+	strlcpy(mp_dir_path, dirname(path), sizeof(mp_dir_path));
+
+	/* TODO dpdk-1808: open the mp_dir_path directory with O_RDONLY */
+
+	/* TODO dpdk-1808: lock the mp_dir_path directory with an exclusive lock */
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
+			unlink_sockets(mp_filter)) {
+		RTE_LOG(ERR, EAL, "failed to unlink mp sockets\n");
+		close(dir_fd);
+		return -1;
+	}
+
+	if (open_socket_fd() < 0) {
+		close(dir_fd);
+		return -1;
+	}
+
+	if (rte_ctrl_thread_create(&mp_handle_tid, "rte_mp_handle",
+			NULL, mp_handle, NULL) < 0) {
+		RTE_LOG(ERR, EAL, "failed to create mp thread: %s\n",
+			strerror(errno));
+		close(mp_fd);
+		close(dir_fd);
+		mp_fd = -1;
+		return -1;
+	}
+
+	/* TODO dpdk-1808 unlock and close the mp_dir_path directory */
+
+	return 0;
+}
+
+/**
+ * Return -1 if sending the message failed and the failure was on the local
+ * side, 0 if the failure was on the remote side, and 1 if the message was
+ * sent successfully.
+ */
+static int
+send_msg(const char *dst_path, struct rte_mp_msg *msg, int type)
+{
+	/* Multi-process workflow is not supported in the Windows implementation */
+
+	return 1;
+}
+
+static int
+mp_send(struct rte_mp_msg *msg, const char *peer, int type)
+{
+	int dir_fd, ret = 0;
+	DIR *mp_dir;
+	struct dirent *ent;
+
+	if (!peer && (rte_eal_process_type() == RTE_PROC_SECONDARY))
+		peer = eal_mp_socket_path();
+
+	if (peer) {
+		if (send_msg(peer, msg, type) < 0)
+			return -1;
+		else
+			return 0;
+	}
+
+	/* broadcast to all secondary processes */
+	mp_dir = opendir(mp_dir_path);
+	if (!mp_dir) {
+		RTE_LOG(ERR, EAL, "Unable to open directory %s\n",
+				mp_dir_path);
+		rte_errno = errno;
+		return -1;
+	}
+
+	/* TODO dpdk-1808 lock the directory to prevent processes spinning up while we send */
+
+
+	while ((ent = readdir(mp_dir))) {
+		char path[PATH_MAX];
+
+		if (fnmatch(mp_filter, ent->d_name, 0) != 0)
+			continue;
+
+		snprintf(path, sizeof(path), "%s/%s", mp_dir_path,
+			 ent->d_name);
+		if (send_msg(path, msg, type) < 0)
+			ret = -1;
+	}
+	/* TODO dpdk-1808 unlock the dir */
+
+	/* dir_fd automatically closed on closedir */
+	closedir(mp_dir);
+	return ret;
+}
+
+static bool
+check_input(const struct rte_mp_msg *msg)
+{
+	if (msg == NULL) {
+		RTE_LOG(ERR, EAL, "Msg cannot be NULL\n");
+		rte_errno = EINVAL;
+		return false;
+	}
+
+	if (validate_action_name(msg->name))
+		return false;
+
+	if (msg->len_param > RTE_MP_MAX_PARAM_LEN) {
+		RTE_LOG(ERR, EAL, "Message data is too long\n");
+		rte_errno = E2BIG;
+		return false;
+	}
+
+	if (msg->num_fds > RTE_MP_MAX_FD_NUM) {
+		RTE_LOG(ERR, EAL, "Cannot send more than %d FDs\n",
+			RTE_MP_MAX_FD_NUM);
+		rte_errno = E2BIG;
+		return false;
+	}
+
+	return true;
+}
+
+int __rte_experimental
+rte_mp_sendmsg(struct rte_mp_msg *msg)
+{
+	if (!check_input(msg))
+		return -1;
+
+	RTE_LOG(DEBUG, EAL, "sendmsg: %s\n", msg->name);
+	return mp_send(msg, NULL, MP_MSG);
+}
+
+static int
+mp_request_async(const char *dst, struct rte_mp_msg *req,
+		struct async_request_param *param, const struct timespec *ts)
+{
+	struct rte_mp_msg *reply_msg;
+	struct pending_request *pending_req, *exist;
+	int ret;
+
+	pending_req = calloc(1, sizeof(*pending_req));
+	reply_msg = calloc(1, sizeof(*reply_msg));
+	if (pending_req == NULL || reply_msg == NULL) {
+		RTE_LOG(ERR, EAL, "Could not allocate space for sync request\n");
+		rte_errno = ENOMEM;
+		ret = -1;
+		goto fail;
+	}
+
+	pending_req->type = REQUEST_TYPE_ASYNC;
+	strlcpy(pending_req->dst, dst, sizeof(pending_req->dst));
+	pending_req->request = req;
+	pending_req->reply = reply_msg;
+	pending_req->async.param = param;
+
+	/* queue already locked by caller */
+
+	exist = find_pending_request(dst, req->name);
+	if (exist) {
+		RTE_LOG(ERR, EAL, "A pending request %s:%s\n", dst, req->name);
+		rte_errno = EEXIST;
+		ret = -1;
+		goto fail;
+	}
+
+	ret = send_msg(dst, req, MP_REQ);
+	if (ret < 0) {
+		RTE_LOG(ERR, EAL, "Fail to send request %s:%s\n",
+			dst, req->name);
+		ret = -1;
+		goto fail;
+	} else if (ret == 0) {
+		ret = 0;
+		goto fail;
+	}
+	TAILQ_INSERT_TAIL(&pending_requests.requests, pending_req, next);
+
+	param->user_reply.nb_sent++;
+
+	if (rte_eal_alarm_set(ts->tv_sec * 1000000 + ts->tv_nsec / 1000,
+			      async_reply_handle, pending_req) < 0) {
+		RTE_LOG(ERR, EAL, "Fail to set alarm for request %s:%s\n",
+			dst, req->name);
+		rte_panic("Fix the above shit to properly free all memory\n");
+	}
+
+	return 0;
+fail:
+	free(pending_req);
+	free(reply_msg);
+	return ret;
+}
+
+static int
+mp_request_sync(const char *dst, struct rte_mp_msg *req,
+	       struct rte_mp_reply *reply, const struct timespec *ts)
+{
+	int ret;
+	struct rte_mp_msg msg, *tmp;
+	struct pending_request pending_req, *exist;
+
+	pending_req.type = REQUEST_TYPE_SYNC;
+	pending_req.reply_received = 0;
+	strlcpy(pending_req.dst, dst, sizeof(pending_req.dst));
+	pending_req.request = req;
+	pending_req.reply = &msg;
+	pthread_cond_init(&pending_req.sync.cond, NULL);
+
+	exist = find_pending_request(dst, req->name);
+	if (exist) {
+		RTE_LOG(ERR, EAL, "A pending request %s:%s\n", dst, req->name);
+		rte_errno = EEXIST;
+		return -1;
+	}
+
+	ret = send_msg(dst, req, MP_REQ);
+	if (ret < 0) {
+		RTE_LOG(ERR, EAL, "Fail to send request %s:%s\n",
+			dst, req->name);
+		return -1;
+	} else if (ret == 0)
+		return 0;
+
+	TAILQ_INSERT_TAIL(&pending_requests.requests, &pending_req, next);
+
+	reply->nb_sent++;
+
+	do {
+		ret = pthread_cond_timedwait(&pending_req.sync.cond,
+				&pending_requests.lock, ts);
+	} while (ret != 0 && ret != ETIMEDOUT);
+
+	TAILQ_REMOVE(&pending_requests.requests, &pending_req, next);
+
+	if (pending_req.reply_received == 0) {
+		RTE_LOG(ERR, EAL, "Fail to recv reply for request %s:%s\n",
+			dst, req->name);
+		rte_errno = ETIMEDOUT;
+		return -1;
+	}
+	if (pending_req.reply_received == -1) {
+		RTE_LOG(DEBUG, EAL, "Asked to ignore response\n");
+		/* not receiving this message is not an error, so decrement
+		 * number of sent messages
+		 */
+		reply->nb_sent--;
+		return 0;
+	}
+
+	tmp = realloc(reply->msgs, sizeof(msg) * (reply->nb_received + 1));
+	if (!tmp) {
+		RTE_LOG(ERR, EAL, "Fail to alloc reply for request %s:%s\n",
+			dst, req->name);
+		rte_errno = ENOMEM;
+		return -1;
+	}
+	memcpy(&tmp[reply->nb_received], &msg, sizeof(msg));
+	reply->msgs = tmp;
+	reply->nb_received++;
+	return 0;
+}
+
+int __rte_experimental
+rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
+		const struct timespec *ts)
+{
+	int ret = 0;
+	struct timespec end;
+
+	RTE_LOG(DEBUG, EAL, "request: %s\n", req->name);
+
+	if (check_input(req) == false)
+		return -1;
+
+	if (internal_config.no_shconf) {
+		RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
+		return 0;
+	}
+
+
+	end.tv_nsec = ts->tv_nsec % 1000000000;
+	end.tv_sec = ts->tv_sec +
+			(ts->tv_nsec / 1000000000);
+
+	reply->nb_sent = 0;
+	reply->nb_received = 0;
+	reply->msgs = NULL;
+
+	/* for secondary process, send request to the primary process only */
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		pending_requests.lock = WinCreateAndLockStaticMutex(
+				pending_requests.lock, &initOnce_Lock);
+		ret = mp_request_sync(eal_mp_socket_path(), req, reply, &end);
+		ReleaseMutex(pending_requests.lock);
+		return ret;
+	}
+
+
+	return ret;
+}
+
+int __rte_experimental
+rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts,
+		rte_mp_async_reply_t clb)
+{
+	struct rte_mp_msg *copy;
+	struct pending_request *dummy;
+	struct async_request_param *param;
+	struct rte_mp_reply *reply;
+	int dir_fd, ret = 0;
+	DIR *mp_dir;
+	struct dirent *ent;
+	/*struct timeval now;*/
+	struct timespec *end;
+	bool dummy_used = false;
+
+	RTE_LOG(DEBUG, EAL, "request: %s\n", req->name);
+
+	if (check_input(req) == false)
+		return -1;
+
+	if (internal_config.no_shconf) {
+		RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
+		return 0;
+	}
+
+	copy = calloc(1, sizeof(*copy));
+	dummy = calloc(1, sizeof(*dummy));
+	param = calloc(1, sizeof(*param));
+	if (copy == NULL || dummy == NULL || param == NULL) {
+		RTE_LOG(ERR, EAL, "Failed to allocate memory for async reply\n");
+		rte_errno = ENOMEM;
+		goto fail;
+	}
+
+	/* copy message */
+	memcpy(copy, req, sizeof(*copy));
+
+	param->n_responses_processed = 0;
+	param->clb = clb;
+	/*end = &param->end;*/
+	reply = &param->user_reply;
+
+	reply->nb_sent = 0;
+	reply->nb_received = 0;
+	reply->msgs = NULL;
+
+	/* we have to lock the request queue here, as we will be adding a bunch
+	 * of requests to the queue at once, and some of the replies may arrive
+	 * before we add all of the requests to the queue.
+	 */
+	pending_requests.lock = WinCreateAndLockStaticMutex(pending_requests.lock,&initOnce_Lock);
+
+	/* we have to ensure that callback gets triggered even if we don't send
+	 * anything, therefore earlier we have allocated a dummy request. fill
+	 * it, and put it on the queue if we don't send any requests.
+	 */
+	dummy->type = REQUEST_TYPE_ASYNC;
+	dummy->request = copy;
+	dummy->reply = NULL;
+	dummy->async.param = param;
+	dummy->reply_received = 1; /* short-circuit the timeout */
+
+	/* for secondary process, send request to the primary process only */
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		ret = mp_request_async(eal_mp_socket_path(), copy, param, ts);
+
+		/* if we didn't send anything, put dummy request on the queue */
+		if (ret == 0 && reply->nb_sent == 0) {
+			TAILQ_INSERT_TAIL(&pending_requests.requests, dummy,
+					next);
+			dummy_used = true;
+		}
+
+		ReleaseMutex(pending_requests.lock);
+
+		/* if we couldn't send anything, clean up */
+		if (ret != 0)
+			goto fail;
+		return 0;
+	}
+
+	/* for primary process, broadcast request */
+	mp_dir = opendir(mp_dir_path);
+	if (!mp_dir) {
+		RTE_LOG(ERR, EAL, "Unable to open directory %s\n", mp_dir_path);
+		rte_errno = errno;
+		goto unlock_fail;
+	}
+	dir_fd = dirfd(mp_dir);
+	/* TODO dpdk-1808 blocking writelock */
+
+	/* TODO dpdk-1808 lock the directory to prevent processes spinning up while we send */
+
+	while ((ent = readdir(mp_dir))) {
+		char path[PATH_MAX];
+
+		if (fnmatch(mp_filter, ent->d_name, 0) != 0)
+			continue;
+
+		snprintf(path, sizeof(path), "%s/%s", mp_dir_path,
+			 ent->d_name);
+
+		if (mp_request_async(path, copy, param, ts))
+			ret = -1;
+	}
+	/* if we didn't send anything, put dummy request on the queue */
+	if (ret == 0 && reply->nb_sent == 0) {
+		TAILQ_INSERT_HEAD(&pending_requests.requests, dummy, next);
+		dummy_used = true;
+	}
+
+	/* finally, unlock the queue */
+	ReleaseMutex(pending_requests.lock);
+
+	/* TODO dpdk-1808 unlock the directory */
+
+	/* dir_fd automatically closed on closedir */
+	closedir(mp_dir);
+
+	/* if dummy was unused, free it */
+	if (!dummy_used)
+		free(dummy);
+
+	return ret;
+closedir_fail:
+	closedir(mp_dir);
+unlock_fail:
+	ReleaseMutex(pending_requests.lock);
+fail:
+	free(dummy);
+	free(param);
+	free(copy);
+	return -1;
+}
+
+int __rte_experimental
+rte_mp_reply(struct rte_mp_msg *msg, const char *peer)
+{
+	RTE_LOG(DEBUG, EAL, "reply: %s\n", msg->name);
+
+	if (check_input(msg) == false)
+		return -1;
+
+	if (peer == NULL) {
+		RTE_LOG(ERR, EAL, "peer is not specified\n");
+		rte_errno = EINVAL;
+		return -1;
+	}
+
+	if (internal_config.no_shconf) {
+		RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
+		return 0;
+	}
+
+	return mp_send(msg, peer, MP_REP);
+}
diff --git a/lib/librte_eal/windows/eal/eal_thread.c b/lib/librte_eal/windows/eal/eal_thread.c
new file mode 100644
index 000000000..1a82eb4a2
--- /dev/null
+++ b/lib/librte_eal/windows/eal/eal_thread.c
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+#include <unistd.h>
+
+#include <rte_debug.h>
+#include <rte_atomic.h>
+#include <rte_log.h>
+#include <rte_memzone.h>
+#include <rte_per_lcore.h>
+
+#include "eal_private.h"
+#include "eal_thread.h"
+
+RTE_DEFINE_PER_LCORE(unsigned, _lcore_id) = LCORE_ID_ANY;
+RTE_DEFINE_PER_LCORE(unsigned, _socket_id) = (unsigned)SOCKET_ID_ANY;
+RTE_DEFINE_PER_LCORE(rte_cpuset_t, _cpuset);
+
+/*
+ * Send a message to a slave lcore identified by slave_id to call a
+ * function f with argument arg. Once the execution is done, the
+ * remote lcore switches to the FINISHED state.
+ */
+int
+rte_eal_remote_launch(int (*f)(void *), void *arg, unsigned slave_id)
+{
+	int n;
+	char c = 0;
+	int m2s = lcore_config[slave_id].pipe_master2slave[1];
+	int s2m = lcore_config[slave_id].pipe_slave2master[0];
+
+	if (lcore_config[slave_id].state != WAIT)
+		return -EBUSY;
+
+	lcore_config[slave_id].f = f;
+	lcore_config[slave_id].arg = arg;
+
+	/* send message */
+	n = 0;
+	while (n == 0 || (n < 0 && errno == EINTR))
+		n = write(m2s, &c, 1);
+	if (n < 0)
+		rte_panic("cannot write on configuration pipe\n");
+
+	/* wait ack */
+	do {
+		n = read(s2m, &c, 1);
+	} while (n < 0 && errno == EINTR);
+
+	if (n <= 0)
+		rte_panic("cannot read on configuration pipe\n");
+
+	return 0;
+}
+
+/* set affinity for current EAL thread */
+static int
+eal_thread_set_affinity(void)
+{
+	unsigned lcore_id = rte_lcore_id();
+
+	/* acquire system unique id  */
+	rte_gettid();
+
+	/* update EAL thread core affinity */
+	return rte_thread_set_affinity(&lcore_config[lcore_id].cpuset);
+}
+
+void eal_thread_init_master(unsigned lcore_id)
+{
+	/* set the lcore ID in per-lcore memory area */
+	RTE_PER_LCORE(_lcore_id) = lcore_id;
+
+	/* set CPU affinity */
+	if (eal_thread_set_affinity() < 0)
+		rte_panic("cannot set affinity\n");
+}
+
+/* main loop of threads */
+void *
+eal_thread_loop(void *arg)
+{
+	char c;
+	int n, ret;
+	unsigned lcore_id;
+	pthread_t thread_id;
+	int m2s, s2m;
+	char cpuset[RTE_CPU_AFFINITY_STR_LEN];
+
+	memset((void *)cpuset, 0, sizeof(cpuset));
+
+	thread_id = pthread_self();
+
+	/* retrieve our lcore_id from the configuration structure */
+	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+		if (thread_id == lcore_config[lcore_id].thread_id)
+			break;
+	}
+	if (lcore_id == RTE_MAX_LCORE)
+		rte_panic("cannot retrieve lcore id\n");
+
+	m2s = lcore_config[lcore_id].pipe_master2slave[0];
+	s2m = lcore_config[lcore_id].pipe_slave2master[1];
+
+	/* set the lcore ID in per-lcore memory area */
+	RTE_PER_LCORE(_lcore_id) = lcore_id;
+
+	/* set CPU affinity */
+	if (eal_thread_set_affinity() < 0)
+		rte_panic("cannot set affinity\n");
+
+	ret = eal_thread_dump_affinity(cpuset, RTE_CPU_AFFINITY_STR_LEN);
+
+	RTE_LOG(DEBUG, EAL, "lcore %u is ready (tid=%x;cpuset=[%s%s])\n",
+		lcore_id, (int)thread_id, cpuset, ret == 0 ? "" : "...");
+
+	/* read on our pipe to get commands */
+	while (1) {
+		void *fct_arg;
+
+		/* wait command */
+		do {
+			n = read(m2s, &c, 1);
+		} while (n < 0 && errno == EINTR);
+
+		if (n <= 0)
+			rte_panic("cannot read on configuration pipe\n");
+
+		lcore_config[lcore_id].state = RUNNING;
+
+		/* send ack */
+		n = 0;
+		while (n == 0 || (n < 0 && errno == EINTR))
+			n = write(s2m, &c, 1);
+		if (n < 0)
+			rte_panic("cannot write on configuration pipe\n");
+
+		if (lcore_config[lcore_id].f == NULL)
+			rte_panic("NULL function pointer\n");
+
+		/* call the function and store the return value */
+		fct_arg = lcore_config[lcore_id].arg;
+		ret = lcore_config[lcore_id].f(fct_arg);
+		lcore_config[lcore_id].ret = ret;
+		rte_wmb();
+		lcore_config[lcore_id].state = FINISHED;
+	}
+
+	/* never reached */
+	/* pthread_exit(NULL); */
+	/* return NULL; */
+}
+
+/* require calling thread tid by gettid() */
+int rte_sys_gettid(void)
+{
+	return 0; /* no gettid()/SYS_gettid equivalent is wired up on Windows yet */
+}
+
+int rte_thread_setname(pthread_t id, const char *name)
+{
+	RTE_SET_USED(id);
+	RTE_SET_USED(name);
+	return 0;
+}
\ No newline at end of file
diff --git a/lib/librte_eal/windows/eal/eal_timer.c b/lib/librte_eal/windows/eal/eal_timer.c
new file mode 100644
index 000000000..3bf57b053
--- /dev/null
+++ b/lib/librte_eal/windows/eal/eal_timer.c
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+#include <rte_windows.h>
+
+#include <rte_cycles.h>
+
+#include "eal_private.h"
+
+enum timer_source eal_timer_source;
+
+uint64_t
+get_tsc_freq_arch(void)
+{
+	/* This function is not supported on Windows */
+	return 0;
+}
+
+uint64_t
+get_tsc_freq(void)
+{
+	uint64_t tsc_freq;
+	LARGE_INTEGER Frequency;
+
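+	/* QueryPerformanceFrequency() reports performance-counter ticks per
+	 * second, not the TSC rate; the 1K scaling below is an approximation.
+	 */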
+	QueryPerformanceFrequency(&Frequency);
+	/* multiply by 1K to obtain the true frequency of the CPU */
+	tsc_freq = ((uint64_t)Frequency.QuadPart * 1024);
+
+	return tsc_freq;
+}
+
+int
+rte_eal_timer_init(void)
+{
+	eal_timer_source = EAL_TIMER_TSC;
+
+	set_tsc_freq();
+	return 0;
+}
diff --git a/lib/librte_eal/windows/eal/linux-emu/_rand48.c b/lib/librte_eal/windows/eal/linux-emu/_rand48.c
new file mode 100644
index 000000000..1694c19e1
--- /dev/null
+++ b/lib/librte_eal/windows/eal/linux-emu/_rand48.c
@@ -0,0 +1,46 @@
+/*
+* Copyright (c) 1993 Martin Birgmeier
+* All rights reserved.
+*
+* You may redistribute unmodified or modified versions of this source
+* code provided that the above copyright notice and this and the
+* following conditions are retained.
+*
+* This software is provided ``as is'', and comes with no warranties
+* of any kind. I shall in no event be liable for anything that happens
+* to anyone/anything when using this software.
+*/
+
+#include "rand48.h"
+
+unsigned short _rand48_seed[3] = {
+	RAND48_SEED_0,
+	RAND48_SEED_1,
+	RAND48_SEED_2
+};
+unsigned short _rand48_mult[3] = {
+	RAND48_MULT_0,
+	RAND48_MULT_1,
+	RAND48_MULT_2
+};
+unsigned short _rand48_add = RAND48_ADD;
+
+void
+_dorand48(unsigned short xseed[3])
+{
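+	/* one step of the 48-bit LCG: xseed = mult * xseed + add (mod 2^48),
+	 * computed in 16-bit limbs with explicit carry propagation
+	 */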
+	unsigned long accu;
+	unsigned short temp[2];
+
+	accu = (unsigned long)_rand48_mult[0] * (unsigned long)xseed[0] +
+		(unsigned long)_rand48_add;
+	temp[0] = (unsigned short)accu;	/* lower 16 bits */
+	accu >>= sizeof(unsigned short) * 8;
+	accu += (unsigned long)_rand48_mult[0] * (unsigned long)xseed[1] +
+		(unsigned long)_rand48_mult[1] * (unsigned long)xseed[0];
+	temp[1] = (unsigned short)accu;	/* middle 16 bits */
+	accu >>= sizeof(unsigned short) * 8;
+	accu += _rand48_mult[0] * xseed[2] + _rand48_mult[1] * xseed[1] + _rand48_mult[2] * xseed[0];
+	xseed[0] = temp[0];
+	xseed[1] = temp[1];
+	xseed[2] = (unsigned short)accu;
+}
diff --git a/lib/librte_eal/windows/eal/linux-emu/drand48.c b/lib/librte_eal/windows/eal/linux-emu/drand48.c
new file mode 100644
index 000000000..fec311d3a
--- /dev/null
+++ b/lib/librte_eal/windows/eal/linux-emu/drand48.c
@@ -0,0 +1,62 @@
+#include <math.h>	/* for ldexp() used by erand48() */
+
+#define RAND48_SEED_0 (0x330e)
+#define RAND48_SEED_1 (0xabcd)
+#define RAND48_SEED_2 (0x1234)
+#define RAND48_MULT_0 (0xe66d)
+#define RAND48_MULT_1 (0xdeec)
+#define RAND48_MULT_2 (0x0005)
+#define RAND48_ADD (0x000b)
+
+unsigned short _rand48_seed[3] = {
+	RAND48_SEED_0,
+	RAND48_SEED_1,
+	RAND48_SEED_2
+};
+unsigned short _rand48_mult[3] = {
+	RAND48_MULT_0,
+	RAND48_MULT_1,
+	RAND48_MULT_2
+};
+unsigned short _rand48_add = RAND48_ADD;
+
+void
+_dorand48(unsigned short xseed[3])
+{
+	unsigned long accu;
+	unsigned short temp[2];
+
+	accu = (unsigned long)_rand48_mult[0] * (unsigned long)xseed[0] +
+		(unsigned long)_rand48_add;
+	temp[0] = (unsigned short)accu;        /* lower 16 bits */
+	accu >>= sizeof(unsigned short) * 8;
+	accu += (unsigned long)_rand48_mult[0] * (unsigned long)xseed[1] +
+		(unsigned long)_rand48_mult[1] * (unsigned long)xseed[0];
+	temp[1] = (unsigned short)accu;        /* middle 16 bits */
+	accu >>= sizeof(unsigned short) * 8;
+	accu += _rand48_mult[0] * xseed[2] + _rand48_mult[1] * xseed[1] + _rand48_mult[2] * xseed[0];
+	xseed[0] = temp[0];
+	xseed[1] = temp[1];
+	xseed[2] = (unsigned short)accu;
+}
+
+double
+erand48(unsigned short xseed[3])
+{
+	_dorand48(xseed);
+	return ldexp((double)xseed[0], -48) +
+		ldexp((double)xseed[1], -32) +
+		ldexp((double)xseed[2], -16);
+}
+
+double
+drand48(void)
+{
+	return erand48(_rand48_seed);
+}
+
+void
+srand48(long seed)
+{
+	_rand48_seed[0] = RAND48_SEED_0;
+	_rand48_seed[1] = (unsigned short)seed;
+	_rand48_seed[2] = (unsigned short)(seed >> 16);
+	_rand48_mult[0] = RAND48_MULT_0;
+	_rand48_mult[1] = RAND48_MULT_1;
+	_rand48_mult[2] = RAND48_MULT_2;
+	_rand48_add = RAND48_ADD;
+}
diff --git a/lib/librte_eal/windows/eal/linux-emu/fork.c b/lib/librte_eal/windows/eal/linux-emu/fork.c
new file mode 100644
index 000000000..55bd96087
--- /dev/null
+++ b/lib/librte_eal/windows/eal/linux-emu/fork.c
@@ -0,0 +1,111 @@
+/*
+* fork.c
+* Experimental fork() on Windows.  Requires NT 6 subsystem or
+* newer.
+*
+* Copyright (c) 2012 William Pitcock <nenolod@dereferenced.org>
+*
+* Permission to use, copy, modify, and/or distribute this software for any
+* purpose with or without fee is hereby granted, provided that the above
+* copyright notice and this permission notice appear in all copies.
+*
+* This software is provided 'as is' and without any warranty, express or
+* implied.  In no event shall the authors be liable for any damages arising
+* from the use of this software.
+*/
+
+//#define _WIN32_WINNT 0x0600
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <winnt.h>
+#include <winternl.h>
+#include <stdio.h>
+#include <errno.h>
+#include <assert.h>
+#include <process.h>
+
+typedef struct _SECTION_IMAGE_INFORMATION {
+	PVOID EntryPoint;
+	ULONG StackZeroBits;
+	ULONG StackReserved;
+	ULONG StackCommit;
+	ULONG ImageSubsystem;
+	WORD SubSystemVersionLow;
+	WORD SubSystemVersionHigh;
+	ULONG Unknown1;
+	ULONG ImageCharacteristics;
+	ULONG ImageMachineType;
+	ULONG Unknown2[3];
+} SECTION_IMAGE_INFORMATION, *PSECTION_IMAGE_INFORMATION;
+
+typedef struct _RTL_USER_PROCESS_INFORMATION {
+	ULONG Size;
+	HANDLE Process;
+	HANDLE Thread;
+	CLIENT_ID ClientId;
+	SECTION_IMAGE_INFORMATION ImageInformation;
+} RTL_USER_PROCESS_INFORMATION, *PRTL_USER_PROCESS_INFORMATION;
+
+#define RTL_CLONE_PROCESS_FLAGS_CREATE_SUSPENDED	0x00000001
+#define RTL_CLONE_PROCESS_FLAGS_INHERIT_HANDLES		0x00000002
+#define RTL_CLONE_PROCESS_FLAGS_NO_SYNCHRONIZE		0x00000004
+
+#define RTL_CLONE_PARENT				0
+#define RTL_CLONE_CHILD					297
+
+typedef DWORD pid_t;
+
+typedef NTSTATUS(*RtlCloneUserProcess_f)(ULONG ProcessFlags,
+	PSECURITY_DESCRIPTOR ProcessSecurityDescriptor /* optional */,
+	PSECURITY_DESCRIPTOR ThreadSecurityDescriptor /* optional */,
+	HANDLE DebugPort /* optional */,
+	PRTL_USER_PROCESS_INFORMATION ProcessInformation);
+
+pid_t fork(void)
+{
+	HMODULE mod;
+	RtlCloneUserProcess_f clone_p;
+	RTL_USER_PROCESS_INFORMATION process_info;
+	NTSTATUS result;
+
+	mod = GetModuleHandleA("ntdll.dll");
+	if (!mod)
+		return -ENOSYS;
+
+	clone_p = (RtlCloneUserProcess_f)GetProcAddress(mod, "RtlCloneUserProcess");
+	if (clone_p == NULL)
+		return -ENOSYS;
+
+	/* clone the current process; the call returns in both parent and child */
+	result = clone_p(RTL_CLONE_PROCESS_FLAGS_CREATE_SUSPENDED | RTL_CLONE_PROCESS_FLAGS_INHERIT_HANDLES, NULL, NULL, NULL, &process_info);
+
+	if (result == RTL_CLONE_PARENT)
+	{
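+		/* parent: the child was created suspended, so resume its main
+		 * thread, drop our handles and return the child pid
+		 */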
+		HANDLE me, hp, ht, hcp = 0;
+		DWORD pi, ti, mi;
+		me = GetCurrentProcess();
+		pi = (DWORD)process_info.ClientId.UniqueProcess;
+		ti = (DWORD)process_info.ClientId.UniqueThread;
+
+		hp = OpenProcess(PROCESS_ALL_ACCESS, FALSE, pi);
+		ht = OpenThread(THREAD_ALL_ACCESS, FALSE, ti);
+		assert(hp);
+		assert(ht);
+
+		ResumeThread(ht);
+		CloseHandle(ht);
+		CloseHandle(hp);
+		return (pid_t)pi;
+	}
+	else if (result == RTL_CLONE_CHILD)
+	{
+		/* fix stdio */
+		AllocConsole();
+		return 0;
+	}
+	else
+		return -1;
+
+	/* NOTREACHED */
+	return -1;
+}
diff --git a/lib/librte_eal/windows/eal/linux-emu/getopt.c b/lib/librte_eal/windows/eal/linux-emu/getopt.c
new file mode 100644
index 000000000..8383e85ae
--- /dev/null
+++ b/lib/librte_eal/windows/eal/linux-emu/getopt.c
@@ -0,0 +1,407 @@
+#include <getopt.h>
+
+/* defined later in this file; declared here because getopt() is defined first */
+static int getopt_internal(int nargc, char * const *nargv, const char *options,
+	const struct option *long_options, int *idx, int flags);
+
+#ifdef REPLACE_GETOPT
+int	opterr = 1;		/* if error message should be printed */
+int	optind = 1;		/* index into parent argv vector */
+int	optopt = '?';		/* character checked for validity */
+#undef	optreset		/* see getopt.h */
+#define	optreset		__mingw_optreset
+int	optreset;		/* reset getopt */
+char    *optarg;		/* argument associated with option */
+#endif
+
+static char *place = EMSG; /* option letter processing */
+
+/* XXX: set optreset to 1 rather than these two */
+static int nonopt_start = -1; /* first non option argument (for permute) */
+static int nonopt_end = -1;   /* first option after non options (for permute) */
+
+/* Error messages */
+static const char recargchar[] = "option requires an argument -- %c";
+static const char recargstring[] = "option requires an argument -- %s";
+static const char ambig[] = "ambiguous option -- %.*s";
+static const char noarg[] = "option doesn't take an argument -- %.*s";
+static const char illoptchar[] = "unknown option -- %c";
+static const char illoptstring[] = "unknown option -- %s";
+
+/*
+* parse_long_options --
+*	Parse long options in argc/argv argument vector.
+* Returns -1 if short_too is set and the option does not match long_options.
+*/
+static int
+parse_long_options(char * const *nargv, const char *options,
+	const struct option *long_options, int *idx, int short_too)
+{
+	char *current_argv, *has_equal;
+	size_t current_argv_len;
+	int i, ambiguous, match;
+
+#define IDENTICAL_INTERPRETATION(_x, _y)                                \
+	(long_options[(_x)].has_arg == long_options[(_y)].has_arg &&    \
+	 long_options[(_x)].flag == long_options[(_y)].flag &&          \
+	 long_options[(_x)].val == long_options[(_y)].val)
+
+	current_argv = place;
+	match = -1;
+	ambiguous = 0;
+
+	optind++;
+
+	if ((has_equal = strchr(current_argv, '=')) != NULL) {
+		/* argument found (--option=arg) */
+		current_argv_len = has_equal - current_argv;
+		has_equal++;
+	}
+	else
+		current_argv_len = strlen(current_argv);
+
+	for (i = 0; long_options[i].name; i++) {
+		/* find matching long option */
+		if (strncmp(current_argv, long_options[i].name,
+			current_argv_len))
+			continue;
+
+		if (strlen(long_options[i].name) == current_argv_len) {
+			/* exact match */
+			match = i;
+			ambiguous = 0;
+			break;
+		}
+		/*
+		* If this is a known short option, don't allow
+		* a partial match of a single character.
+		*/
+		if (short_too && current_argv_len == 1)
+			continue;
+
+		if (match == -1)	/* partial match */
+			match = i;
+		else if (!IDENTICAL_INTERPRETATION(i, match))
+			ambiguous = 1;
+	}
+	if (ambiguous) {
+		/* ambiguous abbreviation */
+		if (PRINT_ERROR)
+			warnx(ambig, (int)current_argv_len,
+				current_argv);
+		optopt = 0;
+		return (BADCH);
+	}
+	if (match != -1) {		/* option found */
+		if (long_options[match].has_arg == no_argument
+			&& has_equal) {
+			if (PRINT_ERROR)
+				warnx(noarg, (int)current_argv_len,
+					current_argv);
+			/*
+			* XXX: GNU sets optopt to val regardless of flag
+			*/
+			if (long_options[match].flag == NULL)
+				optopt = long_options[match].val;
+			else
+				optopt = 0;
+			return (BADARG);
+		}
+		if (long_options[match].has_arg == required_argument ||
+			long_options[match].has_arg == optional_argument) {
+			if (has_equal)
+				optarg = has_equal;
+			else if (long_options[match].has_arg ==
+				required_argument) {
+				/*
+				* optional argument doesn't use next nargv
+				*/
+				optarg = nargv[optind++];
+			}
+		}
+		if ((long_options[match].has_arg == required_argument)
+			&& (optarg == NULL)) {
+			/*
+			* Missing argument; leading ':' indicates no error
+			* should be generated.
+			*/
+			if (PRINT_ERROR)
+				warnx(recargstring,
+					current_argv);
+			/*
+			* XXX: GNU sets optopt to val regardless of flag
+			*/
+			if (long_options[match].flag == NULL)
+				optopt = long_options[match].val;
+			else
+				optopt = 0;
+			--optind;
+			return (BADARG);
+		}
+	}
+	else {			/* unknown option */
+		if (short_too) {
+			--optind;
+			return (-1);
+		}
+		if (PRINT_ERROR)
+			warnx(illoptstring, current_argv);
+		optopt = 0;
+		return (BADCH);
+	}
+	if (idx)
+		*idx = match;
+	if (long_options[match].flag) {
+		*long_options[match].flag = long_options[match].val;
+		return (0);
+	}
+	else
+		return (long_options[match].val);
+#undef IDENTICAL_INTERPRETATION
+}
+
+#ifdef REPLACE_GETOPT
+/*
+* getopt --
+*	Parse argc/argv argument vector.
+*
+* [eventually this will replace the BSD getopt]
+*/
+int
+getopt(int nargc, char * const *nargv, const char *options)
+{
+
+	/*
+	* We don't pass FLAG_PERMUTE to getopt_internal() since
+	* the BSD getopt(3) (unlike GNU) has never done this.
+	*
+	* Furthermore, since many privileged programs call getopt()
+	* before dropping privileges it makes sense to keep things
+	* as simple (and bug-free) as possible.
+	*/
+	return (getopt_internal(nargc, nargv, options, NULL, NULL, 0));
+}
+#endif /* REPLACE_GETOPT */
+
+/*
+* getopt_internal --
+*	Parse argc/argv argument vector.  Called by user level routines.
+*/
+static int
+getopt_internal(int nargc, char * const *nargv, const char *options,
+	const struct option *long_options, int *idx, int flags)
+{
+	char *oli;				/* option letter list index */
+	int optchar, short_too;
+	static int posixly_correct = -1;
+
+	if (options == NULL)
+		return (-1);
+
+	/*
+	* XXX Some GNU programs (like cvs) set optind to 0 instead of
+	* XXX using optreset.  Work around this braindamage.
+	*/
+	if (optind == 0)
+		optind = optreset = 1;
+
+	/*
+	* Disable GNU extensions if POSIXLY_CORRECT is set or options
+	* string begins with a '+'.
+	*
+	* CV, 2009-12-14: Check POSIXLY_CORRECT anew if optind == 0 or
+	*                 optreset != 0 for GNU compatibility.
+	*/
+	if (posixly_correct == -1 || optreset != 0)
+		posixly_correct = (getenv("POSIXLY_CORRECT") != NULL);
+	if (*options == '-')
+		flags |= FLAG_ALLARGS;
+	else if (posixly_correct || *options == '+')
+		flags &= ~FLAG_PERMUTE;
+	if (*options == '+' || *options == '-')
+		options++;
+
+	optarg = NULL;
+	if (optreset)
+		nonopt_start = nonopt_end = -1;
+start:
+	if (optreset || !*place) {		/* update scanning pointer */
+		optreset = 0;
+		if (optind >= nargc) {          /* end of argument vector */
+			place = EMSG;
+			if (nonopt_end != -1) {
+				/* do permutation, if we have to */
+				permute_args(nonopt_start, nonopt_end,
+					optind, nargv);
+				optind -= nonopt_end - nonopt_start;
+			}
+			else if (nonopt_start != -1) {
+				/*
+				* If we skipped non-options, set optind
+				* to the first of them.
+				*/
+				optind = nonopt_start;
+			}
+			nonopt_start = nonopt_end = -1;
+			return (-1);
+		}
+		if (*(place = nargv[optind]) != '-' ||
+			(place[1] == '\0' && strchr(options, '-') == NULL)) {
+			place = EMSG;		/* found non-option */
+			if (flags & FLAG_ALLARGS) {
+				/*
+				* GNU extension:
+				* return non-option as argument to option 1
+				*/
+				optarg = nargv[optind++];
+				return (INORDER);
+			}
+			if (!(flags & FLAG_PERMUTE)) {
+				/*
+				* If no permutation wanted, stop parsing
+				* at first non-option.
+				*/
+				return (-1);
+			}
+			/* do permutation */
+			if (nonopt_start == -1)
+				nonopt_start = optind;
+			else if (nonopt_end != -1) {
+				permute_args(nonopt_start, nonopt_end,
+					optind, nargv);
+				nonopt_start = optind -
+					(nonopt_end - nonopt_start);
+				nonopt_end = -1;
+			}
+			optind++;
+			/* process next argument */
+			goto start;
+		}
+		if (nonopt_start != -1 && nonopt_end == -1)
+			nonopt_end = optind;
+
+		/*
+		* If we have "-" do nothing, if "--" we are done.
+		*/
+		if (place[1] != '\0' && *++place == '-' && place[1] == '\0') {
+			optind++;
+			place = EMSG;
+			/*
+			* We found an option (--), so if we skipped
+			* non-options, we have to permute.
+			*/
+			if (nonopt_end != -1) {
+				permute_args(nonopt_start, nonopt_end,
+					optind, nargv);
+				optind -= nonopt_end - nonopt_start;
+			}
+			nonopt_start = nonopt_end = -1;
+			return (-1);
+		}
+	}
+
+	/*
+	* Check long options if:
+	*  1) we were passed some
+	*  2) the arg is not just "-"
+	*  3) either the arg starts with -- we are getopt_long_only()
+	*/
+	if (long_options != NULL && place != nargv[optind] &&
+		(*place == '-' || (flags & FLAG_LONGONLY))) {
+		short_too = 0;
+		if (*place == '-')
+			place++;		/* --foo long option */
+		else if (*place != ':' && strchr(options, *place) != NULL)
+			short_too = 1;		/* could be short option too */
+
+		optchar = parse_long_options(nargv, options, long_options,
+			idx, short_too);
+		if (optchar != -1) {
+			place = EMSG;
+			return (optchar);
+		}
+	}
+
+	if ((optchar = (int)*place++) == (int)':' ||
+		(optchar == (int)'-' && *place != '\0') ||
+		(oli = (char*)strchr(options, optchar)) == NULL) {
+		/*
+		* If the user specified "-" and  '-' isn't listed in
+		* options, return -1 (non-option) as per POSIX.
+		* Otherwise, it is an unknown option character (or ':').
+		*/
+		if (optchar == (int)'-' && *place == '\0')
+			return (-1);
+		if (!*place)
+			++optind;
+		if (PRINT_ERROR)
+			warnx(illoptchar, optchar);
+		optopt = optchar;
+		return (BADCH);
+	}
+	if (long_options != NULL && optchar == 'W' && oli[1] == ';') {
+		/* -W long-option */
+		if (*place)			/* no space */
+			/* NOTHING */;
+		else if (++optind >= nargc) {	/* no arg */
+			place = EMSG;
+			if (PRINT_ERROR)
+				warnx(recargchar, optchar);
+			optopt = optchar;
+			return (BADARG);
+		}
+		else				/* white space */
+			place = nargv[optind];
+		optchar = parse_long_options(nargv, options, long_options,
+			idx, 0);
+		place = EMSG;
+		return (optchar);
+	}
+	if (*++oli != ':') {			/* doesn't take argument */
+		if (!*place)
+			++optind;
+	}
+	else {				/* takes (optional) argument */
+		optarg = NULL;
+		if (*place)			/* no white space */
+			optarg = place;
+		else if (oli[1] != ':') {	/* arg not optional */
+			if (++optind >= nargc) {	/* no arg */
+				place = EMSG;
+				if (PRINT_ERROR)
+					warnx(recargchar, optchar);
+				optopt = optchar;
+				return (BADARG);
+			}
+			else
+				optarg = nargv[optind];
+		}
+		place = EMSG;
+		++optind;
+	}
+	/* dump back option letter */
+	return (optchar);
+}
+
+
+/*
+* getopt_long --
+*	Parse argc/argv argument vector.
+*/
+int
+getopt_long(int nargc, char * const *nargv, const char *options,
+	const struct option *long_options, int *idx)
+{
+
+	return (getopt_internal(nargc, nargv, options, long_options, idx,
+		FLAG_PERMUTE));
+}
+
+/*
+* getopt_long_only --
+*	Parse argc/argv argument vector.
+*/
+int
+getopt_long_only(int nargc, char * const *nargv, const char *options,
+	const struct option *long_options, int *idx)
+{
+
+	return (getopt_internal(nargc, nargv, options, long_options, idx,
+		FLAG_PERMUTE | FLAG_LONGONLY));
+}
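
Note: for reference, a minimal sketch (not part of this patch) of how the
getopt_long() port above is expected to be driven; the option names and
handlers are hypothetical:

#include <stdio.h>

#include "getopt.h"

static const char short_opts[] = "c:nh";
static const struct option long_opts[] = {
	{ "coremask", required_argument, NULL, 'c' },
	{ "no-huge",  no_argument,       NULL, 'n' },
	{ "help",     no_argument,       NULL, 'h' },
	{ NULL, 0, NULL, 0 }
};

static int
parse_args(int argc, char **argv)
{
	int opt, opt_idx;

	while ((opt = getopt_long(argc, argv, short_opts,
			long_opts, &opt_idx)) != -1) {
		switch (opt) {
		case 'c':
			printf("coremask: %s\n", optarg);
			break;
		case 'n':
			printf("running without hugepages\n");
			break;
		case 'h':
		default:
			printf("usage: app [-c MASK] [--no-huge] [--help]\n");
			return -1;
		}
	}
	return 0;
}
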
diff --git a/lib/librte_eal/windows/eal/linux-emu/lrand48.c b/lib/librte_eal/windows/eal/linux-emu/lrand48.c
new file mode 100644
index 000000000..687d0f7b2
--- /dev/null
+++ b/lib/librte_eal/windows/eal/linux-emu/lrand48.c
@@ -0,0 +1,23 @@
+/*
+* Copyright (c) 1993 Martin Birgmeier
+* All rights reserved.
+*
+* You may redistribute unmodified or modified versions of this source
+* code provided that the above copyright notice and this and the
+* following conditions are retained.
+*
+* This software is provided ``as is'', and comes with no warranties
+* of any kind. I shall in no event be liable for anything that happens
+* to anyone/anything when using this software.
+*/
+
+#include "rand48.h"
+
+extern unsigned short _rand48_seed[3];
+
+long
+lrand48(void)
+{
+	_dorand48(_rand48_seed);
+	return ((long)_rand48_seed[2] << 15) + ((long)_rand48_seed[1] >> 1);
+}
diff --git a/lib/librte_eal/windows/eal/linux-emu/mman.c b/lib/librte_eal/windows/eal/linux-emu/mman.c
new file mode 100644
index 000000000..0a8d39038
--- /dev/null
+++ b/lib/librte_eal/windows/eal/linux-emu/mman.c
@@ -0,0 +1,179 @@
+#include <windows.h>
+#include <errno.h>
+#include <io.h>
+
+#include <sys/mman.h>
+
+#ifndef FILE_MAP_EXECUTE
+#define FILE_MAP_EXECUTE    0x0020
+#endif /* FILE_MAP_EXECUTE */
+
+static int __map_mman_error(const DWORD err, const int deferr)
+{
+	if (err == 0)
+		return 0;
+	/* TODO: map Win32 error codes to errno values */
+	return err;
+}
+
+static DWORD __map_mmap_prot_page(const int prot)
+{
+	DWORD protect = 0;
+
+	if (prot == PROT_NONE)
+		return protect;
+
+	if ((prot & PROT_EXEC) != 0)
+	{
+		protect = ((prot & PROT_WRITE) != 0) ?
+			PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ;
+	}
+	else
+	{
+		protect = ((prot & PROT_WRITE) != 0) ?
+			PAGE_READWRITE : PAGE_READONLY;
+	}
+
+	return protect;
+}
+
+static DWORD __map_mmap_prot_file(const int prot)
+{
+	DWORD desiredAccess = 0;
+
+	if (prot == PROT_NONE)
+		return desiredAccess;
+
+	if ((prot & PROT_READ) != 0)
+		desiredAccess |= FILE_MAP_READ;
+	if ((prot & PROT_WRITE) != 0)
+		desiredAccess |= FILE_MAP_WRITE;
+	if ((prot & PROT_EXEC) != 0)
+		desiredAccess |= FILE_MAP_EXECUTE;
+
+	return desiredAccess;
+}
+
+void* mmap(void *addr, size_t len, int prot, int flags, int fildes, OffsetType off)
+{
+	HANDLE fm, h;
+
+	void * map = MAP_FAILED;
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable: 4293)
+#endif
+
+	const DWORD dwFileOffsetLow = (sizeof(OffsetType) <= sizeof(DWORD)) ?
+		(DWORD)off : (DWORD)(off & 0xFFFFFFFFL);
+	const DWORD dwFileOffsetHigh = (sizeof(OffsetType) <= sizeof(DWORD)) ?
+		(DWORD)0 : (DWORD)((off >> 32) & 0xFFFFFFFFL);
+	const DWORD protect = __map_mmap_prot_page(prot);
+	const DWORD desiredAccess = __map_mmap_prot_file(prot);
+
+	const OffsetType maxSize = off + (OffsetType)len;
+
+	const DWORD dwMaxSizeLow = (sizeof(OffsetType) <= sizeof(DWORD)) ?
+		(DWORD)maxSize : (DWORD)(maxSize & 0xFFFFFFFFL);
+	const DWORD dwMaxSizeHigh = (sizeof(OffsetType) <= sizeof(DWORD)) ?
+		(DWORD)0 : (DWORD)((maxSize >> 32) & 0xFFFFFFFFL);
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+	errno = 0;
+
+	if (len == 0
+		/* Unsupported flag combinations */
+		|| (flags & MAP_FIXED) != 0
+		/* Unsupported protection combinations */
+		|| prot == PROT_EXEC)
+	{
+		errno = EINVAL;
+		return MAP_FAILED;
+	}
+
+	h = ((flags & MAP_ANONYMOUS) == 0) ?
+		(HANDLE)_get_osfhandle(fildes) : INVALID_HANDLE_VALUE;
+
+	if ((flags & MAP_ANONYMOUS) == 0 && h == INVALID_HANDLE_VALUE)
+	{
+		errno = EBADF;
+		return MAP_FAILED;
+	}
+
+	fm = CreateFileMapping(h, NULL, protect, dwMaxSizeHigh, dwMaxSizeLow, NULL);
+
+	if (fm == NULL)
+	{
+		errno = __map_mman_error(GetLastError(), EPERM);
+		return MAP_FAILED;
+	}
+
+	map = MapViewOfFile(fm, desiredAccess, dwFileOffsetHigh, dwFileOffsetLow, len);
+
+	CloseHandle(fm);
+
+	if (map == NULL)
+	{
+		errno = __map_mman_error(GetLastError(), EPERM);
+		return MAP_FAILED;
+	}
+
+	return map;
+}
+
+int munmap(void *addr, size_t len)
+{
+	if (UnmapViewOfFile(addr))
+		return 0;
+
+	errno = __map_mman_error(GetLastError(), EPERM);
+
+	return -1;
+}
+
+int _mprotect(void *addr, size_t len, int prot)
+{
+	DWORD newProtect = __map_mmap_prot_page(prot);
+	DWORD oldProtect = 0;
+
+	if (VirtualProtect(addr, len, newProtect, &oldProtect))
+		return 0;
+
+	errno = __map_mman_error(GetLastError(), EPERM);
+
+	return -1;
+}
+
+int msync(void *addr, size_t len, int flags)
+{
+	if (FlushViewOfFile(addr, len))
+		return 0;
+
+	errno = __map_mman_error(GetLastError(), EPERM);
+
+	return -1;
+}
+
+int mlock(const void *addr, size_t len)
+{
+	if (VirtualLock((LPVOID)addr, len))
+		return 0;
+
+	errno = __map_mman_error(GetLastError(), EPERM);
+
+	return -1;
+}
+
+int munlock(const void *addr, size_t len)
+{
+	if (VirtualUnlock((LPVOID)addr, len))
+		return 0;
+
+	errno = __map_mman_error(GetLastError(), EPERM);
+
+	return -1;
+}
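
Note: a minimal usage sketch for the mmap() shim above, creating an
anonymous read-write mapping (assumes the sys/mman.h override defines
MAP_PRIVATE and MAP_ANONYMOUS with their usual meanings; not part of this
patch):

#include <string.h>

#include <sys/mman.h>

static int
use_anonymous_mapping(void)
{
	size_t len = 1 << 20;	/* 1 MB */
	void *p;

	/* anonymous mappings are backed by the system paging file */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return -1;

	memset(p, 0, len);

	/* released through UnmapViewOfFile() */
	return munmap(p, len);
}
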
diff --git a/lib/librte_eal/windows/eal/linux-emu/setenv.c b/lib/librte_eal/windows/eal/linux-emu/setenv.c
new file mode 100644
index 000000000..bd54bfcb1
--- /dev/null
+++ b/lib/librte_eal/windows/eal/linux-emu/setenv.c
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+#include <stdlib.h>
+
+int setenv(const char *name, const char *value, int overwrite)
+{
+	char *curenv = NULL;
+	size_t len;
+
+	/* check whether the environment variable already exists;
+	 * _dupenv_s() leaves curenv NULL when the variable is not found.
+	 */
+	errno_t err = _dupenv_s(&curenv, &len, name);
+	int exists = (err == 0 && curenv != NULL);
+
+	/* free the allocated memory - it is okay to call free(NULL) */
+	free(curenv);
+
+	if (!exists || overwrite) {
+		char newval[128];
+
+		sprintf_s(newval, sizeof(newval), "%s=%s", name, value);
+		return _putenv(newval);
+	}
+
+	return 0;
+}
\ No newline at end of file
diff --git a/lib/librte_eal/windows/eal/linux-emu/srand48.c b/lib/librte_eal/windows/eal/linux-emu/srand48.c
new file mode 100644
index 000000000..071ef1df9
--- /dev/null
+++ b/lib/librte_eal/windows/eal/linux-emu/srand48.c
@@ -0,0 +1,30 @@
+/*
+* Copyright (c) 1993 Martin Birgmeier
+* All rights reserved.
+*
+* You may redistribute unmodified or modified versions of this source
+* code provided that the above copyright notice and this and the
+* following conditions are retained.
+*
+* This software is provided ``as is'', and comes with no warranties
+* of any kind. I shall in no event be liable for anything that happens
+* to anyone/anything when using this software.
+*/
+
+#include "rand48.h"
+
+extern unsigned short _rand48_seed[3];
+extern unsigned short _rand48_mult[3];
+extern unsigned short _rand48_add;
+
+void
+srand48(long seed)
+{
+	_rand48_seed[0] = RAND48_SEED_0;
+	_rand48_seed[1] = (unsigned short)seed;
+	_rand48_seed[2] = (unsigned short)(seed >> 16);
+	_rand48_mult[0] = RAND48_MULT_0;
+	_rand48_mult[1] = RAND48_MULT_1;
+	_rand48_mult[2] = RAND48_MULT_2;
+	_rand48_add = RAND48_ADD;
+}
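
Note: the rand48 emulation shares the 48-bit state declared in rand48.h;
a small sketch of the intended call sequence (assumes rand48.h declares
the srand48()/lrand48() prototypes; not part of this patch):

#include <stdio.h>

#include "rand48.h"

static void
rand48_demo(void)
{
	int i;

	/* reseed the shared _rand48_seed/_rand48_mult/_rand48_add state */
	srand48(42);

	/* each call advances the state and returns a value in [0, 2^31) */
	for (i = 0; i < 4; i++)
		printf("%ld\n", lrand48());
}
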
diff --git a/lib/librte_eal/windows/eal/linux-emu/termios.c b/lib/librte_eal/windows/eal/linux-emu/termios.c
new file mode 100644
index 000000000..d81a26f53
--- /dev/null
+++ b/lib/librte_eal/windows/eal/linux-emu/termios.c
@@ -0,0 +1,11 @@
+#include <sys/_termios.h>
+
+int tcgetattr(int fd, struct termios *t)
+{
+	return 0;
+}
+
+int tcsetattr(int fd, int opt, struct termios *t)
+{
+	return 0;
+}
\ No newline at end of file
diff --git a/lib/librte_eal/windows/eal/linux-emu/unistd.c b/lib/librte_eal/windows/eal/linux-emu/unistd.c
new file mode 100644
index 000000000..fa0eee9c9
--- /dev/null
+++ b/lib/librte_eal/windows/eal/linux-emu/unistd.c
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+#include <unistd.h>
+#include <windows.h>
+
+int getpagesize(void)
+{
+	SYSTEM_INFO si;
+
+	GetSystemInfo(&si);
+
+	return si.dwPageSize;
+}
+
+int getdtablesize(void)
+{
+	/* return OPEN_MAX (256) */
+	return 256;
+}
\ No newline at end of file
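
Note: a short sketch of how getpagesize() is typically combined with the
mmap() shim, rounding a request up to whole pages (illustrative only, not
part of this patch):

#include <unistd.h>
#include <sys/mman.h>

static void *
alloc_page_aligned(size_t len)
{
	size_t pgsz = (size_t)getpagesize();

	/* round the request up to a whole number of pages */
	len = (len + pgsz - 1) & ~(pgsz - 1);

	return mmap(NULL, len, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}
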
diff --git a/lib/librte_eal/windows/eal/malloc_heap.c b/lib/librte_eal/windows/eal/malloc_heap.c
new file mode 100644
index 000000000..62b4c39c2
--- /dev/null
+++ b/lib/librte_eal/windows/eal/malloc_heap.c
@@ -0,0 +1,1068 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+#include <stdint.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_memory.h>
+#include <rte_errno.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_launch.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_common.h>
+#include <rte_string_fns.h>
+#include <rte_spinlock.h>
+#include <rte_memcpy.h>
+#include <rte_atomic.h>
+#include <rte_fbarray.h>
+
+#include "eal_internal_cfg.h"
+#include "eal_memalloc.h"
+#include "malloc_elem.h"
+#include "malloc_heap.h"
+#include "malloc_mp.h"
+
+static unsigned
+check_hugepage_sz(unsigned flags, uint64_t hugepage_sz)
+{
+	unsigned check_flag = 0;
+
+	if (!(flags & ~RTE_MEMZONE_SIZE_HINT_ONLY))
+		return 1;
+
+	switch (hugepage_sz) {
+	case RTE_PGSIZE_256K:
+		check_flag = RTE_MEMZONE_256KB;
+		break;
+	case RTE_PGSIZE_2M:
+		check_flag = RTE_MEMZONE_2MB;
+		break;
+	case RTE_PGSIZE_16M:
+		check_flag = RTE_MEMZONE_16MB;
+		break;
+	case RTE_PGSIZE_256M:
+		check_flag = RTE_MEMZONE_256MB;
+		break;
+	case RTE_PGSIZE_512M:
+		check_flag = RTE_MEMZONE_512MB;
+		break;
+	case RTE_PGSIZE_1G:
+		check_flag = RTE_MEMZONE_1GB;
+		break;
+	}
+
+	return check_flag & flags;
+}
+
+/*
+ * Expand the heap with a memory area.
+ */
+static struct malloc_elem *
+malloc_heap_add_memory(struct malloc_heap *heap, struct rte_memseg_list *msl,
+		void *start, size_t len)
+{
+	struct malloc_elem *elem = start;
+
+	malloc_elem_init(elem, heap, msl, len);
+
+	malloc_elem_insert(elem);
+
+	elem = malloc_elem_join_adjacent_free(elem);
+
+	malloc_elem_free_list_insert(elem);
+
+	return elem;
+}
+
+static int
+malloc_add_seg(const struct rte_memseg_list *msl,
+		const struct rte_memseg *ms, size_t len, void *arg __rte_unused)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	struct rte_memseg_list *found_msl;
+	struct malloc_heap *heap;
+	int msl_idx;
+
+	heap = &mcfg->malloc_heaps[msl->socket_id];
+
+	/* msl is const, so find it */
+	msl_idx = msl - mcfg->memsegs;
+
+	if (msl_idx < 0 || msl_idx >= RTE_MAX_MEMSEG_LISTS)
+		return -1;
+
+	found_msl = &mcfg->memsegs[msl_idx];
+
+	malloc_heap_add_memory(heap, found_msl, ms->addr, len);
+
+	heap->total_size += len;
+
+	RTE_LOG(DEBUG, EAL, "Added %zuM to heap on socket %i\n", len >> 20,
+			msl->socket_id);
+	return 0;
+}
+
+/*
+ * Iterates through the freelist for a heap to find a free element
+ * which can store data of the required size and with the requested alignment.
+ * If size is 0, find the biggest available elem.
+ * Returns null on failure, or pointer to element on success.
+ */
+static struct malloc_elem *
+find_suitable_element(struct malloc_heap *heap, size_t size,
+		unsigned int flags, size_t align, size_t bound, bool contig)
+{
+	size_t idx;
+	struct malloc_elem *elem, *alt_elem = NULL;
+
+	for (idx = malloc_elem_free_list_index(size);
+			idx < RTE_HEAP_NUM_FREELISTS; idx++) {
+		for (elem = LIST_FIRST(&heap->free_head[idx]);
+				!!elem; elem = LIST_NEXT(elem, free_list)) {
+			if (malloc_elem_can_hold(elem, size, align, bound,
+					contig)) {
+				if (check_hugepage_sz(flags,
+						elem->msl->page_sz))
+					return elem;
+				if (alt_elem == NULL)
+					alt_elem = elem;
+			}
+		}
+	}
+
+	if ((alt_elem != NULL) && (flags & RTE_MEMZONE_SIZE_HINT_ONLY))
+		return alt_elem;
+
+	return NULL;
+}
+
+/*
+ * Iterates through the freelist for a heap to find a free element with the
+ * biggest size and requested alignment. Will also set size to whatever element
+ * size that was found.
+ * Returns null on failure, or pointer to element on success.
+ */
+static struct malloc_elem *
+find_biggest_element(struct malloc_heap *heap, size_t *size,
+		unsigned int flags, size_t align, bool contig)
+{
+	struct malloc_elem *elem, *max_elem = NULL;
+	size_t idx, max_size = 0;
+
+	for (idx = 0; idx < RTE_HEAP_NUM_FREELISTS; idx++) {
+		for (elem = LIST_FIRST(&heap->free_head[idx]);
+				!!elem; elem = LIST_NEXT(elem, free_list)) {
+			size_t cur_size;
+			if (!check_hugepage_sz(flags, elem->msl->page_sz))
+				continue;
+			if (contig) {
+				cur_size =
+					malloc_elem_find_max_iova_contig(elem,
+							align);
+			} else {
+				void *data_start = RTE_PTR_ADD(elem,
+						MALLOC_ELEM_HEADER_LEN);
+				void *data_end = RTE_PTR_ADD(elem, elem->size -
+						MALLOC_ELEM_TRAILER_LEN);
+				void *aligned = RTE_PTR_ALIGN_CEIL(data_start,
+						align);
+				/* check if aligned data start is beyond end */
+				if (aligned >= data_end)
+					continue;
+				cur_size = RTE_PTR_DIFF(data_end, aligned);
+			}
+			if (cur_size > max_size) {
+				max_size = cur_size;
+				max_elem = elem;
+			}
+		}
+	}
+
+	*size = max_size;
+	return max_elem;
+}
+
+/*
+ * Main function to allocate a block of memory from the heap.
+ * It locks the free list, scans it, and adds a new memseg if the
+ * scan fails. Once the new memseg is added, it re-scans and should return
+ * the new element after releasing the lock.
+ */
+static void *
+heap_alloc(struct malloc_heap *heap, const char *type __rte_unused, size_t size,
+		unsigned int flags, size_t align, size_t bound, bool contig)
+{
+	struct malloc_elem *elem;
+
+	size = RTE_CACHE_LINE_ROUNDUP(size);
+	align = RTE_CACHE_LINE_ROUNDUP(align);
+
+	elem = find_suitable_element(heap, size, flags, align, bound, contig);
+	if (elem != NULL) {
+		elem = malloc_elem_alloc(elem, size, align, bound, contig);
+
+		/* increase heap's count of allocated elements */
+		heap->alloc_count++;
+	}
+
+	return elem == NULL ? NULL : (void *)(&elem[1]);
+}
+
+static void *
+heap_alloc_biggest(struct malloc_heap *heap, const char *type __rte_unused,
+		unsigned int flags, size_t align, bool contig)
+{
+	struct malloc_elem *elem;
+	size_t size;
+
+	align = RTE_CACHE_LINE_ROUNDUP(align);
+
+	elem = find_biggest_element(heap, &size, flags, align, contig);
+	if (elem != NULL) {
+		elem = malloc_elem_alloc(elem, size, align, 0, contig);
+
+		/* increase heap's count of allocated elements */
+		heap->alloc_count++;
+	}
+
+	return elem == NULL ? NULL : (void *)(&elem[1]);
+}
+
+/* this function is exposed in malloc_mp.h */
+void
+rollback_expand_heap(struct rte_memseg **ms, int n_segs,
+		struct malloc_elem *elem, void *map_addr, size_t map_len)
+{
+	if (elem != NULL) {
+		malloc_elem_free_list_remove(elem);
+		malloc_elem_hide_region(elem, map_addr, map_len);
+	}
+
+	eal_memalloc_free_seg_bulk(ms, n_segs);
+}
+
+/* this function is exposed in malloc_mp.h */
+struct malloc_elem *
+alloc_pages_on_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
+		int socket, unsigned int flags, size_t align, size_t bound,
+		bool contig, struct rte_memseg **ms, int n_segs)
+{
+	struct rte_memseg_list *msl;
+	struct malloc_elem *elem = NULL;
+	size_t alloc_sz;
+	int allocd_pages;
+	void *ret, *map_addr;
+
+	alloc_sz = (size_t)pg_sz * n_segs;
+
+	/* first, check if we're allowed to allocate this memory */
+	if (eal_memalloc_mem_alloc_validate(socket,
+			heap->total_size + alloc_sz) < 0) {
+		RTE_LOG(DEBUG, EAL, "User has disallowed allocation\n");
+		return NULL;
+	}
+
+	allocd_pages = eal_memalloc_alloc_seg_bulk(ms, n_segs, pg_sz,
+			socket, true);
+
+	/* make sure we've allocated our pages... */
+	if (allocd_pages < 0)
+		return NULL;
+
+	map_addr = ms[0]->addr;
+	msl = rte_mem_virt2memseg_list(map_addr);
+
+	/* check if we wanted contiguous memory but didn't get it */
+	if (contig && !eal_memalloc_is_contig(msl, map_addr, alloc_sz)) {
+		RTE_LOG(DEBUG, EAL, "%s(): couldn't allocate physically contiguous space\n",
+				__func__);
+		goto fail;
+	}
+
+	/* add newly minted memsegs to malloc heap */
+	elem = malloc_heap_add_memory(heap, msl, map_addr, alloc_sz);
+
+	/* try once more, as now we have allocated new memory */
+	ret = find_suitable_element(heap, elt_size, flags, align, bound,
+			contig);
+
+	if (ret == NULL)
+		goto fail;
+
+	return elem;
+
+fail:
+	rollback_expand_heap(ms, n_segs, elem, map_addr, alloc_sz);
+	return NULL;
+}
+
+static int
+try_expand_heap_primary(struct malloc_heap *heap, uint64_t pg_sz,
+		size_t elt_size, int socket, unsigned int flags, size_t align,
+		size_t bound, bool contig)
+{
+	struct malloc_elem *elem;
+	struct rte_memseg **ms;
+	void *map_addr;
+	size_t alloc_sz;
+	int n_segs;
+	bool callback_triggered = false;
+
+	alloc_sz = RTE_ALIGN_CEIL(align + elt_size +
+			MALLOC_ELEM_TRAILER_LEN, pg_sz);
+	n_segs = alloc_sz / pg_sz;
+
+	/* we can't know in advance how many pages we'll need, so we malloc */
+	ms = malloc(sizeof(*ms) * n_segs);
+	if (ms == NULL)
+		return -1;
+
+	memset(ms, 0, sizeof(*ms) * n_segs);
+
+	elem = alloc_pages_on_heap(heap, pg_sz, elt_size, socket, flags, align,
+			bound, contig, ms, n_segs);
+
+	if (elem == NULL)
+		goto free_ms;
+
+	map_addr = ms[0]->addr;
+
+	/* notify user about changes in memory map */
+	eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC, map_addr, alloc_sz);
+
+	/* notify other processes that this has happened */
+	if (request_sync()) {
+		/* we couldn't ensure all processes have mapped memory,
+		 * so free it back and notify everyone that it's been
+		 * freed back.
+		 *
+		 * technically, we could've avoided adding memory addresses to
+		 * the map, but that would've led to inconsistent behavior
+		 * between primary and secondary processes, as those get
+		 * callbacks during sync. therefore, force primary process to
+		 * do alloc-and-rollback syncs as well.
+		 */
+		callback_triggered = true;
+		goto free_elem;
+	}
+	heap->total_size += alloc_sz;
+
+	RTE_LOG(DEBUG, EAL, "Heap on socket %d was expanded by %zdMB\n",
+		socket, alloc_sz >> 20ULL);
+
+	free(ms);
+
+	return 0;
+
+free_elem:
+	if (callback_triggered)
+		eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
+				map_addr, alloc_sz);
+
+	rollback_expand_heap(ms, n_segs, elem, map_addr, alloc_sz);
+
+	request_sync();
+free_ms:
+	free(ms);
+
+	return -1;
+}
+
+static int
+try_expand_heap_secondary(struct malloc_heap *heap, uint64_t pg_sz,
+		size_t elt_size, int socket, unsigned int flags, size_t align,
+		size_t bound, bool contig)
+{
+	struct malloc_mp_req req;
+	int req_result;
+
+	memset(&req, 0, sizeof(req));
+
+	req.t = REQ_TYPE_ALLOC;
+	req.alloc_req.align = align;
+	req.alloc_req.bound = bound;
+	req.alloc_req.contig = contig;
+	req.alloc_req.flags = flags;
+	req.alloc_req.elt_size = elt_size;
+	req.alloc_req.page_sz = pg_sz;
+	req.alloc_req.socket = socket;
+	req.alloc_req.heap = heap; /* it's in shared memory */
+
+	req_result = request_to_primary(&req);
+
+	if (req_result != 0)
+		return -1;
+
+	if (req.result != REQ_RESULT_SUCCESS)
+		return -1;
+
+	return 0;
+}
+
+static int
+try_expand_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
+		int socket, unsigned int flags, size_t align, size_t bound,
+		bool contig)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	int ret;
+
+	rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		ret = try_expand_heap_primary(heap, pg_sz, elt_size, socket,
+				flags, align, bound, contig);
+	} else {
+		ret = try_expand_heap_secondary(heap, pg_sz, elt_size, socket,
+				flags, align, bound, contig);
+	}
+
+	rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+	return ret;
+}
+
+static int
+compare_pagesz(const void *a, const void *b)
+{
+	const struct rte_memseg_list * const*mpa = a;
+	const struct rte_memseg_list * const*mpb = b;
+	const struct rte_memseg_list *msla = *mpa;
+	const struct rte_memseg_list *mslb = *mpb;
+	uint64_t pg_sz_a = msla->page_sz;
+	uint64_t pg_sz_b = mslb->page_sz;
+
+	if (pg_sz_a < pg_sz_b)
+		return -1;
+	if (pg_sz_a > pg_sz_b)
+		return 1;
+	return 0;
+}
+
+static int
+alloc_more_mem_on_socket(struct malloc_heap *heap, size_t size, int socket,
+		unsigned int flags, size_t align, size_t bound, bool contig)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	struct rte_memseg_list *requested_msls[RTE_MAX_MEMSEG_LISTS];
+	struct rte_memseg_list *other_msls[RTE_MAX_MEMSEG_LISTS];
+	uint64_t requested_pg_sz[RTE_MAX_MEMSEG_LISTS];
+	uint64_t other_pg_sz[RTE_MAX_MEMSEG_LISTS];
+	uint64_t prev_pg_sz;
+	int i, n_other_msls, n_other_pg_sz, n_requested_msls, n_requested_pg_sz;
+	bool size_hint = (flags & RTE_MEMZONE_SIZE_HINT_ONLY) > 0;
+	unsigned int size_flags = flags & ~RTE_MEMZONE_SIZE_HINT_ONLY;
+	void *ret;
+
+	memset(requested_msls, 0, sizeof(requested_msls));
+	memset(other_msls, 0, sizeof(other_msls));
+	memset(requested_pg_sz, 0, sizeof(requested_pg_sz));
+	memset(other_pg_sz, 0, sizeof(other_pg_sz));
+
+	/*
+	 * go through memseg list and take note of all the page sizes available,
+	 * and if any of them were specifically requested by the user.
+	 */
+	n_requested_msls = 0;
+	n_other_msls = 0;
+	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
+		struct rte_memseg_list *msl = &mcfg->memsegs[i];
+
+		if (msl->socket_id != socket)
+			continue;
+
+		if (msl->base_va == NULL)
+			continue;
+
+		/* if pages of specific size were requested */
+		if (size_flags != 0 && check_hugepage_sz(size_flags,
+				msl->page_sz))
+			requested_msls[n_requested_msls++] = msl;
+		else if (size_flags == 0 || size_hint)
+			other_msls[n_other_msls++] = msl;
+	}
+
+	/* sort the lists, smallest first */
+	qsort(requested_msls, n_requested_msls, sizeof(requested_msls[0]),
+			compare_pagesz);
+	qsort(other_msls, n_other_msls, sizeof(other_msls[0]),
+			compare_pagesz);
+
+	/* now, extract page sizes we are supposed to try */
+	prev_pg_sz = 0;
+	n_requested_pg_sz = 0;
+	for (i = 0; i < n_requested_msls; i++) {
+		uint64_t pg_sz = requested_msls[i]->page_sz;
+
+		if (prev_pg_sz != pg_sz) {
+			requested_pg_sz[n_requested_pg_sz++] = pg_sz;
+			prev_pg_sz = pg_sz;
+		}
+	}
+	prev_pg_sz = 0;
+	n_other_pg_sz = 0;
+	for (i = 0; i < n_other_msls; i++) {
+		uint64_t pg_sz = other_msls[i]->page_sz;
+
+		if (prev_pg_sz != pg_sz) {
+			other_pg_sz[n_other_pg_sz++] = pg_sz;
+			prev_pg_sz = pg_sz;
+		}
+	}
+
+	/* finally, try allocating memory of specified page sizes, starting from
+	 * the smallest sizes
+	 */
+	for (i = 0; i < n_requested_pg_sz; i++) {
+		uint64_t pg_sz = requested_pg_sz[i];
+
+		/*
+		 * do not pass the size hint here, as user expects other page
+		 * sizes first, before resorting to best effort allocation.
+		 */
+		if (!try_expand_heap(heap, pg_sz, size, socket, size_flags,
+				align, bound, contig))
+			return 0;
+	}
+	if (n_other_pg_sz == 0)
+		return -1;
+
+	/* now, check if we can reserve anything with size hint */
+	ret = find_suitable_element(heap, size, flags, align, bound, contig);
+	if (ret != NULL)
+		return 0;
+
+	/*
+	 * we still couldn't reserve memory, so try expanding heap with other
+	 * page sizes, if there are any
+	 */
+	for (i = 0; i < n_other_pg_sz; i++) {
+		uint64_t pg_sz = other_pg_sz[i];
+
+		if (!try_expand_heap(heap, pg_sz, size, socket, flags,
+				align, bound, contig))
+			return 0;
+	}
+	return -1;
+}
+
+/* this will try lower page sizes first */
+static void *
+heap_alloc_on_socket(const char *type, size_t size, int socket,
+		unsigned int flags, size_t align, size_t bound, bool contig)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	struct malloc_heap *heap = &mcfg->malloc_heaps[socket];
+	unsigned int size_flags = flags & ~RTE_MEMZONE_SIZE_HINT_ONLY;
+	void *ret;
+
+	rte_spinlock_lock(&(heap->lock));
+
+	align = align == 0 ? 1 : align;
+
+	/* for legacy mode, try once and with all flags */
+	if (internal_config.legacy_mem) {
+		ret = heap_alloc(heap, type, size, flags, align, bound, contig);
+		goto alloc_unlock;
+	}
+
+	/*
+	 * we do not pass the size hint here, because even if allocation fails,
+	 * we may still be able to allocate memory from appropriate page sizes,
+	 * we just need to request more memory first.
+	 */
+	ret = heap_alloc(heap, type, size, size_flags, align, bound, contig);
+	if (ret != NULL)
+		goto alloc_unlock;
+
+	if (!alloc_more_mem_on_socket(heap, size, socket, flags, align, bound,
+			contig)) {
+		ret = heap_alloc(heap, type, size, flags, align, bound, contig);
+
+		/* this should have succeeded */
+		if (ret == NULL)
+			RTE_LOG(ERR, EAL, "Error allocating from heap\n");
+	}
+alloc_unlock:
+	rte_spinlock_unlock(&(heap->lock));
+	return ret;
+}
+
+void *
+malloc_heap_alloc(const char *type, size_t size, int socket_arg,
+		unsigned int flags, size_t align, size_t bound, bool contig)
+{
+	int socket, i, cur_socket;
+	void *ret;
+
+	/* return NULL if size is 0 or alignment is not power-of-2 */
+	if (size == 0 || (align && !rte_is_power_of_2(align)))
+		return NULL;
+
+	if (!rte_eal_has_hugepages())
+		socket_arg = SOCKET_ID_ANY;
+
+	if (socket_arg == SOCKET_ID_ANY)
+		socket = malloc_get_numa_socket();
+	else
+		socket = socket_arg;
+
+	/* Check socket parameter */
+	if (socket >= RTE_MAX_NUMA_NODES)
+		return NULL;
+
+	ret = heap_alloc_on_socket(type, size, socket, flags, align, bound,
+			contig);
+	if (ret != NULL || socket_arg != SOCKET_ID_ANY)
+		return ret;
+
+	/* try other heaps */
+	for (i = 0; i < (int) rte_socket_count(); i++) {
+		cur_socket = rte_socket_id_by_idx(i);
+		if (cur_socket == socket)
+			continue;
+		ret = heap_alloc_on_socket(type, size, cur_socket, flags,
+				align, bound, contig);
+		if (ret != NULL)
+			return ret;
+	}
+	return NULL;
+}
+
+static void *
+heap_alloc_biggest_on_socket(const char *type, int socket, unsigned int flags,
+		size_t align, bool contig)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	struct malloc_heap *heap = &mcfg->malloc_heaps[socket];
+	void *ret;
+
+	rte_spinlock_lock(&(heap->lock));
+
+	align = align == 0 ? 1 : align;
+
+	ret = heap_alloc_biggest(heap, type, flags, align, contig);
+
+	rte_spinlock_unlock(&(heap->lock));
+
+	return ret;
+}
+
+void *
+malloc_heap_alloc_biggest(const char *type, int socket_arg, unsigned int flags,
+		size_t align, bool contig)
+{
+	int socket, i, cur_socket;
+	void *ret;
+
+	/* return NULL if align is not power-of-2 */
+	if ((align && !rte_is_power_of_2(align)))
+		return NULL;
+
+	if (!rte_eal_has_hugepages())
+		socket_arg = SOCKET_ID_ANY;
+
+	if (socket_arg == SOCKET_ID_ANY)
+		socket = malloc_get_numa_socket();
+	else
+		socket = socket_arg;
+
+	/* Check socket parameter */
+	if (socket >= RTE_MAX_NUMA_NODES)
+		return NULL;
+
+	ret = heap_alloc_biggest_on_socket(type, socket, flags, align,
+			contig);
+	if (ret != NULL || socket_arg != SOCKET_ID_ANY)
+		return ret;
+
+	/* try other heaps */
+	for (i = 0; i < (int) rte_socket_count(); i++) {
+		cur_socket = rte_socket_id_by_idx(i);
+		if (cur_socket == socket)
+			continue;
+		ret = heap_alloc_biggest_on_socket(type, cur_socket, flags,
+				align, contig);
+		if (ret != NULL)
+			return ret;
+	}
+	return NULL;
+}
+
+/* this function is exposed in malloc_mp.h */
+int
+malloc_heap_free_pages(void *aligned_start, size_t aligned_len)
+{
+	int n_segs, seg_idx, max_seg_idx;
+	struct rte_memseg_list *msl;
+	size_t page_sz;
+
+	msl = rte_mem_virt2memseg_list(aligned_start);
+	if (msl == NULL)
+		return -1;
+
+	page_sz = (size_t)msl->page_sz;
+	n_segs = aligned_len / page_sz;
+	seg_idx = RTE_PTR_DIFF(aligned_start, msl->base_va) / page_sz;
+	max_seg_idx = seg_idx + n_segs;
+
+	for (; seg_idx < max_seg_idx; seg_idx++) {
+		struct rte_memseg *ms;
+
+		ms = rte_fbarray_get(&msl->memseg_arr, seg_idx);
+		eal_memalloc_free_seg(ms);
+	}
+	return 0;
+}
+
+int
+malloc_heap_free(struct malloc_elem *elem)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	struct malloc_heap *heap;
+	void *start, *aligned_start, *end, *aligned_end;
+	size_t len, aligned_len, page_sz;
+	struct rte_memseg_list *msl;
+	unsigned int i, n_segs, before_space, after_space;
+	int ret;
+
+	if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
+		return -1;
+
+	/* elem may be merged with previous element, so keep heap address */
+	heap = elem->heap;
+	msl = elem->msl;
+	page_sz = (size_t)msl->page_sz;
+
+	rte_spinlock_lock(&(heap->lock));
+
+	/* mark element as free */
+	elem->state = ELEM_FREE;
+
+	elem = malloc_elem_free(elem);
+
+	/* anything after this is a bonus */
+	ret = 0;
+
+	/* ...of which we can't avail if we are in legacy mode */
+	if (internal_config.legacy_mem)
+		goto free_unlock;
+
+	/* Removed extra code to keep windows implementation simple. */
+
+free_unlock:
+	rte_spinlock_unlock(&(heap->lock));
+	return ret;
+}
+
+int
+malloc_heap_resize(struct malloc_elem *elem, size_t size)
+{
+	int ret;
+
+	if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
+		return -1;
+
+	rte_spinlock_lock(&(elem->heap->lock));
+
+	ret = malloc_elem_resize(elem, size);
+
+	rte_spinlock_unlock(&(elem->heap->lock));
+
+	return ret;
+}
+
+/*
+ * Function to retrieve data for heap on given socket
+ */
+int
+malloc_heap_get_stats(struct malloc_heap *heap,
+		struct rte_malloc_socket_stats *socket_stats)
+{
+	size_t idx;
+	struct malloc_elem *elem;
+
+	rte_spinlock_lock(&heap->lock);
+
+	/* Initialise variables for heap */
+	socket_stats->free_count = 0;
+	socket_stats->heap_freesz_bytes = 0;
+	socket_stats->greatest_free_size = 0;
+
+	/* Iterate through free list */
+	for (idx = 0; idx < RTE_HEAP_NUM_FREELISTS; idx++) {
+		for (elem = LIST_FIRST(&heap->free_head[idx]);
+			!!elem; elem = LIST_NEXT(elem, free_list))
+		{
+			socket_stats->free_count++;
+			socket_stats->heap_freesz_bytes += elem->size;
+			if (elem->size > socket_stats->greatest_free_size)
+				socket_stats->greatest_free_size = elem->size;
+		}
+	}
+	/* Get stats on overall heap and allocated memory on this heap */
+	socket_stats->heap_totalsz_bytes = heap->total_size;
+	socket_stats->heap_allocsz_bytes = (socket_stats->heap_totalsz_bytes -
+			socket_stats->heap_freesz_bytes);
+	socket_stats->alloc_count = heap->alloc_count;
+
+	rte_spinlock_unlock(&heap->lock);
+	return 0;
+}
+
+/*
+ * Function to retrieve data for heap on given socket
+ */
+void
+malloc_heap_dump(struct malloc_heap *heap, FILE *f)
+{
+	struct malloc_elem *elem;
+
+	rte_spinlock_lock(&heap->lock);
+
+	fprintf(f, "Heap size: 0x%zx\n", heap->total_size);
+	fprintf(f, "Heap alloc count: %u\n", heap->alloc_count);
+
+	elem = heap->first;
+	while (elem) {
+		malloc_elem_dump(elem, f);
+		elem = elem->next;
+	}
+
+	rte_spinlock_unlock(&heap->lock);
+}
+
+int
+rte_eal_malloc_heap_init(void)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+
+	if (register_mp_requests()) {
+		RTE_LOG(ERR, EAL, "Couldn't register malloc multiprocess actions\n");
+		rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+		return -1;
+	}
+
+	/* unlock mem hotplug here. it's safe for primary as no requests can
+	 * even come before primary itself is fully initialized, and secondaries
+	 * do not need to initialize the heap.
+	 */
+	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+
+	/* secondary process does not need to initialize anything */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	/* add all IOVA-contiguous areas to the heap */
+	return rte_memseg_contig_walk(malloc_add_seg, NULL);
+}
+
+static int
+destroy_seg(struct malloc_elem *elem, size_t len)
+{
+	struct malloc_heap *heap = elem->heap;
+	struct rte_memseg_list *msl;
+
+	msl = elem->msl;
+
+	/* notify all subscribers that a memory area is going to be removed */
+	eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE, elem, len);
+
+	/* this element can be removed */
+	malloc_elem_free_list_remove(elem);
+	malloc_elem_hide_region(elem, elem, len);
+
+	heap->total_size -= len;
+
+	memset(elem, 0, sizeof(*elem));
+
+	/* destroy the fbarray backing this memory */
+	if (rte_fbarray_destroy(&msl->memseg_arr) < 0)
+		return -1;
+
+	/* reset the memseg list */
+	memset(msl, 0, sizeof(*msl));
+
+	return 0;
+}
+
+int
+malloc_heap_add_external_memory(struct malloc_heap *heap, void *va_addr,
+	rte_iova_t iova_addrs[], unsigned int n_pages, size_t page_sz)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	char fbarray_name[RTE_FBARRAY_NAME_LEN];
+	struct rte_memseg_list *msl = NULL;
+	struct rte_fbarray *arr;
+	size_t seg_len = n_pages * page_sz;
+	unsigned int i;
+
+	/* first, find a free memseg list */
+	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
+		struct rte_memseg_list *tmp = &mcfg->memsegs[i];
+		if (tmp->base_va == NULL) {
+			msl = tmp;
+			break;
+		}
+	}
+	if (msl == NULL) {
+		RTE_LOG(ERR, EAL, "Couldn't find empty memseg list\n");
+		rte_errno = ENOSPC;
+		return -1;
+	}
+
+	snprintf(fbarray_name, sizeof(fbarray_name) - 1, "%s_%p",
+		heap->name, va_addr);
+
+	/* create the backing fbarray */
+	if (rte_fbarray_init(&msl->memseg_arr, fbarray_name, n_pages,
+		sizeof(struct rte_memseg)) < 0) {
+		RTE_LOG(ERR, EAL, "Couldn't create fbarray backing the memseg list\n");
+		return -1;
+	}
+	arr = &msl->memseg_arr;
+
+	/* fbarray created, fill it up */
+	for (i = 0; i < n_pages; i++) {
+		struct rte_memseg *ms;
+
+		rte_fbarray_set_used(arr, i);
+		ms = rte_fbarray_get(arr, i);
+		ms->addr = RTE_PTR_ADD(va_addr, i * page_sz);
+		ms->iova = iova_addrs == NULL ? RTE_BAD_IOVA : iova_addrs[i];
+		ms->hugepage_sz = page_sz;
+		ms->len = page_sz;
+		ms->nchannel = rte_memory_get_nchannel();
+		ms->nrank = rte_memory_get_nrank();
+		ms->socket_id = heap->socket_id;
+	}
+
+	/* set up the memseg list */
+	msl->base_va = va_addr;
+	msl->page_sz = page_sz;
+	msl->socket_id = heap->socket_id;
+	msl->len = seg_len;
+	msl->version = 0;
+	msl->external = 1;
+
+	/* erase contents of new memory */
+	memset(va_addr, 0, seg_len);
+
+	/* now, add newly minted memory to the malloc heap */
+	malloc_heap_add_memory(heap, msl, va_addr, seg_len);
+
+	heap->total_size += seg_len;
+
+	/* all done! */
+	RTE_LOG(DEBUG, EAL, "Added segment for heap %s starting at %p\n",
+		heap->name, va_addr);
+
+	/* notify all subscribers that a new memory area has been added */
+	eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC,
+		va_addr, seg_len);
+
+	return 0;
+}
+
+int
+malloc_heap_remove_external_memory(struct malloc_heap *heap, void *va_addr,
+	size_t len)
+{
+	struct malloc_elem *elem = heap->first;
+
+	/* find element with specified va address */
+	while (elem != NULL && elem != va_addr) {
+		elem = elem->next;
+		/* stop if we've blown past our VA */
+		if (elem > (struct malloc_elem *)va_addr) {
+			rte_errno = ENOENT;
+			return -1;
+		}
+	}
+	/* check if element was found */
+	if (elem == NULL || elem->msl->len != len) {
+		rte_errno = ENOENT;
+		return -1;
+	}
+	/* if element's size is not equal to segment len, segment is busy */
+	if (elem->state == ELEM_BUSY || elem->size != len) {
+		rte_errno = EBUSY;
+		return -1;
+	}
+	return destroy_seg(elem, len);
+}
+
+int
+malloc_heap_create(struct malloc_heap *heap, const char *heap_name)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	uint32_t next_socket_id = mcfg->next_socket_id;
+
+	/* prevent overflow. did you really create 2 billion heaps??? */
+	if (next_socket_id > INT32_MAX) {
+		RTE_LOG(ERR, EAL, "Cannot assign new socket ID's\n");
+		rte_errno = ENOSPC;
+		return -1;
+	}
+
+	/* initialize empty heap */
+	heap->alloc_count = 0;
+	heap->first = NULL;
+	heap->last = NULL;
+	LIST_INIT(heap->free_head);
+	rte_spinlock_init(&heap->lock);
+	heap->total_size = 0;
+	heap->socket_id = next_socket_id;
+
+	/* we hold a global mem hotplug writelock, so it's safe to increment */
+	mcfg->next_socket_id++;
+
+	/* set up name */
+	strlcpy(heap->name, heap_name, RTE_HEAP_NAME_MAX_LEN);
+
+	return 0;
+}
+
+int
+malloc_heap_destroy(struct malloc_heap *heap)
+{
+	if (heap->alloc_count != 0) {
+		RTE_LOG(ERR, EAL, "Heap is still in use\n");
+		rte_errno = EBUSY;
+		return -1;
+	}
+	if (heap->first != NULL || heap->last != NULL) {
+		RTE_LOG(ERR, EAL, "Heap still contains memory segments\n");
+		rte_errno = EBUSY;
+		return -1;
+	}
+	if (heap->total_size != 0)
+		RTE_LOG(ERR, EAL, "Total size not zero, heap is likely corrupt\n");
+
+	/* after this, the lock will be dropped */
+	memset(heap, 0, sizeof(*heap));
+
+	return 0;
+}
+
+int
+malloc_socket_to_heap_id(unsigned int socket_id)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	int i;
+
+	for (i = 0; i < RTE_MAX_HEAPS; i++) {
+		struct malloc_heap *heap = &mcfg->malloc_heaps[i];
+
+		if (heap->socket_id == socket_id)
+			return i;
+	}
+	return -1;
+}
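
Note: malloc_heap_get_stats() above is what backs rte_malloc_get_socket_stats();
a rough sketch of how the counters it fills are typically consumed, assuming
the usual rte_malloc.h API is also built on Windows (not part of this patch):

#include <stdio.h>

#include <rte_malloc.h>

static void
dump_socket_heap(int socket)
{
	struct rte_malloc_socket_stats stats;

	if (rte_malloc_get_socket_stats(socket, &stats) < 0)
		return;

	printf("socket %d: total=%zu free=%zu biggest free=%zu allocs=%u\n",
		socket,
		stats.heap_totalsz_bytes,
		stats.heap_freesz_bytes,
		stats.greatest_free_size,
		stats.alloc_count);
}
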
diff --git a/lib/librte_eal/windows/eal/malloc_mp.c b/lib/librte_eal/windows/eal/malloc_mp.c
new file mode 100644
index 000000000..15227e2ae
--- /dev/null
+++ b/lib/librte_eal/windows/eal/malloc_mp.c
@@ -0,0 +1,645 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_alarm.h>
+#include <rte_errno.h>
+#include <rte_string_fns.h>
+
+#include "eal_memalloc.h"
+
+#include "malloc_elem.h"
+#include "malloc_mp.h"
+
+#define MP_ACTION_SYNC "mp_malloc_sync"
+/**< request sent by primary process to notify of changes in memory map */
+#define MP_ACTION_ROLLBACK "mp_malloc_rollback"
+/**< request sent by primary process to notify of changes in memory map. this is
+ * essentially a regular sync request, but we cannot send sync requests while
+ * another one is in progress, and we might have to - therefore, we do this as
+ * a separate callback.
+ */
+#define MP_ACTION_REQUEST "mp_malloc_request"
+/**< request sent by secondary process to ask for allocation/deallocation */
+#define MP_ACTION_RESPONSE "mp_malloc_response"
+/**< response sent to secondary process to indicate result of request */
+
+/* forward declarations */
+static int
+handle_sync_response(const struct rte_mp_msg *request,
+		const struct rte_mp_reply *reply);
+static int
+handle_rollback_response(const struct rte_mp_msg *request,
+		const struct rte_mp_reply *reply);
+
+#define MP_TIMEOUT_S 5 /**< 5 seconds timeouts */
+
+/* when we're allocating, we need to store some state to ensure that we can
+ * roll back later
+ */
+struct primary_alloc_req_state {
+	struct malloc_heap *heap;
+	struct rte_memseg **ms;
+	int ms_len;
+	struct malloc_elem *elem;
+	void *map_addr;
+	size_t map_len;
+};
+
+enum req_state {
+	REQ_STATE_INACTIVE = 0,
+	REQ_STATE_ACTIVE,
+	REQ_STATE_COMPLETE
+};
+
+struct mp_request {
+	TAILQ_ENTRY(mp_request) next;
+	struct malloc_mp_req user_req; /**< contents of request */
+	CONDITION_VARIABLE cond; /**< variable we use to time out on this request */
+	enum req_state state; /**< indicate status of this request */
+	struct primary_alloc_req_state alloc_state;
+};
+
+/*
+ * We could've used just a single request, but it may be possible for
+ * secondaries to timeout earlier than the primary, and send a new request while
+ * primary is still expecting replies to the old one. Therefore, each new
+ * request will get assigned a new ID, which is how we will distinguish between
+ * expected and unexpected messages.
+ */
+TAILQ_HEAD(mp_request_list, mp_request);
+static struct {
+	struct mp_request_list list;
+	SRWLOCK  lock;
+} mp_request_list = {
+	.list = TAILQ_HEAD_INITIALIZER(mp_request_list.list),
+	.lock = SRWLOCK_INIT
+};
+
+/**
+ * General workflow is the following:
+ *
+ * Allocation:
+ * S: send request to primary
+ * P: attempt to allocate memory
+ *    if failed, sendmsg failure
+ *    if success, send sync request
+ * S: if received msg of failure, quit
+ *    if received sync request, synchronize memory map and reply with result
+ * P: if received sync request result
+ *    if success, sendmsg success
+ *    if failure, roll back allocation and send a rollback request
+ * S: if received msg of success, quit
+ *    if received rollback request, synchronize memory map and reply with result
+ * P: if received sync request result
+ *    sendmsg sync request result
+ * S: if received msg, quit
+ *
+ * Aside from timeouts, there are three points where we can quit:
+ *  - if allocation failed straight away
+ *  - if allocation and sync request succeeded
+ *  - if allocation succeeded, sync request failed, allocation rolled back and
+ *    rollback request received (irrespective of whether it succeeded or failed)
+ *
+ * Deallocation:
+ * S: send request to primary
+ * P: attempt to deallocate memory
+ *    if failed, sendmsg failure
+ *    if success, send sync request
+ * S: if received msg of failure, quit
+ *    if received sync request, synchronize memory map and reply with result
+ * P: if received sync request result
+ *    sendmsg sync request result
+ * S: if received msg, quit
+ *
+ * There is no "rollback" from deallocation, as it's safe to have some memory
+ * mapped in some processes - it's absent from the heap, so it won't get used.
+ */
+
+static struct mp_request *
+find_request_by_id(uint64_t id)
+{
+	struct mp_request *req;
+	TAILQ_FOREACH(req, &mp_request_list.list, next) {
+		if (req->user_req.id == id)
+			break;
+	}
+	return req;
+}
+
+/* this ID is, like, totally guaranteed to be absolutely unique. pinky swear. */
+static uint64_t
+get_unique_id(void)
+{
+	uint64_t id;
+	do {
+		id = rte_rand();
+	} while (find_request_by_id(id) != NULL);
+	return id;
+}
+
+/* secondary will respond to sync requests thusly */
+static int
+handle_sync(const struct rte_mp_msg *msg, const void *peer)
+{
+	/* TODO dpdk-1808 Not implemented on Windows */
+	return 0;
+}
+
+static int
+handle_alloc_request(const struct malloc_mp_req *m,
+		struct mp_request *req)
+{
+	const struct malloc_req_alloc *ar = &m->alloc_req;
+	struct malloc_heap *heap;
+	struct malloc_elem *elem;
+	struct rte_memseg **ms;
+	size_t alloc_sz;
+	int n_segs;
+	void *map_addr;
+
+	alloc_sz = RTE_ALIGN_CEIL(ar->align + ar->elt_size +
+			MALLOC_ELEM_TRAILER_LEN, ar->page_sz);
+	n_segs = alloc_sz / ar->page_sz;
+
+	heap = ar->heap;
+
+	/* we can't know in advance how many pages we'll need, so we malloc */
+	ms = malloc(sizeof(*ms) * n_segs);
+	if (ms == NULL) {
+		RTE_LOG(ERR, EAL, "Couldn't allocate memory for request state\n");
+		goto fail;
+	}
+
+	memset(ms, 0, sizeof(*ms) * n_segs);
+
+	elem = alloc_pages_on_heap(heap, ar->page_sz, ar->elt_size, ar->socket,
+			ar->flags, ar->align, ar->bound, ar->contig, ms,
+			n_segs);
+
+	if (elem == NULL)
+		goto fail;
+
+	map_addr = ms[0]->addr;
+
+	/* we have succeeded in allocating memory, but we still need to sync
+	 * with other processes. however, since DPDK IPC is single-threaded, we
+	 * send an asynchronous request and exit this callback.
+	 */
+
+	req->alloc_state.ms = ms;
+	req->alloc_state.ms_len = n_segs;
+	req->alloc_state.map_addr = map_addr;
+	req->alloc_state.map_len = alloc_sz;
+	req->alloc_state.elem = elem;
+	req->alloc_state.heap = heap;
+
+	return 0;
+fail:
+	free(ms);
+	return -1;
+}
+
+/* first stage of primary handling requests from secondary */
+static int
+handle_request(const struct rte_mp_msg *msg, const void *peer __rte_unused)
+{
+	const struct malloc_mp_req *m =
+			(const struct malloc_mp_req *)msg->param;
+	struct mp_request *entry;
+	int ret;
+
+	/* lock access to request */
+	AcquireSRWLockExclusive(&mp_request_list.lock);
+
+	/* make sure it's not a dupe */
+	entry = find_request_by_id(m->id);
+	if (entry != NULL) {
+		RTE_LOG(ERR, EAL, "Duplicate request id\n");
+		goto fail;
+	}
+
+	entry = malloc(sizeof(*entry));
+	if (entry == NULL) {
+		RTE_LOG(ERR, EAL, "Unable to allocate memory for request\n");
+		goto fail;
+	}
+
+	/* erase all data */
+	memset(entry, 0, sizeof(*entry));
+
+	if (m->t == REQ_TYPE_ALLOC) {
+		ret = handle_alloc_request(m, entry);
+	} else if (m->t == REQ_TYPE_FREE) {
+		ret = malloc_heap_free_pages(m->free_req.addr,
+				m->free_req.len);
+	} else {
+		RTE_LOG(ERR, EAL, "Unexpected request from secondary\n");
+		goto fail;
+	}
+
+	if (ret != 0) {
+		struct rte_mp_msg resp_msg;
+		struct malloc_mp_req *resp =
+				(struct malloc_mp_req *)resp_msg.param;
+
+		/* send failure message straight away */
+		resp_msg.num_fds = 0;
+		resp_msg.len_param = sizeof(*resp);
+		strlcpy(resp_msg.name, MP_ACTION_RESPONSE,
+				sizeof(resp_msg.name));
+
+		resp->t = m->t;
+		resp->result = REQ_RESULT_FAIL;
+		resp->id = m->id;
+
+		/* TODO dpdk-1808 send msg across to the other process using rte_mp_sendmsg(&resp_msg) */
+		/* we did not modify the request */
+		free(entry);
+	} else {
+		struct rte_mp_msg sr_msg;
+		struct malloc_mp_req *sr =
+				(struct malloc_mp_req *)sr_msg.param;
+		struct timespec ts;
+
+		memset(&sr_msg, 0, sizeof(sr_msg));
+
+		/* we can do something, so send sync request asynchronously */
+		sr_msg.num_fds = 0;
+		sr_msg.len_param = sizeof(*sr);
+		strlcpy(sr_msg.name, MP_ACTION_SYNC, sizeof(sr_msg.name));
+
+		ts.tv_nsec = 0;
+		ts.tv_sec = MP_TIMEOUT_S;
+
+		/* sync requests carry no data */
+		sr->t = REQ_TYPE_SYNC;
+		sr->id = m->id;
+
+		/* TODO dpdk-1808 check if there may be stray timeout still waiting */
+
+		/* mark request as in progress */
+		memcpy(&entry->user_req, m, sizeof(*m));
+		entry->state = REQ_STATE_ACTIVE;
+
+		TAILQ_INSERT_TAIL(&mp_request_list.list, entry, next);
+	}
+	ReleaseSRWLockExclusive(&mp_request_list.lock);
+	return 0;
+fail:
+	ReleaseSRWLockExclusive(&mp_request_list.lock);
+	free(entry);
+	return -1;
+}
+
+/* callback for asynchronous sync requests for primary. this will either do a
+ * sendmsg with results, or trigger rollback request.
+ */
+static int
+handle_sync_response(const struct rte_mp_msg *request,
+		const struct rte_mp_reply *reply)
+{
+	enum malloc_req_result result;
+	struct mp_request *entry;
+	const struct malloc_mp_req *mpreq =
+			(const struct malloc_mp_req *)request->param;
+	int i;
+
+	/* lock the request */
+	AcquireSRWLockExclusive(&mp_request_list.lock);
+
+	entry = find_request_by_id(mpreq->id);
+	if (entry == NULL) {
+		RTE_LOG(ERR, EAL, "Wrong request ID\n");
+		goto fail;
+	}
+
+	result = REQ_RESULT_SUCCESS;
+
+	if (reply->nb_received != reply->nb_sent)
+		result = REQ_RESULT_FAIL;
+
+	for (i = 0; i < reply->nb_received; i++) {
+		struct malloc_mp_req *resp =
+				(struct malloc_mp_req *)reply->msgs[i].param;
+
+		if (resp->t != REQ_TYPE_SYNC) {
+			RTE_LOG(ERR, EAL, "Unexpected response to sync request\n");
+			result = REQ_RESULT_FAIL;
+			break;
+		}
+		if (resp->id != entry->user_req.id) {
+			RTE_LOG(ERR, EAL, "Response to wrong sync request\n");
+			result = REQ_RESULT_FAIL;
+			break;
+		}
+		if (resp->result == REQ_RESULT_FAIL) {
+			result = REQ_RESULT_FAIL;
+			break;
+		}
+	}
+
+	if (entry->user_req.t == REQ_TYPE_FREE) {
+		struct rte_mp_msg msg;
+		struct malloc_mp_req *resp = (struct malloc_mp_req *)msg.param;
+
+		memset(&msg, 0, sizeof(msg));
+
+		/* this is a free request, just sendmsg result */
+		resp->t = REQ_TYPE_FREE;
+		resp->result = result;
+		resp->id = entry->user_req.id;
+		msg.num_fds = 0;
+		msg.len_param = sizeof(*resp);
+		strlcpy(msg.name, MP_ACTION_RESPONSE, sizeof(msg.name));
+
+		if (rte_mp_sendmsg(&msg))
+			RTE_LOG(ERR, EAL, "Could not send message to secondary process\n");
+
+		TAILQ_REMOVE(&mp_request_list.list, entry, next);
+		free(entry);
+	} else if (entry->user_req.t == REQ_TYPE_ALLOC &&
+			result == REQ_RESULT_SUCCESS) {
+		struct malloc_heap *heap = entry->alloc_state.heap;
+		struct rte_mp_msg msg;
+		struct malloc_mp_req *resp =
+				(struct malloc_mp_req *)msg.param;
+
+		memset(&msg, 0, sizeof(msg));
+
+		heap->total_size += entry->alloc_state.map_len;
+
+		/* result is success, so just notify secondary about this */
+		resp->t = REQ_TYPE_ALLOC;
+		resp->result = result;
+		resp->id = entry->user_req.id;
+		msg.num_fds = 0;
+		msg.len_param = sizeof(*resp);
+		strlcpy(msg.name, MP_ACTION_RESPONSE, sizeof(msg.name));
+
+		if (rte_mp_sendmsg(&msg))
+			RTE_LOG(ERR, EAL, "Could not send message to secondary process\n");
+
+		TAILQ_REMOVE(&mp_request_list.list, entry, next);
+		free(entry->alloc_state.ms);
+		free(entry);
+	} else if (entry->user_req.t == REQ_TYPE_ALLOC &&
+			result == REQ_RESULT_FAIL) {
+		struct rte_mp_msg rb_msg;
+		struct malloc_mp_req *rb =
+				(struct malloc_mp_req *)rb_msg.param;
+		struct timespec ts;
+		struct primary_alloc_req_state *state =
+				&entry->alloc_state;
+		int ret;
+
+		memset(&rb_msg, 0, sizeof(rb_msg));
+
+		/* we've failed to sync, so do a rollback */
+		rollback_expand_heap(state->ms, state->ms_len, state->elem,
+				state->map_addr, state->map_len);
+
+		/* send rollback request */
+		rb_msg.num_fds = 0;
+		rb_msg.len_param = sizeof(*rb);
+		strlcpy(rb_msg.name, MP_ACTION_ROLLBACK, sizeof(rb_msg.name));
+
+		ts.tv_nsec = 0;
+		ts.tv_sec = MP_TIMEOUT_S;
+
+		/* sync requests carry no data */
+		rb->t = REQ_TYPE_SYNC;
+		rb->id = entry->user_req.id;
+
+		/* TODO dpdk-1808 check if there may be stray timeout still waiting */
+	} else {
+		RTE_LOG(ERR, EAL, " to sync request of unknown type\n");
+		goto fail;
+	}
+
+	ReleaseSRWLockExclusive(&mp_request_list.lock);
+	return 0;
+fail:
+	ReleaseSRWLockExclusive(&mp_request_list.lock);
+	return -1;
+}
+
+static int
+handle_rollback_response(const struct rte_mp_msg *request,
+		const struct rte_mp_reply *reply __rte_unused)
+{
+	struct rte_mp_msg msg;
+	struct malloc_mp_req *resp = (struct malloc_mp_req *)msg.param;
+	const struct malloc_mp_req *mpreq =
+			(const struct malloc_mp_req *)request->param;
+	struct mp_request *entry;
+
+	/* lock the request */
+	AcquireSRWLockExclusive(&mp_request_list.lock);
+
+	memset(&msg, 0, sizeof(msg));
+
+	entry = find_request_by_id(mpreq->id);
+	if (entry == NULL) {
+		RTE_LOG(ERR, EAL, "Wrong request ID\n");
+		goto fail;
+	}
+
+	if (entry->user_req.t != REQ_TYPE_ALLOC) {
+		RTE_LOG(ERR, EAL, "Unexpected active request\n");
+		goto fail;
+	}
+
+	/* we don't care if rollback succeeded, request still failed */
+	resp->t = REQ_TYPE_ALLOC;
+	resp->result = REQ_RESULT_FAIL;
+	resp->id = mpreq->id;
+	msg.num_fds = 0;
+	msg.len_param = sizeof(*resp);
+	strlcpy(msg.name, MP_ACTION_RESPONSE, sizeof(msg.name));
+
+	if (rte_mp_sendmsg(&msg))
+		RTE_LOG(ERR, EAL, "Could not send message to secondary process\n");
+
+	/* clean up */
+	TAILQ_REMOVE(&mp_request_list.list, entry, next);
+	free(entry->alloc_state.ms);
+	free(entry);
+
+	ReleaseSRWLockExclusive(&mp_request_list.lock);
+	return 0;
+fail:
+	ReleaseSRWLockExclusive(&mp_request_list.lock);
+	return -1;
+}
+
+/* final stage of the request from secondary */
+static int
+handle_response(const struct rte_mp_msg *msg, const void *peer  __rte_unused)
+{
+	const struct malloc_mp_req *m =
+			(const struct malloc_mp_req *)msg->param;
+	struct mp_request *entry;
+
+	AcquireSRWLockExclusive(&mp_request_list.lock);
+
+	entry = find_request_by_id(m->id);
+	if (entry != NULL) {
+		/* update request status */
+		entry->user_req.result = m->result;
+
+		entry->state = REQ_STATE_COMPLETE;
+
+		/* trigger thread wakeup */
+		WakeConditionVariable(&entry->cond);
+	}
+
+	ReleaseSRWLockExclusive(&mp_request_list.lock);
+
+	return 0;
+}
+
+/* synchronously request memory map sync, this is only called whenever primary
+ * process initiates the allocation.
+ */
+int
+request_sync(void)
+{
+	struct rte_mp_msg msg;
+	struct rte_mp_reply reply;
+	struct malloc_mp_req *req = (struct malloc_mp_req *)msg.param;
+	struct timespec ts;
+	int i, ret;
+
+	memset(&msg, 0, sizeof(msg));
+	memset(&reply, 0, sizeof(reply));
+
+	/* no need to create tailq entries as this is entirely synchronous */
+
+	msg.num_fds = 0;
+	msg.len_param = sizeof(*req);
+	strlcpy(msg.name, MP_ACTION_SYNC, sizeof(msg.name));
+
+	/* sync request carries no data */
+	req->t = REQ_TYPE_SYNC;
+	req->id = get_unique_id();
+
+	ts.tv_nsec = 0;
+	ts.tv_sec = MP_TIMEOUT_S;
+
+	/* TODO dpdk-1808 check if there may be stray timeout still waiting */
+
+	if (reply.nb_received != reply.nb_sent) {
+		RTE_LOG(ERR, EAL, "Not all secondaries have responded\n");
+		ret = -1;
+		goto out;
+	}
+
+	for (i = 0; i < reply.nb_received; i++) {
+		struct malloc_mp_req *resp =
+				(struct malloc_mp_req *)reply.msgs[i].param;
+		if (resp->t != REQ_TYPE_SYNC) {
+			RTE_LOG(ERR, EAL, "Unexpected response from secondary\n");
+			ret = -1;
+			goto out;
+		}
+		if (resp->id != req->id) {
+			RTE_LOG(ERR, EAL, "Wrong request ID\n");
+			ret = -1;
+			goto out;
+		}
+		if (resp->result != REQ_RESULT_SUCCESS) {
+			RTE_LOG(ERR, EAL, "Secondary process failed to synchronize\n");
+			ret = -1;
+			goto out;
+		}
+	}
+
+	ret = 0;
+out:
+	free(reply.msgs);
+	return ret;
+}
+
+/* this is a synchronous wrapper around a bunch of asynchronous requests to
+ * primary process. this will initiate a request and wait until responses come.
+ */
+int
+request_to_primary(struct malloc_mp_req *user_req)
+{
+	struct rte_mp_msg msg;
+	struct malloc_mp_req *msg_req = (struct malloc_mp_req *)msg.param;
+	struct mp_request *entry;
+
+	DWORD milliseconds;
+	int ret;
+
+	memset(&msg, 0, sizeof(msg));
+
+	AcquireSRWLockExclusive(&mp_request_list.lock);
+
+	entry = malloc(sizeof(*entry));
+	if (entry == NULL) {
+		RTE_LOG(ERR, EAL, "Cannot allocate memory for request\n");
+		goto fail;
+	}
+
+	memset(entry, 0, sizeof(*entry));
+
+	/* initialize the request */
+	InitializeConditionVariable(&entry->cond);
+
+	/* SleepConditionVariableSRW() takes its timeout in milliseconds */
+	milliseconds = MP_TIMEOUT_S * 1000;
+
+	msg.num_fds = 0;
+	msg.len_param = sizeof(*msg_req);
+	strlcpy(msg.name, MP_ACTION_REQUEST, sizeof(msg.name));
+
+	/* (attempt to) get a unique id */
+	user_req->id = get_unique_id();
+
+	/* copy contents of user request into the message */
+	memcpy(msg_req, user_req, sizeof(*msg_req));
+
+	/* TODO dpdk-1808 send msg across to the other process using rte_mp_sendmsg(&resp_msg) */
+	/* copy contents of user request into active request */
+	memcpy(&entry->user_req, user_req, sizeof(*user_req));
+
+	/* mark request as in progress */
+	entry->state = REQ_STATE_ACTIVE;
+
+	TAILQ_INSERT_TAIL(&mp_request_list.list, entry, next);
+
+	/* finally, wait on timeout. SleepConditionVariableSRW() returns
+	 * non-zero when woken up and zero on timeout or failure.
+	 */
+	while (entry->state != REQ_STATE_COMPLETE) {
+		if (!SleepConditionVariableSRW(&entry->cond,
+				&mp_request_list.lock, milliseconds, 0))
+			break;
+	}
+
+	if (entry->state != REQ_STATE_COMPLETE) {
+		RTE_LOG(ERR, EAL, "Request timed out\n");
+		ret = -1;
+	} else {
+		ret = 0;
+		user_req->result = entry->user_req.result;
+	}
+	TAILQ_REMOVE(&mp_request_list.list, entry, next);
+	free(entry);
+
+	ReleaseSRWLockExclusive(&mp_request_list.lock);
+	return ret;
+fail:
+	ReleaseSRWLockExclusive(&mp_request_list.lock);
+	free(entry);
+	return -1;
+}
+
+int
+register_mp_requests(void)
+{
+	/* TODO dpdk-1808 Not implemented on Windows */
+	return 0;
+}
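
Note: register_mp_requests() is stubbed out above. Once the rte_mp_* IPC
primitives are available on Windows, it would presumably wire the
MP_ACTION_* names to the handlers in this file, roughly along these lines
(a sketch loosely modelled on the Linux counterpart, not part of this
patch):

int
register_mp_requests(void)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* primary serves allocation/free requests from secondaries */
		if (rte_mp_action_register(MP_ACTION_REQUEST, handle_request)) {
			RTE_LOG(ERR, EAL, "Couldn't register '%s' action\n",
				MP_ACTION_REQUEST);
			return -1;
		}
	} else {
		/* secondaries answer sync/rollback requests and final replies */
		if (rte_mp_action_register(MP_ACTION_SYNC, handle_sync) ||
				rte_mp_action_register(MP_ACTION_ROLLBACK,
					handle_sync) ||
				rte_mp_action_register(MP_ACTION_RESPONSE,
					handle_response)) {
			RTE_LOG(ERR, EAL, "Couldn't register malloc actions\n");
			return -1;
		}
	}
	return 0;
}
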
diff --git a/lib/librte_eal/windows/include_override/dirent.h b/lib/librte_eal/windows/include_override/dirent.h
new file mode 100644
index 000000000..47d17fd9a
--- /dev/null
+++ b/lib/librte_eal/windows/include_override/dirent.h
@@ -0,0 +1,950 @@
+/*
+* Dirent interface for Microsoft Visual Studio
+* Version 1.21
+*
+* Copyright (C) 2006-2012 Toni Ronkko
+* This file is part of dirent.  Dirent may be freely distributed
+* under the MIT license.  For all details and documentation, see
+* https://github.com/tronkko/dirent
+*/
+#ifndef DIRENT_H
+#define DIRENT_H
+
+/*
+* Include windows.h without Windows Sockets 1.1 to prevent conflicts with
+* Windows Sockets 2.0.
+*/
+#ifndef WIN32_LEAN_AND_MEAN
+#   define WIN32_LEAN_AND_MEAN
+#endif
+#include <windows.h>
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <wchar.h>
+#include <string.h>
+#include <stdlib.h>
+#include <malloc.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <errno.h>
+
+/* Indicates that d_type field is available in dirent structure */
+#define _DIRENT_HAVE_D_TYPE
+
+/* Indicates that d_namlen field is available in dirent structure */
+#define _DIRENT_HAVE_D_NAMLEN
+
+/* Entries missing from MSVC 6.0 */
+#if !defined(FILE_ATTRIBUTE_DEVICE)
+#   define FILE_ATTRIBUTE_DEVICE 0x40
+#endif
+
+/* File type and permission flags for stat(), general mask */
+#if !defined(S_IFMT)
+#   define S_IFMT _S_IFMT
+#endif
+
+/* Directory bit */
+#if !defined(S_IFDIR)
+#   define S_IFDIR _S_IFDIR
+#endif
+
+/* Character device bit */
+#if !defined(S_IFCHR)
+#   define S_IFCHR _S_IFCHR
+#endif
+
+/* Pipe bit */
+#if !defined(S_IFFIFO)
+#   define S_IFFIFO _S_IFFIFO
+#endif
+
+/* Regular file bit */
+#if !defined(S_IFREG)
+#   define S_IFREG _S_IFREG
+#endif
+
+/* Read permission */
+#if !defined(S_IREAD)
+#   define S_IREAD _S_IREAD
+#endif
+
+/* Write permission */
+#if !defined(S_IWRITE)
+#   define S_IWRITE _S_IWRITE
+#endif
+
+/* Execute permission */
+#if !defined(S_IEXEC)
+#   define S_IEXEC _S_IEXEC
+#endif
+
+/* Pipe */
+#if !defined(S_IFIFO)
+#   define S_IFIFO _S_IFIFO
+#endif
+
+/* Block device */
+#if !defined(S_IFBLK)
+#   define S_IFBLK 0
+#endif
+
+/* Link */
+#if !defined(S_IFLNK)
+#   define S_IFLNK 0
+#endif
+
+/* Socket */
+#if !defined(S_IFSOCK)
+#   define S_IFSOCK 0
+#endif
+
+/* Read user permission */
+#if !defined(S_IRUSR)
+#   define S_IRUSR S_IREAD
+#endif
+
+/* Write user permission */
+#if !defined(S_IWUSR)
+#   define S_IWUSR S_IWRITE
+#endif
+
+/* Execute user permission */
+#if !defined(S_IXUSR)
+#   define S_IXUSR 0
+#endif
+
+/* Read group permission */
+#if !defined(S_IRGRP)
+#   define S_IRGRP 0
+#endif
+
+/* Write group permission */
+#if !defined(S_IWGRP)
+#   define S_IWGRP 0
+#endif
+
+/* Execute group permission */
+#if !defined(S_IXGRP)
+#   define S_IXGRP 0
+#endif
+
+/* Read others permission */
+#if !defined(S_IROTH)
+#   define S_IROTH 0
+#endif
+
+/* Write others permission */
+#if !defined(S_IWOTH)
+#   define S_IWOTH 0
+#endif
+
+/* Execute others permission */
+#if !defined(S_IXOTH)
+#   define S_IXOTH 0
+#endif
+
+/* Maximum length of file name */
+#if !defined(PATH_MAX)
+#   define PATH_MAX MAX_PATH
+#endif
+#if !defined(FILENAME_MAX)
+#   define FILENAME_MAX MAX_PATH
+#endif
+#if !defined(NAME_MAX)
+#   define NAME_MAX FILENAME_MAX
+#endif
+
+/* File type flags for d_type */
+#define DT_UNKNOWN 0
+#define DT_REG S_IFREG
+#define DT_DIR S_IFDIR
+#define DT_FIFO S_IFIFO
+#define DT_SOCK S_IFSOCK
+#define DT_CHR S_IFCHR
+#define DT_BLK S_IFBLK
+#define DT_LNK S_IFLNK
+
+/* Macros for converting between st_mode and d_type */
+#define IFTODT(mode) ((mode) & S_IFMT)
+#define DTTOIF(type) (type)
+
+/*
+* File type macros.  Note that block devices, sockets and links cannot be
+* distinguished on Windows and the macros S_ISBLK, S_ISSOCK and S_ISLNK are
+* only defined for compatibility.  These macros should always return false
+* on Windows.
+*/
+#if !defined(S_ISFIFO)
+#   define S_ISFIFO(mode) (((mode) & S_IFMT) == S_IFIFO)
+#endif
+#if !defined(S_ISDIR)
+#   define S_ISDIR(mode) (((mode) & S_IFMT) == S_IFDIR)
+#endif
+#if !defined(S_ISREG)
+#   define S_ISREG(mode) (((mode) & S_IFMT) == S_IFREG)
+#endif
+#if !defined(S_ISLNK)
+#   define S_ISLNK(mode) (((mode) & S_IFMT) == S_IFLNK)
+#endif
+#if !defined(S_ISSOCK)
+#   define S_ISSOCK(mode) (((mode) & S_IFMT) == S_IFSOCK)
+#endif
+#if !defined(S_ISCHR)
+#   define S_ISCHR(mode) (((mode) & S_IFMT) == S_IFCHR)
+#endif
+#if !defined(S_ISBLK)
+#   define S_ISBLK(mode) (((mode) & S_IFMT) == S_IFBLK)
+#endif
+
+/* Return the exact length of d_namlen without zero terminator */
+#define _D_EXACT_NAMLEN(p) ((p)->d_namlen)
+
+/* Return number of bytes needed to store d_namlen */
+#define _D_ALLOC_NAMLEN(p) (PATH_MAX)
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+	/* Wide-character version */
+	struct _wdirent {
+		/* Always zero */
+		long d_ino;
+
+		/* Structure size */
+		unsigned short d_reclen;
+
+		/* Length of name without \0 */
+		size_t d_namlen;
+
+		/* File type */
+		int d_type;
+
+		/* File name */
+		wchar_t d_name[PATH_MAX];
+	};
+	typedef struct _wdirent _wdirent;
+
+	struct _WDIR {
+		/* Current directory entry */
+		struct _wdirent ent;
+
+		/* Private file data */
+		WIN32_FIND_DATAW data;
+
+		/* True if data is valid */
+		int cached;
+
+		/* Win32 search handle */
+		HANDLE handle;
+
+		/* Initial directory name */
+		wchar_t *patt;
+	};
+	typedef struct _WDIR _WDIR;
+
+	static _WDIR *_wopendir(const wchar_t *dirname);
+	static struct _wdirent *_wreaddir(_WDIR *dirp);
+	static int _wclosedir(_WDIR *dirp);
+	static void _wrewinddir(_WDIR* dirp);
+
+
+	/* For compatibility with Symbian */
+#define wdirent _wdirent
+#define WDIR _WDIR
+#define wopendir _wopendir
+#define wreaddir _wreaddir
+#define wclosedir _wclosedir
+#define wrewinddir _wrewinddir
+
+
+	/* Multi-byte character versions */
+	struct dirent {
+		/* Always zero */
+		long d_ino;
+
+		/* Structure size */
+		unsigned short d_reclen;
+
+		/* Length of name without \0 */
+		size_t d_namlen;
+
+		/* File type */
+		int d_type;
+
+		/* File name */
+		char d_name[PATH_MAX];
+	};
+	typedef struct dirent dirent;
+
+	struct DIR {
+		struct dirent ent;
+		struct _WDIR *wdirp;
+	};
+	typedef struct DIR DIR;
+
+	static DIR *opendir(const char *dirname);
+	static struct dirent *readdir(DIR *dirp);
+	static int closedir(DIR *dirp);
+	static void rewinddir(DIR* dirp);
+
+
+	/* Internal utility functions */
+	static WIN32_FIND_DATAW *dirent_first(_WDIR *dirp);
+	static WIN32_FIND_DATAW *dirent_next(_WDIR *dirp);
+
+	static int dirent_mbstowcs_s(
+		size_t *pReturnValue,
+		wchar_t *wcstr,
+		size_t sizeInWords,
+		const char *mbstr,
+		size_t count);
+
+	static int dirent_wcstombs_s(
+		size_t *pReturnValue,
+		char *mbstr,
+		size_t sizeInBytes,
+		const wchar_t *wcstr,
+		size_t count);
+
+	static void dirent_set_errno(int error);
+
+	/*
+	* Open directory stream DIRNAME for read and return a pointer to the
+	* internal working area that is used to retrieve individual directory
+	* entries.
+	*/
+	static _WDIR*
+		_wopendir(
+			const wchar_t *dirname)
+	{
+		_WDIR *dirp = NULL;
+		int error;
+
+		/* Must have directory name */
+		if (dirname == NULL || dirname[0] == '\0') {
+			dirent_set_errno(ENOENT);
+			return NULL;
+		}
+
+		/* Allocate new _WDIR structure */
+		dirp = (_WDIR*)malloc(sizeof(struct _WDIR));
+		if (dirp != NULL) {
+			DWORD n;
+
+			/* Reset _WDIR structure */
+			dirp->handle = INVALID_HANDLE_VALUE;
+			dirp->patt = NULL;
+			dirp->cached = 0;
+
+			/* Compute the length of full path plus zero terminator
+			*
+			* Note that on WinRT there's no way to convert relative paths
+			* into absolute paths, so just assume it's an absolute path.
+			*/
+#       if defined(WINAPI_FAMILY) && (WINAPI_FAMILY == WINAPI_FAMILY_PHONE_APP)
+			n = wcslen(dirname);
+#       else
+			n = GetFullPathNameW(dirname, 0, NULL, NULL);
+#       endif
+
+			/* Allocate room for absolute directory name and search pattern */
+			dirp->patt = (wchar_t*)malloc(sizeof(wchar_t) * n + 16);
+			if (dirp->patt) {
+
+				/*
+				* Convert relative directory name to an absolute one.  This
+				* allows rewinddir() to function correctly even when current
+				* working directory is changed between opendir() and rewinddir().
+				*
+				* Note that on WinRT there's no way to convert relative paths
+				* into absolute paths, so just assume it's an absolute path.
+				*/
+#           if defined(WINAPI_FAMILY) && (WINAPI_FAMILY == WINAPI_FAMILY_PHONE_APP)
+				wcsncpy_s(dirp->patt, n + 1, dirname, n);
+#           else
+				n = GetFullPathNameW(dirname, n, dirp->patt, NULL);
+#           endif
+				if (n > 0) {
+					wchar_t *p;
+
+					/* Append search pattern \* to the directory name */
+					p = dirp->patt + n;
+					if (dirp->patt < p) {
+						switch (p[-1]) {
+						case '\\':
+						case '/':
+						case ':':
+							/* Directory ends in path separator, e.g. c:\temp\ */
+							/*NOP*/;
+							break;
+
+						default:
+							/* Directory name doesn't end in path separator */
+							*p++ = '\\';
+						}
+					}
+					*p++ = '*';
+					*p = '\0';
+
+					/* Open directory stream and retrieve the first entry */
+					if (dirent_first(dirp)) {
+						/* Directory stream opened successfully */
+						error = 0;
+					}
+					else {
+						/* Cannot retrieve first entry */
+						error = 1;
+						dirent_set_errno(ENOENT);
+					}
+
+				}
+				else {
+					/* Cannot retrieve full path name */
+					dirent_set_errno(ENOENT);
+					error = 1;
+				}
+
+			}
+			else {
+				/* Cannot allocate memory for search pattern */
+				error = 1;
+			}
+
+		}
+		else {
+			/* Cannot allocate _WDIR structure */
+			error = 1;
+		}
+
+		/* Clean up in case of error */
+		if (error  &&  dirp) {
+			_wclosedir(dirp);
+			dirp = NULL;
+		}
+
+		return dirp;
+	}
+
+	/*
+	* Read next directory entry.  The directory entry is returned in dirent
+	* structure in the d_name field.  Individual directory entries returned by
+	* this function include regular files, sub-directories, pseudo-directories
+	* "." and ".." as well as volume labels, hidden files and system files.
+	*/
+	static struct _wdirent*
+		_wreaddir(
+			_WDIR *dirp)
+	{
+		WIN32_FIND_DATAW *datap;
+		struct _wdirent *entp;
+
+		/* Read next directory entry */
+		datap = dirent_next(dirp);
+		if (datap) {
+			size_t n;
+			DWORD attr;
+
+			/* Pointer to directory entry to return */
+			entp = &dirp->ent;
+
+			/*
+			* Copy file name as wide-character string.  If the file name is too
+			* long to fit in to the destination buffer, then truncate file name
+			* to PATH_MAX characters and zero-terminate the buffer.
+			*/
+			n = 0;
+			while (n + 1 < PATH_MAX  &&  datap->cFileName[n] != 0) {
+				entp->d_name[n] = datap->cFileName[n];
+				n++;
+			}
+			dirp->ent.d_name[n] = 0;
+
+			/* Length of file name excluding zero terminator */
+			entp->d_namlen = n;
+
+			/* File type */
+			attr = datap->dwFileAttributes;
+			if ((attr & FILE_ATTRIBUTE_DEVICE) != 0) {
+				entp->d_type = DT_CHR;
+			}
+			else if ((attr & FILE_ATTRIBUTE_DIRECTORY) != 0) {
+				entp->d_type = DT_DIR;
+			}
+			else {
+				entp->d_type = DT_REG;
+			}
+
+			/* Reset dummy fields */
+			entp->d_ino = 0;
+			entp->d_reclen = sizeof(struct _wdirent);
+
+		}
+		else {
+
+			/* Last directory entry read */
+			entp = NULL;
+
+		}
+
+		return entp;
+	}
+
+	/*
+	* Close directory stream opened by opendir() function.  This invalidates the
+	* DIR structure as well as any directory entry read previously by
+	* _wreaddir().
+	*/
+	static int
+		_wclosedir(
+			_WDIR *dirp)
+	{
+		int ok;
+		if (dirp) {
+
+			/* Release search handle */
+			if (dirp->handle != INVALID_HANDLE_VALUE) {
+				FindClose(dirp->handle);
+				dirp->handle = INVALID_HANDLE_VALUE;
+			}
+
+			/* Release search pattern */
+			if (dirp->patt) {
+				free(dirp->patt);
+				dirp->patt = NULL;
+			}
+
+			/* Release directory structure */
+			free(dirp);
+			ok = /*success*/0;
+
+		}
+		else {
+			/* Invalid directory stream */
+			dirent_set_errno(EBADF);
+			ok = /*failure*/-1;
+		}
+		return ok;
+	}
+
+	/*
+	* Rewind directory stream such that _wreaddir() returns the very first
+	* file name again.
+	*/
+	static void
+		_wrewinddir(
+			_WDIR* dirp)
+	{
+		if (dirp) {
+			/* Release existing search handle */
+			if (dirp->handle != INVALID_HANDLE_VALUE) {
+				FindClose(dirp->handle);
+			}
+
+			/* Open new search handle */
+			dirent_first(dirp);
+		}
+	}
+
+	/* Get first directory entry (internal) */
+	static WIN32_FIND_DATAW*
+		dirent_first(
+			_WDIR *dirp)
+	{
+		WIN32_FIND_DATAW *datap;
+
+		/* Open directory and retrieve the first entry */
+		dirp->handle = FindFirstFileExW(
+			dirp->patt, FindExInfoStandard, &dirp->data,
+			FindExSearchNameMatch, NULL, 0);
+		if (dirp->handle != INVALID_HANDLE_VALUE) {
+
+			/* a directory entry is now waiting in memory */
+			datap = &dirp->data;
+			dirp->cached = 1;
+
+		}
+		else {
+
+			/* Failed to re-open directory: no directory entry in memory */
+			dirp->cached = 0;
+			datap = NULL;
+
+		}
+		return datap;
+	}
+
+	/* Get next directory entry (internal) */
+	static WIN32_FIND_DATAW*
+		dirent_next(
+			_WDIR *dirp)
+	{
+		WIN32_FIND_DATAW *p;
+
+		/* Get next directory entry */
+		if (dirp->cached != 0) {
+
+			/* A valid directory entry already in memory */
+			p = &dirp->data;
+			dirp->cached = 0;
+
+		}
+		else if (dirp->handle != INVALID_HANDLE_VALUE) {
+
+			/* Get the next directory entry from stream */
+			if (FindNextFileW(dirp->handle, &dirp->data) != FALSE) {
+				/* Got a file */
+				p = &dirp->data;
+			}
+			else {
+				/* The very last entry has been processed or an error occurred */
+				FindClose(dirp->handle);
+				dirp->handle = INVALID_HANDLE_VALUE;
+				p = NULL;
+			}
+
+		}
+		else {
+
+			/* End of directory stream reached */
+			p = NULL;
+
+		}
+
+		return p;
+	}
+
+	/*
+	* Open directory stream using plain old C-string.
+	*/
+	static DIR*
+		opendir(
+			const char *dirname)
+	{
+		struct DIR *dirp;
+		int error;
+
+		/* Must have directory name */
+		if (dirname == NULL || dirname[0] == '\0') {
+			dirent_set_errno(ENOENT);
+			return NULL;
+		}
+
+		/* Allocate memory for DIR structure */
+		dirp = (DIR*)malloc(sizeof(struct DIR));
+		if (dirp) {
+			wchar_t wname[PATH_MAX];
+			size_t n;
+
+			/* Convert directory name to wide-character string */
+			error = dirent_mbstowcs_s(&n, wname, PATH_MAX, dirname, PATH_MAX);
+			if (!error) {
+
+				/* Open directory stream using wide-character name */
+				dirp->wdirp = _wopendir(wname);
+				if (dirp->wdirp) {
+					/* Directory stream opened */
+					error = 0;
+				}
+				else {
+					/* Failed to open directory stream */
+					error = 1;
+				}
+
+			}
+			else {
+				/*
+				* Cannot convert file name to wide-character string.  This
+				* occurs if the string contains invalid multi-byte sequences or
+				* the output buffer is too small to contain the resulting
+				* string.
+				*/
+				error = 1;
+			}
+
+		}
+		else {
+			/* Cannot allocate DIR structure */
+			error = 1;
+		}
+
+		/* Clean up in case of error */
+		if (error  &&  dirp) {
+			free(dirp);
+			dirp = NULL;
+		}
+
+		return dirp;
+	}
+
+	/*
+	* Read next directory entry.
+	*
+	* When working with text consoles, please note that file names returned by
+	* readdir() are represented in the default ANSI code page while any output to
+	* console is typically formatted on another code page.  Thus, non-ASCII
+	* characters in file names will not usually display correctly on console.  The
+	* problem can be fixed in two ways: (1) change the character set of console
+	* to 1252 using chcp utility and use Lucida Console font, or (2) use
+	* _cprintf function when writing to console.  The _cprintf() will re-encode
+	* ANSI strings to the console code page so many non-ASCII characters will
+	* display correctly.
+	*/
+	static struct dirent*
+		readdir(
+			DIR *dirp)
+	{
+		WIN32_FIND_DATAW *datap;
+		struct dirent *entp;
+
+		/* Read next directory entry */
+		datap = dirent_next(dirp->wdirp);
+		if (datap) {
+			size_t n;
+			int error;
+
+			/* Attempt to convert file name to multi-byte string */
+			error = dirent_wcstombs_s(
+				&n, dirp->ent.d_name, PATH_MAX, datap->cFileName, PATH_MAX);
+
+			/*
+			* If the file name cannot be represented by a multi-byte string,
+			* then attempt to use old 8+3 file name.  This allows traditional
+			* Unix code to access some file names despite Unicode
+			* characters, although file names may seem unfamiliar to the user.
+			*
+			* Beware that the code below cannot come up with a short file
+			* name unless the file system provides one.  At least
+			* VirtualBox shared folders fail to do this.
+			*/
+			if (error  &&  datap->cAlternateFileName[0] != '\0') {
+				error = dirent_wcstombs_s(
+					&n, dirp->ent.d_name, PATH_MAX,
+					datap->cAlternateFileName, PATH_MAX);
+			}
+
+			if (!error) {
+				DWORD attr;
+
+				/* Initialize directory entry for return */
+				entp = &dirp->ent;
+
+				/* Length of file name excluding zero terminator */
+				entp->d_namlen = n - 1;
+
+				/* File attributes */
+				attr = datap->dwFileAttributes;
+				if ((attr & FILE_ATTRIBUTE_DEVICE) != 0) {
+					entp->d_type = DT_CHR;
+				}
+				else if ((attr & FILE_ATTRIBUTE_DIRECTORY) != 0) {
+					entp->d_type = DT_DIR;
+				}
+				else {
+					entp->d_type = DT_REG;
+				}
+
+				/* Reset dummy fields */
+				entp->d_ino = 0;
+				entp->d_reclen = sizeof(struct dirent);
+
+			}
+			else {
+				/*
+				* Cannot convert file name to multi-byte string so construct
+				* an erroneous directory entry and return that.  Note that
+				* we cannot return NULL as that would stop the processing
+				* of directory entries completely.
+				*/
+				entp = &dirp->ent;
+				entp->d_name[0] = '?';
+				entp->d_name[1] = '\0';
+				entp->d_namlen = 1;
+				entp->d_type = DT_UNKNOWN;
+				entp->d_ino = 0;
+				entp->d_reclen = 0;
+			}
+
+		}
+		else {
+			/* No more directory entries */
+			entp = NULL;
+		}
+
+		return entp;
+	}
+
+	/*
+	* Close directory stream.
+	*/
+	static int
+		closedir(
+			DIR *dirp)
+	{
+		int ok;
+		if (dirp) {
+
+			/* Close wide-character directory stream */
+			ok = _wclosedir(dirp->wdirp);
+			dirp->wdirp = NULL;
+
+			/* Release multi-byte character version */
+			free(dirp);
+
+		}
+		else {
+
+			/* Invalid directory stream */
+			dirent_set_errno(EBADF);
+			ok = /*failure*/-1;
+
+		}
+		return ok;
+	}
+
+	/*
+	* Rewind directory stream to beginning.
+	*/
+	static void
+		rewinddir(
+			DIR* dirp)
+	{
+		/* Rewind wide-character string directory stream */
+		_wrewinddir(dirp->wdirp);
+	}
+
+	/* Convert multi-byte string to wide character string */
+	static int
+		dirent_mbstowcs_s(
+			size_t *pReturnValue,
+			wchar_t *wcstr,
+			size_t sizeInWords,
+			const char *mbstr,
+			size_t count)
+	{
+		int error;
+
+#if defined(_MSC_VER)  &&  _MSC_VER >= 1400
+
+		/* Microsoft Visual Studio 2005 or later */
+		error = mbstowcs_s(pReturnValue, wcstr, sizeInWords, mbstr, count);
+
+#else
+
+		/* Older Visual Studio or non-Microsoft compiler */
+		size_t n;
+
+		/* Convert to wide-character string (or count characters) */
+		n = mbstowcs(wcstr, mbstr, sizeInWords);
+		if (!wcstr || n < count) {
+
+			/* Zero-terminate output buffer */
+			if (wcstr  &&  sizeInWords) {
+				if (n >= sizeInWords) {
+					n = sizeInWords - 1;
+				}
+				wcstr[n] = 0;
+			}
+
+			/* Length of resulting wide-character string WITH zero terminator */
+			if (pReturnValue) {
+				*pReturnValue = n + 1;
+			}
+
+			/* Success */
+			error = 0;
+
+		}
+		else {
+
+			/* Could not convert string */
+			error = 1;
+
+		}
+
+#endif
+
+		return error;
+	}
+
+	/* Convert wide-character string to multi-byte string */
+	static int
+		dirent_wcstombs_s(
+			size_t *pReturnValue,
+			char *mbstr,
+			size_t sizeInBytes, /* max size of mbstr */
+			const wchar_t *wcstr,
+			size_t count)
+	{
+		int error;
+
+#if defined(_MSC_VER)  &&  _MSC_VER >= 1400
+
+		/* Microsoft Visual Studio 2005 or later */
+		error = wcstombs_s(pReturnValue, mbstr, sizeInBytes, wcstr, count);
+
+#else
+
+		/* Older Visual Studio or non-Microsoft compiler */
+		size_t n;
+
+		/* Convert to multi-byte string (or count the number of bytes needed) */
+		n = wcstombs(mbstr, wcstr, sizeInBytes);
+		if (!mbstr || n < count) {
+
+			/* Zero-terminate output buffer */
+			if (mbstr  &&  sizeInBytes) {
+				if (n >= sizeInBytes) {
+					n = sizeInBytes - 1;
+				}
+				mbstr[n] = '\0';
+			}
+
+			/* Length of resulting multi-byte string WITH zero terminator */
+			if (pReturnValue) {
+				*pReturnValue = n + 1;
+			}
+
+			/* Success */
+			error = 0;
+
+		}
+		else {
+
+			/* Cannot convert string */
+			error = 1;
+
+		}
+
+#endif
+
+		return error;
+	}
+
+	/* Set errno variable */
+	static void
+		dirent_set_errno(
+			int error)
+	{
+#if defined(_MSC_VER)  &&  _MSC_VER >= 1400
+
+		/* Microsoft Visual Studio 2005 and later */
+		_set_errno(error);
+
+#else
+
+		/* Non-Microsoft compiler or older Microsoft compiler */
+		errno = error;
+
+#endif
+	}
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /*DIRENT_H*/
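
As a usage illustration only (not part of the patch), a minimal sketch of
iterating a directory through this shim; list_dir() and the trailing '/'
marker for directories are arbitrary choices:

#include <stdio.h>
#include <dirent.h>

static void
list_dir(const char *path)
{
	DIR *dirp = opendir(path);
	struct dirent *ent;

	if (dirp == NULL) {
		fprintf(stderr, "cannot open %s\n", path);
		return;
	}
	while ((ent = readdir(dirp)) != NULL)
		printf("%s%s\n", ent->d_name,
			ent->d_type == DT_DIR ? "/" : "");
	closedir(dirp);
}
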
diff --git a/lib/librte_eal/windows/include_override/getopt.h b/lib/librte_eal/windows/include_override/getopt.h
new file mode 100644
index 000000000..19bc18f9a
--- /dev/null
+++ b/lib/librte_eal/windows/include_override/getopt.h
@@ -0,0 +1,252 @@
+#ifndef __GETOPT_H__
+/**
+ * DISCLAIMER
+ * This file is a part of the w64 mingw-runtime package.
+ *
+ * The w64 mingw-runtime package and its code is distributed in the hope that it
+ * will be useful but WITHOUT ANY WARRANTY.  ALL WARRANTIES, EXPRESSED OR
+ * IMPLIED ARE HEREBY DISCLAIMED.  This includes but is not limited to
+ * warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ */
+/*
+* Copyright (c) 2002 Todd C. Miller <Todd.Miller@courtesan.com>
+*
+* Permission to use, copy, modify, and distribute this software for any
+* purpose with or without fee is hereby granted, provided that the above
+* copyright notice and this permission notice appear in all copies.
+*
+* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*
+* Sponsored in part by the Defense Advanced Research Projects
+* Agency (DARPA) and Air Force Research Laboratory, Air Force
+* Materiel Command, USAF, under agreement number F39502-99-1-0512.
+*/
+/*-
+* Copyright (c) 2000 The NetBSD Foundation, Inc.
+* All rights reserved.
+*
+* This code is derived from software contributed to The NetBSD Foundation
+* by Dieter Baron and Thomas Klausner.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+* 1. Redistributions of source code must retain the above copyright
+*    notice, this list of conditions and the following disclaimer.
+* 2. Redistributions in binary form must reproduce the above copyright
+*    notice, this list of conditions and the following disclaimer in the
+*    documentation and/or other materials provided with the distribution.
+*
+* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+* PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+* POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#pragma warning(disable:4996);
+
+#define __GETOPT_H__
+
+/* All the headers include this file. */
+#include <crtdefs.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+#include <windows.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define	REPLACE_GETOPT		/* use this getopt as the system getopt(3) */
+
+#define PRINT_ERROR	((opterr) && (*options != ':'))
+
+#define FLAG_PERMUTE	0x01	/* permute non-options to the end of argv */
+#define FLAG_ALLARGS	0x02	/* treat non-options as args to option "-1" */
+#define FLAG_LONGONLY	0x04	/* operate as getopt_long_only */
+
+/* return values */
+#define	BADCH		(int)'?'
+#define	BADARG		((*options == ':') ? (int)':' : (int)'?')
+#define	INORDER 	(int)1
+
+#ifndef __CYGWIN__
+#define __progname __argv[0]
+#else
+	extern char __declspec(dllimport) *__progname;
+#endif
+
+#ifdef __CYGWIN__
+	static char EMSG[] = "";
+#else
+#define	EMSG		""
+#endif
+extern int optind;		/* index of first non-option in argv      */
+extern int optopt;		/* single option character, as parsed     */
+extern int opterr;		/* flag to enable built-in diagnostics... */
+				/* (user may set to zero, to suppress)    */
+
+extern char *optarg;		/* pointer to argument of current option  */
+
+extern int getopt(int nargc, char * const *nargv, const char *options);
+
+static int getopt_internal(int, char * const *, const char *,
+                           const struct option *, int *, int);
+static int gcd(int, int);
+static void permute_args(int, int, int, char * const *);
+
+static void
+_vwarnx(const char *fmt, va_list ap)
+{
+    (void)fprintf(stderr, "%s: ", __progname);
+    if (fmt != NULL)
+	(void)vfprintf(stderr, fmt, ap);
+    (void)fprintf(stderr, "\n");
+}
+
+static void
+warnx(const char *fmt, ...)
+{
+    va_list ap;
+    va_start(ap, fmt);
+    _vwarnx(fmt, ap);
+    va_end(ap);
+}
+
+/*
+* Compute the greatest common divisor of a and b.
+*/
+static int
+gcd(int a, int b)
+{
+    int c;
+
+    c = a % b;
+    while (c != 0) {
+	    a = b;
+	    b = c;
+	    c = a % b;
+    }
+
+    return (b);
+}
+
+/*
+* Exchange the block from nonopt_start to nonopt_end with the block
+* from nonopt_end to opt_end (keeping the same order of arguments
+* in each block).
+*/
+static void
+permute_args(int panonopt_start, int panonopt_end, int opt_end, char * const *nargv)
+{
+    int cstart, cyclelen, i, j, ncycle, nnonopts, nopts, pos;
+    char *swap;
+
+    /*
+    * compute lengths of blocks and number and size of cycles
+    */
+    nnonopts = panonopt_end - panonopt_start;
+    nopts = opt_end - panonopt_end;
+    ncycle = gcd(nnonopts, nopts);
+    cyclelen = (opt_end - panonopt_start) / ncycle;
+
+    for (i = 0; i < ncycle; i++) {
+	cstart = panonopt_end + i;
+	pos = cstart;
+	for (j = 0; j < cyclelen; j++) {
+	    if (pos >= panonopt_end)
+		pos -= nnonopts;
+	    else
+		pos += nopts;
+	    swap = nargv[pos];
+	    /* LINTED const cast */
+	    ((char **)nargv)[pos] = nargv[cstart];
+	    /* LINTED const cast */
+	    ((char **)nargv)[cstart] = swap;
+	}
+    }
+}
+
+
+#ifdef _BSD_SOURCE
+/*
+ * BSD adds the non-standard `optreset' feature, for reinitialisation
+ * of `getopt' parsing.  We support this feature, for applications which
+ * proclaim their BSD heritage, before including this header; however,
+ * to maintain portability, developers are advised to avoid it.
+ */
+# define optreset  __mingw_optreset
+extern int optreset;
+#endif
+#ifdef __cplusplus
+}
+#endif
+/*
+ * POSIX requires the `getopt' API to be specified in `unistd.h';
+ * thus, `unistd.h' includes this header.  However, we do not want
+ * to expose the `getopt_long' or `getopt_long_only' APIs, when
+ * included in this manner.  Thus, close the standard __GETOPT_H__
+ * declarations block, and open an additional __GETOPT_LONG_H__
+ * specific block, only when *not* __UNISTD_H_SOURCED__, in which
+ * to declare the extended API.
+ */
+#endif /* !defined(__GETOPT_H__) */
+
+#if !defined(__UNISTD_H_SOURCED__) && !defined(__GETOPT_LONG_H__)
+#define __GETOPT_LONG_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct option		/* specification for a long form option...	*/
+{
+  const char *name;		/* option name, without leading hyphens */
+  int         has_arg;		/* does it take an argument?		*/
+  int        *flag;		/* where to save its status, or NULL	*/
+  int         val;		/* its associated status value		*/
+};
+
+enum    		/* permitted values for its `has_arg' field...	*/
+{
+  no_argument = 0,      	/* option never takes an argument	*/
+  required_argument,		/* option always requires an argument	*/
+  optional_argument		/* option may take an argument		*/
+};
+
+extern int getopt_long(int nargc, char * const *nargv, const char *options,
+    const struct option *long_options, int *idx);
+extern int getopt_long_only(int nargc, char * const *nargv, const char *options,
+    const struct option *long_options, int *idx);
+/*
+ * Previous MinGW implementation had...
+ */
+#ifndef HAVE_DECL_GETOPT
+/*
+ * ...for the long form API only; keep this for compatibility.
+ */
+# define HAVE_DECL_GETOPT	1
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !defined(__UNISTD_H_SOURCED__) && !defined(__GETOPT_LONG_H__) */
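
As a usage illustration only (not part of the patch), a minimal sketch of
option parsing against this header; it assumes the linux-emu getopt.c
implementation is linked, and the -c/--lcores option is made up for the
example:

#include <stdio.h>
#include <getopt.h>

static void
parse_args(int argc, char **argv)
{
	static const struct option longopts[] = {
		{ "lcores", required_argument, NULL, 'c' },
		{ NULL, 0, NULL, 0 }
	};
	int opt;

	while ((opt = getopt_long(argc, argv, "c:", longopts, NULL)) != -1) {
		switch (opt) {
		case 'c':
			printf("lcores: %s\n", optarg);
			break;
		default:
			fprintf(stderr, "unrecognized option\n");
			break;
		}
	}
}
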
diff --git a/lib/librte_eal/windows/include_override/net/ethernet.h b/lib/librte_eal/windows/include_override/net/ethernet.h
new file mode 100644
index 000000000..ae7341ee8
--- /dev/null
+++ b/lib/librte_eal/windows/include_override/net/ethernet.h
@@ -0,0 +1,405 @@
+/*
+ * Fundamental constants relating to ethernet.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef _NET_ETHERNET_H_
+#define _NET_ETHERNET_H_
+
+/*
+ * Some basic Ethernet constants.
+ */
+#define	ETHER_ADDR_LEN		6	/* length of an Ethernet address */
+#define	ETHER_TYPE_LEN		2	/* length of the Ethernet type field */
+#define	ETHER_CRC_LEN		4	/* length of the Ethernet CRC */
+#define	ETHER_HDR_LEN		(ETHER_ADDR_LEN*2+ETHER_TYPE_LEN)
+#define	ETHER_MIN_LEN		64	/* minimum frame len, including CRC */
+#define	ETHER_MAX_LEN		1518	/* maximum frame len, including CRC */
+#define	ETHER_MAX_LEN_JUMBO	9018	/* max jumbo frame len, including CRC */
+
+#define	ETHER_VLAN_ENCAP_LEN	4	/* len of 802.1Q VLAN encapsulation */
+/*
+ * Mbuf adjust factor to force 32-bit alignment of IP header.
+ * Drivers should do m_adj(m, ETHER_ALIGN) when setting up a
+ * receive so the upper layers get the IP header properly aligned
+ * past the 14-byte Ethernet header.
+ */
+#define	ETHER_ALIGN		2	/* driver adjust for IP hdr alignment */
+
+/*
+ * Compute the maximum frame size based on ethertype (i.e. possible
+ * encapsulation) and whether or not an FCS is present.
+ */
+#define	ETHER_MAX_FRAME(ifp, etype, hasfcs)				\
+	((ifp)->if_mtu + ETHER_HDR_LEN +				\
+	 ((hasfcs) ? ETHER_CRC_LEN : 0) +				\
+	 (((etype) == ETHERTYPE_VLAN) ? ETHER_VLAN_ENCAP_LEN : 0))
+
+/*
+ * Ethernet-specific mbuf flags.
+ */
+#define	M_HASFCS	M_PROTO5	/* FCS included at end of frame */
+
+/*
+ * Ethernet CRC32 polynomials (big- and little-endian versions).
+ */
+#define	ETHER_CRC_POLY_LE	0xedb88320
+#define	ETHER_CRC_POLY_BE	0x04c11db6
+
+/*
+ * A macro to validate an Ethernet frame length.
+ */
+#define	ETHER_IS_VALID_LEN(foo)	\
+	((foo) >= ETHER_MIN_LEN && (foo) <= ETHER_MAX_LEN)
+
+/*
+ * Structure of a 10Mb/s Ethernet header.
+ */
+struct ether_header {
+	u_char	ether_dhost[ETHER_ADDR_LEN];
+	u_char	ether_shost[ETHER_ADDR_LEN];
+	u_short	ether_type;
+} __packed;
+
+/*
+ * Structure of a 48-bit Ethernet address.
+ */
+struct ether_addr {
+	u_char octet[ETHER_ADDR_LEN];
+} __packed;
+
+#define	ETHER_IS_MULTICAST(addr) (*(addr) & 0x01) /* is address mcast/bcast? */
+
+/*
+ *  NOTE: 0x0000-0x05DC (0..1500) are generally IEEE 802.3 length fields.
+ *  However, there are some conflicts.
+ */
+
+#define	ETHERTYPE_8023		0x0004	/* IEEE 802.3 packet */
+		   /* 0x0101 .. 0x1FF	   Experimental */
+#define	ETHERTYPE_PUP		0x0200	/* Xerox PUP protocol - see 0A00 */
+#define	ETHERTYPE_PUPAT		0x0200	/* PUP Address Translation - see 0A01 */
+#define	ETHERTYPE_SPRITE	0x0500	/* ??? */
+			     /* 0x0400	   Nixdorf */
+#define	ETHERTYPE_NS		0x0600	/* XNS */
+#define	ETHERTYPE_NSAT		0x0601	/* XNS Address Translation (3Mb only) */
+#define	ETHERTYPE_DLOG1 	0x0660	/* DLOG (?) */
+#define	ETHERTYPE_DLOG2 	0x0661	/* DLOG (?) */
+#define	ETHERTYPE_IP		0x0800	/* IP protocol */
+#define	ETHERTYPE_X75		0x0801	/* X.75 Internet */
+#define	ETHERTYPE_NBS		0x0802	/* NBS Internet */
+#define	ETHERTYPE_ECMA		0x0803	/* ECMA Internet */
+#define	ETHERTYPE_CHAOS 	0x0804	/* CHAOSnet */
+#define	ETHERTYPE_X25		0x0805	/* X.25 Level 3 */
+#define	ETHERTYPE_ARP		0x0806	/* Address resolution protocol */
+#define	ETHERTYPE_NSCOMPAT	0x0807	/* XNS Compatibility */
+#define	ETHERTYPE_FRARP 	0x0808	/* Frame Relay ARP (RFC1701) */
+			     /* 0x081C	   Symbolics Private */
+		    /* 0x0888 - 0x088A	   Xyplex */
+#define	ETHERTYPE_UBDEBUG	0x0900	/* Ungermann-Bass network debugger */
+#define	ETHERTYPE_IEEEPUP	0x0A00	/* Xerox IEEE802.3 PUP */
+#define	ETHERTYPE_IEEEPUPAT	0x0A01	/* Xerox IEEE802.3 PUP Address Translation */
+#define	ETHERTYPE_VINES 	0x0BAD	/* Banyan VINES */
+#define	ETHERTYPE_VINESLOOP	0x0BAE	/* Banyan VINES Loopback */
+#define	ETHERTYPE_VINESECHO	0x0BAF	/* Banyan VINES Echo */
+
+/*		       0x1000 - 0x100F	   Berkeley Trailer */
+/*
+ * The ETHERTYPE_NTRAILER packet types starting at ETHERTYPE_TRAIL have
+ * (type-ETHERTYPE_TRAIL)*512 bytes of data followed
+ * by an ETHER type (as given above) and then the (variable-length) header.
+ */
+#define	ETHERTYPE_TRAIL		0x1000	/* Trailer packet */
+#define	ETHERTYPE_NTRAILER	16
+
+#define	ETHERTYPE_DCA		0x1234	/* DCA - Multicast */
+#define	ETHERTYPE_VALID 	0x1600	/* VALID system protocol */
+#define	ETHERTYPE_DOGFIGHT	0x1989	/* Artificial Horizons ("Aviator" dogfight simulator [on Sun]) */
+#define	ETHERTYPE_RCL		0x1995	/* Datapoint Corporation (RCL lan protocol) */
+
+					/* The following 3C0x types
+					   are unregistered: */
+#define	ETHERTYPE_NBPVCD	0x3C00	/* 3Com NBP virtual circuit datagram (like XNS SPP) not registered */
+#define	ETHERTYPE_NBPSCD	0x3C01	/* 3Com NBP System control datagram not registered */
+#define	ETHERTYPE_NBPCREQ	0x3C02	/* 3Com NBP Connect request (virtual cct) not registered */
+#define	ETHERTYPE_NBPCRSP	0x3C03	/* 3Com NBP Connect response not registered */
+#define	ETHERTYPE_NBPCC		0x3C04	/* 3Com NBP Connect complete not registered */
+#define	ETHERTYPE_NBPCLREQ	0x3C05	/* 3Com NBP Close request (virtual cct) not registered */
+#define	ETHERTYPE_NBPCLRSP	0x3C06	/* 3Com NBP Close response not registered */
+#define	ETHERTYPE_NBPDG		0x3C07	/* 3Com NBP Datagram (like XNS IDP) not registered */
+#define	ETHERTYPE_NBPDGB	0x3C08	/* 3Com NBP Datagram broadcast not registered */
+#define	ETHERTYPE_NBPCLAIM	0x3C09	/* 3Com NBP Claim NetBIOS name not registered */
+#define	ETHERTYPE_NBPDLTE	0x3C0A	/* 3Com NBP Delete NetBIOS name not registered */
+#define	ETHERTYPE_NBPRAS	0x3C0B	/* 3Com NBP Remote adaptor status request not registered */
+#define	ETHERTYPE_NBPRAR	0x3C0C	/* 3Com NBP Remote adaptor response not registered */
+#define	ETHERTYPE_NBPRST	0x3C0D	/* 3Com NBP Reset not registered */
+
+#define	ETHERTYPE_PCS		0x4242	/* PCS Basic Block Protocol */
+#define	ETHERTYPE_IMLBLDIAG	0x424C	/* Information Modes Little Big LAN diagnostic */
+#define	ETHERTYPE_DIDDLE	0x4321	/* THD - Diddle */
+#define	ETHERTYPE_IMLBL		0x4C42	/* Information Modes Little Big LAN */
+#define	ETHERTYPE_SIMNET	0x5208	/* BBN Simnet Private */
+#define	ETHERTYPE_DECEXPER	0x6000	/* DEC Unassigned, experimental */
+#define	ETHERTYPE_MOPDL		0x6001	/* DEC MOP dump/load */
+#define	ETHERTYPE_MOPRC		0x6002	/* DEC MOP remote console */
+#define	ETHERTYPE_DECnet	0x6003	/* DEC DECNET Phase IV route */
+#define	ETHERTYPE_DN		ETHERTYPE_DECnet	/* libpcap, tcpdump */
+#define	ETHERTYPE_LAT		0x6004	/* DEC LAT */
+#define	ETHERTYPE_DECDIAG	0x6005	/* DEC diagnostic protocol (at interface initialization?) */
+#define	ETHERTYPE_DECCUST	0x6006	/* DEC customer protocol */
+#define	ETHERTYPE_SCA		0x6007	/* DEC LAVC, SCA */
+#define	ETHERTYPE_AMBER		0x6008	/* DEC AMBER */
+#define	ETHERTYPE_DECMUMPS	0x6009	/* DEC MUMPS */
+		    /* 0x6010 - 0x6014	   3Com Corporation */
+#define	ETHERTYPE_TRANSETHER	0x6558	/* Trans Ether Bridging (RFC1701)*/
+#define	ETHERTYPE_RAWFR		0x6559	/* Raw Frame Relay (RFC1701) */
+#define	ETHERTYPE_UBDL		0x7000	/* Ungermann-Bass download */
+#define	ETHERTYPE_UBNIU		0x7001	/* Ungermann-Bass NIUs */
+#define	ETHERTYPE_UBDIAGLOOP	0x7002	/* Ungermann-Bass diagnostic/loopback */
+#define	ETHERTYPE_UBNMC		0x7003	/* Ungermann-Bass ??? (NMC to/from UB Bridge) */
+#define	ETHERTYPE_UBBST		0x7005	/* Ungermann-Bass Bridge Spanning Tree */
+#define	ETHERTYPE_OS9		0x7007	/* OS/9 Microware */
+#define	ETHERTYPE_OS9NET	0x7009	/* OS/9 Net? */
+		    /* 0x7020 - 0x7029	   LRT (England) (now Sintrom) */
+#define	ETHERTYPE_RACAL		0x7030	/* Racal-Interlan */
+#define	ETHERTYPE_PRIMENTS	0x7031	/* Prime NTS (Network Terminal Service) */
+#define	ETHERTYPE_CABLETRON	0x7034	/* Cabletron */
+#define	ETHERTYPE_CRONUSVLN	0x8003	/* Cronus VLN */
+#define	ETHERTYPE_CRONUS	0x8004	/* Cronus Direct */
+#define	ETHERTYPE_HP		0x8005	/* HP Probe */
+#define	ETHERTYPE_NESTAR	0x8006	/* Nestar */
+#define	ETHERTYPE_ATTSTANFORD	0x8008	/* AT&T/Stanford (local use) */
+#define	ETHERTYPE_EXCELAN	0x8010	/* Excelan */
+#define	ETHERTYPE_SG_DIAG	0x8013	/* SGI diagnostic type */
+#define	ETHERTYPE_SG_NETGAMES	0x8014	/* SGI network games */
+#define	ETHERTYPE_SG_RESV	0x8015	/* SGI reserved type */
+#define	ETHERTYPE_SG_BOUNCE	0x8016	/* SGI bounce server */
+#define	ETHERTYPE_APOLLODOMAIN	0x8019	/* Apollo DOMAIN */
+#define	ETHERTYPE_TYMSHARE	0x802E	/* Tymeshare */
+#define	ETHERTYPE_TIGAN		0x802F	/* Tigan, Inc. */
+#define	ETHERTYPE_REVARP	0x8035	/* Reverse addr resolution protocol */
+#define	ETHERTYPE_AEONIC	0x8036	/* Aeonic Systems */
+#define	ETHERTYPE_IPXNEW	0x8037	/* IPX (Novell Netware?) */
+#define	ETHERTYPE_LANBRIDGE	0x8038	/* DEC LANBridge */
+#define	ETHERTYPE_DSMD	0x8039	/* DEC DSM/DDP */
+#define	ETHERTYPE_ARGONAUT	0x803A	/* DEC Argonaut Console */
+#define	ETHERTYPE_VAXELN	0x803B	/* DEC VAXELN */
+#define	ETHERTYPE_DECDNS	0x803C	/* DEC DNS Naming Service */
+#define	ETHERTYPE_ENCRYPT	0x803D	/* DEC Ethernet Encryption */
+#define	ETHERTYPE_DECDTS	0x803E	/* DEC Distributed Time Service */
+#define	ETHERTYPE_DECLTM	0x803F	/* DEC LAN Traffic Monitor */
+#define	ETHERTYPE_DECNETBIOS	0x8040	/* DEC PATHWORKS DECnet NETBIOS Emulation */
+#define	ETHERTYPE_DECLAST	0x8041	/* DEC Local Area System Transport */
+			     /* 0x8042	   DEC Unassigned */
+#define	ETHERTYPE_PLANNING	0x8044	/* Planning Research Corp. */
+		    /* 0x8046 - 0x8047	   AT&T */
+#define	ETHERTYPE_DECAM		0x8048	/* DEC Availability Manager for Distributed Systems DECamds (but someone at DEC says not) */
+#define	ETHERTYPE_EXPERDATA	0x8049	/* ExperData */
+#define	ETHERTYPE_VEXP		0x805B	/* Stanford V Kernel exp. */
+#define	ETHERTYPE_VPROD		0x805C	/* Stanford V Kernel prod. */
+#define	ETHERTYPE_ES		0x805D	/* Evans & Sutherland */
+#define	ETHERTYPE_LITTLE	0x8060	/* Little Machines */
+#define	ETHERTYPE_COUNTERPOINT	0x8062	/* Counterpoint Computers */
+		    /* 0x8065 - 0x8066	   Univ. of Mass @ Amherst */
+#define	ETHERTYPE_VEECO		0x8067	/* Veeco Integrated Auto. */
+#define	ETHERTYPE_GENDYN	0x8068	/* General Dynamics */
+#define	ETHERTYPE_ATT		0x8069	/* AT&T */
+#define	ETHERTYPE_AUTOPHON	0x806A	/* Autophon */
+#define	ETHERTYPE_COMDESIGN	0x806C	/* ComDesign */
+#define	ETHERTYPE_COMPUGRAPHIC	0x806D	/* Compugraphic Corporation */
+		    /* 0x806E - 0x8077	   Landmark Graphics Corp. */
+#define	ETHERTYPE_MATRA		0x807A	/* Matra */
+#define	ETHERTYPE_DDE		0x807B	/* Dansk Data Elektronik */
+#define	ETHERTYPE_MERIT		0x807C	/* Merit Internodal (or Univ of Michigan?) */
+		    /* 0x807D - 0x807F	   Vitalink Communications */
+#define	ETHERTYPE_VLTLMAN	0x8080	/* Vitalink TransLAN III Management */
+		    /* 0x8081 - 0x8083	   Counterpoint Computers */
+		    /* 0x8088 - 0x808A	   Xyplex */
+#define	ETHERTYPE_ATALK		0x809B	/* AppleTalk */
+#define	ETHERTYPE_AT		ETHERTYPE_ATALK		/* old NetBSD */
+#define	ETHERTYPE_APPLETALK	ETHERTYPE_ATALK		/* HP-UX */
+		    /* 0x809C - 0x809E	   Datability */
+#define	ETHERTYPE_SPIDER	0x809F	/* Spider Systems Ltd. */
+			     /* 0x80A3	   Nixdorf */
+		    /* 0x80A4 - 0x80B3	   Siemens Gammasonics Inc. */
+		    /* 0x80C0 - 0x80C3	   DCA (Digital Comm. Assoc.) Data Exchange Cluster */
+		    /* 0x80C4 - 0x80C5	   Banyan Systems */
+#define	ETHERTYPE_PACER		0x80C6	/* Pacer Software */
+#define	ETHERTYPE_APPLITEK	0x80C7	/* Applitek Corporation */
+		    /* 0x80C8 - 0x80CC	   Intergraph Corporation */
+		    /* 0x80CD - 0x80CE	   Harris Corporation */
+		    /* 0x80CF - 0x80D2	   Taylor Instrument */
+		    /* 0x80D3 - 0x80D4	   Rosemount Corporation */
+#define	ETHERTYPE_SNA		0x80D5	/* IBM SNA Services over Ethernet */
+#define	ETHERTYPE_VARIAN	0x80DD	/* Varian Associates */
+		    /* 0x80DE - 0x80DF	   TRFS (Integrated Solutions Transparent Remote File System) */
+		    /* 0x80E0 - 0x80E3	   Allen-Bradley */
+		    /* 0x80E4 - 0x80F0	   Datability */
+#define	ETHERTYPE_RETIX		0x80F2	/* Retix */
+#define	ETHERTYPE_AARP		0x80F3	/* AppleTalk AARP */
+		    /* 0x80F4 - 0x80F5	   Kinetics */
+#define	ETHERTYPE_APOLLO	0x80F7	/* Apollo Computer */
+#define ETHERTYPE_VLAN		0x8100	/* IEEE 802.1Q VLAN tagging (XXX conflicts) */
+		    /* 0x80FF - 0x8101	   Wellfleet Communications (XXX conflicts) */
+#define	ETHERTYPE_BOFL		0x8102	/* Wellfleet; BOFL (Breath OF Life) pkts [every 5-10 secs.] */
+#define	ETHERTYPE_WELLFLEET	0x8103	/* Wellfleet Communications */
+		    /* 0x8107 - 0x8109	   Symbolics Private */
+#define	ETHERTYPE_TALARIS	0x812B	/* Talaris */
+#define	ETHERTYPE_WATERLOO	0x8130	/* Waterloo Microsystems Inc. (XXX which?) */
+#define	ETHERTYPE_HAYES		0x8130	/* Hayes Microcomputers (XXX which?) */
+#define	ETHERTYPE_VGLAB		0x8131	/* VG Laboratory Systems */
+		    /* 0x8132 - 0x8137	   Bridge Communications */
+#define	ETHERTYPE_IPX		0x8137	/* Novell (old) NetWare IPX (ECONFIG E option) */
+#define	ETHERTYPE_NOVELL	0x8138	/* Novell, Inc. */
+		    /* 0x8139 - 0x813D	   KTI */
+#define	ETHERTYPE_MUMPS		0x813F	/* M/MUMPS data sharing */
+#define	ETHERTYPE_AMOEBA	0x8145	/* Vrije Universiteit (NL) Amoeba 4 RPC (obsolete) */
+#define	ETHERTYPE_FLIP		0x8146	/* Vrije Universiteit (NL) FLIP (Fast Local Internet Protocol) */
+#define	ETHERTYPE_VURESERVED	0x8147	/* Vrije Universiteit (NL) [reserved] */
+#define	ETHERTYPE_LOGICRAFT	0x8148	/* Logicraft */
+#define	ETHERTYPE_NCD		0x8149	/* Network Computing Devices */
+#define	ETHERTYPE_ALPHA		0x814A	/* Alpha Micro */
+#define	ETHERTYPE_SNMP		0x814C	/* SNMP over Ethernet (see RFC1089) */
+		    /* 0x814D - 0x814E	   BIIN */
+#define	ETHERTYPE_TEC	0x814F	/* Technically Elite Concepts */
+#define	ETHERTYPE_RATIONAL	0x8150	/* Rational Corp */
+		    /* 0x8151 - 0x8153	   Qualcomm */
+		    /* 0x815C - 0x815E	   Computer Protocol Pty Ltd */
+		    /* 0x8164 - 0x8166	   Charles River Data Systems */
+#define	ETHERTYPE_XTP		0x817D	/* Protocol Engines XTP */
+#define	ETHERTYPE_SGITW		0x817E	/* SGI/Time Warner prop. */
+#define	ETHERTYPE_HIPPI_FP	0x8180	/* HIPPI-FP encapsulation */
+#define	ETHERTYPE_STP		0x8181	/* Scheduled Transfer STP, HIPPI-ST */
+		    /* 0x8182 - 0x8183	   Reserved for HIPPI-6400 */
+		    /* 0x8184 - 0x818C	   SGI prop. */
+#define	ETHERTYPE_MOTOROLA	0x818D	/* Motorola */
+#define	ETHERTYPE_NETBEUI	0x8191	/* PowerLAN NetBIOS/NetBEUI (PC) */
+		    /* 0x819A - 0x81A3	   RAD Network Devices */
+		    /* 0x81B7 - 0x81B9	   Xyplex */
+		    /* 0x81CC - 0x81D5	   Apricot Computers */
+		    /* 0x81D6 - 0x81DD	   Artisoft Lantastic */
+		    /* 0x81E6 - 0x81EF	   Polygon */
+		    /* 0x81F0 - 0x81F2	   Comsat Labs */
+		    /* 0x81F3 - 0x81F5	   SAIC */
+		    /* 0x81F6 - 0x81F8	   VG Analytical */
+		    /* 0x8203 - 0x8205	   QNX Software Systems Ltd. */
+		    /* 0x8221 - 0x8222	   Ascom Banking Systems */
+		    /* 0x823E - 0x8240	   Advanced Encryption Systems */
+		    /* 0x8263 - 0x826A	   Charles River Data Systems */
+		    /* 0x827F - 0x8282	   Athena Programming */
+		    /* 0x829A - 0x829B	   Inst Ind Info Tech */
+		    /* 0x829C - 0x82AB	   Taurus Controls */
+		    /* 0x82AC - 0x8693	   Walker Richer & Quinn */
+#define	ETHERTYPE_ACCTON	0x8390	/* Accton Technologies (unregistered) */
+#define	ETHERTYPE_TALARISMC	0x852B	/* Talaris multicast */
+#define	ETHERTYPE_KALPANA	0x8582	/* Kalpana */
+		    /* 0x8694 - 0x869D	   Idea Courier */
+		    /* 0x869E - 0x86A1	   Computer Network Tech */
+		    /* 0x86A3 - 0x86AC	   Gateway Communications */
+#define	ETHERTYPE_SECTRA	0x86DB	/* SECTRA */
+#define	ETHERTYPE_IPV6		0x86DD	/* IP protocol version 6 */
+#define	ETHERTYPE_DELTACON	0x86DE	/* Delta Controls */
+#define	ETHERTYPE_ATOMIC	0x86DF	/* ATOMIC */
+		    /* 0x86E0 - 0x86EF	   Landis & Gyr Powers */
+		    /* 0x8700 - 0x8710	   Motorola */
+#define	ETHERTYPE_RDP		0x8739	/* Control Technology Inc. RDP Without IP */
+#define	ETHERTYPE_MICP		0x873A	/* Control Technology Inc. Mcast Industrial Ctrl Proto. */
+		    /* 0x873B - 0x873C	   Control Technology Inc. Proprietary */
+#define	ETHERTYPE_TCPCOMP	0x876B	/* TCP/IP Compression (RFC1701) */
+#define	ETHERTYPE_IPAS		0x876C	/* IP Autonomous Systems (RFC1701) */
+#define	ETHERTYPE_SECUREDATA	0x876D	/* Secure Data (RFC1701) */
+#define	ETHERTYPE_FLOWCONTROL	0x8808	/* 802.3x flow control packet */
+#define	ETHERTYPE_SLOW		0x8809	/* 802.3ad link aggregation (LACP) */
+#define	ETHERTYPE_PPP		0x880B	/* PPP (obsolete by PPPoE) */
+#define	ETHERTYPE_HITACHI	0x8820	/* Hitachi Cable (Optoelectronic Systems Laboratory) */
+#define	ETHERTYPE_MPLS		0x8847	/* MPLS Unicast */
+#define	ETHERTYPE_MPLS_MCAST	0x8848	/* MPLS Multicast */
+#define	ETHERTYPE_AXIS		0x8856	/* Axis Communications AB proprietary bootstrap/config */
+#define	ETHERTYPE_PPPOEDISC	0x8863	/* PPP Over Ethernet Discovery Stage */
+#define	ETHERTYPE_PPPOE		0x8864	/* PPP Over Ethernet Session Stage */
+#define	ETHERTYPE_LANPROBE	0x8888	/* HP LanProbe test? */
+#define	ETHERTYPE_PAE		0x888e	/* EAPOL PAE/802.1x */
+#define	ETHERTYPE_LOOPBACK	0x9000	/* Loopback: used to test interfaces */
+#define	ETHERTYPE_LBACK		ETHERTYPE_LOOPBACK	/* DEC MOP loopback */
+#define	ETHERTYPE_XNSSM		0x9001	/* 3Com (Formerly Bridge Communications), XNS Systems Management */
+#define	ETHERTYPE_TCPSM		0x9002	/* 3Com (Formerly Bridge Communications), TCP/IP Systems Management */
+#define	ETHERTYPE_BCLOOP	0x9003	/* 3Com (Formerly Bridge Communications), loopback detection */
+#define	ETHERTYPE_DEBNI		0xAAAA	/* DECNET? Used by VAX 6220 DEBNI */
+#define	ETHERTYPE_SONIX		0xFAF5	/* Sonix Arpeggio */
+#define	ETHERTYPE_VITAL		0xFF00	/* BBN VITAL-LanBridge cache wakeups */
+		    /* 0xFF00 - 0xFF0F	   ISC Bunker Ramo */
+
+#define	ETHERTYPE_MAX		0xFFFF	/* Maximum valid ethernet type, reserved */
+
+/*
+ * The ETHERTYPE_NTRAILER packet types starting at ETHERTYPE_TRAIL have
+ * (type-ETHERTYPE_TRAIL)*512 bytes of data followed
+ * by an ETHER type (as given above) and then the (variable-length) header.
+ */
+#define	ETHERTYPE_TRAIL		0x1000		/* Trailer packet */
+#define	ETHERTYPE_NTRAILER	16
+
+#define	ETHERMTU	(ETHER_MAX_LEN-ETHER_HDR_LEN-ETHER_CRC_LEN)
+#define	ETHERMIN	(ETHER_MIN_LEN-ETHER_HDR_LEN-ETHER_CRC_LEN)
+#define	ETHERMTU_JUMBO	(ETHER_MAX_LEN_JUMBO - ETHER_HDR_LEN - ETHER_CRC_LEN)
+/*
+ * The ETHER_BPF_MTAP macro should be used by drivers which support hardware
+ * offload for VLAN tag processing.  It will check the mbuf to see if it has
+ * M_VLANTAG set, and if it does, will pass the packet along to
+ * ether_vlan_mtap.  This function will re-insert VLAN tags for the duration
+ * of the tap, so they show up properly for network analyzers.
+ */
+#define ETHER_BPF_MTAP(_ifp, _m) do {					\
+	if (bpf_peers_present((_ifp)->if_bpf)) {			\
+		M_ASSERTVALID(_m);					\
+		if (((_m)->m_flags & M_VLANTAG) != 0)			\
+			ether_vlan_mtap((_ifp)->if_bpf, (_m), NULL, 0);	\
+		else							\
+			bpf_mtap((_ifp)->if_bpf, (_m));			\
+	}								\
+} while (0)
+
+#ifdef _KERNEL
+
+struct ifnet;
+struct mbuf;
+struct route;
+struct sockaddr;
+struct bpf_if;
+
+extern	uint32_t ether_crc32_le(const uint8_t *, size_t);
+extern	uint32_t ether_crc32_be(const uint8_t *, size_t);
+extern	void ether_demux(struct ifnet *, struct mbuf *);
+extern	void ether_ifattach(struct ifnet *, const u_int8_t *);
+extern	void ether_ifdetach(struct ifnet *);
+extern	int  ether_ioctl(struct ifnet *, u_long, caddr_t);
+extern	int  ether_output(struct ifnet *,
+		   struct mbuf *, struct sockaddr *, struct route *);
+extern	int  ether_output_frame(struct ifnet *, struct mbuf *);
+extern	char *ether_sprintf(const u_int8_t *);
+void	ether_vlan_mtap(struct bpf_if *, struct mbuf *,
+	    void *, u_int);
+struct mbuf  *ether_vlanencap(struct mbuf *, uint16_t);
+
+#else /* _KERNEL */
+
+#include <sys/cdefs.h>
+
+/*
+ * Ethernet address conversion/parsing routines.
+ */
+__BEGIN_DECLS
+struct	ether_addr *ether_aton(const char *);
+struct	ether_addr *ether_aton_r(const char *, struct ether_addr *);
+int	ether_hostton(const char *, struct ether_addr *);
+int	ether_line(const char *, struct ether_addr *, char *);
+char 	*ether_ntoa(const struct ether_addr *);
+char 	*ether_ntoa_r(const struct ether_addr *, char *);
+int	ether_ntohost(char *, const struct ether_addr *);
+__END_DECLS
+
+#endif /* !_KERNEL */
+
+#endif /* !_NET_ETHERNET_H_ */
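
As a usage illustration only (not part of the patch), a minimal sketch of
checking a received frame against the constants above; it assumes the rest
of the include_override tree (which supplies the BSD u_char/u_short types
and __packed) is on the include path:

#include <stddef.h>
#include <stdint.h>
#include <net/ethernet.h>

/* return 1 if the frame (length including CRC) carries IPv4 */
static int
frame_is_ipv4(const uint8_t *buf, size_t len)
{
	uint16_t etype;

	if (len < ETHER_HDR_LEN || !ETHER_IS_VALID_LEN(len))
		return 0;
	/* EtherType follows the two 6-byte addresses, big-endian on the wire */
	etype = (uint16_t)((buf[2 * ETHER_ADDR_LEN] << 8) |
			buf[2 * ETHER_ADDR_LEN + 1]);
	return etype == ETHERTYPE_IP;
}
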
diff --git a/lib/librte_eal/windows/include_override/netinet/in.h b/lib/librte_eal/windows/include_override/netinet/in.h
new file mode 100644
index 000000000..5d4f411a3
--- /dev/null
+++ b/lib/librte_eal/windows/include_override/netinet/in.h
@@ -0,0 +1,48 @@
+#ifndef _IN_H_
+#define _IN_H_
+
+/*
+ * IPv6 address
+ */
+struct in6_addr {
+	union {
+		uint8_t		__u6_addr8[16];
+		uint16_t	__u6_addr16[8];
+		uint32_t	__u6_addr32[4];
+	} __u6_addr;		/* 128-bit IP6 address */
+};
+
+#define INET6_ADDRSTRLEN	46
+
+#ifndef _IN_ADDR_T_DECLARED
+typedef uint32_t		in_addr_t;
+#define	_IN_ADDR_T_DECLARED
+#endif
+
+/* Internet address (a structure for historical reasons). */
+#ifndef	_STRUCT_IN_ADDR_DECLARED
+struct in_addr {
+	in_addr_t s_addr;
+};
+#define	_STRUCT_IN_ADDR_DECLARED
+#endif
+
+#define AF_INET			0
+
+#define IPPROTO_IP		0
+#define IPPROTO_HOPOPTS		0
+#define	IPPROTO_IPV4		4		/* IPv4 encapsulation */
+#define	IPPROTO_IPIP		IPPROTO_IPV4	/* for compatibility */
+#define IPPROTO_TCP		6
+#define IPPROTO_UDP		17
+#define	IPPROTO_IPV6		41		/* IP6 header */
+#define	IPPROTO_ROUTING		43		/* IP6 routing header */
+#define	IPPROTO_FRAGMENT	44		/* IP6 fragmentation header */
+#define	IPPROTO_GRE		47		/* General Routing Encap. */
+#define	IPPROTO_ESP		50		/* IP6 Encap Sec. Payload */
+#define	IPPROTO_AH		51		/* IP6 Auth Header */
+#define	IPPROTO_DSTOPTS		60		/* IP6 destination option */
+
+
+#endif
diff --git a/lib/librte_eal/windows/include_override/netinet/tcp.h b/lib/librte_eal/windows/include_override/netinet/tcp.h
new file mode 100644
index 000000000..250c4c354
--- /dev/null
+++ b/lib/librte_eal/windows/include_override/netinet/tcp.h
@@ -0,0 +1,4 @@
+#ifndef NETINET_TCP_H
+#define NETINET_TCP_H
+
+#endif
diff --git a/lib/librte_eal/windows/include_override/pthread.h b/lib/librte_eal/windows/include_override/pthread.h
new file mode 100644
index 000000000..1505842e8
--- /dev/null
+++ b/lib/librte_eal/windows/include_override/pthread.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+#ifndef _PTHREAD_H_
+#define _PTHREAD_H_
+
+#define PTHREAD_BARRIER_SERIAL_THREAD TRUE
+
+typedef void* pthread_t;
+typedef void pthread_attr_t;
+typedef SYNCHRONIZATION_BARRIER pthread_barrier_t;
+typedef HANDLE pthread_mutex_t;
+
+#define pthread_barrier_init(barrier,attr,count) InitializeSynchronizationBarrier(barrier,count,-1)
+#define pthread_barrier_wait(barrier) EnterSynchronizationBarrier(barrier,SYNCHRONIZATION_BARRIER_FLAGS_BLOCK_ONLY)
+#define pthread_barrier_destroy(barrier) DeleteSynchronizationBarrier(barrier)
+#define pthread_cancel(thread) TerminateThread(thread,0)
+#define pthread_mutex_lock(mutex) WaitForSingleObject(mutex,INFINITE)
+#define pthread_mutex_unlock(mutex) ReleaseMutex(mutex)
+#define pthread_cond_signal(condition_variable) WakeConditionVariable(condition_variable)
+
+
+/* pthread function overrides */
+#define pthread_self()                                          ((pthread_t)GetCurrentThreadId())
+#define pthread_setaffinity_np(thread,size,cpuset)              WinSetThreadAffinityMask(thread, cpuset)
+#define pthread_getaffinity_np(thread,size,cpuset)              WinGetThreadAffinityMask(thread, cpuset)
+#define pthread_create(threadID, threadattr, threadfunc, args)  WinCreateThreadOverride(threadID, threadattr, threadfunc, args)
+
+typedef int pid_t;
+pid_t fork(void);
+
+static inline int WinSetThreadAffinityMask(void* threadID, unsigned long *cpuset)
+{
+	DWORD dwPrevAffinityMask = SetThreadAffinityMask(threadID, *cpuset);
+	return 0;
+}
+
+static inline int WinGetThreadAffinityMask(void* threadID, unsigned long *cpuset)
+{
+	/* Workaround for the lack of a GetThreadAffinityMask() API in Windows */
+	DWORD dwPrevAffinityMask = SetThreadAffinityMask(threadID, 0x1); /* obtain previous mask by setting dummy mask */
+	SetThreadAffinityMask(threadID, dwPrevAffinityMask); /* set it back! */
+	*cpuset = dwPrevAffinityMask;
+	return 0;
+}
+
+static inline int WinCreateThreadOverride(void* threadID, const void* threadattr, void* threadfunc, void* args)
+{
+	HANDLE hThread;
+	hThread = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)threadfunc, args, 0, (LPDWORD)threadID);
+	if (hThread)
+	{
+		SetPriorityClass(GetCurrentProcess(), REALTIME_PRIORITY_CLASS);
+		SetThreadPriority(hThread, THREAD_PRIORITY_TIME_CRITICAL);
+	}
+	return ((hThread != NULL) ? 0 : E_FAIL);
+}
+
+static inline int pthread_join(void* thread, void **value_ptr)
+{
+	return 0;
+}
+
+#endif /* _PTHREAD_H_ */
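
As a usage illustration only (not part of the patch), a minimal sketch of
the shim in use: one worker thread and the caller meet at a barrier.  Note
that pthread_barrier_init() maps to InitializeSynchronizationBarrier(),
which returns nonzero on success, so the error check below follows the
Win32 convention rather than the POSIX one.  worker() and the participant
count are made up for the example:

#include <windows.h>
#include <stdio.h>
#include <pthread.h>

static pthread_barrier_t barrier;

static void *
worker(void *arg)
{
	(void)arg;
	printf("worker: waiting at barrier\n");
	pthread_barrier_wait(&barrier);	/* EnterSynchronizationBarrier() */
	return NULL;
}

static int
run_worker(void)
{
	pthread_t tid;

	if (!pthread_barrier_init(&barrier, NULL, 2))
		return -1;
	if (pthread_create(&tid, NULL, (void *)worker, NULL) != 0)
		return -1;
	pthread_barrier_wait(&barrier);	/* rendezvous with the worker */
	pthread_barrier_destroy(&barrier);
	return 0;
}
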
diff --git a/lib/librte_eal/windows/include_override/rand48.h b/lib/librte_eal/windows/include_override/rand48.h
new file mode 100644
index 000000000..4adabb794
--- /dev/null
+++ b/lib/librte_eal/windows/include_override/rand48.h
@@ -0,0 +1,32 @@
+/*
+* Copyright (c) 1993 Martin Birgmeier
+* All rights reserved.
+*
+* You may redistribute unmodified or modified versions of this source
+* code provided that the above copyright notice and this and the
+* following conditions are retained.
+*
+* This software is provided ``as is'', and comes with no warranties
+* of any kind. I shall in no event be liable for anything that happens
+* to anyone/anything when using this software.
+*/
+
+#ifndef _RAND48_H_
+#define _RAND48_H_
+
+#include <math.h>
+#include <stdlib.h>
+
+void _dorand48(unsigned short[3]);
+void srand48(long seed);
+long lrand48(void);
+
+#define	RAND48_SEED_0	(0x330e)
+#define	RAND48_SEED_1	(0xabcd)
+#define	RAND48_SEED_2	(0x1234)
+#define	RAND48_MULT_0	(0xe66d)
+#define	RAND48_MULT_1	(0xdeec)
+#define	RAND48_MULT_2	(0x0005)
+#define	RAND48_ADD	(0x000b)
+
+#endif /* _RAND48_H_ */
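
As a usage illustration only (not part of the patch), a minimal sketch of
seeding and drawing a value through this emulation; it assumes the
linux-emu srand48.c/lrand48.c objects are linked:

#include <stdio.h>
#include <rand48.h>

static void
rand48_demo(void)
{
	long v;

	srand48(42);		/* fixed seed for reproducibility */
	v = lrand48();		/* uniform value in [0, 2^31) */
	printf("lrand48() = %ld\n", v);
}
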
diff --git a/lib/librte_eal/windows/include_override/sched.h b/lib/librte_eal/windows/include_override/sched.h
new file mode 100644
index 000000000..1a2df795c
--- /dev/null
+++ b/lib/librte_eal/windows/include_override/sched.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+
+#ifndef _SCHED_H_
+#define _SCHED_H_
+
+/* Re-defined for Windows */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int sched_yield(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SCHED_H_ */
diff --git a/lib/librte_eal/windows/include_override/sys/_iovec.h b/lib/librte_eal/windows/include_override/sys/_iovec.h
new file mode 100644
index 000000000..bd7207332
--- /dev/null
+++ b/lib/librte_eal/windows/include_override/sys/_iovec.h
@@ -0,0 +1,48 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993, 1994
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)uio.h	8.5 (Berkeley) 2/22/94
+ * $FreeBSD$
+ */
+
+#ifndef _SYS__IOVEC_H_
+#define	_SYS__IOVEC_H_
+
+#include <sys/_types.h>
+
+#ifndef _SIZE_T_DECLARED
+typedef	__size_t	size_t;
+#define	_SIZE_T_DECLARED
+#endif
+
+struct iovec {
+	void	*iov_base;	/* Base address. */
+	size_t	 iov_len;	/* Length. */
+};
+
+#endif /* !_SYS__IOVEC_H_ */
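struct iovec here is just a {base, length} pair. A short illustrative example of describing a two-part buffer with it (not code from this patch):

#include <stddef.h>

/* Illustrative only: describe a header + payload pair with two iovecs. */
static size_t iovec_fill_example(struct iovec iov[2],
				 void *hdr, size_t hdr_len,
				 void *payload, size_t payload_len)
{
	iov[0].iov_base = hdr;
	iov[0].iov_len = hdr_len;
	iov[1].iov_base = payload;
	iov[1].iov_len = payload_len;
	return hdr_len + payload_len;	/* total bytes described */
}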
diff --git a/lib/librte_eal/windows/include_override/sys/_sockaddr_storage.h b/lib/librte_eal/windows/include_override/sys/_sockaddr_storage.h
new file mode 100644
index 000000000..5c0048b56
--- /dev/null
+++ b/lib/librte_eal/windows/include_override/sys/_sockaddr_storage.h
@@ -0,0 +1,54 @@
+/*-
+ * Copyright (c) 1982, 1985, 1986, 1988, 1993, 1994
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)socket.h	8.4 (Berkeley) 2/21/94
+ * $FreeBSD$
+ */
+
+#ifndef _SYS__SOCKADDR_STORAGE_H_
+#define	_SYS__SOCKADDR_STORAGE_H_
+
+/*
+ * RFC 2553: protocol-independent placeholder for socket addresses
+ */
+#define	_SS_MAXSIZE	128U
+#define	_SS_ALIGNSIZE	(sizeof(__int64_t))
+#define	_SS_PAD1SIZE	(_SS_ALIGNSIZE - sizeof(unsigned char) - \
+			    sizeof(sa_family_t))
+#define	_SS_PAD2SIZE	(_SS_MAXSIZE - sizeof(unsigned char) - \
+			    sizeof(sa_family_t) - _SS_PAD1SIZE - _SS_ALIGNSIZE)
+
+struct sockaddr_storage {
+	unsigned char	ss_len;		/* address length */
+	sa_family_t	ss_family;	/* address family */
+	char		__ss_pad1[_SS_PAD1SIZE];
+	__int64_t	__ss_align;	/* force desired struct alignment */
+	char		__ss_pad2[_SS_PAD2SIZE];
+};
+
+#endif /* !_SYS__SOCKADDR_STORAGE_H_ */
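The padding arithmetic above is meant to make sockaddr_storage exactly _SS_MAXSIZE bytes with __ss_align on an _SS_ALIGNSIZE boundary. Illustrative compile-time checks (assuming sa_family_t and __int64_t are visible where this is compiled):

#include <stddef.h>

/* Illustrative only: a negative array size triggers a compile error if the
 * layout assumptions do not hold. */
typedef char ss_size_check[sizeof(struct sockaddr_storage) == _SS_MAXSIZE
			   ? 1 : -1];
typedef char ss_align_check[offsetof(struct sockaddr_storage, __ss_align)
			    % _SS_ALIGNSIZE == 0 ? 1 : -1];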
diff --git a/lib/librte_eal/windows/include_override/sys/_termios.h b/lib/librte_eal/windows/include_override/sys/_termios.h
new file mode 100644
index 000000000..ae07158a9
--- /dev/null
+++ b/lib/librte_eal/windows/include_override/sys/_termios.h
@@ -0,0 +1,222 @@
+/*-
+ * Copyright (c) 1988, 1989, 1993, 1994
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)termios.h	8.3 (Berkeley) 3/28/94
+ * $FreeBSD: release/9.1.0/sys/sys/_termios.h 199898 2009-11-28 23:50:48Z ed $
+ */
+
+#ifndef _SYS__TERMIOS_H_
+#define	_SYS__TERMIOS_H_
+
+/*
+ * Special Control Characters
+ *
+ * Index into c_cc[] character array.
+ *
+ *	Name	     Subscript	Enabled by
+ */
+#define	VEOF		0	/* ICANON */
+#define	VEOL		1	/* ICANON */
+#ifndef _POSIX_SOURCE
+#define	VEOL2		2	/* ICANON together with IEXTEN */
+#endif
+#define	VERASE		3	/* ICANON */
+#ifndef _POSIX_SOURCE
+#define	VWERASE 	4	/* ICANON together with IEXTEN */
+#endif
+#define	VKILL		5	/* ICANON */
+#ifndef _POSIX_SOURCE
+#define	VREPRINT 	6	/* ICANON together with IEXTEN */
+#define	VERASE2 	7	/* ICANON */
+#endif
+/*			7	   ex-spare 1 */
+#define	VINTR		8	/* ISIG */
+#define	VQUIT		9	/* ISIG */
+#define	VSUSP		10	/* ISIG */
+#ifndef _POSIX_SOURCE
+#define	VDSUSP		11	/* ISIG together with IEXTEN */
+#endif
+#define	VSTART		12	/* IXON, IXOFF */
+#define	VSTOP		13	/* IXON, IXOFF */
+#ifndef _POSIX_SOURCE
+#define	VLNEXT		14	/* IEXTEN */
+#define	VDISCARD	15	/* IEXTEN */
+#endif
+#define	VMIN		16	/* !ICANON */
+#define	VTIME		17	/* !ICANON */
+#ifndef _POSIX_SOURCE
+#define	VSTATUS		18	/* ICANON together with IEXTEN */
+/*			19	   spare 2 */
+#endif
+#define	NCCS		20
+
+#define	_POSIX_VDISABLE	0xff
+
+/*
+ * Input flags - software input processing
+ */
+#define	IGNBRK		0x00000001	/* ignore BREAK condition */
+#define	BRKINT		0x00000002	/* map BREAK to SIGINTR */
+#define	IGNPAR		0x00000004	/* ignore (discard) parity errors */
+#define	PARMRK		0x00000008	/* mark parity and framing errors */
+#define	INPCK		0x00000010	/* enable checking of parity errors */
+#define	ISTRIP		0x00000020	/* strip 8th bit off chars */
+#define	INLCR		0x00000040	/* map NL into CR */
+#define	IGNCR		0x00000080	/* ignore CR */
+#define	ICRNL		0x00000100	/* map CR to NL (ala CRMOD) */
+#define	IXON		0x00000200	/* enable output flow control */
+#define	IXOFF		0x00000400	/* enable input flow control */
+#ifndef _POSIX_SOURCE
+#define	IXANY		0x00000800	/* any char will restart after stop */
+#define	IMAXBEL		0x00002000	/* ring bell on input queue full */
+#endif  /*_POSIX_SOURCE */
+
+/*
+ * Output flags - software output processing
+ */
+#define	OPOST		0x00000001	/* enable following output processing */
+#ifndef _POSIX_SOURCE
+#define	ONLCR		0x00000002	/* map NL to CR-NL (ala CRMOD) */
+#define	TABDLY		0x00000004	/* tab delay mask */
+#define	    TAB0	    0x00000000	    /* no tab delay and expansion */
+#define	    TAB3	    0x00000004	    /* expand tabs to spaces */
+#define	ONOEOT		0x00000008	/* discard EOT's (^D) on output */
+#define	OCRNL		0x00000010	/* map CR to NL on output */
+#define	ONOCR		0x00000020	/* no CR output at column 0 */
+#define	ONLRET		0x00000040	/* NL performs CR function */
+#endif  /*_POSIX_SOURCE */
+
+/*
+ * Control flags - hardware control of terminal
+ */
+#ifndef _POSIX_SOURCE
+#define	CIGNORE		0x00000001	/* ignore control flags */
+#endif
+#define	CSIZE		0x00000300	/* character size mask */
+#define	    CS5		    0x00000000	    /* 5 bits (pseudo) */
+#define	    CS6		    0x00000100	    /* 6 bits */
+#define	    CS7		    0x00000200	    /* 7 bits */
+#define	    CS8		    0x00000300	    /* 8 bits */
+#define	CSTOPB		0x00000400	/* send 2 stop bits */
+#define	CREAD		0x00000800	/* enable receiver */
+#define	PARENB		0x00001000	/* parity enable */
+#define	PARODD		0x00002000	/* odd parity, else even */
+#define	HUPCL		0x00004000	/* hang up on last close */
+#define	CLOCAL		0x00008000	/* ignore modem status lines */
+#ifndef _POSIX_SOURCE
+#define	CCTS_OFLOW	0x00010000	/* CTS flow control of output */
+#define	CRTSCTS		(CCTS_OFLOW | CRTS_IFLOW)
+#define	CRTS_IFLOW	0x00020000	/* RTS flow control of input */
+#define	CDTR_IFLOW	0x00040000	/* DTR flow control of input */
+#define	CDSR_OFLOW	0x00080000	/* DSR flow control of output */
+#define	CCAR_OFLOW	0x00100000	/* DCD flow control of output */
+#endif
+
+
+/*
+ * "Local" flags - dumping ground for other state
+ *
+ * Warning: some flags in this structure begin with
+ * the letter "I" and look like they belong in the
+ * input flag.
+ */
+
+#ifndef _POSIX_SOURCE
+#define	ECHOKE		0x00000001	/* visual erase for line kill */
+#endif  /*_POSIX_SOURCE */
+#define	ECHOE		0x00000002	/* visually erase chars */
+#define	ECHOK		0x00000004	/* echo NL after line kill */
+#define	ECHO		0x00000008	/* enable echoing */
+#define	ECHONL		0x00000010	/* echo NL even if ECHO is off */
+#ifndef _POSIX_SOURCE
+#define	ECHOPRT		0x00000020	/* visual erase mode for hardcopy */
+#define	ECHOCTL  	0x00000040	/* echo control chars as ^(Char) */
+#endif  /*_POSIX_SOURCE */
+#define	ISIG		0x00000080	/* enable signals INTR, QUIT, [D]SUSP */
+#define	ICANON		0x00000100	/* canonicalize input lines */
+#ifndef _POSIX_SOURCE
+#define	ALTWERASE	0x00000200	/* use alternate WERASE algorithm */
+#endif  /*_POSIX_SOURCE */
+#define	IEXTEN		0x00000400	/* enable DISCARD and LNEXT */
+#define	EXTPROC         0x00000800      /* external processing */
+#define	TOSTOP		0x00400000	/* stop background jobs from output */
+#ifndef _POSIX_SOURCE
+#define	FLUSHO		0x00800000	/* output being flushed (state) */
+#define	NOKERNINFO	0x02000000	/* no kernel output from VSTATUS */
+#define	PENDIN		0x20000000	/* XXX retype pending input (state) */
+#endif  /*_POSIX_SOURCE */
+#define	NOFLSH		0x80000000	/* don't flush after interrupt */
+
+/*
+ * Standard speeds
+ */
+#define	B0	0
+#define	B50	50
+#define	B75	75
+#define	B110	110
+#define	B134	134
+#define	B150	150
+#define	B200	200
+#define	B300	300
+#define	B600	600
+#define	B1200	1200
+#define	B1800	1800
+#define	B2400	2400
+#define	B4800	4800
+#define	B9600	9600
+#define	B19200	19200
+#define	B38400	38400
+#ifndef _POSIX_SOURCE
+#define	B7200	7200
+#define	B14400	14400
+#define	B28800	28800
+#define	B57600	57600
+#define	B76800	76800
+#define	B115200	115200
+#define	B230400	230400
+#define	B460800	460800
+#define	B921600	921600
+#define	EXTA	19200
+#define	EXTB	38400
+#endif  /* !_POSIX_SOURCE */
+
+typedef unsigned int	tcflag_t;
+typedef unsigned char	cc_t;
+typedef unsigned int	speed_t;
+
+struct termios {
+	tcflag_t	c_iflag;	/* input flags */
+	tcflag_t	c_oflag;	/* output flags */
+	tcflag_t	c_cflag;	/* control flags */
+	tcflag_t	c_lflag;	/* local flags */
+	cc_t		c_cc[NCCS];	/* control chars */
+	speed_t		c_ispeed;	/* input speed */
+	speed_t		c_ospeed;	/* output speed */
+};
+
+#endif /* !_SYS__TERMIOS_H_ */
diff --git a/lib/librte_eal/windows/include_override/sys/_types.h b/lib/librte_eal/windows/include_override/sys/_types.h
new file mode 100644
index 000000000..27ecaf4f0
--- /dev/null
+++ b/lib/librte_eal/windows/include_override/sys/_types.h
@@ -0,0 +1,105 @@
+/*-
+ * Copyright (c) 2002 Mike Barcroft <mike@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS__TYPES_H_
+#define _SYS__TYPES_H_
+
+#include <sys/cdefs.h>
+#include <machine/_types.h>
+
+/*
+ * Standard type definitions.
+ */
+typedef	__uint32_t	__blksize_t;	/* file block size */
+typedef	__int64_t	__blkcnt_t;	/* file block count */
+typedef	__int32_t	__clockid_t;	/* clock_gettime()... */
+typedef	__uint64_t	__cap_rights_t;	/* capability rights */
+typedef	__uint32_t	__fflags_t;	/* file flags */
+typedef	__uint64_t	__fsblkcnt_t;
+typedef	__uint64_t	__fsfilcnt_t;
+typedef	__uint32_t	__gid_t;
+typedef	__int64_t	__id_t;		/* can hold a gid_t, pid_t, or uid_t */
+typedef	__uint32_t	__ino_t;	/* inode number */
+typedef	long		__key_t;	/* IPC key (for Sys V IPC) */
+typedef	__int32_t	__lwpid_t;	/* Thread ID (a.k.a. LWP) */
+typedef	__uint16_t	__mode_t;	/* permissions */
+typedef	int		__accmode_t;	/* access permissions */
+typedef	int		__nl_item;
+typedef	__uint16_t	__nlink_t;	/* link count */
+typedef	__int64_t	__off_t;	/* file offset */
+typedef	__int32_t	__pid_t;	/* process [group] */
+typedef	__int64_t	__rlim_t;	/* resource limit - intentionally */
+					/* signed, because of legacy code */
+					/* that uses -1 for RLIM_INFINITY */
+typedef	__uint8_t	__sa_family_t;
+typedef	__uint32_t	__socklen_t;
+typedef	long		__suseconds_t;	/* microseconds (signed) */
+typedef	struct __timer	*__timer_t;	/* timer_gettime()... */
+typedef	struct __mq	*__mqd_t;	/* mq_open()... */
+typedef	__uint32_t	__uid_t;
+typedef	unsigned int	__useconds_t;	/* microseconds (unsigned) */
+typedef	int		__cpuwhich_t;	/* which parameter for cpuset. */
+typedef	int		__cpulevel_t;	/* level parameter for cpuset. */
+typedef int		__cpusetid_t;	/* cpuset identifier. */
+
+/*
+ * Unusual type definitions.
+ */
+/*
+ * rune_t is declared to be an ``int'' instead of the more natural
+ * ``unsigned long'' or ``long''.  Two things are happening here.  It is not
+ * unsigned so that EOF (-1) can be naturally assigned to it and used.  Also,
+ * it looks like 10646 will be a 31 bit standard.  This means that if your
+ * ints cannot hold 32 bits, you will be in trouble.  The reason an int was
+ * chosen over a long is that the is*() and to*() routines take ints (says
+ * ANSI C), but they use __ct_rune_t instead of int.
+ *
+ * NOTE: rune_t is not covered by ANSI nor other standards, and should not
+ * be instantiated outside of lib/libc/locale.  Use wchar_t.  wint_t and
+ * rune_t must be the same type.  Also, wint_t should be able to hold all
+ * members of the largest character set plus one extra value (WEOF), and
+ * must be at least 16 bits.
+ */
+typedef	int		__ct_rune_t;	/* arg type for ctype funcs */
+typedef	__ct_rune_t	__rune_t;	/* rune_t (see above) */
+typedef	__ct_rune_t	__wint_t;	/* wint_t (see above) */
+
+typedef	__uint32_t	__dev_t;	/* device number */
+
+typedef	__uint32_t	__fixpt_t;	/* fixed point number */
+
+/*
+ * mbstate_t is an opaque object to keep conversion state during multibyte
+ * stream conversions.
+ */
+typedef union {
+	char		__mbstate8[128];
+	__int64_t	_mbstateL;	/* for alignment */
+} __mbstate_t;
+
+#endif /* !_SYS__TYPES_H_ */
diff --git a/lib/librte_eal/windows/include_override/sys/cdefs.h b/lib/librte_eal/windows/include_override/sys/cdefs.h
new file mode 100644
index 000000000..b4d2009c5
--- /dev/null
+++ b/lib/librte_eal/windows/include_override/sys/cdefs.h
@@ -0,0 +1,3 @@
+#ifndef _SYS_CDEFS_H_
+#define _SYS_CDEFS_H_
+#endif
diff --git a/lib/librte_eal/windows/include_override/sys/mman.h b/lib/librte_eal/windows/include_override/sys/mman.h
new file mode 100644
index 000000000..7a0ff4258
--- /dev/null
+++ b/lib/librte_eal/windows/include_override/sys/mman.h
@@ -0,0 +1,63 @@
+/*
+* sys/mman.h
+* mman-win32
+*/
+
+#ifndef _SYS_MMAN_H_
+#define _SYS_MMAN_H_
+
+#ifndef _WIN32_WINNT		// Allow use of features specific to Windows XP or later.
+#define _WIN32_WINNT 0x0501	// Change this to the appropriate value to target other versions of Windows.
+#endif
+
+/* All the headers include this file. */
+#ifndef _MSC_VER
+#include <_mingw.h>
+#endif
+
+/* Determine offset type */
+#include <stdint.h>
+#if defined(_WIN64)
+typedef int64_t OffsetType;
+#else
+typedef uint32_t OffsetType;
+#endif
+
+#include <sys/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define PROT_NONE       0
+#define PROT_READ       1
+#define PROT_WRITE      2
+#define PROT_EXEC       4
+
+#define MAP_FILE        0
+#define MAP_SHARED      1
+#define MAP_PRIVATE     2
+#define MAP_TYPE        0xf
+#define MAP_FIXED       0x10
+#define MAP_ANONYMOUS   0x20
+#define MAP_ANON        MAP_ANONYMOUS
+
+#define MAP_FAILED      ((void *)-1)
+
+	/* Flags for msync. */
+#define MS_ASYNC        1
+#define MS_SYNC         2
+#define MS_INVALIDATE   4
+
+	void*   mmap(void *addr, size_t len, int prot, int flags, int fildes, OffsetType off);
+	int     munmap(void *addr, size_t len);
+	int     _mprotect(void *addr, size_t len, int prot);
+	int     msync(void *addr, size_t len, int flags);
+	int     mlock(const void *addr, size_t len);
+	int     munlock(const void *addr, size_t len);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*  _SYS_MMAN_H_ */
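The prototypes above follow the mman-win32 shim. As an illustration of how the flags can map onto Win32 (not the actual code in linux-emu/mman.c), an anonymous private mapping can be emulated with VirtualAlloc():

#include <windows.h>

/* Illustrative sketch only: MAP_ANONYMOUS | MAP_PRIVATE via VirtualAlloc. */
static void *mmap_anon_sketch(size_t len, int prot)
{
	DWORD win_prot = (prot & PROT_WRITE) ? PAGE_READWRITE :
			 (prot & PROT_READ) ? PAGE_READONLY : PAGE_NOACCESS;
	void *addr = VirtualAlloc(NULL, len, MEM_RESERVE | MEM_COMMIT, win_prot);

	return (addr != NULL) ? addr : MAP_FAILED;
}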
diff --git a/lib/librte_eal/windows/include_override/sys/netbsd/queue.h b/lib/librte_eal/windows/include_override/sys/netbsd/queue.h
new file mode 100644
index 000000000..99d01a55b
--- /dev/null
+++ b/lib/librte_eal/windows/include_override/sys/netbsd/queue.h
@@ -0,0 +1,846 @@
+/*	$NetBSD: queue.h,v 1.68 2014/11/19 08:10:01 uebayasi Exp $	*/
+
+/*
+ * Copyright (c) 1991, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)queue.h	8.5 (Berkeley) 8/20/94
+ */
+
+#ifndef	_SYS_QUEUE_H_
+#define	_SYS_QUEUE_H_
+
+/*
+ * This file defines five types of data structures: singly-linked lists,
+ * lists, simple queues, tail queues, and circular queues.
+ *
+ * A singly-linked list is headed by a single forward pointer. The
+ * elements are singly linked for minimum space and pointer manipulation
+ * overhead at the expense of O(n) removal for arbitrary elements. New
+ * elements can be added to the list after an existing element or at the
+ * head of the list.  Elements being removed from the head of the list
+ * should use the explicit macro for this purpose for optimum
+ * efficiency. A singly-linked list may only be traversed in the forward
+ * direction.  Singly-linked lists are ideal for applications with large
+ * datasets and few or no removals or for implementing a LIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A simple queue is headed by a pair of pointers, one the head of the
+ * list and the other to the tail of the list. The elements are singly
+ * linked to save space, so elements can only be removed from the
+ * head of the list. New elements can be added to the list after
+ * an existing element, at the head of the list, or at the end of the
+ * list. A simple queue may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * A circle queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the list.
+ * A circle queue may be traversed in either direction, but has a more
+ * complex end of list detection.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ */
+
+/*
+ * Include the definition of NULL only on NetBSD because sys/null.h
+ * is not available elsewhere.  This conditional makes the header
+ * portable and it can simply be dropped verbatim into any system.
+ * The caveat is that on other systems some other header
+ * must provide NULL before the macros can be used.
+ */
+#ifdef __NetBSD__
+#include <sys/null.h>
+#endif
+
+#if defined(QUEUEDEBUG)
+# if defined(_KERNEL)
+#  define QUEUEDEBUG_ABORT(...) panic(__VA_ARGS__)
+# else
+#  include <err.h>
+#  define QUEUEDEBUG_ABORT(...) err(1, __VA_ARGS__)
+# endif
+#endif
+
+/*
+ * Singly-linked List definitions.
+ */
+#define	SLIST_HEAD(name, type)						\
+struct name {								\
+	struct type *slh_first;	/* first element */			\
+}
+
+#define	SLIST_HEAD_INITIALIZER(head)					\
+	{ NULL }
+
+#define	SLIST_ENTRY(type)						\
+struct {								\
+	struct type *sle_next;	/* next element */			\
+}
+
+/*
+ * Singly-linked List access methods.
+ */
+#define	SLIST_FIRST(head)	((head)->slh_first)
+#define	SLIST_END(head)		NULL
+#define	SLIST_EMPTY(head)	((head)->slh_first == NULL)
+#define	SLIST_NEXT(elm, field)	((elm)->field.sle_next)
+
+#define	SLIST_FOREACH(var, head, field)					\
+	for((var) = (head)->slh_first;					\
+	    (var) != SLIST_END(head);					\
+	    (var) = (var)->field.sle_next)
+
+#define	SLIST_FOREACH_SAFE(var, head, field, tvar)			\
+	for ((var) = SLIST_FIRST((head));				\
+	    (var) != SLIST_END(head) &&					\
+	    ((tvar) = SLIST_NEXT((var), field), 1);			\
+	    (var) = (tvar))
+
+/*
+ * Singly-linked List functions.
+ */
+#define	SLIST_INIT(head) do {						\
+	(head)->slh_first = SLIST_END(head);				\
+} while (/*CONSTCOND*/0)
+
+#define	SLIST_INSERT_AFTER(slistelm, elm, field) do {			\
+	(elm)->field.sle_next = (slistelm)->field.sle_next;		\
+	(slistelm)->field.sle_next = (elm);				\
+} while (/*CONSTCOND*/0)
+
+#define	SLIST_INSERT_HEAD(head, elm, field) do {			\
+	(elm)->field.sle_next = (head)->slh_first;			\
+	(head)->slh_first = (elm);					\
+} while (/*CONSTCOND*/0)
+
+#define	SLIST_REMOVE_AFTER(slistelm, field) do {			\
+	(slistelm)->field.sle_next =					\
+	    SLIST_NEXT(SLIST_NEXT((slistelm), field), field);		\
+} while (/*CONSTCOND*/0)
+
+#define	SLIST_REMOVE_HEAD(head, field) do {				\
+	(head)->slh_first = (head)->slh_first->field.sle_next;		\
+} while (/*CONSTCOND*/0)
+
+#define	SLIST_REMOVE(head, elm, type, field) do {			\
+	if ((head)->slh_first == (elm)) {				\
+		SLIST_REMOVE_HEAD((head), field);			\
+	}								\
+	else {								\
+		struct type *curelm = (head)->slh_first;		\
+		while(curelm->field.sle_next != (elm))			\
+			curelm = curelm->field.sle_next;		\
+		curelm->field.sle_next =				\
+		    curelm->field.sle_next->field.sle_next;		\
+	}								\
+} while (/*CONSTCOND*/0)
+
+
+/*
+ * List definitions.
+ */
+#define	LIST_HEAD(name, type)						\
+struct name {								\
+	struct type *lh_first;	/* first element */			\
+}
+
+#define	LIST_HEAD_INITIALIZER(head)					\
+	{ NULL }
+
+#define	LIST_ENTRY(type)						\
+struct {								\
+	struct type *le_next;	/* next element */			\
+	struct type **le_prev;	/* address of previous next element */	\
+}
+
+/*
+ * List access methods.
+ */
+#define	LIST_FIRST(head)		((head)->lh_first)
+#define	LIST_END(head)			NULL
+#define	LIST_EMPTY(head)		((head)->lh_first == LIST_END(head))
+#define	LIST_NEXT(elm, field)		((elm)->field.le_next)
+
+#define	LIST_FOREACH(var, head, field)					\
+	for ((var) = ((head)->lh_first);				\
+	    (var) != LIST_END(head);					\
+	    (var) = ((var)->field.le_next))
+
+#define	LIST_FOREACH_SAFE(var, head, field, tvar)			\
+	for ((var) = LIST_FIRST((head));				\
+	    (var) != LIST_END(head) &&					\
+	    ((tvar) = LIST_NEXT((var), field), 1);			\
+	    (var) = (tvar))
+
+#define	LIST_MOVE(head1, head2) do {					\
+	LIST_INIT((head2));						\
+	if (!LIST_EMPTY((head1))) {					\
+		(head2)->lh_first = (head1)->lh_first;			\
+		LIST_INIT((head1));					\
+	}								\
+} while (/*CONSTCOND*/0)
+
+/*
+ * List functions.
+ */
+#if defined(QUEUEDEBUG)
+#define	QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)			\
+	if ((head)->lh_first &&						\
+	    (head)->lh_first->field.le_prev != &(head)->lh_first)	\
+		QUEUEDEBUG_ABORT("LIST_INSERT_HEAD %p %s:%d", (head),	\
+		    __FILE__, __LINE__);
+#define	QUEUEDEBUG_LIST_OP(elm, field)					\
+	if ((elm)->field.le_next &&					\
+	    (elm)->field.le_next->field.le_prev !=			\
+	    &(elm)->field.le_next)					\
+		QUEUEDEBUG_ABORT("LIST_* forw %p %s:%d", (elm),		\
+		    __FILE__, __LINE__);				\
+	if (*(elm)->field.le_prev != (elm))				\
+		QUEUEDEBUG_ABORT("LIST_* back %p %s:%d", (elm),		\
+		    __FILE__, __LINE__);
+#define	QUEUEDEBUG_LIST_POSTREMOVE(elm, field)				\
+	(elm)->field.le_next = (void *)1L;				\
+	(elm)->field.le_prev = (void *)1L;
+#else
+#define	QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)
+#define	QUEUEDEBUG_LIST_OP(elm, field)
+#define	QUEUEDEBUG_LIST_POSTREMOVE(elm, field)
+#endif
+
+#define	LIST_INIT(head) do {						\
+	(head)->lh_first = LIST_END(head);				\
+} while (/*CONSTCOND*/0)
+
+#define	LIST_INSERT_AFTER(listelm, elm, field) do {			\
+	QUEUEDEBUG_LIST_OP((listelm), field)				\
+	if (((elm)->field.le_next = (listelm)->field.le_next) != 	\
+	    LIST_END(head))						\
+		(listelm)->field.le_next->field.le_prev =		\
+		    &(elm)->field.le_next;				\
+	(listelm)->field.le_next = (elm);				\
+	(elm)->field.le_prev = &(listelm)->field.le_next;		\
+} while (/*CONSTCOND*/0)
+
+#define	LIST_INSERT_BEFORE(listelm, elm, field) do {			\
+	QUEUEDEBUG_LIST_OP((listelm), field)				\
+	(elm)->field.le_prev = (listelm)->field.le_prev;		\
+	(elm)->field.le_next = (listelm);				\
+	*(listelm)->field.le_prev = (elm);				\
+	(listelm)->field.le_prev = &(elm)->field.le_next;		\
+} while (/*CONSTCOND*/0)
+
+#define	LIST_INSERT_HEAD(head, elm, field) do {				\
+	QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field)		\
+	if (((elm)->field.le_next = (head)->lh_first) != LIST_END(head))\
+		(head)->lh_first->field.le_prev = &(elm)->field.le_next;\
+	(head)->lh_first = (elm);					\
+	(elm)->field.le_prev = &(head)->lh_first;			\
+} while (/*CONSTCOND*/0)
+
+#define	LIST_REMOVE(elm, field) do {					\
+	QUEUEDEBUG_LIST_OP((elm), field)				\
+	if ((elm)->field.le_next != NULL)				\
+		(elm)->field.le_next->field.le_prev = 			\
+		    (elm)->field.le_prev;				\
+	*(elm)->field.le_prev = (elm)->field.le_next;			\
+	QUEUEDEBUG_LIST_POSTREMOVE((elm), field)			\
+} while (/*CONSTCOND*/0)
+
+#define LIST_REPLACE(elm, elm2, field) do {				\
+	if (((elm2)->field.le_next = (elm)->field.le_next) != NULL)	\
+		(elm2)->field.le_next->field.le_prev =			\
+		    &(elm2)->field.le_next;				\
+	(elm2)->field.le_prev = (elm)->field.le_prev;			\
+	*(elm2)->field.le_prev = (elm2);				\
+	QUEUEDEBUG_LIST_POSTREMOVE((elm), field)			\
+} while (/*CONSTCOND*/0)
+
+/*
+ * Simple queue definitions.
+ */
+#define	SIMPLEQ_HEAD(name, type)					\
+struct name {								\
+	struct type *sqh_first;	/* first element */			\
+	struct type **sqh_last;	/* addr of last next element */		\
+}
+
+#define	SIMPLEQ_HEAD_INITIALIZER(head)					\
+	{ NULL, &(head).sqh_first }
+
+#define	SIMPLEQ_ENTRY(type)						\
+struct {								\
+	struct type *sqe_next;	/* next element */			\
+}
+
+/*
+ * Simple queue access methods.
+ */
+#define	SIMPLEQ_FIRST(head)		((head)->sqh_first)
+#define	SIMPLEQ_END(head)		NULL
+#define	SIMPLEQ_EMPTY(head)		((head)->sqh_first == SIMPLEQ_END(head))
+#define	SIMPLEQ_NEXT(elm, field)	((elm)->field.sqe_next)
+
+#define	SIMPLEQ_FOREACH(var, head, field)				\
+	for ((var) = ((head)->sqh_first);				\
+	    (var) != SIMPLEQ_END(head);					\
+	    (var) = ((var)->field.sqe_next))
+
+#define	SIMPLEQ_FOREACH_SAFE(var, head, field, next)			\
+	for ((var) = ((head)->sqh_first);				\
+	    (var) != SIMPLEQ_END(head) &&				\
+	    ((next = ((var)->field.sqe_next)), 1);			\
+	    (var) = (next))
+
+/*
+ * Simple queue functions.
+ */
+#define	SIMPLEQ_INIT(head) do {						\
+	(head)->sqh_first = NULL;					\
+	(head)->sqh_last = &(head)->sqh_first;				\
+} while (/*CONSTCOND*/0)
+
+#define	SIMPLEQ_INSERT_HEAD(head, elm, field) do {			\
+	if (((elm)->field.sqe_next = (head)->sqh_first) == NULL)	\
+		(head)->sqh_last = &(elm)->field.sqe_next;		\
+	(head)->sqh_first = (elm);					\
+} while (/*CONSTCOND*/0)
+
+#define	SIMPLEQ_INSERT_TAIL(head, elm, field) do {			\
+	(elm)->field.sqe_next = NULL;					\
+	*(head)->sqh_last = (elm);					\
+	(head)->sqh_last = &(elm)->field.sqe_next;			\
+} while (/*CONSTCOND*/0)
+
+#define	SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
+	if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
+		(head)->sqh_last = &(elm)->field.sqe_next;		\
+	(listelm)->field.sqe_next = (elm);				\
+} while (/*CONSTCOND*/0)
+
+#define	SIMPLEQ_REMOVE_HEAD(head, field) do {				\
+	if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
+		(head)->sqh_last = &(head)->sqh_first;			\
+} while (/*CONSTCOND*/0)
+
+#define SIMPLEQ_REMOVE_AFTER(head, elm, field) do {			\
+	if (((elm)->field.sqe_next = (elm)->field.sqe_next->field.sqe_next) \
+	    == NULL)							\
+		(head)->sqh_last = &(elm)->field.sqe_next;		\
+} while (/*CONSTCOND*/0)
+
+#define	SIMPLEQ_REMOVE(head, elm, type, field) do {			\
+	if ((head)->sqh_first == (elm)) {				\
+		SIMPLEQ_REMOVE_HEAD((head), field);			\
+	} else {							\
+		struct type *curelm = (head)->sqh_first;		\
+		while (curelm->field.sqe_next != (elm))			\
+			curelm = curelm->field.sqe_next;		\
+		if ((curelm->field.sqe_next =				\
+			curelm->field.sqe_next->field.sqe_next) == NULL) \
+			    (head)->sqh_last = &(curelm)->field.sqe_next; \
+	}								\
+} while (/*CONSTCOND*/0)
+
+#define	SIMPLEQ_CONCAT(head1, head2) do {				\
+	if (!SIMPLEQ_EMPTY((head2))) {					\
+		*(head1)->sqh_last = (head2)->sqh_first;		\
+		(head1)->sqh_last = (head2)->sqh_last;		\
+		SIMPLEQ_INIT((head2));					\
+	}								\
+} while (/*CONSTCOND*/0)
+
+#define	SIMPLEQ_LAST(head, type, field)					\
+	(SIMPLEQ_EMPTY((head)) ?						\
+		NULL :							\
+	        ((struct type *)(void *)				\
+		((char *)((head)->sqh_last) - offsetof(struct type, field))))
+
+/*
+ * Tail queue definitions.
+ */
+#define	_TAILQ_HEAD(name, type, qual)					\
+struct name {								\
+	qual type *tqh_first;		/* first element */		\
+	qual type *qual *tqh_last;	/* addr of last next element */	\
+}
+#define TAILQ_HEAD(name, type)	_TAILQ_HEAD(name, struct type,)
+
+#define	TAILQ_HEAD_INITIALIZER(head)					\
+	{ TAILQ_END(head), &(head).tqh_first }
+
+#define	_TAILQ_ENTRY(type, qual)					\
+struct {								\
+	qual type *tqe_next;		/* next element */		\
+	qual type *qual *tqe_prev;	/* address of previous next element */\
+}
+#define TAILQ_ENTRY(type)	_TAILQ_ENTRY(struct type,)
+
+/*
+ * Tail queue access methods.
+ */
+#define	TAILQ_FIRST(head)		((head)->tqh_first)
+#define	TAILQ_END(head)			(NULL)
+#define	TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)
+#define	TAILQ_LAST(head, headname) \
+	(*(((struct headname *)((head)->tqh_last))->tqh_last))
+#define	TAILQ_PREV(elm, headname, field) \
+	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+#define	TAILQ_EMPTY(head)		(TAILQ_FIRST(head) == TAILQ_END(head))
+
+
+#define	TAILQ_FOREACH(var, head, field)					\
+	for ((var) = ((head)->tqh_first);				\
+	    (var) != TAILQ_END(head);					\
+	    (var) = ((var)->field.tqe_next))
+
+#define	TAILQ_FOREACH_SAFE(var, head, field, next)			\
+	for ((var) = ((head)->tqh_first);				\
+	    (var) != TAILQ_END(head) &&					\
+	    ((next) = TAILQ_NEXT(var, field), 1); (var) = (next))
+
+#define	TAILQ_FOREACH_REVERSE(var, head, headname, field)		\
+	for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last));\
+	    (var) != TAILQ_END(head);					\
+	    (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
+
+#define	TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev)	\
+	for ((var) = TAILQ_LAST((head), headname);			\
+	    (var) != TAILQ_END(head) && 				\
+	    ((prev) = TAILQ_PREV((var), headname, field), 1); (var) = (prev))
+
+/*
+ * Tail queue functions.
+ */
+#if defined(QUEUEDEBUG)
+#define	QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)			\
+	if ((head)->tqh_first &&					\
+	    (head)->tqh_first->field.tqe_prev != &(head)->tqh_first)	\
+		QUEUEDEBUG_ABORT("TAILQ_INSERT_HEAD %p %s:%d", (head),	\
+		    __FILE__, __LINE__);
+#define	QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)			\
+	if (*(head)->tqh_last != NULL)					\
+		QUEUEDEBUG_ABORT("TAILQ_INSERT_TAIL %p %s:%d", (head),	\
+		    __FILE__, __LINE__);
+#define	QUEUEDEBUG_TAILQ_OP(elm, field)					\
+	if ((elm)->field.tqe_next &&					\
+	    (elm)->field.tqe_next->field.tqe_prev !=			\
+	    &(elm)->field.tqe_next)					\
+		QUEUEDEBUG_ABORT("TAILQ_* forw %p %s:%d", (elm),	\
+		    __FILE__, __LINE__);				\
+	if (*(elm)->field.tqe_prev != (elm))				\
+		QUEUEDEBUG_ABORT("TAILQ_* back %p %s:%d", (elm),	\
+		    __FILE__, __LINE__);
+#define	QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)			\
+	if ((elm)->field.tqe_next == NULL &&				\
+	    (head)->tqh_last != &(elm)->field.tqe_next)			\
+		QUEUEDEBUG_ABORT("TAILQ_PREREMOVE head %p elm %p %s:%d",\
+		    (head), (elm), __FILE__, __LINE__);
+#define	QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)				\
+	(elm)->field.tqe_next = (void *)1L;				\
+	(elm)->field.tqe_prev = (void *)1L;
+#else
+#define	QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)
+#define	QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)
+#define	QUEUEDEBUG_TAILQ_OP(elm, field)
+#define	QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)
+#define	QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)
+#endif
+
+#define	TAILQ_INIT(head) do {						\
+	(head)->tqh_first = TAILQ_END(head);				\
+	(head)->tqh_last = &(head)->tqh_first;				\
+} while (/*CONSTCOND*/0)
+
+#define	TAILQ_INSERT_HEAD(head, elm, field) do {			\
+	QUEUEDEBUG_TAILQ_INSERT_HEAD((head), (elm), field)		\
+	if (((elm)->field.tqe_next = (head)->tqh_first) != TAILQ_END(head))\
+		(head)->tqh_first->field.tqe_prev =			\
+		    &(elm)->field.tqe_next;				\
+	else								\
+		(head)->tqh_last = &(elm)->field.tqe_next;		\
+	(head)->tqh_first = (elm);					\
+	(elm)->field.tqe_prev = &(head)->tqh_first;			\
+} while (/*CONSTCOND*/0)
+
+#define	TAILQ_INSERT_TAIL(head, elm, field) do {			\
+	QUEUEDEBUG_TAILQ_INSERT_TAIL((head), (elm), field)		\
+	(elm)->field.tqe_next = TAILQ_END(head);			\
+	(elm)->field.tqe_prev = (head)->tqh_last;			\
+	*(head)->tqh_last = (elm);					\
+	(head)->tqh_last = &(elm)->field.tqe_next;			\
+} while (/*CONSTCOND*/0)
+
+#define	TAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
+	QUEUEDEBUG_TAILQ_OP((listelm), field)				\
+	if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != 	\
+	    TAILQ_END(head))						\
+		(elm)->field.tqe_next->field.tqe_prev = 		\
+		    &(elm)->field.tqe_next;				\
+	else								\
+		(head)->tqh_last = &(elm)->field.tqe_next;		\
+	(listelm)->field.tqe_next = (elm);				\
+	(elm)->field.tqe_prev = &(listelm)->field.tqe_next;		\
+} while (/*CONSTCOND*/0)
+
+#define	TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
+	QUEUEDEBUG_TAILQ_OP((listelm), field)				\
+	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
+	(elm)->field.tqe_next = (listelm);				\
+	*(listelm)->field.tqe_prev = (elm);				\
+	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;		\
+} while (/*CONSTCOND*/0)
+
+#define	TAILQ_REMOVE(head, elm, field) do {				\
+	QUEUEDEBUG_TAILQ_PREREMOVE((head), (elm), field)		\
+	QUEUEDEBUG_TAILQ_OP((elm), field)				\
+	if (((elm)->field.tqe_next) != TAILQ_END(head))			\
+		(elm)->field.tqe_next->field.tqe_prev = 		\
+		    (elm)->field.tqe_prev;				\
+	else								\
+		(head)->tqh_last = (elm)->field.tqe_prev;		\
+	*(elm)->field.tqe_prev = (elm)->field.tqe_next;			\
+	QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field);			\
+} while (/*CONSTCOND*/0)
+
+#define TAILQ_REPLACE(head, elm, elm2, field) do {			\
+        if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != 	\
+	    TAILQ_END(head))   						\
+                (elm2)->field.tqe_next->field.tqe_prev =		\
+                    &(elm2)->field.tqe_next;				\
+        else								\
+                (head)->tqh_last = &(elm2)->field.tqe_next;		\
+        (elm2)->field.tqe_prev = (elm)->field.tqe_prev;			\
+        *(elm2)->field.tqe_prev = (elm2);				\
+	QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field);			\
+} while (/*CONSTCOND*/0)
+
+#define	TAILQ_CONCAT(head1, head2, field) do {				\
+	if (!TAILQ_EMPTY(head2)) {					\
+		*(head1)->tqh_last = (head2)->tqh_first;		\
+		(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last;	\
+		(head1)->tqh_last = (head2)->tqh_last;			\
+		TAILQ_INIT((head2));					\
+	}								\
+} while (/*CONSTCOND*/0)
+
+/*
+ * Singly-linked Tail queue declarations.
+ */
+#define	STAILQ_HEAD(name, type)						\
+struct name {								\
+	struct type *stqh_first;	/* first element */		\
+	struct type **stqh_last;	/* addr of last next element */	\
+}
+
+#define	STAILQ_HEAD_INITIALIZER(head)					\
+	{ NULL, &(head).stqh_first }
+
+#define	STAILQ_ENTRY(type)						\
+struct {								\
+	struct type *stqe_next;	/* next element */			\
+}
+
+/*
+ * Singly-linked Tail queue access methods.
+ */
+#define	STAILQ_FIRST(head)	((head)->stqh_first)
+#define	STAILQ_END(head)	NULL
+#define	STAILQ_NEXT(elm, field)	((elm)->field.stqe_next)
+#define	STAILQ_EMPTY(head)	(STAILQ_FIRST(head) == STAILQ_END(head))
+
+/*
+ * Singly-linked Tail queue functions.
+ */
+#define	STAILQ_INIT(head) do {						\
+	(head)->stqh_first = NULL;					\
+	(head)->stqh_last = &(head)->stqh_first;				\
+} while (/*CONSTCOND*/0)
+
+#define	STAILQ_INSERT_HEAD(head, elm, field) do {			\
+	if (((elm)->field.stqe_next = (head)->stqh_first) == NULL)	\
+		(head)->stqh_last = &(elm)->field.stqe_next;		\
+	(head)->stqh_first = (elm);					\
+} while (/*CONSTCOND*/0)
+
+#define	STAILQ_INSERT_TAIL(head, elm, field) do {			\
+	(elm)->field.stqe_next = NULL;					\
+	*(head)->stqh_last = (elm);					\
+	(head)->stqh_last = &(elm)->field.stqe_next;			\
+} while (/*CONSTCOND*/0)
+
+#define	STAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
+	if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\
+		(head)->stqh_last = &(elm)->field.stqe_next;		\
+	(listelm)->field.stqe_next = (elm);				\
+} while (/*CONSTCOND*/0)
+
+#define	STAILQ_REMOVE_HEAD(head, field) do {				\
+	if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \
+		(head)->stqh_last = &(head)->stqh_first;			\
+} while (/*CONSTCOND*/0)
+
+#define	STAILQ_REMOVE(head, elm, type, field) do {			\
+	if ((head)->stqh_first == (elm)) {				\
+		STAILQ_REMOVE_HEAD((head), field);			\
+	} else {							\
+		struct type *curelm = (head)->stqh_first;		\
+		while (curelm->field.stqe_next != (elm))			\
+			curelm = curelm->field.stqe_next;		\
+		if ((curelm->field.stqe_next =				\
+			curelm->field.stqe_next->field.stqe_next) == NULL) \
+			    (head)->stqh_last = &(curelm)->field.stqe_next; \
+	}								\
+} while (/*CONSTCOND*/0)
+
+#define	STAILQ_FOREACH(var, head, field)				\
+	for ((var) = ((head)->stqh_first);				\
+		(var);							\
+		(var) = ((var)->field.stqe_next))
+
+#define	STAILQ_FOREACH_SAFE(var, head, field, tvar)			\
+	for ((var) = STAILQ_FIRST((head));				\
+	    (var) && ((tvar) = STAILQ_NEXT((var), field), 1);		\
+	    (var) = (tvar))
+
+#define	STAILQ_CONCAT(head1, head2) do {				\
+	if (!STAILQ_EMPTY((head2))) {					\
+		*(head1)->stqh_last = (head2)->stqh_first;		\
+		(head1)->stqh_last = (head2)->stqh_last;		\
+		STAILQ_INIT((head2));					\
+	}								\
+} while (/*CONSTCOND*/0)
+
+#define	STAILQ_LAST(head, type, field)					\
+	(STAILQ_EMPTY((head)) ?						\
+		NULL :							\
+	        ((struct type *)(void *)				\
+		((char *)((head)->stqh_last) - offsetof(struct type, field))))
+
+
+#ifndef _KERNEL
+/*
+ * Circular queue definitions. Do not use. We still keep the macros
+ * for compatibility but because of pointer aliasing issues their use
+ * is discouraged!
+ */
+
+/*
+ * __launder_type():  We use this ugly hack to work around the compiler
+ * noticing that two types may not alias each other and elide tests in code.
+ * We hit this in the CIRCLEQ macros when comparing 'struct name *' and
+ * 'struct type *' (see CIRCLEQ_HEAD()).  Modern compilers (such as GCC
+ * 4.8) declare these comparisons as always false, causing the code to
+ * not run as designed.
+ *
+ * This hack is only to be used for comparisons and thus can be fully const.
+ * Do not use for assignment.
+ *
+ * If we ever choose to change the ABI of the CIRCLEQ macros, we could fix
+ * this by changing the head/tail sentinel values, but see the note above
+ * this one.
+ */
+static __inline const void * __launder_type(const void *);
+static __inline const void *
+__launder_type(const void *__x)
+{
+	__asm __volatile("" : "+r" (__x));
+	return __x;
+}
+
+#if defined(QUEUEDEBUG)
+#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field)				\
+	if ((head)->cqh_first != CIRCLEQ_ENDC(head) &&			\
+	    (head)->cqh_first->field.cqe_prev != CIRCLEQ_ENDC(head))	\
+		QUEUEDEBUG_ABORT("CIRCLEQ head forw %p %s:%d", (head),	\
+		      __FILE__, __LINE__);				\
+	if ((head)->cqh_last != CIRCLEQ_ENDC(head) &&			\
+	    (head)->cqh_last->field.cqe_next != CIRCLEQ_ENDC(head))	\
+		QUEUEDEBUG_ABORT("CIRCLEQ head back %p %s:%d", (head),	\
+		      __FILE__, __LINE__);
+#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)			\
+	if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) {		\
+		if ((head)->cqh_last != (elm))				\
+			QUEUEDEBUG_ABORT("CIRCLEQ elm last %p %s:%d",	\
+			    (elm), __FILE__, __LINE__);			\
+	} else {							\
+		if ((elm)->field.cqe_next->field.cqe_prev != (elm))	\
+			QUEUEDEBUG_ABORT("CIRCLEQ elm forw %p %s:%d",	\
+			    (elm), __FILE__, __LINE__);			\
+	}								\
+	if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) {		\
+		if ((head)->cqh_first != (elm))				\
+			QUEUEDEBUG_ABORT("CIRCLEQ elm first %p %s:%d",	\
+			    (elm), __FILE__, __LINE__);			\
+	} else {							\
+		if ((elm)->field.cqe_prev->field.cqe_next != (elm))	\
+			QUEUEDEBUG_ABORT("CIRCLEQ elm prev %p %s:%d",	\
+			    (elm), __FILE__, __LINE__);			\
+	}
+#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)			\
+	(elm)->field.cqe_next = (void *)1L;				\
+	(elm)->field.cqe_prev = (void *)1L;
+#else
+#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field)
+#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)
+#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)
+#endif
+
+#define	CIRCLEQ_HEAD(name, type)					\
+struct name {								\
+	struct type *cqh_first;		/* first element */		\
+	struct type *cqh_last;		/* last element */		\
+}
+
+#define	CIRCLEQ_HEAD_INITIALIZER(head)					\
+	{ CIRCLEQ_END(&head), CIRCLEQ_END(&head) }
+
+#define	CIRCLEQ_ENTRY(type)						\
+struct {								\
+	struct type *cqe_next;		/* next element */		\
+	struct type *cqe_prev;		/* previous element */		\
+}
+
+/*
+ * Circular queue functions.
+ */
+#define	CIRCLEQ_INIT(head) do {						\
+	(head)->cqh_first = CIRCLEQ_END(head);				\
+	(head)->cqh_last = CIRCLEQ_END(head);				\
+} while (/*CONSTCOND*/0)
+
+#define	CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
+	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
+	QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field)		\
+	(elm)->field.cqe_next = (listelm)->field.cqe_next;		\
+	(elm)->field.cqe_prev = (listelm);				\
+	if ((listelm)->field.cqe_next == CIRCLEQ_ENDC(head))		\
+		(head)->cqh_last = (elm);				\
+	else								\
+		(listelm)->field.cqe_next->field.cqe_prev = (elm);	\
+	(listelm)->field.cqe_next = (elm);				\
+} while (/*CONSTCOND*/0)
+
+#define	CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do {		\
+	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
+	QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field)		\
+	(elm)->field.cqe_next = (listelm);				\
+	(elm)->field.cqe_prev = (listelm)->field.cqe_prev;		\
+	if ((listelm)->field.cqe_prev == CIRCLEQ_ENDC(head))		\
+		(head)->cqh_first = (elm);				\
+	else								\
+		(listelm)->field.cqe_prev->field.cqe_next = (elm);	\
+	(listelm)->field.cqe_prev = (elm);				\
+} while (/*CONSTCOND*/0)
+
+#define	CIRCLEQ_INSERT_HEAD(head, elm, field) do {			\
+	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
+	(elm)->field.cqe_next = (head)->cqh_first;			\
+	(elm)->field.cqe_prev = CIRCLEQ_END(head);			\
+	if ((head)->cqh_last == CIRCLEQ_ENDC(head))			\
+		(head)->cqh_last = (elm);				\
+	else								\
+		(head)->cqh_first->field.cqe_prev = (elm);		\
+	(head)->cqh_first = (elm);					\
+} while (/*CONSTCOND*/0)
+
+#define	CIRCLEQ_INSERT_TAIL(head, elm, field) do {			\
+	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
+	(elm)->field.cqe_next = CIRCLEQ_END(head);			\
+	(elm)->field.cqe_prev = (head)->cqh_last;			\
+	if ((head)->cqh_first == CIRCLEQ_ENDC(head))			\
+		(head)->cqh_first = (elm);				\
+	else								\
+		(head)->cqh_last->field.cqe_next = (elm);		\
+	(head)->cqh_last = (elm);					\
+} while (/*CONSTCOND*/0)
+
+#define	CIRCLEQ_REMOVE(head, elm, field) do {				\
+	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
+	QUEUEDEBUG_CIRCLEQ_ELM((head), (elm), field)			\
+	if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head))		\
+		(head)->cqh_last = (elm)->field.cqe_prev;		\
+	else								\
+		(elm)->field.cqe_next->field.cqe_prev =			\
+		    (elm)->field.cqe_prev;				\
+	if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head))		\
+		(head)->cqh_first = (elm)->field.cqe_next;		\
+	else								\
+		(elm)->field.cqe_prev->field.cqe_next =			\
+		    (elm)->field.cqe_next;				\
+	QUEUEDEBUG_CIRCLEQ_POSTREMOVE((elm), field)			\
+} while (/*CONSTCOND*/0)
+
+#define	CIRCLEQ_FOREACH(var, head, field)				\
+	for ((var) = ((head)->cqh_first);				\
+		(var) != CIRCLEQ_ENDC(head);				\
+		(var) = ((var)->field.cqe_next))
+
+#define	CIRCLEQ_FOREACH_REVERSE(var, head, field)			\
+	for ((var) = ((head)->cqh_last);				\
+		(var) != CIRCLEQ_ENDC(head);				\
+		(var) = ((var)->field.cqe_prev))
+
+/*
+ * Circular queue access methods.
+ */
+#define	CIRCLEQ_FIRST(head)		((head)->cqh_first)
+#define	CIRCLEQ_LAST(head)		((head)->cqh_last)
+/* For comparisons */
+#define	CIRCLEQ_ENDC(head)		(__launder_type(head))
+/* For assignments */
+#define	CIRCLEQ_END(head)		((void *)(head))
+#define	CIRCLEQ_NEXT(elm, field)	((elm)->field.cqe_next)
+#define	CIRCLEQ_PREV(elm, field)	((elm)->field.cqe_prev)
+#define	CIRCLEQ_EMPTY(head)						\
+    (CIRCLEQ_FIRST(head) == CIRCLEQ_ENDC(head))
+
+#define CIRCLEQ_LOOP_NEXT(head, elm, field)				\
+	(((elm)->field.cqe_next == CIRCLEQ_ENDC(head))			\
+	    ? ((head)->cqh_first)					\
+	    : (elm->field.cqe_next))
+#define CIRCLEQ_LOOP_PREV(head, elm, field)				\
+	(((elm)->field.cqe_prev == CIRCLEQ_ENDC(head))			\
+	    ? ((head)->cqh_last)					\
+	    : (elm->field.cqe_prev))
+#endif /* !_KERNEL */
+
+#endif	/* !_SYS_QUEUE_H_ */
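As a usage reminder for the macros above (standard queue(3) usage, not code from this patch), a tail queue is declared, populated and drained like this:

#include <stdlib.h>

struct entry {
	int value;
	TAILQ_ENTRY(entry) link;	/* linkage inside the tail queue */
};
TAILQ_HEAD(entry_head, entry);

static void tailq_usage_example(void)
{
	struct entry_head head;
	struct entry *e, *tmp;

	TAILQ_INIT(&head);
	e = malloc(sizeof(*e));
	if (e != NULL) {
		e->value = 42;
		TAILQ_INSERT_TAIL(&head, e, link);
	}
	TAILQ_FOREACH_SAFE(e, &head, link, tmp) {
		TAILQ_REMOVE(&head, e, link);
		free(e);
	}
}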
diff --git a/lib/librte_eal/windows/include_override/sys/queue.h b/lib/librte_eal/windows/include_override/sys/queue.h
new file mode 100644
index 000000000..485e86f9e
--- /dev/null
+++ b/lib/librte_eal/windows/include_override/sys/queue.h
@@ -0,0 +1,11 @@
+#pragma once
+
+/*
+ * $NetBSD: queue.h,v 1.68 2014/11/19 08:10:01 uebayasi Exp $
+ *
+ * _KERNEL is defined around the include below so that the CIRCLEQ macros
+ * and their __launder_type() helper (guarded by #ifndef _KERNEL) are skipped.
+ *
+ */
+#define _KERNEL
+#include "netbsd\queue.h"
+#undef _KERNEL
diff --git a/lib/librte_eal/windows/include_override/syslog.h b/lib/librte_eal/windows/include_override/syslog.h
new file mode 100644
index 000000000..890491b24
--- /dev/null
+++ b/lib/librte_eal/windows/include_override/syslog.h
@@ -0,0 +1,217 @@
+/*
+* Copyright (c) 1982, 1986, 1988, 1993
+*	The Regents of the University of California.  All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+* 1. Redistributions of source code must retain the above copyright
+*    notice, this list of conditions and the following disclaimer.
+* 2. Redistributions in binary form must reproduce the above copyright
+*    notice, this list of conditions and the following disclaimer in the
+*    documentation and/or other materials provided with the distribution.
+* 4. Neither the name of the University nor the names of its contributors
+*    may be used to endorse or promote products derived from this software
+*    without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+* ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+* SUCH DAMAGE.
+*
+*	@(#)syslog.h	8.1 (Berkeley) 6/2/93
+*/
+
+#ifndef _SYS_SYSLOG_H
+#define _SYS_SYSLOG_H 1
+
+#include <stdarg.h>
+
+/*
+* priorities/facilities are encoded into a single 32-bit quantity, where the
+* bottom 3 bits are the priority (0-7) and the top 28 bits are the facility
+* (0-big number).  Both the priorities and the facilities map roughly
+* one-to-one to strings in the syslogd(8) source code.  This mapping is
+* included in this file.
+*
+* priorities (these are ordered)
+*/
+#define	LOG_EMERG	0	/* system is unusable */
+#define	LOG_ALERT	1	/* action must be taken immediately */
+#define	LOG_CRIT	2	/* critical conditions */
+#define	LOG_ERR		3	/* error conditions */
+#define	LOG_WARNING	4	/* warning conditions */
+#define	LOG_NOTICE	5	/* normal but significant condition */
+#define	LOG_INFO	6	/* informational */
+#define	LOG_DEBUG	7	/* debug-level messages */
+
+#define	LOG_PRIMASK	0x07	/* mask to extract priority part (internal) */
+/* extract priority */
+#define	LOG_PRI(p)	((p) & LOG_PRIMASK)
+#define	LOG_MAKEPRI(fac, pri)	(((fac) << 3) | (pri))
+
+#ifdef SYSLOG_NAMES
+#define	INTERNAL_NOPRI	0x10	/* the "no priority" priority */
+/* mark "facility" */
+#define	INTERNAL_MARK	LOG_MAKEPRI(LOG_NFACILITIES, 0)
+typedef struct _code {
+	char	*c_name;
+	int	c_val;
+} CODE;
+
+CODE prioritynames[] =
+{
+	{ "alert", LOG_ALERT },
+	{ "crit", LOG_CRIT },
+	{ "debug", LOG_DEBUG },
+	{ "emerg", LOG_EMERG },
+	{ "err", LOG_ERR },
+	{ "error", LOG_ERR },		/* DEPRECATED */
+	{ "info", LOG_INFO },
+	{ "none", INTERNAL_NOPRI },		/* INTERNAL */
+	{ "notice", LOG_NOTICE },
+	{ "panic", LOG_EMERG },		/* DEPRECATED */
+	{ "warn", LOG_WARNING },		/* DEPRECATED */
+	{ "warning", LOG_WARNING },
+	{ NULL, -1 }
+};
+#endif
+
+/* facility codes */
+#define	LOG_KERN	(0<<3)	/* kernel messages */
+#define	LOG_USER	(1<<3)	/* random user-level messages */
+#define	LOG_MAIL	(2<<3)	/* mail system */
+#define	LOG_DAEMON	(3<<3)	/* system daemons */
+#define	LOG_AUTH	(4<<3)	/* security/authorization messages */
+#define	LOG_SYSLOG	(5<<3)	/* messages generated internally by syslogd */
+#define	LOG_LPR		(6<<3)	/* line printer subsystem */
+#define	LOG_NEWS	(7<<3)	/* network news subsystem */
+#define	LOG_UUCP	(8<<3)	/* UUCP subsystem */
+#define	LOG_CRON	(9<<3)	/* clock daemon */
+#define	LOG_AUTHPRIV	(10<<3)	/* security/authorization messages (private) */
+#define	LOG_FTP		(11<<3)	/* ftp daemon */
+
+/* other codes through 15 reserved for system use */
+#define	LOG_LOCAL0	(16<<3)	/* reserved for local use */
+#define	LOG_LOCAL1	(17<<3)	/* reserved for local use */
+#define	LOG_LOCAL2	(18<<3)	/* reserved for local use */
+#define	LOG_LOCAL3	(19<<3)	/* reserved for local use */
+#define	LOG_LOCAL4	(20<<3)	/* reserved for local use */
+#define	LOG_LOCAL5	(21<<3)	/* reserved for local use */
+#define	LOG_LOCAL6	(22<<3)	/* reserved for local use */
+#define	LOG_LOCAL7	(23<<3)	/* reserved for local use */
+
+#define	LOG_NFACILITIES	24	/* current number of facilities */
+#define	LOG_FACMASK	0x03f8	/* mask to extract facility part */
+/* facility of pri */
+#define	LOG_FAC(p)	(((p) & LOG_FACMASK) >> 3)
+
+#ifdef SYSLOG_NAMES
+CODE facilitynames[] =
+{
+	{ "auth", LOG_AUTH },
+	{ "authpriv", LOG_AUTHPRIV },
+	{ "cron", LOG_CRON },
+	{ "daemon", LOG_DAEMON },
+	{ "ftp", LOG_FTP },
+	{ "kern", LOG_KERN },
+	{ "lpr", LOG_LPR },
+	{ "mail", LOG_MAIL },
+	{ "mark", INTERNAL_MARK },		/* INTERNAL */
+	{ "news", LOG_NEWS },
+	{ "security", LOG_AUTH },		/* DEPRECATED */
+	{ "syslog", LOG_SYSLOG },
+	{ "user", LOG_USER },
+	{ "uucp", LOG_UUCP },
+	{ "local0", LOG_LOCAL0 },
+	{ "local1", LOG_LOCAL1 },
+	{ "local2", LOG_LOCAL2 },
+	{ "local3", LOG_LOCAL3 },
+	{ "local4", LOG_LOCAL4 },
+	{ "local5", LOG_LOCAL5 },
+	{ "local6", LOG_LOCAL6 },
+	{ "local7", LOG_LOCAL7 },
+	{ NULL, -1 }
+};
+#endif
+
+/*
+* arguments to setlogmask.
+*/
+#define	LOG_MASK(pri)	(1 << (pri))		/* mask for one priority */
+#define	LOG_UPTO(pri)	((1 << ((pri)+1)) - 1)	/* all priorities through pri */
+
+/*
+* Option flags for openlog.
+*
+* LOG_ODELAY no longer does anything.
+* LOG_NDELAY is the inverse of what it used to be.
+*/
+#define	LOG_PID		0x01	/* log the pid with each message */
+#define	LOG_CONS	0x02	/* log on the console if errors in sending */
+#define	LOG_ODELAY	0x04	/* delay open until first syslog() (default) */
+#define	LOG_NDELAY	0x08	/* don't delay open */
+#define	LOG_NOWAIT	0x10	/* don't wait for console forks: DEPRECATED */
+#define	LOG_PERROR	0x20	/* log to stderr as well */
+
+#define SYSLOG_PORT     514
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+	/* Close the descriptor used to write to the system logger.  */
+	extern void closelog(void);
+
+	/* Open connection to system logger.  */
+	extern void openlog(char *__ident, int __option, int __facility);
+
+	/* Set the log mask level.  */
+	extern int setlogmask(int __mask);
+
+	/* Generate a log message using FMT string and option arguments.  */
+	extern void syslog(int __pri, char *__fmt, ...);
+
+	/* Generate a log message using FMT and using arguments pointed to by AP.  */
+	extern void vsyslog(int __pri, char *__fmt, va_list __ap);
+
+#ifdef _WIN32
+	/* Windows specific.
+
+	init_syslog() *must* be called before calling any of the above
+	functions.  exit_syslog() will be scheduled using atexit().
+	However, it is not an error, and is in fact encouraged, to call
+	exit_syslog() explicitly before the application exits.
+
+	During operation, the application is free to call exit_syslog()
+	followed by init_syslog() to re-initialize the library, e.g. if
+	a different syslog host is to be used.
+
+	*/
+
+	/* Initializes the syslog library and sets the syslog host.  The
+	hostname parameter is of the form "<hostname>[:<port>]".  The
+	<port> may be a numeric port or it may be a name of a service.
+	If the <port> is specified using a service name, it will be
+	looked up using getservbyname().
+
+	On failure, the hostname and port will be set to "localhost"
+	and SYSLOG_PORT respectively.
+	*/
+	extern void init_syslog(const char * hostname);
+
+	extern void exit_syslog(void);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* syslog.h */
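
As a quick illustration, this is roughly how the shim above is meant to be
driven on Windows; the identifier "helloworld" and the log text are
placeholders, and the casts are only there because the prototypes take
non-const char pointers:

  #include <syslog.h>

  static void log_startup(void)
  {
  	/* init_syslog() must run before any other syslog call on Windows. */
  	init_syslog("localhost");
  	openlog((char *)"helloworld", LOG_PID | LOG_PERROR, LOG_USER);
  	syslog(LOG_INFO, (char *)"EAL init done on %d lcore(s)", 1);
  	closelog();
  	exit_syslog();	/* optional; also scheduled via atexit() */
  }
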
diff --git a/lib/librte_eal/windows/include_override/termios.h b/lib/librte_eal/windows/include_override/termios.h
new file mode 100644
index 000000000..ece9cc5c9
--- /dev/null
+++ b/lib/librte_eal/windows/include_override/termios.h
@@ -0,0 +1 @@
+#include <sys/_termios.h>
diff --git a/lib/librte_eal/windows/include_override/unistd.h b/lib/librte_eal/windows/include_override/unistd.h
new file mode 100644
index 000000000..e78a696ab
--- /dev/null
+++ b/lib/librte_eal/windows/include_override/unistd.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+#ifndef _UNISTD_H
+#define _UNISTD_H
+
+/* This header file is required to build other rte* libraries and applications */
+
+//#include <io.h>
+#include <getopt.h> /* getopt at: https://gist.github.com/bikerm16/1b75e2dd20d839dcea58 */
+
+/* Types used by Unix-y systems */
+typedef __int8            int8_t;
+typedef __int16           int16_t;
+typedef __int32           int32_t;
+typedef __int64           int64_t;
+typedef unsigned __int8   uint8_t;
+typedef unsigned __int16  uint16_t;
+typedef unsigned __int32  uint32_t;
+typedef unsigned __int64  uint64_t;
+
+#define srandom srand
+#define random rand
+#define _SC_PAGESIZE
+#define sysconf getpagesize
+/* function prototypes */
+int getpagesize(void);
+
+#endif /* unistd.h  */
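
To make the intent of the sysconf/getpagesize mapping above concrete:
_SC_PAGESIZE expands to nothing and sysconf expands to getpagesize, so the
usual POSIX call site compiles unchanged. A minimal sketch:

  #include <unistd.h>

  int page_size(void)
  {
  	/* The preprocessor rewrites this to getpagesize(). */
  	return sysconf(_SC_PAGESIZE);
  }
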
diff --git a/lib/librte_eal/windows/include_override/x86intrin.h b/lib/librte_eal/windows/include_override/x86intrin.h
new file mode 100644
index 000000000..336aa0baa
--- /dev/null
+++ b/lib/librte_eal/windows/include_override/x86intrin.h
@@ -0,0 +1 @@
+#include <intrin.h>
\ No newline at end of file
diff --git a/lib/librte_eal/windows/rte_override/exec-env/rte_interrupts.h b/lib/librte_eal/windows/rte_override/exec-env/rte_interrupts.h
new file mode 100644
index 000000000..e731442aa
--- /dev/null
+++ b/lib/librte_eal/windows/rte_override/exec-env/rte_interrupts.h
@@ -0,0 +1,3 @@
+#pragma once
+
+#include "..\..\..\linuxapp\eal\include\exec-env\rte_interrupts.h"
diff --git a/lib/librte_eal/windows/rte_override/rte_acl.h b/lib/librte_eal/windows/rte_override/rte_acl.h
new file mode 100644
index 000000000..f35b12c4f
--- /dev/null
+++ b/lib/librte_eal/windows/rte_override/rte_acl.h
@@ -0,0 +1,7 @@
+#pragma once
+
+#include "..\..\..\librte_acl\rte_acl.h"
+
+#undef RTE_ACL_MASKLEN_TO_BITMASK
+#define	RTE_ACL_MASKLEN_TO_BITMASK(v, s)	\
+((v) == 0 ? (v) : ((uint64_t)-1 << ((s) * CHAR_BIT - (v))))
diff --git a/lib/librte_eal/windows/rte_override/rte_atomic.h b/lib/librte_eal/windows/rte_override/rte_atomic.h
new file mode 100644
index 000000000..936710726
--- /dev/null
+++ b/lib/librte_eal/windows/rte_override/rte_atomic.h
@@ -0,0 +1,744 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+#ifndef _RTE_ATOMIC_H_
+#define _RTE_ATOMIC_H_
+
+#include <emmintrin.h>
+
+/* Do not include any of the core rte_atomic.h includes. They cause compilation problems on Windows */
+/* Instead, duplicate some of the required definitions here - this is sub-optimal, but... */
+
+#define rte_mb() _mm_mfence()
+#define rte_wmb() _mm_sfence()
+#define rte_rmb() _mm_lfence()
+
+#define rte_smp_mb() rte_mb()
+#define rte_smp_rmb() _ReadBarrier()
+#define rte_smp_wmb() _WriteBarrier()
+
+
+#define rte_io_mb() rte_mb()
+#define rte_io_wmb() _WriteBarrier()
+#define rte_io_rmb() _ReadBarrier()
+
+/**
+* Compiler barrier.
+*
+* Guarantees that operation reordering does not occur at compile time
+* for operations directly before and after the barrier.
+*/
+#define	rte_compiler_barrier() do {		\
+	asm volatile ("" : : : "memory");	\
+} while(0)
+
+
+/* Inline Windows implementation of atomic operations */
+
+/*------------------------- 16 bit atomic operations -------------------------*/
+
+/**
+* Atomic compare and set.
+*
+* (atomic) equivalent to:
+*   if (*dst == exp)
+*     *dst = src (all 16-bit words)
+*
+* @param dst
+*   The destination location into which the value will be written.
+* @param exp
+*   The expected value.
+* @param src
+*   The new value.
+* @return
+*   Non-zero on success; 0 on failure.
+*/
+static inline int
+rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
+{
+    /* The intrinsic returns the initial value of *dst; the swap
+     * succeeded only if that initial value equals exp. */
+    return (_InterlockedCompareExchange16((SHORT volatile *)dst, src, exp) == exp);
+}
+
+/**
+* The atomic counter structure.
+*/
+typedef struct {
+    volatile int16_t cnt; /**< An internal counter value. */
+} rte_atomic16_t;
+
+/**
+* Static initializer for an atomic counter.
+*/
+#define RTE_ATOMIC16_INIT(val) { (val) }
+
+/**
+* Initialize an atomic counter.
+*
+* @param v
+*   A pointer to the atomic counter.
+*/
+static inline void
+rte_atomic16_init(rte_atomic16_t *v)
+{
+    v->cnt = 0;
+}
+
+/**
+* Atomically read a 16-bit value from a counter.
+*
+* @param v
+*   A pointer to the atomic counter.
+* @return
+*   The value of the counter.
+*/
+static inline int16_t
+rte_atomic16_read(const rte_atomic16_t *v)
+{
+    return v->cnt;
+}
+
+/**
+* Atomically set a counter to a 16-bit value.
+*
+* @param v
+*   A pointer to the atomic counter.
+* @param new_value
+*   The new value for the counter.
+*/
+static inline void
+rte_atomic16_set(rte_atomic16_t *v, int16_t new_value)
+{
+    _InterlockedExchange16(&v->cnt, new_value);
+}
+
+/**
+* Atomically add a 16-bit value to an atomic counter.
+*
+* @param v
+*   A pointer to the atomic counter.
+* @param inc
+*   The value to be added to the counter.
+*/
+static inline void
+rte_atomic16_add(rte_atomic16_t *v, int16_t inc)
+{
+    _InterlockedExchangeAdd16(&v->cnt, inc);
+}
+
+/**
+* Atomically subtract a 16-bit value from an atomic counter.
+*
+* @param v
+*   A pointer to the atomic counter.
+* @param dec
+*   The value to be subtracted from the counter.
+*/
+static inline void
+rte_atomic16_sub(rte_atomic16_t *v, int16_t dec)
+{
+    _InterlockedExchangeAdd16(&v->cnt, (-dec));
+}
+
+/**
+* Atomically increment a counter by one.
+*
+* @param v
+*   A pointer to the atomic counter.
+*/
+static inline void
+rte_atomic16_inc(rte_atomic16_t *v)
+{
+    rte_atomic16_add(v, 1);
+}
+
+/**
+* Atomically decrement a counter by one.
+*
+* @param v
+*   A pointer to the atomic counter.
+*/
+static inline void
+rte_atomic16_dec(rte_atomic16_t *v)
+{
+    rte_atomic16_sub(v, 1);
+}
+
+/**
+* Atomically add a 16-bit value to a counter and return the result.
+*
+* Atomically adds the 16-bit value (inc) to the atomic counter (v) and
+* returns the value of v after addition.
+*
+* @param v
+*   A pointer to the atomic counter.
+* @param inc
+*   The value to be added to the counter.
+* @return
+*   The value of v after the addition.
+*/
+static inline int16_t
+rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc)
+{
+    /* Use the value returned by the interlocked add; re-reading
+     * v->cnt separately would not be atomic. */
+    return _InterlockedExchangeAdd16(&v->cnt, inc) + inc;
+}
+
+/**
+* Atomically subtract a 16-bit value from a counter and return
+* the result.
+*
+* Atomically subtracts the 16-bit value (dec) from the atomic counter
+* (v) and returns the value of v after the subtraction.
+*
+* @param v
+*   A pointer to the atomic counter.
+* @param dec
+*   The value to be subtracted from the counter.
+* @return
+*   The value of v after the subtraction.
+*/
+static inline int16_t
+rte_atomic16_sub_return(rte_atomic16_t *v, int16_t dec)
+{
+    return _InterlockedExchangeAdd16(&v->cnt, (SHORT)(-dec)) - dec;
+}
+
+/**
+* Atomically increment a 16-bit counter by one and test.
+*
+* Atomically increments the atomic counter (v) by one and returns true if
+* the result is 0, or false in all other cases.
+*
+* @param v
+*   A pointer to the atomic counter.
+* @return
+*   True if the result after the increment operation is 0; false otherwise.
+*/
+static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
+{
+    return ((rte_atomic16_add_return(v, 1) == 0));
+}
+
+/**
+* Atomically decrement a 16-bit counter by one and test.
+*
+* Atomically decrements the atomic counter (v) by one and returns true if
+* the result is 0, or false in all other cases.
+*
+* @param v
+*   A pointer to the atomic counter.
+* @return
+*   True if the result after the decrement operation is 0; false otherwise.
+*/
+static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
+{
+    return ((rte_atomic16_sub_return(v, 1) == 0));
+}
+
+/**
+* Atomically test and set a 16-bit atomic counter.
+*
+* If the counter value is already set, return 0 (failed). Otherwise, set
+* the counter value to 1 and return 1 (success).
+*
+* @param v
+*   A pointer to the atomic counter.
+* @return
+*   0 if failed; else 1, success.
+*/
+static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
+{
+    return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
+}
+
+/**
+* Atomically set a 16-bit counter to 0.
+*
+* @param v
+*   A pointer to the atomic counter.
+*/
+static inline void rte_atomic16_clear(rte_atomic16_t *v)
+{
+    rte_atomic16_set(v, 0);
+}
+
+/*------------------------- 32 bit atomic operations -------------------------*/
+
+/**
+* Atomic compare and set.
+*
+* (atomic) equivalent to:
+*   if (*dst == exp)
+*     *dst = src (all 32-bit words)
+*
+* @param dst
+*   The destination location into which the value will be written.
+* @param exp
+*   The expected value.
+* @param src
+*   The new value.
+* @return
+*   Non-zero on success; 0 on failure.
+*/
+static inline int
+rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
+{
+    /* Success means the initial value of *dst equalled exp. */
+    return (_InterlockedCompareExchange((LONG volatile *)dst, src, exp) == exp);
+}
+
+/**
+* The atomic counter structure.
+*/
+typedef struct {
+    volatile int32_t cnt; /**< An internal counter value. */
+} rte_atomic32_t;
+
+/**
+* Static initializer for an atomic counter.
+*/
+#define RTE_ATOMIC32_INIT(val) { (val) }
+
+/**
+* Initialize an atomic counter.
+*
+* @param v
+*   A pointer to the atomic counter.
+*/
+static inline void
+rte_atomic32_init(rte_atomic32_t *v)
+{
+    v->cnt = 0;
+}
+
+/**
+* Atomically read a 32-bit value from a counter.
+*
+* @param v
+*   A pointer to the atomic counter.
+* @return
+*   The value of the counter.
+*/
+static inline int32_t
+rte_atomic32_read(const rte_atomic32_t *v)
+{
+    return v->cnt;
+}
+
+/**
+* Atomically set a counter to a 32-bit value.
+*
+* @param v
+*   A pointer to the atomic counter.
+* @param new_value
+*   The new value for the counter.
+*/
+static inline void
+rte_atomic32_set(rte_atomic32_t *v, int32_t new_value)
+{
+    _InterlockedExchange((LONG volatile *)&v->cnt, new_value);
+}
+
+/**
+* Atomically add a 32-bit value to an atomic counter.
+*
+* @param v
+*   A pointer to the atomic counter.
+* @param inc
+*   The value to be added to the counter.
+*/
+static inline void
+rte_atomic32_add(rte_atomic32_t *v, int32_t inc)
+{
+    _InterlockedExchangeAdd((LONG volatile *)&v->cnt, inc);
+}
+
+/**
+* Atomically subtract a 32-bit value from an atomic counter.
+*
+* @param v
+*   A pointer to the atomic counter.
+* @param dec
+*   The value to be subtracted from the counter.
+*/
+static inline void
+rte_atomic32_sub(rte_atomic32_t *v, int32_t dec)
+{
+    _InterlockedExchangeAdd((LONG volatile *)&v->cnt, (-dec));
+}
+
+/**
+* Atomically increment a counter by one.
+*
+* @param v
+*   A pointer to the atomic counter.
+*/
+static inline void
+rte_atomic32_inc(rte_atomic32_t *v)
+{
+    rte_atomic32_add(v, 1);
+}
+
+/**
+* Atomically decrement a counter by one.
+*
+* @param v
+*   A pointer to the atomic counter.
+*/
+static inline void
+rte_atomic32_dec(rte_atomic32_t *v)
+{
+    rte_atomic32_sub(v, 1);
+}
+
+/**
+* Atomically add a 32-bit value to a counter and return the result.
+*
+* Atomically adds the 32-bit value (inc) to the atomic counter (v) and
+* returns the value of v after addition.
+*
+* @param v
+*   A pointer to the atomic counter.
+* @param inc
+*   The value to be added to the counter.
+* @return
+*   The value of v after the addition.
+*/
+static inline int32_t
+rte_atomic32_add_return(rte_atomic32_t *v, int32_t inc)
+{
+    return _InterlockedExchangeAdd((LONG volatile *)&v->cnt, inc) + inc;
+}
+
+/**
+* Atomically subtract a 32-bit value from a counter and return
+* the result.
+*
+* Atomically subtracts the 32-bit value (dec) from the atomic counter
+* (v) and returns the value of v after the subtraction.
+*
+* @param v
+*   A pointer to the atomic counter.
+* @param dec
+*   The value to be subtracted from the counter.
+* @return
+*   The value of v after the subtraction.
+*/
+static inline int32_t
+rte_atomic32_sub_return(rte_atomic32_t *v, int32_t dec)
+{
+    return _InterlockedExchangeAdd((LONG volatile *)&v->cnt, (-dec)) - dec;
+}
+
+/**
+* Atomically increment a 32-bit counter by one and test.
+*
+* Atomically increments the atomic counter (v) by one and returns true if
+* the result is 0, or false in all other cases.
+*
+* @param v
+*   A pointer to the atomic counter.
+* @return
+*   True if the result after the increment operation is 0; false otherwise.
+*/
+static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
+{
+    return ((rte_atomic32_add_return(v, 1) == 0));
+}
+
+/**
+* Atomically decrement a 32-bit counter by one and test.
+*
+* Atomically decrements the atomic counter (v) by one and returns true if
+* the result is 0, or false in all other cases.
+*
+* @param v
+*   A pointer to the atomic counter.
+* @return
+*   True if the result after the decrement operation is 0; false otherwise.
+*/
+static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
+{
+    return ((rte_atomic32_sub_return(v, 1) == 0));
+}
+
+/**
+* Atomically test and set a 32-bit atomic counter.
+*
+* If the counter value is already set, return 0 (failed). Otherwise, set
+* the counter value to 1 and return 1 (success).
+*
+* @param v
+*   A pointer to the atomic counter.
+* @return
+*   0 if failed; else 1, success.
+*/
+static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
+{
+    return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
+}
+
+/**
+* Atomically set a 32-bit counter to 0.
+*
+* @param v
+*   A pointer to the atomic counter.
+*/
+static inline void rte_atomic32_clear(rte_atomic32_t *v)
+{
+    rte_atomic32_set(v, 0);
+}
+
+/*------------------------- 64 bit atomic operations -------------------------*/
+
+/**
+* An atomic compare and set function used by the mutex functions.
+* (atomic) equivalent to:
+*   if (*dst == exp)
+*     *dst = src (all 64-bit words)
+*
+* @param dst
+*   The destination into which the value will be written.
+* @param exp
+*   The expected value.
+* @param src
+*   The new value.
+* @return
+*   Non-zero on success; 0 on failure.
+*/
+static inline int
+rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
+{
+    /* Success means the initial value of *dst equalled exp. */
+    return (_InterlockedCompareExchange64((volatile LONG64 *)dst, src, exp) == exp);
+}
+
+/**
+* Atomic exchange.
+*
+* (atomic) equivalent to:
+*   ret = *dst;
+*   *dst = val;
+*   return ret;
+*
+* @param dst
+*   The destination location into which the value will be written.
+* @param val
+*   The new value.
+* @return
+*   The original value at that location
+**/
+static inline uint64_t
+rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
+{
+	return _InterlockedExchange64((volatile LONG64 *)dst, val);
+}
+
+/**
+* The atomic counter structure.
+*/
+typedef struct {
+    volatile int64_t cnt;  /**< Internal counter value. */
+} rte_atomic64_t;
+
+/**
+* Static initializer for an atomic counter.
+*/
+#define RTE_ATOMIC64_INIT(val) { (val) }
+
+/**
+* Initialize the atomic counter.
+*
+* @param v
+*   A pointer to the atomic counter.
+*/
+static inline void
+rte_atomic64_init(rte_atomic64_t *v)
+{
+    v->cnt = 0;
+}
+
+/**
+* Atomically read a 64-bit counter.
+*
+* @param v
+*   A pointer to the atomic counter.
+* @return
+*   The value of the counter.
+*/
+static inline int64_t
+rte_atomic64_read(rte_atomic64_t *v)
+{
+    return v->cnt;
+}
+
+/**
+* Atomically set a 64-bit counter.
+*
+* @param v
+*   A pointer to the atomic counter.
+* @param new_value
+*   The new value of the counter.
+*/
+static inline void
+rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
+{
+    _InterlockedExchange64(&v->cnt, new_value);
+}
+
+/**
+* Atomically add a 64-bit value to a counter.
+*
+* @param v
+*   A pointer to the atomic counter.
+* @param inc
+*   The value to be added to the counter.
+*/
+static inline void
+rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
+{
+    _InterlockedExchangeAdd64(&v->cnt, inc);
+}
+
+/**
+* Atomically subtract a 64-bit value from a counter.
+*
+* @param v
+*   A pointer to the atomic counter.
+* @param dec
+*   The value to be subtracted from the counter.
+*/
+static inline void
+rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
+{
+    _InterlockedExchangeAdd64(&v->cnt, (-dec));
+}
+
+/**
+* Atomically increment a 64-bit counter by one.
+*
+* @param v
+*   A pointer to the atomic counter.
+*/
+static inline void
+rte_atomic64_inc(rte_atomic64_t *v)
+{
+    _InterlockedIncrement64(&v->cnt);
+}
+
+/**
+* Atomically decrement a 64-bit counter by one.
+*
+* @param v
+*   A pointer to the atomic counter.
+*/
+static inline void
+rte_atomic64_dec(rte_atomic64_t *v)
+{
+    _InterlockedDecrement64(&v->cnt);
+}
+
+/**
+* Add a 64-bit value to an atomic counter and return the result.
+*
+* Atomically adds the 64-bit value (inc) to the atomic counter (v) and
+* returns the value of v after the addition.
+*
+* @param v
+*   A pointer to the atomic counter.
+* @param inc
+*   The value to be added to the counter.
+* @return
+*   The value of v after the addition.
+*/
+static inline int64_t
+rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
+{
+    return _InterlockedExchangeAdd64(&v->cnt, inc) + inc;
+}
+
+/**
+* Subtract a 64-bit value from an atomic counter and return the result.
+*
+* Atomically subtracts the 64-bit value (dec) from the atomic counter (v)
+* and returns the value of v after the subtraction.
+*
+* @param v
+*   A pointer to the atomic counter.
+* @param dec
+*   The value to be subtracted from the counter.
+* @return
+*   The value of v after the subtraction.
+*/
+static inline int64_t
+rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
+{
+    return _InterlockedExchangeAdd64(&v->cnt, (-dec)) - dec;
+}
+
+/**
+* Atomically increment a 64-bit counter by one and test.
+*
+* Atomically increments the atomic counter (v) by one and returns
+* true if the result is 0, or false in all other cases.
+*
+* @param v
+*   A pointer to the atomic counter.
+* @return
+*   True if the result after the addition is 0; false otherwise.
+*/
+static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
+{
+    return ((rte_atomic64_add_return(v, 1) == 0));
+}
+
+/**
+* Atomically decrement a 64-bit counter by one and test.
+*
+* Atomically decrements the atomic counter (v) by one and returns true if
+* the result is 0, or false in all other cases.
+*
+* @param v
+*   A pointer to the atomic counter.
+* @return
+*   True if the result after subtraction is 0; false otherwise.
+*/
+static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
+{
+    return ((rte_atomic64_sub_return(v, 1) == 0));
+}
+
+/**
+* Atomically test and set a 64-bit atomic counter.
+*
+* If the counter value is already set, return 0 (failed). Otherwise, set
+* the counter value to 1 and return 1 (success).
+*
+* @param v
+*   A pointer to the atomic counter.
+* @return
+*   0 if failed; else 1, success.
+*/
+static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
+{
+    return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
+}
+
+/**
+* Atomically set a 64-bit counter to 0.
+*
+* @param v
+*   A pointer to the atomic counter.
+*/
+static inline void rte_atomic64_clear(rte_atomic64_t *v)
+{
+    rte_atomic64_set(v, 0);
+}
+
+#endif /* _RTE_ATOMIC_H_ */
+
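
A minimal sketch of how the counter API above is consumed (the refcnt name
is illustrative only):

  #include <rte_atomic.h>

  static rte_atomic32_t refcnt = RTE_ATOMIC32_INIT(1);

  static void obj_get(void)
  {
  	rte_atomic32_inc(&refcnt);
  }

  static int obj_put(void)
  {
  	/* Returns 1 when the count drops to zero and the object may be freed. */
  	return rte_atomic32_dec_and_test(&refcnt);
  }
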
diff --git a/lib/librte_eal/windows/rte_override/rte_bus_pci.h b/lib/librte_eal/windows/rte_override/rte_bus_pci.h
new file mode 100644
index 000000000..d7950e218
--- /dev/null
+++ b/lib/librte_eal/windows/rte_override/rte_bus_pci.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+#pragma once
+
+/* include the original file, so that we can re-define certain macros */
+#include "..\..\..\..\drivers\bus\pci\rte_bus_pci.h"
+
+/* Need to re-define RTE_PMD_REGISTER_PCI for Windows */
+#ifdef RTE_PMD_REGISTER_PCI
+#undef RTE_PMD_REGISTER_PCI
+#endif
+
+/*
+* Definition for registering PMDs
+* (This is a workaround for Windows in lieu of a constructor-like function)
+*/
+#define RTE_PMD_REGISTER_PCI(nm, pci_drv) \
+void pciinitfn_##nm(void); \
+void pciinitfn_##nm(void) \
+{\
+	(pci_drv).driver.name = RTE_STR(nm);\
+	rte_pci_register(&pci_drv); \
+}
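
Because there is no constructor support here, the macro above only generates
a named init function; something in EAL start-up has to call it explicitly.
A sketch, with net_ixgbe/rte_ixgbe_pmd used purely as stand-in names:

  /* In the PMD source, unchanged from Linux: */
  RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);

  /* Somewhere in Windows EAL init (assumed), the generated function is
   * invoked by hand: */
  void pciinitfn_net_ixgbe(void);

  static void register_pci_pmds(void)
  {
  	pciinitfn_net_ixgbe();
  }
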
diff --git a/lib/librte_eal/windows/rte_override/rte_byteorder.h b/lib/librte_eal/windows/rte_override/rte_byteorder.h
new file mode 100644
index 000000000..b87a42756
--- /dev/null
+++ b/lib/librte_eal/windows/rte_override/rte_byteorder.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+#pragma once
+
+#ifndef RTE_BYTE_ORDER
+#define RTE_BYTE_ORDER RTE_LITTLE_ENDIAN
+#endif
+
+#include "..\..\common\include\arch\x86\rte_byteorder.h"
\ No newline at end of file
diff --git a/lib/librte_eal/windows/rte_override/rte_common.h b/lib/librte_eal/windows/rte_override/rte_common.h
new file mode 100644
index 000000000..564fe6f82
--- /dev/null
+++ b/lib/librte_eal/windows/rte_override/rte_common.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+#pragma once
+
+/* If rte_common.h has already been included, then we will have issues */
+#ifdef _RTE_COMMON_H_
+#error
+#endif
+
+#ifdef DPDKWIN_NO_WARNINGS
+#pragma warning (disable : 42)
+#endif
+
+#include <rte_wincompat.h>
+
+#include "../common/include/rte_common.h"
+
+#ifdef DPDKWIN_NO_WARNINGS
+#pragma warning (enable : 42)
+#endif
+
+#ifdef container_of
+/* undefine the existing definition, so that we can use the Windows-compliant version */
+#undef container_of
+#endif
+
+#define container_of(ptr, type, member)		CONTAINING_RECORD(ptr, type, member)
+
+
+/* Override RTE_MIN()/RTE_MAX(), since the rte_common versions use typeof (TODO: revisit) */
+#undef RTE_MIN
+#define RTE_MIN(a, b)	(((a) < (b)) ? (a) : (b))
+
+#undef RTE_MAX
+#define RTE_MAX(a, b)	(((a) > (b)) ? (a) : (b))
+
+/* Redefine these macros with appropriate typecasting */
+#undef RTE_ALIGN_FLOOR
+#define RTE_ALIGN_FLOOR(val, align)		((uintptr_t)(val) & (~((uintptr_t)((align) - 1))))
+
+#undef RTE_ALIGN_CEIL
+#define RTE_ALIGN_CEIL(val, align)		RTE_ALIGN_FLOOR((val + ((uintptr_t)(align) - 1)), align)
+
+#undef RTE_ALIGN
+#define RTE_ALIGN(val, align)			RTE_ALIGN_CEIL(val, align)
+
+#undef RTE_PTR_ALIGN_FLOOR
+#define RTE_PTR_ALIGN_FLOOR(ptr, align)		(void *)(RTE_ALIGN_FLOOR((uintptr_t)ptr, align))
+
+#undef RTE_PTR_ALIGN_CEIL
+#define RTE_PTR_ALIGN_CEIL(ptr, align)		(void *)RTE_PTR_ALIGN_FLOOR((uintptr_t)RTE_PTR_ADD(ptr, (align) - 1), align)
+
+#undef RTE_LEN2MASK
+#define	RTE_LEN2MASK(ln, tp)			((uint64_t)-1 >> (sizeof(uint64_t) * CHAR_BIT - (ln)))
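
The retyped macros above behave as before for integer and pointer arguments;
for example (worked values, assuming 64-byte alignment):

  /* RTE_ALIGN_FLOOR(1000, 64) == 960, RTE_ALIGN_CEIL(1000, 64) == 1024 */
  static void *cache_align(void *p)
  {
  	return RTE_PTR_ALIGN_CEIL(p, 64);
  }
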
diff --git a/lib/librte_eal/windows/rte_override/rte_common.h.sav b/lib/librte_eal/windows/rte_override/rte_common.h.sav
new file mode 100644
index 000000000..6f067aa3f
--- /dev/null
+++ b/lib/librte_eal/windows/rte_override/rte_common.h.sav
@@ -0,0 +1,372 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+#ifndef _RTE_COMMON_H_
+#define _RTE_COMMON_H_
+
+/**
+ * @file
+ *
+ * Generic, commonly-used macro and inline function definitions
+ * for DPDK.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <errno.h>
+#include <limits.h>
+
+#ifndef typeof
+#define typeof __typeof__
+#endif
+
+#ifndef asm
+#define asm __asm__
+#endif
+
+#ifdef RTE_ARCH_STRICT_ALIGN
+typedef uint64_t unaligned_uint64_t __attribute__ ((aligned(1)));
+typedef uint32_t unaligned_uint32_t __attribute__ ((aligned(1)));
+typedef uint16_t unaligned_uint16_t __attribute__ ((aligned(1)));
+#else
+typedef uint64_t unaligned_uint64_t;
+typedef uint32_t unaligned_uint32_t;
+typedef uint16_t unaligned_uint16_t;
+#endif
+
+/**
+ * Force alignment
+ */
+#define __rte_aligned(a) __attribute__((__aligned__(a)))
+
+/**
+ * Force a structure to be packed
+ */
+#define __rte_packed __attribute__((__packed__))
+
+/******* Macro to mark functions and fields scheduled for removal *****/
+#define __rte_deprecated	__attribute__((__deprecated__))
+
+/*********** Macros to eliminate unused variable warnings ********/
+
+/**
+ * short definition to mark a function parameter unused
+ */
+#define __rte_unused __attribute__((__unused__))
+
+/**
+ * definition to mark a variable or function parameter as used so
+ * as to avoid a compiler warning
+ */
+#define RTE_SET_USED(x) (void)(x)
+
+/*********** Macros for pointer arithmetic ********/
+
+/**
+ * add a byte-value offset from a pointer
+ */
+#define RTE_PTR_ADD(ptr, x) ((void*)((uintptr_t)(ptr) + (x)))
+
+/**
+ * subtract a byte-value offset from a pointer
+ */
+#define RTE_PTR_SUB(ptr, x) ((void*)((uintptr_t)ptr - (x)))
+
+/**
+ * get the difference between two pointer values, i.e. how far apart
+ * in bytes are the locations they point two. It is assumed that
+ * ptr1 is greater than ptr2.
+ */
+#define RTE_PTR_DIFF(ptr1, ptr2) ((uintptr_t)(ptr1) - (uintptr_t)(ptr2))
+
+/*********** Macros/static functions for doing alignment ********/
+
+
+/**
+ * Macro to align a pointer to a given power-of-two. The resultant
+ * pointer will be a pointer of the same type as the first parameter, and
+ * point to an address no higher than the first parameter. Second parameter
+ * must be a power-of-two value.
+ */
+#define RTE_PTR_ALIGN_FLOOR(ptr, align) \
+	((typeof(ptr))RTE_ALIGN_FLOOR((uintptr_t)ptr, align))
+
+/**
+ * Macro to align a value to a given power-of-two. The resultant value
+ * will be of the same type as the first parameter, and will be no
+ * bigger than the first parameter. Second parameter must be a
+ * power-of-two value.
+ */
+#define RTE_ALIGN_FLOOR(val, align) \
+	(typeof(val))((val) & (~((typeof(val))((align) - 1))))
+
+/**
+ * Macro to align a pointer to a given power-of-two. The resultant
+ * pointer will be a pointer of the same type as the first parameter, and
+ * point to an address no lower than the first parameter. Second parameter
+ * must be a power-of-two value.
+ */
+#define RTE_PTR_ALIGN_CEIL(ptr, align) \
+	RTE_PTR_ALIGN_FLOOR((typeof(ptr))RTE_PTR_ADD(ptr, (align) - 1), align)
+
+/**
+ * Macro to align a value to a given power-of-two. The resultant value
+ * will be of the same type as the first parameter, and will be no lower
+ * than the first parameter. Second parameter must be a power-of-two
+ * value.
+ */
+#define RTE_ALIGN_CEIL(val, align) \
+	RTE_ALIGN_FLOOR(((val) + ((typeof(val)) (align) - 1)), align)
+
+/**
+ * Macro to align a pointer to a given power-of-two. The resultant
+ * pointer will be a pointer of the same type as the first parameter, and
+ * point to an address no lower than the first parameter. Second parameter
+ * must be a power-of-two value.
+ * This function is the same as RTE_PTR_ALIGN_CEIL
+ */
+#define RTE_PTR_ALIGN(ptr, align) RTE_PTR_ALIGN_CEIL(ptr, align)
+
+/**
+ * Macro to align a value to a given power-of-two. The resultant
+ * value will be of the same type as the first parameter, and
+ * will be no lower than the first parameter. Second parameter
+ * must be a power-of-two value.
+ * This function is the same as RTE_ALIGN_CEIL
+ */
+#define RTE_ALIGN(val, align) RTE_ALIGN_CEIL(val, align)
+
+/**
+ * Checks if a pointer is aligned to a given power-of-two value
+ *
+ * @param ptr
+ *   The pointer whose alignment is to be checked
+ * @param align
+ *   The power-of-two value to which the ptr should be aligned
+ *
+ * @return
+ *   True(1) where the pointer is correctly aligned, false(0) otherwise
+ */
+static inline int
+rte_is_aligned(void *ptr, unsigned align)
+{
+	return (((uintptr_t)ptr % align) == 0);
+}
+
+/*********** Macros for compile type checks ********/
+
+/**
+ * Triggers an error at compilation time if the condition is true.
+ */
+#ifndef __OPTIMIZE__
+#define RTE_BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+#else
+extern int RTE_BUILD_BUG_ON_detected_error;
+#define RTE_BUILD_BUG_ON(condition) do {             \
+	((void)sizeof(char[1 - 2*!!(condition)]));   \
+	if (condition)                               \
+		RTE_BUILD_BUG_ON_detected_error = 1; \
+} while(0)
+#endif
+
+/*********** Macros to work with powers of 2 ********/
+
+/**
+ * Returns true if n is a power of 2
+ * @param n
+ *     Number to check
+ * @return 1 if true, 0 otherwise
+ */
+static inline int
+rte_is_power_of_2(uint32_t n)
+{
+	return n && !(n & (n - 1));
+}
+
+/**
+ * Aligns input parameter to the next power of 2
+ *
+ * @param x
+ *   The integer value to align
+ *
+ * @return
+ *   Input parameter aligned to the next power of 2
+ */
+static inline uint32_t
+rte_align32pow2(uint32_t x)
+{
+	x--;
+	x |= x >> 1;
+	x |= x >> 2;
+	x |= x >> 4;
+	x |= x >> 8;
+	x |= x >> 16;
+
+	return x + 1;
+}
+
+/**
+ * Aligns 64b input parameter to the next power of 2
+ *
+ * @param v
+ *   The 64b value to align
+ *
+ * @return
+ *   Input parameter aligned to the next power of 2
+ */
+static inline uint64_t
+rte_align64pow2(uint64_t v)
+{
+	v--;
+	v |= v >> 1;
+	v |= v >> 2;
+	v |= v >> 4;
+	v |= v >> 8;
+	v |= v >> 16;
+	v |= v >> 32;
+
+	return v + 1;
+}
+
+/*********** Macros for calculating min and max **********/
+
+/**
+ * Macro to return the minimum of two numbers
+ */
+#define RTE_MIN(a, b) ({ \
+		typeof (a) _a = (a); \
+		typeof (b) _b = (b); \
+		_a < _b ? _a : _b; \
+	})
+
+/**
+ * Macro to return the maximum of two numbers
+ */
+#define RTE_MAX(a, b) ({ \
+		typeof (a) _a = (a); \
+		typeof (b) _b = (b); \
+		_a > _b ? _a : _b; \
+	})
+
+/*********** Other general functions / macros ********/
+
+#ifdef __SSE2__
+#include <emmintrin.h>
+/**
+ * PAUSE instruction for tight loops (avoid busy waiting)
+ */
+static inline void
+rte_pause (void)
+{
+	_mm_pause();
+}
+#else
+static inline void
+rte_pause(void) {}
+#endif
+
+/**
+ * Searches the input parameter for the least significant set bit
+ * (starting from zero).
+ * If a least significant 1 bit is found, its bit index is returned.
+ * If the content of the input parameter is zero, then the content of the return
+ * value is undefined.
+ * @param v
+ *     input parameter, should not be zero.
+ * @return
+ *     least significant set bit in the input parameter.
+ */
+static inline uint32_t
+rte_bsf32(uint32_t v)
+{
+	return __builtin_ctz(v);
+}
+
+#ifndef offsetof
+/** Return the offset of a field in a structure. */
+#define offsetof(TYPE, MEMBER)  __builtin_offsetof (TYPE, MEMBER)
+#endif
+
+#define _RTE_STR(x) #x
+/** Take a macro value and get a string version of it */
+#define RTE_STR(x) _RTE_STR(x)
+
+/** Mask value of type "tp" for the first "ln" bit set. */
+#define	RTE_LEN2MASK(ln, tp)	\
+	((tp)((uint64_t)-1 >> (sizeof(uint64_t) * CHAR_BIT - (ln))))
+
+/** Number of elements in the array. */
+#define	RTE_DIM(a)	(sizeof (a) / sizeof ((a)[0]))
+
+/**
+ * Converts a numeric string to the equivalent uint64_t value.
+ * As well as straight number conversion, also recognises the suffixes
+ * k, m and g for kilobytes, megabytes and gigabytes respectively.
+ *
+ * If a negative number is passed in, i.e. a string whose first non-blank
+ * character is "-", zero is returned. Zero is also returned in the case of
+ * an error with the strtoull call in the function.
+ *
+ * @param str
+ *     String containing number to convert.
+ * @return
+ *     Number.
+ */
+static inline uint64_t
+rte_str_to_size(const char *str)
+{
+	char *endptr;
+	unsigned long long size;
+
+	while (isspace((int)*str))
+		str++;
+	if (*str == '-')
+		return 0;
+
+	errno = 0;
+	size = strtoull(str, &endptr, 0);
+	if (errno)
+		return 0;
+
+	if (*endptr == ' ')
+		endptr++; /* allow 1 space gap */
+
+	switch (*endptr){
+	case 'G': case 'g': size *= 1024; /* fall-through */
+	case 'M': case 'm': size *= 1024; /* fall-through */
+	case 'K': case 'k': size *= 1024; /* fall-through */
+	default:
+		break;
+	}
+	return size;
+}
+
+/**
+ * Function to terminate the application immediately, printing an error
+ * message and returning the exit_code back to the shell.
+ *
+ * This function never returns
+ *
+ * @param exit_code
+ *     The exit code to be returned by the application
+ * @param format
+ *     The format string to be used for printing the message. This can include
+ *     printf format characters which will be expanded using any further parameters
+ *     to the function.
+ */
+void
+rte_exit(int exit_code, const char *format, ...)
+	__attribute__((noreturn))
+	__attribute__((format(printf, 2, 3)));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
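
(The .sav file appears to be a saved copy of the original common header.)
For reference, rte_str_to_size() as kept here honours the k/m/g suffixes,
e.g.:

  #include <assert.h>

  static void str_to_size_examples(void)
  {
  	assert(rte_str_to_size("2K") == 2048);
  	assert(rte_str_to_size("1m") == 1024 * 1024);
  	assert(rte_str_to_size("-4K") == 0);	/* negative input -> 0 */
  }
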
diff --git a/lib/librte_eal/windows/rte_override/rte_config.h b/lib/librte_eal/windows/rte_override/rte_config.h
new file mode 100644
index 000000000..d26f689b1
--- /dev/null
+++ b/lib/librte_eal/windows/rte_override/rte_config.h
@@ -0,0 +1,328 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+#define RTE_EXEC_ENV "windowsapp"
+#define RTE_EXEC_ENV_WINDOWSAPP 1
+#define RTE_MACHINE "native"
+#define RTE_ARCH "x86_64"
+#define RTE_ARCH_X86_64 1
+#define RTE_TOOLCHAIN "icc"
+#define RTE_TOOLCHAIN_ICC 1
+#undef RTE_LIBC
+#undef RTE_LIBC_NEWLIB_SRC
+#undef RTE_LIBC_NEWLIB_BIN
+#undef RTE_LIBC_NETINCS
+#undef RTE_LIBGLOSS
+
+#define RTE_MAX_HEAPS 32
+#define RTE_MAX_MEMSEG_LISTS 128
+#define RTE_MAX_MEMSEG_PER_LIST 8192
+#define RTE_LIBRTE_EAL 1
+#define RTE_MAX_LCORE 128
+#define RTE_MAX_NUMA_NODES 8
+#define RTE_MAX_MEMSEG 256
+#define RTE_MAX_MEMZONE 2560
+#define RTE_MAX_TAILQ 32
+#define RTE_LOG_LEVEL RTE_LOG_DEBUG
+#define RTE_LOG_DP_LEVEL RTE_LOG_DEBUG
+#define RTE_LOG_HISTORY 256
+#undef RTE_LIBEAL_USE_HPET
+#undef RTE_EAL_ALLOW_INV_SOCKET_ID
+#undef RTE_EAL_ALWAYS_PANIC_ON_ERROR
+#undef RTE_EAL_UNBIND_PORTS
+#define RTE_LIBRTE_EAL_LINUXAPP 1
+#undef RTE_LIBRTE_EAL_BAREMETAL
+#define RTE_ENABLE_AVX 1
+#undef RTE_ENABLE_AVX512
+#undef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT
+#define RTE_LIBRTE_PCI 1
+#define RTE_LIBRTE_KVARGS 1
+#define RTE_LIBRTE_ETHDEV 1
+#undef RTE_LIBRTE_ETHDEV_DEBUG
+#define RTE_MAX_ETHPORTS 32
+#define RTE_MAX_QUEUES_PER_PORT 1024
+#undef RTE_LIBRTE_IEEE1588
+#define RTE_ETHDEV_QUEUE_STAT_CNTRS 16
+#define RTE_ETHDEV_RXTX_CALLBACKS 1
+#undef RTE_ETHDEV_PROFILE_ITT_WASTED_RX_ITERATIONS
+#undef RTE_ETHDEV_TX_PREPARE_NOOP
+#define RTE_LIBRTE_PCI_BUS 1
+#undef RTE_LIBRTE_ENA_PMD
+#undef RTE_LIBRTE_ENA_DEBUG_RX
+#undef RTE_LIBRTE_ENA_DEBUG_TX
+#undef RTE_LIBRTE_ENA_DEBUG_TX_FREE
+#undef RTE_LIBRTE_ENA_DEBUG_DRIVER
+#undef RTE_LIBRTE_ENA_COM_DEBUG
+#define RTE_LIBRTE_EM_PMD 1
+#define RTE_LIBRTE_IGB_PMD 1
+#undef RTE_LIBRTE_E1000_DEBUG_INIT
+#undef RTE_LIBRTE_E1000_DEBUG_RX
+#undef RTE_LIBRTE_E1000_DEBUG_TX
+#undef RTE_LIBRTE_E1000_DEBUG_TX_FREE
+#undef RTE_LIBRTE_E1000_DEBUG_DRIVER
+#undef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
+#define RTE_LIBRTE_IXGBE_PMD 1
+#undef RTE_LIBRTE_IXGBE_DEBUG_INIT
+#undef RTE_LIBRTE_IXGBE_DEBUG_RX
+#undef RTE_LIBRTE_IXGBE_DEBUG_TX
+#undef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE
+#undef RTE_LIBRTE_IXGBE_DEBUG_DRIVER
+#undef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
+#define RTE_IXGBE_INC_VECTOR 1
+#undef RTE_LIBRTE_IXGBE_BYPASS
+#define RTE_LIBRTE_I40E_PMD 1
+#undef RTE_LIBRTE_I40E_DEBUG_RX
+#undef RTE_LIBRTE_I40E_DEBUG_TX
+#undef RTE_LIBRTE_I40E_DEBUG_TX_FREE
+#define RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC 1
+#define RTE_LIBRTE_I40E_INC_VECTOR 1
+#undef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF 64
+#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF 4
+#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM 4
+#define RTE_LIBRTE_I40E_ITR_INTERVAL -1
+#undef RTE_LIBRTE_FM10K_PMD
+#undef RTE_LIBRTE_FM10K_DEBUG_INIT
+#undef RTE_LIBRTE_FM10K_DEBUG_RX
+#undef RTE_LIBRTE_FM10K_DEBUG_TX
+#undef RTE_LIBRTE_FM10K_DEBUG_TX_FREE
+#undef RTE_LIBRTE_FM10K_DEBUG_DRIVER
+#define RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE 1
+#define RTE_LIBRTE_FM10K_INC_VECTOR 1
+#undef RTE_LIBRTE_MLX4_PMD
+#undef RTE_LIBRTE_MLX4_DEBUG
+#undef RTE_LIBRTE_MLX4_DEBUG_BROKEN_VERBS
+#define RTE_LIBRTE_MLX4_TX_MP_CACHE 8
+#undef RTE_LIBRTE_MLX5_PMD
+#undef RTE_LIBRTE_MLX5_DEBUG
+#define RTE_LIBRTE_MLX5_TX_MP_CACHE 8
+#undef RTE_LIBRTE_BNX2X_PMD
+#undef RTE_LIBRTE_BNX2X_DEBUG
+#undef RTE_LIBRTE_BNX2X_DEBUG_INIT
+#undef RTE_LIBRTE_BNX2X_DEBUG_RX
+#undef RTE_LIBRTE_BNX2X_DEBUG_TX
+#undef RTE_LIBRTE_BNX2X_MF_SUPPORT
+#undef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
+#undef RTE_LIBRTE_CXGBE_PMD
+#undef RTE_LIBRTE_CXGBE_DEBUG
+#undef RTE_LIBRTE_CXGBE_DEBUG_REG
+#undef RTE_LIBRTE_CXGBE_DEBUG_MBOX
+#undef RTE_LIBRTE_CXGBE_DEBUG_TX
+#undef RTE_LIBRTE_CXGBE_DEBUG_RX
+#undef RTE_LIBRTE_CXGBE_TPUT
+#undef RTE_LIBRTE_ENIC_PMD
+#undef RTE_LIBRTE_ENIC_DEBUG
+#undef RTE_LIBRTE_ENIC_DEBUG_FLOW
+#undef RTE_LIBRTE_NFP_PMD
+#undef RTE_LIBRTE_NFP_DEBUG
+#undef RTE_LIBRTE_MRVL_PMD
+#undef RTE_LIBRTE_BNXT_PMD
+#undef RTE_LIBRTE_SFC_EFX_PMD
+#undef RTE_LIBRTE_SFC_EFX_DEBUG
+#define RTE_LIBRTE_PMD_SOFTNIC 1
+#undef RTE_LIBRTE_PMD_SZEDATA2
+#define RTE_LIBRTE_PMD_SZEDATA2_AS 0
+#undef RTE_LIBRTE_THUNDERX_NICVF_PMD
+#undef RTE_LIBRTE_THUNDERX_NICVF_DEBUG_INIT
+#undef RTE_LIBRTE_THUNDERX_NICVF_DEBUG_RX
+#undef RTE_LIBRTE_THUNDERX_NICVF_DEBUG_TX
+#undef RTE_LIBRTE_THUNDERX_NICVF_DEBUG_DRIVER
+#undef RTE_LIBRTE_THUNDERX_NICVF_DEBUG_MBOX
+#undef RTE_LIBRTE_LIO_PMD
+#undef RTE_LIBRTE_LIO_DEBUG_DRIVER
+#undef RTE_LIBRTE_LIO_DEBUG_INIT
+#undef RTE_LIBRTE_LIO_DEBUG_RX
+#undef RTE_LIBRTE_LIO_DEBUG_TX
+#undef RTE_LIBRTE_LIO_DEBUG_MBOX
+#undef RTE_LIBRTE_LIO_DEBUG_REGS
+#undef RTE_LIBRTE_DPAA_BUS
+#undef RTE_LIBRTE_DPAA_MEMPOOL
+#undef RTE_LIBRTE_DPAA_PMD
+#undef RTE_LIBRTE_OCTEONTX_PMD
+#undef RTE_LIBRTE_OCTEONTX_DEBUG_INIT
+#undef RTE_LIBRTE_OCTEONTX_DEBUG_RX
+#undef RTE_LIBRTE_OCTEONTX_DEBUG_TX
+#undef RTE_LIBRTE_OCTEONTX_DEBUG_DRIVER
+#undef RTE_LIBRTE_OCTEONTX_DEBUG_MBOX
+#undef RTE_LIBRTE_FSLMC_BUS
+#undef RTE_LIBRTE_DPAA2_MEMPOOL
+#undef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+#undef RTE_LIBRTE_DPAA2_PMD
+#undef RTE_LIBRTE_DPAA2_DEBUG_INIT
+#undef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
+#undef RTE_LIBRTE_DPAA2_DEBUG_RX
+#undef RTE_LIBRTE_DPAA2_DEBUG_TX
+#undef RTE_LIBRTE_DPAA2_DEBUG_TX_FREE
+#undef RTE_LIBRTE_VIRTIO_PMD
+#undef RTE_LIBRTE_VIRTIO_DEBUG_INIT
+#undef RTE_LIBRTE_VIRTIO_DEBUG_RX
+#undef RTE_LIBRTE_VIRTIO_DEBUG_TX
+#undef RTE_LIBRTE_VIRTIO_DEBUG_DRIVER
+#undef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
+#undef RTE_VIRTIO_USER
+#undef RTE_LIBRTE_VMXNET3_PMD
+#undef RTE_LIBRTE_VMXNET3_DEBUG_INIT
+#undef RTE_LIBRTE_VMXNET3_DEBUG_RX
+#undef RTE_LIBRTE_VMXNET3_DEBUG_TX
+#undef RTE_LIBRTE_VMXNET3_DEBUG_TX_FREE
+#undef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
+#define RTE_LIBRTE_PMD_RING 1
+#define RTE_PMD_RING_MAX_RX_RINGS 16
+#define RTE_PMD_RING_MAX_TX_RINGS 16
+#define RTE_LIBRTE_PMD_PCAP 1
+#undef RTE_LIBRTE_PMD_BOND
+#undef RTE_LIBRTE_BOND_DEBUG_ALB
+#undef RTE_LIBRTE_BOND_DEBUG_ALB_L1
+#undef RTE_LIBRTE_QEDE_PMD
+#undef RTE_LIBRTE_QEDE_DEBUG_INIT
+#undef RTE_LIBRTE_QEDE_DEBUG_INFO
+#undef RTE_LIBRTE_QEDE_DEBUG_DRIVER
+#undef RTE_LIBRTE_QEDE_DEBUG_TX
+#undef RTE_LIBRTE_QEDE_DEBUG_RX
+#define RTE_LIBRTE_QEDE_FW ""
+#undef RTE_LIBRTE_PMD_AF_PACKET
+#undef RTE_LIBRTE_ARK_PMD
+#undef RTE_LIBRTE_ARK_PAD_TX
+#undef RTE_LIBRTE_ARK_DEBUG_RX
+#undef RTE_LIBRTE_ARK_DEBUG_TX
+#undef RTE_LIBRTE_ARK_DEBUG_STATS
+#undef RTE_LIBRTE_ARK_DEBUG_TRACE
+#undef RTE_LIBRTE_AVP_PMD
+#undef RTE_LIBRTE_AVP_DEBUG_RX
+#undef RTE_LIBRTE_AVP_DEBUG_TX
+#undef RTE_LIBRTE_AVP_DEBUG_DRIVER
+#undef RTE_LIBRTE_AVP_DEBUG_BUFFERS
+#undef RTE_LIBRTE_PMD_TAP
+#undef RTE_LIBRTE_PMD_NULL
+#undef RTE_LIBRTE_PMD_FAILSAFE
+#define RTE_PMD_PACKET_PREFETCH 1
+#define RTE_LIBRTE_CRYPTODEV 1
+#undef RTE_LIBRTE_CRYPTODEV_DEBUG
+#define RTE_CRYPTO_MAX_DEVS 64
+#define RTE_CRYPTODEV_NAME_LEN 64
+#undef RTE_LIBRTE_PMD_ARMV8_CRYPTO
+#undef RTE_LIBRTE_PMD_ARMV8_CRYPTO_DEBUG
+#undef RTE_LIBRTE_PMD_DPAA2_SEC
+#undef RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT
+#undef RTE_LIBRTE_DPAA2_SEC_DEBUG_DRIVER
+#undef RTE_LIBRTE_DPAA2_SEC_DEBUG_RX
+#undef RTE_LIBRTE_PMD_DPAA_SEC
+#undef RTE_LIBRTE_DPAA_SEC_DEBUG_INIT
+#undef RTE_LIBRTE_DPAA_SEC_DEBUG_DRIVER
+#undef RTE_LIBRTE_DPAA_SEC_DEBUG_RX
+#undef RTE_LIBRTE_PMD_QAT
+#undef RTE_LIBRTE_PMD_QAT_DEBUG_INIT
+#undef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+#undef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+#undef RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER
+#define RTE_QAT_PMD_MAX_NB_SESSIONS 2048
+#undef RTE_LIBRTE_PMD_AESNI_MB
+#undef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
+#undef RTE_LIBRTE_PMD_OPENSSL
+#undef RTE_LIBRTE_PMD_OPENSSL_DEBUG
+#undef RTE_LIBRTE_PMD_AESNI_GCM
+#undef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
+#undef RTE_LIBRTE_PMD_SNOW3G
+#undef RTE_LIBRTE_PMD_SNOW3G_DEBUG
+#undef RTE_LIBRTE_PMD_KASUMI
+#undef RTE_LIBRTE_PMD_KASUMI_DEBUG
+#undef RTE_LIBRTE_PMD_ZUC
+#undef RTE_LIBRTE_PMD_ZUC_DEBUG
+#undef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
+#undef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER_DEBUG
+#undef RTE_LIBRTE_PMD_NULL_CRYPTO
+#undef RTE_LIBRTE_PMD_MRVL_CRYPTO
+#undef RTE_LIBRTE_PMD_MRVL_CRYPTO_DEBUG
+#define RTE_LIBRTE_SECURITY 1
+#define RTE_LIBRTE_EVENTDEV 1
+#undef RTE_LIBRTE_EVENTDEV_DEBUG
+#define RTE_EVENT_MAX_DEVS 16
+#define RTE_EVENT_MAX_QUEUES_PER_DEV 64
+#define RTE_LIBRTE_PMD_SKELETON_EVENTDEV 1
+#undef RTE_LIBRTE_PMD_SKELETON_EVENTDEV_DEBUG
+#define RTE_LIBRTE_PMD_SW_EVENTDEV 1
+#undef RTE_LIBRTE_PMD_SW_EVENTDEV_DEBUG
+#undef RTE_LIBRTE_PMD_OCTEONTX_SSOVF
+#undef RTE_LIBRTE_PMD_OCTEONTX_SSOVF_DEBUG
+#define RTE_LIBRTE_RING 1
+#undef RTE_LIBRTE_RING_DEBUG
+#define RTE_LIBRTE_MEMPOOL 1
+#define RTE_MEMPOOL_CACHE_MAX_SIZE 512
+#undef RTE_LIBRTE_MEMPOOL_DEBUG
+#define RTE_DRIVER_MEMPOOL_RING 1
+#define RTE_DRIVER_MEMPOOL_STACK 1
+#undef RTE_LIBRTE_OCTEONTX_MEMPOOL
+#undef RTE_LIBRTE_OCTEONTX_MEMPOOL_DEBUG
+#define RTE_LIBRTE_MBUF 1
+#undef RTE_LIBRTE_MBUF_DEBUG
+#define RTE_MBUF_DEFAULT_MEMPOOL_OPS "ring_mp_mc"
+#undef RTE_MBUF_SCATTER_GATHER
+#undef RTE_MBUF_REFCNT_ATOMIC
+#define RTE_PKTMBUF_HEADROOM 128
+#define RTE_LIBRTE_TIMER 1
+#undef RTE_LIBRTE_TIMER_DEBUG
+#define RTE_LIBRTE_CFGFILE 1
+#define RTE_LIBRTE_CMDLINE 1
+#undef RTE_LIBRTE_CMDLINE_DEBUG
+#define RTE_LIBRTE_HASH 1
+#undef RTE_LIBRTE_HASH_DEBUG
+#undef RTE_LIBRTE_EFD
+#undef RTE_LIBRTE_MEMBER
+#define RTE_LIBRTE_JOBSTATS 1
+#define RTE_LIBRTE_METRICS 1
+#define RTE_LIBRTE_BITRATE 1
+#define RTE_LIBRTE_LATENCY_STATS 1
+#define RTE_LIBRTE_LPM 1
+#undef RTE_LIBRTE_LPM_DEBUG
+#define RTE_LIBRTE_ACL 1
+#undef RTE_LIBRTE_ACL_DEBUG
+#undef RTE_LIBRTE_POWER
+#undef RTE_LIBRTE_POWER_DEBUG
+#define RTE_MAX_LCORE_FREQS 64
+#define RTE_LIBRTE_NET 1
+#define RTE_LIBRTE_IP_FRAG 1
+#undef RTE_LIBRTE_IP_FRAG_DEBUG
+#define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
+#undef RTE_LIBRTE_IP_FRAG_TBL_STAT
+#define RTE_LIBRTE_GRO 1
+#define RTE_LIBRTE_GSO 1
+#define RTE_LIBRTE_METER 1
+#define RTE_LIBRTE_FLOW_CLASSIFY 1
+#define RTE_LIBRTE_SCHED 1
+#undef RTE_SCHED_DEBUG
+#undef RTE_SCHED_RED
+#undef RTE_SCHED_COLLECT_STATS
+#undef RTE_SCHED_SUBPORT_TC_OV
+#define RTE_SCHED_PORT_N_GRINDERS 8
+#undef RTE_SCHED_VECTOR
+#define RTE_LIBRTE_DISTRIBUTOR 1
+#define RTE_LIBRTE_REORDER 1
+#define RTE_LIBRTE_PORT 1
+#undef RTE_PORT_STATS_COLLECT
+#undef RTE_PORT_PCAP
+#define RTE_LIBRTE_TABLE 1
+#undef RTE_TABLE_STATS_COLLECT
+#define RTE_LIBRTE_PIPELINE 1
+#undef RTE_PIPELINE_STATS_COLLECT
+#undef RTE_LIBRTE_KNI
+#undef RTE_LIBRTE_PMD_KNI
+#undef RTE_KNI_KMOD
+#undef RTE_KNI_KMOD_ETHTOOL
+#undef RTE_KNI_PREEMPT_DEFAULT
+#undef RTE_LIBRTE_PDUMP
+#undef RTE_LIBRTE_VHOST
+#undef RTE_LIBRTE_VHOST_NUMA
+#undef RTE_LIBRTE_VHOST_DEBUG
+#undef RTE_LIBRTE_PMD_VHOST
+#define RTE_APP_TEST 1
+#undef RTE_APP_TEST_RESOURCE_TAR
+#define RTE_APP_CHKINCS 1
+#define RTE_TEST_PMD 1
+#undef RTE_TEST_PMD_RECORD_CORE_CYCLES
+#undef RTE_TEST_PMD_RECORD_BURST_STATS
+#define RTE_APP_CRYPTO_PERF 1
+#define RTE_APP_EVENTDEV 1
+#define RTE_EAL_PMD_PATH ""
+#define RTE_CACHE_LINE_SIZE 64
+#define RTE_CACHE_LINE_MIN_SIZE 64
diff --git a/lib/librte_eal/windows/rte_override/rte_cpuflags.h b/lib/librte_eal/windows/rte_override/rte_cpuflags.h
new file mode 100644
index 000000000..23accc85b
--- /dev/null
+++ b/lib/librte_eal/windows/rte_override/rte_cpuflags.h
@@ -0,0 +1,3 @@
+#define RTE_COMPILE_TIME_CPUFLAGS RTE_CPUFLAG_SSE4_1
+
+#include "..\common\include\arch\x86\rte_cpuflags.h"
diff --git a/lib/librte_eal/windows/rte_override/rte_cycles.h b/lib/librte_eal/windows/rte_override/rte_cycles.h
new file mode 100644
index 000000000..98e4cc426
--- /dev/null
+++ b/lib/librte_eal/windows/rte_override/rte_cycles.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+#pragma once
+
+#include "..\..\common\include\generic\rte_cycles.h"
+
+static inline uint64_t
+rte_rdtsc(void)
+{
+	return (uint64_t) __rdtsc();
+}
+
+static inline uint64_t
+rte_rdtsc_precise(void)
+{
+	rte_mb();
+	return rte_rdtsc();
+}
+
+static inline uint64_t
+rte_get_tsc_cycles(void)
+{
+	return rte_rdtsc();
+}
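
A small sketch of how the TSC helpers above are typically used to time a
region (the callback type is illustrative):

  static uint64_t time_region(void (*fn)(void))
  {
  	uint64_t start = rte_rdtsc_precise();	/* fenced read at the start */
  	fn();
  	return rte_rdtsc() - start;		/* elapsed TSC ticks */
  }
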
diff --git a/lib/librte_eal/windows/rte_override/rte_debug.h b/lib/librte_eal/windows/rte_override/rte_debug.h
new file mode 100644
index 000000000..3469ff3d7
--- /dev/null
+++ b/lib/librte_eal/windows/rte_override/rte_debug.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+#pragma once
+
+/* If rte_common.h has already been included, then we will have issues */
+#ifdef _RTE_DEBUG_H_
+#error
+#endif
+
+#include <stdio.h>
+#include "../common/include/rte_debug.h"
+
+#undef rte_panic
+#define rte_panic(fmt, ...)	do { printf(fmt, ##__VA_ARGS__); while (1) ; } while (0)
+
+#undef RTE_VERIFY
+#define	RTE_VERIFY(exp)	do {											\
+	if (!(exp))                                                         \
+		rte_panic("line %d\tassert \"" #exp "\" failed\n", __LINE__);	\
+} while (0)
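
With the definitions above, assertion failures print and spin rather than
abort; a usage sketch (RTE_MAX_ETHPORTS comes from rte_config.h in this
patch):

  static void check_port_count(int nb_ports)
  {
  	RTE_VERIFY(nb_ports > 0);
  	if (nb_ports > RTE_MAX_ETHPORTS)
  		rte_panic("too many ports: %d\n", nb_ports);
  }
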
diff --git a/lib/librte_eal/windows/rte_override/rte_io.h b/lib/librte_eal/windows/rte_override/rte_io.h
new file mode 100644
index 000000000..d111c4239
--- /dev/null
+++ b/lib/librte_eal/windows/rte_override/rte_io.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+
+#pragma once
+
+#include "..\..\common\include\generic\rte_io.h"
diff --git a/lib/librte_eal/windows/rte_override/rte_lcore.h b/lib/librte_eal/windows/rte_override/rte_lcore.h
new file mode 100644
index 000000000..d2a2788c8
--- /dev/null
+++ b/lib/librte_eal/windows/rte_override/rte_lcore.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+#ifndef _RTE_WIN_LCORE_H_
+#define _RTE_WIN_LCORE_H_
+
+/* DPDK 1.8 */
+typedef	unsigned long rte_cpuset_t;
+
+/* Include the original rte_lcore.h from common */
+#include "../../common/include/rte_lcore.h"
+
+
+#endif
\ No newline at end of file
diff --git a/lib/librte_eal/windows/rte_override/rte_log.h.sav b/lib/librte_eal/windows/rte_override/rte_log.h.sav
new file mode 100644
index 000000000..e892a69ac
--- /dev/null
+++ b/lib/librte_eal/windows/rte_override/rte_log.h.sav
@@ -0,0 +1,6 @@
+#pragma once
+
+#include "..\..\common\include\rte_log.h"
+
+#undef RTE_LOG
+#define RTE_LOG(l, t, ...)	printf(__VA_ARGS__)
diff --git a/lib/librte_eal/windows/rte_override/rte_memcpy.h b/lib/librte_eal/windows/rte_override/rte_memcpy.h
new file mode 100644
index 000000000..6132df294
--- /dev/null
+++ b/lib/librte_eal/windows/rte_override/rte_memcpy.h
@@ -0,0 +1,3 @@
+#pragma once
+
+#define rte_memcpy(dest, src, n)	memcpy(dest, src,n)
diff --git a/lib/librte_eal/windows/rte_override/rte_memory.h b/lib/librte_eal/windows/rte_override/rte_memory.h
new file mode 100644
index 000000000..0df9dd822
--- /dev/null
+++ b/lib/librte_eal/windows/rte_override/rte_memory.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+#pragma once
+
+/* If rte_common.h has already been included, then we will have issues */
+#ifdef _RTE_MEMORY_H_
+#error
+#endif
+
+#ifdef DPDKWIN_NO_WARNINGS
+#pragma warning (disable : 66)	/*  warning #66: enumeration value is out of "int" range */
+#endif
+
+#include "../common/include/rte_memory.h"
+
+#ifdef DPDKWIN_NO_WARNINGS
+#pragma warning (enable : 66)
+#endif
diff --git a/lib/librte_eal/windows/rte_override/rte_pause.h b/lib/librte_eal/windows/rte_override/rte_pause.h
new file mode 100644
index 000000000..a8fa3bf51
--- /dev/null
+++ b/lib/librte_eal/windows/rte_override/rte_pause.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+#pragma once
+
+#include "..\..\common\include\generic\rte_pause.h"
+#if defined(RTE_ARCH_X86)
+#include "..\..\common\include\arch\x86\rte_pause.h"
+#endif
diff --git a/lib/librte_eal/windows/rte_override/rte_pci.h b/lib/librte_eal/windows/rte_override/rte_pci.h
new file mode 100644
index 000000000..fbf62b861
--- /dev/null
+++ b/lib/librte_eal/windows/rte_override/rte_pci.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+#pragma once
+
+#include "..\..\..\librte_pci\rte_pci.h"
diff --git a/lib/librte_eal/windows/rte_override/rte_per_lcore.h b/lib/librte_eal/windows/rte_override/rte_per_lcore.h
new file mode 100644
index 000000000..3dd629e68
--- /dev/null
+++ b/lib/librte_eal/windows/rte_override/rte_per_lcore.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+#pragma once
+
+#include "..\..\common\include\rte_per_lcore.h"
+
+/* Undefine the stuff that is problematic for windows and redefine */
+#undef RTE_DEFINE_PER_LCORE
+#undef RTE_DECLARE_PER_LCORE
+
+
+/**
+ * @file
+ * Per-lcore variables in RTE on windows environment
+ */
+
+/**
+ * Macro to define a per lcore variable "name" of type "type", don't
+ * use keywords like "static" or "volatile" in type, just prefix the
+ * whole macro.
+ */
+#define RTE_DEFINE_PER_LCORE(type, name)			__declspec(thread) type per_lcore_##name
+
+/**
+ * Macro to declare an extern per lcore variable "name" of type "type"
+ */
+#define RTE_DECLARE_PER_LCORE(type, name)			__declspec(thread) extern type per_lcore_##name
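
A minimal sketch of the per-lcore macros in use, assuming RTE_PER_LCORE()
from the included common header is still in effect:

  /* One instance per thread, via __declspec(thread). */
  RTE_DEFINE_PER_LCORE(unsigned int, _pkt_count);

  static void count_packet(void)
  {
  	RTE_PER_LCORE(_pkt_count)++;
  }
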
diff --git a/lib/librte_eal/windows/rte_override/rte_prefetch.h b/lib/librte_eal/windows/rte_override/rte_prefetch.h
new file mode 100644
index 000000000..c5a750715
--- /dev/null
+++ b/lib/librte_eal/windows/rte_override/rte_prefetch.h
@@ -0,0 +1,29 @@
+#pragma once
+
+#ifdef DPDKWIN_NO_WARNINGS
+#pragma warning (disable : 2330)
+#endif
+
+static inline void rte_prefetch0(const volatile void *p)
+{
+	_mm_prefetch(p, _MM_HINT_T0);
+}
+
+static inline void rte_prefetch1(const volatile void *p)
+{
+	_mm_prefetch(p, _MM_HINT_T1);
+}
+
+static inline void rte_prefetch2(const volatile void *p)
+{
+	_mm_prefetch(p, _MM_HINT_T2);
+}
+
+static inline void rte_prefetch_non_temporal(const volatile void *p)
+{
+	_mm_prefetch(p, _MM_HINT_NTA);
+}
+
+#ifdef DPDKWIN_NO_WARNINGS
+#pragma warning (enable : 2330)
+#endif
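
Typical use of the prefetch hints above while walking an array of buffers
(the loop body is elided):

  static void process_bufs(void * const *bufs, int n)
  {
  	int i;

  	for (i = 0; i < n; i++) {
  		if (i + 1 < n)
  			rte_prefetch0(bufs[i + 1]);	/* warm the next buffer */
  		/* ... work on bufs[i] ... */
  	}
  }
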
diff --git a/lib/librte_eal/windows/rte_override/rte_rtm.h b/lib/librte_eal/windows/rte_override/rte_rtm.h
new file mode 100644
index 000000000..0313ca0b1
--- /dev/null
+++ b/lib/librte_eal/windows/rte_override/rte_rtm.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+
+#pragma once
+
+#include "..\..\common\include\arch\x86\rte_rtm.h"
diff --git a/lib/librte_eal/windows/rte_override/rte_rwlock.h b/lib/librte_eal/windows/rte_override/rte_rwlock.h
new file mode 100644
index 000000000..1ea667d0c
--- /dev/null
+++ b/lib/librte_eal/windows/rte_override/rte_rwlock.h
@@ -0,0 +1,40 @@
+
+
+#ifndef _RTE_RWLOCK_WIN_H_
+#define _RTE_RWLOCK_WIN_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "generic/rte_rwlock.h"
+
+static inline void
+rte_rwlock_read_lock_tm(rte_rwlock_t *rwl)
+{
+	rte_rwlock_read_lock(rwl);
+}
+
+static inline void
+rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl)
+{
+	rte_rwlock_read_unlock(rwl);
+}
+
+static inline void
+rte_rwlock_write_lock_tm(rte_rwlock_t *rwl)
+{
+	rte_rwlock_write_lock(rwl);
+}
+
+static inline void
+rte_rwlock_write_unlock_tm(rte_rwlock_t *rwl)
+{
+	rte_rwlock_write_unlock(rwl);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_RWLOCK_WIN_H_ */
\ No newline at end of file
diff --git a/lib/librte_eal/windows/rte_override/rte_spinlock.h b/lib/librte_eal/windows/rte_override/rte_spinlock.h
new file mode 100644
index 000000000..475a406e7
--- /dev/null
+++ b/lib/librte_eal/windows/rte_override/rte_spinlock.h
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+#pragma once
+
+#ifndef _RTE_SPINLOCK_H_
+#define _RTE_SPINLOCK_H_
+
+#include <rte_eal.h>
+#include <rte_pause.h>
+
+/**
+* The rte_spinlock_t type.
+*/
+typedef struct {
+	volatile long locked; /**< lock status 0 = unlocked, 1 = locked */
+} rte_spinlock_t;
+
+/**
+* A static spinlock initializer.
+*/
+#define RTE_SPINLOCK_INITIALIZER { 0 }
+
+/**
+* Initialize the spinlock to an unlocked state.
+*
+* @param sl
+*   A pointer to the spinlock.
+*/
+static inline void
+rte_spinlock_init(rte_spinlock_t *sl)
+{
+	sl->locked = 0;
+}
+
+/**
+* Take the spinlock.
+*
+* @param sl
+*   A pointer to the spinlock.
+*/
+static inline void
+rte_spinlock_lock(rte_spinlock_t *sl)
+{
+	while (_InterlockedExchange(&sl->locked, 1))
+		while (sl->locked)
+			rte_pause();
+}
+
+/**
+* Release the spinlock.
+*
+* @param sl
+*   A pointer to the spinlock.
+*/
+static inline void
+rte_spinlock_unlock(rte_spinlock_t *sl)
+{
+	_InterlockedExchange(&sl->locked, 0);
+}
+
+/**
+* Try to take the lock.
+*
+* @param sl
+*   A pointer to the spinlock.
+* @return
+*   1 if the lock is successfully taken; 0 otherwise.
+*/
+static inline int
+rte_spinlock_trylock(rte_spinlock_t *sl)
+{
+	return _InterlockedExchange(&sl->locked, 1) == 0;
+}
+
+/**
+* Test if the lock is taken.
+*
+* @param sl
+*   A pointer to the spinlock.
+* @return
+*   1 if the lock is currently taken; 0 otherwise.
+*/
+static inline int rte_spinlock_is_locked(rte_spinlock_t *sl)
+{
+	return sl->locked;
+}
+
+/**
+* Test if hardware transactional memory (lock elision) is supported
+*
+* @return
+*   1 if the hardware transactional memory is supported; 0 otherwise.
+*/
+static inline int rte_tm_supported(void)
+{
+	return 0;
+}
+
+/**
+* Try to execute critical section in a hardware memory transaction,
+* if it fails or not available take the spinlock.
+*
+* NOTE: An attempt to perform a HW I/O operation inside a hardware memory
+* transaction always aborts the transaction since the CPU is not able to
+* roll-back should the transaction fail. Therefore, hardware transactional
+* locks are not advised to be used around rte_eth_rx_burst() and
+* rte_eth_tx_burst() calls.
+*
+* @param sl
+*   A pointer to the spinlock.
+*/
+static inline void
+rte_spinlock_lock_tm(rte_spinlock_t *sl);
+
+/**
+* Commit hardware memory transaction or release the spinlock if
+* the spinlock is used as a fall-back
+*
+* @param sl
+*   A pointer to the spinlock.
+*/
+static inline void
+rte_spinlock_unlock_tm(rte_spinlock_t *sl);
+
+/**
+* Try to execute the critical section in a hardware memory transaction;
+* if that fails or is not available, try to take the lock.
+*
+* NOTE: An attempt to perform a HW I/O operation inside a hardware memory
+* transaction always aborts the transaction since the CPU is not able to
+* roll back should the transaction fail. Therefore, hardware transactional
+* locks are not advised around rte_eth_rx_burst() and rte_eth_tx_burst()
+* calls.
+*
+* @param sl
+*   A pointer to the spinlock.
+* @return
+*   1 if the hardware memory transaction is successfully started
+*   or the lock is successfully taken; 0 otherwise.
+*/
+static inline int
+rte_spinlock_trylock_tm(rte_spinlock_t *sl)
+{
+	return rte_spinlock_trylock(sl);
+}
+
+/**
+* The rte_spinlock_recursive_t type.
+*/
+typedef struct {
+	rte_spinlock_t sl; /**< the actual spinlock */
+	volatile int user; /**< core id using lock, -1 for unused */
+	volatile int count; /**< number of times the owning core has taken the lock */
+} rte_spinlock_recursive_t;
+
+/**
+* A static recursive spinlock initializer.
+*/
+#define RTE_SPINLOCK_RECURSIVE_INITIALIZER {RTE_SPINLOCK_INITIALIZER, -1, 0}
+
+/**
+* Initialize the recursive spinlock to an unlocked state.
+*
+* @param slr
+*   A pointer to the recursive spinlock.
+*/
+static inline void rte_spinlock_recursive_init(rte_spinlock_recursive_t *slr)
+{
+	rte_spinlock_init(&slr->sl);
+	slr->user = -1;
+	slr->count = 0;
+}
+
+/**
+* Take the recursive spinlock.
+*
+* @param slr
+*   A pointer to the recursive spinlock.
+*/
+static inline void rte_spinlock_recursive_lock(rte_spinlock_recursive_t *slr)
+{
+	int id = rte_gettid();
+
+	if (slr->user != id) {
+		rte_spinlock_lock(&slr->sl);
+		slr->user = id;
+	}
+	slr->count++;
+}
+
+/**
+* Release the recursive spinlock.
+*
+* @param slr
+*   A pointer to the recursive spinlock.
+*/
+static inline void rte_spinlock_recursive_unlock(rte_spinlock_recursive_t *slr)
+{
+	if (--(slr->count) == 0) {
+		slr->user = -1;
+		rte_spinlock_unlock(&slr->sl);
+	}
+}
+
+/**
+* Try to take the recursive lock.
+*
+* @param slr
+*   A pointer to the recursive spinlock.
+* @return
+*   1 if the lock is successfully taken; 0 otherwise.
+*/
+static inline int rte_spinlock_recursive_trylock(rte_spinlock_recursive_t *slr)
+{
+	int id = rte_gettid();
+
+	if (slr->user != id) {
+		if (rte_spinlock_trylock(&slr->sl) == 0)
+			return 0;
+		slr->user = id;
+	}
+	slr->count++;
+	return 1;
+}
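+
+/*
+* Minimal usage sketch (illustrative only): the owning core may re-take the
+* recursive lock, e.g. from a helper invoked while the lock is already held;
+* only the outermost unlock releases it.
+*/
+static inline void
+rte_spinlock_recursive_usage_sketch(rte_spinlock_recursive_t *slr)
+{
+	rte_spinlock_recursive_lock(slr);
+	rte_spinlock_recursive_lock(slr);	/* nested: only bumps the count */
+	rte_spinlock_recursive_unlock(slr);
+	rte_spinlock_recursive_unlock(slr);	/* lock actually released here */
+}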
+
+/**
+* Try to execute the critical section in a hardware memory transaction;
+* if that fails or is not available, take the recursive spinlock.
+*
+* NOTE: An attempt to perform a HW I/O operation inside a hardware memory
+* transaction always aborts the transaction since the CPU is not able to
+* roll back should the transaction fail. Therefore, hardware transactional
+* locks are not advised around rte_eth_rx_burst() and rte_eth_tx_burst()
+* calls.
+*
+* @param slr
+*   A pointer to the recursive spinlock.
+*/
+static inline void rte_spinlock_recursive_lock_tm(
+	rte_spinlock_recursive_t *slr)
+{
+	rte_spinlock_recursive_lock(slr);
+}
+
+/**
+* Commit the hardware memory transaction, or release the recursive spinlock
+* if the recursive spinlock was used as a fall-back.
+*
+* @param slr
+*   A pointer to the recursive spinlock.
+*/
+static inline void rte_spinlock_recursive_unlock_tm(
+	rte_spinlock_recursive_t *slr)
+{
+	rte_spinlock_recursive_unlock(slr);
+}
+
+/**
+* Try to execute the critical section in a hardware memory transaction;
+* if that fails or is not available, try to take the recursive lock.
+*
+* NOTE: An attempt to perform a HW I/O operation inside a hardware memory
+* transaction always aborts the transaction since the CPU is not able to
+* roll back should the transaction fail. Therefore, hardware transactional
+* locks are not advised around rte_eth_rx_burst() and rte_eth_tx_burst()
+* calls.
+*
+* @param slr
+*   A pointer to the recursive spinlock.
+* @return
+*   1 if the hardware memory transaction is successfully started
+*   or the lock is successfully taken; 0 otherwise.
+*/
+static inline int rte_spinlock_recursive_trylock_tm(
+	rte_spinlock_recursive_t *slr)
+{
+	return rte_spinlock_recursive_trylock(slr);
+}
+
+#endif /* _RTE_SPINLOCK_H_ */
diff --git a/lib/librte_eal/windows/rte_override/rte_vect.h b/lib/librte_eal/windows/rte_override/rte_vect.h
new file mode 100644
index 000000000..f2530147d
--- /dev/null
+++ b/lib/librte_eal/windows/rte_override/rte_vect.h
@@ -0,0 +1,5 @@
+#pragma once
+
+#define __ICC	1600
+
+#include "..\..\common\include\arch\x86\rte_vect.h"
diff --git a/lib/librte_eal/windows/rte_override/rte_wincompat.h b/lib/librte_eal/windows/rte_override/rte_wincompat.h
new file mode 100644
index 000000000..2dff9f279
--- /dev/null
+++ b/lib/librte_eal/windows/rte_override/rte_wincompat.h
@@ -0,0 +1,347 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+
+#ifndef _RTE_WINCOMPAT_H_
+#define _RTE_WINCOMPAT_H_
+
+#if !defined _M_IX86 && !defined _M_X64
+#error Unsupported architecture
+#endif
+
+#include <stdint.h>
+
+/* Required for definition of read(), write() */
+#include <io.h>
+#include <intrin.h>
+
+/* limits.h replacement */
+#include <stdlib.h>
+#ifndef PATH_MAX
+#define PATH_MAX _MAX_PATH
+#endif
+
+
+#ifndef EDQUOT
+#define EDQUOT 0xFE
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Quick generic implementation of popcount - all architectures */
+static __forceinline int __popcount(unsigned int x)
+{
+	static const unsigned int m1 = 0x55555555;
+	static const unsigned int m2 = 0x33333333;
+	static const unsigned int m4 = 0x0f0f0f0f;
+	static const unsigned int h01 = 0x01010101;
+
+	x -= (x >> 1) & m1;
+	x = (x & m2) + ((x >> 2) & m2);
+	x = (x + (x >> 4)) & m4;
+	return (x * h01) >> 24;
+}
+
+static __forceinline int __builtin_popcountl(unsigned long x)
+{
+	return __popcount((unsigned int)x);
+}
+
+static __forceinline int __builtin_popcountll(unsigned long long x)
+{
+	static const unsigned long long m1 = 0x5555555555555555LL;
+	static const unsigned long long m2 = 0x3333333333333333LL;
+	static const unsigned long long m4 = 0x0f0f0f0f0f0f0f0fLL;
+	static const unsigned long long h01 = 0x0101010101010101LL;
+
+	x -= (x >> 1) & m1;
+	x = (x & m2) + ((x >> 2) & m2);
+	x = (x + (x >> 4)) & m4;
+	return (x * h01) >> 56;
+}
+
+static __forceinline int __builtin_popcount(unsigned int x)
+{
+	return __popcount(x);
+}
+
+// __builtin_ctz - count of trailing zeroes
+// _BitScanForward returns the index of the first set bit, scanning from LSB to MSB
+static __forceinline int __builtin_ctz(unsigned int x)
+{
+	unsigned long index = 0;
+
+	if (_BitScanForward(&index, x))
+		return index;
+
+	return 32;
+}
+
+// __builtin_ctzl - count of trailing zeroes for long
+static __forceinline int __builtin_ctzl(unsigned long x)
+{
+	return __builtin_ctz((unsigned int) x);
+}
+
+// __builtin_ctzll - count of trailing zeroes for long long (64 bits)
+static __forceinline int __builtin_ctzll(unsigned long long x)
+{
+	unsigned long index = 0;
+
+	if (_BitScanForward64(&index, x))
+		return (int) index;
+
+	return 64;
+}
+
+
+// __builtin_clz - count of leading zeroes
+// _BitScanReverse returns the index of the first set bit, scanning from MSB to LSB
+static __forceinline int __builtin_clz(unsigned int x)
+{
+	unsigned long index = 0;
+
+	if (_BitScanReverse(&index, x))
+		return ((sizeof(x) * CHAR_BIT) -1 - index);
+
+	return 32;
+}
+
+// __builtin_clzl - count of leading zeroes for long
+static __forceinline int __builtin_clzl(unsigned long x)
+{
+	return __builtin_clz((unsigned int) x);
+}
+
+// __builtin_clzll - count of leading zeroes for long long (64 bits)
+static __forceinline int __builtin_clzll(unsigned long long x)
+{
+	unsigned long index = 0;
+
+	if (_BitScanReverse64(&index, x))
+		return ((sizeof(x) * CHAR_BIT) - 1 - index);
+
+	return 64;
+}
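+
+/*
+* Expected behaviour of the bit-scan shims above, e.g.:
+*   __builtin_ctz(0x10)   == 4     __builtin_clz(0x10)          == 27
+*   __builtin_ctzll(1)    == 0     __builtin_clzll(1ULL << 63)  == 0
+* Unlike the GCC builtins (undefined for 0), these return the operand width
+* (32 or 64) when the argument is 0.
+*/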
+
+static __forceinline uint32_t __builtin_bswap32(uint32_t val)
+{
+	return (uint32_t)_byteswap_ulong((unsigned long)val);
+}
+
+static __forceinline uint64_t __builtin_bswap64(uint64_t val)
+{
+	return (uint64_t) _byteswap_uint64((unsigned long long)val);
+}
+
+typedef int useconds_t;
+
+/* Busy-wait until at least 'us' microseconds have elapsed. */
+static inline void usleep(useconds_t us)
+{
+	LARGE_INTEGER freq, start, current;
+	useconds_t elapsed_us;
+
+	QueryPerformanceFrequency(&freq);
+	QueryPerformanceCounter(&start);
+
+	do {
+		QueryPerformanceCounter(&current);
+		/* Convert elapsed ticks to microseconds. */
+		elapsed_us = (useconds_t)(((current.QuadPart - start.QuadPart) *
+			1000 * 1000) / freq.QuadPart);
+	} while (elapsed_us < us);
+}
+
+static inline int getuid (void)
+{
+	return 0;
+}
+
+#include <string.h>
+static inline char* strtok_r(char *str, const char *delim, char **nextp)
+{
+	char *ret;
+
+	if (str == NULL)
+		str = *nextp;
+
+	str += strspn(str, delim);
+	if (*str == '\0')
+		return NULL;
+
+	ret = str;
+	str += strcspn(str, delim);
+
+	if (*str)
+		*str++ = '\0';
+
+	*nextp = str;
+	return ret;
+}
+
+#define index(a, b)     strchr(a, b)
+#define rindex(a, b)    strrchr(a, b)
+
+#define pipe(i)         _pipe(i, 8192, _O_BINARY)
+
+#define siglongjmp(a, err)  /* NO-OP */
+
+#define strncasecmp(s1,s2,count)        _strnicmp(s1,s2,count)
+
+// Replacement with safe string functions
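+// NOTE: these mappings rely on 'dest' being a character array visible at the
+// call site; if a plain char * is passed, sizeof(dest) evaluates to the
+// pointer size rather than the buffer size, so such callers should use the
+// *_s functions directly.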
+#define strcpy(dest,src)                strcpy_s(dest,sizeof(dest),src)
+#define strncpy(dest,src,count)         strncpy_s(dest,sizeof(dest),src,count)
+#define strlcpy(dest,src,count)			strncpy_s(dest,sizeof(dest),src,count)
+#define strerror(errnum)                WinSafeStrError(errnum)
+#define strsep(str,sep)                 WinStrSep(str,sep)
+#define strdup(str)                     _strdup(str)
+#define strcat(dest,src)                strcat_s(dest,sizeof(dest),src)
+#define sscanf(source,pattern, ...)		sscanf_s(source,pattern, __VA_ARGS__)
+
+
+static inline char* WinSafeStrError(int errnum)
+{
+	static char buffer[256];
+
+	ZeroMemory(buffer, sizeof(buffer));
+	strerror_s(buffer, sizeof(buffer), errnum);
+	return buffer;
+}
+
+static inline char* WinStrSep(char** ppString, char* pSeparator)
+{
+	char *pStrStart = NULL;
+
+	if ((ppString != NULL) && (*ppString != NULL) && (**ppString != '\0')) {
+		pStrStart = *ppString;
+		char *pStr = pStrStart + strcspn(pStrStart, pSeparator);
+
+		if (*pStr == '\0') {
+			/* No separator left: this was the last token. */
+			*ppString = NULL;
+		} else {
+			*pStr = '\0';
+			*ppString = pStr + 1;
+		}
+	}
+
+	return pStrStart;
+}
+
+#define sleep(secs)                 Sleep((secs)*1000)   // Windows Sleep() requires milliseconds
+#define ftruncate(fd,len)			_chsize_s(fd,len)
+
+// CPU set function overrides
+#define CPU_ZERO(cpuset)                {*cpuset = 0;}
+#define CPU_SET(cpucore, cpuset)        { *cpuset |= (1 << cpucore); }
+#define CPU_ISSET(cpucore, cpuset)      ((*cpuset & (1 << cpucore)) ? 1 : 0)
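+// NOTE: these overrides assume the cpuset argument points to a single integer
+// bitmask; with the plain 'int' shift used here, core indices above 30 are
+// not reliably representable.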
+
+/* Winsock IP protocol Numbers (not available on Windows) */
+#define IPPROTO_NONE	59       /* No next header for IPv6 */
+#define IPPROTO_SCTP	132      /* Stream Control Transmission Protocol */
+
+/* signal definitions - defined in signal.h */
+#define SIGUSR1		30
+#define SIGUSR2		31
+
+/* Definitions for access() */
+#define F_OK	0	/* Check for existence */
+#define W_OK	2	/* Write permission */
+#define R_OK	4	/* Read permission */
+#define X_OK	8	/* DO NOT USE */
+
+#ifndef AF_INET6
+#define AF_INET6	28
+#endif
+
+/* stdlib extensions that aren't defined in windows */
+int setenv(const char *name, const char *value, int overwrite);
+
+// Returns a handle to a mutex object that is created only once
+static inline HANDLE OpenMutexHandleAsync(INIT_ONCE *g_InitOnce)
+{
+	PVOID  lpContext;
+	BOOL   fStatus;
+	BOOL   fPending;
+	HANDLE hMutex;
+
+	// Begin one-time initialization
+	fStatus = InitOnceBeginInitialize(g_InitOnce,       // Pointer to one-time initialization structure
+		INIT_ONCE_ASYNC,   // Asynchronous one-time initialization
+		&fPending,         // Receives initialization status
+		&lpContext);       // Receives pointer to data in g_InitOnce
+
+	// InitOnceBeginInitialize failed.
+	if (!fStatus)
+	{
+		return (INVALID_HANDLE_VALUE);
+	}
+
+	// Initialization has already completed and lpContext contains mutex object.
+	if (!fPending)
+	{
+		return (HANDLE)lpContext;
+	}
+
+	// Create Mutex object for one-time initialization.
+	hMutex = CreateMutex(NULL,    // Default security descriptor
+		FALSE,    // Manual-reset mutex object
+		NULL);   // Object is unnamed
+
+	// Mutex object creation failed.
+	if (NULL == hMutex)
+	{
+		return (INVALID_HANDLE_VALUE);
+	}
+
+	// Complete one-time initialization.
+	fStatus = InitOnceComplete(g_InitOnce,             // Pointer to one-time initialization structure
+		INIT_ONCE_ASYNC,         // Asynchronous initialization
+		(PVOID)hMutex);          // Pointer to mutex object to be stored in g_InitOnce
+
+	// InitOnceComplete succeeded; return the mutex object.
+	if (fStatus)
+	{
+		return hMutex;
+	}
+
+	// Initialization has already completed. Free the local mutex.
+	CloseHandle(hMutex);
+
+
+	// Retrieve the final context data.
+	fStatus = InitOnceBeginInitialize(g_InitOnce,            // Pointer to one-time initialization structure
+		INIT_ONCE_CHECK_ONLY,   // Check whether initialization is complete
+		&fPending,              // Receives initialization status
+		&lpContext);            // Receives pointer to mutex object in g_InitOnce
+
+	// Initialization is complete; return the mutex.
+	if (fStatus && !fPending)
+	{
+		return (HANDLE)lpContext;
+	}
+	else
+	{
+		return INVALID_HANDLE_VALUE;
+	}
+}
+
+/*
+ * Used to statically create and lock a mutex.
+ */
+static inline HANDLE WinCreateAndLockStaticMutex(HANDLE mutex, INIT_ONCE *g_InitOnce) {
+	mutex = OpenMutexHandleAsync(g_InitOnce);
+	WaitForSingleObject(mutex, INFINITE);
+	return mutex;
+}
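+
+/*
+* Usage sketch (illustrative): replacing a statically initialised pthread
+* mutex. The INIT_ONCE object must start as INIT_ONCE_STATIC_INIT so the
+* first caller creates the mutex and later callers reuse it:
+*
+*	static INIT_ONCE once = INIT_ONCE_STATIC_INIT;
+*	static HANDLE lock;
+*
+*	lock = WinCreateAndLockStaticMutex(lock, &once);
+*	... critical section ...
+*	ReleaseMutex(lock);
+*/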
+
+//#include <rte_gcc_builtins.h>
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/lib/librte_eal/windows/rte_override/rte_windows.h b/lib/librte_eal/windows/rte_override/rte_windows.h
new file mode 100644
index 000000000..8e7f5299a
--- /dev/null
+++ b/lib/librte_eal/windows/rte_override/rte_windows.h
@@ -0,0 +1,497 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+* Copyright(c) 2017-2018 Intel Corporation
+*/
+
+
+#pragma once
+
+#ifndef _RTE_WINDOWS_H_
+#define _RTE_WINDOWS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef _MSC_VER
+#error "rte_windows.h requires an MSVC-compatible compiler (_MSC_VER must be defined)"
+#endif
+
+#ifndef _WINDOWS
+#define _WINDOWS
+#endif
+
+// With WIN32_LEAN_AND_MEAN defined, winsock isn't included by default;
+// it can then be included explicitly by the headers that need it.
+#define WIN32_LEAN_AND_MEAN
+
+#include <windows.h>
+
+// This isn't a complete replacement for GCC's typeof; for example, it does not
+// work for declarations such as "typeof(x) _val = 0". It does, however, remove
+// the need for many Windows-specific changes scattered across other files.
+#define typeof(x)	_Generic((x),			\
+	void *			: void *,		\
+	char			: char,			\
+	unsigned char		: unsigned char,	\
+	char *			: char *,		\
+	unsigned char *		: unsigned char *,	\
+	short			: short,		\
+	unsigned short		: unsigned short,	\
+	short *			: short *,		\
+	unsigned short *	: unsigned short *,	\
+	int			: int,			\
+	int *			: int *,		\
+	unsigned int		: unsigned int,		\
+	unsigned int *		: unsigned int *,	\
+	long 			: long,			\
+	long *			: long *,		\
+	unsigned long		: unsigned long,	\
+	unsigned long *		: unsigned long *,	\
+	long long		: long long,		\
+	unsigned long long	: unsigned long long,	\
+	unsigned long long *	: unsigned long long *, \
+	default			: void			\
+)
+
+/*
+* Global overrides.
+*/
+#define __attribute__(x)
+
+#define __func__ __FUNCTION__
+
+#define NORETURN __declspec(noreturn)
+#define ATTR_UNUSED
+#define __AVX__    1
+
+#define E_RTE_NO_TAILQ	(-1)
+
+/* Include this header here, so that we can re-define EAL_REGISTER_TAILQ */
+#include <rte_tailq.h>
+#ifdef EAL_REGISTER_TAILQ
+#undef EAL_REGISTER_TAILQ
+#endif
+
+/*
+* Definition for registering TAILQs
+* (This is a workaround for Windows in lieu of a constructor-like function)
+*/
+#define EAL_REGISTER_TAILQ(t) \
+void init_##t(void); \
+void init_##t(void) \
+{ \
+	if (rte_eal_tailq_register(&t) < 0) \
+		rte_panic("Cannot initialize tailq: %s\n", t.name); \
+}
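+
+/*
+* Sketch of intended use (illustrative, names are placeholders): a tailq
+* registered as EAL_REGISTER_TAILQ(rte_foo_tailq) yields init_rte_foo_tailq(),
+* which the Windows EAL must call explicitly during initialisation because
+* there is no constructor attribute to run it automatically.
+*/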
+
+/* Include this header here, so that we can re-define RTE_REGISTER_BUS */
+#include <rte_bus.h>
+#ifdef RTE_REGISTER_BUS
+#undef RTE_REGISTER_BUS
+#endif
+
+/*
+* Definition for registering a bus
+* (This is a workaround for Windows in lieu of a constructor-like function)
+*/
+#define RTE_REGISTER_BUS(nm, bus) \
+void businitfn_ ##nm(void) \
+{\
+	(bus).name = RTE_STR(nm);\
+	rte_bus_register(&bus); \
+}
+
+
+/*
+* Global warnings control. Disable this to see warnings in the
+* include/rte_override files
+*/
+#define DPDKWIN_NO_WARNINGS
+
+#ifdef DPDKWIN_NO_WARNINGS
+#pragma warning (disable : 94)	/* warning #94: the size of an array must be greater than zero */
+#pragma warning (disable : 169)	/* warning #169: expected a declaration */
+#endif
+
+
+/*
+* These definitions are to force a specific version of the defined function.
+* For Windows, we'll always stick with the latest defined version.
+*/
+#define rte_lpm_create			rte_lpm_create_v1604
+#define rte_lpm_add			rte_lpm_add_v1604
+#define rte_lpm6_add			rte_lpm6_add_v1705
+#define rte_lpm6_lookup			rte_lpm6_lookup_v1705
+#define rte_lpm6_lookup_bulk_func	rte_lpm6_lookup_bulk_func_v1705
+#define rte_lpm6_is_rule_present	rte_lpm6_is_rule_present_v1705
+#define rte_lpm_find_existing		rte_lpm_find_existing_v1604
+
+#define rte_distributor_request_pkt	rte_distributor_request_pkt_v1705
+#define rte_distributor_poll_pkt	rte_distributor_poll_pkt_v1705
+#define rte_distributor_get_pkt		rte_distributor_get_pkt_v1705
+#define rte_distributor_return_pkt	rte_distributor_return_pkt_v1705
+#define rte_distributor_returned_pkts	rte_distributor_returned_pkts_v1705
+#define rte_distributor_clear_returns	rte_distributor_clear_returns_v1705
+#define rte_distributor_process		rte_distributor_process_v1705
+#define rte_distributor_flush		rte_distributor_flush_v1705
+#define rte_distributor_create		rte_distributor_create_v1705
+
+/*
+* Definitions and overrides for ethernet.h
+*/
+#define u_char uint8_t
+#define u_short uint16_t
+
+#define __packed
+
+#define __BEGIN_DECLS
+#define __END_DECLS
+
+/*
+* sys/_cdefs.h
+*/
+#define __extension__
+
+/*
+* sys/_iovec.h
+*/
+#define ssize_t size_t
+#define SSIZE_T_DECLARED
+#define _SSIZE_T_DECLARED
+#define _SIZE_T_DECLARED
+
+/*
+* Linux to BSD termios differences
+*/
+#define TCSANOW 0
+
+/* Support X86 architecture */
+#define RTE_ARCH_X86
+
+/*
+* We can safely remove __attribute__((__packed__)). We will replace it with all structures
+* being packed
+*/
+#pragma pack(1)
+
+#include <rte_wincompat.h>
+
+/* Include rte_common.h first so it is pulled in under our control */
+#include "./rte_common.h"
+
+
+#include <sys/types.h>
+/* rte_pci.h must be included before we define typeof() to be nothing */
+//#include "./rte_pci.h"
+
+
+#define __attribute__(x)
+
+#define RTE_FORCE_INTRINSICS
+
+#include "rte_config.h"
+
+#define RTE_CACHE_ALIGN		__declspec(align(RTE_CACHE_LINE_SIZE))
+#define RTE_CACHE_MIN_ALIGN	__declspec(align(RTE_CACHE_LINE_MIN_SIZE))
+
+/* The windows port does not currently support dynamic loading of libraries, so fail these calls */
+#define dlopen(lib, flag)   (0)
+#define dlerror()           ("Not supported!")
+
+/* Include time.h for struct timespec */
+#include <time.h>
+#ifndef _TIMESPEC_DEFINED
+#define _TIMESPEC_DEFINED
+#endif
+
+typedef jmp_buf sigjmp_buf;
+#define sigsetjmp(env, savemask) _setjmp((env))
+
+/* function prototypes used exclusively by the Windows port */
+void eal_create_cpu_map(void);
+
+#define uint uint32_t
+
+#define RTE_APP_TEST_RESOURCE_TAR 1
+
+#if 0
+/* rte_config.h defines all the libraries that have to be linked. For each
+library that is enabled, emit a #pragma comment(lib, ...) directive to link it. */
+#ifdef _RTE_WIN_DPDK_APP
+
+#ifdef RTE_LIBRTE_EAL
+#pragma comment (lib, "librte_eal.lib")
+#endif
+#ifdef RTE_LIBRTE_PCI
+#pragma comment (lib, "librte_pci.lib")
+#endif
+#ifdef RTE_LIBRTE_ETHDEV
+#pragma comment (lib, "librte_ethdev.lib")
+#endif
+#ifdef RTE_LIBRTE_KVARGS
+#pragma comment (lib, "librte_kvargs.lib")
+#endif
+#ifdef RTE_LIBRTE_IEEE1588
+#pragma comment (lib, "librte_ieee1588.lib")
+#endif
+#ifdef RTE_LIBRTE_PCI_BUS
+#pragma comment (lib, "librte_bus_pci.lib")
+#endif
+#if defined(RTE_LIBRTE_EM_PMD) || defined(RTE_LIBRTE_IGB_PMD)
+#pragma comment (lib, "librte_pmd_e1000.lib")
+#endif
+#ifdef RTE_LIBRTE_IXGBE_PMD
+#pragma comment (lib, "librte_pmd_ixgbe.lib")
+#endif
+#ifdef RTE_LIBRTE_I40E_PMD
+#pragma comment (lib, "librte_pmd_i40e.lib")
+#endif
+#ifdef RTE_LIBRTE_FM10K_PMD
+#pragma comment (lib, "librte_pmd_fm10k.lib")
+#endif
+#ifdef RTE_LIBRTE_MLX4_PMD
+#pragma comment (lib, "librte_pmd_mlx4.lib")
+#endif
+#ifdef RTE_LIBRTE_MLX5_PMD
+#pragma comment (lib, "librte_pmd_mlx5.lib")
+#endif
+#ifdef RTE_LIBRTE_BNX2X_PMD
+#pragma comment (lib, "librte_pmd_bnx2x.lib")
+#endif
+#ifdef RTE_LIBRTE_CXGBE_PMD
+#pragma comment (lib, "librte_pmd_cxgbe.lib")
+#endif
+#ifdef RTE_LIBRTE_ENIC_PMD
+#pragma comment (lib, "librte_pmd_enic.lib")
+#endif
+#ifdef RTE_LIBRTE_NFP_PMD
+#pragma comment (lib, "librte_pmd_nfp.lib")
+#endif
+#ifdef RTE_LIBRTE_SFC_EFX_PMD
+#pragma comment (lib, "librte_pmd_sfc_efx.lib")
+#endif
+#ifdef RTE_LIBRTE_PMD_SOFTNIC
+//#pragma comment (lib, "librte_pmd_softnic.lib")
+#endif
+#ifdef RTE_LIBRTE_PMD_SZEDATA2
+#pragma comment (lib, "librte_pmd_szedata2.lib")
+#endif
+#ifdef RTE_LIBRTE_THUNDERX_NICVF_PMD
+#pragma comment (lib, "librte_pmd_thunderx_nicvf.lib")
+#endif
+#ifdef RTE_LIBRTE_LIO_PMD
+#pragma comment (lib, "librte_pmd_lio.lib")
+#endif
+#ifdef RTE_LIBRTE_DPAA_BUS
+#pragma comment (lib, "librte_dpaa_bus.lib")
+#endif
+#ifdef RTE_LIBRTE_DPAA_PMD
+#pragma comment (lib, "librte_pmd_dpaa.lib")
+#endif
+#ifdef RTE_LIBRTE_OCTEONTX_PMD
+#pragma comment (lib, "librte_pmd_octeontx.lib")
+#endif
+#ifdef RTE_LIBRTE_FSLMC_BUS
+#pragma comment (lib, "librte_fslmc_bus.lib")
+#endif
+#ifdef RTE_LIBRTE_DPAA2_PMD
+#pragma comment (lib, "librte_pmd_dpaa2.lib")
+#endif
+#ifdef RTE_LIBRTE_VIRTIO_PMD
+#pragma comment (lib, "librte_pmd_virtio.lib")
+#endif
+#ifdef RTE_VIRTIO_USER
+#pragma comment (lib, "librte_virtio_user.lib")
+#endif
+#ifdef RTE_LIBRTE_VMXNET3_PMD
+#pragma comment (lib, "librte_pmd_vmxnet3.lib")
+#endif
+#ifdef RTE_LIBRTE_PMD_RING
+//#pragma comment (lib, "librte_pmd_ring.lib")
+#endif
+#ifdef RTE_LIBRTE_PMD_BOND
+#pragma comment (lib, "librte_pmd_bond.lib")
+#endif
+#ifdef RTE_LIBRTE_QEDE_PMD
+#pragma comment (lib, "librte_pmd_qede.lib")
+#endif
+#ifdef RTE_LIBRTE_PMD_AF_PACKET
+#pragma comment (lib, "librte_pmd_af_packet.lib")
+#endif
+#ifdef RTE_LIBRTE_ARK_PMD
+#pragma comment (lib, "librte_pmd_ark.lib")
+#endif
+#ifdef RTE_LIBRTE_AVP_PMD
+#pragma comment (lib, "librte_pmd_avp.lib")
+#endif
+#ifdef RTE_LIBRTE_PMD_TAP
+#pragma comment (lib, "librte_pmd_tap.lib")
+#endif
+#ifdef RTE_LIBRTE_PMD_NULL
+#pragma comment (lib, "librte_pmd_null.lib")
+#endif
+#ifdef RTE_LIBRTE_PMD_FAILSAFE
+#pragma comment (lib, "librte_pmd_failsafe.lib")
+#endif
+#ifdef RTE_LIBRTE_CRYPTODEV
+#pragma comment (lib, "librte_cryptodev.lib")
+#endif
+#ifdef RTE_LIBRTE_PMD_ARMV8_CRYPTO
+#pragma comment (lib, "librte_pmd_armv8_crypto.lib")
+#endif
+#ifdef RTE_LIBRTE_PMD_DPAA2_SEC
+#pragma comment (lib, "librte_pmd_dpaa2_sec.lib")
+#endif
+#ifdef RTE_LIBRTE_PMD_QAT
+#pragma comment (lib, "librte_pmd_qat.lib")
+#endif
+#ifdef RTE_LIBRTE_PMD_AESNI_MB
+#pragma comment (lib, "librte_pmd_aesni_mb.lib")
+#endif
+#ifdef RTE_LIBRTE_PMD_OPENSSL
+#pragma comment (lib, "librte_pmd_openssl.lib")
+#endif
+#ifdef RTE_LIBRTE_PMD_AESNI_GCM
+#pragma comment (lib, "librte_pmd_aesni_gcm.lib")
+#endif
+#ifdef RTE_LIBRTE_PMD_SNOW3G
+#pragma comment (lib, "librte_pmd_snow3g.lib")
+#endif
+#ifdef RTE_LIBRTE_PMD_KASUMI
+#pragma comment (lib, "librte_pmd_kasumi.lib")
+#endif
+#ifdef RTE_LIBRTE_PMD_ZUC
+#pragma comment (lib, "librte_pmd_zuc.lib")
+#endif
+#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
+#pragma comment (lib, "librte_pmd_crypto_scheduler.lib")
+#endif
+#ifdef RTE_LIBRTE_PMD_NULL_CRYPTO
+#pragma comment (lib, "librte_pmd_null_crypto.lib")
+#endif
+#ifdef RTE_LIBRTE_PMD_MRVL_CRYPTO
+#pragma comment (lib, "librte_pmd_mrvl_crypto.lib")
+#endif
+#ifdef RTE_LIBRTE_SECURITY
+#pragma comment (lib, "librte_security.lib")
+#endif
+#ifdef RTE_LIBRTE_EVENTDEV
+#pragma comment (lib, "librte_eventdev.lib")
+#endif
+#ifdef RTE_LIBRTE_PMD_SKELETON_EVENTDEV
+//#pragma comment (lib, "librte_pmd_skeleton_eventdev.lib")
+#endif
+#ifdef RTE_LIBRTE_PMD_SW_EVENTDEV
+//#pragma comment (lib, "librte_pmd_sw_eventdev.lib")
+#endif
+#ifdef RTE_LIBRTE_RING
+#pragma comment (lib, "librte_ring.lib")
+#endif
+#ifdef RTE_LIBRTE_MEMPOOL
+#pragma comment (lib, "librte_mempool.lib")
+#pragma comment (lib, "librte_mempool_ring.lib")
+#endif
+#ifdef RTE_LIBRTE_MBUF
+#pragma comment (lib, "librte_mbuf.lib")
+#endif
+#ifdef RTE_LIBRTE_TIMER
+#pragma comment (lib, "librte_timer.lib")
+#endif
+#ifdef RTE_LIBRTE_CFGFILE
+#pragma comment (lib, "librte_cfgfile.lib")
+#endif
+#ifdef RTE_LIBRTE_CMDLINE
+#pragma comment (lib, "librte_cmdline.lib")
+#endif
+#ifdef RTE_LIBRTE_HASH
+#pragma comment (lib, "librte_hash.lib")
+#endif
+#ifdef RTE_LIBRTE_EFD
+#pragma comment (lib, "librte_efd.lib")
+#endif
+#ifdef RTE_LIBRTE_MEMBER
+#pragma comment (lib, "librte_member.lib")
+#endif
+#ifdef RTE_LIBRTE_JOBSTATS
+//#pragma comment (lib, "librte_jobstats.lib")
+#endif
+#ifdef RTE_LIBRTE_METRICS
+#pragma comment (lib, "librte_metrics.lib")
+#endif
+#ifdef RTE_LIBRTE_BITRATE
+#pragma comment (lib, "librte_bitratestats.lib")
+#endif
+#ifdef RTE_LIBRTE_LATENCY_STATS
+#pragma comment (lib, "librte_latencystats.lib")
+#endif
+#ifdef RTE_LIBRTE_LPM
+#pragma comment (lib, "librte_lpm.lib")
+#endif
+#ifdef RTE_LIBRTE_ACL
+#pragma comment (lib, "librte_acl.lib")
+#endif
+#ifdef RTE_LIBRTE_POWER
+#pragma comment (lib, "librte_power.lib")
+#endif
+#ifdef RTE_LIBRTE_NET
+#pragma comment (lib, "librte_net.lib")
+#endif
+#ifdef RTE_LIBRTE_IP_FRAG
+#pragma comment (lib, "librte_ipfrag.lib")
+#endif
+#ifdef RTE_LIBRTE_GRO
+#pragma comment (lib, "librte_gro.lib")
+#endif
+#ifdef RTE_LIBRTE_GSO
+#pragma comment (lib, "librte_gso.lib")
+#endif
+#ifdef RTE_LIBRTE_METER
+#pragma comment (lib, "librte_meter.lib")
+#endif
+#ifdef RTE_LIBRTE_FLOW_CLASSIFY
+#pragma comment (lib, "librte_flowclassify.lib")
+#endif
+#ifdef RTE_LIBRTE_SCHED
+#pragma comment (lib, "librte_sched.lib")
+#endif
+#ifdef RTE_LIBRTE_DISTRIBUTOR
+#pragma comment (lib, "librte_distributor.lib")
+#endif
+#ifdef RTE_LIBRTE_REORDER
+#pragma comment (lib, "librte_reorder.lib")
+#endif
+#ifdef RTE_LIBRTE_PORT
+#pragma comment (lib, "librte_port.lib")
+#endif
+#ifdef RTE_LIBRTE_TABLE
+#pragma comment (lib, "librte_table.lib")
+#endif
+#ifdef RTE_LIBRTE_PIPELINE
+#pragma comment (lib, "librte_pipeline.lib")
+#endif
+#ifdef RTE_LIBRTE_KNI
+#pragma comment (lib, "librte_kni.lib")
+#endif
+#ifdef RTE_LIBRTE_PMD_KNI
+#pragma comment (lib, "librte_pmd_kni.lib")
+#endif
+#ifdef RTE_LIBRTE_PDUMP
+#pragma comment (lib, "librte_pdump.lib")
+#endif
+#ifdef RTE_LIBRTE_VHOST
+#pragma comment (lib, "librte_vhost.lib")
+#endif
+#ifdef RTE_LIBRTE_PMD_VHOST
+#pragma comment (lib, "librte_pmd_vhost.lib")
+#endif
+
+
+#endif /* _RTE_WIN_DPDK_APP */
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_WINDOWS_H_ */
diff --git a/mk/exec-env/windows/DpdkRteLib.props b/mk/exec-env/windows/DpdkRteLib.props
new file mode 100644
index 000000000..076253937
--- /dev/null
+++ b/mk/exec-env/windows/DpdkRteLib.props
@@ -0,0 +1,46 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ImportGroup Label="PropertySheets" />
+  <PropertyGroup Label="UserMacros">
+    <RTE_SDK>$(SolutionDir)..\..\..</RTE_SDK>
+  </PropertyGroup>
+  <PropertyGroup>
+    <OutDir>$(RTE_SDK)\$(Platform)\$(Configuration)\$(MSBuildProjectName)\</OutDir>
+    <IntDir>$(RTE_SDK)\mk\exec-env\windows\$(Platform)\$(Configuration)\$(MSBuildProjectName)\</IntDir>
+  </PropertyGroup>
+  <PropertyGroup Label="Globals">
+    <WindowsTargetPlatformVersion>10.0.15063.0</WindowsTargetPlatformVersion>
+  </PropertyGroup>
+  <ItemDefinitionGroup>
+    <ClCompile>
+      <PrecompiledHeader>NotUsing</PrecompiledHeader>
+    </ClCompile>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup>
+    <ClCompile>
+      <PrecompiledHeaderFile />
+    </ClCompile>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup>
+    <ClCompile>
+      <PrecompiledHeaderOutputFile />
+      <AdditionalIncludeDirectories>$(RTE_SDK)\lib\librte_eal\windows\include_override;$(RTE_SDK)\lib\librte_eal\windows\rte_override;$(RTE_SDK)\lib\librte_eal\common;$(RTE_SDK)\lib\librte_eal\common\include;$(RTE_SDK)\lib\librte_acl;$(RTE_SDK)\lib\librte_cmdline;$(RTE_SDK)\lib\librte_distributor;$(RTE_SDK)\lib\librte_ethdev;$(RTE_SDK)\lib\librte_hash;$(RTE_SDK)\lib\librte_ip_frag;$(RTE_SDK)\lib\librte_kvargs;$(RTE_SDK)\lib\librte_lpm;$(RTE_SDK)\lib\librte_malloc;$(RTE_SDK)\lib\librte_mempool;$(RTE_SDK)\lib\librte_mbuf;$(RTE_SDK)\lib\librte_meter;$(RTE_SDK)\lib\librte_net;$(RTE_SDK)\lib\librte_pipeline;$(RTE_SDK)\lib\librte_port;$(RTE_SDK)\lib\librte_reorder;$(RTE_SDK)\lib\librte_ring;$(RTE_SDK)\lib\librte_sched;$(RTE_SDK)\lib\librte_table;$(RTE_SDK)\lib\librte_timer;$(RTE_SDK)\lib\librte_vhost;$(RTE_SDK)\lib\librte_compat;$(RTE_SDK)\drivers\bus\pci;$(RTE_SDK)\lib\librte_security;$(RTE_SDK)\lib\librte_bitratestats;$(RTE_SDK)\lib\librte_metrics;$(RTE_SDK)\lib\librte_efd;$(RTE_SDK)\lib\librte_cryptodev;$(RTE_SDK)\lib\librte_flow_classify</AdditionalIncludeDirectories>
+      <ForcedIncludeFiles>$(RTE_SDK)\lib\librte_eal\windows\rte_override\rte_windows.h</ForcedIncludeFiles>
+      <CLanguageStandard>gnu11</CLanguageStandard>
+      <PrecompiledHeaderCompileAs>
+      </PrecompiledHeaderCompileAs>
+      <PrecompiledHeaderOutputFileDirectory />
+      <CompileAs>Default</CompileAs>
+      <C99Support>
+      </C99Support>
+      <StructMemberAlignment>1Byte</StructMemberAlignment>
+      <AdditionalOptions>/Qstd=c11 %(AdditionalOptions)</AdditionalOptions>
+      <WarningLevel>Level3</WarningLevel>
+    </ClCompile>
+  </ItemDefinitionGroup>
+  <ItemGroup>
+    <BuildMacro Include="RTE_SDK">
+      <Value>$(RTE_SDK)</Value>
+    </BuildMacro>
+  </ItemGroup>
+</Project>
\ No newline at end of file
diff --git a/mk/exec-env/windows/dpdk.sln b/mk/exec-env/windows/dpdk.sln
new file mode 100644
index 000000000..8cbaa9e93
--- /dev/null
+++ b/mk/exec-env/windows/dpdk.sln
@@ -0,0 +1,43 @@
+
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio 15
+VisualStudioVersion = 15.0.27130.2010
+MinimumVisualStudioVersion = 10.0.40219.1
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "librte_eal", "librte_eal\librte_eal.vcxproj", "{7380DC42-DE9A-4BA3-B153-FC0156DA20B7}"
+	ProjectSection(ProjectDependencies) = postProject
+		{F74A831C-CD22-4D19-BE6F-A318D0376EFA} = {F74A831C-CD22-4D19-BE6F-A318D0376EFA}
+	EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "librte_kvargs", "librte_kvargs\librte_kvargs.vcxproj", "{F74A831C-CD22-4D19-BE6F-A318D0376EFA}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "helloworld", "helloworld\helloworld.vcxproj", "{40B2A34F-A9EC-4420-8BD1-652883AA39E5}"
+	ProjectSection(ProjectDependencies) = postProject
+		{7380DC42-DE9A-4BA3-B153-FC0156DA20B7} = {7380DC42-DE9A-4BA3-B153-FC0156DA20B7}
+	EndProjectSection
+EndProject
+Global
+	GlobalSection(SolutionConfigurationPlatforms) = preSolution
+		Debug|x64 = Debug|x64
+		Release|x64 = Release|x64
+	EndGlobalSection
+	GlobalSection(ProjectConfigurationPlatforms) = postSolution
+		{7380DC42-DE9A-4BA3-B153-FC0156DA20B7}.Debug|x64.ActiveCfg = Debug|x64
+		{7380DC42-DE9A-4BA3-B153-FC0156DA20B7}.Debug|x64.Build.0 = Debug|x64
+		{7380DC42-DE9A-4BA3-B153-FC0156DA20B7}.Release|x64.ActiveCfg = Release|x64
+		{7380DC42-DE9A-4BA3-B153-FC0156DA20B7}.Release|x64.Build.0 = Release|x64
+		{F74A831C-CD22-4D19-BE6F-A318D0376EFA}.Debug|x64.ActiveCfg = Debug|x64
+		{F74A831C-CD22-4D19-BE6F-A318D0376EFA}.Debug|x64.Build.0 = Debug|x64
+		{F74A831C-CD22-4D19-BE6F-A318D0376EFA}.Release|x64.ActiveCfg = Release|x64
+		{F74A831C-CD22-4D19-BE6F-A318D0376EFA}.Release|x64.Build.0 = Release|x64
+		{40B2A34F-A9EC-4420-8BD1-652883AA39E5}.Debug|x64.ActiveCfg = Debug|x64
+		{40B2A34F-A9EC-4420-8BD1-652883AA39E5}.Debug|x64.Build.0 = Debug|x64
+		{40B2A34F-A9EC-4420-8BD1-652883AA39E5}.Release|x64.ActiveCfg = Release|x64
+		{40B2A34F-A9EC-4420-8BD1-652883AA39E5}.Release|x64.Build.0 = Release|x64
+	EndGlobalSection
+	GlobalSection(SolutionProperties) = preSolution
+		HideSolutionNode = FALSE
+	EndGlobalSection
+	GlobalSection(ExtensibilityGlobals) = postSolution
+		SolutionGuid = {6CB597CF-1AD9-4A06-9C23-26B0EAEA3E63}
+	EndGlobalSection
+EndGlobal
diff --git a/mk/exec-env/windows/helloworld/helloworld.vcxproj b/mk/exec-env/windows/helloworld/helloworld.vcxproj
new file mode 100644
index 000000000..108d4b05b
--- /dev/null
+++ b/mk/exec-env/windows/helloworld/helloworld.vcxproj
@@ -0,0 +1,98 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ItemGroup Label="ProjectConfigurations">
+    <ProjectConfiguration Include="Debug|x64">
+      <Configuration>Debug</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|x64">
+      <Configuration>Release</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+  </ItemGroup>
+  <ItemGroup>
+    <ClCompile Include="..\..\..\..\examples\helloworld\main.c" />
+  </ItemGroup>
+  <PropertyGroup Label="Globals">
+    <VCProjectVersion>15.0</VCProjectVersion>
+    <ProjectGuid>{40B2A34F-A9EC-4420-8BD1-652883AA39E5}</ProjectGuid>
+    <Keyword>Win32Proj</Keyword>
+    <RootNamespace>helloworld</RootNamespace>
+    <WindowsTargetPlatformVersion>10.0.17134.0</WindowsTargetPlatformVersion>
+  </PropertyGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <UseDebugLibraries>true</UseDebugLibraries>
+    <PlatformToolset>Intel C++ Compiler 18.0</PlatformToolset>
+    <CharacterSet>Unicode</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <UseDebugLibraries>false</UseDebugLibraries>
+    <PlatformToolset>Intel C++ Compiler 18.0</PlatformToolset>
+    <WholeProgramOptimization>true</WholeProgramOptimization>
+    <CharacterSet>Unicode</CharacterSet>
+  </PropertyGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+  <ImportGroup Label="ExtensionSettings">
+  </ImportGroup>
+  <ImportGroup Label="Shared">
+  </ImportGroup>
+  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="..\DpdkRteLib.props" />
+  </ImportGroup>
+  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="..\DpdkRteLib.props" />
+  </ImportGroup>
+  <PropertyGroup Label="UserMacros" />
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+    <LinkIncremental>true</LinkIncremental>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+    <LinkIncremental>false</LinkIncremental>
+  </PropertyGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+    <ClCompile>
+      <PrecompiledHeader>NotUsing</PrecompiledHeader>
+      <WarningLevel>Level3</WarningLevel>
+      <Optimization>Disabled</Optimization>
+      <SDLCheck>true</SDLCheck>
+      <PreprocessorDefinitions>_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <ConformanceMode>true</ConformanceMode>
+      <PrecompiledHeaderFile>
+      </PrecompiledHeaderFile>
+    </ClCompile>
+    <Link>
+      <SubSystem>Console</SubSystem>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <AdditionalDependencies>setupapi.lib;dbghelp.lib;$(RTE_SDK)\$(Platform)\$(Configuration)\librte_eal\librte_eal.lib;$(RTE_SDK)\$(Platform)\$(Configuration)\librte_kvargs\librte_kvargs.lib;%(AdditionalDependencies)</AdditionalDependencies>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+    <ClCompile>
+      <PrecompiledHeader>NotUsing</PrecompiledHeader>
+      <WarningLevel>Level3</WarningLevel>
+      <Optimization>MaxSpeed</Optimization>
+      <FunctionLevelLinking>true</FunctionLevelLinking>
+      <IntrinsicFunctions>true</IntrinsicFunctions>
+      <SDLCheck>true</SDLCheck>
+      <PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <ConformanceMode>true</ConformanceMode>
+      <PrecompiledHeaderFile>
+      </PrecompiledHeaderFile>
+    </ClCompile>
+    <Link>
+      <SubSystem>Console</SubSystem>
+      <EnableCOMDATFolding>true</EnableCOMDATFolding>
+      <OptimizeReferences>true</OptimizeReferences>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <AdditionalDependencies>setupapi.lib;dbghelp.lib;$(RTE_SDK)\$(Platform)\$(Configuration)\librte_eal\librte_eal.lib;$(RTE_SDK)\$(Platform)\$(Configuration)\librte_kvargs\librte_kvargs.lib;%(AdditionalDependencies)</AdditionalDependencies>
+    </Link>
+  </ItemDefinitionGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+  <ImportGroup Label="ExtensionTargets">
+  </ImportGroup>
+</Project>
\ No newline at end of file
diff --git a/mk/exec-env/windows/helloworld/helloworld.vcxproj.filters b/mk/exec-env/windows/helloworld/helloworld.vcxproj.filters
new file mode 100644
index 000000000..cf332900f
--- /dev/null
+++ b/mk/exec-env/windows/helloworld/helloworld.vcxproj.filters
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ItemGroup>
+    <Filter Include="Source Files">
+      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
+      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
+    </Filter>
+    <Filter Include="Header Files">
+      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
+      <Extensions>h;hh;hpp;hxx;hm;inl;inc;ipp;xsd</Extensions>
+    </Filter>
+    <Filter Include="Resource Files">
+      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
+      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
+    </Filter>
+  </ItemGroup>
+  <ItemGroup>
+    <ClCompile Include="..\..\..\..\examples\helloworld\main.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+  </ItemGroup>
+</Project>
\ No newline at end of file
diff --git a/mk/exec-env/windows/helloworld/helloworld.vcxproj.user b/mk/exec-env/windows/helloworld/helloworld.vcxproj.user
new file mode 100644
index 000000000..be2507870
--- /dev/null
+++ b/mk/exec-env/windows/helloworld/helloworld.vcxproj.user
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <PropertyGroup />
+</Project>
\ No newline at end of file
diff --git a/mk/exec-env/windows/librte_eal/librte_eal.vcxproj b/mk/exec-env/windows/librte_eal/librte_eal.vcxproj
new file mode 100644
index 000000000..5b456d351
--- /dev/null
+++ b/mk/exec-env/windows/librte_eal/librte_eal.vcxproj
@@ -0,0 +1,187 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ItemGroup Label="ProjectConfigurations">
+    <ProjectConfiguration Include="Debug|x64">
+      <Configuration>Debug</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|x64">
+      <Configuration>Release</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+  </ItemGroup>
+  <PropertyGroup Label="Globals">
+    <ProjectGuid>{7380DC42-DE9A-4BA3-B153-FC0156DA20B7}</ProjectGuid>
+    <Keyword>Win32Proj</Keyword>
+    <RootNamespace>librte_eal</RootNamespace>
+    <WindowsTargetPlatformVersion>10.0.17134.0</WindowsTargetPlatformVersion>
+  </PropertyGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
+    <ConfigurationType>StaticLibrary</ConfigurationType>
+    <UseDebugLibraries>true</UseDebugLibraries>
+    <PlatformToolset>Intel C++ Compiler 18.0</PlatformToolset>
+    <CharacterSet>Unicode</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
+    <ConfigurationType>StaticLibrary</ConfigurationType>
+    <UseDebugLibraries>false</UseDebugLibraries>
+    <PlatformToolset>Intel C++ Compiler 18.0</PlatformToolset>
+    <WholeProgramOptimization>true</WholeProgramOptimization>
+    <CharacterSet>Unicode</CharacterSet>
+  </PropertyGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+  <ImportGroup Label="ExtensionSettings">
+  </ImportGroup>
+  <ImportGroup Label="Shared">
+  </ImportGroup>
+  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+    <Import Project="..\DpdkRteLib.props" />
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+    <Import Project="..\DpdkRteLib.props" />
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <PropertyGroup Label="UserMacros" />
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+    <OutDir>$(RTE_SDK)\$(Platform)\$(Configuration)\$(MSBuildProjectName)\</OutDir>
+    <IntDir>$(RTE_SDK)\mk\exec-env\windows\$(Platform)\$(Configuration)\$(MSBuildProjectName)\</IntDir>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+    <OutDir>$(RTE_SDK)\$(Platform)\$(Configuration)\$(MSBuildProjectName)\</OutDir>
+    <IntDir>$(RTE_SDK)\mk\exec-env\windows\$(Platform)\$(Configuration)\$(MSBuildProjectName)\</IntDir>
+  </PropertyGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+    <ClCompile>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <WarningLevel>Level3</WarningLevel>
+      <Optimization>Disabled</Optimization>
+      <PreprocessorDefinitions>_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <AdditionalIncludeDirectories>C:\winddk\MSVS2015_SDK_WDK_Windows10_14393\Program Files\Microsoft Visual Studio 14.0\VC\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+      <UndefinePreprocessorDefinitions>__ICL</UndefinePreprocessorDefinitions>
+      <StructMemberAlignment>1Byte</StructMemberAlignment>
+      <CCppSupport>C99Support</CCppSupport>
+    </ClCompile>
+    <Link>
+      <SubSystem>Windows</SubSystem>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+    <ClCompile>
+      <WarningLevel>Level3</WarningLevel>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <Optimization>MaxSpeed</Optimization>
+      <FunctionLevelLinking>true</FunctionLevelLinking>
+      <IntrinsicFunctions>true</IntrinsicFunctions>
+      <PreprocessorDefinitions>NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <AdditionalIncludeDirectories>C:\winddk\MSVS2015_SDK_WDK_Windows10_14393\Program Files\Microsoft Visual Studio 14.0\VC\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+      <StructMemberAlignment>1Byte</StructMemberAlignment>
+      <CCppSupport>C99Support</CCppSupport>
+    </ClCompile>
+    <Link>
+      <SubSystem>Windows</SubSystem>
+      <EnableCOMDATFolding>true</EnableCOMDATFolding>
+      <OptimizeReferences>true</OptimizeReferences>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemGroup>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_bus.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_class.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_cpuflags.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_dev.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_devargs.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_errno.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_hexdump.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_hypervisor.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_launch.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_lcore.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_memalloc.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_memory.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_memzone.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_options.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_string_fns.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_tailqs.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_thread.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_timer.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\malloc_elem.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\rte_keepalive.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\rte_malloc.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\rte_reciprocal.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\eal.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\eal_alarm.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\eal_debug.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\eal_fbarray.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\eal_hugepage_info.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\eal_interrupts.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\eal_lcore.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\eal_log.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\eal_memalloc.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\eal_memory.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\eal_thread.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\eal_timer.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\linux-emu\fork.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\linux-emu\getopt.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\linux-emu\lrand48.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\linux-emu\mman.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\linux-emu\setenv.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\linux-emu\srand48.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\linux-emu\termios.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\linux-emu\unistd.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\linux-emu\_rand48.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\malloc_heap.c" />
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\malloc_mp.c" />
+  </ItemGroup>
+  <ItemGroup>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\eal_internal_cfg.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\eal_memalloc.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\eal_options.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\eal_private.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\eal_thread.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\generic\rte_cycles.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\generic\rte_rwlock.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_bus.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_common.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_debug.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_dev.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_eal.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_eal_memconfig.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_interrupts.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_launch.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_lcore.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_log.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_malloc_heap.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_memory.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_memzone.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_pci.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_pci_dev_feature_defs.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_per_lcore.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_random.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_string_fns.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_tailq.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_version.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\malloc_elem.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\malloc_heap.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\malloc_mp.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\eal\eal_filesystem.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\eal\eal_pci_private.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\include_override\rand48.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\include_override\unistd.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\rte_override\rte_atomic.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\rte_override\rte_common.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\rte_override\rte_cycles.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\rte_override\rte_debug.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\rte_override\rte_lcore.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\rte_override\rte_memory.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\rte_override\rte_pci.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\rte_override\rte_per_lcore.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\rte_override\rte_spinlock.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\rte_override\rte_wincompat.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\rte_override\rte_windows.h" />
+  </ItemGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+  <ImportGroup Label="ExtensionTargets">
+  </ImportGroup>
+</Project>
\ No newline at end of file
diff --git a/mk/exec-env/windows/librte_eal/librte_eal.vcxproj.filters b/mk/exec-env/windows/librte_eal/librte_eal.vcxproj.filters
new file mode 100644
index 000000000..589392cf5
--- /dev/null
+++ b/mk/exec-env/windows/librte_eal/librte_eal.vcxproj.filters
@@ -0,0 +1,297 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ItemGroup>
+    <Filter Include="Source Files">
+      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
+      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
+    </Filter>
+    <Filter Include="Header Files">
+      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
+      <Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
+    </Filter>
+    <Filter Include="Resource Files">
+      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
+      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
+    </Filter>
+    <Filter Include="Header Files\windows override">
+      <UniqueIdentifier>{ba45c4dc-83b8-4fb0-9cec-e2b175fa8db4}</UniqueIdentifier>
+    </Filter>
+    <Filter Include="Source Files\windows override">
+      <UniqueIdentifier>{4ab5055a-d124-48a7-8801-14bc9f281934}</UniqueIdentifier>
+    </Filter>
+  </ItemGroup>
+  <ItemGroup>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_cpuflags.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_dev.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_devargs.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_errno.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_hexdump.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_launch.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_lcore.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_memory.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_memzone.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_options.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_string_fns.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_tailqs.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_thread.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_timer.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\malloc_elem.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\rte_malloc.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\linux-emu\_rand48.c">
+      <Filter>Source Files\windows override</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\linux-emu\getopt.c">
+      <Filter>Source Files\windows override</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\linux-emu\lrand48.c">
+      <Filter>Source Files\windows override</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\linux-emu\mman.c">
+      <Filter>Source Files\windows override</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\linux-emu\srand48.c">
+      <Filter>Source Files\windows override</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\linux-emu\unistd.c">
+      <Filter>Source Files\windows override</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\eal_debug.c">
+      <Filter>Source Files\windows override</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\eal_log.c">
+      <Filter>Source Files\windows override</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\eal_alarm.c">
+      <Filter>Source Files\windows override</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\eal_interrupts.c">
+      <Filter>Source Files\windows override</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\eal_timer.c">
+      <Filter>Source Files\windows override</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\eal_lcore.c">
+      <Filter>Source Files\windows override</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\eal.c">
+      <Filter>Source Files\windows override</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\eal_thread.c">
+      <Filter>Source Files\windows override</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\eal_memory.c">
+      <Filter>Source Files\windows override</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\eal_hugepage_info.c">
+      <Filter>Source Files\windows override</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_bus.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\linux-emu\fork.c">
+      <Filter>Source Files\windows override</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\linux-emu\setenv.c">
+      <Filter>Source Files\windows override</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\linux-emu\termios.c">
+      <Filter>Source Files\windows override</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_hypervisor.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\rte_keepalive.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\rte_reciprocal.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_class.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\eal_fbarray.c">
+      <Filter>Source Files\windows override</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\common\eal_common_memalloc.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\eal_memalloc.c">
+      <Filter>Source Files\windows override</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\malloc_mp.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="..\..\..\..\lib\librte_eal\windows\eal\malloc_heap.c">
+      <Filter>Source Files\windows override</Filter>
+    </ClCompile>
+  </ItemGroup>
+  <ItemGroup>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\eal_private.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\eal_thread.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\eal_internal_cfg.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\malloc_heap.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\eal_options.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_eal.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_pci_dev_feature_defs.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_string_fns.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_pci.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_lcore.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_memory.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_per_lcore.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_launch.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_interrupts.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\include_override\rand48.h">
+      <Filter>Header Files\windows override</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\rte_override\rte_pci.h">
+      <Filter>Header Files\windows override</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\rte_override\rte_per_lcore.h">
+      <Filter>Header Files\windows override</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\rte_override\rte_lcore.h">
+      <Filter>Header Files\windows override</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\rte_override\rte_memory.h">
+      <Filter>Header Files\windows override</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\rte_override\rte_common.h">
+      <Filter>Header Files\windows override</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_common.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\rte_override\rte_cycles.h">
+      <Filter>Header Files\windows override</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\generic\rte_cycles.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\eal\eal_filesystem.h">
+      <Filter>Header Files\windows override</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\rte_override\rte_windows.h">
+      <Filter>Header Files\windows override</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\include_override\unistd.h">
+      <Filter>Header Files\windows override</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_eal_memconfig.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_log.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_random.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\rte_override\rte_spinlock.h">
+      <Filter>Header Files\windows override</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\generic\rte_rwlock.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_malloc_heap.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_memzone.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_debug.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\rte_override\rte_debug.h">
+      <Filter>Header Files\windows override</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\rte_override\rte_atomic.h">
+      <Filter>Header Files\windows override</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\rte_override\rte_wincompat.h">
+      <Filter>Header Files\windows override</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\windows\eal\eal_pci_private.h">
+      <Filter>Header Files\windows override</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_dev.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_bus.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_tailq.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_version.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\eal_memalloc.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\malloc_mp.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\malloc_elem.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+  </ItemGroup>
+</Project>
\ No newline at end of file
diff --git a/mk/exec-env/windows/librte_eal/librte_eal.vcxproj.user b/mk/exec-env/windows/librte_eal/librte_eal.vcxproj.user
new file mode 100644
index 000000000..abe8dd896
--- /dev/null
+++ b/mk/exec-env/windows/librte_eal/librte_eal.vcxproj.user
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <PropertyGroup />
+</Project>
\ No newline at end of file
diff --git a/mk/exec-env/windows/librte_kvargs/librte_kvargs.vcxproj b/mk/exec-env/windows/librte_kvargs/librte_kvargs.vcxproj
new file mode 100644
index 000000000..afd216f2d
--- /dev/null
+++ b/mk/exec-env/windows/librte_kvargs/librte_kvargs.vcxproj
@@ -0,0 +1,91 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ItemGroup Label="ProjectConfigurations">
+    <ProjectConfiguration Include="Debug|x64">
+      <Configuration>Debug</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|x64">
+      <Configuration>Release</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+  </ItemGroup>
+  <ItemGroup>
+    <ClCompile Include="..\..\..\..\lib\librte_kvargs\rte_kvargs.c" />
+  </ItemGroup>
+  <ItemGroup>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_log.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_string_fns.h" />
+    <ClInclude Include="..\..\..\..\lib\librte_kvargs\rte_kvargs.h" />
+  </ItemGroup>
+  <PropertyGroup Label="Globals">
+    <ProjectGuid>{F74A831C-CD22-4D19-BE6F-A318D0376EFA}</ProjectGuid>
+    <Keyword>Win32Proj</Keyword>
+    <RootNamespace>librte_kvargs</RootNamespace>
+    <WindowsTargetPlatformVersion>10.0.17134.0</WindowsTargetPlatformVersion>
+  </PropertyGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
+    <ConfigurationType>StaticLibrary</ConfigurationType>
+    <UseDebugLibraries>true</UseDebugLibraries>
+    <PlatformToolset>Intel C++ Compiler 18.0</PlatformToolset>
+    <CharacterSet>Unicode</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
+    <ConfigurationType>StaticLibrary</ConfigurationType>
+    <UseDebugLibraries>false</UseDebugLibraries>
+    <PlatformToolset>Intel C++ Compiler 18.0</PlatformToolset>
+    <WholeProgramOptimization>true</WholeProgramOptimization>
+    <CharacterSet>Unicode</CharacterSet>
+  </PropertyGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+  <ImportGroup Label="ExtensionSettings">
+  </ImportGroup>
+  <ImportGroup Label="Shared">
+  </ImportGroup>
+  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="..\DpdkRteLib.props" />
+  </ImportGroup>
+  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+    <Import Project="..\DpdkRteLib.props" />
+  </ImportGroup>
+  <PropertyGroup Label="UserMacros" />
+  <PropertyGroup />
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+    <ClCompile>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <WarningLevel>Level3</WarningLevel>
+      <Optimization>Disabled</Optimization>
+      <PreprocessorDefinitions>_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <SDLCheck>true</SDLCheck>
+      <StructMemberAlignment>1Byte</StructMemberAlignment>
+    </ClCompile>
+    <Link>
+      <SubSystem>Windows</SubSystem>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+    <ClCompile>
+      <WarningLevel>Level3</WarningLevel>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <Optimization>MaxSpeed</Optimization>
+      <FunctionLevelLinking>true</FunctionLevelLinking>
+      <IntrinsicFunctions>true</IntrinsicFunctions>
+      <PreprocessorDefinitions>NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <SDLCheck>true</SDLCheck>
+      <StructMemberAlignment>1Byte</StructMemberAlignment>
+    </ClCompile>
+    <Link>
+      <SubSystem>Windows</SubSystem>
+      <EnableCOMDATFolding>true</EnableCOMDATFolding>
+      <OptimizeReferences>true</OptimizeReferences>
+    </Link>
+  </ItemDefinitionGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+  <ImportGroup Label="ExtensionTargets">
+  </ImportGroup>
+</Project>
\ No newline at end of file
diff --git a/mk/exec-env/windows/librte_kvargs/librte_kvargs.vcxproj.filters b/mk/exec-env/windows/librte_kvargs/librte_kvargs.vcxproj.filters
new file mode 100644
index 000000000..ce48d6391
--- /dev/null
+++ b/mk/exec-env/windows/librte_kvargs/librte_kvargs.vcxproj.filters
@@ -0,0 +1,33 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ItemGroup>
+    <Filter Include="Source Files">
+      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
+      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
+    </Filter>
+    <Filter Include="Header Files">
+      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
+      <Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
+    </Filter>
+    <Filter Include="Resource Files">
+      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
+      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
+    </Filter>
+  </ItemGroup>
+  <ItemGroup>
+    <ClCompile Include="..\..\..\..\lib\librte_kvargs\rte_kvargs.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+  </ItemGroup>
+  <ItemGroup>
+    <ClInclude Include="..\..\..\..\lib\librte_kvargs\rte_kvargs.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_log.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\..\..\lib\librte_eal\common\include\rte_string_fns.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+  </ItemGroup>
+</Project>
\ No newline at end of file
diff --git a/mk/exec-env/windows/librte_kvargs/librte_kvargs.vcxproj.user b/mk/exec-env/windows/librte_kvargs/librte_kvargs.vcxproj.user
new file mode 100644
index 000000000..be2507870
--- /dev/null
+++ b/mk/exec-env/windows/librte_kvargs/librte_kvargs.vcxproj.user
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <PropertyGroup />
+</Project>
\ No newline at end of file
-- 
2.18.0.windows.1

Thread overview: 3+ messages
2018-11-29  5:05 Pallavi Kadam [this message]
2018-11-29 18:15 ` Stephen Hemminger
2018-11-30 16:04 ` Luca Boccassi
