DPDK patches and discussions
 help / color / mirror / Atom feed
* [PATCH] mem: allow using ASan in multi-process mode
@ 2023-10-04 14:23 Artur Paszkiewicz
  2023-10-04 14:51 ` David Marchand
                   ` (2 more replies)
  0 siblings, 3 replies; 10+ messages in thread
From: Artur Paszkiewicz @ 2023-10-04 14:23 UTC (permalink / raw)
  To: anatoly.burakov; +Cc: dev, Artur Paszkiewicz

Multi-process applications operate on shared hugepage memory but each
process has its own ASan shadow region which is not synchronized with
the other processes. This causes issues when different processes try to
use the same memory because they have their own view of which addresses
are valid.

Fix it by mapping the shadow regions for memseg lists as shared memory.
The primary process is responsible for creating and removing the shared
memory objects.

Disable ASan instrumentation for triggering the page fault in
alloc_seg() because if the segment is already allocated by another
process and is marked as free in the shadow, accessing this address will
cause an ASan error.

Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
---
 lib/eal/common/eal_common_memory.c | 10 ++++
 lib/eal/common/eal_private.h       | 22 ++++++++
 lib/eal/linux/eal_memalloc.c       |  9 +++-
 lib/eal/linux/eal_memory.c         | 87 ++++++++++++++++++++++++++++++
 lib/eal/linux/meson.build          |  4 ++
 5 files changed, 131 insertions(+), 1 deletion(-)

diff --git a/lib/eal/common/eal_common_memory.c b/lib/eal/common/eal_common_memory.c
index d9433db623..2c15d5fc90 100644
--- a/lib/eal/common/eal_common_memory.c
+++ b/lib/eal/common/eal_common_memory.c
@@ -263,6 +263,12 @@ eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags)
 	RTE_LOG(DEBUG, EAL, "VA reserved for memseg list at %p, size %zx\n",
 			addr, mem_sz);
 
+#ifdef RTE_MALLOC_ASAN
+	if (eal_memseg_list_map_asan_shadow(msl) != 0) {
+		RTE_LOG(ERR, EAL, "Failed to map ASan shadow region for memseg list");
+		return -1;
+	}
+#endif
 	return 0;
 }
 
@@ -1050,6 +1056,10 @@ rte_eal_memory_detach(void)
 				RTE_LOG(ERR, EAL, "Could not unmap memory: %s\n",
 						rte_strerror(rte_errno));
 
+#ifdef RTE_MALLOC_ASAN
+		eal_memseg_list_unmap_asan_shadow(msl);
+#endif
+
 		/*
 		 * we are detaching the fbarray rather than destroying because
 		 * other processes might still reference this fbarray, and we
diff --git a/lib/eal/common/eal_private.h b/lib/eal/common/eal_private.h
index 5eadba4902..48df338cf9 100644
--- a/lib/eal/common/eal_private.h
+++ b/lib/eal/common/eal_private.h
@@ -300,6 +300,28 @@ eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags);
 void
 eal_memseg_list_populate(struct rte_memseg_list *msl, void *addr, int n_segs);
 
+#ifdef RTE_MALLOC_ASAN
+/**
+ * Map shared memory for MSL ASan shadow region.
+ *
+ * @param msl
+ *  Memory segment list.
+ * @return
+ *  0 on success, (-1) on failure.
+ */
+int
+eal_memseg_list_map_asan_shadow(struct rte_memseg_list *msl);
+
+/**
+ * Unmap the MSL ASan shadow region.
+ *
+ * @param msl
+ *  Memory segment list.
+ */
+void
+eal_memseg_list_unmap_asan_shadow(struct rte_memseg_list *msl);
+#endif
+
 /**
  * Distribute available memory between MSLs.
  *
diff --git a/lib/eal/linux/eal_memalloc.c b/lib/eal/linux/eal_memalloc.c
index f8b1588cae..5212ae6b56 100644
--- a/lib/eal/linux/eal_memalloc.c
+++ b/lib/eal/linux/eal_memalloc.c
@@ -511,6 +511,13 @@ resize_hugefile(int fd, uint64_t fa_offset, uint64_t page_sz, bool grow,
 			grow, dirty);
 }
 
+__rte_no_asan
+static inline void
+page_fault(void *addr)
+{
+	*(volatile int *)addr = *(volatile int *)addr;
+}
+
 static int
 alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
 		struct hugepage_info *hi, unsigned int list_idx,
@@ -641,7 +648,7 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
 	 * that is already there, so read the old value, and write itback.
 	 * kernel populates the page with zeroes initially.
 	 */
-	*(volatile int *)addr = *(volatile int *)addr;
+	page_fault(addr);
 
 	iova = rte_mem_virt2iova(addr);
 	if (iova == RTE_BAD_PHYS_ADDR) {
diff --git a/lib/eal/linux/eal_memory.c b/lib/eal/linux/eal_memory.c
index 9b6f08fba8..aabc5a68b3 100644
--- a/lib/eal/linux/eal_memory.c
+++ b/lib/eal/linux/eal_memory.c
@@ -41,6 +41,7 @@
 #include "eal_filesystem.h"
 #include "eal_hugepages.h"
 #include "eal_options.h"
+#include "malloc_elem.h"
 
 #define PFN_MASK_SIZE	8
 
@@ -1956,3 +1957,89 @@ rte_eal_memseg_init(void)
 #endif
 			memseg_secondary_init();
 }
+
+#ifdef RTE_MALLOC_ASAN
+int
+eal_memseg_list_map_asan_shadow(struct rte_memseg_list *msl)
+{
+	const struct internal_config *internal_conf =
+			eal_get_internal_configuration();
+	void *addr = msl->base_va;
+	void *shadow_addr = ASAN_MEM_TO_SHADOW(addr);
+	size_t shadow_sz = msl->len >> ASAN_SHADOW_SCALE;
+	int shm_oflag = O_RDWR;
+	char shm_path[PATH_MAX];
+	int shm_fd;
+	int ret = 0;
+
+	if (!msl->heap)
+		return 0;
+
+	snprintf(shm_path, sizeof(shm_path), "/%s_%s_shadow",
+		eal_get_hugefile_prefix(), msl->memseg_arr.name);
+
+	if (internal_conf->process_type == RTE_PROC_PRIMARY)
+		shm_oflag |= O_CREAT | O_TRUNC;
+
+	shm_fd = shm_open(shm_path, shm_oflag, 0600);
+	if (shm_fd == -1) {
+		RTE_LOG(DEBUG, EAL, "shadow shm_open() failed: %s\n",
+			strerror(errno));
+		return -1;
+	}
+
+	if (internal_conf->process_type == RTE_PROC_PRIMARY) {
+		ret = ftruncate(shm_fd, shadow_sz);
+		if (ret == -1) {
+			RTE_LOG(DEBUG, EAL, "shadow ftruncate() failed: %s\n",
+				strerror(errno));
+			goto out;
+		}
+	}
+
+	addr = mmap(shadow_addr, shadow_sz, PROT_READ | PROT_WRITE,
+		    MAP_SHARED | MAP_FIXED, shm_fd, 0);
+	if (addr == MAP_FAILED) {
+		RTE_LOG(DEBUG, EAL, "shadow mmap() failed: %s\n",
+			strerror(errno));
+		goto out;
+	}
+
+	if (addr != shadow_addr) {
+		RTE_LOG(DEBUG, EAL, "wrong shadow mmap() address\n");
+		munmap(addr, shadow_sz);
+		ret = -1;
+	}
+out:
+	close(shm_fd);
+	if (ret != 0) {
+		if (internal_conf->process_type == RTE_PROC_PRIMARY)
+			shm_unlink(shm_path);
+	}
+
+	return ret;
+}
+
+void
+eal_memseg_list_unmap_asan_shadow(struct rte_memseg_list *msl)
+{
+	const struct internal_config *internal_conf =
+			eal_get_internal_configuration();
+
+	if (!msl->heap)
+		return;
+
+	if (munmap(ASAN_MEM_TO_SHADOW(msl->base_va),
+		   msl->len >> ASAN_SHADOW_SCALE) != 0)
+		RTE_LOG(ERR, EAL, "Could not unmap asan shadow memory: %s\n",
+			strerror(errno));
+	if (internal_conf->process_type == RTE_PROC_PRIMARY) {
+		char shm_path[PATH_MAX];
+
+		snprintf(shm_path, sizeof(shm_path), "/%s_%s_shadow",
+			 eal_get_hugefile_prefix(),
+			 msl->memseg_arr.name);
+		shm_unlink(shm_path);
+	}
+}
+#endif
diff --git a/lib/eal/linux/meson.build b/lib/eal/linux/meson.build
index e99ebed256..1e8a48c8d3 100644
--- a/lib/eal/linux/meson.build
+++ b/lib/eal/linux/meson.build
@@ -23,3 +23,7 @@ deps += ['kvargs', 'telemetry']
 if has_libnuma
     dpdk_conf.set10('RTE_EAL_NUMA_AWARE_HUGEPAGES', true)
 endif
+
+if dpdk_conf.has('RTE_MALLOC_ASAN')
+    ext_deps += cc.find_library('rt')
+endif
-- 
2.35.3


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] mem: allow using ASan in multi-process mode
  2023-10-04 14:23 [PATCH] mem: allow using ASan in multi-process mode Artur Paszkiewicz
@ 2023-10-04 14:51 ` David Marchand
  2023-10-09 11:05   ` Artur Paszkiewicz
  2023-10-09 11:03 ` [PATCH v2] " Artur Paszkiewicz
  2023-10-25  9:27 ` [PATCH v3] " Artur Paszkiewicz
  2 siblings, 1 reply; 10+ messages in thread
From: David Marchand @ 2023-10-04 14:51 UTC (permalink / raw)
  To: Artur Paszkiewicz, anatoly.burakov; +Cc: dev

On Wed, Oct 4, 2023 at 4:23 PM Artur Paszkiewicz
<artur.paszkiewicz@intel.com> wrote:
>
> Multi-process applications operate on shared hugepage memory but each
> process has its own ASan shadow region which is not synchronized with
> the other processes. This causes issues when different processes try to
> use the same memory because they have their own view of which addresses
> are valid.
>
> Fix it by mapping the shadow regions for memseg lists as shared memory.
> The primary process is responsible for creating and removing the shared
> memory objects.
>
> Disable ASan instrumentation for triggering the page fault in
> alloc_seg() because if the segment is already allocated by another
> process and is marked as free in the shadow, accessing this address will
> cause an ASan error.
>
> Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>

Interesting patch.

I have a few questions:
- did you test with --in-memory mode? with --no-huge?
- I did not look at the patch, but I wonder if there is a risk some
"local" ASan region (for the process heap, for example) can overlap
with some "shared" ASan region (for shared DPDK hugepages).
- with this work, would unit tests (that were marked failing with
ASan) be ok now? See REGISTER_FAST_TEST macro in app/test.

Thanks for working on this topic.


-- 
David Marchand


^ permalink raw reply	[flat|nested] 10+ messages in thread

* [PATCH v2] mem: allow using ASan in multi-process mode
  2023-10-04 14:23 [PATCH] mem: allow using ASan in multi-process mode Artur Paszkiewicz
  2023-10-04 14:51 ` David Marchand
@ 2023-10-09 11:03 ` Artur Paszkiewicz
  2023-10-25  9:27 ` [PATCH v3] " Artur Paszkiewicz
  2 siblings, 0 replies; 10+ messages in thread
From: Artur Paszkiewicz @ 2023-10-09 11:03 UTC (permalink / raw)
  To: anatoly.burakov; +Cc: dev, Artur Paszkiewicz

Multi-process applications operate on shared hugepage memory but each
process has its own ASan shadow region which is not synchronized with
the other processes. This causes issues when different processes try to
use the same memory because they have their own view of which addresses
are valid.

Fix it by mapping the shadow regions for memseg lists as shared memory.
The primary process is responsible for creating and removing the shared
memory objects.

Disable ASan instrumentation for triggering the page fault in
alloc_seg() because if the segment is already allocated by another
process and is marked as free in the shadow, accessing this address will
cause an ASan error.

Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
---
v2:
- Added checks for config options disabling multi-process support.
- Fixed missing unmap in legacy mode.

 lib/eal/common/eal_common_memory.c |  9 +++
 lib/eal/common/eal_private.h       | 22 +++++++
 lib/eal/linux/eal_memalloc.c       |  9 ++-
 lib/eal/linux/eal_memory.c         | 97 ++++++++++++++++++++++++++++++
 lib/eal/linux/meson.build          |  4 ++
 5 files changed, 140 insertions(+), 1 deletion(-)

diff --git a/lib/eal/common/eal_common_memory.c b/lib/eal/common/eal_common_memory.c
index d9433db623..15f950810b 100644
--- a/lib/eal/common/eal_common_memory.c
+++ b/lib/eal/common/eal_common_memory.c
@@ -263,6 +263,12 @@ eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags)
 	RTE_LOG(DEBUG, EAL, "VA reserved for memseg list at %p, size %zx\n",
 			addr, mem_sz);
 
+#ifdef RTE_MALLOC_ASAN
+	if (eal_memseg_list_map_asan_shadow(msl) != 0) {
+		RTE_LOG(ERR, EAL, "Failed to map ASan shadow region for memseg list");
+		return -1;
+	}
+#endif
 	return 0;
 }
 
@@ -1050,6 +1056,9 @@ rte_eal_memory_detach(void)
 				RTE_LOG(ERR, EAL, "Could not unmap memory: %s\n",
 						rte_strerror(rte_errno));
 
+#ifdef RTE_MALLOC_ASAN
+		eal_memseg_list_unmap_asan_shadow(msl);
+#endif
 		/*
 		 * we are detaching the fbarray rather than destroying because
 		 * other processes might still reference this fbarray, and we
diff --git a/lib/eal/common/eal_private.h b/lib/eal/common/eal_private.h
index 5eadba4902..48df338cf9 100644
--- a/lib/eal/common/eal_private.h
+++ b/lib/eal/common/eal_private.h
@@ -300,6 +300,28 @@ eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags);
 void
 eal_memseg_list_populate(struct rte_memseg_list *msl, void *addr, int n_segs);
 
+#ifdef RTE_MALLOC_ASAN
+/**
+ * Map shared memory for MSL ASan shadow region.
+ *
+ * @param msl
+ *  Memory segment list.
+ * @return
+ *  0 on success, (-1) on failure.
+ */
+int
+eal_memseg_list_map_asan_shadow(struct rte_memseg_list *msl);
+
+/**
+ * Unmap the MSL ASan shadow region.
+ *
+ * @param msl
+ *  Memory segment list.
+ */
+void
+eal_memseg_list_unmap_asan_shadow(struct rte_memseg_list *msl);
+#endif
+
 /**
  * Distribute available memory between MSLs.
  *
diff --git a/lib/eal/linux/eal_memalloc.c b/lib/eal/linux/eal_memalloc.c
index f8b1588cae..5212ae6b56 100644
--- a/lib/eal/linux/eal_memalloc.c
+++ b/lib/eal/linux/eal_memalloc.c
@@ -511,6 +511,13 @@ resize_hugefile(int fd, uint64_t fa_offset, uint64_t page_sz, bool grow,
 			grow, dirty);
 }
 
+__rte_no_asan
+static inline void
+page_fault(void *addr)
+{
+	*(volatile int *)addr = *(volatile int *)addr;
+}
+
 static int
 alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
 		struct hugepage_info *hi, unsigned int list_idx,
@@ -641,7 +648,7 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
 	 * that is already there, so read the old value, and write itback.
 	 * kernel populates the page with zeroes initially.
 	 */
-	*(volatile int *)addr = *(volatile int *)addr;
+	page_fault(addr);
 
 	iova = rte_mem_virt2iova(addr);
 	if (iova == RTE_BAD_PHYS_ADDR) {
diff --git a/lib/eal/linux/eal_memory.c b/lib/eal/linux/eal_memory.c
index 9b6f08fba8..3dca532874 100644
--- a/lib/eal/linux/eal_memory.c
+++ b/lib/eal/linux/eal_memory.c
@@ -41,6 +41,7 @@
 #include "eal_filesystem.h"
 #include "eal_hugepages.h"
 #include "eal_options.h"
+#include "malloc_elem.h"
 
 #define PFN_MASK_SIZE	8
 
@@ -1469,6 +1470,9 @@ eal_legacy_hugepage_init(void)
 		if (msl->memseg_arr.count > 0)
 			continue;
 		/* this is an unused list, deallocate it */
+#ifdef RTE_MALLOC_ASAN
+		eal_memseg_list_unmap_asan_shadow(msl);
+#endif
 		mem_sz = msl->len;
 		munmap(msl->base_va, mem_sz);
 		msl->base_va = NULL;
@@ -1956,3 +1960,96 @@ rte_eal_memseg_init(void)
 #endif
 			memseg_secondary_init();
 }
+
+#ifdef RTE_MALLOC_ASAN
+int
+eal_memseg_list_map_asan_shadow(struct rte_memseg_list *msl)
+{
+	const struct internal_config *internal_conf =
+			eal_get_internal_configuration();
+	void *addr;
+	void *shadow_addr;
+	size_t shadow_sz;
+	int shm_oflag;
+	char shm_path[PATH_MAX];
+	int shm_fd;
+	int ret = 0;
+
+	if (!msl->heap || internal_conf->hugepage_file.unlink_before_mapping ||
+	    internal_conf->no_shconf || internal_conf->no_hugetlbfs)
+		return 0;
+
+	shadow_addr = ASAN_MEM_TO_SHADOW(msl->base_va);
+	shadow_sz = msl->len >> ASAN_SHADOW_SCALE;
+
+	snprintf(shm_path, sizeof(shm_path), "/%s_%s_shadow",
+		eal_get_hugefile_prefix(), msl->memseg_arr.name);
+
+	shm_oflag = O_RDWR;
+	if (internal_conf->process_type == RTE_PROC_PRIMARY)
+		shm_oflag |= O_CREAT | O_TRUNC;
+
+	shm_fd = shm_open(shm_path, shm_oflag, 0600);
+	if (shm_fd == -1) {
+		RTE_LOG(DEBUG, EAL, "shadow shm_open() failed: %s\n",
+			strerror(errno));
+		return -1;
+	}
+
+	if (internal_conf->process_type == RTE_PROC_PRIMARY) {
+		ret = ftruncate(shm_fd, shadow_sz);
+		if (ret == -1) {
+			RTE_LOG(DEBUG, EAL, "shadow ftruncate() failed: %s\n",
+				strerror(errno));
+			goto out;
+		}
+	}
+
+	addr = mmap(shadow_addr, shadow_sz, PROT_READ | PROT_WRITE,
+		    MAP_SHARED | MAP_FIXED, shm_fd, 0);
+	if (addr == MAP_FAILED) {
+		RTE_LOG(DEBUG, EAL, "shadow mmap() failed: %s\n",
+			strerror(errno));
+		ret = -1;
+		goto out;
+	}
+
+	if (addr != shadow_addr) {
+		RTE_LOG(DEBUG, EAL, "wrong shadow mmap() address\n");
+		munmap(addr, shadow_sz);
+		ret = -1;
+	}
+out:
+	close(shm_fd);
+	if (ret != 0) {
+		if (internal_conf->process_type == RTE_PROC_PRIMARY)
+			shm_unlink(shm_path);
+	}
+
+	return ret;
+}
+
+void
+eal_memseg_list_unmap_asan_shadow(struct rte_memseg_list *msl)
+{
+	const struct internal_config *internal_conf =
+			eal_get_internal_configuration();
+
+	if (!msl->heap || internal_conf->hugepage_file.unlink_before_mapping ||
+	    internal_conf->no_shconf || internal_conf->no_hugetlbfs)
+		return;
+
+	if (munmap(ASAN_MEM_TO_SHADOW(msl->base_va),
+		   msl->len >> ASAN_SHADOW_SCALE) != 0)
+		RTE_LOG(ERR, EAL, "Could not unmap asan shadow memory: %s\n",
+			strerror(errno));
+	if (internal_conf->process_type == RTE_PROC_PRIMARY) {
+		char shm_path[PATH_MAX];
+
+		snprintf(shm_path, sizeof(shm_path), "/%s_%s_shadow",
+			 eal_get_hugefile_prefix(),
+			 msl->memseg_arr.name);
+		shm_unlink(shm_path);
+	}
+}
+#endif
diff --git a/lib/eal/linux/meson.build b/lib/eal/linux/meson.build
index e99ebed256..1e8a48c8d3 100644
--- a/lib/eal/linux/meson.build
+++ b/lib/eal/linux/meson.build
@@ -23,3 +23,7 @@ deps += ['kvargs', 'telemetry']
 if has_libnuma
     dpdk_conf.set10('RTE_EAL_NUMA_AWARE_HUGEPAGES', true)
 endif
+
+if dpdk_conf.has('RTE_MALLOC_ASAN')
+    ext_deps += cc.find_library('rt')
+endif
-- 
2.35.3


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH] mem: allow using ASan in multi-process mode
  2023-10-04 14:51 ` David Marchand
@ 2023-10-09 11:05   ` Artur Paszkiewicz
  0 siblings, 0 replies; 10+ messages in thread
From: Artur Paszkiewicz @ 2023-10-09 11:05 UTC (permalink / raw)
  To: David Marchand, anatoly.burakov; +Cc: dev

On 10/4/23 16:51, David Marchand wrote:
> - did you test with --in-memory mode? with --no-huge?

Please see v2 of the patch. I added checks for these options. They imply
no multi-process support, so mapping is skipped in those cases.

> - I did not look at the patch, but I wonder if there is a risk some
> "local" ASan region (for the process heap, for example) can overlap
> with some "shared" ASan region (for shared DPDK hugepages).

I don't think it's possible unless the actual memory regions overlap.
The ASan shadow region is always at a fixed offset from the memory it
shadows. Also, this patch only makes the shadow regions shared, ASan
instrumentation already uses these regions.

> - with this work, would unit tests (that were marked failing with
> ASan) be ok now? See REGISTER_FAST_TEST macro in app/test.

I tried enabling these tests and some of them started passing with this
patch, namely:
- multiprocess_autotest
- eal_flags_c_opt_autotest
- eal_flags_main_opt_autotest
- eal_flags_a_opt_autotest

eal_flags_file_prefix_autotest still fails. The rest seem to be passing
even without the patch.

Regards,
Artur

^ permalink raw reply	[flat|nested] 10+ messages in thread

* [PATCH v3] mem: allow using ASan in multi-process mode
  2023-10-04 14:23 [PATCH] mem: allow using ASan in multi-process mode Artur Paszkiewicz
  2023-10-04 14:51 ` David Marchand
  2023-10-09 11:03 ` [PATCH v2] " Artur Paszkiewicz
@ 2023-10-25  9:27 ` Artur Paszkiewicz
  2024-10-03 21:18   ` Stephen Hemminger
  2024-10-17 10:03   ` [PATCH v4] " Artur Paszkiewicz
  2 siblings, 2 replies; 10+ messages in thread
From: Artur Paszkiewicz @ 2023-10-25  9:27 UTC (permalink / raw)
  To: anatoly.burakov; +Cc: dev, david.marchand, Artur Paszkiewicz

Multi-process applications operate on shared hugepage memory but each
process has its own ASan shadow region which is not synchronized with
the other processes. This causes issues when different processes try to
use the same memory because they have their own view of which addresses
are valid.

Fix it by mapping the shadow regions for memseg lists as shared memory.
The primary process is responsible for creating and removing the shared
memory objects.

Disable ASan instrumentation for triggering the page fault in
alloc_seg() because if the segment is already allocated by another
process and is marked as free in the shadow, accessing this address will
cause an ASan error.

Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
---
v3:
- Removed conditional compilation from eal_common_memory.c.
- Improved comments.
v2:
- Added checks for config options disabling multi-process support.
- Fixed missing unmap in legacy mode.

 lib/eal/common/eal_common_memory.c |   7 ++
 lib/eal/common/eal_private.h       |  35 ++++++++++
 lib/eal/linux/eal_memalloc.c       |  23 +++++--
 lib/eal/linux/eal_memory.c         | 101 +++++++++++++++++++++++++++++
 lib/eal/linux/meson.build          |   4 ++
 5 files changed, 164 insertions(+), 6 deletions(-)

diff --git a/lib/eal/common/eal_common_memory.c b/lib/eal/common/eal_common_memory.c
index d9433db623..5daf53d4d2 100644
--- a/lib/eal/common/eal_common_memory.c
+++ b/lib/eal/common/eal_common_memory.c
@@ -263,6 +263,11 @@ eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags)
 	RTE_LOG(DEBUG, EAL, "VA reserved for memseg list at %p, size %zx\n",
 			addr, mem_sz);
 
+	if (eal_memseg_list_map_asan_shadow(msl) != 0) {
+		RTE_LOG(ERR, EAL, "Failed to map ASan shadow region for memseg list");
+		return -1;
+	}
+
 	return 0;
 }
 
@@ -1050,6 +1055,8 @@ rte_eal_memory_detach(void)
 				RTE_LOG(ERR, EAL, "Could not unmap memory: %s\n",
 						rte_strerror(rte_errno));
 
+		eal_memseg_list_unmap_asan_shadow(msl);
+
 		/*
 		 * we are detaching the fbarray rather than destroying because
 		 * other processes might still reference this fbarray, and we
diff --git a/lib/eal/common/eal_private.h b/lib/eal/common/eal_private.h
index 5eadba4902..6535b38637 100644
--- a/lib/eal/common/eal_private.h
+++ b/lib/eal/common/eal_private.h
@@ -300,6 +300,41 @@ eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags);
 void
 eal_memseg_list_populate(struct rte_memseg_list *msl, void *addr, int n_segs);
 
+/**
+ * Map shared memory for MSL ASan shadow region.
+ *
+ * @param msl
+ *  Memory segment list.
+ * @return
+ *  0 on success, (-1) on failure.
+ */
+#ifdef RTE_MALLOC_ASAN
+int
+eal_memseg_list_map_asan_shadow(struct rte_memseg_list *msl);
+#else
+static inline int
+eal_memseg_list_map_asan_shadow(__rte_unused struct rte_memseg_list *msl)
+{
+	return 0;
+}
+#endif
+
+/**
+ * Unmap the MSL ASan shadow region.
+ *
+ * @param msl
+ *  Memory segment list.
+ */
+#ifdef RTE_MALLOC_ASAN
+void
+eal_memseg_list_unmap_asan_shadow(struct rte_memseg_list *msl);
+#else
+static inline void
+eal_memseg_list_unmap_asan_shadow(__rte_unused struct rte_memseg_list *msl)
+{
+}
+#endif
+
 /**
  * Distribute available memory between MSLs.
  *
diff --git a/lib/eal/linux/eal_memalloc.c b/lib/eal/linux/eal_memalloc.c
index f8b1588cae..a4151534a8 100644
--- a/lib/eal/linux/eal_memalloc.c
+++ b/lib/eal/linux/eal_memalloc.c
@@ -511,6 +511,21 @@ resize_hugefile(int fd, uint64_t fa_offset, uint64_t page_sz, bool grow,
 			grow, dirty);
 }
 
+__rte_no_asan
+static inline void
+page_fault(void *addr)
+{
+	/* We need to trigger a write to the page to enforce page fault but we
+	 * can't overwrite value that is already there, so read the old value
+	 * and write it back. Kernel populates the page with zeroes initially.
+	 *
+	 * Disable ASan instrumentation here because if the segment is already
+	 * allocated by another process and is marked as free in the shadow,
+	 * accessing this address will cause an ASan error.
+	 */
+	*(volatile int *)addr = *(volatile int *)addr;
+}
+
 static int
 alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
 		struct hugepage_info *hi, unsigned int list_idx,
@@ -636,12 +651,8 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
 		goto mapped;
 	}
 
-	/* we need to trigger a write to the page to enforce page fault and
-	 * ensure that page is accessible to us, but we can't overwrite value
-	 * that is already there, so read the old value, and write itback.
-	 * kernel populates the page with zeroes initially.
-	 */
-	*(volatile int *)addr = *(volatile int *)addr;
+	/* enforce page fault and ensure that page is accessible to us */
+	page_fault(addr);
 
 	iova = rte_mem_virt2iova(addr);
 	if (iova == RTE_BAD_PHYS_ADDR) {
diff --git a/lib/eal/linux/eal_memory.c b/lib/eal/linux/eal_memory.c
index 9b6f08fba8..102a57fd23 100644
--- a/lib/eal/linux/eal_memory.c
+++ b/lib/eal/linux/eal_memory.c
@@ -41,6 +41,7 @@
 #include "eal_filesystem.h"
 #include "eal_hugepages.h"
 #include "eal_options.h"
+#include "malloc_elem.h"
 
 #define PFN_MASK_SIZE	8
 
@@ -1469,6 +1470,7 @@ eal_legacy_hugepage_init(void)
 		if (msl->memseg_arr.count > 0)
 			continue;
 		/* this is an unused list, deallocate it */
+		eal_memseg_list_unmap_asan_shadow(msl);
 		mem_sz = msl->len;
 		munmap(msl->base_va, mem_sz);
 		msl->base_va = NULL;
@@ -1956,3 +1958,102 @@ rte_eal_memseg_init(void)
 #endif
 			memseg_secondary_init();
 }
+
+#ifdef RTE_MALLOC_ASAN
+int
+eal_memseg_list_map_asan_shadow(struct rte_memseg_list *msl)
+{
+	const struct internal_config *internal_conf =
+			eal_get_internal_configuration();
+	void *addr;
+	void *shadow_addr;
+	size_t shadow_sz;
+	int shm_oflag;
+	char shm_path[PATH_MAX];
+	int shm_fd;
+	int ret = 0;
+
+	if (!msl->heap)
+		return 0;
+
+	/* these options imply no secondary process support */
+	if (internal_conf->hugepage_file.unlink_before_mapping ||
+	    internal_conf->no_shconf || internal_conf->no_hugetlbfs) {
+		RTE_ASSERT(rte_eal_process_type() != RTE_PROC_SECONDARY);
+		return 0;
+	}
+
+	shadow_addr = ASAN_MEM_TO_SHADOW(msl->base_va);
+	shadow_sz = msl->len >> ASAN_SHADOW_SCALE;
+
+	snprintf(shm_path, sizeof(shm_path), "/%s_%s_shadow",
+		eal_get_hugefile_prefix(), msl->memseg_arr.name);
+
+	shm_oflag = O_RDWR;
+	if (internal_conf->process_type == RTE_PROC_PRIMARY)
+		shm_oflag |= O_CREAT | O_TRUNC;
+
+	shm_fd = shm_open(shm_path, shm_oflag, 0600);
+	if (shm_fd == -1) {
+		RTE_LOG(DEBUG, EAL, "shadow shm_open() failed: %s\n",
+			strerror(errno));
+		return -1;
+	}
+
+	if (internal_conf->process_type == RTE_PROC_PRIMARY) {
+		ret = ftruncate(shm_fd, shadow_sz);
+		if (ret == -1) {
+			RTE_LOG(DEBUG, EAL, "shadow ftruncate() failed: %s\n",
+				strerror(errno));
+			goto out;
+		}
+	}
+
+	addr = mmap(shadow_addr, shadow_sz, PROT_READ | PROT_WRITE,
+		    MAP_SHARED | MAP_FIXED, shm_fd, 0);
+	if (addr == MAP_FAILED) {
+		RTE_LOG(DEBUG, EAL, "shadow mmap() failed: %s\n",
+			strerror(errno));
+		ret = -1;
+		goto out;
+	}
+
+	if (addr != shadow_addr) {
+		RTE_LOG(DEBUG, EAL, "wrong shadow mmap() address\n");
+		munmap(addr, shadow_sz);
+		ret = -1;
+	}
+out:
+	close(shm_fd);
+	if (ret != 0) {
+		if (internal_conf->process_type == RTE_PROC_PRIMARY)
+			shm_unlink(shm_path);
+	}
+
+	return ret;
+}
+
+void
+eal_memseg_list_unmap_asan_shadow(struct rte_memseg_list *msl)
+{
+	const struct internal_config *internal_conf =
+			eal_get_internal_configuration();
+
+	if (!msl->heap || internal_conf->hugepage_file.unlink_before_mapping ||
+	    internal_conf->no_shconf || internal_conf->no_hugetlbfs)
+		return;
+
+	if (munmap(ASAN_MEM_TO_SHADOW(msl->base_va),
+		   msl->len >> ASAN_SHADOW_SCALE) != 0)
+		RTE_LOG(ERR, EAL, "Could not unmap asan shadow memory: %s\n",
+			strerror(errno));
+	if (internal_conf->process_type == RTE_PROC_PRIMARY) {
+		char shm_path[PATH_MAX];
+
+		snprintf(shm_path, sizeof(shm_path), "/%s_%s_shadow",
+			 eal_get_hugefile_prefix(),
+			 msl->memseg_arr.name);
+		shm_unlink(shm_path);
+	}
+}
+#endif
diff --git a/lib/eal/linux/meson.build b/lib/eal/linux/meson.build
index e99ebed256..1e8a48c8d3 100644
--- a/lib/eal/linux/meson.build
+++ b/lib/eal/linux/meson.build
@@ -23,3 +23,7 @@ deps += ['kvargs', 'telemetry']
 if has_libnuma
     dpdk_conf.set10('RTE_EAL_NUMA_AWARE_HUGEPAGES', true)
 endif
+
+if dpdk_conf.has('RTE_MALLOC_ASAN')
+    ext_deps += cc.find_library('rt')
+endif
-- 
2.35.3


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v3] mem: allow using ASan in multi-process mode
  2023-10-25  9:27 ` [PATCH v3] " Artur Paszkiewicz
@ 2024-10-03 21:18   ` Stephen Hemminger
  2024-10-17 10:19     ` Artur Paszkiewicz
  2024-10-17 10:03   ` [PATCH v4] " Artur Paszkiewicz
  1 sibling, 1 reply; 10+ messages in thread
From: Stephen Hemminger @ 2024-10-03 21:18 UTC (permalink / raw)
  To: Artur Paszkiewicz; +Cc: anatoly.burakov, dev, david.marchand

On Wed, 25 Oct 2023 11:27:17 +0200
Artur Paszkiewicz <artur.paszkiewicz@intel.com> wrote:

> Multi-process applications operate on shared hugepage memory but each
> process has its own ASan shadow region which is not synchronized with
> the other processes. This causes issues when different processes try to
> use the same memory because they have their own view of which addresses
> are valid.
> 
> Fix it by mapping the shadow regions for memseg lists as shared memory.
> The primary process is responsible for creating and removing the shared
> memory objects.
> 
> Disable ASan instrumentation for triggering the page fault in
> alloc_seg() because if the segment is already allocated by another
> process and is marked as free in the shadow, accessing this address will
> cause an ASan error.
> 
> Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
> ---
> v3:
> - Removed conditional compilation from eal_common_memory.c.
> - Improved comments.
> v2:
> - Added checks for config options disabling multi-process support.
> - Fixed missing unmap in legacy mode.
> 
>  lib/eal/common/eal_common_memory.c |   7 ++
>  lib/eal/common/eal_private.h       |  35 ++++++++++
>  lib/eal/linux/eal_memalloc.c       |  23 +++++--
>  lib/eal/linux/eal_memory.c         | 101 +++++++++++++++++++++++++++++
>  lib/eal/linux/meson.build          |   4 ++
>  5 files changed, 164 insertions(+), 6 deletions(-)

Makes sense, but patch has some fuzz against current main branch.
There is also another patch that addresses the ASAN touch issue.

https://patchwork.dpdk.org/project/dpdk/patch/20240723083419.12435-1-amichon@kalrayinc.com/

^ permalink raw reply	[flat|nested] 10+ messages in thread

* [PATCH v4] mem: allow using ASan in multi-process mode
  2023-10-25  9:27 ` [PATCH v3] " Artur Paszkiewicz
  2024-10-03 21:18   ` Stephen Hemminger
@ 2024-10-17 10:03   ` Artur Paszkiewicz
  2024-10-18  9:04     ` Artur Paszkiewicz
  2024-10-24  7:30     ` Artur Paszkiewicz
  1 sibling, 2 replies; 10+ messages in thread
From: Artur Paszkiewicz @ 2024-10-17 10:03 UTC (permalink / raw)
  To: dev; +Cc: anatoly.burakov, david.marchand, stephen

Multi-process applications operate on shared hugepage memory but each
process has its own ASan shadow region which is not synchronized with
the other processes. This causes issues when different processes try to
use the same memory because they have their own view of which addresses
are valid.

Fix it by mapping the shadow regions for allocated segments as shared
memory. The primary process is responsible for creating and removing the
shared memory objects.

Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
---
v4:
- Map ASan shadow shm after mapping the segment.
  Due to a change in ASan behavior[1] the mapped shadow shared memory
  regions are remapped later, when segments are mapped. So instead of
  mapping the whole shadow region when reserving the memseg list memory,
  map only the fragments corresponding to the segments after they are
  mapped. Because of this it is also no longer necessary to disable ASan
  instrumentation for triggering the page fault in alloc_seg().
- Adjusted function naming.
- Enabled unit tests.
v3:
- Removed conditional compilation from eal_common_memory.c.
- Improved comments.
v2:
- Added checks for config options disabling multi-process support.
- Fixed missing unmap in legacy mode.

[1] https://github.com/llvm/llvm-project/commit/a34e702aa16fde4cc76e9360d985a64e008e0b23

 app/test/test_mp_secondary.c       |  2 +-
 app/test/test_pdump.c              |  2 +-
 lib/eal/common/eal_common_memory.c |  7 +++
 lib/eal/common/eal_private.h       | 54 ++++++++++++++++
 lib/eal/linux/eal_memalloc.c       | 30 +++++++++
 lib/eal/linux/eal_memory.c         | 98 ++++++++++++++++++++++++++++++
 lib/eal/linux/meson.build          |  4 ++
 7 files changed, 195 insertions(+), 2 deletions(-)

diff --git a/app/test/test_mp_secondary.c b/app/test/test_mp_secondary.c
index f3694530a8..7da2878f64 100644
--- a/app/test/test_mp_secondary.c
+++ b/app/test/test_mp_secondary.c
@@ -223,4 +223,4 @@ test_mp_secondary(void)
 
 #endif /* !RTE_EXEC_ENV_WINDOWS */
 
-REGISTER_FAST_TEST(multiprocess_autotest, false, false, test_mp_secondary);
+REGISTER_FAST_TEST(multiprocess_autotest, false, true, test_mp_secondary);
diff --git a/app/test/test_pdump.c b/app/test/test_pdump.c
index 9f7769707e..a0919e89ba 100644
--- a/app/test/test_pdump.c
+++ b/app/test/test_pdump.c
@@ -219,4 +219,4 @@ test_pdump(void)
 	return TEST_SUCCESS;
 }
 
-REGISTER_FAST_TEST(pdump_autotest, true, false, test_pdump);
+REGISTER_FAST_TEST(pdump_autotest, true, true, test_pdump);
diff --git a/lib/eal/common/eal_common_memory.c b/lib/eal/common/eal_common_memory.c
index a185e0b580..8fbd0c5af9 100644
--- a/lib/eal/common/eal_common_memory.c
+++ b/lib/eal/common/eal_common_memory.c
@@ -263,6 +263,11 @@ eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags)
 	EAL_LOG(DEBUG, "VA reserved for memseg list at %p, size %zx",
 			addr, mem_sz);
 
+	if (eal_memseg_list_init_asan_shadow(msl) != 0) {
+		EAL_LOG(ERR, "Failed to init ASan shadow region for memseg list");
+		return -1;
+	}
+
 	return 0;
 }
 
@@ -1052,6 +1057,8 @@ rte_eal_memory_detach(void)
 				EAL_LOG(ERR, "Could not unmap memory: %s",
 						rte_strerror(rte_errno));
 
+		eal_memseg_list_cleanup_asan_shadow(msl);
+
 		/*
 		 * we are detaching the fbarray rather than destroying because
 		 * other processes might still reference this fbarray, and we
diff --git a/lib/eal/common/eal_private.h b/lib/eal/common/eal_private.h
index bb315dab04..96e05647ff 100644
--- a/lib/eal/common/eal_private.h
+++ b/lib/eal/common/eal_private.h
@@ -309,6 +309,60 @@ eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags);
 void
 eal_memseg_list_populate(struct rte_memseg_list *msl, void *addr, int n_segs);
 
+/**
+ * Initialize the MSL ASan shadow region shared memory.
+ *
+ * @param msl
+ *  Memory segment list.
+ * @return
+ *  0 on success, (-1) on failure.
+ */
+#ifdef RTE_MALLOC_ASAN
+int
+eal_memseg_list_init_asan_shadow(struct rte_memseg_list *msl);
+#else
+static inline int
+eal_memseg_list_init_asan_shadow(__rte_unused struct rte_memseg_list *msl)
+{
+	return 0;
+}
+#endif
+
+/**
+ * Cleanup the MSL ASan shadow region shared memory.
+ *
+ * @param msl
+ *  Memory segment list.
+ */
+#ifdef RTE_MALLOC_ASAN
+void
+eal_memseg_list_cleanup_asan_shadow(struct rte_memseg_list *msl);
+#else
+static inline void
+eal_memseg_list_cleanup_asan_shadow(__rte_unused struct rte_memseg_list *msl)
+{
+}
+#endif
+
+/**
+ * Get the MSL ASan shadow shared memory object file descriptor.
+ *
+ * @param msl_idx
+ *  Index of the MSL.
+ * @return
+ *  A file descriptor.
+ */
+#ifdef RTE_MALLOC_ASAN
+int
+eal_memseg_list_get_asan_shadow_fd(int msl_idx);
+#else
+static inline int
+eal_memseg_list_get_asan_shadow_fd(__rte_unused int msl_idx)
+{
+	return -1;
+}
+#endif
+
 /**
  * Distribute available memory between MSLs.
  *
diff --git a/lib/eal/linux/eal_memalloc.c b/lib/eal/linux/eal_memalloc.c
index e354efc95d..5ea6dc25b0 100644
--- a/lib/eal/linux/eal_memalloc.c
+++ b/lib/eal/linux/eal_memalloc.c
@@ -37,6 +37,7 @@
 #include "eal_memalloc.h"
 #include "eal_memcfg.h"
 #include "eal_private.h"
+#include "malloc_elem.h"
 
 const int anonymous_hugepages_supported =
 #ifdef MAP_HUGE_SHIFT
@@ -677,6 +678,35 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
 				__func__);
 #endif
 
+#ifdef RTE_MALLOC_ASAN
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	int shadow_shm_fd = eal_memseg_list_get_asan_shadow_fd(list_idx);
+
+	if (shadow_shm_fd != -1) {
+		void *shadow_base_addr, *shadow_addr;
+		off_t shadow_map_offset;
+		size_t shadow_sz;
+
+		shadow_base_addr = ASAN_MEM_TO_SHADOW(mcfg->memsegs[list_idx].base_va);
+		shadow_addr = ASAN_MEM_TO_SHADOW(addr);
+		shadow_map_offset = (char *)shadow_addr - (char *)shadow_base_addr;
+		shadow_sz = alloc_sz >> ASAN_SHADOW_SCALE;
+
+		va = mmap(shadow_addr, shadow_sz, PROT_READ | PROT_WRITE,
+			  MAP_SHARED | MAP_FIXED, shadow_shm_fd, shadow_map_offset);
+		if (va == MAP_FAILED) {
+			EAL_LOG(DEBUG, "shadow mmap() failed: %s",
+				strerror(errno));
+			goto mapped;
+		}
+
+		if (va != shadow_addr) {
+			EAL_LOG(DEBUG, "wrong shadow mmap() address");
+			munmap(va, shadow_sz);
+			goto mapped;
+		}
+	}
+#endif
 	huge_recover_sigbus();
 
 	ms->addr = addr;
diff --git a/lib/eal/linux/eal_memory.c b/lib/eal/linux/eal_memory.c
index 45879ca743..2795abdbf4 100644
--- a/lib/eal/linux/eal_memory.c
+++ b/lib/eal/linux/eal_memory.c
@@ -41,6 +41,7 @@
 #include "eal_filesystem.h"
 #include "eal_hugepages.h"
 #include "eal_options.h"
+#include "malloc_elem.h"
 
 #define PFN_MASK_SIZE	8
 
@@ -1469,6 +1470,7 @@ eal_legacy_hugepage_init(void)
 		if (msl->memseg_arr.count > 0)
 			continue;
 		/* this is an unused list, deallocate it */
+		eal_memseg_list_cleanup_asan_shadow(msl);
 		mem_sz = msl->len;
 		munmap(msl->base_va, mem_sz);
 		msl->base_va = NULL;
@@ -1915,6 +1917,10 @@ memseg_secondary_init(void)
 	return 0;
 }
 
+#ifdef RTE_MALLOC_ASAN
+static int msl_asan_shadow_fd[RTE_MAX_MEMSEG_LISTS];
+#endif
+
 int
 rte_eal_memseg_init(void)
 {
@@ -1947,6 +1953,12 @@ rte_eal_memseg_init(void)
 		EAL_LOG(WARNING, "Please use --"OPT_LEGACY_MEM" option, or recompile with NUMA support.");
 	}
 #endif
+#ifdef RTE_MALLOC_ASAN
+	int msl_idx;
+
+	for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++)
+		msl_asan_shadow_fd[msl_idx] = -1;
+#endif
 
 	return rte_eal_process_type() == RTE_PROC_PRIMARY ?
 #ifndef RTE_ARCH_64
@@ -1956,3 +1968,89 @@ rte_eal_memseg_init(void)
 #endif
 			memseg_secondary_init();
 }
+
+#ifdef RTE_MALLOC_ASAN
+int
+eal_memseg_list_init_asan_shadow(struct rte_memseg_list *msl)
+{
+	const struct internal_config *internal_conf =
+			eal_get_internal_configuration();
+	int msl_idx = msl - rte_eal_get_configuration()->mem_config->memsegs;
+	int shm_oflag;
+	char shm_path[PATH_MAX];
+	int shm_fd;
+
+	if (!msl->heap)
+		return 0;
+
+	/* these options imply no secondary process support */
+	if (internal_conf->hugepage_file.unlink_before_mapping ||
+	    internal_conf->no_shconf || internal_conf->no_hugetlbfs) {
+		RTE_ASSERT(rte_eal_process_type() != RTE_PROC_SECONDARY);
+		return 0;
+	}
+
+	snprintf(shm_path, sizeof(shm_path), "/%s_%s_shadow",
+		eal_get_hugefile_prefix(), msl->memseg_arr.name);
+
+	shm_oflag = O_RDWR;
+	if (internal_conf->process_type == RTE_PROC_PRIMARY)
+		shm_oflag |= O_CREAT | O_TRUNC;
+
+	shm_fd = shm_open(shm_path, shm_oflag, 0600);
+	if (shm_fd == -1) {
+		EAL_LOG(DEBUG, "shadow shm_open() failed: %s",
+			strerror(errno));
+		return -1;
+	}
+
+	if (internal_conf->process_type == RTE_PROC_PRIMARY) {
+		if (ftruncate(shm_fd, msl->len >> ASAN_SHADOW_SCALE) == -1) {
+			EAL_LOG(DEBUG, "shadow ftruncate() failed: %s",
+				strerror(errno));
+			close(shm_fd);
+			if (internal_conf->process_type == RTE_PROC_PRIMARY)
+				shm_unlink(shm_path);
+			return -1;
+		}
+	}
+
+	msl_asan_shadow_fd[msl_idx] = shm_fd;
+
+	return 0;
+}
+
+void
+eal_memseg_list_cleanup_asan_shadow(struct rte_memseg_list *msl)
+{
+	const struct internal_config *internal_conf =
+			eal_get_internal_configuration();
+	int msl_idx = msl - rte_eal_get_configuration()->mem_config->memsegs;
+	int *shm_fd = &msl_asan_shadow_fd[msl_idx];
+
+	if (*shm_fd == -1)
+		return;
+
+	close(*shm_fd);
+	*shm_fd = -1;
+
+	if (munmap(ASAN_MEM_TO_SHADOW(msl->base_va),
+		   msl->len >> ASAN_SHADOW_SCALE) != 0)
+		EAL_LOG(ERR, "Could not unmap asan shadow memory: %s",
+			strerror(errno));
+	if (internal_conf->process_type == RTE_PROC_PRIMARY) {
+		char shm_path[PATH_MAX];
+
+		snprintf(shm_path, sizeof(shm_path), "/%s_%s_shadow",
+			 eal_get_hugefile_prefix(),
+			 msl->memseg_arr.name);
+		shm_unlink(shm_path);
+	}
+}
+
+int
+eal_memseg_list_get_asan_shadow_fd(int msl_idx)
+{
+	return msl_asan_shadow_fd[msl_idx];
+}
+#endif
diff --git a/lib/eal/linux/meson.build b/lib/eal/linux/meson.build
index e99ebed256..1e8a48c8d3 100644
--- a/lib/eal/linux/meson.build
+++ b/lib/eal/linux/meson.build
@@ -23,3 +23,7 @@ deps += ['kvargs', 'telemetry']
 if has_libnuma
     dpdk_conf.set10('RTE_EAL_NUMA_AWARE_HUGEPAGES', true)
 endif
+
+if dpdk_conf.has('RTE_MALLOC_ASAN')
+    ext_deps += cc.find_library('rt')
+endif
-- 
2.43.0


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v3] mem: allow using ASan in multi-process mode
  2024-10-03 21:18   ` Stephen Hemminger
@ 2024-10-17 10:19     ` Artur Paszkiewicz
  0 siblings, 0 replies; 10+ messages in thread
From: Artur Paszkiewicz @ 2024-10-17 10:19 UTC (permalink / raw)
  To: Stephen Hemminger; +Cc: anatoly.burakov, dev, david.marchand

On 10/3/24 23:18, Stephen Hemminger wrote:
> Makes sense, but patch has some fuzz against current main branch.
> There is also another patch that address the ASAN touch issue.
> 
> https://patchwork.dpdk.org/project/dpdk/patch/20240723083419.12435-1-amichon@kalrayinc.com/

I just sent a new version of the patch, it no longer needs the change
that was related to that linked patch.

Thanks,
Artur

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v4] mem: allow using ASan in multi-process mode
  2024-10-17 10:03   ` [PATCH v4] " Artur Paszkiewicz
@ 2024-10-18  9:04     ` Artur Paszkiewicz
  2024-10-24  7:30     ` Artur Paszkiewicz
  1 sibling, 0 replies; 10+ messages in thread
From: Artur Paszkiewicz @ 2024-10-18  9:04 UTC (permalink / raw)
  To: dev

Recheck-request: iol-unit-amd64-testing

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v4] mem: allow using ASan in multi-process mode
  2024-10-17 10:03   ` [PATCH v4] " Artur Paszkiewicz
  2024-10-18  9:04     ` Artur Paszkiewicz
@ 2024-10-24  7:30     ` Artur Paszkiewicz
  1 sibling, 0 replies; 10+ messages in thread
From: Artur Paszkiewicz @ 2024-10-24  7:30 UTC (permalink / raw)
  To: dev

Recheck-request: 
rebase=main,iol-compile-amd64-testing,iol-compile-arm64-testing,iol-unit-amd64-testing,iol-unit-arm64-testing

^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2024-10-24  7:30 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-10-04 14:23 [PATCH] mem: allow using ASan in multi-process mode Artur Paszkiewicz
2023-10-04 14:51 ` David Marchand
2023-10-09 11:05   ` Artur Paszkiewicz
2023-10-09 11:03 ` [PATCH v2] " Artur Paszkiewicz
2023-10-25  9:27 ` [PATCH v3] " Artur Paszkiewicz
2024-10-03 21:18   ` Stephen Hemminger
2024-10-17 10:19     ` Artur Paszkiewicz
2024-10-17 10:03   ` [PATCH v4] " Artur Paszkiewicz
2024-10-18  9:04     ` Artur Paszkiewicz
2024-10-24  7:30     ` Artur Paszkiewicz

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).