From: zhihongx.peng@intel.com
To: anatoly.burakov@intel.com, konstantin.ananyev@intel.com,
stephen@networkplumber.org
Cc: dev@dpdk.org, xueqin.lin@intel.com,
Zhihong Peng <zhihongx.peng@intel.com>
Subject: [dpdk-dev] [RFC v2] porting AddressSanitizer feature to DPDK
Date: Tue, 15 Jun 2021 16:12:05 +0800
Message-ID: <20210615081205.101071-1-zhihongx.peng@intel.com>
In-Reply-To: <20210610051352.48493-1-zhihongx.peng@intel.com>
From: Zhihong Peng <zhihongx.peng@intel.com>
AddressSanitizer (ASan) is a widely used memory error detector
developed by Google. It helps to detect use-after-free and
{heap,stack,global}-buffer-overflow bugs in C/C++ programs and prints
detailed error reports when an error occurs, which greatly improves
debugging efficiency.
By referring to its implementation algorithm
(https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm),
this patch ports the heap-buffer-overflow and use-after-free
detection to DPDK.
Here is an example of heap-buffer-overflow bug:
......
char *p = rte_zmalloc(NULL, 7, 0);
p[7] = 'a';
......
Here is an example of use-after-free bug:
......
char *p = rte_zmalloc(NULL, 7, 0);
rte_free(p);
*p = 'a';
......
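The port follows the ASan shadow-memory scheme: every 8 bytes of
application memory are tracked by one shadow byte located at
(addr >> 3) + offset, which is what ASAN_MEM_TO_SHADOW() in this
patch encodes. A minimal sketch of that mapping (illustrative only,
not part of the patch; it assumes the default x86_64 Linux shadow
offset and uses a hypothetical helper name):

#include <stdint.h>

#define ASAN_SHADOW_OFFSET 0x00007fff8000ULL /* x86_64 default offset */

/* Return the shadow byte tracking the 8-byte granule containing addr. */
static inline uint8_t *
asan_shadow_of(const void *addr)
{
	return (uint8_t *)(((uintptr_t)addr >> 3) + ASAN_SHADOW_OFFSET);
}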
To enable this feature, build DPDK with the following meson options:
-Db_lundef=false -Db_sanitize=address
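For example, an illustrative build invocation (the build directory
name is arbitrary):

meson setup build -Db_lundef=false -Db_sanitize=address
ninja -C build

With b_sanitize set to address, lib/eal/common/meson.build defines
RTE_MALLOC_ASAN so that the malloc shadow-marking code in this patch
is compiled in.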
Signed-off-by: Xueqin Lin <xueqin.lin@intel.com>
Signed-off-by: Zhihong Peng <zhihongx.peng@intel.com>
---
lib/eal/common/malloc_elem.c | 26 +++++-
lib/eal/common/malloc_elem.h | 159 ++++++++++++++++++++++++++++++++++-
lib/eal/common/malloc_heap.c | 12 +++
lib/eal/common/meson.build | 4 +
lib/eal/common/rte_malloc.c | 7 ++
5 files changed, 206 insertions(+), 2 deletions(-)
diff --git a/lib/eal/common/malloc_elem.c b/lib/eal/common/malloc_elem.c
index c2c9461f1..bdd20a162 100644
--- a/lib/eal/common/malloc_elem.c
+++ b/lib/eal/common/malloc_elem.c
@@ -446,6 +446,8 @@ malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
struct malloc_elem *new_free_elem =
RTE_PTR_ADD(new_elem, size + MALLOC_ELEM_OVERHEAD);
+ asan_clear_split_alloczone(new_free_elem);
+
split_elem(elem, new_free_elem);
malloc_elem_free_list_insert(new_free_elem);
@@ -458,6 +460,8 @@ malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
elem->state = ELEM_BUSY;
elem->pad = old_elem_size;
+ asan_clear_alloczone(elem);
+
/* put a dummy header in padding, to point to real element header */
if (elem->pad > 0) { /* pad will be at least 64-bytes, as everything
* is cache-line aligned */
@@ -470,12 +474,18 @@ malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
return new_elem;
}
+ asan_clear_split_alloczone(new_elem);
+
/* we are going to split the element in two. The original element
* remains free, and the new element is the one allocated.
* Re-insert original element, in case its new size makes it
* belong on a different list.
*/
+
split_elem(elem, new_elem);
+
+ asan_clear_alloczone(new_elem);
+
new_elem->state = ELEM_BUSY;
malloc_elem_free_list_insert(elem);
@@ -601,6 +611,8 @@ malloc_elem_hide_region(struct malloc_elem *elem, void *start, size_t len)
if (next && next_elem_is_adjacent(elem)) {
len_after = RTE_PTR_DIFF(next, hide_end);
if (len_after >= MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
+ asan_clear_split_alloczone(hide_end);
+
/* split after */
split_elem(elem, hide_end);
@@ -615,6 +627,8 @@ malloc_elem_hide_region(struct malloc_elem *elem, void *start, size_t len)
if (prev && prev_elem_is_adjacent(elem)) {
len_before = RTE_PTR_DIFF(hide_start, elem);
if (len_before >= MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
+ asan_clear_split_alloczone(hide_start);
+
/* split before */
split_elem(elem, hide_start);
@@ -628,6 +642,8 @@ malloc_elem_hide_region(struct malloc_elem *elem, void *start, size_t len)
}
}
+ asan_clear_alloczone(elem);
+
remove_elem(elem);
}
@@ -641,8 +657,10 @@ malloc_elem_resize(struct malloc_elem *elem, size_t size)
const size_t new_size = size + elem->pad + MALLOC_ELEM_OVERHEAD;
/* if we request a smaller size, then always return ok */
- if (elem->size >= new_size)
+ if (elem->size >= new_size) {
+ asan_clear_alloczone(elem);
return 0;
+ }
/* check if there is a next element, it's free and adjacent */
if (!elem->next || elem->next->state != ELEM_FREE ||
@@ -661,9 +679,15 @@ malloc_elem_resize(struct malloc_elem *elem, size_t size)
/* now we have a big block together. Lets cut it down a bit, by splitting */
struct malloc_elem *split_pt = RTE_PTR_ADD(elem, new_size);
split_pt = RTE_PTR_ALIGN_CEIL(split_pt, RTE_CACHE_LINE_SIZE);
+
+ asan_clear_split_alloczone(split_pt);
+
split_elem(elem, split_pt);
malloc_elem_free_list_insert(split_pt);
}
+
+ asan_clear_alloczone(elem);
+
return 0;
}
diff --git a/lib/eal/common/malloc_elem.h b/lib/eal/common/malloc_elem.h
index a1e5f7f02..365625c83 100644
--- a/lib/eal/common/malloc_elem.h
+++ b/lib/eal/common/malloc_elem.h
@@ -36,10 +36,20 @@ struct malloc_elem {
uint64_t header_cookie; /* Cookie marking start of data */
/* trailer cookie at start + size */
#endif
+#ifdef RTE_MALLOC_ASAN
+ size_t user_size;
+ uint64_t asan_cookie[2]; /* must be next to header_cookie */
+#endif
} __rte_cache_aligned;
+static const unsigned MALLOC_ELEM_HEADER_LEN = sizeof(struct malloc_elem);
+
#ifndef RTE_MALLOC_DEBUG
+#ifdef RTE_MALLOC_ASAN
+static const unsigned MALLOC_ELEM_TRAILER_LEN = RTE_CACHE_LINE_SIZE;
+#else
static const unsigned MALLOC_ELEM_TRAILER_LEN = 0;
+#endif
/* dummy function - just check if pointer is non-null */
static inline int
@@ -90,9 +100,156 @@ malloc_elem_cookies_ok(const struct malloc_elem *elem)
#endif
-static const unsigned MALLOC_ELEM_HEADER_LEN = sizeof(struct malloc_elem);
#define MALLOC_ELEM_OVERHEAD (MALLOC_ELEM_HEADER_LEN + MALLOC_ELEM_TRAILER_LEN)
+#ifdef RTE_MALLOC_ASAN
+
+#define ASAN_SHADOW_GRAIN_SIZE 8
+#define ASAN_MEM_FREE_FLAG 0xfd
+#define ASAN_MEM_REDZONE_FLAG 0xfa
+#define ASAN_MEM_TO_SHADOW(mem) (((mem) >> 3) + 0x00007fff8000)
+
+#if defined(__clang__)
+__attribute__((no_sanitize("address", "hwaddress")))
+#else
+__attribute__((no_sanitize_address))
+#endif
+static inline void
+asan_set_shadow(void *addr, char val)
+{
+ *(char *)addr = val;
+}
+
+static inline void
+asan_set_zone(void *ptr, size_t len, uint32_t val)
+{
+ size_t offset;
+ char *shadow;
+ size_t zone_len = len / ASAN_SHADOW_GRAIN_SIZE;
+ if (len % ASAN_SHADOW_GRAIN_SIZE != 0)
+ zone_len += 1;
+
+ for (size_t i = 0; i < zone_len; i++) {
+ offset = i * ASAN_SHADOW_GRAIN_SIZE;
+ shadow = (char *)ASAN_MEM_TO_SHADOW(((int64_t)ptr + offset));
+ asan_set_shadow(shadow, val);
+ }
+}
+
+/*
+ * When the memory is released, the release mark is
+ * set in the corresponding range of the shadow area.
+ */
+static inline void
+asan_set_freezone(void *ptr, size_t size)
+{
+ asan_set_zone(ptr, size, ASAN_MEM_FREE_FLAG);
+}
+
+/*
+ * When the memory is allocated, memory state must set accessible.
+ */
+static inline void
+asan_clear_alloczone(struct malloc_elem *elem)
+{
+ asan_set_zone((void *)elem, elem->size, 0x0);
+}
+
+static inline void
+asan_clear_split_alloczone(struct malloc_elem *elem)
+{
+ void *ptr = RTE_PTR_SUB(elem, MALLOC_ELEM_TRAILER_LEN);
+ asan_set_zone(ptr, MALLOC_ELEM_OVERHEAD, 0x0);
+}
+
+/*
+ * When the memory is allocated, the memory boundary is
+ * marked in the corresponding range of the shadow area.
+ */
+static inline void
+asan_set_redzone(struct malloc_elem *elem, size_t user_size)
+{
+ uint64_t ptr;
+ char *shadow;
+ if (elem != NULL) {
+ if (elem->state != ELEM_PAD)
+ elem = RTE_PTR_ADD(elem, elem->pad);
+
+ elem->user_size = user_size;
+
+ /* Set mark before the start of the allocated memory */
+ ptr = (uint64_t)RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN)
+ - ASAN_SHADOW_GRAIN_SIZE;
+ shadow = (char *)ASAN_MEM_TO_SHADOW(ptr);
+ asan_set_shadow(shadow, ASAN_MEM_REDZONE_FLAG);
+ shadow = (char *)ASAN_MEM_TO_SHADOW(ptr
+ - ASAN_SHADOW_GRAIN_SIZE);
+ asan_set_shadow(shadow, ASAN_MEM_REDZONE_FLAG);
+
+ /* Set mark after the end of the allocated memory */
+ ptr = (uint64_t)RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN
+ + elem->user_size);
+ shadow = (char *)ASAN_MEM_TO_SHADOW(ptr);
+ uint32_t val = (ptr % ASAN_SHADOW_GRAIN_SIZE);
+ val = (val == 0) ? ASAN_MEM_REDZONE_FLAG : val;
+ asan_set_shadow(shadow, val);
+ shadow = (char *)ASAN_MEM_TO_SHADOW(ptr
+ + ASAN_SHADOW_GRAIN_SIZE);
+ asan_set_shadow(shadow, ASAN_MEM_REDZONE_FLAG);
+ }
+}
+
+/*
+ * When the memory is released, the mark of the memory boundary
+ * in the corresponding range of the shadow area is cleared.
+ */
+static inline void
+asan_clear_redzone(struct malloc_elem *elem)
+{
+ uint64_t ptr;
+ char *shadow;
+ if (elem != NULL) {
+ elem = RTE_PTR_ADD(elem, elem->pad);
+
+ /* Clear mark before the start of the allocated memory */
+ ptr = (uint64_t)RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN)
+ - ASAN_SHADOW_GRAIN_SIZE;
+ shadow = (char *)ASAN_MEM_TO_SHADOW(ptr);
+ asan_set_shadow(shadow, 0x00);
+ shadow = (char *)ASAN_MEM_TO_SHADOW(ptr
+ - ASAN_SHADOW_GRAIN_SIZE);
+ asan_set_shadow(shadow, 0x00);
+
+ /* Clear mark after the end of the allocated memory */
+ ptr = (uint64_t)RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN
+ + elem->user_size);
+ shadow = (char *)ASAN_MEM_TO_SHADOW(ptr);
+ asan_set_shadow(shadow, 0x00);
+ shadow = (char *)ASAN_MEM_TO_SHADOW(ptr
+ + ASAN_SHADOW_GRAIN_SIZE);
+ asan_set_shadow(shadow, 0x00);
+ }
+}
+
+#else
+static inline void
+asan_set_freezone(void *ptr __rte_unused, size_t size __rte_unused) { }
+
+static inline void
+asan_clear_alloczone(struct malloc_elem *elem __rte_unused) { }
+
+static inline void
+asan_clear_split_alloczone(struct malloc_elem *elem __rte_unused) { }
+
+static inline void
+asan_set_redzone(struct malloc_elem *elem __rte_unused,
+ size_t user_size __rte_unused) { }
+
+static inline void
+asan_clear_redzone(struct malloc_elem *elem __rte_unused) { }
+
+#endif
+
/*
* Given a pointer to the start of a memory block returned by malloc, get
* the actual malloc_elem header for that block.
diff --git a/lib/eal/common/malloc_heap.c b/lib/eal/common/malloc_heap.c
index ee400f38e..775d6789d 100644
--- a/lib/eal/common/malloc_heap.c
+++ b/lib/eal/common/malloc_heap.c
@@ -237,6 +237,7 @@ heap_alloc(struct malloc_heap *heap, const char *type __rte_unused, size_t size,
unsigned int flags, size_t align, size_t bound, bool contig)
{
struct malloc_elem *elem;
+ size_t user_size = size;
size = RTE_CACHE_LINE_ROUNDUP(size);
align = RTE_CACHE_LINE_ROUNDUP(align);
@@ -250,6 +251,8 @@ heap_alloc(struct malloc_heap *heap, const char *type __rte_unused, size_t size,
/* increase heap's count of allocated elements */
heap->alloc_count++;
+
+ asan_set_redzone(elem, user_size);
}
return elem == NULL ? NULL : (void *)(&elem[1]);
@@ -270,6 +273,8 @@ heap_alloc_biggest(struct malloc_heap *heap, const char *type __rte_unused,
/* increase heap's count of allocated elements */
heap->alloc_count++;
+
+ asan_set_redzone(elem, size);
}
return elem == NULL ? NULL : (void *)(&elem[1]);
@@ -841,6 +846,8 @@ malloc_heap_free(struct malloc_elem *elem)
if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
return -1;
+ asan_clear_redzone(elem);
+
/* elem may be merged with previous element, so keep heap address */
heap = elem->heap;
msl = elem->msl;
@@ -848,6 +855,9 @@ malloc_heap_free(struct malloc_elem *elem)
rte_spinlock_lock(&(heap->lock));
+ void *asan_ptr = RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN + elem->pad);
+ size_t asan_data_len = elem->size - MALLOC_ELEM_OVERHEAD - elem->pad;
+
/* mark element as free */
elem->state = ELEM_FREE;
@@ -1001,6 +1011,8 @@ malloc_heap_free(struct malloc_elem *elem)
rte_mcfg_mem_write_unlock();
free_unlock:
+ asan_set_freezone(asan_ptr, asan_data_len);
+
rte_spinlock_unlock(&(heap->lock));
return ret;
}
diff --git a/lib/eal/common/meson.build b/lib/eal/common/meson.build
index edfca7777..2f786841d 100644
--- a/lib/eal/common/meson.build
+++ b/lib/eal/common/meson.build
@@ -5,6 +5,10 @@ includes += include_directories('.')
cflags += [ '-DABI_VERSION="@0@"'.format(abi_version) ]
+if get_option('b_sanitize').startswith('address')
+ cflags += '-DRTE_MALLOC_ASAN'
+endif
+
if is_windows
sources += files(
'eal_common_bus.c',
diff --git a/lib/eal/common/rte_malloc.c b/lib/eal/common/rte_malloc.c
index 9d39e58c0..712bcbfce 100644
--- a/lib/eal/common/rte_malloc.c
+++ b/lib/eal/common/rte_malloc.c
@@ -162,6 +162,8 @@ rte_calloc(const char *type, size_t num, size_t size, unsigned align)
void *
rte_realloc_socket(void *ptr, size_t size, unsigned int align, int socket)
{
+ size_t user_size;
+
if (ptr == NULL)
return rte_malloc_socket(NULL, size, align, socket);
@@ -171,6 +173,8 @@ rte_realloc_socket(void *ptr, size_t size, unsigned int align, int socket)
return NULL;
}
+ user_size = size;
+
size = RTE_CACHE_LINE_ROUNDUP(size), align = RTE_CACHE_LINE_ROUNDUP(align);
/* check requested socket id and alignment matches first, and if ok,
@@ -181,6 +185,9 @@ rte_realloc_socket(void *ptr, size_t size, unsigned int align, int socket)
RTE_PTR_ALIGN(ptr, align) == ptr &&
malloc_heap_resize(elem, size) == 0) {
rte_eal_trace_mem_realloc(size, align, socket, ptr);
+
+ asan_set_redzone(elem, user_size);
+
return ptr;
}
--
2.17.1