From: zhihongx.peng@intel.com
To: david.marchand@redhat.com, anatoly.burakov@intel.com,
	konstantin.ananyev@intel.com, stephen@networkplumber.org
Cc: dev@dpdk.org, xueqin.lin@intel.com, Zhihong Peng
Date: Tue, 12 Oct 2021 09:43:17 +0000
Message-Id: <20211012094318.1154727-2-zhihongx.peng@intel.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20211012094318.1154727-1-zhihongx.peng@intel.com>
References: <20211011062810.422220-1-zhihongx.peng@intel.com>
	<20211012094318.1154727-1-zhihongx.peng@intel.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Subject: [dpdk-dev] [PATCH v9 2/3] DPDK code adapts to ASan

From: Zhihong Peng

DPDK ASan functionality is currently only supported on Linux x86_64.
To support other platforms, define the ASAN_SHADOW_OFFSET value
according to the Google ASan documentation and configure meson
(config/meson.build) accordingly.

Signed-off-by: Xueqin Lin
Signed-off-by: Zhihong Peng
---
v7: Split doc and code into two patches.
v8: No change.
v9: Modify the definition of RTE_MALLOC_ASAN.
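
Note for reviewers (kept below the cut so it stays out of the commit
message): ASAN_SHADOW_OFFSET has to match the shadow mapping the
compiler instruments for the target. On Linux x86_64, ASan maps every
8-byte granule of application memory to one shadow byte at
shadow = (addr >> 3) + 0x00007fff8000, which is what the
ASAN_MEM_TO_SHADOW() macro in this patch computes. A minimal
standalone sketch of that arithmetic (the example address is
arbitrary, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Linux x86_64 ASan shadow mapping: one shadow byte describes one
 * 2^3 = 8 byte granule of application memory.
 */
#define ASAN_SHADOW_OFFSET 0x00007fff8000
#define ASAN_SHADOW_SCALE  3

static inline uintptr_t
mem_to_shadow(uintptr_t addr)
{
	return (addr >> ASAN_SHADOW_SCALE) + ASAN_SHADOW_OFFSET;
}

int
main(void)
{
	uintptr_t mem = 0x7f0000001000; /* arbitrary example address */

	printf("mem %#lx -> shadow %#lx\n", (unsigned long)mem,
		(unsigned long)mem_to_shadow(mem));
	return 0;
}

Other platforms need their own offset (and possibly scale), which is
why ASAN_SHADOW_OFFSET is only defined under RTE_ARCH_X86_64 here.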
---
 config/meson.build           |   2 +
 lib/eal/common/malloc_elem.c |  26 ++++-
 lib/eal/common/malloc_elem.h | 194 ++++++++++++++++++++++++++++++++++-
 lib/eal/common/malloc_heap.c |  12 +++
 lib/eal/common/rte_malloc.c  |   9 +-
 5 files changed, 238 insertions(+), 5 deletions(-)

diff --git a/config/meson.build b/config/meson.build
index 5170b79fed..1ae9390e68 100644
--- a/config/meson.build
+++ b/config/meson.build
@@ -419,6 +419,8 @@ if get_option('b_sanitize') == 'address'
             error('broken dependency, "libasan"')
         endif
     endif
+
+    dpdk_conf.set10('RTE_MALLOC_ASAN', true)
 endif
 
 if get_option('default_library') == 'both'
diff --git a/lib/eal/common/malloc_elem.c b/lib/eal/common/malloc_elem.c
index c2c9461f1d..bdd20a162e 100644
--- a/lib/eal/common/malloc_elem.c
+++ b/lib/eal/common/malloc_elem.c
@@ -446,6 +446,8 @@ malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
 		struct malloc_elem *new_free_elem =
 				RTE_PTR_ADD(new_elem, size + MALLOC_ELEM_OVERHEAD);
 
+		asan_clear_split_alloczone(new_free_elem);
+
 		split_elem(elem, new_free_elem);
 		malloc_elem_free_list_insert(new_free_elem);
 
@@ -458,6 +460,8 @@ malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
 		elem->state = ELEM_BUSY;
 		elem->pad = old_elem_size;
 
+		asan_clear_alloczone(elem);
+
 		/* put a dummy header in padding, to point to real element header */
 		if (elem->pad > 0) {
 			/* pad will be at least 64-bytes, as everything
 			 * is cache-line aligned */
@@ -470,12 +474,18 @@ malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
 		return new_elem;
 	}
 
+	asan_clear_split_alloczone(new_elem);
+
 	/* we are going to split the element in two. The original element
 	 * remains free, and the new element is the one allocated.
 	 * Re-insert original element, in case its new size makes it
 	 * belong on a different list.
 	 */
+
 	split_elem(elem, new_elem);
+
+	asan_clear_alloczone(new_elem);
+
 	new_elem->state = ELEM_BUSY;
 	malloc_elem_free_list_insert(elem);
 
@@ -601,6 +611,8 @@ malloc_elem_hide_region(struct malloc_elem *elem, void *start, size_t len)
 	if (next && next_elem_is_adjacent(elem)) {
 		len_after = RTE_PTR_DIFF(next, hide_end);
 		if (len_after >= MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
+			asan_clear_split_alloczone(hide_end);
+
 			/* split after */
 			split_elem(elem, hide_end);
 
@@ -615,6 +627,8 @@ malloc_elem_hide_region(struct malloc_elem *elem, void *start, size_t len)
 	if (prev && prev_elem_is_adjacent(elem)) {
 		len_before = RTE_PTR_DIFF(hide_start, elem);
 		if (len_before >= MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
+			asan_clear_split_alloczone(hide_start);
+
 			/* split before */
 			split_elem(elem, hide_start);
 
@@ -628,6 +642,8 @@ malloc_elem_hide_region(struct malloc_elem *elem, void *start, size_t len)
 		}
 	}
 
+	asan_clear_alloczone(elem);
+
 	remove_elem(elem);
 }
 
@@ -641,8 +657,10 @@ malloc_elem_resize(struct malloc_elem *elem, size_t size)
 	const size_t new_size = size + elem->pad + MALLOC_ELEM_OVERHEAD;
 
 	/* if we request a smaller size, then always return ok */
-	if (elem->size >= new_size)
+	if (elem->size >= new_size) {
+		asan_clear_alloczone(elem);
 		return 0;
+	}
 
 	/* check if there is a next element, it's free and adjacent */
 	if (!elem->next || elem->next->state != ELEM_FREE ||
@@ -661,9 +679,15 @@ malloc_elem_resize(struct malloc_elem *elem, size_t size)
 		/* now we have a big block together. Lets cut it down a bit, by splitting */
 		struct malloc_elem *split_pt = RTE_PTR_ADD(elem, new_size);
 		split_pt = RTE_PTR_ALIGN_CEIL(split_pt, RTE_CACHE_LINE_SIZE);
+
+		asan_clear_split_alloczone(split_pt);
+
 		split_elem(elem, split_pt);
 		malloc_elem_free_list_insert(split_pt);
 	}
+
+	asan_clear_alloczone(elem);
+
 	return 0;
 }
diff --git a/lib/eal/common/malloc_elem.h b/lib/eal/common/malloc_elem.h
index a1e5f7f02c..a06c11ac8b 100644
--- a/lib/eal/common/malloc_elem.h
+++ b/lib/eal/common/malloc_elem.h
@@ -36,10 +36,20 @@ struct malloc_elem {
 	uint64_t header_cookie;		/* Cookie marking start of data */
 					/* trailer cookie at start + size */
 #endif
+#ifdef RTE_MALLOC_ASAN
+	size_t user_size;
+	uint64_t asan_cookie[2]; /* must be next to header_cookie */
+#endif
 } __rte_cache_aligned;
 
+static const unsigned int MALLOC_ELEM_HEADER_LEN = sizeof(struct malloc_elem);
+
 #ifndef RTE_MALLOC_DEBUG
-static const unsigned MALLOC_ELEM_TRAILER_LEN = 0;
+#ifdef RTE_MALLOC_ASAN
+static const unsigned int MALLOC_ELEM_TRAILER_LEN = RTE_CACHE_LINE_SIZE;
+#else
+static const unsigned int MALLOC_ELEM_TRAILER_LEN;
+#endif
 
 /* dummy function - just check if pointer is non-null */
 static inline int
@@ -55,7 +65,7 @@ set_trailer(struct malloc_elem *elem __rte_unused){ }
 
 #else
-static const unsigned MALLOC_ELEM_TRAILER_LEN = RTE_CACHE_LINE_SIZE;
+static const unsigned int MALLOC_ELEM_TRAILER_LEN = RTE_CACHE_LINE_SIZE;
 
 #define MALLOC_HEADER_COOKIE   0xbadbadbadadd2e55ULL /**< Header cookie. */
 #define MALLOC_TRAILER_COOKIE  0xadd2e55badbadbadULL /**< Trailer cookie.*/
@@ -90,9 +100,187 @@ malloc_elem_cookies_ok(const struct malloc_elem *elem)
 
 #endif
 
-static const unsigned MALLOC_ELEM_HEADER_LEN = sizeof(struct malloc_elem);
 #define MALLOC_ELEM_OVERHEAD (MALLOC_ELEM_HEADER_LEN + MALLOC_ELEM_TRAILER_LEN)
 
+#ifdef RTE_MALLOC_ASAN
+
+#ifdef RTE_ARCH_X86_64
+#define ASAN_SHADOW_OFFSET	0x00007fff8000
+#endif
+
+#define ASAN_SHADOW_GRAIN_SIZE	8
+#define ASAN_MEM_FREE_FLAG	0xfd
+#define ASAN_MEM_REDZONE_FLAG	0xfa
+#define ASAN_SHADOW_SCALE	3
+
+#define ASAN_MEM_SHIFT(mem) ((void *)((uintptr_t)(mem) >> ASAN_SHADOW_SCALE))
+#define ASAN_MEM_TO_SHADOW(mem) \
+	RTE_PTR_ADD(ASAN_MEM_SHIFT(mem), ASAN_SHADOW_OFFSET)
+
+#if defined(__clang__)
+__attribute__((no_sanitize("address", "hwaddress")))
+#else
+__attribute__((no_sanitize_address))
+#endif
+static inline void
+asan_set_shadow(void *addr, char val)
+{
+	*(char *)addr = val;
+}
+
+static inline void
+asan_set_zone(void *ptr, size_t len, uint32_t val)
+{
+	size_t offset, i;
+	void *shadow;
+	size_t zone_len = len / ASAN_SHADOW_GRAIN_SIZE;
+	if (len % ASAN_SHADOW_GRAIN_SIZE != 0)
+		zone_len += 1;
+
+	for (i = 0; i < zone_len; i++) {
+		offset = i * ASAN_SHADOW_GRAIN_SIZE;
+		shadow = ASAN_MEM_TO_SHADOW((uintptr_t)ptr + offset);
+		asan_set_shadow(shadow, val);
+	}
+}
+
+/*
+ * When memory is released, a free mark is set in the
+ * corresponding range of the shadow area.
+ */
+static inline void
+asan_set_freezone(void *ptr, size_t size)
+{
+	asan_set_zone(ptr, size, ASAN_MEM_FREE_FLAG);
+}
+
+/*
+ * When memory is allocated, its state must be set as accessible.
+ */
+static inline void
+asan_clear_alloczone(struct malloc_elem *elem)
+{
+	asan_set_zone((void *)elem, elem->size, 0x0);
+}
+
+static inline void
+asan_clear_split_alloczone(struct malloc_elem *elem)
+{
+	void *ptr = RTE_PTR_SUB(elem, MALLOC_ELEM_TRAILER_LEN);
+	asan_set_zone(ptr, MALLOC_ELEM_OVERHEAD, 0x0);
+}
+
+/*
+ * When memory is allocated, the memory boundary is
+ * marked in the corresponding range of the shadow area.
+ * Requirement: the redzone is >= 16 bytes and a power of two.
+ */
+static inline void
+asan_set_redzone(struct malloc_elem *elem, size_t user_size)
+{
+	uintptr_t head_redzone;
+	uintptr_t tail_redzone;
+	void *front_shadow;
+	void *tail_shadow;
+	uint32_t val;
+
+	if (elem != NULL) {
+		if (elem->state != ELEM_PAD)
+			elem = RTE_PTR_ADD(elem, elem->pad);
+
+		elem->user_size = user_size;
+
+		/* Set mark before the start of the allocated memory */
+		head_redzone = (uintptr_t)RTE_PTR_ADD(elem,
+			MALLOC_ELEM_HEADER_LEN - ASAN_SHADOW_GRAIN_SIZE);
+		front_shadow = ASAN_MEM_TO_SHADOW(head_redzone);
+		asan_set_shadow(front_shadow, ASAN_MEM_REDZONE_FLAG);
+		front_shadow = ASAN_MEM_TO_SHADOW(head_redzone
+			- ASAN_SHADOW_GRAIN_SIZE);
+		asan_set_shadow(front_shadow, ASAN_MEM_REDZONE_FLAG);
+
+		/* Set mark after the end of the allocated memory */
+		tail_redzone = (uintptr_t)RTE_PTR_ADD(elem,
+			MALLOC_ELEM_HEADER_LEN + elem->user_size);
+		tail_shadow = ASAN_MEM_TO_SHADOW(tail_redzone);
+		val = (tail_redzone % ASAN_SHADOW_GRAIN_SIZE);
+		val = (val == 0) ? ASAN_MEM_REDZONE_FLAG : val;
+		asan_set_shadow(tail_shadow, val);
+		tail_shadow = ASAN_MEM_TO_SHADOW(tail_redzone
+			+ ASAN_SHADOW_GRAIN_SIZE);
+		asan_set_shadow(tail_shadow, ASAN_MEM_REDZONE_FLAG);
+	}
+}
+
+/*
+ * When memory is released, the boundary mark in the
+ * corresponding range of the shadow area is cleared.
+ * Requirement: the redzone is >= 16 bytes and a power of two.
+ */
+static inline void
+asan_clear_redzone(struct malloc_elem *elem)
+{
+	uintptr_t head_redzone;
+	uintptr_t tail_redzone;
+	void *head_shadow;
+	void *tail_shadow;
+
+	if (elem != NULL) {
+		elem = RTE_PTR_ADD(elem, elem->pad);
+
+		/* Clear mark before the start of the allocated memory */
+		head_redzone = (uintptr_t)RTE_PTR_ADD(elem,
+			MALLOC_ELEM_HEADER_LEN - ASAN_SHADOW_GRAIN_SIZE);
+		head_shadow = ASAN_MEM_TO_SHADOW(head_redzone);
+		asan_set_shadow(head_shadow, 0x00);
+		head_shadow = ASAN_MEM_TO_SHADOW(head_redzone
+			- ASAN_SHADOW_GRAIN_SIZE);
+		asan_set_shadow(head_shadow, 0x00);
+
+		/* Clear mark after the end of the allocated memory */
+		tail_redzone = (uintptr_t)RTE_PTR_ADD(elem,
+			MALLOC_ELEM_HEADER_LEN + elem->user_size);
+		tail_shadow = ASAN_MEM_TO_SHADOW(tail_redzone);
+		asan_set_shadow(tail_shadow, 0x00);
+		tail_shadow = ASAN_MEM_TO_SHADOW(tail_redzone
+			+ ASAN_SHADOW_GRAIN_SIZE);
+		asan_set_shadow(tail_shadow, 0x00);
+	}
+}
+
+static inline size_t
+old_malloc_size(struct malloc_elem *elem)
+{
+	if (elem->state != ELEM_PAD)
+		elem = RTE_PTR_ADD(elem, elem->pad);
+
+	return elem->user_size;
+}
+#else
+static inline void
+asan_set_freezone(void *ptr __rte_unused, size_t size __rte_unused) { }
+
+static inline void
+asan_clear_alloczone(struct malloc_elem *elem __rte_unused) { }
+
+static inline void
+asan_clear_split_alloczone(struct malloc_elem *elem __rte_unused) { }
+
+static inline void
+asan_set_redzone(struct malloc_elem *elem __rte_unused,
+	size_t user_size __rte_unused) { }
+
+static inline void
+asan_clear_redzone(struct malloc_elem *elem __rte_unused) { }
+
+static inline size_t
+old_malloc_size(struct malloc_elem *elem)
+{
+	return elem->size - elem->pad - MALLOC_ELEM_OVERHEAD;
+}
+#endif
+
 /*
  * Given a pointer to the start of a memory block returned by malloc, get
  * the actual malloc_elem header for that block.
diff --git a/lib/eal/common/malloc_heap.c b/lib/eal/common/malloc_heap.c
index ee400f38ec..775d6789df 100644
--- a/lib/eal/common/malloc_heap.c
+++ b/lib/eal/common/malloc_heap.c
@@ -237,6 +237,7 @@ heap_alloc(struct malloc_heap *heap, const char *type __rte_unused, size_t size,
 		unsigned int flags, size_t align, size_t bound, bool contig)
 {
 	struct malloc_elem *elem;
+	size_t user_size = size;
 
 	size = RTE_CACHE_LINE_ROUNDUP(size);
 	align = RTE_CACHE_LINE_ROUNDUP(align);
@@ -250,6 +251,8 @@ heap_alloc(struct malloc_heap *heap, const char *type __rte_unused, size_t size,
 
 		/* increase heap's count of allocated elements */
 		heap->alloc_count++;
+
+		asan_set_redzone(elem, user_size);
 	}
 
 	return elem == NULL ? NULL : (void *)(&elem[1]);
@@ -270,6 +273,8 @@ heap_alloc_biggest(struct malloc_heap *heap, const char *type __rte_unused,
 
 		/* increase heap's count of allocated elements */
 		heap->alloc_count++;
+
+		asan_set_redzone(elem, size);
 	}
 
 	return elem == NULL ? NULL : (void *)(&elem[1]);
@@ -841,6 +846,8 @@ malloc_heap_free(struct malloc_elem *elem)
 	if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
 		return -1;
 
+	asan_clear_redzone(elem);
+
 	/* elem may be merged with previous element, so keep heap address */
 	heap = elem->heap;
 	msl = elem->msl;
@@ -848,6 +855,9 @@ malloc_heap_free(struct malloc_elem *elem)
 
 	rte_spinlock_lock(&(heap->lock));
 
+	void *asan_ptr = RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN + elem->pad);
+	size_t asan_data_len = elem->size - MALLOC_ELEM_OVERHEAD - elem->pad;
+
 	/* mark element as free */
 	elem->state = ELEM_FREE;
 
@@ -1001,6 +1011,8 @@ malloc_heap_free(struct malloc_elem *elem)
 	rte_mcfg_mem_write_unlock();
 free_unlock:
+	asan_set_freezone(asan_ptr, asan_data_len);
+
 	rte_spinlock_unlock(&(heap->lock));
 	return ret;
 }
diff --git a/lib/eal/common/rte_malloc.c b/lib/eal/common/rte_malloc.c
index 9d39e58c08..d0bec26920 100644
--- a/lib/eal/common/rte_malloc.c
+++ b/lib/eal/common/rte_malloc.c
@@ -162,6 +162,8 @@ rte_calloc(const char *type, size_t num, size_t size, unsigned align)
 void *
 rte_realloc_socket(void *ptr, size_t size, unsigned int align, int socket)
 {
+	size_t user_size;
+
 	if (ptr == NULL)
 		return rte_malloc_socket(NULL, size, align, socket);
 
@@ -171,6 +173,8 @@ rte_realloc_socket(void *ptr, size_t size, unsigned int align, int socket)
 		return NULL;
 	}
 
+	user_size = size;
+
 	size = RTE_CACHE_LINE_ROUNDUP(size), align = RTE_CACHE_LINE_ROUNDUP(align);
 
 	/* check requested socket id and alignment matches first, and if ok,
@@ -181,6 +185,9 @@ rte_realloc_socket(void *ptr, size_t size, unsigned int align, int socket)
 	    RTE_PTR_ALIGN(ptr, align) == ptr &&
 	    malloc_heap_resize(elem, size) == 0) {
 		rte_eal_trace_mem_realloc(size, align, socket, ptr);
+
+		asan_set_redzone(elem, user_size);
+
 		return ptr;
 	}
 
@@ -192,7 +199,7 @@ rte_realloc_socket(void *ptr, size_t size, unsigned int align, int socket)
 	if (new_ptr == NULL)
 		return NULL;
 	/* elem: |pad|data_elem|data|trailer| */
-	const size_t old_size = elem->size - elem->pad - MALLOC_ELEM_OVERHEAD;
+	const size_t old_size = old_malloc_size(elem);
 	rte_memcpy(new_ptr, ptr, old_size < size ? old_size : size);
 	rte_free(ptr);
-- 
2.25.1
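
P.S. for reviewers: the tail handling in asan_set_redzone() relies on
standard ASan shadow semantics: a shadow byte of 0 means all 8 bytes
of the granule are addressable, a value of 1..7 means only that many
leading bytes are, and 0xfa/0xfd mark redzone/freed memory. A
self-contained sketch of that encoding for a hypothetical 13-byte
allocation (shadow_value() is illustrative, not part of the patch):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define GRAIN 8		/* ASAN_SHADOW_GRAIN_SIZE */
#define REDZONE_FLAG 0xfa	/* ASAN_MEM_REDZONE_FLAG */

/* Shadow value for the granule holding byte `offset` of a `len`-byte,
 * granule-aligned user area: 0 = fully addressable, 1..7 = only that
 * many leading bytes addressable, 0xfa = redzone.
 */
static uint8_t
shadow_value(size_t len, size_t offset)
{
	size_t granule_start = offset - (offset % GRAIN);

	if (granule_start + GRAIN <= len)
		return 0x00;			/* whole granule valid */
	if (granule_start < len)
		return len - granule_start;	/* leading bytes valid */
	return REDZONE_FLAG;			/* past the allocation */
}

int
main(void)
{
	size_t len = 13, off;

	/* expected: granule 0 -> 0x00, granule 1 -> 0x05, granule 2 -> 0xfa */
	for (off = 0; off < 3 * GRAIN; off += GRAIN)
		printf("granule %zu -> %#04x\n", off / GRAIN,
			(unsigned)shadow_value(len, off));
	return 0;
}

This is why asan_set_redzone() stores tail_redzone %
ASAN_SHADOW_GRAIN_SIZE into the tail shadow byte when the user size is
not a multiple of 8.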
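
And a minimal reproducer for what the instrumentation buys us: built
with -Db_sanitize=address, the off-by-one store below should now be
reported by ASan as a heap-buffer-overflow into the trailer redzone,
instead of silently corrupting the adjacent element (hypothetical test
program; EAL arguments and error handling trimmed for brevity):

#include <rte_eal.h>
#include <rte_malloc.h>

int
main(int argc, char **argv)
{
	if (rte_eal_init(argc, argv) < 0)
		return -1;

	char *buf = rte_malloc(NULL, 9, 0);

	buf[9] = 'x';	/* one byte past a 9-byte allocation: caught by ASan */
	rte_free(buf);

	return rte_eal_cleanup();
}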