From: zhihongx.peng@intel.com
To: anatoly.burakov@intel.com, stephen@networkplumber.org
Cc: dev@dpdk.org, xueqin.lin@intel.com, Zhihong Peng <zhihongx.peng@intel.com>
Date: Thu, 10 Jun 2021 13:13:52 +0800
Message-Id: <20210610051352.48493-1-zhihongx.peng@intel.com>
X-Mailer: git-send-email 2.17.1
Subject: [dpdk-dev] [RFC] porting AddressSanitizer feature to DPDK

From: Zhihong Peng <zhihongx.peng@intel.com>

AddressSanitizer (ASan) is a widely used memory error detection tool from
Google. It detects use-after-free and {heap,stack,global}-buffer overflow
bugs in C/C++ programs, prints detailed error information when an error
occurs, and greatly improves debugging efficiency.

By referring to its implementation algorithm
(https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm),
this patch ports the heap-buffer-overflow and use-after-free detection to
DPDK.

Here is an example of a heap-buffer-overflow bug:
......
char *p = rte_zmalloc(NULL, 7, 0);
p[7] = 'a';
......

Here is an example of a use-after-free bug:
......
char *p = rte_zmalloc(NULL, 7, 0);
rte_free(p);
*p = 'a';
......
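For reference, here is a small self-contained sketch (illustrative only,
not part of the patch) of the shadow-memory mapping the detection relies
on. The constants mirror ASAN_SHADOW_GRAIN_SIZE, ASAN_MEM_FREE_FLAG,
ASAN_MEM_REDZONE_FLAG and ASAN_MEM_TO_SHADOW added to malloc_elem.h below;
the example address and the standalone program are made up:

#include <stdint.h>
#include <stdio.h>

/* Constants as defined by this patch in malloc_elem.h (x86-64 shadow offset). */
#define ASAN_SHADOW_GRAIN_SIZE 8        /* one shadow byte covers 8 app bytes */
#define ASAN_MEM_FREE_FLAG     0xfd     /* shadow value marking freed memory  */
#define ASAN_MEM_REDZONE_FLAG  0xfa     /* shadow value marking redzones      */
#define ASAN_MEM_TO_SHADOW(mem) (((mem) >> 3) + 0x00007fff8000)

int main(void)
{
        /* Hypothetical application address; any 64-bit heap pointer would do. */
        uintptr_t app = 0x7f0000001000;

        /* Each 8-byte granule of application memory maps to one shadow byte. */
        uintptr_t shadow = ASAN_MEM_TO_SHADOW(app);

        printf("app %#lx -> shadow %#lx (1 shadow byte per %d app bytes)\n",
                (unsigned long)app, (unsigned long)shadow,
                ASAN_SHADOW_GRAIN_SIZE);
        printf("freed memory is marked %#x, redzones %#x\n",
                ASAN_MEM_FREE_FLAG, ASAN_MEM_REDZONE_FLAG);
        return 0;
}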
If you want to use this feature, build DPDK with the following compilation
options:

    -Dc_args='-DRTE_MALLOC_ASAN' -Db_lundef=false -Db_sanitize=address

(A minimal test program that exercises the feature is sketched after the
patch.)

Signed-off-by: Xueqin Lin <xueqin.lin@intel.com>
Signed-off-by: Zhihong Peng <zhihongx.peng@intel.com>
---
 lib/eal/common/malloc_elem.c |  33 +++++++-
 lib/eal/common/malloc_elem.h | 141 ++++++++++++++++++++++++++++++++++-
 lib/eal/common/malloc_heap.c |  19 +++++
 lib/eal/common/rte_malloc.c  |   6 ++
 4 files changed, 197 insertions(+), 2 deletions(-)

diff --git a/lib/eal/common/malloc_elem.c b/lib/eal/common/malloc_elem.c
index c2c9461f1..4a146b1b9 100644
--- a/lib/eal/common/malloc_elem.c
+++ b/lib/eal/common/malloc_elem.c
@@ -446,6 +446,9 @@ malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
         struct malloc_elem *new_free_elem =
                 RTE_PTR_ADD(new_elem, size + MALLOC_ELEM_OVERHEAD);
 
+#ifdef RTE_MALLOC_ASAN
+        asan_clear_split_alloczone(new_free_elem);
+#endif
         split_elem(elem, new_free_elem);
         malloc_elem_free_list_insert(new_free_elem);
 
@@ -458,6 +461,9 @@ malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
         elem->state = ELEM_BUSY;
         elem->pad = old_elem_size;
 
+#ifdef RTE_MALLOC_ASAN
+        asan_clear_alloczone(elem);
+#endif
         /* put a dummy header in padding, to point to real element header */
         if (elem->pad > 0) {
             /* pad will be at least 64-bytes, as everything
              * is cache-line aligned */
@@ -475,7 +481,13 @@ malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
      * Re-insert original element, in case its new size makes it
      * belong on a different list.
      */
+#ifdef RTE_MALLOC_ASAN
+    asan_clear_split_alloczone(new_elem);
+#endif
     split_elem(elem, new_elem);
+#ifdef RTE_MALLOC_ASAN
+    asan_clear_alloczone(new_elem);
+#endif
     new_elem->state = ELEM_BUSY;
     malloc_elem_free_list_insert(elem);
 
@@ -601,6 +613,9 @@ malloc_elem_hide_region(struct malloc_elem *elem, void *start, size_t len)
     if (next && next_elem_is_adjacent(elem)) {
         len_after = RTE_PTR_DIFF(next, hide_end);
         if (len_after >= MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
+#ifdef RTE_MALLOC_ASAN
+            asan_clear_split_alloczone(hide_end);
+#endif
             /* split after */
             split_elem(elem, hide_end);
 
@@ -615,6 +630,9 @@ malloc_elem_hide_region(struct malloc_elem *elem, void *start, size_t len)
     if (prev && prev_elem_is_adjacent(elem)) {
         len_before = RTE_PTR_DIFF(hide_start, elem);
         if (len_before >= MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
+#ifdef RTE_MALLOC_ASAN
+            asan_clear_split_alloczone(hide_start);
+#endif
             /* split before */
             split_elem(elem, hide_start);
 
@@ -628,6 +646,9 @@ malloc_elem_hide_region(struct malloc_elem *elem, void *start, size_t len)
         }
     }
 
+#ifdef RTE_MALLOC_ASAN
+    asan_clear_alloczone(elem);
+#endif
     remove_elem(elem);
 }
 
@@ -641,8 +662,12 @@ malloc_elem_resize(struct malloc_elem *elem, size_t size)
     const size_t new_size = size + elem->pad + MALLOC_ELEM_OVERHEAD;
 
     /* if we request a smaller size, then always return ok */
-    if (elem->size >= new_size)
+    if (elem->size >= new_size) {
+#ifdef RTE_MALLOC_ASAN
+        asan_clear_alloczone(elem);
+#endif
         return 0;
+    }
 
     /* check if there is a next element, it's free and adjacent */
     if (!elem->next || elem->next->state != ELEM_FREE ||
@@ -661,9 +686,15 @@ malloc_elem_resize(struct malloc_elem *elem, size_t size)
         /* now we have a big block together. Lets cut it down a bit, by splitting */
         struct malloc_elem *split_pt = RTE_PTR_ADD(elem, new_size);
         split_pt = RTE_PTR_ALIGN_CEIL(split_pt, RTE_CACHE_LINE_SIZE);
+#ifdef RTE_MALLOC_ASAN
+        asan_clear_split_alloczone(split_pt);
+#endif
         split_elem(elem, split_pt);
         malloc_elem_free_list_insert(split_pt);
     }
+#ifdef RTE_MALLOC_ASAN
+    asan_clear_alloczone(elem);
+#endif
     return 0;
 }
 
diff --git a/lib/eal/common/malloc_elem.h b/lib/eal/common/malloc_elem.h
index a1e5f7f02..d0d8bbb48 100644
--- a/lib/eal/common/malloc_elem.h
+++ b/lib/eal/common/malloc_elem.h
@@ -36,10 +36,20 @@ struct malloc_elem {
     uint64_t header_cookie;         /* Cookie marking start of data */
                                     /* trailer cookie at start + size */
 #endif
+#ifdef RTE_MALLOC_ASAN
+    size_t user_size;
+    uint64_t asan_cookie[2]; /*must be next to header_cookie*/
+#endif
 } __rte_cache_aligned;
 
+static const unsigned MALLOC_ELEM_HEADER_LEN = sizeof(struct malloc_elem);
+
 #ifndef RTE_MALLOC_DEBUG
+#ifdef RTE_MALLOC_ASAN
+static const unsigned MALLOC_ELEM_TRAILER_LEN = RTE_CACHE_LINE_SIZE;
+#else
 static const unsigned MALLOC_ELEM_TRAILER_LEN = 0;
+#endif
 
 /* dummy function - just check if pointer is non-null */
 static inline int
@@ -90,9 +100,138 @@ malloc_elem_cookies_ok(const struct malloc_elem *elem)
 
 #endif
 
-static const unsigned MALLOC_ELEM_HEADER_LEN = sizeof(struct malloc_elem);
 #define MALLOC_ELEM_OVERHEAD (MALLOC_ELEM_HEADER_LEN + MALLOC_ELEM_TRAILER_LEN)
 
+#ifdef RTE_MALLOC_ASAN
+
+#define ASAN_SHADOW_GRAIN_SIZE 8
+#define ASAN_MEM_FREE_FLAG 0xfd
+#define ASAN_MEM_REDZONE_FLAG 0xfa
+#define ASAN_MEM_TO_SHADOW(mem) (((mem) >> 3) + 0x00007fff8000)
+
+#if defined(__clang__)
+__attribute__((no_sanitize("address", "hwaddress")))
+#else
+__attribute__((no_sanitize_address))
+#endif
+static inline void
+asan_set_shadow(void *addr, char val)
+{
+    *(char *)addr = val;
+}
+
+static inline void
+asan_set_zone(void *ptr, size_t len, uint32_t val)
+{
+    size_t offset;
+    char *shadow;
+    size_t zone_len = len / ASAN_SHADOW_GRAIN_SIZE;
+    if (len % ASAN_SHADOW_GRAIN_SIZE != 0)
+        zone_len += 1;
+
+    for (size_t i = 0; i < zone_len; i++) {
+        offset = i * ASAN_SHADOW_GRAIN_SIZE;
+        shadow = (char *)ASAN_MEM_TO_SHADOW(((int64_t)ptr + offset));
+        asan_set_shadow(shadow, val);
+    }
+}
+
+/*
+ * When the memory is released, the release mark is
+ * set in the corresponding range of the shadow area.
+ */
+static inline void
+asan_set_freezone(void *ptr, size_t size)
+{
+    asan_set_zone(ptr, size, ASAN_MEM_FREE_FLAG);
+}
+
+/*
+ * When the memory is allocated, memory state must set accessible.
+ */
+static inline void
+asan_clear_alloczone(struct malloc_elem *elem)
+{
+    asan_set_zone((void *)elem, elem->size, 0x0);
+}
+
+static inline void
+asan_clear_split_alloczone(struct malloc_elem *elem)
+{
+    void *ptr = RTE_PTR_SUB(elem, MALLOC_ELEM_TRAILER_LEN);
+    asan_set_zone(ptr, MALLOC_ELEM_OVERHEAD, 0x0);
+}
+
+/*
+ * When the memory is allocated, the memory boundary is
+ * marked in the corresponding range of the shadow area.
+ */
+static inline void
+asan_set_redzone(struct malloc_elem *elem, size_t user_size)
+{
+    uint64_t ptr;
+    char *shadow;
+    if (elem != NULL) {
+        if (elem->state != ELEM_PAD)
+            elem = RTE_PTR_ADD(elem, elem->pad);
+
+        elem->user_size = user_size;
+
+        /* Set mark before the start of the allocated memory */
+        ptr = (uint64_t)RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN)
+            - ASAN_SHADOW_GRAIN_SIZE;
+        shadow = (char *)ASAN_MEM_TO_SHADOW(ptr);
+        asan_set_shadow(shadow, ASAN_MEM_REDZONE_FLAG);
+        shadow = (char *)ASAN_MEM_TO_SHADOW(ptr
+            - ASAN_SHADOW_GRAIN_SIZE);
+        asan_set_shadow(shadow, ASAN_MEM_REDZONE_FLAG);
+
+        /* Set mark after the end of the allocated memory */
+        ptr = (uint64_t)RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN
+            + elem->user_size);
+        shadow = (char *)ASAN_MEM_TO_SHADOW(ptr);
+        uint32_t val = (ptr % ASAN_SHADOW_GRAIN_SIZE);
+        val = (val == 0) ? ASAN_MEM_REDZONE_FLAG : val;
+        asan_set_shadow(shadow, val);
+        shadow = (char *)ASAN_MEM_TO_SHADOW(ptr
+            + ASAN_SHADOW_GRAIN_SIZE);
+        asan_set_shadow(shadow, ASAN_MEM_REDZONE_FLAG);
+    }
+}
+
+/*
+ * When the memory is released, the mark of the memory boundary
+ * in the corresponding range of the shadow area is cleared.
+ */
+static inline void
+asan_clear_redzone(struct malloc_elem *elem)
+{
+    uint64_t ptr;
+    char *shadow;
+    if (elem != NULL) {
+        elem = RTE_PTR_ADD(elem, elem->pad);
+
+        /* Clear mark before the start of the allocated memory */
+        ptr = (uint64_t)RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN)
+            - ASAN_SHADOW_GRAIN_SIZE;
+        shadow = (char *)ASAN_MEM_TO_SHADOW(ptr);
+        asan_set_shadow(shadow, 0x00);
+        shadow = (char *)ASAN_MEM_TO_SHADOW(ptr
+            - ASAN_SHADOW_GRAIN_SIZE);
+        asan_set_shadow(shadow, 0x00);
+
+        /* Clear mark after the end of the allocated memory */
+        ptr = (uint64_t)RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN
+            + elem->user_size);
+        shadow = (char *)ASAN_MEM_TO_SHADOW(ptr);
+        asan_set_shadow(shadow, 0x00);
+        shadow = (char *)ASAN_MEM_TO_SHADOW(ptr
+            + ASAN_SHADOW_GRAIN_SIZE);
+        asan_set_shadow(shadow, 0x00);
+    }
+}
+#endif
+
 /*
  * Given a pointer to the start of a memory block returned by malloc, get
  * the actual malloc_elem header for that block.
diff --git a/lib/eal/common/malloc_heap.c b/lib/eal/common/malloc_heap.c
index ee400f38e..6d39549d3 100644
--- a/lib/eal/common/malloc_heap.c
+++ b/lib/eal/common/malloc_heap.c
@@ -238,6 +238,9 @@ heap_alloc(struct malloc_heap *heap, const char *type __rte_unused, size_t size,
 {
     struct malloc_elem *elem;
 
+#ifdef RTE_MALLOC_ASAN
+    size_t user_size = size;
+#endif
     size = RTE_CACHE_LINE_ROUNDUP(size);
     align = RTE_CACHE_LINE_ROUNDUP(align);
 
@@ -250,6 +253,9 @@ heap_alloc(struct malloc_heap *heap, const char *type __rte_unused, size_t size,
 
         /* increase heap's count of allocated elements */
         heap->alloc_count++;
+#ifdef RTE_MALLOC_ASAN
+        asan_set_redzone(elem, user_size);
+#endif
     }
 
     return elem == NULL ? NULL : (void *)(&elem[1]);
@@ -270,6 +276,9 @@ heap_alloc_biggest(struct malloc_heap *heap, const char *type __rte_unused,
 
         /* increase heap's count of allocated elements */
         heap->alloc_count++;
+#ifdef RTE_MALLOC_ASAN
+        asan_set_redzone(elem, size);
+#endif
     }
 
     return elem == NULL ? NULL : (void *)(&elem[1]);
@@ -841,6 +850,9 @@ malloc_heap_free(struct malloc_elem *elem)
     if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
         return -1;
 
+#ifdef RTE_MALLOC_ASAN
+    asan_clear_redzone(elem);
+#endif
     /* elem may be merged with previous element, so keep heap address */
     heap = elem->heap;
     msl = elem->msl;
@@ -848,6 +860,10 @@ malloc_heap_free(struct malloc_elem *elem)
 
     rte_spinlock_lock(&(heap->lock));
 
+#ifdef RTE_MALLOC_ASAN
+    void *asan_ptr = RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN + elem->pad);
+    size_t asan_data_len = elem->size - MALLOC_ELEM_OVERHEAD - elem->pad;
+#endif
     /* mark element as free */
     elem->state = ELEM_FREE;
 
@@ -1001,6 +1017,9 @@ malloc_heap_free(struct malloc_elem *elem)
         rte_mcfg_mem_write_unlock();
 free_unlock:
+#ifdef RTE_MALLOC_ASAN
+    asan_set_freezone(asan_ptr, asan_data_len);
+#endif
     rte_spinlock_unlock(&(heap->lock));
     return ret;
 }
diff --git a/lib/eal/common/rte_malloc.c b/lib/eal/common/rte_malloc.c
index 9d39e58c0..fe70ee938 100644
--- a/lib/eal/common/rte_malloc.c
+++ b/lib/eal/common/rte_malloc.c
@@ -170,6 +170,9 @@ rte_realloc_socket(void *ptr, size_t size, unsigned int align, int socket)
         RTE_LOG(ERR, EAL, "Error: memory corruption detected\n");
         return NULL;
     }
+#ifdef RTE_MALLOC_ASAN
+    size_t user_size = size;
+#endif
 
     size = RTE_CACHE_LINE_ROUNDUP(size), align = RTE_CACHE_LINE_ROUNDUP(align);
 
@@ -181,6 +184,9 @@ rte_realloc_socket(void *ptr, size_t size, unsigned int align, int socket)
         RTE_PTR_ALIGN(ptr, align) == ptr &&
         malloc_heap_resize(elem, size) == 0) {
         rte_eal_trace_mem_realloc(size, align, socket, ptr);
+#ifdef RTE_MALLOC_ASAN
+        asan_set_redzone(elem, user_size);
+#endif
         return ptr;
     }
 
-- 
2.17.1
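P.S. For anyone trying the feature: below is a minimal, hypothetical test
application (not part of this patch) that exercises the heap-buffer-overflow
case from the commit message. Built against a DPDK compiled with the ASan
options above, ASan is expected to report the out-of-bounds write:

#include <rte_eal.h>
#include <rte_malloc.h>

int main(int argc, char **argv)
{
        char *p;

        /* Initialize the EAL before using rte_malloc. */
        if (rte_eal_init(argc, argv) < 0)
                return -1;

        /* 7-byte allocation: writing p[7] is one byte past the end. */
        p = rte_zmalloc(NULL, 7, 0);
        if (p != NULL)
                p[7] = 'a';     /* ASan should flag this heap-buffer-overflow */

        rte_free(p);
        rte_eal_cleanup();
        return 0;
}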