add support for extra padding and backtraces

This commit is contained in:
Daan Leijen 2021-12-09 14:19:41 -08:00
parent 0be71a2cac
commit a84df3795a
6 changed files with 113 additions and 24 deletions

View File

@ -16,6 +16,7 @@ terms of the MIT license. A copy of the license can be found in the file
#define mi_trace_message(...)
#endif
#define MI_CACHE_LINE 64
#if defined(_MSC_VER)
#pragma warning(disable:4127) // suppress constant conditional warning (due to MI_SECURE paths)
@ -51,6 +52,11 @@ void _mi_trace_message(const char* fmt, ...);
void _mi_options_init(void);
void _mi_error_message(int err, const char* fmt, ...);
#if MI_DEBUG_TRACE > 0
void _mi_stack_trace_capture(void** strace, size_t len, size_t skip);
void _mi_stack_trace_print(const void* const* strace, size_t len, const mi_block_t* block, size_t bsize, size_t avail);
#endif
// random.c
void _mi_random_init(mi_random_ctx_t* ctx);
void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* new_ctx);
@ -389,7 +395,7 @@ static inline uintptr_t _mi_ptr_cookie(const void* p) {
----------------------------------------------------------- */
static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t size) {
mi_assert_internal(size <= (MI_SMALL_SIZE_MAX + MI_PADDING_SIZE));
mi_assert_internal(size <= (MI_SMALL_SIZE_MAX));
const size_t idx = _mi_wsize_from_size(size);
mi_assert_internal(idx < MI_PAGES_DIRECT);
return heap->pages_free_direct[idx];

View File

@ -60,6 +60,18 @@ terms of the MIT license. A copy of the license can be found in the file
#define MI_PADDING 1
#endif
#if !defined(MI_PADDING_EXTRA) // use extra padding bytes?
#define MI_PADDING_EXTRA (64)
#endif
#if !defined(MI_DEBUG_TRACE) // store stack trace at each allocation
#define MI_DEBUG_TRACE 1
#endif
#if !defined(MI_DEBUG_TRACE_LEN) // store stack trace at each allocation
#define MI_DEBUG_TRACE_LEN (6) // store up to N frames
#endif
// Encoded free lists allow detection of corrupted free lists
// and can detect buffer overflows, modify after free, and double `free`s.
@ -356,15 +368,16 @@ typedef struct mi_random_cxt_s {
typedef struct mi_padding_s {
uint32_t canary; // encoded block value to check validity of the padding (in case of overflow)
uint32_t delta; // padding bytes before the block. (mi_usable_size(p) - delta == exact allocated bytes)
#if (MI_DEBUG_TRACE > 0)
void* strace[MI_DEBUG_TRACE_LEN]; // stack trace at allocation time
#endif
} mi_padding_t;
#define MI_PADDING_SIZE (sizeof(mi_padding_t))
#define MI_PADDING_WSIZE ((MI_PADDING_SIZE + MI_INTPTR_SIZE - 1) / MI_INTPTR_SIZE)
#define MI_PADDING_SIZE (sizeof(mi_padding_t) + MI_PADDING_EXTRA)
#else
#define MI_PADDING_SIZE 0
#define MI_PADDING_WSIZE 0
#endif
#define MI_PAGES_DIRECT (MI_SMALL_WSIZE_MAX + MI_PADDING_WSIZE + 1)
#define MI_PAGES_DIRECT (MI_SMALL_WSIZE_MAX + 1)
// A heap owns a set of pages.

View File

@ -61,6 +61,9 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta));
padding->canary = (uint32_t)(mi_ptr_encode(page,block,page->keys));
padding->delta = (uint32_t)(delta);
#if (MI_DEBUG_TRACE)
_mi_stack_trace_capture(padding->strace, MI_DEBUG_TRACE_LEN, 2);
#endif
uint8_t* fill = (uint8_t*)padding - delta;
const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // set at most N initial padding bytes
for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; }
@ -97,10 +100,11 @@ extern inline mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexce
// The main allocation function
extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
if (mi_likely(size <= MI_SMALL_SIZE_MAX)) {
if (mi_likely(size + MI_PADDING_SIZE <= MI_SMALL_SIZE_MAX)) {
return mi_heap_malloc_small(heap, size);
}
else {
else
{
mi_assert(heap!=NULL);
mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE); // note: size can overflow but it is detected in malloc_generic
@ -216,18 +220,38 @@ static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block
// ---------------------------------------------------------------------------
#if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST)
// Decode and validate the padding record stored directly after `block`.
// On return, *bsize is the usable block size and *delta the number of unused
// bytes between the requested size and the padding record.
// Returns the padding record when the canary decodes correctly (padding
// intact), or NULL when the block was overflowed / corrupted.
// (Merged: the pre-change signature and old `return` line were left in the
// span as diff residue, which made the function invalid C.)
static const mi_padding_t* mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) {
  *bsize = mi_page_usable_block_size(page);
  const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize);
  *delta = padding->delta;
  // valid only if the canary matches the encoded block pointer and the
  // recorded delta fits inside the block
  if ((uint32_t)mi_ptr_encode(page, block, page->keys) == padding->canary && *delta <= *bsize) {
    return padding;
  }
  else {
    return NULL;
  }
}
#if MI_DEBUG_TRACE > 0
// Print the allocation-time stack trace stored in the padding of `block`.
// Does nothing if the padding canary no longer validates (the trace would
// be untrustworthy after a corruption).
static void _mi_error_trace(const mi_page_t* page, const mi_block_t* block) {
  size_t bsize;
  size_t delta;
  const mi_padding_t* const pad = mi_page_decode_padding(page, block, &delta, &bsize);
  if (pad == NULL) return;  // padding corrupted; no reliable trace available
  _mi_stack_trace_print(pad->strace, MI_DEBUG_TRACE_LEN, block, bsize, bsize - delta);
}
#else
// Stack traces disabled: keep the call sites unconditional with a no-op.
static void _mi_error_trace(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(page); MI_UNUSED(block);
}
#endif
// Return the exact usable size of a block.
// Return the exact usable size of a block (block size minus the padding
// delta), or 0 if the padding failed to validate (heap corruption).
// (Merged: the span contained both the old and new declaration of `ok` as
// diff residue — a redefinition; only the NULL-check form is kept.)
static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
  size_t bsize;
  size_t delta;
  bool ok = (mi_page_decode_padding(page, block, &delta, &bsize) != NULL);
  mi_assert_internal(ok); mi_assert_internal(delta <= bsize);
  return (ok ? bsize - delta : 0);
}
@ -235,7 +259,7 @@ static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* bl
static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, size_t* size, size_t* wrong) {
size_t bsize;
size_t delta;
bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
bool ok = (mi_page_decode_padding(page, block, &delta, &bsize) != NULL);
*size = *wrong = bsize;
if (!ok) return false;
mi_assert_internal(bsize >= delta);
@ -255,6 +279,7 @@ static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
size_t size;
size_t wrong;
if (!mi_verify_padding(page,block,&size,&wrong)) {
_mi_error_trace(page, block);
_mi_error_message(EFAULT, "buffer overflow in heap block %p of size %zu: write after %zu bytes\n", block, size, wrong );
}
}
@ -405,8 +430,8 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block
// and push it on the free list
if (mi_likely(local)) {
// owning thread can free a block directly
if (mi_unlikely(mi_check_is_double_free(page, block))) return;
mi_check_padding(page, block);
if (mi_unlikely(mi_check_is_double_free(page, block))) return;
#if (MI_DEBUG!=0)
memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
#endif
@ -487,8 +512,8 @@ void mi_free(void* p) mi_attr_noexcept
if (mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0)) { // the thread id matches and it is not a full page, nor has aligned blocks
// local, and not full or aligned
if (mi_unlikely(mi_check_is_double_free(page,block))) return;
mi_check_padding(page, block);
if (mi_unlikely(mi_check_is_double_free(page,block))) return;
mi_stat_free(page, block);
#if (MI_DEBUG!=0)
memset(block, MI_DEBUG_FREED, mi_page_block_size(page));

View File

@ -31,14 +31,7 @@ const mi_page_t _mi_page_empty = {
};
#define MI_PAGE_EMPTY() ((mi_page_t*)&_mi_page_empty)
#if (MI_PADDING>0) && (MI_INTPTR_SIZE >= 8)
#define MI_SMALL_PAGES_EMPTY { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() }
#elif (MI_PADDING>0)
#define MI_SMALL_PAGES_EMPTY { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() }
#else
#define MI_SMALL_PAGES_EMPTY { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY() }
#endif
// Empty page queues for every bin

View File

@ -346,6 +346,41 @@ void _mi_assert_fail(const char* assertion, const char* fname, unsigned line, co
}
#endif
// --------------------------------------------------------
// Stack traces
// --------------------------------------------------------
#if (MI_DEBUG_TRACE > 0) && defined(_WIN32)
// Capture up to `len` return addresses of the current call stack into
// `strace`, skipping the innermost `skip` frames (+1 so this wrapper
// itself is not recorded).
// NOTE(review): callers appear to rely on unused trailing entries being
// NULL (the print loop stops at the first NULL) — confirm `strace` is
// zero-initialized by the allocator before this is called.
void _mi_stack_trace_capture(void** strace, size_t len, size_t skip) {
  CaptureStackBackTrace((DWORD)skip + 1, (DWORD)len, strace, NULL);
}
#include <dbghelp.h>
#pragma comment(lib,"dbghelp")
void _mi_stack_trace_print(const void* const* strace, size_t len, const mi_block_t* block, size_t bsize, size_t avail) {
HANDLE current_process = GetCurrentProcess();
SymInitialize(current_process, NULL, TRUE);
PSYMBOL_INFO info = (PSYMBOL_INFO)_malloca(sizeof(SYMBOL_INFO) + 256 * sizeof(TCHAR));
if (info==NULL) return;
memset(info, 0, sizeof(info));
info->MaxNameLen = 255;
info->SizeOfStruct = sizeof(SYMBOL_INFO);
_mi_fprintf(NULL, NULL, "for block %p of %zu allocated bytes (%zu total in block), allocated at:\n", block, avail, bsize);
for (size_t i = 0; i < len && strace[i] != NULL; i++) {
if (SymFromAddr(current_process, (DWORD64)(strace[i]), 0, info)) {
_mi_fprintf(NULL, NULL, " frame %2zu: %8p: %s\n", i, strace[i], info->Name);
}
else {
_mi_fprintf(NULL, NULL, " frame %2zu: %8p: <unknown address: error: 0x%04x>\n", i, strace[i], GetLastError());
}
}
}
#else
void _mi_capture_stack_trace(void** strace, size_t len, size_t skip) {
MI_UNUSED(strace); MI_UNUSED(len); MI_UNUSED(skip);
}
#endif
// --------------------------------------------------------
// Errors
// --------------------------------------------------------

View File

@ -11,6 +11,7 @@ static void double_free1();
static void double_free2();
static void corrupt_free();
static void block_overflow1();
static void block_overflow2();
static void invalid_free();
static void test_aslr(void);
static void test_process_info(void);
@ -23,8 +24,9 @@ int main() {
// detect double frees and heap corruption
// double_free1();
// double_free2();
// corrupt_free();
corrupt_free();
// block_overflow1();
// block_overflow2();
// test_aslr();
// invalid_free();
// test_reserved();
@ -65,6 +67,15 @@ static void block_overflow1() {
free(p);
}
#define OVF_SIZE 100
// Deliberately write OVF_SIZE bytes past the end of a 30-byte allocation:
// with MI_PADDING_EXTRA this should be caught by the padding/canary check
// on free and report the allocation backtrace.
static void block_overflow2() {
  uint8_t* p = (uint8_t*)mi_malloc(30);
  memset(p+30, 0, OVF_SIZE);
  free(p);
}
// The double free samples come ArcHeap [1] by Insu Yun (issue #161)
// [1]: https://arxiv.org/pdf/1903.00503.pdf
@ -106,6 +117,7 @@ static void double_free2() {
// Try to corrupt the heap through buffer overflow
#define N 256
#define SZ 64
#define OVF_SZ 100
static void corrupt_free() {
void* p[N];
@ -121,7 +133,7 @@ static void corrupt_free() {
// try to corrupt the free list
for (int i = 0; i < N; i++) {
if (p[i] != NULL) {
memset(p[i], 0, SZ+8);
memset(p[i], 0, SZ+OVF_SZ);
}
}
// allocate more.. trying to trigger an allocation from a corrupted entry
@ -129,6 +141,11 @@ static void corrupt_free() {
for (int i = 0; i < 4096; i++) {
malloc(SZ);
}
// free the rest
for (int i = 0; i < N; i++) {
free(p[i]);
p[i] = NULL;
}
}
static void test_aslr(void) {