Compare commits
28 Commits
Commit SHAs:

343a747f2f, 31473c8e37, 24ef590532, f2a2eb4ad0, b9e44dfa78, b2fe83fa2c, 6d852d9ff5,
1270eec6c0, 0dafa1e0a0, 04ab0f639e, 5fcb2615a8, 95a8196490, 636931874f, f690711539,
ad47cab97c, 83f8451e62, b91198826c, 9b8bb5b6d6, 7a7a774257, 72ca23d14f, 5739714b8d,
b6e2b6e975, 65b2cebcef, 28893a6c1b, ea75c745e1, e125c04081, 8c04558af8, a84df3795a
@@ -21,6 +21,7 @@ option(MI_BUILD_OBJECT "Build object library" ON)
option(MI_BUILD_TESTS "Build test executables" ON)
option(MI_DEBUG_TSAN "Build with thread sanitizer (needs clang)" OFF)
option(MI_DEBUG_UBSAN "Build with undefined-behavior sanitizer (needs clang++)" OFF)
option(MI_DEBUG_TRACE "Store allocation stack trace in each heap block to debug heap block overflows or corruption" OFF)
option(MI_SKIP_COLLECT_ON_EXIT, "Skip collecting memory on program exit" OFF)

# deprecated options

@@ -28,6 +29,9 @@ option(MI_CHECK_FULL "Use full internal invariant checking in DEBUG mode
option(MI_INSTALL_TOPLEVEL "Install directly into $CMAKE_INSTALL_PREFIX instead of PREFIX/lib/mimalloc-version (deprecated)" OFF)
option(MI_USE_LIBATOMIC "Explicitly link with -latomic (on older systems) (deprecated and detected automatically)" OFF)

set(MI_PADDING_EXTRA 0 CACHE STRING "Specify extra bytes for padding in each heap block (to debug heap block overflows)")

include(GNUInstallDirs)
include("cmake/mimalloc-config-version.cmake")

@@ -128,6 +132,17 @@ if(MI_DEBUG_FULL)
  list(APPEND mi_defines MI_DEBUG=3)   # full invariant checking
endif()

if(MI_DEBUG_TRACE)
  message(STATUS "Enable allocation trace in each heap block (MI_DEBUG_TRACE=ON)")
  list(APPEND mi_defines MI_DEBUG_TRACE=1)
  set(CMAKE_ENABLE_EXPORTS TRUE)
endif()

if(MI_PADDING_EXTRA)
  message(STATUS "Add extra debug padding to each heap block (MI_PADDING_EXTRA=${MI_PADDING_EXTRA})")
  list(APPEND mi_defines MI_PADDING_EXTRA=${MI_PADDING_EXTRA})
endif()

if(NOT MI_PADDING)
  message(STATUS "Disable padding of heap blocks in debug mode (MI_PADDING=OFF)")
  list(APPEND mi_defines MI_PADDING=0)

@@ -230,6 +245,12 @@ else()
  if (MI_LIBATOMIC OR MI_USE_LIBATOMIC)
    list(APPEND mi_libraries atomic)
  endif()
  if(MI_DEBUG_TRACE)
    find_library(MI_LIBEXECINFO execinfo)
    if (MI_LIBEXECINFO)
      list(APPEND mi_libraries ${MI_LIBEXECINFO})
    endif()
  endif()
endif()

# -----------------------------------------------------------------------------
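Taken together, these CMake hunks make the tracing support an opt-in configure switch. A plausible way to try it out (the invocation is illustrative; only the MI_DEBUG_TRACE and MI_PADDING_EXTRA cache variables come from the hunks above):

  cmake -DMI_DEBUG_TRACE=ON -DMI_PADDING_EXTRA=128 ..
  cmake --build .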
@@ -116,7 +116,7 @@
<SDLCheck>true</SDLCheck>
<ConformanceMode>true</ConformanceMode>
<AdditionalIncludeDirectories>../../include</AdditionalIncludeDirectories>
<PreprocessorDefinitions>MI_DEBUG=3;%(PreprocessorDefinitions);</PreprocessorDefinitions>
<PreprocessorDefinitions>MI_DEBUG_TRACE=1;MI_DEBUG=3;%(PreprocessorDefinitions);</PreprocessorDefinitions>
<CompileAs>CompileAsCpp</CompileAs>
<SupportJustMyCode>false</SupportJustMyCode>
<LanguageStandard>Default</LanguageStandard>

@@ -116,7 +116,7 @@
<SDLCheck>true</SDLCheck>
<ConformanceMode>true</ConformanceMode>
<AdditionalIncludeDirectories>../../include</AdditionalIncludeDirectories>
<PreprocessorDefinitions>MI_DEBUG=3;%(PreprocessorDefinitions);</PreprocessorDefinitions>
<PreprocessorDefinitions>MI_DEBUG_TRACE=1;MI_DEBUG=3;%(PreprocessorDefinitions);</PreprocessorDefinitions>
<CompileAs>CompileAsCpp</CompileAs>
<SupportJustMyCode>false</SupportJustMyCode>
<LanguageStandard>stdcpp20</LanguageStandard>
@@ -16,6 +16,7 @@ terms of the MIT license. A copy of the license can be found in the file
#define mi_trace_message(...)
#endif

#define MI_CACHE_LINE 64
#if defined(_MSC_VER)
#pragma warning(disable:4127)   // suppress constant conditional warning (due to MI_SECURE paths)

@@ -57,6 +58,11 @@ void _mi_trace_message(const char* fmt, ...);
void _mi_options_init(void);
void _mi_error_message(int err, const char* fmt, ...);

#if MI_DEBUG_TRACE > 0
void _mi_stack_trace_capture(void** strace, size_t len, size_t skip);
void _mi_stack_trace_print(const char* msg, void** strace, size_t len, const mi_block_t* block, size_t bsize, size_t avail);
#endif

// random.c
void _mi_random_init(mi_random_ctx_t* ctx);
void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* new_ctx);

@@ -143,6 +149,7 @@ void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_att
void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept;
mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p);
bool _mi_free_delayed_block(mi_block_t* block);
void _mi_show_block_trace_with_predecessor(const mi_page_t* page, const mi_block_t* block, const char* msg);

#if MI_DEBUG>1
bool _mi_page_is_valid(mi_page_t* page);

@@ -405,7 +412,7 @@ static inline uintptr_t _mi_ptr_cookie(const void* p) {
----------------------------------------------------------- */

static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t size) {
  mi_assert_internal(size <= (MI_SMALL_SIZE_MAX + MI_PADDING_SIZE));
  mi_assert_internal(size <= (MI_SMALL_SIZE_MAX + MI_PADDING_MINSIZE));
  const size_t idx = _mi_wsize_from_size(size);
  mi_assert_internal(idx < MI_PAGES_DIRECT);
  return heap->pages_free_direct[idx];

@@ -648,7 +655,8 @@ static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t*
  // check for free list corruption: is `next` at least in the same page?
  // TODO: check if `next` is `page->block_size` aligned?
  if mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next)) {
    _mi_error_message(EFAULT, "corrupted free list entry of size %zub at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next);
    _mi_show_block_trace_with_predecessor(page, block, "free block");
    _mi_error_message(EFAULT, "corrupted free list entry of size %zu at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next);
    next = NULL;
  }
  return next;
@@ -60,9 +60,26 @@ terms of the MIT license. A copy of the license can be found in the file
#define MI_PADDING 1
#endif

#if !defined(MI_DEBUG_TRACE)        // store stack trace at each allocation
#define MI_DEBUG_TRACE (0)
#endif

#if !defined(MI_DEBUG_TRACE_LEN)
#define MI_DEBUG_TRACE_LEN (8)      // store up to N frames if tracing is enabled
#endif

#if !defined(MI_PADDING_EXTRA)      // use extra padding bytes? (so a stack trace can be preserved or next block corruption prevented)
#if MI_DEBUG_TRACE > 0
#define MI_PADDING_EXTRA (64)
#else
#define MI_PADDING_EXTRA (0)
#endif
#endif


// Encoded free lists allow detection of corrupted free lists
// and can detect buffer overflows, modify after free, and double `free`s.
// (It must be enabled if MI_PADDING is enabled as the same mechanism is used to encode the canary.)
#if (MI_SECURE>=3 || MI_DEBUG>=1 || MI_PADDING > 0)
#define MI_ENCODE_FREELIST 1
#endif

@@ -354,20 +371,40 @@ typedef struct mi_random_cxt_s {
} mi_random_ctx_t;


// In debug mode there is a padding structure at the end of the blocks to check for buffer overflows
// If MI_PADDING is enabled, there is a padding structure at the end of the blocks to check for buffer overflows.
// The full layout of a block then becomes:
//
// |--- data ---------|--- fill ----------|--- struct padding_s -----------------------------------------|
// |.. actual data .. | .. delta bytes .. | canary_lo | .. extra .. | canary | delta | .. stack trace .. |
//
// where the delta bytes are used to align the padding structure and to detect byte precise overflow.
// The `canary` is used to see if `delta` and `strace` are not corrupted, while `canary_lo` can
// detect overflow into the `extra` padding (where the stack trace could remain valid)

#if (MI_PADDING)
typedef struct mi_padding_s {
  uint32_t canary;    // encoded block value to check validity of the padding (in case of overflow)
  uint32_t delta;     // padding bytes before the block. (mi_usable_size(p) - delta == exact allocated bytes)
  #if MI_PADDING_EXTRA > 0
  uint32_t canary_lo; // extra canary to detect initial overflow
  uint8_t  extra[MI_PADDING_EXTRA];
  #endif
  uint32_t canary;    // encoded block value to check validity of the delta (in case of overflow)
  uint32_t delta;     // padding bytes before the block. (mi_usable_size(p) - delta == exact allocated bytes)
  #if (MI_DEBUG_TRACE > 0)
  void*    strace[MI_DEBUG_TRACE_LEN];  // stack trace at allocation time
  #endif
} mi_padding_t;
#define MI_PADDING_SIZE    (sizeof(mi_padding_t))
#define MI_PADDING_WSIZE   ((MI_PADDING_SIZE + MI_INTPTR_SIZE - 1) / MI_INTPTR_SIZE)
#define MI_PADDING_MINSIZE (8)   // 2*sizeof(uint32_t)
#define MI_PADDING_SIZE    (sizeof(mi_padding_t))
#else
#define MI_PADDING_SIZE    0
#define MI_PADDING_WSIZE   0
#define MI_PADDING_MINSIZE (0)
#define MI_PADDING_SIZE    (0)
#endif

#define MI_PAGES_DIRECT (MI_SMALL_WSIZE_MAX + MI_PADDING_WSIZE + 1)
// add 2 more for minimal padding (MI_PADDING && !MI_DEBUG_TRACE && MI_PADDING_EXTRA==0)
// since this is used in secure mode, we optimize this case by allowing
// `heap_malloc_small` to also work with `MI_WSMALL_SIZE_MAX + MI_PADDING_MINSIZE` sizes.
// see `init.c` where all are initialized with an empty page and the check at `heap_malloc_small`.
#define MI_PAGES_DIRECT (MI_SMALL_WSIZE_MAX + 1 + 2)


// A heap owns a set of pages.
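To make the layout comment above concrete, here is a small standalone model of the padding bookkeeping. The struct mirrors mi_padding_s with both MI_PADDING_EXTRA and MI_DEBUG_TRACE enabled; the sizes in main are invented for illustration and are not mimalloc's actual values:

  #include <stdint.h>
  #include <stdio.h>

  #define MI_PADDING_EXTRA   64   /* default shown in the hunk when tracing is on */
  #define MI_DEBUG_TRACE_LEN 8

  typedef struct mi_padding_s {
    uint32_t canary_lo;                  /* guards the start of the extra area            */
    uint8_t  extra[MI_PADDING_EXTRA];    /* spare bytes so the trace survives small overflows */
    uint32_t canary;                     /* encoded value guarding delta and strace       */
    uint32_t delta;                      /* usable size minus delta == exact requested size */
    void*    strace[MI_DEBUG_TRACE_LEN]; /* stack trace captured at allocation time       */
  } mi_padding_t;

  int main(void) {
    size_t   requested = 30;             /* hypothetical request                          */
    size_t   usable    = 48;             /* hypothetical usable block size (padding excluded) */
    uint32_t delta     = (uint32_t)(usable - requested);  /* fill bytes before the padding struct */
    printf("padding struct: %zu bytes, delta: %u, exact size: %zu\n",
           sizeof(mi_padding_t), (unsigned)delta, usable - delta);
    return 0;
  }

The key invariant from the diff is the one stated in the delta comment: mi_usable_size(p) - delta recovers the exact requested allocation size.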
src/alloc.c
@@ -68,6 +68,13 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
  mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta));
  padding->canary = (uint32_t)(mi_ptr_encode(page,block,page->keys));
  padding->delta  = (uint32_t)(delta);
  #if MI_PADDING_EXTRA > 0
  padding->canary_lo = padding->canary;
  memset(padding->extra, 0, sizeof(padding->extra));
  #endif
  #if (MI_DEBUG_TRACE)
  _mi_stack_trace_capture(padding->strace, MI_DEBUG_TRACE_LEN, 2 /*frames to skip*/);
  #endif
  uint8_t* fill = (uint8_t*)padding - delta;
  const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta);  // set at most N initial padding bytes
  for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; }
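The hunk above seeds the `delta` fill area with MI_DEBUG_PADDING bytes right after the user data; mi_verify_padding later re-reads them on free to report how far a write overran the block. A minimal standalone sketch of that sentinel technique (names and the 0xDE value are illustrative, not taken from the header):

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  #define DEBUG_PADDING 0xDE   /* illustrative sentinel value */

  /* fill the unused tail of a block with a sentinel at allocation time */
  static void fill_tail(uint8_t* block, size_t used, size_t usable) {
    memset(block + used, DEBUG_PADDING, usable - used);
  }

  /* on free, report the offset of the first overwritten sentinel byte */
  static int check_tail(const uint8_t* block, size_t used, size_t usable) {
    for (size_t i = used; i < usable; i++) {
      if (block[i] != DEBUG_PADDING) {
        fprintf(stderr, "overflow detected: write after %zu bytes\n", i);
        return 0;
      }
    }
    return 1;
  }

  int main(void) {
    uint8_t block[48];
    fill_tail(block, 30, sizeof(block));
    block[33] = 0;                 /* simulate a write past the 30 requested bytes */
    check_tail(block, 30, sizeof(block));
    return 0;
  }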
@@ -80,15 +87,25 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap,
  mi_assert(heap != NULL);
  mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
  mi_assert(size <= MI_SMALL_SIZE_MAX);
  #if (MI_PADDING)
  void* p;
  #if (MI_PADDING)
  if (size == 0) {
    size = sizeof(void*);
  }
  #endif
  mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
  void* p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
  mi_assert_internal(p == NULL || mi_usable_size(p) >= size);
  #if MI_STAT>1
  #endif
  #if (MI_PADDING_EXTRA > 0 || MI_DEBUG_TRACE > 0)
  // with extra padding it is not guaranteed the size + MI_PADDING_SIZE <= MI_SMALL_SIZE_MAX + MI_PADDING_MINSIZE, so we need an extra check
  if (size + MI_PADDING_SIZE > MI_SMALL_SIZE_MAX) {
    p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero);
  }
  else
  #endif
  {
    mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
    p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
  }
  mi_assert_internal(p==NULL || mi_usable_size(p) >= size);
  #if MI_STAT>1
  if (p != NULL) {
    if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); }
    mi_heap_stat_increase(heap, malloc, mi_usable_size(p));

@@ -108,10 +125,11 @@ mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t si

// The main allocation function
extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
  if mi_likely(size <= MI_SMALL_SIZE_MAX) {
  if mi_likely(size + MI_PADDING_SIZE <= MI_SMALL_SIZE_MAX + MI_PADDING_MINSIZE) {
    return mi_heap_malloc_small_zero(heap, size, zero);
  }
  else {
  else
  {
    mi_assert(heap!=NULL);
    mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
    void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero);  // note: size can overflow but it is detected in malloc_generic
@@ -148,6 +166,143 @@ mi_decl_nodiscard mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept
}


// ---------------------------------------------------------------------------
// Check for heap block overflow by setting up padding at the end of the block
// ---------------------------------------------------------------------------

#if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST)
static mi_padding_t* mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) {
  *bsize = mi_page_usable_block_size(page);
  mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize);
  *delta = padding->delta;
  if ((uint32_t)mi_ptr_encode(page, block, page->keys) == padding->canary && *delta <= *bsize) {
    return padding;
  }
  else {
    return NULL;
  }
}

#if MI_DEBUG_TRACE > 0
static void _mi_show_block_trace(const mi_page_t* page, const mi_block_t* block, const char* msg) {
  size_t bsize;
  size_t delta;
  mi_padding_t* padding = mi_page_decode_padding(page, block, &delta, &bsize);
  if (padding != NULL) {
    _mi_stack_trace_print(msg, &padding->strace[0], MI_DEBUG_TRACE_LEN, block, bsize, bsize - delta);
  }
}
#else
static void _mi_show_block_trace(const mi_page_t* page, const mi_block_t* block, const char* msg) {
  MI_UNUSED(page); MI_UNUSED(block); MI_UNUSED(msg);
}
#endif

// Return the exact usable size of a block. (whereas `mi_page_usable_block_size` returns the total available size without padding)
static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
  size_t bsize;
  size_t delta;
  bool ok = (mi_page_decode_padding(page, block, &delta, &bsize) != NULL);
  mi_assert_internal(ok); mi_assert_internal(delta <= bsize);
  return (ok ? bsize - delta : 0);
}

static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, size_t* size, size_t* wrong) {
  size_t bsize;
  size_t delta;
  const mi_padding_t* padding = mi_page_decode_padding(page, block, &delta, &bsize);
  *size = *wrong = bsize;
  if (padding == NULL) return false;
  mi_assert_internal(bsize >= delta);
  *size = bsize - delta;
  uint8_t* fill = (uint8_t*)block + bsize - delta;
  const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta);  // check at most the first N padding bytes
  for (size_t i = 0; i < maxpad; i++) {
    if (fill[i] != MI_DEBUG_PADDING) {
      *wrong = bsize - delta + i;
      return false;
    }
  }
  #if MI_PADDING_EXTRA > 0
  if (padding->canary_lo != padding->canary) {
    *wrong = bsize;
    return false;
  }
  #endif
  return true;
}

static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
  size_t size;
  size_t wrong;
  if mi_unlikely(!mi_verify_padding(page,block,&size,&wrong)) {
    _mi_show_block_trace_with_predecessor(page, block, NULL);
    _mi_error_message(EFAULT, "buffer overflow in heap block %p of size %zu: write after %zu bytes\n", block, size, wrong );
  }
}

// When a non-thread-local block is freed, it becomes part of the thread delayed free
// list that is freed later by the owning heap. If the exact usable size is too small to
// contain the pointer for the delayed list, then shrink the padding (by decreasing delta)
// so it will later not trigger an overflow error in `mi_free_block`.
// Returns the originally allocated byte size.
static size_t mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
  size_t bsize;
  size_t delta;
  mi_padding_t* padding = mi_page_decode_padding(page, block, &delta, &bsize);
  mi_assert_internal(padding!=NULL);
  if (padding == NULL) return 0;
  mi_assert_internal(bsize > delta);
  if (bsize <= delta) return 0;
  const size_t avail = bsize - delta;
  if (avail >= min_size) return avail;  // usually already enough space
  mi_assert_internal(bsize >= min_size);
  if (bsize < min_size) return avail;   // should never happen
  size_t new_delta = (bsize - min_size);
  mi_assert_internal(new_delta < bsize);
  padding->delta = (uint32_t)new_delta;
  return avail;
}
#else
static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(page); MI_UNUSED(block);
}

static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(block);
  return mi_page_usable_block_size(page);
}

static size_t mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
  MI_UNUSED(block); MI_UNUSED(min_size);
  return mi_page_usable_block_size(page);
}

static void _mi_show_block_trace(const mi_page_t* page, const mi_block_t* block, const char* msg) {
  MI_UNUSED(page); MI_UNUSED(block); MI_UNUSED(msg);
}
#endif

static const mi_block_t* mi_block_predecessor(const mi_page_t* page, const mi_block_t* block) {
  const size_t bsize = page->xblock_size;
  mi_assert_internal(bsize > 0);
  if (bsize >= MI_HUGE_BLOCK_SIZE) return NULL;
  const mi_block_t* prev = (const mi_block_t*)((uint8_t*)block - bsize);
  uint8_t* pstart = _mi_segment_page_start(_mi_page_segment(page), page, bsize, NULL, NULL);
  if (pstart > (uint8_t*)prev) return NULL;
  return prev;
}

// Used if a free list is corrupted which is usually caused by the previous block(s)
void _mi_show_block_trace_with_predecessor(const mi_page_t* page, const mi_block_t* block, const char* msg) {
  const mi_block_t* prev = mi_block_predecessor(page,block);
  if (prev != NULL) {
    _mi_show_block_trace(page, prev, "predecessor block");
  }
  _mi_show_block_trace(page, block, msg);
}


// ------------------------------------------------------
// Check for double free in secure and debug mode
// This is somewhat expensive so only enabled for secure mode 4
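mi_padding_shrink above gives back part of the fill area when a cross-thread free needs room for the delayed-free list pointer, and now also reports the original usable size so the freed memory can be poisoned precisely. A tiny model of the delta arithmetic (all sizes invented; only the bsize/delta/min_size relation follows the diff):

  #include <stdint.h>
  #include <stdio.h>

  /* bsize: usable block size, delta: unused tail, min_size: space needed for the delayed-free pointer */
  static size_t padding_shrink(size_t bsize, uint32_t* delta, size_t min_size) {
    size_t avail = bsize - *delta;
    if (avail >= min_size) return avail;     /* usually already enough space            */
    *delta = (uint32_t)(bsize - min_size);   /* give back just enough of the fill bytes */
    return avail;                            /* caller still learns the original size   */
  }

  int main(void) {
    uint32_t delta = 12;                     /* 16-byte block, only 4 bytes really in use */
    size_t avail = padding_shrink(16, &delta, sizeof(void*));
    printf("originally %zu bytes in use, delta now %u\n", avail, (unsigned)delta);  /* 4 and 8 */
    return 0;
  }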
@@ -170,7 +325,8 @@ static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, con
      mi_list_contains(page, page->local_free, block) ||
      mi_list_contains(page, mi_page_thread_free(page), block))
  {
    _mi_error_message(EAGAIN, "double free detected of block %p with size %zu\n", block, mi_page_block_size(page));
    _mi_show_block_trace(page, block, NULL);
    _mi_error_message(EAGAIN, "double free detected of block %p with size %zu\n", block, mi_page_usable_size_of(page,block));
    return true;
  }
  return false;

@@ -179,7 +335,7 @@ static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, con
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
  mi_block_t* n = mi_block_nextx(page, block, page->keys);  // pretend it is freed, and get the decoded first field
  if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 &&   // quick check: aligned pointer?
      (n==NULL || mi_is_in_same_page(block, n)))  // quick check: in same page or NULL?
  {
    // Suspicious: decoded value in block is in the same page (or NULL) -- maybe a double free?
    // (continue in separate function to improve code generation)

@@ -195,88 +351,6 @@ static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block
}
#endif

// ---------------------------------------------------------------------------
// Check for heap block overflow by setting up padding at the end of the block
// ---------------------------------------------------------------------------

#if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST)
static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) {
  *bsize = mi_page_usable_block_size(page);
  const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize);
  *delta = padding->delta;
  return ((uint32_t)mi_ptr_encode(page,block,page->keys) == padding->canary && *delta <= *bsize);
}

// Return the exact usable size of a block.
static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
  size_t bsize;
  size_t delta;
  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
  mi_assert_internal(ok); mi_assert_internal(delta <= bsize);
  return (ok ? bsize - delta : 0);
}

static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, size_t* size, size_t* wrong) {
  size_t bsize;
  size_t delta;
  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
  *size = *wrong = bsize;
  if (!ok) return false;
  mi_assert_internal(bsize >= delta);
  *size = bsize - delta;
  uint8_t* fill = (uint8_t*)block + bsize - delta;
  const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta);  // check at most the first N padding bytes
  for (size_t i = 0; i < maxpad; i++) {
    if (fill[i] != MI_DEBUG_PADDING) {
      *wrong = bsize - delta + i;
      return false;
    }
  }
  return true;
}

static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
  size_t size;
  size_t wrong;
  if (!mi_verify_padding(page,block,&size,&wrong)) {
    _mi_error_message(EFAULT, "buffer overflow in heap block %p of size %zu: write after %zu bytes\n", block, size, wrong );
  }
}

// When a non-thread-local block is freed, it becomes part of the thread delayed free
// list that is freed later by the owning heap. If the exact usable size is too small to
// contain the pointer for the delayed list, then shrink the padding (by decreasing delta)
// so it will later not trigger an overflow error in `mi_free_block`.
static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
  size_t bsize;
  size_t delta;
  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
  mi_assert_internal(ok);
  if (!ok || (bsize - delta) >= min_size) return;  // usually already enough space
  mi_assert_internal(bsize >= min_size);
  if (bsize < min_size) return;  // should never happen
  size_t new_delta = (bsize - min_size);
  mi_assert_internal(new_delta < bsize);
  mi_padding_t* padding = (mi_padding_t*)((uint8_t*)block + bsize);
  padding->delta = (uint32_t)new_delta;
}
#else
static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(page);
  MI_UNUSED(block);
}

static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(block);
  return mi_page_usable_block_size(page);
}

static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
  MI_UNUSED(page);
  MI_UNUSED(block);
  MI_UNUSED(min_size);
}
#endif

// only maintain stats for smaller objects if requested
#if (MI_STAT>0)

@@ -331,9 +405,11 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
  // The padding check may access the non-thread-owned page for the key values.
  // that is safe as these are constant and the page won't be freed (as the block is not freed yet).
  mi_check_padding(page, block);
  mi_padding_shrink(page, block, sizeof(mi_block_t));  // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
  const size_t avail = mi_padding_shrink(page, block, sizeof(mi_block_t));  // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
  #if (MI_DEBUG!=0)
  memset(block, MI_DEBUG_FREED, mi_usable_size(block));
  memset(block, MI_DEBUG_FREED, avail);
  #else
  MI_UNUSED(avail);
  #endif

  // huge page segments are always abandoned and can be freed immediately

@@ -392,7 +468,7 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block
    if mi_unlikely(mi_check_is_double_free(page, block)) return;
    mi_check_padding(page, block);
    #if (MI_DEBUG!=0)
    memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
    memset(block, MI_DEBUG_FREED, mi_page_usable_block_size(page));
    #endif
    mi_block_set_next(page, block, page->local_free);
    page->local_free = block;

@@ -476,7 +552,7 @@ void mi_free(void* p) mi_attr_noexcept
    mi_check_padding(page, block);
    mi_stat_free(page, block);
    #if (MI_DEBUG!=0)
    memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
    memset(block, MI_DEBUG_FREED, mi_page_usable_block_size(page));
    #endif
    mi_block_set_next(page, block, page->local_free);
    page->local_free = block;
src/init.c
@@ -30,15 +30,8 @@ const mi_page_t _mi_page_empty = {
  NULL, NULL
};

#define MI_PAGE_EMPTY() ((mi_page_t*)&_mi_page_empty)

#if (MI_PADDING>0) && (MI_INTPTR_SIZE >= 8)
#define MI_SMALL_PAGES_EMPTY  { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() }
#elif (MI_PADDING>0)
#define MI_PAGE_EMPTY() ((mi_page_t*)&_mi_page_empty)
#define MI_SMALL_PAGES_EMPTY  { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() }
#else
#define MI_SMALL_PAGES_EMPTY  { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY() }
#endif


// Empty page queues for every bin

@@ -497,7 +490,7 @@ static void mi_allocator_done(void) {
// Called once by the process loader
static void mi_process_load(void) {
  mi_heap_main_init();
  #if defined(MI_TLS_RECURSE_GUARD)
  #if defined(__APPLE__) || defined(MI_TLS_RECURSE_GUARD)
  volatile mi_heap_t* dummy = _mi_heap_default;  // access TLS to allocate it before setting tls_initialized to true;
  MI_UNUSED(dummy);
  #endif

@@ -534,6 +527,25 @@ static void mi_detect_cpu_features(void) {
}
#endif

#if defined(_WIN32) && (MI_DEBUG_TRACE > 0)
#include <dbghelp.h>
static void mi_debug_init(void) {
  if (SymInitialize(GetCurrentProcess(), NULL, TRUE) != TRUE) {  // initialize here as it is single threaded.
    _mi_warning_message("unable to initialize debug symbol information (error 0x%x)", GetLastError());
  }
}
static void mi_debug_done(void) {
  SymCleanup(GetCurrentProcess());
}
#else
static void mi_debug_init(void) {
  // nothing
}
static void mi_debug_done(void) {
  // nothing
}
#endif

// Initialize the process; called by thread_init or the process loader
void mi_process_init(void) mi_attr_noexcept {
  // ensure we are called once

@@ -550,6 +562,7 @@ void mi_process_init(void) mi_attr_noexcept {
  _mi_verbose_message("debug level : %d\n", MI_DEBUG);
  #endif
  _mi_verbose_message("secure level: %d\n", MI_SECURE);
  mi_debug_init();
  mi_thread_init();

  #if defined(_WIN32) && !defined(MI_SHARED_LIB)

@@ -604,6 +617,7 @@ static void mi_process_done(void) {
    mi_stats_print(NULL);
  }
  mi_allocator_done();
  mi_debug_done();
  _mi_verbose_message("process done: 0x%zx\n", _mi_heap_main.thread_id);
  os_preloading = true;  // don't call the C runtime anymore
}
@@ -396,6 +396,93 @@ void _mi_assert_fail(const char* assertion, const char* fname, unsigned line, co
}
#endif

// --------------------------------------------------------
// Stack traces
// --------------------------------------------------------

#if (MI_DEBUG_TRACE > 0) && defined(_WIN32)
void _mi_stack_trace_capture(void** strace, size_t len, size_t skip) {
  CaptureStackBackTrace((DWORD)skip + 1, (DWORD)len, strace, NULL);
}

#include <dbghelp.h>
#pragma comment(lib,"dbghelp")
void _mi_stack_trace_print(const char* msg, void** strace, size_t len, const mi_block_t* block, size_t bsize, size_t avail) {
  _mi_fprintf(NULL, NULL, "trace %s at %p of size %zu (%zub usable), allocated at:\n",
              (msg==NULL ? "block" : msg), block, avail, bsize);
  uintptr_t uninit = 0;
  for( size_t i = 0; i < MI_INTPTR_SIZE; i++ ) {
    uninit = (uninit << 8) | MI_DEBUG_UNINIT;
  }
  if (strace == NULL || uninit == (uintptr_t)strace[0]) {
    _mi_fprintf(NULL, NULL, "  (uninitialized trace)\n");
  }
  else {
    PSYMBOL_INFO info = (PSYMBOL_INFO)_malloca(sizeof(SYMBOL_INFO) + 256 * sizeof(TCHAR));
    if (info==NULL) return;
    memset(info, 0, sizeof(info));
    info->MaxNameLen = 255;
    info->SizeOfStruct = sizeof(SYMBOL_INFO);
    HANDLE current_process = GetCurrentProcess();
    for (size_t i = 0; i < len && strace[i] != NULL; i++) {
      if (SymFromAddr(current_process, (DWORD64)(strace[i]), 0, info)) {
        _mi_fprintf(NULL, NULL, "  %2zu: %8p: %s\n", i, strace[i], info->Name);
      }
      else {
        _mi_fprintf(NULL, NULL, "  %2zu: %8p: <unknown address: error: 0x%04x>\n", i, strace[i], GetLastError());
      }
    }
  }
}
#elif (MI_DEBUG_TRACE > 0) && (defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__))
#include <execinfo.h>
#define MI_MAX_TRACE_LEN (64)
void _mi_stack_trace_capture(void** strace, size_t len, size_t skip) {
  if (_mi_preloading()) return;
  if (!mi_recurse_enter()) return;  // needed for pthreads
  void* trace[MI_MAX_TRACE_LEN];
  size_t trace_len = skip + len;
  if (trace_len > len) { trace_len = MI_MAX_TRACE_LEN; }
  memset(trace,0,trace_len);
  trace_len = backtrace(trace, trace_len);
  for (size_t i = 0; i < len; i++) {
    void* p = (i + skip < trace_len ? trace[i+skip] : NULL);
    strace[i] = p;
  }
  mi_recurse_exit();
}

void _mi_stack_trace_print(const char* msg, void** strace, size_t len, const mi_block_t* block, size_t bsize, size_t avail) {
  _mi_fprintf(NULL, NULL, "trace %s at %p of size %zu (%zub usable), allocated at:\n",
              (msg==NULL ? "block" : msg), block, avail, bsize);
  uintptr_t uninit = 0;
  for( size_t i = 0; i < MI_INTPTR_SIZE; i++ ) {
    uninit = (uninit << 8) | MI_DEBUG_UNINIT;
  }
  if (strace == NULL || uninit == (uintptr_t)strace[0]) {
    _mi_fprintf(NULL, NULL, "  (uninitialized trace)\n");
  }
  else {
    while( len > 0 && strace[len-1] == NULL) { len--; }
    if (len == 0) return;
    char** names = backtrace_symbols(strace, len);
    for (size_t i = 0; i < len && strace[i] != NULL; i++) {
      _mi_fprintf(NULL, NULL, "  %2zu: %8p: %s\n", i, strace[i], (names == NULL || names[i] == NULL ? "<unknown>" : names[i]));
    }
    // free(names);  // avoid potential recursion and leak the trace
  }
}
#else
void _mi_stack_trace_capture(void** strace, size_t len, size_t skip) {
  MI_UNUSED(strace); MI_UNUSED(len); MI_UNUSED(skip);
}
void _mi_stack_trace_print(const char* msg, void** strace, size_t len, const mi_block_t* block, size_t bsize, size_t avail) {
  MI_UNUSED(strace); MI_UNUSED(len); MI_UNUSED(block);
  MI_UNUSED(bsize); MI_UNUSED(avail); MI_UNUSED(msg);
}

#endif

// --------------------------------------------------------
// Errors
// --------------------------------------------------------
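The POSIX branch above is built on the execinfo backtrace/backtrace_symbols pair. A minimal standalone demo of those two calls, independent of mimalloc:

  #include <execinfo.h>
  #include <stdio.h>
  #include <stdlib.h>

  /* capture up to 16 return addresses and print their symbolic names */
  static void print_backtrace(void) {
    void* frames[16];
    int n = backtrace(frames, 16);                 /* fills frames[], returns the count     */
    char** names = backtrace_symbols(frames, n);   /* malloc'ed array of printable strings  */
    for (int i = 0; i < n; i++) {
      printf("  %2d: %p: %s\n", i, frames[i], (names != NULL ? names[i] : "<unknown>"));
    }
    free(names);  /* safe here; the diff leaks it on purpose to avoid recursing into the allocator */
  }

  int main(void) {
    print_backtrace();
    return 0;
  }

Symbol names generally only resolve when the executable exports its symbols (for example via -rdynamic), which is why the CMake hunk sets CMAKE_ENABLE_EXPORTS when MI_DEBUG_TRACE is on; on FreeBSD these functions live in libexecinfo, matching the find_library(MI_LIBEXECINFO execinfo) addition.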
@@ -15,6 +15,7 @@ if (NOT CMAKE_BUILD_TYPE)
  endif()
endif()


# Import mimalloc (if installed)
find_package(mimalloc 1.7 REQUIRED NO_SYSTEM_ENVIRONMENT_PATH)
message(STATUS "Found mimalloc installed at: ${MIMALLOC_LIBRARY_DIR} (${MIMALLOC_VERSION_DIR})")
@@ -9,8 +9,11 @@

static void double_free1();
static void double_free2();
static void corrupt_free();
static void double_free3();
static void corrupt_free1();
static void corrupt_free2();
static void block_overflow1();
static void block_overflow2();
static void invalid_free();
static void test_aslr(void);
static void test_process_info(void);

@@ -24,12 +27,15 @@ int main() {
  mi_version();
  mi_stats_reset();
  // detect double frees and heap corruption
  // double_free1();
  double_free1();
  // double_free2();
  // corrupt_free();
  // double_free3();
  // corrupt_free1();
  // corrupt_free2();
  // block_overflow1();
  // block_overflow2();
  // test_aslr();
  // invalid_free();
  invalid_free();
  // test_reserved();
  // negative_stat();
  test_heap_walk();

@@ -63,7 +69,8 @@ int main() {

static void invalid_free() {
  free((void*)0xBADBEEF);
  realloc((void*)0xBADBEEF,10);
  void* p = realloc((void*)0xBADBEEF,10);
  free(p);
}

static void block_overflow1() {

@@ -72,6 +79,15 @@ static void block_overflow1() {
  free(p);
}

#define OVF_SIZE 100

static void block_overflow2() {
  uint8_t* p = (uint8_t*)mi_malloc(30);
  memset(p+30, 0, OVF_SIZE);
  free(p);
}


// The double free samples come from ArcHeap [1] by Insu Yun (issue #161)
// [1]: https://arxiv.org/pdf/1903.00503.pdf

@@ -109,12 +125,35 @@ static void double_free2() {
  fprintf(stderr, "p1: %p-%p, p2: %p-%p\n", p[4], (uint8_t*)(p[4]) + 917504, p[1], (uint8_t*)(p[1]) + 786432);
}

static void double_free3() {
  void* p1 = malloc(32);
  void* p2 = malloc(32);
  void* p3 = malloc(32);
  free(p2);
  free(p1);
  free(p2);
  free(p3);
}

static void corrupt_free1() {
  void* p1 = malloc(32);
  void* p2 = malloc(32);
  void* p3 = malloc(32);
  free(p2);
  memset(p2, 0, 8);   // corrupt free list entry
  mi_collect(true);
  p2 = malloc(32);    // should trigger corrupted free list
  free(p1);
  free(p2);
  free(p3);
}

// Try to corrupt the heap through buffer overflow
#define N 256
#define SZ 64
#define OVF_SZ 32

static void corrupt_free() {
static void corrupt_free2() {
  void* p[N];
  // allocate
  for (int i = 0; i < N; i++) {

@@ -128,13 +167,18 @@ static void corrupt_free() {
  // try to corrupt the free list
  for (int i = 0; i < N; i++) {
    if (p[i] != NULL) {
      memset(p[i], 0, SZ+8);
      memset(p[i], 0, SZ+OVF_SZ);
    }
  }
  // allocate more.. trying to trigger an allocation from a corrupted entry
  // this may need many allocations to get there (if at all)
  for (int i = 0; i < 4096; i++) {
    malloc(SZ);
    void* p = malloc(SZ);
  }
  // free the rest
  for (int i = 0; i < N; i++) {
    free(p[i]);
    p[i] = NULL;
  }
}

@@ -181,7 +225,7 @@ static void test_reserved(void) {


static void negative_stat(void) {
  int* p = mi_malloc(60000);
  int* p = (int*)mi_malloc(60000);
  mi_stats_print_out(NULL, NULL);
  *p = 100;
  mi_free(p);
test/test-overflow.cpp (new file)
@@ -0,0 +1,37 @@
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <stdint.h>

#include <new>
#include <vector>
#include <future>
#include <iostream>

#include <thread>
#include <assert.h>

static void block_overflow1(void) {
  uint8_t* p = (uint8_t*)malloc(17);
  p[18] = 0;
  free(p);
  uint8_t* q = (uint8_t*)malloc(17);
  free(p);
  free(q);
}

#define OVF_SIZE 100

static void block_overflow2(void) {
  uint8_t* p = (uint8_t*)malloc(30);
  memset(p+30, 0, OVF_SIZE);
  free(p);
}

int main() {
  printf("test overflow..\n");
  block_overflow1();
  block_overflow2();
  printf("done..\n");
  return 0;
}
@@ -103,6 +103,7 @@ static void* alloc_items(size_t items, random_t r) {
    for (uintptr_t i = 0; i < items; i++) {
      p[i] = (items - i) ^ cookie;
    }
    // if (pick(r)%1000 <= 1) { p[items+1] = 42; }  // overflow
  }
  return p;
}