fix sizes in memory tracking and padding for huge alignments

commit 6dcebdc303 (parent e24c7c9de6)
CMakeLists.txt

@@ -6,7 +6,7 @@ set(CMAKE_CXX_STANDARD 17)
 option(MI_SECURE      "Use full security mitigations (like guard pages, allocation randomization, double-free mitigation, and free-list corruption detection)" OFF)
 option(MI_DEBUG_FULL  "Use full internal heap invariant checking in DEBUG mode (expensive)" OFF)
-option(MI_PADDING     "Enable padding to detect heap block overflow (used only in DEBUG mode or with Valgrind)" ON)
+option(MI_PADDING     "Enable padding to detect heap block overflow (always on in DEBUG mode or with Valgrind)" OFF)
 option(MI_OVERRIDE    "Override the standard malloc interface (e.g. define entry points for malloc() etc)" ON)
 option(MI_XMALLOC     "Enable abort() call on memory allocation failure by default" OFF)
 option(MI_SHOW_ERRORS "Show error and warning messages by default (only enabled by default in DEBUG mode)" OFF)
@@ -186,9 +186,9 @@ if(MI_DEBUG_FULL)
   list(APPEND mi_defines MI_DEBUG=3)   # full invariant checking
 endif()

-if(NOT MI_PADDING)
-  message(STATUS "Disable padding of heap blocks in debug mode (MI_PADDING=OFF)")
-  list(APPEND mi_defines MI_PADDING=0)
+if(MI_PADDING)
+  message(STATUS "Enable padding of heap blocks explicitly (MI_PADDING=ON)")
+  list(APPEND mi_defines MI_PADDING=1)
 endif()

 if(MI_XMALLOC)
include/mimalloc-internal.h

@@ -162,6 +162,7 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero);
 mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p);
 bool        _mi_free_delayed_block(mi_block_t* block);
 void        _mi_free_generic(const mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept;  // for runtime integration
+void        _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size);

 #if MI_DEBUG>1
 bool        _mi_page_is_valid(mi_page_t* page);
include/mimalloc-track.h

@@ -13,6 +13,7 @@ terms of the MIT license. A copy of the license can be found in the file
 // address sanitizer, or other memory checkers.
 // ------------------------------------------------------

 #if MI_VALGRIND

 #define MI_TRACK_ENABLED 1
@@ -23,8 +24,7 @@ terms of the MIT license. A copy of the license can be found in the file

 #define mi_track_malloc(p,size,zero)         VALGRIND_MALLOCLIKE_BLOCK(p,size,MI_PADDING_SIZE /*red zone*/,zero)
 #define mi_track_resize(p,oldsize,newsize)   VALGRIND_RESIZEINPLACE_BLOCK(p,oldsize,newsize,MI_PADDING_SIZE /*red zone*/)
-#define mi_track_free(p)                     VALGRIND_FREELIKE_BLOCK(p,MI_PADDING_SIZE /*red zone*/)
-#define mi_track_free_size(p,_size)          mi_track_free(p)
+#define mi_track_free_size(p,_size)          VALGRIND_FREELIKE_BLOCK(p,MI_PADDING_SIZE /*red zone*/)
 #define mi_track_mem_defined(p,size)         VALGRIND_MAKE_MEM_DEFINED(p,size)
 #define mi_track_mem_undefined(p,size)       VALGRIND_MAKE_MEM_UNDEFINED(p,size)
 #define mi_track_mem_noaccess(p,size)        VALGRIND_MAKE_MEM_NOACCESS(p,size)
@@ -38,7 +38,6 @@ terms of the MIT license. A copy of the license can be found in the file

 #define mi_track_malloc(p,size,zero)         ASAN_UNPOISON_MEMORY_REGION(p,size)
 #define mi_track_resize(p,oldsize,newsize)   ASAN_POISON_MEMORY_REGION(p,oldsize); ASAN_UNPOISON_MEMORY_REGION(p,newsize)
-#define mi_track_free(p)                     ASAN_POISON_MEMORY_REGION(p,mi_usable_size(p))
 #define mi_track_free_size(p,size)           ASAN_POISON_MEMORY_REGION(p,size)
 #define mi_track_mem_defined(p,size)         ASAN_UNPOISON_MEMORY_REGION(p,size)
 #define mi_track_mem_undefined(p,size)       ASAN_UNPOISON_MEMORY_REGION(p,size)
@@ -51,7 +50,6 @@ terms of the MIT license. A copy of the license can be found in the file

 #define mi_track_malloc(p,size,zero)
 #define mi_track_resize(p,oldsize,newsize)
-#define mi_track_free(p)
 #define mi_track_free_size(p,_size)
 #define mi_track_mem_defined(p,size)
 #define mi_track_mem_undefined(p,size)
@@ -59,4 +57,13 @@ terms of the MIT license. A copy of the license can be found in the file

 #endif

+#ifndef mi_track_free
+#define mi_track_free(p)                     mi_track_free_size(p,mi_usable_size(p));
+#endif
+
+#ifndef mi_track_resize
+#define mi_track_resize(p,oldsize,newsize)   mi_track_free_size(p,oldsize); mi_track_malloc(p,newsize,false)
+#endif
+
 #endif
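With those fallbacks in place, a tracker backend only has to supply `mi_track_malloc` and `mi_track_free_size`; the header derives `mi_track_free` and `mi_track_resize` from them, which is why the Valgrind and ASAN sections above could drop their own `mi_track_free`. A standalone sketch of the composition (the `printf` tracker and the `mi_usable_size` stub are hypothetical; only the two `#ifndef` fallbacks come from the diff):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

// stand-in for mimalloc's real usable-size query
static size_t mi_usable_size(const void* p) { (void)p; return 42; }

// hypothetical tracker backend: only the two primitive hooks are defined
#define mi_track_malloc(p,size,zero)  printf("alloc %p (%zu bytes, zero=%d)\n", (void*)(p), (size_t)(size), (int)(zero))
#define mi_track_free_size(p,size)    printf("free  %p (%zu bytes)\n", (void*)(p), (size_t)(size))

// the generic fallbacks added by this commit derive the rest:
#ifndef mi_track_free
#define mi_track_free(p)                     mi_track_free_size(p,mi_usable_size(p));
#endif
#ifndef mi_track_resize
#define mi_track_resize(p,oldsize,newsize)   mi_track_free_size(p,oldsize); mi_track_malloc(p,newsize,false)
#endif

int main(void) {
  int x;
  mi_track_malloc(&x, sizeof(x), false);
  mi_track_resize(&x, sizeof(x), 2*sizeof(x));  // expands to free_size(old) + malloc(new)
  mi_track_free(&x);                            // expands to free_size(usable_size)
  return 0;
}
```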
include/mimalloc-types.h

@@ -58,11 +58,16 @@ terms of the MIT license. A copy of the license can be found in the file
 #endif

 // Reserve extra padding at the end of each block to be more resilient against heap block overflows.
-// The padding can detect byte-precise buffer overflow on free.
-#if !defined(MI_PADDING) && (MI_DEBUG>=1 || MI_VALGRIND)
+// The padding can detect buffer overflow on free.
+#if !defined(MI_PADDING) && (MI_SECURE>=3 || MI_DEBUG>=1 || MI_VALGRIND || MI_ASAN)
 #define MI_PADDING  1
 #endif

+// Check padding bytes; allows byte-precise buffer overflow detection
+#if !defined(MI_PADDING_CHECK) && MI_PADDING && (MI_SECURE>=3 || MI_DEBUG>=1)
+#define MI_PADDING_CHECK 1
+#endif
+

 // Encoded free lists allow detection of corrupted free lists
 // and can detect buffer overflows, modify after free, and double `free`s.
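Read together, the two defaults form a two-tier scheme: `MI_PADDING` only reserves the trailer (enough for Valgrind or ASAN to treat it as a red zone), while `MI_PADDING_CHECK` additionally fills and verifies the padding bytes. A standalone preprocessor sketch of how the defaults resolve (the flag values are illustrative):

```c
#include <stdio.h>

#define MI_SECURE   0
#define MI_DEBUG    1   // e.g. a plain debug build
#define MI_VALGRIND 0
#define MI_ASAN     0

#if !defined(MI_PADDING) && (MI_SECURE>=3 || MI_DEBUG>=1 || MI_VALGRIND || MI_ASAN)
#define MI_PADDING 1
#endif
#if !defined(MI_PADDING_CHECK) && MI_PADDING && (MI_SECURE>=3 || MI_DEBUG>=1)
#define MI_PADDING_CHECK 1
#endif
#ifndef MI_PADDING
#define MI_PADDING 0
#endif
#ifndef MI_PADDING_CHECK
#define MI_PADDING_CHECK 0
#endif

int main(void) {
  // debug build: 1 1; a Valgrind-only build would give 1 0
  // (red zone reserved for the tool, but mimalloc itself does not scan it)
  printf("MI_PADDING=%d MI_PADDING_CHECK=%d\n", MI_PADDING, MI_PADDING_CHECK);
  return 0;
}
```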
@@ -282,8 +287,8 @@ typedef struct mi_page_s {
   uint32_t              xblock_size;   // size available in each block (always `>0`)
   mi_block_t*           local_free;    // list of deferred free blocks by this thread (migrates to `free`)

-  #ifdef MI_ENCODE_FREELIST
-  uintptr_t             keys[2];       // two random keys to encode the free lists (see `_mi_block_next`)
+  #if (MI_ENCODE_FREELIST || MI_PADDING)
+  uintptr_t             keys[2];       // two random keys to encode the free lists (see `_mi_block_next`) or padding canary
   #endif

   _Atomic(mi_thread_free_t) xthread_free;  // list of deferred free blocks freed by other threads
src/alloc-aligned.c

@@ -46,7 +46,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
   oversize = (size <= MI_SMALL_SIZE_MAX ? MI_SMALL_SIZE_MAX + 1 /* ensure we use generic malloc path */ : size);
   p = _mi_heap_malloc_zero_ex(heap, oversize, false, alignment);  // the page block size should be large enough to align in the single huge page block
+  // zero afterwards as only the area from the aligned_p may be committed!
   if (p == NULL) return NULL;
 }
 else {
   // otherwise over-allocate
@@ -61,7 +61,9 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
   mi_assert_internal(adjust < alignment);
   void* aligned_p = (void*)((uintptr_t)p + adjust);
   if (aligned_p != p) {
-    mi_page_set_has_aligned(_mi_ptr_page(p), true);
+    mi_page_t* page = _mi_ptr_page(p);
+    mi_page_set_has_aligned(page, true);
+    _mi_padding_shrink(page, (mi_block_t*)p, adjust + size);
   }

   mi_assert_internal(mi_page_usable_block_size(_mi_ptr_page(p)) >= adjust + size);
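A sketch of the `adjust` arithmetic at work (the addresses and sizes are illustrative, not from the diff):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  uintptr_t p = 0x7f0001;      // hypothetical (unaligned) block start
  size_t size = 100;           // requested size
  size_t alignment = 0x1000;   // requested 4 KiB alignment (a power of two)
  uintptr_t adjust = (alignment - (p % alignment)) % alignment;
  uintptr_t aligned_p = p + adjust;
  assert(adjust < alignment && aligned_p % alignment == 0);
  // The bytes [p, p+adjust) are skipped, so at least `adjust + size` bytes
  // must stay usable from the block start; that is what the
  // `_mi_padding_shrink(page, p, adjust + size)` call above guarantees.
  printf("adjust=%zu, usable needed=%zu\n", (size_t)adjust, (size_t)adjust + size);
  return 0;
}
```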
@@ -80,7 +82,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*

 #if MI_TRACK_ENABLED
   if (p != aligned_p) {
-    mi_track_free(p);
+    mi_track_free_size(p, oversize);
     mi_track_malloc(aligned_p, size, zero);
   }
   else {
src/alloc.c
@@ -70,20 +70,22 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero)
   }
 #endif

-#if (MI_PADDING > 0) && defined(MI_ENCODE_FREELIST) && !MI_TRACK_ENABLED
+#if MI_PADDING // && !MI_TRACK_ENABLED
   mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page));
   ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - MI_PADDING_SIZE));
-  #if (MI_DEBUG>1)
+  #if (MI_DEBUG>=2)
   mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta));
+  mi_track_mem_defined(padding,sizeof(mi_padding_t));  // note: re-enable since mi_page_usable_block_size may set noaccess
   #endif
   padding->canary = (uint32_t)(mi_ptr_encode(page,block,page->keys));
   padding->delta  = (uint32_t)(delta);
+  #if MI_PADDING_CHECK
+  if (!mi_page_is_huge(page)) {
     uint8_t* fill = (uint8_t*)padding - delta;
     const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta);  // set at most N initial padding bytes
     for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; }
+  }
+  #endif
 #endif

   return block;
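For orientation, a standalone sketch of the block layout this code maintains. The struct mirrors `mi_padding_t` (a `canary`/`delta` pair at the end of the block); the sizes, the constant canary, and the full-slack fill are illustrative (the diff caps the fill at `MI_MAX_ALIGN_SIZE` initial bytes):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { uint32_t canary; uint32_t delta; } pad_t;  // mirrors mi_padding_t
#define FILL_BYTE 0xDE  // stand-in for MI_DEBUG_PADDING

int main(void) {
  // a block whose usable size is 48 bytes, of which the user requested 40
  _Alignas(uint32_t) uint8_t block[48 + sizeof(pad_t)];
  size_t bsize = 48, requested = 40;
  pad_t* pad = (pad_t*)(block + bsize);
  pad->canary = 0xC0FFEEu;                           // really mi_ptr_encode(page,block,page->keys)
  pad->delta  = (uint32_t)(bsize - requested);       // 8 bytes of slack
  memset(block + requested, FILL_BYTE, pad->delta);  // fill slack for byte-precise checks
  // on free, the MI_PADDING_CHECK path verifies the fill bytes are intact:
  uint8_t* fill = (uint8_t*)pad - pad->delta;
  for (size_t i = 0; i < pad->delta; i++) {
    if (fill[i] != FILL_BYTE) { printf("overflow at byte %zu\n", requested + i); return 1; }
  }
  printf("padding intact; usable size = %zu\n", (size_t)(bsize - pad->delta));
  return 0;
}
```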
@@ -96,21 +98,23 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, size_t size, bool zero)
   mi_assert(heap->thread_id == 0 || heap->thread_id == tid);  // heaps are thread local
 #endif
   mi_assert(size <= MI_SMALL_SIZE_MAX);
 #if (MI_PADDING)
-  if (size == 0) {
-    size = sizeof(void*);
-  }
+  if (size == 0) { size = sizeof(void*); }
 #endif
   mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
   void* p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
+#if MI_PADDING
+  mi_assert_internal(p == NULL || mi_usable_size(p) == size);
+#else
   mi_assert_internal(p == NULL || mi_usable_size(p) >= size);
+#endif
 #if MI_STAT>1
   if (p != NULL) {
     if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); }
     mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
   }
 #endif
-  mi_track_malloc(p,size,zero);
+  if (p!=NULL) { mi_track_malloc(p,size,zero); }
   return p;
 }
@@ -133,14 +137,18 @@ extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment)
   mi_assert(heap!=NULL);
   mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id());  // heaps are thread local
   void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero, huge_alignment);  // note: size can overflow but it is detected in malloc_generic
+#if MI_PADDING
+  mi_assert_internal(p == NULL || mi_usable_size(p) == size);
+#else
   mi_assert_internal(p == NULL || mi_usable_size(p) >= size);
+#endif
 #if MI_STAT>1
   if (p != NULL) {
     if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); }
     mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
   }
 #endif
-  mi_track_malloc(p,size,zero);
+  if (p!=NULL) { mi_track_malloc(p,size,zero); }
   return p;
 }
 }
@@ -225,7 +233,7 @@ static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block)
 // ---------------------------------------------------------------------------
 // Check for heap block overflow by setting up padding at the end of the block
 // ---------------------------------------------------------------------------

-#if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST) && !MI_TRACK_ENABLED
+#if MI_PADDING // && !MI_TRACK_ENABLED
 static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) {
   *bsize = mi_page_usable_block_size(page);
   const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize);
@@ -249,6 +257,40 @@ static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
   return (ok ? bsize - delta : 0);
 }

+// When a non-thread-local block is freed, it becomes part of the thread delayed free
+// list that is freed later by the owning heap. If the exact usable size is too small to
+// contain the pointer for the delayed list, then shrink the padding (by decreasing delta)
+// so it will later not trigger an overflow error in `mi_free_block`.
+void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
+  size_t bsize;
+  size_t delta;
+  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
+  mi_assert_internal(ok);
+  if (!ok || (bsize - delta) >= min_size) return;  // usually already enough space
+  mi_assert_internal(bsize >= min_size);
+  if (bsize < min_size) return;  // should never happen
+  size_t new_delta = (bsize - min_size);
+  mi_assert_internal(new_delta < bsize);
+  mi_padding_t* padding = (mi_padding_t*)((uint8_t*)block + bsize);
+  mi_track_mem_defined(padding,sizeof(mi_padding_t));
+  padding->delta = (uint32_t)new_delta;
+  mi_track_mem_noaccess(padding,sizeof(mi_padding_t));
+}
+#else
+static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
+  MI_UNUSED(block);
+  return mi_page_usable_block_size(page);
+}
+
+void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
+  MI_UNUSED(page);
+  MI_UNUSED(block);
+  MI_UNUSED(min_size);
+}
+#endif
+
+#if MI_PADDING && MI_PADDING_CHECK
+
 static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, size_t* size, size_t* wrong) {
   size_t bsize;
   size_t delta;
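A worked instance of the shrink arithmetic above (the numbers are illustrative; `min_size` stands for `sizeof(mi_block_t)`, the 8-byte delayed-free link the comment refers to):

```c
#include <stddef.h>
#include <stdio.h>

int main(void) {
  size_t bsize    = 16;  // usable block size as recorded in the padding
  size_t delta    = 12;  // current slack: the user allocation was only 4 bytes
  size_t min_size = 8;   // sizeof(mi_block_t): room needed for the delayed-free link
  if ((bsize - delta) < min_size) {  // 4 < 8: writing the link would look like an overflow
    delta = bsize - min_size;        // shrink slack to 8 so 8 bytes stay "usable"
  }
  printf("usable = %zu (>= %zu)\n", bsize - delta, min_size);  // usable = 8
  return 0;
}
```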
@@ -281,39 +323,13 @@ static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
   }
 }

-// When a non-thread-local block is freed, it becomes part of the thread delayed free
-// list that is freed later by the owning heap. If the exact usable size is too small to
-// contain the pointer for the delayed list, then shrink the padding (by decreasing delta)
-// so it will later not trigger an overflow error in `mi_free_block`.
-static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
-  size_t bsize;
-  size_t delta;
-  bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
-  mi_assert_internal(ok);
-  if (!ok || (bsize - delta) >= min_size) return;  // usually already enough space
-  mi_assert_internal(bsize >= min_size);
-  if (bsize < min_size) return;  // should never happen
-  size_t new_delta = (bsize - min_size);
-  mi_assert_internal(new_delta < bsize);
-  mi_padding_t* padding = (mi_padding_t*)((uint8_t*)block + bsize);
-  padding->delta = (uint32_t)new_delta;
-}
 #else

 static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
   MI_UNUSED(page);
   MI_UNUSED(block);
 }

-static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
-  MI_UNUSED(block);
-  return mi_page_usable_block_size(page);
-}
-
-static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
-  MI_UNUSED(page);
-  MI_UNUSED(block);
-  MI_UNUSED(min_size);
-}
 #endif

 // only maintain stats for smaller objects if requested
@@ -382,7 +398,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block)
   // The padding check may access the non-thread-owned page for the key values.
   // That is safe as these are constant and the page won't be freed (as the block is not freed yet).
   mi_check_padding(page, block);
-  mi_padding_shrink(page, block, sizeof(mi_block_t));   // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
+  _mi_padding_shrink(page, block, sizeof(mi_block_t));  // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection

   mi_segment_t* const segment = _mi_page_segment(page);
   if (segment->page_kind == MI_PAGE_HUGE) {
@@ -677,9 +693,10 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero)
   // (this means that returning NULL always indicates an error, and `p` will not have been freed in that case.)
   const size_t size = _mi_usable_size(p,"mi_realloc");  // also works if p == NULL (with size 0)
   if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) {  // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0)
-    // todo: adjust potential padding to reflect the new size?
-    mi_track_free_size(p, size);
-    mi_track_malloc(p,newsize,true);
+    mi_assert_internal(p!=NULL);
+    // todo: do not track as the usable size is still the same in the free; adjust potential padding?
+    // mi_track_free(p);
+    // mi_track_malloc(p,newsize,true);
     return p;  // reallocation still fits and not more than 50% waste
   }
   void* newp = mi_heap_malloc(heap,newsize);
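A standalone sketch of the in-place test above (`fits_in_place` is a hypothetical helper; `size` is the block's current usable size, `newsize` the requested one):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool fits_in_place(size_t size, size_t newsize) {
  // shrink or same-size requests stay in place, as long as no more than half
  // of the block would be wasted; newsize must be > 0 so realloc(NULL,0)
  // still returns NULL through the normal allocation path
  return (newsize <= size && newsize >= (size / 2) && newsize > 0);
}

int main(void) {
  printf("%d\n", fits_in_place(100, 80));  // 1: shrink in place, 20% waste
  printf("%d\n", fits_in_place(100, 40));  // 0: would waste 60%, so reallocate
  printf("%d\n", fits_in_place(100, 0));   // 0: size 0 takes the normal path
  return 0;
}
```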
src/init.c

@@ -22,7 +22,7 @@ const mi_page_t _mi_page_empty = {
   0,        // used
   0,        // xblock_size
   NULL,     // local_free
-  #if MI_ENCODE_FREELIST
+  #if (MI_PADDING || MI_ENCODE_FREELIST)
   { 0, 0 },
   #endif
   MI_ATOMIC_VAR_INIT(0),  // xthread_free
src/page.c

@@ -659,7 +659,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi_tld_t* tld)
   mi_assert_internal(page_size / block_size < (1L<<16));
   page->reserved = (uint16_t)(page_size / block_size);
   mi_assert_internal(page->reserved > 0);
-  #ifdef MI_ENCODE_FREELIST
+  #if (MI_PADDING || MI_ENCODE_FREELIST)
   page->keys[0] = _mi_heap_random_next(heap);
   page->keys[1] = _mi_heap_random_next(heap);
   #endif
@@ -677,7 +677,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi_tld_t* tld)
   mi_assert_internal(page->prev == NULL);
   mi_assert_internal(page->retire_expire == 0);
   mi_assert_internal(!mi_page_has_aligned(page));
-  #if (MI_ENCODE_FREELIST)
+  #if (MI_PADDING || MI_ENCODE_FREELIST)
   mi_assert_internal(page->keys[0] != 0);
   mi_assert_internal(page->keys[1] != 0);
   #endif
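This is why `page->keys` must now be initialized (and asserted non-zero) whenever `MI_PADDING` is on, even without encoded free lists: the padding canary written in `_mi_page_malloc` is derived from these keys via `mi_ptr_encode`. A standalone sketch of a keyed canary (the mixing function here is illustrative, not mimalloc's actual `mi_ptr_encode`):

```c
#include <stdint.h>
#include <stdio.h>

// illustrative keyed mix: XOR with one key, add the other
static uintptr_t ptr_encode(const void* p, const uintptr_t keys[2]) {
  return ((uintptr_t)p ^ keys[1]) + keys[0];
}

int main(void) {
  // per-page random keys (would come from _mi_heap_random_next)
  uintptr_t keys[2] = { 0x9E3779B97F4A7C15u, 0xBF58476D1CE4E5B9u };
  int block;
  uint32_t canary = (uint32_t)ptr_encode(&block, keys);
  printf("canary=%08x\n", (unsigned)canary);  // stored in the padding; re-derived and compared on free
  return 0;
}
```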
@@ -51,7 +51,7 @@ bool test_stl_allocator2(void);
 // ---------------------------------------------------------------------------
 int main(void) {
   mi_option_disable(mi_option_verbose);

   // ---------------------------------------------------
   // Malloc
   // ---------------------------------------------------
@@ -149,7 +149,8 @@ int main(void) {
   for (size_t align = 1; align <= MI_ALIGNMENT_MAX && ok; align *= 2) {
     void* ps[8];
     for (int i = 0; i < 8 && ok; i++) {
-      ps[i] = mi_malloc_aligned(align*13 /*size*/, align);
+      ps[i] = mi_malloc_aligned(align*13  // size
+                                , align);
       if (ps[i] == NULL || (uintptr_t)(ps[i]) % align != 0) {
         ok = false;
       }