Merge branch 'dev-guarded' into dev

commit 4234a9bd9d
Author: Daan Leijen
Date:   2024-08-21 11:30:33 -07:00

14 changed files with 264 additions and 33 deletions

CMakeLists.txt

@@ -26,6 +26,7 @@ option(MI_BUILD_OBJECT "Build object library" ON)
option(MI_BUILD_TESTS "Build test executables" ON)
option(MI_DEBUG_TSAN "Build with thread sanitizer (needs clang)" OFF)
option(MI_DEBUG_UBSAN "Build with undefined-behavior sanitizer (needs clang++)" OFF)
+option(MI_DEBUG_GUARDED "Build with guard pages behind certain object allocations (implies MI_NO_PADDING=ON)" OFF)
option(MI_SKIP_COLLECT_ON_EXIT "Skip collecting memory on program exit" OFF)
option(MI_NO_PADDING "Force no use of padding even in DEBUG mode etc." OFF)
option(MI_INSTALL_TOPLEVEL "Install directly into $CMAKE_INSTALL_PREFIX instead of PREFIX/lib/mimalloc-version" OFF)

@@ -199,6 +200,15 @@ if(MI_TRACK_ETW)
  endif()
endif()

+if(MI_DEBUG_GUARDED)
+  message(STATUS "Compile guard pages behind certain object allocations (MI_DEBUG_GUARDED=ON)")
+  list(APPEND mi_defines MI_DEBUG_GUARDED=1)
+  if(NOT MI_NO_PADDING)
+    message(STATUS "  Disabling padding due to guard pages (MI_NO_PADDING=ON)")
+    set(MI_NO_PADDING ON)
+  endif()
+endif()
+
if(MI_SEE_ASM)
  message(STATUS "Generate assembly listings (MI_SEE_ASM=ON)")
  list(APPEND mi_cflags -save-temps)
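Note: with this change guard pages are strictly opt-in at build time; configuring with -DMI_DEBUG_GUARDED=ON adds the MI_DEBUG_GUARDED=1 compile definition and forces MI_NO_PADDING=ON, since the debug padding at the end of each block and a trailing guard page cannot coexist.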

include/mimalloc.h

@@ -366,6 +366,8 @@ typedef enum mi_option_e {
  mi_option_disallow_arena_alloc,   // 1 = do not use arena's for allocation (except if using specific arena id's)
  mi_option_retry_on_oom,           // retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries. (only on windows)
  mi_option_visit_abandoned,        // allow visiting heap blocks from abandoned threads (=0)
+ mi_option_debug_guarded_min,      // only used when building with MI_DEBUG_GUARDED: minimal rounded object size for guarded objects (=0)
+ mi_option_debug_guarded_max,      // only used when building with MI_DEBUG_GUARDED: maximal rounded object size for guarded objects (=0)
  _mi_option_last,
  // legacy option names
  mi_option_large_os_pages = mi_option_allow_large_os_pages,
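Note: a minimal sketch of how these two options could be driven at runtime (assuming the library was built with MI_DEBUG_GUARDED; the same values can also come from the MIMALLOC_DEBUG_GUARDED_MIN/MAX environment variables, and the 64/4096 range below is purely illustrative):

  #include <mimalloc.h>

  int main(void) {
    // guard every allocation whose rounded size falls in [64, 4096] bytes;
    // the 0/0 defaults leave guarding off entirely
    mi_option_set(mi_option_debug_guarded_min, 64);
    mi_option_set(mi_option_debug_guarded_max, 4096);
    char* p = (char*)mi_malloc(100);
    // writing past mi_usable_size(p) would now fault on the guard page
    mi_free(p);
    return 0;
  }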

include/mimalloc/internal.h

@@ -61,6 +61,7 @@ void _mi_warning_message(const char* fmt, ...);
void _mi_verbose_message(const char* fmt, ...);
void _mi_trace_message(const char* fmt, ...);
void _mi_options_init(void);
+long _mi_option_get_fast(mi_option_t option);
void _mi_error_message(int err, const char* fmt, ...);

// random.c

@@ -322,6 +323,7 @@ static inline uintptr_t _mi_align_up(uintptr_t sz, size_t alignment) {
  }
}
+
// Align a pointer upwards
static inline void* mi_align_up_ptr(void* p, size_t alignment) {
  return (void*)_mi_align_up((uintptr_t)p, alignment);
}

@@ -593,6 +595,15 @@ static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) {
  page->flags.x.has_aligned = has_aligned;
}
+#if MI_DEBUG_GUARDED
+static inline bool mi_page_has_guarded(const mi_page_t* page) {
+  return page->flags.x.has_guarded;
+}
+static inline void mi_page_set_has_guarded(mi_page_t* page, bool has_guarded) {
+  page->flags.x.has_guarded = has_guarded;
+}
+#endif
+
/* -------------------------------------------------------------------
  Encoding/Decoding the free list next pointers

include/mimalloc/types.h

@@ -72,6 +72,13 @@ terms of the MIT license. A copy of the license can be found in the file
#endif
#endif

+// Use guard pages behind objects of a certain size (set by the MIMALLOC_DEBUG_GUARDED_MIN/MAX options)
+// Padding should be disabled when using guard pages
+// #define MI_DEBUG_GUARDED 1
+#if defined(MI_DEBUG_GUARDED)
+#define MI_PADDING 0
+#endif
+
// Reserve extra padding at the end of each block to be more resilient against heap block overflows.
// The padding can detect buffer overflow on free.
#if !defined(MI_PADDING) && (MI_SECURE>=3 || MI_DEBUG>=1 || (MI_TRACK_VALGRIND || MI_TRACK_ASAN || MI_TRACK_ETW))

@@ -243,15 +250,17 @@ typedef union mi_page_flags_s {
  struct {
    uint8_t in_full : 1;
    uint8_t has_aligned : 1;
+   uint8_t has_guarded : 1; // only used with MI_DEBUG_GUARDED
  } x;
} mi_page_flags_t;
#else
// under thread sanitizer, use a byte for each flag to suppress warning, issue #130
typedef union mi_page_flags_s {
- uint16_t full_aligned;
+ uint32_t full_aligned;
  struct {
    uint8_t in_full;
    uint8_t has_aligned;
+   uint8_t has_guarded; // only used with MI_DEBUG_GUARDED
  } x;
} mi_page_flags_t;
#endif
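Note on the second (thread-sanitizer) union above: with a third one-byte flag the struct no longer fits in the old 16-bit full_aligned overlay, which must cover all flag bytes at once, hence the widening to uint32_t. A compile-time check of that invariant, as a standalone sketch (not part of the commit):

  #include <stdint.h>

  typedef union mi_page_flags_s {
    uint32_t full_aligned;   // reads/writes all three flag bytes at once
    struct { uint8_t in_full; uint8_t has_aligned; uint8_t has_guarded; } x;
  } mi_page_flags_t;

  _Static_assert(sizeof(mi_page_flags_t) == sizeof(uint32_t), "flag bytes fit the overlay");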

src/alloc-aligned.c

@@ -20,8 +20,12 @@ static bool mi_malloc_is_naturally_aligned( size_t size, size_t alignment ) {
  mi_assert_internal(_mi_is_power_of_two(alignment) && (alignment > 0));
  if (alignment > size) return false;
  if (alignment <= MI_MAX_ALIGN_SIZE) return true;
+ #if MI_DEBUG_GUARDED
+ return false;
+ #else
  const size_t bsize = mi_good_size(size);
  return (bsize <= MI_MAX_ALIGN_GUARANTEE && (bsize & (alignment-1)) == 0);
+ #endif
}

// Fallback aligned allocation that over-allocates -- split out for better codegen
@@ -38,9 +42,9 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t
  // first (and single) page such that the segment info is `MI_SEGMENT_SIZE` bytes before it (so it can be found by aligning the pointer down)
  if mi_unlikely(offset != 0) {
    // todo: cannot support offset alignment for very large alignments yet
-#if MI_DEBUG > 0
+   #if MI_DEBUG > 0
    _mi_error_message(EOVERFLOW, "aligned allocation with a very large alignment cannot be used with an alignment offset (size %zu, alignment %zu, offset %zu)\n", size, alignment, offset);
-#endif
+   #endif
    return NULL;
  }
  oversize = (size <= MI_SMALL_SIZE_MAX ? MI_SMALL_SIZE_MAX + 1 /* ensure we use generic malloc path */ : size);
@@ -54,7 +58,8 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t
    p = _mi_heap_malloc_zero(heap, oversize, zero);
    if (p == NULL) return NULL;
  }
+ mi_page_t* page = _mi_ptr_page(p);

  // .. and align within the allocation
  const uintptr_t align_mask = alignment - 1;  // for any x, `(x & align_mask) == (x % alignment)`
  const uintptr_t poffset = ((uintptr_t)p + offset) & align_mask;
@@ -62,17 +67,18 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t
  mi_assert_internal(adjust < alignment);
  void* aligned_p = (void*)((uintptr_t)p + adjust);
  if (aligned_p != p) {
-   mi_page_t* page = _mi_ptr_page(p);
    mi_page_set_has_aligned(page, true);
    _mi_padding_shrink(page, (mi_block_t*)p, adjust + size);
  }
  // todo: expand padding if overallocated ?
- mi_assert_internal(mi_page_usable_block_size(_mi_ptr_page(p)) >= adjust + size);
- mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_page(aligned_p), aligned_p));
+ mi_assert_internal(mi_page_usable_block_size(page) >= adjust + size);
  mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
  mi_assert_internal(mi_usable_size(aligned_p)>=size);
  mi_assert_internal(mi_usable_size(p) == mi_usable_size(aligned_p)+adjust);
+ #if !MI_DEBUG_GUARDED
+ mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_page(aligned_p), aligned_p));
+ #endif

  // now zero the block if needed
  if (alignment > MI_BLOCK_ALIGNMENT_MAX) {
@@ -133,6 +139,7 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
    return NULL;
  }

+ #if !MI_DEBUG_GUARDED
  // try first if there happens to be a small block available with just the right alignment
  if mi_likely(size <= MI_SMALL_SIZE_MAX && alignment <= size) {
    const uintptr_t align_mask = alignment-1;  // for any x, `(x & align_mask) == (x % alignment)`

@@ -153,6 +160,7 @@
      }
    }
  }
+ #endif

  // fallback to generic aligned allocation
  return mi_heap_malloc_zero_aligned_at_generic(heap, size, alignment, offset, zero);
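Note: both changes in this file have the same motivation. A guarded object is placed at an offset inside an over-allocated block so that it ends exactly at the protected page, which breaks the usual guarantee that a block of a suitable size class is naturally aligned; guarded builds therefore skip the small-block fast path and always take the generic over-allocation path for aligned requests.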

src/alloc.c

@@ -31,18 +31,22 @@ terms of the MIT license. A copy of the license can be found in the file
extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept
{
  mi_assert_internal(page->block_size == 0 /* empty heap */ || mi_page_block_size(page) >= size);
+
+ // check the free list
  mi_block_t* const block = page->free;
  if mi_unlikely(block == NULL) {
    return _mi_malloc_generic(heap, size, zero, 0);
  }
  mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
+
  // pop from the free list
  page->free = mi_block_next(page, block);
  page->used++;
  mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
- mi_assert_internal(_mi_is_aligned(block, MI_MAX_ALIGN_SIZE));
+ mi_assert_internal(page->block_size < MI_MAX_ALIGN_SIZE || _mi_is_aligned(block, MI_MAX_ALIGN_SIZE));
  #if MI_DEBUG>3
- if (page->free_is_zero) {
+ if (page->free_is_zero && size > sizeof(*block)) {
    mi_assert_expensive(mi_mem_is_zero(block+1,size - sizeof(*block)));
  }
  #endif
@@ -55,7 +59,10 @@ extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_
  // zero the block? note: we need to zero the full block size (issue #63)
  if mi_unlikely(zero) {
    mi_assert_internal(page->block_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic)
+   mi_assert_internal(!mi_page_is_huge(page));
+   #if MI_PADDING
    mi_assert_internal(page->block_size >= MI_PADDING_SIZE);
+   #endif
    if (page->free_is_zero) {
      block->next = 0;
      mi_track_mem_defined(block, page->block_size - MI_PADDING_SIZE);
@@ -114,17 +121,27 @@ extern void* _mi_page_malloc_zeroed(mi_heap_t* heap, mi_page_t* page, size_t siz
  return _mi_page_malloc_zero(heap,page,size,true);
}

+#if MI_DEBUG_GUARDED
+static mi_decl_restrict void* mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
+static inline bool mi_heap_malloc_use_guarded(size_t size, bool has_huge_alignment);
+static inline bool mi_heap_malloc_small_use_guarded(size_t size);
+#endif
+
static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
  mi_assert(heap != NULL);
+ mi_assert(size <= MI_SMALL_SIZE_MAX);
  #if MI_DEBUG
  const uintptr_t tid = _mi_thread_id();
  mi_assert(heap->thread_id == 0 || heap->thread_id == tid); // heaps are thread local
  #endif
- mi_assert(size <= MI_SMALL_SIZE_MAX);
- #if (MI_PADDING)
+ #if (MI_PADDING || MI_DEBUG_GUARDED)
  if (size == 0) { size = sizeof(void*); }
  #endif
+ #if MI_DEBUG_GUARDED
+ if (mi_heap_malloc_small_use_guarded(size)) { return mi_heap_malloc_guarded(heap, size, zero); }
+ #endif

+ // get page in constant time, and allocate from it
  mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
  void* const p = _mi_page_malloc_zero(heap, page, size + MI_PADDING_SIZE, zero);
  mi_track_malloc(p,size,zero);
@@ -154,15 +171,21 @@ mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t si
// The main allocation function
extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept {
+ // fast path for small objects
  if mi_likely(size <= MI_SMALL_SIZE_MAX) {
    mi_assert_internal(huge_alignment == 0);
    return mi_heap_malloc_small_zero(heap, size, zero);
  }
+ #if MI_DEBUG_GUARDED
+ else if (mi_heap_malloc_use_guarded(size,huge_alignment>0)) { return mi_heap_malloc_guarded(heap, size, zero); }
+ #endif
  else {
+   // regular allocation
    mi_assert(heap!=NULL);
    mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
    void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero, huge_alignment); // note: size can overflow but it is detected in malloc_generic
    mi_track_malloc(p,size,zero);
    #if MI_STAT>1
    if (p != NULL) {
      if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
@@ -578,6 +601,78 @@ mi_decl_nodiscard void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
  }
}

+#if MI_DEBUG_GUARDED
+static inline bool mi_heap_malloc_small_use_guarded(size_t size) {
+  return (size <= (size_t)_mi_option_get_fast(mi_option_debug_guarded_max)
+          && size >= (size_t)_mi_option_get_fast(mi_option_debug_guarded_min));
+}
+
+static inline bool mi_heap_malloc_use_guarded(size_t size, bool has_huge_alignment) {
+  return (!has_huge_alignment                                   // guarded pages do not work with huge alignments at the moment
+          && _mi_option_get_fast(mi_option_debug_guarded_max) > 0  // guarded must be enabled
+          && (mi_heap_malloc_small_use_guarded(size)
+              || ((mi_good_size(size) & (_mi_os_page_size() - 1)) == 0))  // page-size multiples are always guarded so we can have a correct `mi_usable_size`.
+         );
+}
+
+static mi_decl_restrict void* mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept
+{
+  #if defined(MI_PADDING_SIZE)
+  mi_assert(MI_PADDING_SIZE==0);
+  #endif
+  // allocate multiple of page size ending in a guard page
+  const size_t obj_size = _mi_align_up(size, MI_MAX_ALIGN_SIZE);  // ensure minimal alignment requirement
+  const size_t os_page_size = _mi_os_page_size();
+  const size_t req_size = _mi_align_up(obj_size + os_page_size, os_page_size);
+  void* const block = _mi_malloc_generic(heap, req_size, zero, 0 /* huge_alignment */);
+  if (block==NULL) return NULL;
+  mi_page_t* page = _mi_ptr_page(block);
+  mi_segment_t* segment = _mi_page_segment(page);
+
+  const size_t block_size = mi_page_block_size(page);  // must use `block_size` to match `mi_free_local`
+  void* const guard_page = (uint8_t*)block + (block_size - os_page_size);
+  mi_assert_internal(_mi_is_aligned(guard_page, os_page_size));
+
+  // place block in front of the guard page
+  size_t offset = block_size - os_page_size - obj_size;
+  if (offset > MI_BLOCK_ALIGNMENT_MAX) {
+    // give up to place it right in front of the guard page if the offset is too large for unalignment
+    offset = MI_BLOCK_ALIGNMENT_MAX;
+  }
+  void* const p = (uint8_t*)block + offset;
+  mi_assert_internal(p>=block);
+
+  // set page flags
+  if (offset > 0) {
+    mi_page_set_has_aligned(page, true);
+  }
+
+  // set guard page
+  if (segment->allow_decommit) {
+    mi_page_set_has_guarded(page, true);
+    _mi_os_protect(guard_page, os_page_size);
+  }
+  else {
+    _mi_warning_message("unable to set a guard page behind an object due to pinned memory (large OS pages?) (object %p of size %zu)\n", p, size);
+  }
+
+  // stats
+  mi_track_malloc(p, size, zero);
+  #if MI_STAT>1
+  if (p != NULL) {
+    if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
+    mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
+  }
+  #endif
+  #if MI_DEBUG>3
+  if (p != NULL && zero) {
+    mi_assert_expensive(mi_mem_is_zero(p, size));
+  }
+  #endif
+  return p;
+}
+#endif
+
// ------------------------------------------------------
// ensure explicit external inline definitions are emitted!
// ------------------------------------------------------
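Note: to make the layout in mi_heap_malloc_guarded concrete, here is a standalone sketch of the arithmetic, assuming a 4 KiB OS page and a 16-byte MI_MAX_ALIGN_SIZE (OS_PAGE_SIZE, MAX_ALIGN, and align_up are hypothetical stand-ins for the internals used above):

  #include <assert.h>
  #include <stddef.h>

  #define OS_PAGE_SIZE 4096u   // stand-in for _mi_os_page_size()
  #define MAX_ALIGN      16u   // stand-in for MI_MAX_ALIGN_SIZE

  static size_t align_up(size_t n, size_t a) { return (n + a - 1) & ~(a - 1); }

  int main(void) {
    const size_t size     = 100;                                             // user request
    const size_t obj_size = align_up(size, MAX_ALIGN);                       // 112
    const size_t req_size = align_up(obj_size + OS_PAGE_SIZE, OS_PAGE_SIZE); // 8192: object plus one full page
    // with a block of exactly req_size bytes, the returned pointer sits
    // `offset` bytes into the block so the object ends at the guard page
    // (the real code additionally caps offset at MI_BLOCK_ALIGNMENT_MAX)
    const size_t offset   = req_size - OS_PAGE_SIZE - obj_size;              // 3984
    assert(offset == 3984);
    return 0;
  }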

src/free.c

@@ -33,12 +33,12 @@ static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool
  // checks
  if mi_unlikely(mi_check_is_double_free(page, block)) return;
  mi_check_padding(page, block);
  if (track_stats) { mi_stat_free(page, block); }
- #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
+ #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN && !MI_DEBUG_GUARDED
  memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
  #endif
  if (track_stats) { mi_track_free_size(block, mi_page_usable_size_of(page, block)); } // faster then mi_usable_size as we already know the page and that p is unaligned

  // actual free: push on the local free list
  mi_block_set_next(page, block, page->local_free);
  page->local_free = block;
@@ -51,8 +51,8 @@ static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool
}

// Adjust a block that was allocated aligned, to the actual start of the block in the page.
// note: this can be called from `mi_free_generic_mt` where a non-owning thread accesses the
// `page_start` and `block_size` fields; however these are constant and the page won't be
// deallocated (as the block we are freeing keeps it alive) and thus safe to read concurrently.
mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p) {
  mi_assert_internal(page!=NULL && p!=NULL);
@@ -69,16 +69,21 @@ mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p) {
  return (mi_block_t*)((uintptr_t)p - adjust);
}

+// forward declaration for a MI_DEBUG_GUARDED build
+static void mi_block_unguard(mi_page_t* page, mi_block_t* block);
+
// free a local pointer  (page parameter comes first for better codegen)
static void mi_decl_noinline mi_free_generic_local(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept {
  MI_UNUSED(segment);
  mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(page, p) : (mi_block_t*)p);
+ mi_block_unguard(page,block);
  mi_free_block_local(page, block, true /* track stats */, true /* check for a full page */);
}

// free a pointer owned by another thread (page parameter comes first for better codegen)
static void mi_decl_noinline mi_free_generic_mt(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept {
  mi_block_t* const block = _mi_page_ptr_unalign(page, p); // don't check `has_aligned` flag to avoid a race (issue #865)
+ mi_block_unguard(page, block);
  mi_free_block_mt(page, segment, block);
}

@@ -298,6 +303,13 @@ static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noe
  const mi_segment_t* const segment = mi_checked_ptr_segment(p, msg);
  if mi_unlikely(segment==NULL) return 0;
  const mi_page_t* const page = _mi_segment_page_of(segment, p);
+ #if MI_DEBUG_GUARDED
+ if (mi_page_has_guarded(page)) {
+   const size_t bsize = mi_page_usable_aligned_size_of(page, p);
+   mi_assert_internal(bsize > _mi_os_page_size());
+   return (bsize > _mi_os_page_size() ? bsize - _mi_os_page_size() : bsize);
+ } else
+ #endif
  if mi_likely(!mi_page_has_aligned(page)) {
    const mi_block_t* block = (const mi_block_t*)p;
    return mi_page_usable_size_of(page, block);

@@ -518,3 +530,25 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
  MI_UNUSED(page); MI_UNUSED(block);
}
#endif

+// Remove guard page when building with MI_DEBUG_GUARDED
+#if !MI_DEBUG_GUARDED
+static void mi_block_unguard(mi_page_t* page, mi_block_t* block) {
+  MI_UNUSED(page);
+  MI_UNUSED(block);
+  // do nothing
+}
+#else
+static void mi_block_unguard(mi_page_t* page, mi_block_t* block) {
+  if (mi_page_has_guarded(page)) {
+    const size_t bsize = mi_page_block_size(page);
+    const size_t psize = _mi_os_page_size();
+    mi_assert_internal(bsize > psize);
+    mi_assert_internal(_mi_page_segment(page)->allow_decommit);
+    void* gpage = (uint8_t*)block + (bsize - psize);
+    mi_assert_internal(_mi_is_aligned(gpage, psize));
+    _mi_os_unprotect(gpage, psize);
+  }
+}
+#endif

src/heap.c

@@ -32,7 +32,7 @@ static bool mi_heap_visit_pages(mi_heap_t* heap, heap_page_visitor_fun* fn, void
  #if MI_DEBUG>1
  size_t total = heap->page_count;
  size_t count = 0;
  #endif

  for (size_t i = 0; i <= MI_BIN_FULL; i++) {
    mi_page_queue_t* pq = &heap->pages[i];

@@ -164,9 +164,9 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
  if (force && is_main_thread && mi_heap_is_backing(heap)) {
    _mi_thread_data_collect();  // collect thread data cache
  }

  // collect arenas (this is program wide so don't force purges on abandonment of threads)
  _mi_arenas_collect(collect == MI_FORCE /* force purge? */, &heap->tld->stats);
}

void _mi_heap_collect_abandon(mi_heap_t* heap) {

@@ -240,7 +240,7 @@ mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) {
}

mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
  // don't reclaim abandoned memory or otherwise destroy is unsafe
  return mi_heap_new_ex(0 /* default heap tag */, true /* no reclaim */, _mi_arena_id_none());
}
@@ -369,7 +369,13 @@ void mi_heap_destroy(mi_heap_t* heap) {
  mi_assert(heap->no_reclaim);
  mi_assert_expensive(mi_heap_is_valid(heap));
  if (heap==NULL || !mi_heap_is_initialized(heap)) return;
+ #if MI_DEBUG_GUARDED
+ _mi_warning_message("'mi_heap_destroy' called but ignored as MI_DEBUG_GUARDED is enabled (heap at %p)\n", heap);
+ mi_heap_delete(heap);
+ return;
+ #else
  if (!heap->no_reclaim) {
+   _mi_warning_message("'mi_heap_destroy' called but ignored as the heap was not created with 'allow_destroy' (heap at %p)\n", heap);
    // don't free in case it may contain reclaimed pages
    mi_heap_delete(heap);
  }

@@ -382,6 +388,7 @@ void mi_heap_destroy(mi_heap_t* heap) {
    _mi_heap_destroy_pages(heap);
    mi_heap_free(heap);
  }
+ #endif
}

// forcefully destroy all heaps in the current thread
@@ -537,7 +544,7 @@ void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page) {
static void mi_get_fast_divisor(size_t divisor, uint64_t* magic, size_t* shift) {
  mi_assert_internal(divisor > 0 && divisor <= UINT32_MAX);
  *shift = 64 - mi_clz(divisor - 1);
  *magic = ((((uint64_t)1 << 32) * (((uint64_t)1 << *shift) - divisor)) / divisor + 1);
}

static size_t mi_fast_divide(size_t n, uint64_t magic, size_t shift) {

@@ -581,7 +588,7 @@ bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t* page, mi_
  // create a bitmap of free blocks.
  #define MI_MAX_BLOCKS (MI_SMALL_PAGE_SIZE / sizeof(void*))
  uintptr_t free_map[MI_MAX_BLOCKS / MI_INTPTR_BITS];
  const uintptr_t bmapsize = _mi_divide_up(page->capacity, MI_INTPTR_BITS);
  memset(free_map, 0, bmapsize * sizeof(intptr_t));
  if (page->capacity % MI_INTPTR_BITS != 0) {
    // mark left-over bits at the end as free

@@ -591,7 +598,7 @@ bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t* page, mi_
  }

  // fast repeated division by the block size
  uint64_t magic;
  size_t shift;
  mi_get_fast_divisor(bsize, &magic, &shift);

@@ -665,7 +672,7 @@ static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_pa
  mi_heap_area_visit_fun* fun = (mi_heap_area_visit_fun*)vfun;
  mi_heap_area_ex_t xarea;
  xarea.page = page;
  _mi_heap_area_init(&xarea.area, page);
  return fun(heap, &xarea, arg);
}

src/options.c

@@ -99,6 +99,8 @@ static mi_option_desc_t options[_mi_option_last] =
#else
  { 0, UNINIT, MI_OPTION(visit_abandoned) },
#endif
+ { 0, UNINIT, MI_OPTION(debug_guarded_min) },  // only used when building with MI_DEBUG_GUARDED: minimal rounded object size for guarded objects
+ { 0, UNINIT, MI_OPTION(debug_guarded_max) },  // only used when building with MI_DEBUG_GUARDED: maximal rounded object size for guarded objects
};

static void mi_option_init(mi_option_desc_t* desc);
@@ -108,8 +110,7 @@ static bool mi_option_has_size_in_kib(mi_option_t option) {
}

void _mi_options_init(void) {
- // called on process load; should not be called before the CRT is initialized!
- // (e.g. do not call this from process_init as that may run before CRT initialization)
+ // called on process load
  mi_add_stderr_output(); // now it safe to use stderr for output
  for(int i = 0; i < _mi_option_last; i++ ) {
    mi_option_t option = (mi_option_t)i;
@@ -122,8 +123,26 @@ void _mi_options_init(void) {
  }
  mi_max_error_count = mi_option_get(mi_option_max_errors);
  mi_max_warning_count = mi_option_get(mi_option_max_warnings);
+ #if MI_DEBUG_GUARDED
+ if (mi_option_get(mi_option_debug_guarded_max) > 0) {
+   if (mi_option_is_enabled(mi_option_allow_large_os_pages)) {
+     mi_option_disable(mi_option_allow_large_os_pages);
+     _mi_warning_message("option 'allow_large_os_pages' is disabled to allow for guarded objects\n");
+   }
+ }
+ _mi_verbose_message("guarded build: %s\n", mi_option_get(mi_option_debug_guarded_max) > 0 ? "enabled" : "disabled");
+ #endif
}

+long _mi_option_get_fast(mi_option_t option) {
+  mi_assert(option >= 0 && option < _mi_option_last);
+  mi_option_desc_t* desc = &options[option];
+  mi_assert(desc->option == option);  // index should match the option
+  //mi_assert(desc->init != UNINIT);
+  return desc->value;
+}
+
mi_decl_nodiscard long mi_option_get(mi_option_t option) {
  mi_assert(option >= 0 && option < _mi_option_last);
  if (option < 0 || option >= _mi_option_last) return 0;
@@ -157,6 +176,13 @@ void mi_option_set(mi_option_t option, long value) {
  mi_assert(desc->option == option);  // index should match the option
  desc->value = value;
  desc->init = INITIALIZED;
+ // ensure min/max range; be careful to not recurse.
+ if (desc->option == mi_option_debug_guarded_min && _mi_option_get_fast(mi_option_debug_guarded_max) < value) {
+   mi_option_set(mi_option_debug_guarded_max, value);
+ }
+ else if (desc->option == mi_option_debug_guarded_max && _mi_option_get_fast(mi_option_debug_guarded_min) > value) {
+   mi_option_set(mi_option_debug_guarded_min, value);
+ }
}

void mi_option_set_default(mi_option_t option, long value) {
@@ -506,8 +532,7 @@ static void mi_option_init(mi_option_desc_t* desc) {
      value = (size > LONG_MAX ? LONG_MAX : (long)size);
    }
    if (*end == 0) {
-     desc->value = value;
-     desc->init = INITIALIZED;
+     mi_option_set(desc->option, value);
    }
    else {
      // set `init` first to avoid recursion through _mi_warning_message on mimalloc_verbose.
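Note: the clamping in mi_option_set keeps the min/max pair well-formed in either direction (and routing mi_option_init through mi_option_set makes environment-variable parsing hit the same clamp). A minimal usage sketch, assuming both options are still at their 0 defaults:

  #include <assert.h>
  #include <mimalloc.h>

  void set_guarded_range(void) {
    mi_option_set(mi_option_debug_guarded_min, 1024);  // max was 0, so it is bumped to 1024 as well
    assert(mi_option_get(mi_option_debug_guarded_max) >= 1024);
  }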

src/page.c

@@ -414,6 +414,9 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
  // no more aligned blocks in here
  mi_page_set_has_aligned(page, false);
+ #if MI_DEBUG_GUARDED
+ mi_page_set_has_guarded(page, false);
+ #endif

  // remove from the page list
  // (no need to do _mi_heap_delayed_free first as all blocks are already free)

@@ -440,6 +443,9 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
  mi_assert_internal(mi_page_all_free(page));
  mi_page_set_has_aligned(page, false);
+ #if MI_DEBUG_GUARDED
+ mi_page_set_has_guarded(page, false);
+ #endif

  // don't retire too often..
  // (or we end up retiring and re-allocating most of the time)

@@ -912,7 +918,7 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_al
  mi_assert_internal(mi_page_block_size(page) >= size);

  // and try again, this time succeeding! (i.e. this should never recurse through _mi_page_malloc)
- if mi_unlikely(zero && page->block_size == 0) {
+ if mi_unlikely(zero && mi_page_is_huge(page)) {
    // note: we cannot call _mi_page_malloc with zeroing for huge blocks; we zero it afterwards in that case.
    void* p = _mi_page_malloc(heap, page, size);
    mi_assert_internal(p != NULL);

src/segment.c

@@ -448,13 +448,18 @@ uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* pa
static size_t mi_segment_calculate_sizes(size_t capacity, size_t required, size_t* pre_size, size_t* info_size)
{
  const size_t minsize = sizeof(mi_segment_t) + ((capacity - 1) * sizeof(mi_page_t)) + 16 /* padding */;
  size_t guardsize = 0;
  size_t isize     = 0;

  if (MI_SECURE == 0) {
    // normally no guard pages
+   #if MI_DEBUG_GUARDED
+   isize = _mi_align_up(minsize, _mi_os_page_size());
+   #else
    isize = _mi_align_up(minsize, 16 * MI_MAX_ALIGN_SIZE);
+   #endif
  }
  else {
    // in secure mode, we set up a protected page in between the segment info

@@ -462,7 +467,7 @@ static size_t mi_segment_calculate_sizes(size_t capacity, size_t required, size_
    const size_t page_size = _mi_os_page_size();
    isize = _mi_align_up(minsize, page_size);
    guardsize = page_size;
-   required = _mi_align_up(required, page_size);
+   //required = _mi_align_up(required, isize + guardsize);
  }

  if (info_size != NULL) *info_size = isize;

test/main-override-static.c

@@ -11,6 +11,7 @@ static void double_free1();
static void double_free2();
static void corrupt_free();
static void block_overflow1();
+static void block_overflow2();
static void invalid_free();
static void test_aslr(void);
static void test_process_info(void);

@@ -30,6 +31,7 @@ int main() {
  // double_free2();
  // corrupt_free();
  // block_overflow1();
+ block_overflow2();
  // test_aslr();
  // invalid_free();
  // test_reserved();

@@ -78,6 +80,12 @@ static void block_overflow1() {
  free(p);
}

+static void block_overflow2() {
+  uint8_t* p = (uint8_t*)mi_malloc(16);
+  p[17] = 0;
+  free(p);
+}
+
// The double free samples come ArcHeap [1] by Insu Yun (issue #161)
// [1]: https://arxiv.org/pdf/1903.00503.pdf
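Note: block_overflow2 is the smoke test for this merge. In a default build the out-of-bounds store p[17] lands in padding or an adjacent block and is at best detected later, on free; in a guarded build the 16-byte object is placed flush against the protected page, so the store faults immediately at the offending instruction.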

test/test-api-fill.c

@@ -271,7 +271,7 @@ int main(void) {
    mi_free(p);
  };

- #if !(MI_TRACK_VALGRIND || MI_TRACK_ASAN)
+ #if !(MI_TRACK_VALGRIND || MI_TRACK_ASAN || MI_DEBUG_GUARDED)
  CHECK_BODY("fill-freed-small") {
    size_t malloc_size = MI_SMALL_SIZE_MAX / 2;
    uint8_t* p = (uint8_t*)mi_malloc(malloc_size);

test/test-api.c

@@ -65,6 +65,15 @@ bool mem_is_zero(uint8_t* p, size_t size) {
int main(void) {
  mi_option_disable(mi_option_verbose);

+ CHECK_BODY("malloc-aligned9a") {  // test large alignments
+   void* p = mi_zalloc_aligned(1024 * 1024, 2);
+   mi_free(p);
+   p = mi_zalloc_aligned(1024 * 1024, 2);
+   mi_free(p);
+   result = true;
+ };
+
  // ---------------------------------------------------
  // Malloc
  // ---------------------------------------------------

@@ -157,6 +166,7 @@ int main(void) {
    printf("malloc_aligned5: usable size: %zi\n", usable);
    mi_free(p);
  };
+ /*
  CHECK_BODY("malloc-aligned6") {
    bool ok = true;
    for (size_t align = 1; align <= MI_BLOCK_ALIGNMENT_MAX && ok; align *= 2) {

@@ -174,6 +184,7 @@ int main(void) {
    }
    result = ok;
  };
+ */
  CHECK_BODY("malloc-aligned7") {
    void* p = mi_malloc_aligned(1024,MI_BLOCK_ALIGNMENT_MAX);
    mi_free(p);

@@ -189,7 +200,7 @@ int main(void) {
    }
    result = ok;
  };
- CHECK_BODY("malloc-aligned9") {
+ CHECK_BODY("malloc-aligned9") {  // test large alignments
    bool ok = true;
    void* p[8];
    size_t sizes[8] = { 8, 512, 1024 * 1024, MI_BLOCK_ALIGNMENT_MAX, MI_BLOCK_ALIGNMENT_MAX + 1, 2 * MI_BLOCK_ALIGNMENT_MAX, 8 * MI_BLOCK_ALIGNMENT_MAX, 0 };