simplified aligned allocation; improved codegen; fix mi_good_size with padding included; add MI_MAX_ALIGN_GUARANTEE

Daan 2024-05-11 06:43:52 -07:00
parent c70c1df16a
commit 7128db7bba
7 changed files with 69 additions and 55 deletions

View File

@@ -196,7 +196,9 @@ mi_msecs_t _mi_clock_end(mi_msecs_t start);
mi_msecs_t _mi_clock_start(void);

// "alloc.c"
-void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept;  // called from `_mi_malloc_generic`
+void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept;  // called from `_mi_malloc_generic`
+void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept;  // called from `_mi_heap_malloc_aligned`
+void* _mi_page_malloc_zeroed(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept;  // called from `_mi_heap_malloc_aligned`
void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept;  // called from `_mi_heap_malloc_aligned`
void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept;

View File

@@ -200,6 +200,9 @@ typedef int32_t mi_ssize_t;
#error "mimalloc internal: define more bins"
#endif

+// Maximum block size for which blocks are guaranteed to be block size aligned. (see `segment.c:_mi_segment_page_start`)
+#define MI_MAX_ALIGN_GUARANTEE  (MI_MEDIUM_OBJ_SIZE_MAX)
+
// Alignments over MI_BLOCK_ALIGNMENT_MAX are allocated in dedicated huge page segments
#define MI_BLOCK_ALIGNMENT_MAX  (MI_SEGMENT_SIZE >> 1)
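
For orientation, a minimal sketch of what this guarantee means at the public API level. It is not part of the commit; the 4096-byte request is an illustrative power-of-two bin size well below MI_MAX_ALIGN_GUARANTEE, and the assert holds in any case by the mi_malloc_aligned contract -- the point is that such requests can now be recognized as naturally aligned and served by the regular allocation path.

    #include <mimalloc.h>
    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      // size == alignment and both are a power-of-two bin size:
      // the block comes back aligned to its own size, no aligned-allocation fallback needed.
      void* p = mi_malloc_aligned(4096, 4096);
      assert(((uintptr_t)p & (4096 - 1)) == 0);
      mi_free(p);
      return 0;
    }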

View File

@@ -15,15 +15,15 @@ terms of the MIT license. A copy of the license can be found in the file
// Aligned Allocation
// ------------------------------------------------------

-static inline bool mi_is_naturally_aligned( size_t size, size_t alignment ) {
-  // objects up to `MI_MEDIUM_OBJ_SIZE_MAX` are allocated aligned to their size (see `segment.c:_mi_segment_page_start`).
+static bool mi_malloc_is_naturally_aligned( size_t size, size_t alignment ) {
+  // objects up to `MI_MAX_ALIGN_GUARANTEE` are allocated aligned to their size (see `segment.c:_mi_segment_page_start`).
+  // note: the size may not be an actual bin-size but it turns out the test below is still correct for our
+  // powers of two bin spacing (see test-api.c:test-aligned13).
  mi_assert_internal(_mi_is_power_of_two(alignment) && (alignment > 0));
-  return (size <= MI_MEDIUM_OBJ_SIZE_MAX && alignment <= size && ((size + MI_PADDING_SIZE) & (alignment-1)) == 0);
+  if (alignment > size) return false;
+  if (alignment <= MI_MAX_ALIGN_SIZE) return true;
+  const size_t bsize = mi_good_size(size);
+  return (bsize <= MI_MAX_ALIGN_GUARANTEE && (bsize & (alignment-1)) == 0);
}

// Fallback primitive aligned allocation -- split out for better codegen
static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept
{
@@ -31,11 +31,19 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
  mi_assert_internal(alignment != 0 && _mi_is_power_of_two(alignment));
  // use regular allocation if it is guaranteed to fit the alignment constraints.
-  if (offset == 0 && mi_is_naturally_aligned(size,alignment)) {
+  if (offset == 0 && mi_malloc_is_naturally_aligned(size,alignment)) {
    void* p = _mi_heap_malloc_zero(heap, size, zero);
    mi_assert_internal(p == NULL || ((uintptr_t)p % alignment) == 0);
+    const bool is_aligned_or_null = (((uintptr_t)p) & (alignment-1)) == 0;
+    if mi_likely(is_aligned_or_null) {
      return p;
    }
+    else {
+      // this should never happen if the `mi_malloc_is_naturally_aligned` check is correct..
+      mi_assert(false);
+      mi_free(p);
+    }
+  }

  void* p;
  size_t oversize;
@@ -107,32 +115,34 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
    return NULL;
  }

-  if mi_unlikely(size > (MI_MAX_ALLOC_SIZE - MI_PADDING_SIZE)) { // we don't allocate more than MI_MAX_ALLOC_SIZE (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
-    #if MI_DEBUG > 0
-    _mi_error_message(EOVERFLOW, "aligned allocation request is too large (size %zu, alignment %zu)\n", size, alignment);
-    #endif
-    return NULL;
-  }
-  const uintptr_t align_mask = alignment-1;  // for any x, `(x & align_mask) == (x % alignment)`
-  const size_t padsize = size + MI_PADDING_SIZE;  // note: cannot overflow due to earlier size check

  // try first if there happens to be a small block available with just the right alignment
-  if mi_likely(padsize <= MI_SMALL_SIZE_MAX && alignment <= padsize) {
+  if mi_likely(size <= MI_SMALL_SIZE_MAX && alignment <= size) {
+    const uintptr_t align_mask = alignment-1;  // for any x, `(x & align_mask) == (x % alignment)`
+    const size_t padsize = size + MI_PADDING_SIZE;
    mi_page_t* page = _mi_heap_get_free_small_page(heap, padsize);
-    const bool is_aligned = (((uintptr_t)page->free+offset) & align_mask)==0;
-    if mi_likely(page->free != NULL && is_aligned)
+    if mi_likely(page->free != NULL) {
+      const bool is_aligned = (((uintptr_t)page->free + offset) & align_mask)==0;
+      if mi_likely(is_aligned)
      {
        #if MI_STAT>1
        mi_heap_stat_increase(heap, malloc, size);
        #endif
-        void* p = _mi_page_malloc(heap, page, padsize, zero); // TODO: inline _mi_page_malloc
+        void* p = (zero ? _mi_page_malloc_zeroed(heap,page,padsize) : _mi_page_malloc(heap,page,padsize)); // call specific page malloc for better codegen
        mi_assert_internal(p != NULL);
        mi_assert_internal(((uintptr_t)p + offset) % alignment == 0);
        mi_track_malloc(p,size,zero);
        return p;
      }
    }
+  }

  // fallback
+  if mi_unlikely(size > (MI_MAX_ALLOC_SIZE - MI_PADDING_SIZE)) { // we don't allocate more than MI_MAX_ALLOC_SIZE (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
+    #if MI_DEBUG > 0
+    _mi_error_message(EOVERFLOW, "aligned allocation request is too large (size %zu, alignment %zu)\n", size, alignment);
+    #endif
+    return NULL;
+  }
  return mi_heap_malloc_zero_aligned_at_fallback(heap, size, alignment, offset, zero);
}

@@ -146,14 +156,7 @@ mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* he
}

mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
-  if (alignment == 0 || !_mi_is_power_of_two(alignment)) return NULL;
-  if (size <= MI_SMALL_SIZE_MAX && mi_is_naturally_aligned(size,alignment)) {
-    // fast path for common alignment and size
-    return mi_heap_malloc_small(heap, size);
-  }
-  else {
-    return mi_heap_malloc_aligned_at(heap, size, alignment, 0);
-  }
+  return mi_heap_malloc_aligned_at(heap, size, alignment, 0);
}

// ------------------------------------------------------
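
To make the new fast-path test concrete, below is a standalone mirror of the `mi_malloc_is_naturally_aligned` logic built only on the public `mi_good_size`. It is a sketch, not the library's internal code: the two constants are stand-ins for the internal `MI_MAX_ALIGN_SIZE` and `MI_MAX_ALIGN_GUARANTEE` (16 bytes and 128 KiB assumed, typical for a 64-bit build), and the printed results assume a release build where debug padding is zero.

    #include <mimalloc.h>
    #include <stdbool.h>
    #include <stdio.h>

    // Mirror of the new natural-alignment check (illustrative only).
    static bool is_naturally_aligned(size_t size, size_t alignment) {
      const size_t max_align_size      = 16;          // assumed MI_MAX_ALIGN_SIZE
      const size_t max_align_guarantee = 128 * 1024;  // assumed MI_MAX_ALIGN_GUARANTEE
      if (alignment > size) return false;             // a block is never aligned beyond its own size
      if (alignment <= max_align_size) return true;   // basic alignment always holds
      const size_t bsize = mi_good_size(size);        // bin size actually handed out
      return (bsize <= max_align_guarantee && (bsize & (alignment - 1)) == 0);
    }

    int main(void) {
      printf("%d\n", (int)is_naturally_aligned(48, 32));  // 0: bin size 48 is not a multiple of 32
      printf("%d\n", (int)is_naturally_aligned(64, 64));  // 1: bin size 64 is block-size aligned
      return 0;
    }

When the test fails (or an offset is requested), the caller falls through to mi_heap_malloc_zero_aligned_at_fallback, which over-allocates and adjusts the block as before.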

View File

@@ -28,7 +28,7 @@ terms of the MIT license. A copy of the license can be found in the file
// Fast allocation in a page: just pop from the free list.
// Fall back to generic allocation only if the list is empty.
// Note: in release mode the (inlined) routine is about 7 instructions with a single test.
-extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept
+extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept
{
  mi_assert_internal(page->block_size == 0 /* empty heap */ || mi_page_block_size(page) >= size);
  mi_block_t* const block = page->free;
@@ -105,6 +105,14 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
  return block;
}

+// extra entries for improved efficiency in `alloc-aligned.c`.
+extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept {
+  return _mi_page_malloc_zero(heap,page,size,false);
+}
+extern inline void* _mi_page_malloc_zeroed(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept {
+  return _mi_page_malloc_zero(heap,page,size,true);
+}
+
static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
  mi_assert(heap != NULL);
  #if MI_DEBUG
@@ -117,7 +125,7 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap,
  #endif

  mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
-  void* const p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
+  void* const p = _mi_page_malloc_zero(heap, page, size + MI_PADDING_SIZE, zero);
  mi_track_malloc(p,size,zero);
  #if MI_STAT>1
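
The two new entry points exist so that a caller which already knows `zero` at compile time (as `alloc-aligned.c` now does) reaches a specialized body instead of passing a runtime bool. A generic illustration of that pattern, with hypothetical names and no mimalloc internals:

    #include <stddef.h>
    #include <string.h>

    // Shared implementation; `zero` is a compile-time constant at every call site below,
    // so the compiler can fold the branch away in each wrapper.
    static inline void* block_alloc(void* block, size_t size, int zero) {
      if (block != NULL && zero) { memset(block, 0, size); }
      return block;
    }

    // Thin wrappers: each caller picks the variant it needs and no bool is materialized or tested.
    static inline void* block_alloc_plain(void* block, size_t size)  { return block_alloc(block, size, 0); }
    static inline void* block_alloc_zeroed(void* block, size_t size) { return block_alloc(block, size, 1); }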

View File

@@ -113,10 +113,10 @@ size_t _mi_bin_size(uint8_t bin) {
// Good size for allocation
size_t mi_good_size(size_t size) mi_attr_noexcept {
  if (size <= MI_LARGE_OBJ_SIZE_MAX) {
-    return _mi_bin_size(mi_bin(size));
+    return _mi_bin_size(mi_bin(size + MI_PADDING_SIZE));
  }
  else {
-    return _mi_align_up(size,_mi_os_page_size());
+    return _mi_align_up(size + MI_PADDING_SIZE,_mi_os_page_size());
  }
}
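
With this fix, mi_good_size performs the same padded bin lookup that mi_malloc itself does (compare `_mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE)` above), which is what the natural-alignment check relies on. A common, hedged usage sketch of the public mi_good_size / mi_realloc pair; the buf_t type and buf_reserve helper are hypothetical:

    #include <mimalloc.h>
    #include <stddef.h>

    typedef struct { char* data; size_t cap; } buf_t;

    // Grow a buffer to at least `want` bytes, rounding the request up to the
    // allocator's good size so little capacity is wasted (exact in a release
    // build where MI_PADDING_SIZE is zero).
    static int buf_reserve(buf_t* b, size_t want) {
      if (want <= b->cap) return 0;
      const size_t cap = mi_good_size(want);
      char* d = (char*)mi_realloc(b->data, cap);
      if (d == NULL) return -1;
      b->data = d;
      b->cap  = cap;
      return 0;
    }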

View File

@@ -914,12 +914,12 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_al
  // and try again, this time succeeding! (i.e. this should never recurse through _mi_page_malloc)
  if mi_unlikely(zero && page->block_size == 0) {
    // note: we cannot call _mi_page_malloc with zeroing for huge blocks; we zero it afterwards in that case.
-    void* p = _mi_page_malloc(heap, page, size, false);
+    void* p = _mi_page_malloc(heap, page, size);
    mi_assert_internal(p != NULL);
    _mi_memzero_aligned(p, mi_page_usable_block_size(page));
    return p;
  }
  else {
-    return _mi_page_malloc(heap, page, size, zero);
+    return _mi_page_malloc_zero(heap, page, size, zero);
  }
}

View File

@@ -426,15 +426,13 @@ uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* pa
  size_t psize;
  uint8_t* p = mi_segment_raw_page_start(segment, page, &psize);
  const size_t block_size = mi_page_block_size(page);
-  if (page->segment_idx == 0 && block_size > 0 && segment->page_kind <= MI_PAGE_MEDIUM) {
+  if (/*page->segment_idx == 0 &&*/ block_size > 0 && block_size <= MI_MAX_ALIGN_GUARANTEE) {
    // for small and medium objects, ensure the page start is aligned with the block size (PR#66 by kickunderscore)
+    mi_assert_internal(segment->page_kind <= MI_PAGE_MEDIUM);
    size_t adjust = block_size - ((uintptr_t)p % block_size);
-    if (psize - adjust >= block_size) {
-      if (adjust < block_size) {
+    if (adjust < block_size && psize >= block_size + adjust) {
      p += adjust;
      psize -= adjust;
-        // if (pre_size != NULL) *pre_size = adjust;
-      }
      mi_assert_internal((uintptr_t)p % block_size == 0);
    }
  }
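
The rewritten condition folds the two nested checks into one: shift the raw page start up to the next block-size boundary only when the start is not already aligned (`adjust < block_size`) and a full block still fits after the shift (`psize >= block_size + adjust`); as a side effect it also avoids the unsigned wrap-around that `psize - adjust` could produce when the page is smaller than the adjustment. A small worked example with made-up numbers, not library code:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      // Hypothetical values: a raw page start that is 0x40 bytes past a 256-byte boundary.
      uintptr_t p          = 0x10040;
      size_t    psize      = 0x10000;
      size_t    block_size = 0x100;

      size_t adjust = block_size - (p % block_size);            // 0x100 - 0x40 = 0xC0
      if (adjust < block_size && psize >= block_size + adjust) {
        p     += adjust;                                        // 0x10100: a multiple of 0x100
        psize -= adjust;                                        // 0xFF40 still holds whole blocks
      }
      // Had p already been aligned, adjust would equal block_size and no shift would occur.
      printf("start=%#lx size=%#zx\n", (unsigned long)p, psize);
      return 0;
    }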