mirror of https://github.com/microsoft/mimalloc

commit 3c85983a35
merge from dev with new page fields (block_size and is_huge)
@@ -499,11 +499,11 @@ void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_m
 /// \{

 /// The maximum supported alignment size (currently 1MiB).
-#define MI_ALIGNMENT_MAX        (1024*1024UL)
+#define MI_BLOCK_ALIGNMENT_MAX  (1024*1024UL)

 /// Allocate \a size bytes aligned by \a alignment.
 /// @param size  number of bytes to allocate.
-/// @param alignment  the minimal alignment of the allocated memory. Must be less than #MI_ALIGNMENT_MAX.
+/// @param alignment  the minimal alignment of the allocated memory. Must be less than #MI_BLOCK_ALIGNMENT_MAX.
 /// @returns pointer to the allocated memory or \a NULL if out of memory.
 /// The returned pointer is aligned by \a alignment, i.e.
 /// `(uintptr_t)p % alignment == 0`.
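The documented contract above can be exercised through the public API; a minimal usage sketch (not part of this commit, relying only on the documented behavior of `mi_malloc_aligned` and `mi_free`):

    #include <mimalloc.h>
    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      // 1000 bytes at a 4096-byte boundary; 4096 is well below MI_BLOCK_ALIGNMENT_MAX (1MiB)
      void* p = mi_malloc_aligned(1000, 4096);
      assert(p != NULL && ((uintptr_t)p % 4096) == 0);  // the documented guarantee
      mi_free(p);
      return 0;
    }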
@@ -217,6 +217,12 @@
     <ClCompile Include="..\..\src\bitmap.c">
       <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</ExcludedFromBuild>
     </ClCompile>
+    <ClCompile Include="..\..\src\free.c">
+      <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
+      <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+      <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+      <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
+    </ClCompile>
     <ClCompile Include="..\..\src\heap.c" />
     <ClCompile Include="..\..\src\init.c" />
     <ClCompile Include="..\..\src\libc.c" />
@@ -58,6 +58,9 @@
     <ClCompile Include="..\..\src\libc.c">
       <Filter>Sources</Filter>
     </ClCompile>
+    <ClCompile Include="..\..\src\free.c">
+      <Filter>Sources</Filter>
+    </ClCompile>
   </ItemGroup>
   <ItemGroup>
     <ClInclude Include="..\..\src\bitmap.h">
@@ -203,7 +203,7 @@ void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool
 void*       _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
 void*       _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept;  // called from `_mi_heap_malloc_aligned`
 void*       _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept;
-mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p);
+mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p);
 bool        _mi_free_delayed_block(mi_block_t* block);
 void        _mi_free_generic(mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept;  // for runtime integration
 void        _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size);
@@ -488,8 +488,10 @@ static inline mi_page_t* _mi_segment_page_of(const mi_segment_t* segment, const
 }

 // Quick page start for initialized pages
-static inline uint8_t* _mi_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size) {
-  return _mi_segment_page_start(segment, page, page_size);
+static inline uint8_t* mi_page_start(const mi_page_t* page) {
+  mi_assert_internal(page->page_start != NULL);
+  mi_assert_expensive( _mi_segment_page_start(_mi_page_segment(page), page, NULL) == page->page_start);
+  return page->page_start;
 }

 // Get the page containing the pointer
@@ -500,20 +502,14 @@ static inline mi_page_t* _mi_ptr_page(void* p) {

 // Get the block size of a page (special case for huge objects)
 static inline size_t mi_page_block_size(const mi_page_t* page) {
-  const size_t bsize = page->xblock_size;
-  mi_assert_internal(bsize > 0);
-  if mi_likely(bsize < MI_HUGE_BLOCK_SIZE) {
-    return bsize;
-  }
-  else {
-    size_t psize;
-    _mi_segment_page_start(_mi_page_segment(page), page, &psize);
-    return psize;
-  }
+  mi_assert_internal(page->block_size > 0);
+  return page->block_size;
 }

 static inline bool mi_page_is_huge(const mi_page_t* page) {
-  return (_mi_page_segment(page)->kind == MI_SEGMENT_HUGE);
+  mi_assert_internal((page->is_huge && _mi_page_segment(page)->kind == MI_SEGMENT_HUGE) ||
+                     (!page->is_huge && _mi_page_segment(page)->kind != MI_SEGMENT_HUGE));
+  return page->is_huge;
 }

 // Get the usable block size of a page without fixed padding.
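With `block_size` stored directly on the page, reading the block size becomes a single field load; the old `xblock_size` path had to special-case huge blocks through `_mi_segment_page_start`. A hypothetical caller, only to illustrate the simplification (not from this commit; `reserved` and `mi_page_block_size` are names taken from this diff):

    static inline size_t page_capacity_bytes(const mi_page_t* page) {
      // no huge-block special case needed anymore
      return (size_t)page->reserved * mi_page_block_size(page);
    }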
@@ -1,5 +1,5 @@
 /* ----------------------------------------------------------------------------
-Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
 This is free software; you can redistribute it and/or modify it under the
 terms of the MIT license. A copy of the license can be found in the file
 "LICENSE" at the root of this distribution.
@@ -193,16 +193,13 @@ typedef int32_t  mi_ssize_t;
 #endif

 // Maximum slice offset (15)
-#define MI_MAX_SLICE_OFFSET     ((MI_ALIGNMENT_MAX / MI_SEGMENT_SLICE_SIZE) - 1)
-
-// Used as a special value to encode block sizes in 32 bits.
-#define MI_HUGE_BLOCK_SIZE      ((uint32_t)(2*MI_GiB))
+#define MI_MAX_SLICE_OFFSET     ((MI_BLOCK_ALIGNMENT_MAX / MI_SEGMENT_SLICE_SIZE) - 1)

 // blocks up to this size are always allocated aligned
 #define MI_MAX_ALIGN_GUARANTEE  (8*MI_MAX_ALIGN_SIZE)

-// Alignments over MI_ALIGNMENT_MAX are allocated in dedicated huge page segments
-#define MI_ALIGNMENT_MAX        (MI_SEGMENT_SIZE >> 1)
+// Alignments over MI_BLOCK_ALIGNMENT_MAX are allocated in dedicated huge page segments
+#define MI_BLOCK_ALIGNMENT_MAX  (MI_SEGMENT_SIZE >> 1)


 // ------------------------------------------------------
@@ -266,7 +263,6 @@ typedef uintptr_t mi_thread_free_t;
 // implement a monotonic heartbeat. The `thread_free` list is needed for
 // avoiding atomic operations in the common case.
 //
-//
 // `used - |thread_free|` == actual blocks that are in use (alive)
 // `used - |thread_free| + |free| + |local_free| == capacity`
 //
@@ -274,16 +270,13 @@ typedef uintptr_t mi_thread_free_t;
 // the number of memory accesses in the `mi_page_all_free` function(s).
 //
 // Notes:
-// - Access is optimized for `mi_free` and `mi_page_alloc` (in `alloc.c`)
+// - Access is optimized for `free.c:mi_free` and `alloc.c:mi_page_alloc`
 // - Using `uint16_t` does not seem to slow things down
-// - The size is 8 words on 64-bit which helps the page index calculations
-//   (and 10 words on 32-bit, and encoded free lists add 2 words. Sizes 10
-//   and 12 are still good for address calculation)
-// - To limit the structure size, the `xblock_size` is 32-bits only; for
-//   blocks > MI_HUGE_BLOCK_SIZE the size is determined from the segment page size
+// - The size is 12 words on 64-bit which helps the page index calculations
+//   (and 14 words on 32-bit, and encoded free lists add 2 words)
 // - `xthread_free` uses the bottom bits as a delayed-free flags to optimize
 //   concurrent frees where only the first concurrent free adds to the owning
-//   heap `thread_delayed_free` list (see `alloc.c:mi_free_block_mt`).
+//   heap `thread_delayed_free` list (see `free.c:mi_free_block_mt`).
 //   The invariant is that no-delayed-free is only set if there is
 //   at least one block that will be added, or as already been added, to
 //   the owning heap `thread_delayed_free` list. This guarantees that pages
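Because `used` already counts the blocks sitting in `thread_free`, the "page is empty" test implied by the equations above collapses to one field read. A sketch of that reasoning (illustrative, not copied from the source):

    // `used - |thread_free|` = live blocks, and freed blocks only leave `used`
    // when collected, so a fully free page is exactly `used == 0`.
    static inline bool page_all_free(const mi_page_t* page) {
      return (page->used == 0);
    }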
@@ -294,20 +287,21 @@ typedef struct mi_page_s {
   uint32_t              slice_offset;      // distance from the actual page data slice (0 if a page)
   uint8_t               is_committed : 1;  // `true` if the page virtual memory is committed
   uint8_t               is_zero_init : 1;  // `true` if the page was initially zero initialized
-
+  uint8_t               is_huge:1;         // `true` if the page is in a huge segment
+  // padding
   // layout like this to optimize access in `mi_malloc` and `mi_free`
   uint16_t              capacity;          // number of blocks committed, must be the first field, see `segment.c:page_clear`
   uint16_t              reserved;          // number of blocks reserved in memory
+  uint16_t              used;              // number of blocks in use (including blocks in `thread_free`)
   mi_page_flags_t       flags;             // `in_full` and `has_aligned` flags (8 bits)
-  uint8_t               free_is_zero : 1;  // `true` if the blocks in the free list are zero initialized
-  uint8_t               retire_expire : 7; // expiration count for retired blocks
-
+  uint8_t               block_size_shift;  // if not zero, then `(1 << block_size_shift) == block_size` (only used for fast path in `free.c:_mi_page_ptr_unalign`)
+  uint8_t               free_is_zero:1;    // `true` if the blocks in the free list are zero initialized
+  uint8_t               retire_expire:7;   // expiration count for retired blocks
+  // padding
   mi_block_t*           free;              // list of available free blocks (`malloc` allocates from this list)
   mi_block_t*           local_free;        // list of deferred free blocks by this thread (migrates to `free`)
-  uint16_t              used;              // number of blocks in use (including blocks in `thread_free`)
-  uint8_t               block_size_shift;  // if not zero, then `(1 << block_size_shift) == block_size` (only used for fast path in `free.c:_mi_page_ptr_unalign`)
-  uint8_t               block_offset_adj;  // if not zero, then `(mi_page_start(_,page,_) - (uint8_t*)page - MI_MAX_ALIGN_SIZE*(block_offset_adj-1)) % block_size == 0)` (only used for fast path in `free.c:_mi_page_ptr_unalign`)
-  uint32_t              xblock_size;       // size available in each block (always `>0`)
+  size_t                block_size;        // size available in each block (always `>0`)
+  uint8_t*              page_start;        // start of the page area containing the blocks

 #if (MI_ENCODE_FREELIST || MI_PADDING)
   uintptr_t             keys[2];           // two random keys to encode the free lists (see `_mi_block_next`) or padding canary
@@ -319,10 +313,8 @@ typedef struct mi_page_s {
   struct mi_page_s*     next;              // next page owned by this thread with the same `block_size`
   struct mi_page_s*     prev;              // previous page owned by this thread with the same `block_size`

-  // 64-bit 9 words, 32-bit 12 words, (+2 for secure)
-  #if MI_INTPTR_SIZE==8
-  uintptr_t padding[1];
-  #endif
+  // 64-bit 11 words, 32-bit 13 words, (+2 for secure)
+  void* padding[1];
 } mi_page_t;

@@ -33,7 +33,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*

   void* p;
   size_t oversize;
-  if mi_unlikely(alignment > MI_ALIGNMENT_MAX) {
+  if mi_unlikely(alignment > MI_BLOCK_ALIGNMENT_MAX) {
     // use OS allocation for very large alignment and allocate inside a huge page (dedicated segment with 1 page)
     // This can support alignments >= MI_SEGMENT_SIZE by ensuring the object can be aligned at a point in the
     // first (and single) page such that the segment info is `MI_SEGMENT_SIZE` bytes before it (so it can be found by aligning the pointer down)
@@ -47,7 +47,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
     oversize = (size <= MI_SMALL_SIZE_MAX ? MI_SMALL_SIZE_MAX + 1 /* ensure we use generic malloc path */ : size);
     p = _mi_heap_malloc_zero_ex(heap, oversize, false, alignment);  // the page block size should be large enough to align in the single huge page block
     // zero afterwards as only the area from the aligned_p may be committed!
-    if (p == NULL) return NULL;
+    if (p == NULL) return NULL;
   }
   else {
     // otherwise over-allocate
@@ -69,13 +69,13 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
   // todo: expand padding if overallocated ?

   mi_assert_internal(mi_page_usable_block_size(_mi_ptr_page(p)) >= adjust + size);
-  mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_segment(aligned_p), _mi_ptr_page(aligned_p), aligned_p));
+  mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_page(aligned_p), aligned_p));
   mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
   mi_assert_internal(mi_usable_size(aligned_p)>=size);
   mi_assert_internal(mi_usable_size(p) == mi_usable_size(aligned_p)+adjust);


   // now zero the block if needed
-  if (alignment > MI_ALIGNMENT_MAX) {
+  if (alignment > MI_BLOCK_ALIGNMENT_MAX) {
     // for the tracker, on huge aligned allocations only from the start of the large block is defined
     mi_track_mem_undefined(aligned_p, size);
     if (zero) {
@@ -85,7 +85,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*

   if (p != aligned_p) {
     mi_track_align(p,aligned_p,adjust,mi_usable_size(aligned_p));
-  }
+  }
   }
   return aligned_p;
 }
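The assertions above pin down the over-allocation arithmetic; in isolation it looks like this (a sketch with hypothetical names, assuming `alignment` is a power of two):

    #include <stdint.h>
    #include <stddef.h>

    // align `p` so that ((uintptr_t)result + offset) % alignment == 0
    static void* align_at_offset(void* p, size_t alignment, size_t offset) {
      uintptr_t u = ((uintptr_t)p + offset + (alignment - 1)) & ~(uintptr_t)(alignment - 1);
      return (void*)(u - offset);   // adjust = result - p, always < alignment
    }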
src/alloc.c (12 changed lines)
@@ -1,5 +1,5 @@
 /* ----------------------------------------------------------------------------
-Copyright (c) 2018-2022, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
 This is free software; you can redistribute it and/or modify it under the
 terms of the MIT license. A copy of the license can be found in the file
 "LICENSE" at the root of this distribution.
@@ -30,7 +30,7 @@ terms of the MIT license. A copy of the license can be found in the file
 // Note: in release mode the (inlined) routine is about 7 instructions with a single test.
 extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept
 {
-  mi_assert_internal(page->xblock_size==0||mi_page_block_size(page) >= size);
+  mi_assert_internal(page->block_size == 0 /* empty heap */ || mi_page_block_size(page) >= size);
   mi_block_t* const block = page->free;
   if mi_unlikely(block == NULL) {
     return _mi_malloc_generic(heap, size, zero, 0);
@@ -53,14 +53,14 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz

   // zero the block? note: we need to zero the full block size (issue #63)
   if mi_unlikely(zero) {
-    mi_assert_internal(page->xblock_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic)
-    mi_assert_internal(page->xblock_size >= MI_PADDING_SIZE);
+    mi_assert_internal(page->block_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic)
+    mi_assert_internal(page->block_size >= MI_PADDING_SIZE);
     if (page->free_is_zero) {
       block->next = 0;
-      mi_track_mem_defined(block, page->xblock_size - MI_PADDING_SIZE);
+      mi_track_mem_defined(block, page->block_size - MI_PADDING_SIZE);
     }
     else {
-      _mi_memzero_aligned(block, page->xblock_size - MI_PADDING_SIZE);
+      _mi_memzero_aligned(block, page->block_size - MI_PADDING_SIZE);
     }
   }

src/free.c (40 changed lines)
@@ -25,22 +25,22 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block);
 // ------------------------------------------------------

 // forward declaration of multi-threaded free (`_mt`) (or free in huge block if compiled with MI_HUGE_PAGE_ABANDON)
-static mi_decl_noinline void mi_free_block_mt(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
+static mi_decl_noinline void mi_free_block_mt(mi_page_t* page, mi_segment_t* segment, mi_block_t* block);

 // regular free of a (thread local) block pointer
 // fast path written carefully to prevent spilling on the stack
-static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool check_full)
+static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool track_stats, bool check_full)
 {
   // checks
   if mi_unlikely(mi_check_is_double_free(page, block)) return;
   mi_check_padding(page, block);
-  mi_stat_free(page, block);
+  if (track_stats) { mi_stat_free(page, block); }
   #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
   if (!mi_page_is_huge(page)) {   // huge page content may be already decommitted
     memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
   }
   #endif
-  mi_track_free_size(p, mi_page_usable_size_of(page,block)); // faster then mi_usable_size as we already know the page and that p is unaligned
+  if (track_stats) { mi_track_free_size(p, mi_page_usable_size_of(page, block)); } // faster then mi_usable_size as we already know the page and that p is unaligned

   // actual free: push on the local free list
   mi_block_set_next(page, block, page->local_free);
@@ -54,17 +54,10 @@ static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool
 }

 // Adjust a block that was allocated aligned, to the actual start of the block in the page.
-mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p) {
+mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p) {
   mi_assert_internal(page!=NULL && p!=NULL);

-  size_t diff;
-  if mi_likely(page->block_offset_adj != 0) {
-    diff = (uint8_t*)p - (uint8_t*)page - (MI_MAX_ALIGN_SIZE*(page->block_offset_adj - 1));
-  }
-  else {
-    diff = (uint8_t*)p - _mi_page_start(segment, page, NULL);
-  }
-
+  size_t diff = (uint8_t*)p - page->page_start;
   size_t adjust;
   if mi_likely(page->block_size_shift != 0) {
     adjust = diff & (((size_t)1 << page->block_size_shift) - 1);
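The retained fast path recovers a block start with a single mask when the block size is a power of two. A worked example with made-up addresses (illustrative only, not from the commit):

    #include <stdint.h>
    #include <stddef.h>

    // e.g. 64-byte blocks => block_size_shift == 6; page_start == 0x1000, p == 0x10A8:
    //   diff = 0xA8, adjust = 0xA8 & 0x3F = 0x28, block start = 0x1080
    static inline void* block_of(uint8_t* page_start, uint8_t* p, uint8_t block_size_shift) {
      size_t diff = (size_t)(p - page_start);
      return p - (diff & (((size_t)1 << block_size_shift) - 1));
    }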
@@ -78,14 +71,15 @@ mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* p

 // free a local pointer (page parameter comes first for better codegen)
 static void mi_decl_noinline mi_free_generic_local(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept {
-  mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p);
-  mi_free_block_local(page, block, true);
+  MI_UNUSED(segment);
+  mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(page, p) : (mi_block_t*)p);
+  mi_free_block_local(page, block, true, true);
 }

 // free a pointer owned by another thread (page parameter comes first for better codegen)
 static void mi_decl_noinline mi_free_generic_mt(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept {
-  mi_block_t* const block = _mi_page_ptr_unalign(segment, page, p); // don't check `has_aligned` flag to avoid a race (issue #865)
-  mi_free_block_mt(segment, page, block);
+  mi_block_t* const block = _mi_page_ptr_unalign(page, p); // don't check `has_aligned` flag to avoid a race (issue #865)
+  mi_free_block_mt(page, segment, block);
 }

 // generic free (for runtime integration)
@@ -150,7 +144,7 @@ void mi_free(void* p) mi_attr_noexcept
     if mi_likely(page->flags.full_aligned == 0) {  // and it is not a full page (full pages need to move from the full bin), nor has aligned blocks (aligned blocks need to be unaligned)
       // thread-local, aligned, and not a full page
       mi_block_t* const block = (mi_block_t*)p;
-      mi_free_block_local(page, block, false /* no need to check if the page is full */);
+      mi_free_block_local(page, block, true, false /* no need to check if the page is full */);
     }
     else {
       // page is full or contains (inner) aligned blocks; use generic path
@@ -185,7 +179,7 @@ bool _mi_free_delayed_block(mi_block_t* block) {
   _mi_page_free_collect(page, false);

   // and free the block (possibly freeing the page as well since used is updated)
-  mi_free_block_local(page, block, true);
+  mi_free_block_local(page, block, false /* stats have already been adjusted */, true);
   return true;
 }

@@ -244,7 +238,7 @@ static void mi_stat_huge_free(const mi_page_t* page);
 #endif

 // Multi-threaded free (`_mt`) (or free in huge block if compiled with MI_HUGE_PAGE_ABANDON)
-static void mi_decl_noinline mi_free_block_mt(mi_segment_t* segment, mi_page_t* page, mi_block_t* block)
+static void mi_decl_noinline mi_free_block_mt(mi_page_t* page, mi_segment_t* segment, mi_block_t* block)
 {
   // first see if the segment was abandoned and if we can reclaim it into our thread
   if (mi_option_is_enabled(mi_option_abandoned_reclaim_on_free) &&
@@ -302,8 +296,8 @@ static void mi_decl_noinline mi_free_block_mt(mi_segment_t* segment, mi_page_t*
 // ------------------------------------------------------

 // Bytes available in a block
-static size_t mi_decl_noinline mi_page_usable_aligned_size_of(const mi_segment_t* segment, const mi_page_t* page, const void* p) mi_attr_noexcept {
-  const mi_block_t* block = _mi_page_ptr_unalign(segment, page, p);
+static size_t mi_decl_noinline mi_page_usable_aligned_size_of(const mi_page_t* page, const void* p) mi_attr_noexcept {
+  const mi_block_t* block = _mi_page_ptr_unalign(page, p);
   const size_t size = mi_page_usable_size_of(page, block);
   const ptrdiff_t adjust = (uint8_t*)p - (uint8_t*)block;
   mi_assert_internal(adjust >= 0 && (size_t)adjust <= size);
@@ -320,7 +314,7 @@ static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noe
   }
   else {
     // split out to separate routine for improved code generation
-    return mi_page_usable_aligned_size_of(segment, page, p);
+    return mi_page_usable_aligned_size_of(page, p);
   }
 }

src/heap.c (15 changed lines)
@@ -32,7 +32,7 @@ static bool mi_heap_visit_pages(mi_heap_t* heap, heap_page_visitor_fun* fn, void
   #if MI_DEBUG>1
   size_t total = heap->page_count;
   size_t count = 0;
-  #endif
+  #endif

   for (size_t i = 0; i <= MI_BIN_FULL; i++) {
     mi_page_queue_t* pq = &heap->pages[i];
@@ -120,11 +120,11 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
 {
   if (heap==NULL || !mi_heap_is_initialized(heap)) return;

-  const bool force = collect >= MI_FORCE;
+  const bool force = collect >= MI_FORCE;
   _mi_deferred_free(heap, force);

-  // note: never reclaim on collect but leave it to threads that need storage to reclaim
-  const bool force_main =
+  // note: never reclaim on collect but leave it to threads that need storage to reclaim
+  const bool force_main =
   #ifdef NDEBUG
     collect == MI_FORCE
   #else
@@ -474,8 +474,7 @@ static bool mi_heap_page_check_owned(mi_heap_t* heap, mi_page_queue_t* pq, mi_pa
   MI_UNUSED(heap);
   MI_UNUSED(pq);
   bool* found = (bool*)vfound;
-  mi_segment_t* segment = _mi_page_segment(page);
-  void* start = _mi_page_start(segment, page, NULL);
+  void* start = mi_page_start(page);
   void* end   = (uint8_t*)start + (page->capacity * mi_page_block_size(page));
   *found = (p >= start && p < end);
   return (!*found); // continue if not found
@@ -521,7 +520,7 @@ static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_v
   const size_t bsize  = mi_page_block_size(page);
   const size_t ubsize = mi_page_usable_block_size(page); // without padding
   size_t psize;
-  uint8_t* pstart = _mi_page_start(_mi_page_segment(page), page, &psize);
+  uint8_t* pstart = _mi_segment_page_start(_mi_page_segment(page), page, &psize);

   if (page->capacity == 1) {
     // optimize page with one block
@@ -588,7 +587,7 @@ static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_pa
   xarea.page = page;
   xarea.area.reserved = page->reserved * bsize;
   xarea.area.committed = page->capacity * bsize;
-  xarea.area.blocks = _mi_page_start(_mi_page_segment(page), page, NULL);
+  xarea.area.blocks = mi_page_start(page);
   xarea.area.used = page->used;   // number of blocks in use (#553)
   xarea.area.block_size = ubsize;
   xarea.area.full_block_size = bsize;
src/init.c (16 changed lines)
@@ -14,18 +14,19 @@ terms of the MIT license. A copy of the license can be found in the file

 // Empty page used to initialize the small free pages array
 const mi_page_t _mi_page_empty = {
-  0, false, false, false,
+  0,
+  false, false, false, false,
   0,       // capacity
   0,       // reserved capacity
+  0,       // used
   { 0 },   // flags
+  0,       // block size shift
   false,   // is_zero
   0,       // retire_expire
   NULL,    // free
   NULL,    // local_free
-  0,       // used
-  0,       // block size shift
-  0,       // block offset adj
-  0,       // xblock_size
+  0,       // block_size
+  NULL,    // page_start
   #if (MI_PADDING || MI_ENCODE_FREELIST)
   { 0, 0 },
   #endif
@@ -86,8 +87,9 @@ const mi_page_t _mi_page_empty = {
   MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
  MI_STAT_COUNT_NULL(), \
  { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
-  { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
-  { 0, 0 }, { 0, 0 }, { 0, 0 } \
+  { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
+  { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
+  { 0, 0 } \
  MI_STAT_COUNT_END_NULL()

src/os.c (48 changed lines)
@@ -29,7 +29,7 @@ bool _mi_os_has_overcommit(void) {
   return mi_os_mem_config.has_overcommit;
 }

-bool _mi_os_has_virtual_reserve(void) {
+bool _mi_os_has_virtual_reserve(void) {
   return mi_os_mem_config.has_virtual_reserve;
 }

@@ -165,7 +165,7 @@ void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t me
     }
   }
   else {
-    // nothing to do
+    // nothing to do
     mi_assert(memid.memkind < MI_MEM_OS);
   }
 }
@@ -188,25 +188,25 @@ static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bo
   if (!commit) { allow_large = false; }
   if (try_alignment == 0) { try_alignment = 1; } // avoid 0 to ensure there will be no divide by zero when aligning
   *is_zero = false;
-  void* p = NULL;
+  void* p = NULL;
   int err = _mi_prim_alloc(size, try_alignment, commit, allow_large, is_large, is_zero, &p);
   if (err != 0) {
     _mi_warning_message("unable to allocate OS memory (error: %d (0x%x), size: 0x%zx bytes, align: 0x%zx, commit: %d, allow large: %d)\n", err, err, size, try_alignment, commit, allow_large);
   }


   MI_UNUSED(tld_stats);
   mi_stats_t* stats = &_mi_stats_main;
   mi_stat_counter_increase(stats->mmap_calls, 1);
   if (p != NULL) {
     _mi_stat_increase(&stats->reserved, size);
-    if (commit) {
-      _mi_stat_increase(&stats->committed, size);
+    if (commit) {
+      _mi_stat_increase(&stats->committed, size);
       // seems needed for asan (or `mimalloc-test-api` fails)
       #ifdef MI_TRACK_ASAN
       if (*is_zero) { mi_track_mem_defined(p,size); }
       else { mi_track_mem_undefined(p,size); }
       #endif
     }
   }
   }
   return p;
 }
@@ -243,7 +243,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
     // over-allocate uncommitted (virtual) memory
     p = mi_os_prim_alloc(over_size, 1 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, is_zero, stats);
     if (p == NULL) return NULL;

     // set p to the aligned part in the full region
     // note: this is dangerous on Windows as VirtualFree needs the actual base pointer
     // this is handled though by having the `base` field in the memid's
@@ -259,7 +259,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
     // overallocate...
     p = mi_os_prim_alloc(over_size, 1, commit, false, is_large, is_zero, stats);
     if (p == NULL) return NULL;

     // and selectively unmap parts around the over-allocated area. (noop on sbrk)
     void* aligned_p = mi_align_up_ptr(p, alignment);
     size_t pre_size = (uint8_t*)aligned_p - (uint8_t*)p;
@@ -270,7 +270,7 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit
     if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, commit, stats); }
     // we can return the aligned pointer on `mmap` (and sbrk) systems
     p = aligned_p;
-    *base = aligned_p; // since we freed the pre part, `*base == p`.
+    *base = aligned_p; // since we freed the pre part, `*base == p`.
   }
 }

@@ -292,7 +292,7 @@ void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats) {
   void* p = mi_os_prim_alloc(size, 0, true, false, &os_is_large, &os_is_zero, stats);
   if (p != NULL) {
     *memid = _mi_memid_create_os(true, os_is_zero, os_is_large);
-  }
+  }
   return p;
 }

@@ -303,7 +303,7 @@ void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allo
   if (size == 0) return NULL;
   size = _mi_os_good_alloc_size(size);
   alignment = _mi_align_up(alignment, _mi_os_page_size());

   bool os_is_large = false;
   bool os_is_zero  = false;
   void* os_base = NULL;
@@ -318,7 +318,7 @@ void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allo

 /* -----------------------------------------------------------
   OS aligned allocation with an offset. This is used
-  for large alignments > MI_ALIGNMENT_MAX. We use a large mimalloc
+  for large alignments > MI_BLOCK_ALIGNMENT_MAX. We use a large mimalloc
   page where the object can be aligned at an offset from the start of the segment.
   As we may need to overallocate, we need to free such pointers using `mi_free_aligned`
   to use the actual start of the memory region.
@@ -381,7 +381,7 @@ static void* mi_os_page_align_area_conservative(void* addr, size_t size, size_t*

 bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats) {
   MI_UNUSED(tld_stats);
-  mi_stats_t* stats = &_mi_stats_main;
+  mi_stats_t* stats = &_mi_stats_main;
   if (is_zero != NULL) { *is_zero = false; }
   _mi_stat_increase(&stats->committed, size);  // use size for precise commit vs. decommit
   _mi_stat_counter_increase(&stats->commit_calls, 1);
@@ -391,21 +391,21 @@ bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats
   void* start = mi_os_page_align_areax(false /* conservative? */, addr, size, &csize);
   if (csize == 0) return true;

-  // commit
+  // commit
   bool os_is_zero = false;
-  int err = _mi_prim_commit(start, csize, &os_is_zero);
+  int err = _mi_prim_commit(start, csize, &os_is_zero);
   if (err != 0) {
     _mi_warning_message("cannot commit OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize);
     return false;
   }
-  if (os_is_zero && is_zero != NULL) {
+  if (os_is_zero && is_zero != NULL) {
     *is_zero = true;
     mi_assert_expensive(mi_mem_is_zero(start, csize));
   }
   // note: the following seems required for asan (otherwise `mimalloc-test-stress` fails)
   #ifdef MI_TRACK_ASAN
   if (os_is_zero) { mi_track_mem_defined(start,csize); }
-  else { mi_track_mem_undefined(start,csize); }
+  else { mi_track_mem_undefined(start,csize); }
   #endif
   return true;
 }
@@ -419,11 +419,11 @@ static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit, mi_
   // page align
   size_t csize;
   void* start = mi_os_page_align_area_conservative(addr, size, &csize);
-  if (csize == 0) return true;
+  if (csize == 0) return true;

   // decommit
   *needs_recommit = true;
-  int err = _mi_prim_decommit(start,csize,needs_recommit);
+  int err = _mi_prim_decommit(start,csize,needs_recommit);
   if (err != 0) {
     _mi_warning_message("cannot decommit OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize);
   }
@@ -441,7 +441,7 @@ bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* tld_stats) {
 // but may be used later again. This will release physical memory
 // pages and reduce swapping while keeping the memory committed.
 // We page align to a conservative area inside the range to reset.
-bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
+bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
   // page align conservatively within the range
   size_t csize;
   void* start = mi_os_page_align_area_conservative(addr, size, &csize);
@@ -461,7 +461,7 @@ bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
 }


-// either resets or decommits memory, returns true if the memory needs
+// either resets or decommits memory, returns true if the memory needs
 // to be recommitted if it is to be re-used later on.
 bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats)
 {
@@ -474,7 +474,7 @@ bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats)
   {
     bool needs_recommit = true;
     mi_os_decommit_ex(p, size, &needs_recommit, stats);
-    return needs_recommit;
+    return needs_recommit;
   }
   else {
     if (allow_reset) {  // this can sometimes be not allowed if the range is not fully committed
@@ -484,7 +484,7 @@ bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats)
   }
 }

-// either resets or decommits memory, returns true if the memory needs
+// either resets or decommits memory, returns true if the memory needs
 // to be recommitted if it is to be re-used later on.
 bool _mi_os_purge(void* p, size_t size, mi_stats_t * stats) {
   return _mi_os_purge_ex(p, size, true, stats);
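Restating the purge contract above (a sketch, using the `_mi_os_purge_ex` and `_mi_os_commit` signatures visible in this diff): decommit returns the pages to the OS and forces a recommit before reuse, while reset keeps them committed but discardable.

    static void purge_then_reuse(void* p, size_t size, mi_stats_t* stats) {
      bool needs_recommit = _mi_os_purge_ex(p, size, /*allow_reset*/ true, stats);
      if (needs_recommit) {
        _mi_os_commit(p, size, NULL, stats);  // must recommit before the range is reused
      }
    }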
@@ -1,5 +1,5 @@
 /*----------------------------------------------------------------------------
-Copyright (c) 2018-2020, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
 This is free software; you can redistribute it and/or modify it under the
 terms of the MIT license. A copy of the license can be found in the file
 "LICENSE" at the root of this distribution.
@@ -11,6 +11,10 @@ terms of the MIT license. A copy of the license can be found in the file

 #ifndef MI_IN_PAGE_C
 #error "this file should be included from 'page.c'"
+// include to help an IDE
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
 #endif

 /* -----------------------------------------------------------
@@ -137,21 +141,25 @@ static bool mi_heap_contains_queue(const mi_heap_t* heap, const mi_page_queue_t*
 }
 #endif

-static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) {
-  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : mi_bin(page->xblock_size));
-  mi_heap_t* heap = mi_page_heap(page);
-  mi_assert_internal(heap != NULL && bin <= MI_BIN_FULL);
-  mi_page_queue_t* pq = &heap->pages[bin];
-  mi_assert_internal(bin >= MI_BIN_HUGE || page->xblock_size == pq->block_size);
-  mi_assert_expensive(mi_page_queue_contains(pq, page));
-  return pq;
+static inline bool mi_page_is_large_or_huge(mi_page_t* page) {
+  return (mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_huge(page));
 }

 static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t* page) {
-  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : mi_bin(page->xblock_size));
+  mi_assert_internal(heap!=NULL);
+  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : (mi_page_is_huge(page) ? MI_BIN_HUGE : mi_bin(mi_page_block_size(page))));
   mi_assert_internal(bin <= MI_BIN_FULL);
   mi_page_queue_t* pq = &heap->pages[bin];
-  mi_assert_internal(mi_page_is_in_full(page) || page->xblock_size == pq->block_size);
+  mi_assert_internal((mi_page_block_size(page) == pq->block_size) ||
+                     (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(pq)) ||
+                     (mi_page_is_in_full(page) && mi_page_queue_is_full(pq)));
   return pq;
 }

+static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) {
+  mi_heap_t* heap = mi_page_heap(page);
+  mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
+  mi_assert_expensive(mi_page_queue_contains(pq, page));
+  return pq;
+}
+
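The queue-selection rule introduced above can be restated compactly (a hypothetical helper, using only names that appear in this diff):

    static uint8_t page_bin_of(const mi_page_t* page) {
      if (mi_page_is_in_full(page)) return MI_BIN_FULL;  // full pages live in the full queue
      if (mi_page_is_huge(page))    return MI_BIN_HUGE;  // huge pages now bin by flag, not by size
      return mi_bin(mi_page_block_size(page));           // otherwise bin by block size
    }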
@@ -206,7 +214,9 @@ static bool mi_page_queue_is_empty(mi_page_queue_t* queue) {
 static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
   mi_assert_internal(page != NULL);
   mi_assert_expensive(mi_page_queue_contains(queue, page));
-  mi_assert_internal(page->xblock_size == queue->block_size || (page->xblock_size > MI_MEDIUM_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
+  mi_assert_internal(mi_page_block_size(page) == queue->block_size ||
+                     (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(queue)) ||
+                     (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
   mi_heap_t* heap = mi_page_heap(page);

   if (page->prev != NULL) page->prev->next = page->next;
@@ -232,8 +242,8 @@ static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_
   #if MI_HUGE_PAGE_ABANDON
   mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
   #endif
-  mi_assert_internal(page->xblock_size == queue->block_size ||
-                     (page->xblock_size > MI_MEDIUM_OBJ_SIZE_MAX) ||
+  mi_assert_internal(mi_page_block_size(page) == queue->block_size ||
+                     (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(queue)) ||
                      (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));

   mi_page_set_in_full(page, mi_page_queue_is_full(queue));
@@ -259,12 +269,13 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro
   mi_assert_internal(page != NULL);
   mi_assert_expensive(mi_page_queue_contains(from, page));
   mi_assert_expensive(!mi_page_queue_contains(to, page));
-
-  mi_assert_internal((page->xblock_size == to->block_size && page->xblock_size == from->block_size) ||
-                     (page->xblock_size == to->block_size && mi_page_queue_is_full(from)) ||
-                     (page->xblock_size == from->block_size && mi_page_queue_is_full(to)) ||
-                     (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(to)) ||
-                     (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_full(to)));
+  const size_t bsize = mi_page_block_size(page);
+  MI_UNUSED(bsize);
+  mi_assert_internal((bsize == to->block_size && bsize == from->block_size) ||
+                     (bsize == to->block_size && mi_page_queue_is_full(from)) ||
+                     (bsize == from->block_size && mi_page_queue_is_full(to)) ||
+                     (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(to)) ||
+                     (mi_page_is_large_or_huge(page) && mi_page_queue_is_full(to)));

   mi_heap_t* heap = mi_page_heap(page);
   if (page->prev != NULL) page->prev->next = page->next;
src/page.c (71 changed lines)
@@ -1,5 +1,5 @@
 /*----------------------------------------------------------------------------
-Copyright (c) 2018-2020, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2024, Microsoft Research, Daan Leijen
 This is free software; you can redistribute it and/or modify it under the
 terms of the MIT license. A copy of the license can be found in the file
 "LICENSE" at the root of this distribution.
@@ -59,7 +59,7 @@ static inline uint8_t* mi_page_area(const mi_page_t* page) {

 static bool mi_page_list_is_valid(mi_page_t* page, mi_block_t* p) {
   size_t psize;
-  uint8_t* page_area = _mi_page_start(_mi_page_segment(page), page, &psize);
+  uint8_t* page_area = _mi_segment_page_start(_mi_page_segment(page), page, &psize);
   mi_block_t* start = (mi_block_t*)page_area;
   mi_block_t* end   = (mi_block_t*)(page_area + psize);
   while(p != NULL) {
@@ -78,13 +78,12 @@ static bool mi_page_list_is_valid(mi_page_t* page, mi_block_t* p) {
 }

 static bool mi_page_is_valid_init(mi_page_t* page) {
-  mi_assert_internal(page->xblock_size > 0);
+  mi_assert_internal(mi_page_block_size(page) > 0);
   mi_assert_internal(page->used <= page->capacity);
   mi_assert_internal(page->capacity <= page->reserved);

-  mi_segment_t* segment = _mi_page_segment(page);
-  uint8_t* start = _mi_page_start(segment,page,NULL);
-  mi_assert_internal(start == _mi_segment_page_start(segment,page,NULL));
+  uint8_t* start = mi_page_start(page);
+  mi_assert_internal(start == _mi_segment_page_start(_mi_page_segment(page), page, NULL));
   //const size_t bsize = mi_page_block_size(page);
   //mi_assert_internal(start + page->capacity*page->block_size == page->top);

@@ -282,11 +281,13 @@ static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size
     // this may be out-of-memory, or an abandoned page was reclaimed (and in our queue)
     return NULL;
   }
   #if MI_HUGE_PAGE_ABANDON
   mi_assert_internal(pq==NULL || _mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
   #endif
-  mi_assert_internal(pq!=NULL || page->xblock_size != 0);
+  mi_assert_internal(page_alignment >0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || _mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
+  mi_assert_internal(pq!=NULL || mi_page_block_size(page) >= block_size);
   // a fresh page was found, initialize it
-  const size_t full_block_size = ((pq == NULL || mi_page_queue_is_huge(pq)) ? mi_page_block_size(page) : block_size); // see also: mi_segment_huge_page_alloc
+  const size_t full_block_size = (pq == NULL || mi_page_is_huge(page) ? mi_page_block_size(page) : block_size); // see also: mi_segment_huge_page_alloc
+  mi_assert_internal(full_block_size >= block_size);
   mi_page_init(heap, page, full_block_size, heap->tld);
   mi_heap_stat_increase(heap, pages, 1);
@@ -427,8 +428,7 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
   _mi_segment_page_free(page, force, segments_tld);
 }

-// Retire parameters
-#define MI_MAX_RETIRE_SIZE    (MI_MEDIUM_OBJ_SIZE_MAX)
+#define MI_MAX_RETIRE_SIZE    MI_LARGE_OBJ_SIZE_MAX   // should be less than size for MI_BIN_HUGE
 #define MI_RETIRE_CYCLES      (16)

 // Retire a page with no more used blocks
@@ -451,10 +451,11 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
   // how to check this efficiently though...
   // for now, we don't retire if it is the only page left of this size class.
   mi_page_queue_t* pq = mi_page_queue_of(page);
-  if mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_queue_is_special(pq)) {  // not too large && not full or huge queue?
+  const size_t bsize = mi_page_block_size(page);
+  if mi_likely( /* bsize < MI_MAX_RETIRE_SIZE && */ !mi_page_queue_is_special(pq)) {  // not full or huge queue?
     if (pq->last==page && pq->first==page) { // the only page in the queue?
       mi_stat_counter_increase(_mi_stats_main.page_no_retire,1);
-      page->retire_expire = 1 + (page->xblock_size <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
+      page->retire_expire = (bsize <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
       mi_heap_t* heap = mi_page_heap(page);
       mi_assert_internal(pq >= heap->pages);
       const size_t index = pq - heap->pages;
@@ -516,7 +517,7 @@ static void mi_page_free_list_extend_secure(mi_heap_t* const heap, mi_page_t* co
   #endif
   mi_assert_internal(page->capacity + extend <= page->reserved);
   mi_assert_internal(bsize == mi_page_block_size(page));
-  void* const page_area = _mi_page_start(_mi_page_segment(page), page, NULL);
+  void* const page_area = mi_page_start(page);

   // initialize a randomized free list
   // set up `slice_count` slices to alternate between
@@ -574,7 +575,7 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co
   #endif
   mi_assert_internal(page->capacity + extend <= page->reserved);
   mi_assert_internal(bsize == mi_page_block_size(page));
-  void* const page_area = _mi_page_start(_mi_page_segment(page), page, NULL );
+  void* const page_area = mi_page_start(page);

   mi_block_t* const start = mi_page_block_at(page, page_area, bsize, page->capacity);

@@ -617,16 +618,14 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld)
   #endif
   if (page->capacity >= page->reserved) return;

-  size_t page_size;
-  _mi_page_start(_mi_page_segment(page), page, &page_size);
   mi_stat_counter_increase(tld->stats.pages_extended, 1);

   // calculate the extend count
-  const size_t bsize = (page->xblock_size < MI_HUGE_BLOCK_SIZE ? page->xblock_size : page_size);
+  const size_t bsize = mi_page_block_size(page);
   size_t extend = page->reserved - page->capacity;
   mi_assert_internal(extend > 0);

-  size_t max_extend = (bsize >= MI_MAX_EXTEND_SIZE ? MI_MIN_EXTEND : MI_MAX_EXTEND_SIZE/(uint32_t)bsize);
+  size_t max_extend = (bsize >= MI_MAX_EXTEND_SIZE ? MI_MIN_EXTEND : MI_MAX_EXTEND_SIZE/bsize);
   if (max_extend < MI_MIN_EXTEND) { max_extend = MI_MIN_EXTEND; }
   mi_assert_internal(max_extend > 0);

@@ -660,10 +659,10 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   mi_assert_internal(block_size > 0);
   // set fields
   mi_page_set_heap(page, heap);
-  page->xblock_size = (block_size < MI_HUGE_BLOCK_SIZE ? (uint32_t)block_size : MI_HUGE_BLOCK_SIZE); // initialize before _mi_segment_page_start
+  page->block_size = block_size;
   size_t page_size;
-  const void* page_start = _mi_segment_page_start(segment, page, &page_size);
-  mi_track_mem_noaccess(page_start,page_size);
+  page->page_start = _mi_segment_page_start(segment, page, &page_size);
+  mi_track_mem_noaccess(page->page_start,page_size);
   mi_assert_internal(mi_page_block_size(page) <= page_size);
   mi_assert_internal(page_size <= page->slice_count*MI_SEGMENT_SLICE_SIZE);
   mi_assert_internal(page_size / block_size < (1L<<16));
@@ -677,21 +676,15 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   #if MI_DEBUG>2
   if (page->is_zero_init) {
     mi_track_mem_defined(page_start, page_size);
-    mi_assert_expensive(mi_mem_is_zero(page_start, page_size));
+    mi_assert_expensive(mi_mem_is_zero(page->page_start, page_size));
   }
   #endif
   mi_assert_internal(page->is_committed);
   if (block_size > 0 && _mi_is_power_of_two(block_size)) {
     page->block_size_shift = (uint8_t)(mi_ctz((uintptr_t)block_size));
   }
-  if (block_size > 0) {
-    const ptrdiff_t start_offset = (uint8_t*)page_start - (uint8_t*)page;
-    const ptrdiff_t start_adjust = start_offset % block_size;
-    if (start_offset >= 0 && (start_adjust % MI_MAX_ALIGN_SIZE) == 0 && (start_adjust / MI_MAX_ALIGN_SIZE) < 255) {
-      const ptrdiff_t adjust = (start_adjust / MI_MAX_ALIGN_SIZE);
-      mi_assert_internal(adjust + 1 == (uint8_t)(adjust + 1));
-      page->block_offset_adj = (uint8_t)(adjust + 1);
-    }
-  }
+  else {
+    page->block_size_shift = 0;
+  }

   mi_assert_internal(page->capacity == 0);
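`block_size_shift` is only meaningful for power-of-two block sizes, which is exactly what the branch above establishes. An equivalent standalone computation (a sketch; `__builtin_ctzll` is a GCC/Clang builtin standing in for the diff's `mi_ctz`):

    #include <stdint.h>
    #include <stddef.h>

    static inline uint8_t shift_of(size_t block_size) {
      if (block_size > 0 && (block_size & (block_size - 1)) == 0) {
        return (uint8_t)__builtin_ctzll(block_size);  // e.g. 64 -> 6
      }
      return 0;  // zero disables the fast path in _mi_page_ptr_unalign
    }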
@@ -706,8 +699,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   mi_assert_internal(page->keys[0] != 0);
   mi_assert_internal(page->keys[1] != 0);
   #endif
-  mi_assert_internal(page->block_size_shift == 0 || (block_size == (1UL << page->block_size_shift)));
-  mi_assert_internal(page->block_offset_adj == 0 || (((uint8_t*)page_start - (uint8_t*)page - MI_MAX_ALIGN_SIZE*(page->block_offset_adj-1))) % block_size == 0);
+  mi_assert_internal(page->block_size_shift == 0 || (block_size == ((size_t)1 << page->block_size_shift)));
   mi_assert_expensive(mi_page_is_valid_init(page));

   // initialize an initial free list
@@ -833,11 +825,9 @@ void mi_register_deferred_free(mi_deferred_free_fun* fn, void* arg) mi_attr_noex
 ----------------------------------------------------------- */

 // Large and huge page allocation.
-// Huge pages are allocated directly without being in a queue.
-// Because huge pages contain just one block, and the segment contains
-// just that page, we always treat them as abandoned and any thread
-// that frees the block can free the whole page and segment directly.
-// Huge pages are also use if the requested alignment is very large (> MI_ALIGNMENT_MAX).
+// Huge pages contain just one block, and the segment contains just that page (as `MI_SEGMENT_HUGE`).
+// Huge pages are also use if the requested alignment is very large (> MI_BLOCK_ALIGNMENT_MAX)
+// so their size is not always `> MI_LARGE_OBJ_SIZE_MAX`.
 static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_alignment) {
   size_t block_size = _mi_os_good_alloc_size(size);
   mi_assert_internal(mi_bin(block_size) == MI_BIN_HUGE || page_alignment > 0);
@@ -845,7 +835,7 @@ static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size, size_t
   #if MI_HUGE_PAGE_ABANDON
   mi_page_queue_t* pq = (is_huge ? NULL : mi_page_queue(heap, block_size));
   #else
-  mi_page_queue_t* pq = mi_page_queue(heap, is_huge ? MI_HUGE_BLOCK_SIZE : block_size); // not block_size as that can be low if the page_alignment > 0
+  mi_page_queue_t* pq = mi_page_queue(heap, is_huge ? MI_LARGE_OBJ_SIZE_MAX+1 : block_size);
   mi_assert_internal(!is_huge || mi_page_queue_is_huge(pq));
   #endif
   mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size, page_alignment);
@@ -853,6 +843,7 @@ static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size, size_t
   mi_assert_internal(mi_page_immediate_available(page));

   if (is_huge) {
+    mi_assert_internal(mi_page_is_huge(page));
     mi_assert_internal(_mi_page_segment(page)->kind == MI_SEGMENT_HUGE);
     mi_assert_internal(_mi_page_segment(page)->used==1);
     #if MI_HUGE_PAGE_ABANDON
@@ -861,7 +852,7 @@ static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size, size_t
     #endif
   }
   else {
-    mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
+    mi_assert_internal(!mi_page_is_huge(page));
   }

   const size_t bsize = mi_page_usable_block_size(page);  // note: not `mi_page_block_size` to account for padding
@@ -939,7 +930,7 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_al
   mi_assert_internal(mi_page_block_size(page) >= size);

   // and try again, this time succeeding! (i.e. this should never recurse through _mi_page_malloc)
-  if mi_unlikely(zero && page->xblock_size == 0) {
+  if mi_unlikely(zero && page->block_size == 0) {
     // note: we cannot call _mi_page_malloc with zeroing for huge blocks; we zero it afterwards in that case.
     void* p = _mi_page_malloc(heap, page, size, false);
     mi_assert_internal(p != NULL);
@@ -212,7 +212,7 @@ static void mi_span_queue_push(mi_span_queue_t* sq, mi_slice_t* slice) {
   sq->first = slice;
   if (slice->next != NULL) slice->next->prev = slice;
                       else sq->last = slice;
-  slice->xblock_size = 0; // free
+  slice->block_size = 0; // free
 }

 static mi_span_queue_t* mi_span_queue_for(size_t slice_count, mi_segments_tld_t* tld) {
@@ -223,7 +223,7 @@ static mi_span_queue_t* mi_span_queue_for(size_t slice_count, mi_segments_tld_t*
 }

 static void mi_span_queue_delete(mi_span_queue_t* sq, mi_slice_t* slice) {
-  mi_assert_internal(slice->xblock_size==0 && slice->slice_count>0 && slice->slice_offset==0);
+  mi_assert_internal(slice->block_size==0 && slice->slice_count>0 && slice->slice_offset==0);
   // should work too if the queue does not contain slice (which can happen during reclaim)
   if (slice->prev != NULL) slice->prev->next = slice->next;
   if (slice == sq->first) sq->first = slice->next;
@@ -231,7 +231,7 @@ static void mi_span_queue_delete(mi_span_queue_t* sq, mi_slice_t* slice) {
   if (slice == sq->last) sq->last = slice->prev;
   slice->prev = NULL;
   slice->next = NULL;
-  slice->xblock_size = 1; // no more free
+  slice->block_size = 1; // no more free
 }

@@ -240,7 +240,7 @@ static void mi_span_queue_delete(mi_span_queue_t* sq, mi_slice_t* slice) {
 ----------------------------------------------------------- */

 static bool mi_slice_is_used(const mi_slice_t* slice) {
-  return (slice->xblock_size > 0);
+  return (slice->block_size > 0);
 }

@@ -270,17 +270,18 @@ static bool mi_segment_is_valid(mi_segment_t* segment, mi_segments_tld_t* tld) {
     size_t maxindex = (index + slice->slice_count >= segment->slice_entries ? segment->slice_entries : index + slice->slice_count) - 1;
     if (mi_slice_is_used(slice)) { // a page in use, we need at least MAX_SLICE_OFFSET valid back offsets
       used_count++;
+      if (segment->kind == MI_SEGMENT_HUGE) { mi_assert_internal(slice->is_huge); }
       for (size_t i = 0; i <= MI_MAX_SLICE_OFFSET && index + i <= maxindex; i++) {
         mi_assert_internal(segment->slices[index + i].slice_offset == i*sizeof(mi_slice_t));
         mi_assert_internal(i==0 || segment->slices[index + i].slice_count == 0);
-        mi_assert_internal(i==0 || segment->slices[index + i].xblock_size == 1);
+        mi_assert_internal(i==0 || segment->slices[index + i].block_size == 1);
       }
       // and the last entry as well (for coalescing)
       const mi_slice_t* last = slice + slice->slice_count - 1;
       if (last > slice && last < mi_segment_slices_end(segment)) {
         mi_assert_internal(last->slice_offset == (slice->slice_count-1)*sizeof(mi_slice_t));
         mi_assert_internal(last->slice_count == 0);
-        mi_assert_internal(last->xblock_size == 1);
+        mi_assert_internal(last->block_size == 1);
       }
     }
     else { // free range of slices; only last slice needs a valid back offset
@@ -289,7 +290,7 @@ static bool mi_segment_is_valid(mi_segment_t* segment, mi_segments_tld_t* tld) {
         mi_assert_internal((uint8_t*)slice == (uint8_t*)last - last->slice_offset);
       }
       mi_assert_internal(slice == last || last->slice_count == 0 );
-      mi_assert_internal(last->xblock_size == 0 || (segment->kind==MI_SEGMENT_HUGE && last->xblock_size==1));
+      mi_assert_internal(last->block_size == 0 || (segment->kind==MI_SEGMENT_HUGE && last->block_size==1));
       if (segment->kind != MI_SEGMENT_HUGE && segment->thread_id != 0) { // segment is not huge or abandoned
         sq = mi_span_queue_for(slice->slice_count,tld);
         mi_assert_internal(mi_span_queue_contains(sq,slice));
@ -331,8 +332,8 @@ static uint8_t* _mi_segment_page_start_from_slice(const mi_segment_t* segment, c
|
|||
uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size)
|
||||
{
|
||||
const mi_slice_t* slice = mi_page_to_slice((mi_page_t*)page);
|
||||
uint8_t* p = _mi_segment_page_start_from_slice(segment, slice, page->xblock_size, page_size);
|
||||
mi_assert_internal(page->xblock_size > 0 || _mi_ptr_page(p) == page);
|
||||
uint8_t* p = _mi_segment_page_start_from_slice(segment, slice, mi_page_block_size(page), page_size);
|
||||
mi_assert_internal(mi_page_block_size(page) > 0 || _mi_ptr_page(p) == page);
|
||||
mi_assert_internal(_mi_ptr_segment(p) == segment);
|
||||
return p;
|
||||
}
|
||||
|
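`_mi_segment_page_start` now goes through the `mi_page_block_size` accessor instead of reading `page->xblock_size` directly. Assuming the new field stores the size verbatim (a sketch with a stand-in type, not the actual mimalloc definition):

#include <stddef.h>

// Hypothetical stand-in for the relevant part of mi_page_t.
typedef struct sketch_page_s { size_t block_size; } sketch_page_t;

// With a full-width field the accessor can return the stored value directly,
// instead of decoding a clamped 32-bit xblock_size.
static inline size_t sketch_page_block_size(const sketch_page_t* page) {
  return page->block_size;
}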
@@ -620,7 +621,7 @@ static void mi_segment_span_free(mi_segment_t* segment, size_t slice_index, size
     mi_slice_t* last = &segment->slices[slice_index + slice_count - 1];
     last->slice_count = 0;
     last->slice_offset = (uint32_t)(sizeof(mi_page_t)*(slice_count - 1));
-    last->xblock_size = 0;
+    last->block_size = 0;
   }

   // perhaps decommit

@@ -630,7 +631,7 @@ static void mi_segment_span_free(mi_segment_t* segment, size_t slice_index, size

   // and push it on the free page queue (if it was not a huge page)
   if (sq != NULL) mi_span_queue_push( sq, slice );
-  else slice->xblock_size = 0; // mark huge page as free anyways
+  else slice->block_size = 0; // mark huge page as free anyways
 }

 /*

@@ -644,7 +645,7 @@ static void mi_segment_span_add_free(mi_slice_t* slice, mi_segments_tld_t* tld)
 */

 static void mi_segment_span_remove_from_queue(mi_slice_t* slice, mi_segments_tld_t* tld) {
-  mi_assert_internal(slice->slice_count > 0 && slice->slice_offset==0 && slice->xblock_size==0);
+  mi_assert_internal(slice->slice_count > 0 && slice->slice_offset==0 && slice->block_size==0);
   mi_assert_internal(_mi_ptr_segment(slice)->kind != MI_SEGMENT_HUGE);
   mi_span_queue_t* sq = mi_span_queue_for(slice->slice_count, tld);
   mi_span_queue_delete(sq, slice);

@@ -659,8 +660,8 @@ static mi_slice_t* mi_segment_span_free_coalesce(mi_slice_t* slice, mi_segments_
   // for huge pages, just mark as free but don't add to the queues
   if (segment->kind == MI_SEGMENT_HUGE) {
     // issue #691: segment->used can be 0 if the huge page block was freed while abandoned (reclaim will get here in that case)
-    mi_assert_internal((segment->used==0 && slice->xblock_size==0) || segment->used == 1); // decreased right after this call in `mi_segment_page_clear`
-    slice->xblock_size = 0; // mark as free anyways
+    mi_assert_internal((segment->used==0 && slice->block_size==0) || segment->used == 1); // decreased right after this call in `mi_segment_page_clear`
+    slice->block_size = 0; // mark as free anyways
     // we should mark the last slice `xblock_size=0` now to maintain invariants but we skip it to
     // avoid a possible cache miss (and the segment is about to be freed)
     return slice;

@@ -670,7 +671,7 @@ static mi_slice_t* mi_segment_span_free_coalesce(mi_slice_t* slice, mi_segments_
   size_t slice_count = slice->slice_count;
   mi_slice_t* next = slice + slice->slice_count;
   mi_assert_internal(next <= mi_segment_slices_end(segment));
-  if (next < mi_segment_slices_end(segment) && next->xblock_size==0) {
+  if (next < mi_segment_slices_end(segment) && next->block_size==0) {
     // free next block -- remove it from free and merge
     mi_assert_internal(next->slice_count > 0 && next->slice_offset==0);
     slice_count += next->slice_count; // extend

@@ -679,7 +680,7 @@ static mi_slice_t* mi_segment_span_free_coalesce(mi_slice_t* slice, mi_segments_
   if (slice > segment->slices) {
     mi_slice_t* prev = mi_slice_first(slice - 1);
     mi_assert_internal(prev >= segment->slices);
-    if (prev->xblock_size==0) {
+    if (prev->block_size==0) {
       // free previous slice -- remove it from free and merge
       mi_assert_internal(prev->slice_count > 0 && prev->slice_offset==0);
       slice_count += prev->slice_count;

@@ -703,7 +704,7 @@ static mi_slice_t* mi_segment_span_free_coalesce(mi_slice_t* slice, mi_segments_
 static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_index, size_t slice_count, mi_segments_tld_t* tld) {
   mi_assert_internal(slice_index < segment->slice_entries);
   mi_slice_t* const slice = &segment->slices[slice_index];
-  mi_assert_internal(slice->xblock_size==0 || slice->xblock_size==1);
+  mi_assert_internal(slice->block_size==0 || slice->block_size==1);

   // commit before changing the slice data
   if (!mi_segment_ensure_committed(segment, _mi_segment_page_start_from_slice(segment, slice, 0, NULL), slice_count * MI_SEGMENT_SLICE_SIZE, tld->stats)) {

@@ -715,7 +716,7 @@ static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_i
   slice->slice_count = (uint32_t)slice_count;
   mi_assert_internal(slice->slice_count == slice_count);
   const size_t bsize = slice_count * MI_SEGMENT_SLICE_SIZE;
-  slice->xblock_size = (uint32_t)(bsize >= MI_HUGE_BLOCK_SIZE ? MI_HUGE_BLOCK_SIZE : bsize);
+  slice->block_size = bsize;
   mi_page_t* page = mi_slice_to_page(slice);
   mi_assert_internal(mi_page_block_size(page) == bsize);
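This hunk carries the main semantic change of the commit: `bsize` is stored verbatim in the new `size_t` field, where the old 32-bit `xblock_size` had to saturate at `MI_HUGE_BLOCK_SIZE`. A before/after sketch (the limit value here is a placeholder, not the real constant):

#include <stdint.h>
#include <stddef.h>

#define SKETCH_HUGE_BLOCK_SIZE ((uint32_t)(2u << 20)) // placeholder limit

static uint32_t sketch_old_xblock_size(size_t bsize) { // saturating 32-bit encoding
  return (uint32_t)(bsize >= SKETCH_HUGE_BLOCK_SIZE ? SKETCH_HUGE_BLOCK_SIZE : bsize);
}

static size_t sketch_new_block_size(size_t bsize) {    // stored exactly
  return bsize;
}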
@@ -728,7 +729,7 @@ static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_i
   for (size_t i = 1; i <= extra; i++, slice_next++) {
     slice_next->slice_offset = (uint32_t)(sizeof(mi_slice_t)*i);
     slice_next->slice_count = 0;
-    slice_next->xblock_size = 1;
+    slice_next->block_size = 1;
   }

   // and also for the last one (if not set already) (the last one is needed for coalescing and for large alignments)

@@ -739,11 +740,12 @@ static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_i
   if (last > slice) {
     last->slice_offset = (uint32_t)(sizeof(mi_slice_t) * (last - slice));
     last->slice_count = 0;
-    last->xblock_size = 1;
+    last->block_size = 1;
   }

   // and initialize the page
   page->is_committed = true;
+  page->is_huge = (segment->kind == MI_SEGMENT_HUGE);
   segment->used++;
   return page;
 }
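Besides the rename, this hunk initializes the new `is_huge` page field, caching the segment kind at page setup so later checks need not dereference the segment. A self-contained sketch with hypothetical stand-in types:

#include <stdbool.h>

typedef enum { SKETCH_SEGMENT_NORMAL, SKETCH_SEGMENT_HUGE } sketch_segment_kind_t;
typedef struct { bool is_committed; bool is_huge; } sketch_page_flags_t;

static void sketch_page_init(sketch_page_flags_t* page, sketch_segment_kind_t kind) {
  page->is_committed = true;
  page->is_huge = (kind == SKETCH_SEGMENT_HUGE); // mirrors the added line above
}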
@@ -751,7 +753,7 @@ static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_i
 static void mi_segment_slice_split(mi_segment_t* segment, mi_slice_t* slice, size_t slice_count, mi_segments_tld_t* tld) {
   mi_assert_internal(_mi_ptr_segment(slice) == segment);
   mi_assert_internal(slice->slice_count >= slice_count);
-  mi_assert_internal(slice->xblock_size > 0); // no more in free queue
+  mi_assert_internal(slice->block_size > 0); // no more in free queue
   if (slice->slice_count <= slice_count) return;
   mi_assert_internal(segment->kind != MI_SEGMENT_HUGE);
   size_t next_index = mi_slice_index(slice) + slice_count;

@@ -777,7 +779,7 @@ static mi_page_t* mi_segments_page_find_and_allocate(size_t slice_count, mi_aren
       if (slice->slice_count > slice_count) {
         mi_segment_slice_split(segment, slice, slice_count, tld);
       }
-      mi_assert_internal(slice != NULL && slice->slice_count == slice_count && slice->xblock_size > 0);
+      mi_assert_internal(slice != NULL && slice->slice_count == slice_count && slice->block_size > 0);
       mi_page_t* page = mi_segment_span_allocate(segment, mi_slice_index(slice), slice->slice_count, tld);
       if (page == NULL) {
         // commit failed; return NULL but first restore the slice

@@ -954,8 +956,8 @@ static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t
   while (slice < end) {
     mi_assert_internal(slice->slice_count > 0);
     mi_assert_internal(slice->slice_offset == 0);
-    mi_assert_internal(mi_slice_index(slice)==0 || slice->xblock_size == 0); // no more used pages ..
-    if (slice->xblock_size == 0 && segment->kind != MI_SEGMENT_HUGE) {
+    mi_assert_internal(mi_slice_index(slice)==0 || slice->block_size == 0); // no more used pages ..
+    if (slice->block_size == 0 && segment->kind != MI_SEGMENT_HUGE) {
       mi_segment_span_remove_from_queue(slice, tld);
     }
     #if MI_DEBUG>1

@@ -981,7 +983,7 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld);

 // note: can be called on abandoned pages
 static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld) {
-  mi_assert_internal(page->xblock_size > 0);
+  mi_assert_internal(page->block_size > 0);
   mi_assert_internal(mi_page_all_free(page));
   mi_segment_t* segment = _mi_ptr_segment(page);
   mi_assert_internal(segment->used > 0);

@@ -993,7 +995,7 @@ static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld
   // reset the page memory to reduce memory pressure?
   if (segment->allow_decommit && mi_option_is_enabled(mi_option_deprecated_page_reset)) {
     size_t psize;
-    uint8_t* start = _mi_page_start(segment, page, &psize);
+    uint8_t* start = _mi_segment_page_start(segment, page, &psize);
     _mi_os_reset(start, psize, tld->stats);
   }

@@ -1001,7 +1003,7 @@ static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld
   page->is_zero_init = false;
   ptrdiff_t ofs = offsetof(mi_page_t, capacity);
   _mi_memzero((uint8_t*)page + ofs, sizeof(*page) - ofs);
-  page->xblock_size = 1;
+  page->block_size = 1;

   // and free it
   mi_slice_t* slice = mi_segment_span_free_coalesce(mi_page_to_slice(page), tld);

@@ -1076,9 +1078,9 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
   while (slice < end) {
     mi_assert_internal(slice->slice_count > 0);
     mi_assert_internal(slice->slice_offset == 0);
-    if (slice->xblock_size == 0) { // a free page
+    if (slice->block_size == 0) { // a free page
       mi_segment_span_remove_from_queue(slice,tld);
-      slice->xblock_size = 0; // but keep it free
+      slice->block_size = 0; // but keep it free
     }
     slice = slice + slice->slice_count;
   }

@@ -1125,7 +1127,7 @@ void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) {
 static mi_slice_t* mi_slices_start_iterate(mi_segment_t* segment, const mi_slice_t** end) {
   mi_slice_t* slice = &segment->slices[0];
   *end = mi_segment_slices_end(segment);
-  mi_assert_internal(slice->slice_count>0 && slice->xblock_size>0); // segment allocated page
+  mi_assert_internal(slice->slice_count>0 && slice->block_size>0); // segment allocated page
   slice = slice + slice->slice_count; // skip the first segment allocated page
   return slice;
 }

@@ -1133,7 +1135,6 @@ static mi_slice_t* mi_slices_start_iterate(mi_segment_t* segment, const mi_slice
 // Possibly free pages and check if free space is available
 static bool mi_segment_check_free(mi_segment_t* segment, size_t slices_needed, size_t block_size, mi_segments_tld_t* tld)
 {
-  mi_assert_internal(block_size < MI_HUGE_BLOCK_SIZE);
   mi_assert_internal(mi_segment_is_abandoned(segment));
   bool has_page = false;
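The deleted assertion follows from the un-clamped field: block sizes no longer need to stay below the `MI_HUGE_BLOCK_SIZE` sentinel, so only the large-object limit (asserted in the caller shown further down) remains meaningful. A sketch of the relaxed precondition, with an assumed limit parameter:

#include <assert.h>
#include <stddef.h>

static void sketch_check_block_size(size_t block_size, size_t large_obj_max) {
  // old: assert(block_size < MI_HUGE_BLOCK_SIZE);  (dropped in this commit)
  assert(block_size <= large_obj_max);
}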
@@ -1158,11 +1159,9 @@ static bool mi_segment_check_free(mi_segment_t* segment, size_t slices_needed, s
         has_page = true;
       }
     }
-    else {
-      if (page->xblock_size == block_size && mi_page_has_any_available(page)) {
-        // a page has available free blocks of the right size
-        has_page = true;
-      }
-    }
+    else if (mi_page_block_size(page) == block_size && mi_page_has_any_available(page)) {
+      // a page has available free blocks of the right size
+      has_page = true;
+    }
   }
   else {

@@ -1216,7 +1215,7 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
     else {
       // otherwise reclaim it into the heap
       _mi_page_reclaim(heap, page);
-      if (requested_block_size == page->xblock_size && mi_page_has_any_available(page)) {
+      if (requested_block_size == mi_page_block_size(page) && mi_page_has_any_available(page)) {
        if (right_page_reclaimed != NULL) { *right_page_reclaimed = true; }
      }
    }
@@ -1342,7 +1341,6 @@ void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld)

 static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_slices, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
 {
-  mi_assert_internal(block_size < MI_HUGE_BLOCK_SIZE);
   mi_assert_internal(block_size <= MI_LARGE_OBJ_SIZE_MAX);

   // 1. try to reclaim an abandoned segment

@@ -1409,11 +1407,12 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment,
   segment->thread_id = 0; // huge segments are immediately abandoned
   #endif

-  // for huge pages we initialize the xblock_size as we may
+  // for huge pages we initialize the block_size as we may
   // overallocate to accommodate large alignments.
   size_t psize;
   uint8_t* start = _mi_segment_page_start(segment, page, &psize);
-  page->xblock_size = (psize > MI_HUGE_BLOCK_SIZE ? MI_HUGE_BLOCK_SIZE : (uint32_t)psize);
+  page->block_size = psize;
+  mi_assert_internal(page->is_huge);

   // decommit the part of the prefix of a page that will not be used; this can be quite large (close to MI_SEGMENT_SIZE)
   if (page_alignment > 0 && segment->allow_decommit) {
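For huge pages, the page size `psize` (which may include over-allocation for large alignments) is now recorded as the block size without truncation; the old code had to clamp it into a `uint32_t`. A sketch of the difference under that assumption:

#include <stdint.h>
#include <stddef.h>

#define SKETCH_HUGE_BLOCK_SIZE ((uint32_t)(2u << 20)) // placeholder limit

static size_t sketch_huge_block_size(size_t psize) {
  // old: return (psize > SKETCH_HUGE_BLOCK_SIZE ? SKETCH_HUGE_BLOCK_SIZE : (uint32_t)psize);
  return psize; // new: the exact (possibly over-allocated) size is kept
}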
@@ -1480,7 +1479,7 @@ void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_bloc
 ----------------------------------------------------------- */
 mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
   mi_page_t* page;
-  if mi_unlikely(page_alignment > MI_ALIGNMENT_MAX) {
+  if mi_unlikely(page_alignment > MI_BLOCK_ALIGNMENT_MAX) {
     mi_assert_internal(_mi_is_power_of_two(page_alignment));
     mi_assert_internal(page_alignment >= MI_SEGMENT_SIZE);
     if (page_alignment < MI_SEGMENT_SIZE) { page_alignment = MI_SEGMENT_SIZE; }
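The renamed `MI_BLOCK_ALIGNMENT_MAX` draws the line between the regular aligned-allocation path and the over-sized huge-page path taken in the hunk above. A usage sketch, following the include convention of the test file changed later in this commit:

#include "mimalloc.h"
#include "mimalloc/types.h" // for MI_BLOCK_ALIGNMENT_MAX (internal header)
#include <stdio.h>

int main(void) {
  void* a = mi_malloc_aligned(1024, 64);                       // regular path
  void* b = mi_malloc_aligned(1024, MI_BLOCK_ALIGNMENT_MAX);   // boundary case
  void* c = mi_malloc_aligned(1024, 2*MI_BLOCK_ALIGNMENT_MAX); // over-sized path
  printf("%p %p %p\n", a, b, c);
  mi_free(a); mi_free(b); mi_free(c);
  return 0;
}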
@@ -457,7 +457,7 @@ mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, s
   pinfo.page_faults = 0;

   _mi_prim_process_info(&pinfo);

   if (elapsed_msecs!=NULL) *elapsed_msecs = (pinfo.elapsed < 0 ? 0 : (pinfo.elapsed < (mi_msecs_t)PTRDIFF_MAX ? (size_t)pinfo.elapsed : PTRDIFF_MAX));
   if (user_msecs!=NULL) *user_msecs = (pinfo.utime < 0 ? 0 : (pinfo.utime < (mi_msecs_t)PTRDIFF_MAX ? (size_t)pinfo.utime : PTRDIFF_MAX));
   if (system_msecs!=NULL) *system_msecs = (pinfo.stime < 0 ? 0 : (pinfo.stime < (mi_msecs_t)PTRDIFF_MAX ? (size_t)pinfo.stime : PTRDIFF_MAX));
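Context for the hunk above: the process times arrive as signed millisecond counts and are clamped into [0, PTRDIFF_MAX] before being narrowed to `size_t`. The same guard, extracted into a sketch with a hypothetical signed type:

#include <stddef.h>
#include <stdint.h>

typedef int64_t sketch_msecs_t; // stand-in for mi_msecs_t

static size_t sketch_clamp_msecs(sketch_msecs_t t) {
  return (t < 0 ? 0 : (t < (sketch_msecs_t)PTRDIFF_MAX ? (size_t)t : (size_t)PTRDIFF_MAX));
}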
@@ -34,7 +34,7 @@ we therefore test the API over various inputs. Please add more tests :-)

 #include "mimalloc.h"
 // #include "mimalloc/internal.h"
-#include "mimalloc/types.h" // for MI_DEBUG and MI_ALIGNMENT_MAX
+#include "mimalloc/types.h" // for MI_DEBUG and MI_BLOCK_ALIGNMENT_MAX

 #include "testhelper.h"

@@ -59,7 +59,7 @@ bool mem_is_zero(uint8_t* p, size_t size) {
 // ---------------------------------------------------------------------------
 int main(void) {
   mi_option_disable(mi_option_verbose);

   // ---------------------------------------------------
   // Malloc
   // ---------------------------------------------------
@@ -154,7 +154,7 @@ int main(void) {
   };
   CHECK_BODY("malloc-aligned6") {
     bool ok = true;
-    for (size_t align = 1; align <= MI_ALIGNMENT_MAX && ok; align *= 2) {
+    for (size_t align = 1; align <= MI_BLOCK_ALIGNMENT_MAX && ok; align *= 2) {
       void* ps[8];
       for (int i = 0; i < 8 && ok; i++) {
         ps[i] = mi_malloc_aligned(align*13 // size

@@ -170,16 +170,16 @@ int main(void) {
     result = ok;
   };
   CHECK_BODY("malloc-aligned7") {
-    void* p = mi_malloc_aligned(1024,MI_ALIGNMENT_MAX);
+    void* p = mi_malloc_aligned(1024,MI_BLOCK_ALIGNMENT_MAX);
     mi_free(p);
-    result = ((uintptr_t)p % MI_ALIGNMENT_MAX) == 0;
+    result = ((uintptr_t)p % MI_BLOCK_ALIGNMENT_MAX) == 0;
   };
   CHECK_BODY("malloc-aligned8") {
     bool ok = true;
     for (int i = 0; i < 5 && ok; i++) {
       int n = (1 << i);
-      void* p = mi_malloc_aligned(1024, n * MI_ALIGNMENT_MAX);
-      ok = ((uintptr_t)p % (n*MI_ALIGNMENT_MAX)) == 0;
+      void* p = mi_malloc_aligned(1024, n * MI_BLOCK_ALIGNMENT_MAX);
+      ok = ((uintptr_t)p % (n*MI_BLOCK_ALIGNMENT_MAX)) == 0;
       mi_free(p);
     }
     result = ok;

@@ -187,7 +187,7 @@ int main(void) {
   CHECK_BODY("malloc-aligned9") {
     bool ok = true;
     void* p[8];
-    size_t sizes[8] = { 8, 512, 1024 * 1024, MI_ALIGNMENT_MAX, MI_ALIGNMENT_MAX + 1, 2 * MI_ALIGNMENT_MAX, 8 * MI_ALIGNMENT_MAX, 0 };
+    size_t sizes[8] = { 8, 512, 1024 * 1024, MI_BLOCK_ALIGNMENT_MAX, MI_BLOCK_ALIGNMENT_MAX + 1, 2 * MI_BLOCK_ALIGNMENT_MAX, 8 * MI_BLOCK_ALIGNMENT_MAX, 0 };
     for (int i = 0; i < 28 && ok; i++) {
       int align = (1 << i);
       for (int j = 0; j < 8 && ok; j++) {
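All of the aligned-allocation checks above reduce to the same modulus test; a tiny helper (hypothetical, for illustration) states the invariant the tests assert:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool sketch_is_aligned(const void* p, size_t align) {
  return ((uintptr_t)p % align) == 0; // align must be non-zero
}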