Switch to 64-bit bitmask and bug fixes
parent d67fe5d26a
commit 02f3e0fe7a
@@ -349,6 +349,7 @@ typedef enum mi_option_e {
   mi_option_disallow_arena_alloc,             // 1 = do not use arena's for allocation (except if using specific arena id's)
   mi_option_retry_on_oom,                     // retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries. (only on windows)
   mi_option_max_segments_per_heap,            // max number of segments that a heap can own
+  mi_option_heap_collect_abandoned_interval,  // interval (in ms) between collecting abandoned segments when a heap drops excessive segments
   _mi_option_last,
   // legacy option names
   mi_option_large_os_pages = mi_option_allow_large_os_pages,
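Both options are ordinary mi_option_t values, so they can be tuned at runtime like any other mimalloc option. A minimal usage sketch, assuming this fork keeps the stock mi_option_set/mi_option_get API (the defaults of 8 segments and 2000 ms come from the options table further down):

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  // cap each heap at 4 owned segments (default is 8)
  mi_option_set(mi_option_max_segments_per_heap, 4);
  // collect abandoned segments at most every 500 ms (default is 2000)
  mi_option_set(mi_option_heap_collect_abandoned_interval, 500);

  printf("collect interval: %ld ms\n",
         mi_option_get(mi_option_heap_collect_abandoned_interval));
  return 0;
}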
@@ -220,12 +220,13 @@ typedef int32_t mi_ssize_t;
 #define MI_MAX_ALLOC_SIZE  PTRDIFF_MAX
 #endif

-#define MI_FREE_SPACE_MASK_BIT_COUNT  31
+#define MI_FREE_SPACE_MASK_BIT_COUNT  63
 #define MI_FREE_SPACE_BINS_PER_BIT    (MI_BIN_HUGE/MI_FREE_SPACE_MASK_BIT_COUNT)
-#define MI_FREE_SPACE_MASK_ALL        ((size_t)0xFFFFFFFF)
-#define MI_FREE_SPACE_MASK_ABANDONED  ((size_t)0x80000000)
+#define MI_FREE_SPACE_MASK_ALL        ((size_t)0xFFFFFFFFFFFFFFFF)
+#define MI_FREE_SPACE_MASK_ABANDONED  ((size_t)0x8000000000000000)
 #define MI_FREE_SPACE_MASK_ANY        (MI_FREE_SPACE_MASK_ALL & (~MI_FREE_SPACE_MASK_ABANDONED))


 // ------------------------------------------------------
 // Mimalloc pages contain allocated blocks
 // ------------------------------------------------------
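Widening the mask from 31 to 63 usable bits (the top bit still flags abandonment) makes the free-space filter roughly twice as precise: with MI_BIN_HUGE = 73 size bins as in stock mimalloc, each bit had to cover 73/31 = 2 bins before, versus 73/63 = 1 bin per bit now. A self-contained sketch of the bin-to-bit mapping these macros imply (free_space_bit_from_bin is illustrative, not the fork's actual helper):

#include <stdint.h>
#include <stdio.h>

#define MI_BIN_HUGE                   73
#define MI_FREE_SPACE_MASK_BIT_COUNT  63
#define MI_FREE_SPACE_BINS_PER_BIT    (MI_BIN_HUGE/MI_FREE_SPACE_MASK_BIT_COUNT)

// Illustrative: which mask bit summarizes free space for a given size bin.
static uint64_t free_space_bit_from_bin(uint32_t bin) {
  uint32_t bit = bin / MI_FREE_SPACE_BINS_PER_BIT;
  if (bit >= MI_FREE_SPACE_MASK_BIT_COUNT) bit = MI_FREE_SPACE_MASK_BIT_COUNT - 1;
  return (uint64_t)1 << bit;  // bit 63 stays reserved for MI_FREE_SPACE_MASK_ABANDONED
}

int main(void) {
  printf("bin 10 -> mask %#llx\n", (unsigned long long)free_space_bit_from_bin(10));
  return 0;
}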
@@ -544,6 +545,7 @@ struct mi_heap_s {
   size_t      page_retired_min;    // smallest retired index (retired pages are fully free, but still in the page queues)
   size_t      page_retired_max;    // largest retired index into the `pages` array.
   mi_heap_t*  next;                // list of heaps per thread
+  mi_msecs_t  last_abandoned_collect_time;  // time (in ms) of the last collect of abandoned segments by this heap
   bool        no_reclaim;          // `true` if this heap should not reclaim abandoned pages
   uint8_t     tag;                 // custom tag, can be used for separating heaps based on the object types
   mi_page_t*  pages_free_direct[MI_PAGES_DIRECT];  // optimize: array where every entry points a page with possibly free blocks in the corresponding queue for that size.
src/heap.c (+15)
@@ -230,6 +230,7 @@ void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool
   // push on the thread local heaps list
   heap->next = heap->tld->heaps;
   heap->tld->heaps = heap;
+  heap->last_abandoned_collect_time = 0;
 }

 mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) {
@@ -702,6 +703,7 @@ static mi_decl_noinline void mi_segment_visit_pages(mi_heap_t* heap, mi_segment_
 }

 void mi_heap_drop_segment(mi_heap_t* heap, size_t targetSegmentCount) {
+  bool segmentsDropped = false;

   while (heap->tld->segments.count >= targetSegmentCount) {

@@ -711,6 +713,7 @@ void mi_heap_drop_segment(mi_heap_t* heap, size_t targetSegmentCount) {
       break;
     }

+    segmentsDropped = true;
     // 2. when abandoning, mark all pages to no longer add to delayed_free
     mi_segment_visit_pages(heap, segmentToAbandon, &mi_heap_page_never_delayed_free, NULL);

@@ -723,6 +726,18 @@ void mi_heap_drop_segment(mi_heap_t* heap, size_t targetSegmentCount) {
     mi_collect_t collect = MI_ABANDON;
     mi_segment_visit_pages(heap, segmentToAbandon, &mi_heap_page_collect, &collect);
   }
+
+  if (segmentsDropped) {
+    mi_msecs_t now_msec = _mi_clock_now();
+    long collect_interval = mi_option_get(mi_option_heap_collect_abandoned_interval);
+    if ((now_msec - heap->last_abandoned_collect_time) >= collect_interval) {
+      heap->last_abandoned_collect_time = now_msec;
+
+      // collect abandoned segments (in particular, purge expired parts of segments in the abandoned segment list)
+      // note: forced purge can be quite expensive if many threads are created/destroyed so we do not force on abandonment
+      _mi_abandoned_collect(heap, false /* force? */, &heap->tld->segments);
+    }
+  }
 }

 void mi_heap_drop_segment_if_required(mi_heap_t* heap, size_t alloc_block_size)
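The segmentsDropped / interval check above is a plain time-based throttle: however many segments a busy heap drops, the expensive abandoned-segment collection runs at most once per mi_option_heap_collect_abandoned_interval milliseconds. The same pattern in isolation, as a minimal sketch with illustrative names (clock_gettime stands in for mimalloc's _mi_clock_now):

#include <stdio.h>
#include <time.h>

static long long now_msec(void) {  // monotonic clock in milliseconds
  struct timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return (long long)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

static long long last_collect_msec = 0;

static void maybe_collect(long interval_msec) {
  long long now = now_msec();
  if (now - last_collect_msec >= interval_msec) {
    last_collect_msec = now;  // stamp first so repeated drops skip until the interval elapses
    puts("collecting abandoned segments...");  // stands in for _mi_abandoned_collect
  }
}

int main(void) {
  for (int i = 0; i < 1000000; i++) {
    maybe_collect(2000);  // called on every drop, but runs at most every 2 s
  }
  return 0;
}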
@@ -123,6 +123,7 @@ mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
   0,                // page count
   MI_BIN_FULL, 0,   // page retired min/max
   NULL,             // next
+  0,                // last_abandoned_collect_time
   false,            // can reclaim
   0,                // tag
   MI_SMALL_PAGES_EMPTY,
@@ -169,6 +170,7 @@ mi_heap_t _mi_heap_main = {
   0,                // page count
   MI_BIN_FULL, 0,   // page retired min/max
   NULL,             // next heap
+  0,                // last_abandoned_collect_time
   false,            // can reclaim
   0,                // tag
   MI_SMALL_PAGES_EMPTY,
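Both static heap initializers above are positional, so the new value has to land at exactly the slot where last_abandoned_collect_time sits in struct mi_heap_s. A hypothetical cut-down struct shows how C99 designated initializers would make that correspondence self-checking (a sketch of the alternative, not how mimalloc is written):

#include <stdbool.h>
#include <stddef.h>

typedef long long msecs_t;   // stands in for mi_msecs_t

typedef struct heap_s {      // cut-down stand-in for struct mi_heap_s
  struct heap_s* next;
  msecs_t        last_abandoned_collect_time;
  bool           no_reclaim;
  unsigned char  tag;
} heap_t;

// Field names, not positions, carry the meaning here: inserting a new
// field cannot silently shift the values that follow it.
static const heap_t heap_empty = {
  .next                        = NULL,
  .last_abandoned_collect_time = 0,
  .no_reclaim                  = false,
  .tag                         = 0,
};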
@@ -94,6 +94,7 @@ static mi_option_desc_t options[_mi_option_last] =
   { 0,    UNINIT, MI_OPTION(disallow_arena_alloc) },  // 1 = do not use arena's for allocation (except if using specific arena id's)
   { 400,  UNINIT, MI_OPTION(retry_on_oom) },          // windows only: retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries.
   { 8,    UNINIT, MI_OPTION(max_segments_per_heap) }, // max number of segments that a heap can own.
+  { 2000, UNINIT, MI_OPTION(heap_collect_abandoned_interval) },  // interval (in ms) between collecting abandoned segments when a heap drops excessive segments.
 };

 static void mi_option_init(mi_option_desc_t* desc);
@@ -1101,7 +1101,7 @@ size_t mi_free_space_mask_from_slicecount(uint32_t slice_count)
     max_size = slice_count * MI_SEGMENT_SLICE_SIZE;
   }

-  free_space_mask = mi_free_space_mask_from_blocksize(max_size - 1);
+  free_space_mask = mi_free_space_mask_from_blocksize(max_size);
  free_space_mask = free_space_mask | (free_space_mask - 1); // mark all allocations with size < max_size as available

   return free_space_mask;
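Two things happen here: passing max_size instead of max_size - 1 fixes an off-by-one so that a block of exactly max_size maps into the reported bin, and the next line uses the classic mask | (mask - 1) trick to fan the single bin bit out to every lower bit. The trick in isolation:

#include <stdint.h>
#include <stdio.h>

int main(void) {
  uint64_t mask = (uint64_t)1 << 5;   // bit for the largest size class that fits
  uint64_t all  = mask | (mask - 1);  // 0b100000 -> 0b111111: all smaller classes too
  printf("%#llx -> %#llx\n", (unsigned long long)mask, (unsigned long long)all);
  return 0;
}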
@@ -1187,6 +1187,7 @@ static bool mi_segment_check_free(mi_segment_t* segment, size_t slices_needed, s
 {
   mi_assert_internal(mi_segment_is_abandoned(segment));
   bool has_page = false;
+  size_t free_space_mask = 0;

   // for all slices
   const mi_slice_t* end;
@@ -1208,6 +1209,7 @@ static bool mi_segment_check_free(mi_segment_t* segment, size_t slices_needed, s
       if (slice->slice_count >= slices_needed) {
         has_page = true;
       }
+      free_space_mask |= mi_free_space_mask_from_slicecount(slice->slice_count);
     }
     else if (mi_page_block_size(page) == block_size && mi_page_has_any_available(page)) {
       // a page has available free blocks of the right size
@@ -1222,6 +1224,10 @@ static bool mi_segment_check_free(mi_segment_t* segment, size_t slices_needed, s
     }
     slice = slice + slice->slice_count;
   }
+
+  if (free_space_mask != 0) {
+    mi_atomic_or_acq_rel(&segment->free_space_mask, free_space_mask);
+  }
   return has_page;
 }
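Accumulating into a local free_space_mask and publishing it once with mi_atomic_or_acq_rel keeps the scan loop free of atomics, and an OR can only add bits, so concurrent threads scanning other segments never erase each other's findings. A self-contained C11 sketch of the same publish step (illustrative names, not the fork's internals):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t segment_free_space_mask;  // shared summary, one bit per size class

static void publish_free_space(uint64_t local_mask) {
  if (local_mask != 0) {  // skip the atomic entirely when the scan found nothing
    atomic_fetch_or_explicit(&segment_free_space_mask, local_mask,
                             memory_order_acq_rel);
  }
}

int main(void) {
  publish_free_space(0x3f);   // bits accumulated while walking one segment
  publish_free_space(0x100);  // a later scan adds bits, never clears them
  printf("%#llx\n", (unsigned long long)atomic_load(&segment_free_space_mask));
  return 0;
}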