fix over-aggressive decommit of abandoned pages

daan 2022-02-03 14:26:56 -08:00
parent b365623b13
commit 741d39a004
2 changed files with 24 additions and 18 deletions

src/heap.c

@@ -115,17 +115,20 @@ static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq
 static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
 {
   if (heap==NULL || !mi_heap_is_initialized(heap)) return;
-  _mi_deferred_free(heap, collect >= MI_FORCE);
+
+  const bool force = collect >= MI_FORCE;
+  _mi_deferred_free(heap, force);
 
   // note: never reclaim on collect but leave it to threads that need storage to reclaim
-  if (
-  #ifdef NDEBUG
+  const bool force_main =
+  #ifdef NDEBUG
     collect == MI_FORCE
-  #else
+  #else
     collect >= MI_FORCE
-  #endif
-    && _mi_is_main_thread() && mi_heap_is_backing(heap) && !heap->no_reclaim)
-  {
+  #endif
+    && _mi_is_main_thread() && mi_heap_is_backing(heap) && !heap->no_reclaim;
+
+  if (force_main) {
     // the main thread is abandoned (end-of-program), try to reclaim all abandoned segments.
     // if all memory is freed by now, all segments should be freed.
     _mi_abandoned_reclaim_all(heap, &heap->tld->segments);
@@ -141,25 +144,27 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
   _mi_heap_delayed_free(heap);
 
   // collect retired pages
-  _mi_heap_collect_retired(heap, collect >= MI_FORCE);
+  _mi_heap_collect_retired(heap, force);
 
   // collect all pages owned by this thread
   mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
   mi_assert_internal( collect != MI_ABANDON || mi_atomic_load_ptr_acquire(mi_block_t,&heap->thread_delayed_free) == NULL );
 
-  // collect abandoned pages
-  _mi_abandoned_collect(heap, collect >= MI_FORCE, &heap->tld->segments);
+  // collect abandoned segments (in particular, decommit expired parts of segments in the abandoned segment list)
+  // note: forced decommit can be quite expensive if many threads are created/destroyed so we do not force on abandonment
+  _mi_abandoned_collect(heap, collect == MI_FORCE /* force? */, &heap->tld->segments);
 
   // collect segment local caches
-  if (collect >= MI_FORCE) {
+  if (force) {
     _mi_segment_thread_collect(&heap->tld->segments);
   }
 
   // decommit in global segment caches
-  _mi_segment_cache_collect( collect >= MI_FORCE, &heap->tld->os);
+  // note: forced decommit can be quite expensive if many threads are created/destroyed so we do not force on abandonment
+  _mi_segment_cache_collect( collect == MI_FORCE, &heap->tld->os);
 
   // collect regions on program-exit (or shared library unload)
-  if (collect >= MI_FORCE && _mi_is_main_thread() && mi_heap_is_backing(heap)) {
+  if (force && _mi_is_main_thread() && mi_heap_is_backing(heap)) {
     //_mi_mem_collect(&heap->tld->os);
   }
 }
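The key change in this hunk is that abandoned-segment and cache decommit now test `collect == MI_FORCE` instead of `collect >= MI_FORCE`. Here is a minimal standalone sketch of that predicate split, assuming the `MI_NORMAL < MI_FORCE < MI_ABANDON` ordering of `mi_collect_t`; the enum below is illustrative, not the mimalloc definition:

#include <stdbool.h>
#include <stdio.h>

typedef enum collect_e { COLLECT_NORMAL, COLLECT_FORCE, COLLECT_ABANDON } collect_t;

int main(void) {
  for (collect_t c = COLLECT_NORMAL; c <= COLLECT_ABANDON; c++) {
    bool force          = (c >= COLLECT_FORCE);  // free/collect work is still forced
    bool force_decommit = (c == COLLECT_FORCE);  // decommit is NOT forced on abandon
    printf("collect=%d force=%d force_decommit=%d\n", (int)c, force, force_decommit);
  }
  return 0;
}

The output shows that on COLLECT_ABANDON (thread exit) `force` stays true while `force_decommit` is false, which is exactly the "do not force decommit on abandonment" behavior the commit notes describe.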

src/segment.c

@@ -569,7 +569,7 @@ static void mi_segment_perhaps_decommit(mi_segment_t* segment, uint8_t* p, size_
     mi_commit_mask_t cmask;
     mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask);  // only decommit what is committed; span_free may try to decommit more
     mi_commit_mask_set(&segment->decommit_mask, &cmask);
-    segment->decommit_expire = _mi_clock_now() + mi_option_get(mi_option_decommit_delay);
+    // segment->decommit_expire = _mi_clock_now() + mi_option_get(mi_option_decommit_delay);
     mi_msecs_t now = _mi_clock_now();
     if (segment->decommit_expire == 0) {
       // no previous decommits, initialize now
@@ -582,8 +582,8 @@ static void mi_segment_perhaps_decommit(mi_segment_t* segment, uint8_t* p, size_
       segment->decommit_expire = now + (mi_option_get(mi_option_decommit_delay) / 8); // wait a tiny bit longer in case there is a series of free's
     }
     else {
-      // previous decommit mask is not yet expired
-      // segment->decommit_expire += 2; // = now + mi_option_get(mi_option_decommit_delay);
+      // previous decommit mask is not yet expired, increase the expiration by a bit.
+      segment->decommit_expire += (mi_option_get(mi_option_decommit_delay) / 8);
     }
   }
 }
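The two hunks above amount to a three-case expiration policy. The standalone model below shows how it behaves (a sketch, not the mimalloc source: `bump_expire` and `delay` are illustrative names, `delay` stands in for mi_option_get(mi_option_decommit_delay), and 25 ms is only an example value):

#include <stdint.h>
#include <stdio.h>

typedef int64_t msecs_t;

static msecs_t bump_expire(msecs_t expire, msecs_t now, msecs_t delay) {
  if (expire == 0)   return now + delay;      // no pending decommit: start a full delay
  if (expire <= now) return now + delay / 8;  // just expired: brief extra grace period
  return expire + delay / 8;                  // still pending: nudge the deadline, don't reset it
}

int main(void) {
  msecs_t delay = 25, expire = 0;
  // a burst of frees at t=0,10,20 extends the deadline a little each time
  for (msecs_t now = 0; now <= 20; now += 10) {
    expire = bump_expire(expire, now, delay);
    printf("t=%2lld -> expire=%lld\n", (long long)now, (long long)expire);
  }
  return 0;
}

Before this change the else branch did nothing, so spans freed into a not-yet-expired decommit mask inherited its old deadline and could be decommitted almost immediately; the small delay/8 extension gives each new addition a bit more time without letting a steady stream of frees postpone decommit indefinitely.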
@@ -1431,7 +1431,7 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slice
     }
     else {
       // otherwise, push on the visited list so it gets not looked at too quickly again
-      mi_segment_delayed_decommit(segment, true /* force? */, tld->stats); // forced decommit if needed
+      mi_segment_delayed_decommit(segment, true /* force? */, tld->stats); // forced decommit if needed as we may not visit soon again
       mi_abandoned_visited_push(segment);
     }
   }
@@ -1456,7 +1456,8 @@ void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld)
     }
     else {
       // otherwise, decommit if needed and push on the visited list
-      mi_segment_delayed_decommit(segment, force, tld->stats); // forced decommit if needed
+      // note: forced decommit can be expensive if many threads are destroyed/created as in mstress.
+      mi_segment_delayed_decommit(segment, force, tld->stats);
       mi_abandoned_visited_push(segment);
     }
   }
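Both changed call sites feed the same gate inside mi_segment_delayed_decommit. A small model of that gate (a sketch of the usual delayed-decommit pattern, not the verbatim mimalloc function; `should_decommit` and its parameters are hypothetical names):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef int64_t msecs_t;

static bool should_decommit(bool force, bool has_pending, msecs_t expire, msecs_t now) {
  if (!has_pending) return false;  // no spans staged for decommit
  if (force) return true;          // caller insists, bypassing the delay
  return now >= expire;            // otherwise honor the decommit deadline
}

int main(void) {
  msecs_t now = 100, expire = 120;  // deadline not yet reached
  printf("try_reclaim (force=true):            %d\n", should_decommit(true,  true, expire, now)); // 1
  printf("abandoned_collect (force=false):     %d\n", should_decommit(false, true, expire, now)); // 0
  return 0;
}

This captures the asymmetry introduced by the commit: mi_segment_try_reclaim still forces decommit because a segment parked on the visited list may not be visited again soon, while _mi_abandoned_collect forces it only for an explicit MI_FORCE collect, so freshly abandoned segments keep their unexpired committed memory.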