cleanup reclaim logic

daan 2020-01-26 12:39:11 -08:00
parent d4927adddc
commit e628fc7067
3 changed files with 87 additions and 94 deletions

View File

@@ -411,30 +411,24 @@ static inline mi_thread_free_t mi_tf_set_block(mi_thread_free_t tf, mi_block_t*
   return mi_tf_make(block, mi_tf_delayed(tf));
 }
 
 // are all blocks in a page freed?
+// note: needs up-to-date used count, (as the `xthread_free` list may not be empty). see `_mi_page_collect_free`.
 static inline bool mi_page_all_free(const mi_page_t* page) {
   mi_assert_internal(page != NULL);
   return (page->used == 0);
 }
 
-// are there immediately available blocks
+// are there any available blocks?
+static inline bool mi_page_has_any_available(const mi_page_t* page) {
+  mi_assert_internal(page != NULL && page->reserved > 0);
+  return (page->used < page->reserved || (mi_page_thread_free(page) != NULL));
+}
+
+// are there immediately available blocks, i.e. blocks available on the free list.
 static inline bool mi_page_immediate_available(const mi_page_t* page) {
   mi_assert_internal(page != NULL);
   return (page->free != NULL);
 }
 
-// are there free blocks in this page?
-static inline bool mi_page_has_free(mi_page_t* page) {
-  mi_assert_internal(page != NULL);
-  bool hasfree = (mi_page_immediate_available(page) || page->local_free != NULL || (mi_page_thread_free(page) != NULL));
-  mi_assert_internal(hasfree || page->used == page->capacity);
-  return hasfree;
-}
-
-// are all blocks in use?
-static inline bool mi_page_all_used(mi_page_t* page) {
-  mi_assert_internal(page != NULL);
-  return !mi_page_has_free(page);
-}
 
 // is more than 7/8th of a page in use?
 static inline bool mi_page_mostly_used(const mi_page_t* page) {
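Review note: the new mi_page_has_any_available is deliberately weaker than mi_page_immediate_available: it also counts still-unextended capacity (used < reserved) and blocks parked on the cross-thread free list, which is exactly what the reclaim code in the segment layer needs. A stand-alone sketch of the distinction, using a simplified stand-in struct rather than mimalloc's real mi_page_t:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

// Simplified stand-in for a page descriptor (hypothetical fields, illustration only).
typedef struct demo_page_s {
  void*  free;         // blocks ready to hand out right now
  void*  thread_free;  // blocks freed concurrently by other threads
  size_t used;         // blocks currently allocated
  size_t reserved;     // total blocks the page can ever hold
} demo_page_t;

// Immediately available: something is already on the local free list.
static bool demo_page_immediate_available(const demo_page_t* page) {
  return (page->free != NULL);
}

// Any available: either not all reserved blocks are in use, or other
// threads have returned blocks that a collect pass could recover.
static bool demo_page_has_any_available(const demo_page_t* page) {
  return (page->used < page->reserved || page->thread_free != NULL);
}

int main(void) {
  demo_page_t page = { NULL, NULL, 8, 16 };
  // free list is empty, but half the capacity is still unused:
  printf("immediate=%d any=%d\n",
         demo_page_immediate_available(&page),
         demo_page_has_any_available(&page));   // prints: immediate=0 any=1
  return 0;
}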

View File

@@ -234,6 +234,7 @@ void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
   mi_assert_internal(mi_page_thread_free_flag(page) != MI_NEVER_DELAYED_FREE);
   mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
   mi_assert_internal(!page->is_reset);
+  // TODO: push on full queue immediately if it is full?
   mi_page_queue_t* pq = mi_page_queue(heap, mi_page_block_size(page));
   mi_page_queue_push(heap, pq, page);
   mi_assert_expensive(_mi_page_is_valid(page));
@@ -245,28 +246,16 @@ static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size
   mi_assert_internal(pq==NULL||block_size == pq->block_size);
   mi_page_t* page = _mi_segment_page_alloc(heap, block_size, &heap->tld->segments, &heap->tld->os);
   if (page == NULL) {
-    // this may be out-of-memory, or a page was reclaimed
-    if (pq!=NULL && (page = pq->first) != NULL) {
-      mi_assert_expensive(_mi_page_is_valid(page));
-      if (!mi_page_immediate_available(page)) {
-        mi_page_extend_free(heap, page, heap->tld);
-      }
-      mi_assert_internal(mi_page_immediate_available(page));
-      if (mi_page_immediate_available(page)) {
-        return page; // reclaimed page
-      }
-    }
-    return NULL; // out-of-memory
+    // this may be out-of-memory, or an abandoned page was reclaimed (and in our queue)
+    return NULL;
   }
-  else {
-    // a fresh page was allocated, initialize it
-    mi_assert_internal(pq==NULL || _mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
-    mi_page_init(heap, page, block_size, heap->tld);
-    _mi_stat_increase(&heap->tld->stats.pages, 1);
-    if (pq!=NULL) mi_page_queue_push(heap, pq, page); // huge pages use pq==NULL
-    mi_assert_expensive(_mi_page_is_valid(page));
-    return page;
-  }
+  // a fresh page was found, initialize it
+  mi_assert_internal(pq==NULL || _mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
+  mi_page_init(heap, page, block_size, heap->tld);
+  _mi_stat_increase(&heap->tld->stats.pages, 1);
+  if (pq!=NULL) mi_page_queue_push(heap, pq, page); // huge pages use pq==NULL
+  mi_assert_expensive(_mi_page_is_valid(page));
+  return page;
 }
 
 // Get a fresh page to use
@@ -648,7 +637,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
 -------------------------------------------------------------*/
 
 // Find a page with free blocks of `page->block_size`.
-static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq)
+static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)
 {
   // search through the pages in "next fit" order
   size_t count = 0;
@@ -686,13 +675,16 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
   if (page == NULL) {
     _mi_heap_collect_retired(heap, false); // perhaps make a page available
     page = mi_page_fresh(heap, pq);
+    if (page == NULL && first_try) {
+      // out-of-memory _or_ an abandoned page with free blocks was reclaimed, try once again
+      page = mi_page_queue_find_free_ex(heap, pq, false);
+    }
   }
   else {
     mi_assert(pq->first == page);
     page->retire_expire = 0;
   }
   mi_assert_internal(page == NULL || mi_page_immediate_available(page));
   return page;
 }
@@ -716,7 +708,7 @@ static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {
       return page; // fast path
     }
   }
-  return mi_page_queue_find_free_ex(heap, pq);
+  return mi_page_queue_find_free_ex(heap, pq, true);
 }
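Review note: the first_try flag exists because mi_page_fresh can now return NULL both on genuine out-of-memory and when an abandoned page was reclaimed into the current heap's queue; one bounded retry re-scans the queue without risking unbounded recursion. A self-contained sketch of the pattern with hypothetical helper names (queue_scan, fresh_alloc), not the mimalloc functions:

#include <stdbool.h>
#include <stddef.h>

// Hypothetical stand-ins for the page-queue scan and the fresh-page path.
static int* queue_scan(void)  { return NULL; }  // pretend the queue is empty
static int* fresh_alloc(void) { return NULL; }  // NULL: OOM *or* a reclaim refilled the queue

// Try the queue, then a fresh allocation; if that returns NULL it may only mean a
// reclaim pushed something back onto the queue, so scan once more (and only once).
static int* find_ex(bool first_try) {
  int* p = queue_scan();
  if (p == NULL) {
    p = fresh_alloc();
    if (p == NULL && first_try) {
      p = find_ex(false);   // bounded: recursion depth is at most two
    }
  }
  return p;
}

int main(void) {
  return (find_ex(true) == NULL) ? 0 : 1;
}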

View File

@@ -669,6 +669,11 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_
   // set protection
   mi_segment_protect(segment, true, tld->os);
 
+  // insert in free lists for small and medium pages
+  if (page_kind <= MI_PAGE_MEDIUM) {
+    mi_segment_insert_in_free_queue(segment, tld);
+  }
+
   //fprintf(stderr,"mimalloc: alloc segment at %p\n", (void*)segment);
   return segment;
 }
@@ -1019,21 +1024,25 @@ void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) {
 ----------------------------------------------------------- */
 
 // Possibly clear pages and check if free space is available
-static bool mi_segment_check_free(mi_segment_t* segment, size_t block_size)
+static bool mi_segment_check_free(mi_segment_t* segment, size_t block_size, bool* all_pages_free)
 {
   mi_assert_internal(block_size < MI_HUGE_BLOCK_SIZE);
   bool has_page = false;
+  size_t pages_used = 0;
+  size_t pages_used_empty = 0;
   for (size_t i = 0; i < segment->capacity; i++) {
     mi_page_t* page = &segment->pages[i];
     if (page->segment_in_use) {
+      pages_used++;
       // ensure used count is up to date and collect potential concurrent frees
       _mi_page_free_collect(page, false);
       if (mi_page_all_free(page)) {
         // if everything free already, page can be reused for some block size
-        // note: don't clear yet as we can only reset it once it is reclaimed
+        // note: don't clear the page yet as we can only OS reset it once it is reclaimed
+        pages_used_empty++;
         has_page = true;
       }
-      else if (page->xblock_size == block_size && page->used < page->reserved) {
+      else if (page->xblock_size == block_size && mi_page_has_any_available(page)) {
         // a page has available free blocks of the right size
         has_page = true;
       }
@@ -1043,15 +1052,19 @@ static bool mi_segment_check_free(mi_segment_t* segment, size_t block_size)
       has_page = true;
     }
   }
+  mi_assert_internal(pages_used == segment->used && pages_used >= pages_used_empty);
+  if (all_pages_free != NULL) {
+    *all_pages_free = ((pages_used - pages_used_empty) == 0);
+  }
   return has_page;
 }
 
-#define MI_RECLAIMED ((mi_segment_t*)1)
-
-// Reclaim a segment
-static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap, size_t block_size, mi_segments_tld_t* tld) {
+// Reclaim a segment; returns NULL if the segment was freed
+// set `right_page_reclaimed` to `true` if it reclaimed a page of the right `block_size` that was not full.
+static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap, size_t requested_block_size, bool* right_page_reclaimed, mi_segments_tld_t* tld) {
   mi_assert_internal(segment->abandoned_next == NULL);
-  bool right_page_reclaimed = false;
+  if (right_page_reclaimed != NULL) { *right_page_reclaimed = false; }
 
   segment->thread_id = _mi_thread_id();
   segment->abandoned_visits = 0;
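Review note: mi_segment_check_free now also answers, through the optional all_pages_free out-parameter, whether every in-use page turned out to be empty after collecting concurrent frees, by comparing pages_used against pages_used_empty. A stripped-down sketch of that counting pattern over simplified stand-in types (slot_t is hypothetical, not mimalloc's page descriptor):

#include <stdbool.h>
#include <stddef.h>

// Simplified stand-in for a page slot inside a segment (illustrative only).
typedef struct slot_s {
  bool   in_use;   // slot holds a live page
  size_t used;     // live blocks in that page (0 == empty)
} slot_t;

// Scan all slots; report via the optional out-parameter whether every
// in-use page is actually empty (so the whole segment could be freed).
static bool check_free(const slot_t* slots, size_t capacity, bool* all_pages_free) {
  size_t pages_used = 0;
  size_t pages_used_empty = 0;
  bool has_page = false;
  for (size_t i = 0; i < capacity; i++) {
    if (slots[i].in_use) {
      pages_used++;
      if (slots[i].used == 0) { pages_used_empty++; has_page = true; }
    }
    else {
      has_page = true;   // a completely unused slot is also usable
    }
  }
  if (all_pages_free != NULL) {
    *all_pages_free = ((pages_used - pages_used_empty) == 0);
  }
  return has_page;
}

int main(void) {
  slot_t slots[3] = { { true, 0 }, { true, 0 }, { false, 0 } };
  bool all_free;
  bool has_page = check_free(slots, 3, &all_free);   // has_page=true, all_free=true
  return (has_page && all_free) ? 0 : 1;
}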
@@ -1071,10 +1084,10 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
       segment->abandoned--;
       mi_assert(page->next == NULL);
       _mi_stat_decrease(&tld->stats->pages_abandoned, 1);
-      // set the heap again and allow delayed free again
+      // set the heap again and allow heap thread delayed free again.
       mi_page_set_heap(page, heap);
       _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, true); // override never (after heap is set)
-      // TODO: should we not collect again given that we just collected?
+      // TODO: should we not collect again given that we just collected in `check_free`?
       _mi_page_free_collect(page, false); // ensure used count is up to date
       if (mi_page_all_free(page)) {
         // if everything free already, clear the page directly
@@ -1083,77 +1096,67 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
       else {
         // otherwise reclaim it into the heap
         _mi_page_reclaim(heap, page);
-        if (block_size == page->xblock_size) {
-          right_page_reclaimed = true;
+        if (requested_block_size == page->xblock_size && mi_page_has_any_available(page)) {
+          if (right_page_reclaimed != NULL) { *right_page_reclaimed = true; }
         }
       }
     }
     else if (page->is_committed && !page->is_reset) { // not in-use, and not reset yet
-      // note: no not reset as this includes pages that were not touched before
+      // note: do not reset as this includes pages that were not touched before
       // mi_pages_reset_add(segment, page, tld);
     }
   }
   mi_assert_internal(segment->abandoned == 0);
-  if (right_page_reclaimed) {
-    // add the segment's free pages to the free small segment queue
+  if (segment->used == 0) {
+    mi_assert_internal(right_page_reclaimed == NULL || !(*right_page_reclaimed));
+    mi_segment_free(segment, false, tld);
+    return NULL;
+  }
+  else {
     if (segment->page_kind <= MI_PAGE_MEDIUM && mi_segment_has_free(segment)) {
       mi_segment_insert_in_free_queue(segment, tld);
     }
-    // and return reclaimed: at the page allocation the page is already in the queue now
-    return MI_RECLAIMED;
-  }
-  else {
-    // otherwise return the segment as it will contain some free pages
-    // (except for abandoned_reclaim_all which uses a block_size of zero)
-    mi_assert_internal(segment->used < segment->capacity || block_size == 0);
     return segment;
   }
 }
 
-// Reclaim a segment without returning it
-static void mi_segment_reclaim_force(mi_segment_t* segment, mi_heap_t* heap, mi_segments_tld_t* tld) {
-  mi_segment_t* res = mi_segment_reclaim(segment, heap, 0, tld);
-  mi_assert_internal(res != MI_RECLAIMED); // due to block_size == 0
-  if (res!=MI_RECLAIMED && res != NULL) {
-    mi_assert_internal(res == segment);
-    if (res->used == 0) {
-      mi_segment_free(segment, false, tld);
-    }
-    else if (res->page_kind <= MI_PAGE_MEDIUM && mi_segment_has_free(res)) {
-      mi_segment_insert_in_free_queue(res, tld);
-    }
-  }
-}
-
 void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld) {
   mi_segment_t* segment;
   while ((segment = mi_abandoned_pop()) != NULL) {
-    mi_segment_reclaim_force(segment, heap, tld);
+    mi_segment_reclaim(segment, heap, 0, NULL, tld);
   }
 }
 
-static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t block_size, mi_page_kind_t page_kind, mi_segments_tld_t* tld)
+static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t block_size, mi_page_kind_t page_kind, bool* reclaimed, mi_segments_tld_t* tld)
 {
+  *reclaimed = false;
   mi_segment_t* segment;
   int max_tries = 8; // limit the work to bound allocation times
   while ((max_tries-- > 0) && ((segment = mi_abandoned_pop()) != NULL)) {
     segment->abandoned_visits++;
-    bool has_page = mi_segment_check_free(segment,block_size); // try to free up pages (due to concurrent frees)
-    if (has_page && segment->page_kind == page_kind) {
-      // found a free page of the right kind, or page of the right block_size with free space
-      return mi_segment_reclaim(segment, heap, block_size, tld);
+    bool all_pages_free;
+    bool has_page = mi_segment_check_free(segment,block_size,&all_pages_free); // try to free up pages (due to concurrent frees)
+    if (all_pages_free) {
+      // free the segment (by forced reclaim) to make it available to other threads.
+      // note1: we prefer to free a segment as that might lead to reclaiming another
+      // segment that is still partially used.
+      // note2: we could in principle optimize this by skipping reclaim and directly
+      // freeing but that would violate some invariants temporarily)
+      mi_segment_reclaim(segment, heap, 0, NULL, tld);
     }
-    else if (segment->used==0) {
-      // free the segment to make it available to other threads
-      mi_segment_os_free(segment, segment->segment_size, tld);
+    else if (has_page && segment->page_kind == page_kind) {
+      // found a free page of the right kind, or page of the right block_size with free space
+      // we return the result of reclaim (which is usually `segment`) as it might free
+      // the segment due to concurrent frees (in which case `NULL` is returned).
+      return mi_segment_reclaim(segment, heap, block_size, reclaimed, tld);
     }
     else if (segment->abandoned_visits >= 3) {
-      // always reclaim on 3rd visit to limit the list length
-      mi_segment_reclaim_force(segment, heap, tld);
+      // always reclaim on 3rd visit to limit the list length.
+      mi_segment_reclaim(segment, heap, 0, NULL, tld);
     }
     else {
-      // push on the visited list so it gets not looked at too quickly again
+      // otherwise, push on the visited list so it gets not looked at too quickly again
       mi_abandoned_visited_push(segment);
     }
   }
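Review note: with the MI_RECLAIMED sentinel and mi_segment_reclaim_force gone, mi_segment_try_reclaim reduces to one decision cascade per popped segment: free it if all its pages are empty, take it if it has a matching page, force-reclaim it on the third visit, otherwise defer it. A compact sketch of that cascade using hypothetical types and helpers, not the real mimalloc API:

#include <stdbool.h>
#include <stddef.h>

// Hypothetical abandoned-segment stand-in for illustrating the decision order.
typedef struct seg_s {
  bool all_pages_free;     // every in-use page turned out empty
  bool has_matching_page;  // a page of the requested kind/size has space
  int  visits;             // how often this segment was considered already
} seg_t;

typedef enum { ACT_FREE, ACT_RECLAIM_AND_USE, ACT_RECLAIM_ANYWAY, ACT_REVISIT_LATER } action_t;

// Mirror of the new control flow: free fully-empty segments, take segments with a
// matching page, force-reclaim anything seen three times, otherwise defer it.
static action_t decide(const seg_t* seg) {
  if (seg->all_pages_free)          return ACT_FREE;             // reclaim(.., 0, NULL, ..) then freed
  else if (seg->has_matching_page)  return ACT_RECLAIM_AND_USE;  // reclaim(.., block_size, &reclaimed, ..)
  else if (seg->visits >= 3)        return ACT_RECLAIM_ANYWAY;   // limit the abandoned-list length
  else                              return ACT_REVISIT_LATER;    // push on the visited list
}

int main(void) {
  seg_t seg = { false, true, 1 };
  return (decide(&seg) == ACT_RECLAIM_AND_USE) ? 0 : 1;
}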
@@ -1176,12 +1179,16 @@ static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t block_s
     return segment;
   }
   // 2. try to reclaim an abandoned segment
-  segment = mi_segment_try_reclaim(heap, block_size, page_kind, tld);
-  if (segment == MI_RECLAIMED) {
-    return NULL; // pretend out-of-memory as the page will be in the page queue of the heap
+  bool reclaimed;
+  segment = mi_segment_try_reclaim(heap, block_size, page_kind, &reclaimed, tld);
+  if (reclaimed) {
+    // reclaimed the right page right into the heap
+    mi_assert_internal(segment != NULL && segment->page_kind == page_kind && page_kind <= MI_PAGE_LARGE);
+    return NULL; // pretend out-of-memory as the page will be in the page queue of the heap with available blocks
   }
   else if (segment != NULL) {
-    return segment; // reclaimed a segment with empty pages in it
+    // reclaimed a segment with empty pages (of `page_kind`) in it
+    return segment;
   }
   // 3. otherwise allocate a fresh segment
   return mi_segment_alloc(0, page_kind, page_shift, tld, os_tld);
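Review note: replacing the MI_RECLAIMED sentinel pointer with a bool out-parameter means the return value is now always either a real segment or NULL, and the "page already reclaimed into the heap queue" case is signalled separately. A minimal sketch of that API shape with hypothetical names (try_reclaim, reclaim_or_alloc, fresh_alloc):

#include <stdbool.h>
#include <stddef.h>

typedef struct seg_s { int id; } seg_t;

static seg_t* fresh_alloc(void) { return NULL; }  // stub: pretend allocation failed

// Instead of returning a magic sentinel such as ((seg_t*)1), report the
// "already usable by the caller's heap" case through an out-parameter.
static seg_t* try_reclaim(bool* reclaimed) {
  *reclaimed = false;
  // stub: a full version would either return a segment with free pages, or set
  // *reclaimed = true when the wanted page already landed in the heap's page queue.
  return NULL;
}

static seg_t* reclaim_or_alloc(void) {
  bool reclaimed;
  seg_t* seg = try_reclaim(&reclaimed);
  if (reclaimed) return NULL;      // pretend out-of-memory: the wanted page is already queued
  if (seg != NULL) return seg;     // a reclaimed segment with empty pages to allocate from
  return fresh_alloc();            // otherwise allocate a fresh segment
}

int main(void) {
  return (reclaim_or_alloc() == NULL) ? 0 : 1;
}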
@@ -1216,12 +1223,12 @@ static mi_page_t* mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, mi_p
   // find an available segment the segment free queue
   mi_segment_queue_t* const free_queue = mi_segment_free_queue_of_kind(kind, tld);
   if (mi_segment_queue_is_empty(free_queue)) {
-    // possibly allocate a fresh segment
-    mi_segment_t* segment = mi_segment_reclaim_or_alloc(heap, block_size, kind, page_shift, tld, os_tld);
+    // possibly allocate or reclaim a fresh segment
+    mi_segment_t* const segment = mi_segment_reclaim_or_alloc(heap, block_size, kind, page_shift, tld, os_tld);
     if (segment == NULL) return NULL; // return NULL if out-of-memory (or reclaimed)
+    mi_assert_internal(free_queue->first == segment);
     mi_assert_internal(segment->page_kind==kind);
     mi_assert_internal(segment->used < segment->capacity);
-    mi_segment_enqueue(free_queue, segment);
   }
   mi_assert_internal(free_queue->first != NULL);
   mi_page_t* const page = mi_segment_page_alloc_in(free_queue->first, tld);
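Review note: since mi_segment_init now inserts small/medium segments into the free queue itself (see the hunk at -669,6 above), the caller can assert the queue-head invariant instead of enqueueing after the fact. A tiny self-contained sketch of that constructor-registers-itself pattern with hypothetical types:

#include <assert.h>
#include <stddef.h>

// Hypothetical miniature of the new invariant: a freshly created segment is already
// linked into its free queue by its constructor, so allocators just check the head.
typedef struct qseg_s { struct qseg_s* next; } qseg_t;
typedef struct queue_s { qseg_t* first; } queue_t;

static void queue_push_front(queue_t* q, qseg_t* s) { s->next = q->first; q->first = s; }

// "Constructor": initialization is responsible for making the segment findable.
static void segment_init(qseg_t* s, queue_t* q) {
  s->next = NULL;
  queue_push_front(q, s);          // previously this was left to the caller
}

int main(void) {
  queue_t q = { NULL };
  qseg_t s;
  segment_init(&s, &q);
  assert(q.first == &s);           // the caller can now rely on this invariant
  return 0;
}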