initial fix for delayed freeing of huge pages transferred between threads

Author: daan
Date:   2019-07-11 13:30:40 -07:00
Commit: 72a39c0bb1
Parent: 4e6249fd12

3 changed files with 13 additions and 5 deletions

src/heap.c

@@ -123,7 +123,7 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
   if (collect == ABANDON) {
     for (mi_page_t* page = heap->pages[MI_BIN_FULL].first; page != NULL; page = page->next) {
       _mi_page_use_delayed_free(page, false); // set thread_free.delayed to MI_NO_DELAYED_FREE
     }
   }
 }
 // free thread delayed blocks.
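
The ABANDON path above flips each full page's delayed-free flag off, since an abandoning heap will no longer drain its delayed list. As a rough illustration of the mechanism (not mimalloc's actual code), the flag typically lives in the low bits of the atomic thread_free list head, so switching it must preserve the list pointer under a compare-and-swap loop; the names, enum values, and bit layout below are assumptions:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative flag values; the real mi_delayed_t encoding may differ. */
typedef enum { SK_NO_DELAYED_FREE = 0, SK_USE_DELAYED_FREE = 1 } sk_delayed_t;

/* Sketch: swap the delayed-free flag in the low two bits of the packed
   thread_free word while keeping the block-list head intact. */
static void sk_page_use_delayed_free(_Atomic(uintptr_t)* thread_free, bool use_delayed) {
  uintptr_t flag = (use_delayed ? SK_USE_DELAYED_FREE : SK_NO_DELAYED_FREE);
  uintptr_t old = atomic_load(thread_free);
  uintptr_t desired;
  do {  /* retry if another thread pushed a block concurrently */
    desired = (old & ~(uintptr_t)0x03) | flag;  /* keep head, replace flag */
  } while (!atomic_compare_exchange_weak(thread_free, &old, desired));
}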

src/page-queue.c

@@ -293,7 +293,7 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page)
   mi_assert_expensive(mi_page_queue_contains(from, page));
   mi_assert_expensive(!mi_page_queue_contains(to, page));
   mi_assert_internal(page->block_size == to->block_size ||
-                     (page->block_size > MI_LARGE_SIZE_MAX && mi_page_queue_is_huge(to)) ||
+                     (page->block_size > MI_LARGE_SIZE_MAX && (mi_page_queue_is_huge(to) || mi_page_queue_is_full(to))) ||
                      (page->block_size == from->block_size && mi_page_queue_is_full(to)));
   if (page->prev != NULL) page->prev->next = page->next;
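
The relaxed assertion admits exactly the case this commit introduces: a huge page (block_size above MI_LARGE_SIZE_MAX) may now be moved into the full queue, not only the huge queue. A self-contained restatement of the invariant, with simplified stand-in types; the threshold value and field names here are illustrative, not mimalloc's:

#include <stdbool.h>
#include <stddef.h>

#define SK_LARGE_SIZE_MAX ((size_t)(512*1024))  /* illustrative, not the real constant */

typedef struct { size_t block_size; bool is_huge; bool is_full; } sk_queue_t;

/* The enqueue invariant after this change: an exact size-class match, or a
   huge page moving to the huge *or* full queue, or a same-size move into
   the full queue. */
static bool sk_can_enqueue(size_t page_block_size, const sk_queue_t* from, const sk_queue_t* to) {
  return page_block_size == to->block_size
      || (page_block_size > SK_LARGE_SIZE_MAX && (to->is_huge || to->is_full))
      || (page_block_size == from->block_size && to->is_full);
}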

src/page.c

@@ -542,6 +542,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   mi_assert(mi_page_immediate_available(page));
 }

 /* -----------------------------------------------------------
   Find pages with free blocks
 -------------------------------------------------------------*/
@@ -618,7 +619,6 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq)
 // Find a page with free blocks of `size`.
 static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {
-  _mi_heap_delayed_free(heap);
   mi_page_queue_t* pq = mi_page_queue(heap,size);
   mi_page_t* page = pq->first;
   if (page != NULL) {
@@ -674,7 +674,7 @@ static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size) {
       mi_assert_internal(mi_page_immediate_available(page));
       mi_assert_internal(page->block_size == block_size);
       mi_heap_stat_increase( heap, huge, block_size);
     }
   }
   return page;
 }
@@ -694,6 +694,9 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept
   // call potential deferred free routines
   _mi_deferred_free(heap, false);

+  // free delayed frees from other threads
+  _mi_heap_delayed_free(heap);
+
   // huge allocation?
   mi_page_t* page;
   if (mi_unlikely(size > MI_LARGE_SIZE_MAX)) {
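
With this hunk, _mi_heap_delayed_free runs on every generic allocation rather than only inside mi_find_free_page (where the previous hunk removed it), so delayed frees posted by other threads are drained even when the request takes the huge-allocation branch. A minimal sketch of such a drain, assuming the pending blocks form an atomic singly linked list; the local-free callback is hypothetical:

#include <stdatomic.h>
#include <stddef.h>

typedef struct sk_block_s { struct sk_block_s* next; } sk_block_t;

/* Sketch: atomically detach the whole pending list, then free each block
   through the owning heap's local free path. */
static void sk_heap_delayed_free(_Atomic(sk_block_t*)* delayed,
                                 void (*free_local)(sk_block_t*)) {
  sk_block_t* block = atomic_exchange(delayed, NULL);
  while (block != NULL) {
    sk_block_t* next = block->next;  /* read before the block is reused */
    free_local(block);
    block = next;
  }
}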
@@ -714,5 +717,10 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept
   mi_assert_internal(page->block_size >= size);

   // and try again, this time succeeding! (i.e. this should never recurse)
-  return _mi_page_malloc(heap, page, size);
+  void* p = _mi_page_malloc(heap, page, size);
+  if (page->used == page->reserved) {
+    // needed for huge pages to free reliably from other threads.
+    mi_page_to_full(page,mi_page_queue_of(page));
+  }
+  return p;
 }
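
The added check is the core of the fix: a huge page satisfies its single allocation immediately, so used == reserved holds right away and the page would otherwise sit in a queue the owner never rescans. Parking it in the full queue enables the delayed-free protocol, so a free arriving from another thread is routed back to the owning heap instead of going unnoticed. A simplified sketch of that effect; the types and names are stand-ins, and the real mi_page_to_full also relinks the page between queues:

#include <stdbool.h>
#include <stddef.h>

typedef enum { SK_NO_DELAYED = 0, SK_USE_DELAYED = 1 } sk_delayed_mode_t;
typedef struct {
  size_t used, reserved;
  bool in_full_queue;
  sk_delayed_mode_t delayed;
} sk_page_t;

/* Sketch of the effect of mi_page_to_full: once no blocks remain, park the
   page in the full queue and switch it to delayed freeing so cross-thread
   frees reach the owning heap's delayed list. */
static void sk_page_to_full(sk_page_t* page) {
  if (page->used != page->reserved) return;  /* still has free blocks */
  page->in_full_queue = true;
  page->delayed = SK_USE_DELAYED;
}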