merge from dev

daan 2019-07-11 16:32:11 -07:00
commit 8a6b474fa0
8 changed files with 71 additions and 31 deletions

View File

@@ -71,7 +71,7 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force); //
void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); // abandon the page, to be picked up by another thread...
void _mi_heap_delayed_free(mi_heap_t* heap);
- void _mi_page_use_delayed_free(mi_page_t* page, bool enable);
+ void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay);
size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append);
void _mi_deferred_free(mi_heap_t* heap, bool force);

View File

@@ -75,8 +75,8 @@ terms of the MIT license. A copy of the license can be found in the file
// Main tuning parameters for segment and page sizes
// Sizes for 64-bit, divide by two for 32-bit
- #define MI_SMALL_PAGE_SHIFT (13 + MI_INTPTR_SHIFT) // 64kb
- #define MI_LARGE_PAGE_SHIFT ( 6 + MI_SMALL_PAGE_SHIFT) // 4mb
+ #define MI_SMALL_PAGE_SHIFT (14 + MI_INTPTR_SHIFT) // 64kb
+ #define MI_LARGE_PAGE_SHIFT ( 5 + MI_SMALL_PAGE_SHIFT) // 4mb
#define MI_SEGMENT_SHIFT ( MI_LARGE_PAGE_SHIFT) // 4mb
// Derived constants
@@ -114,8 +114,9 @@ typedef struct mi_block_s {
typedef enum mi_delayed_e {
MI_NO_DELAYED_FREE = 0,
- MI_USE_DELAYED_FREE,
- MI_DELAYED_FREEING
+ MI_USE_DELAYED_FREE = 1,
+ MI_DELAYED_FREEING = 2,
+ MI_NEVER_DELAYED_FREE = 3
} mi_delayed_t;
@@ -132,7 +133,7 @@ typedef union mi_page_flags_u {
typedef union mi_thread_free_u {
volatile uintptr_t value;
struct {
- mi_delayed_t delayed:2;
+ uintptr_t delayed:2;
#if MI_INTPTR_SIZE==8
uintptr_t head:62; // head free block in the list (right-shifted by 2)
#elif MI_INTPTR_SIZE==4
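Note on the two type changes above: mi_delayed_t gains a fourth state (MI_NEVER_DELAYED_FREE), and the bit-field type switches from mi_delayed_t to uintptr_t (enum-typed bit-fields are implementation-defined in C). The delay state and the free-list head still share a single word. A minimal standalone sketch of that packing, with illustrative names and assuming a 64-bit build (not code from this commit):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for mi_thread_free_u: the low 2 bits hold the delay state, the
   remaining 62 bits hold the head block address shifted right by 2 (valid
   because blocks are at least 4-byte aligned). */
typedef union thread_free_u {
  uintptr_t value;
  struct {
    uintptr_t delayed : 2;   /* 0..3, mirrors the four mi_delayed_e values */
    uintptr_t head    : 62;  /* head free block, address >> 2 */
  };
} thread_free_t;

int main(void) {
  static long block;                    /* any 4-byte-aligned object will do */
  thread_free_t tf = { .value = 0 };
  tf.delayed = 3;                       /* e.g. MI_NEVER_DELAYED_FREE */
  tf.head    = (uintptr_t)&block >> 2;  /* encode the pointer */
  assert((long*)(tf.head << 2) == &block);  /* decoding round-trips */
  printf("delayed=%u head=%p\n", (unsigned)tf.delayed, (void*)(tf.head << 2));
  return 0;
}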

View File

@@ -115,7 +115,9 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
do {
tfreex.value = tfree.value = page->thread_free.value;
- use_delayed = (tfree.delayed == MI_USE_DELAYED_FREE);
+ use_delayed = (tfree.delayed == MI_USE_DELAYED_FREE ||
+ (tfree.delayed == MI_NO_DELAYED_FREE && page->used == page->thread_freed+1)
+ );
if (mi_unlikely(use_delayed)) {
// unlikely: this only happens on the first concurrent free in a page that is in the full list
tfreex.delayed = MI_DELAYED_FREEING;
@@ -147,7 +149,8 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
// and reset the MI_DELAYED_FREEING flag
do {
tfreex.value = tfree.value = page->thread_free.value;
- tfreex.delayed = MI_NO_DELAYED_FREE;
+ mi_assert_internal(tfree.delayed == MI_NEVER_DELAYED_FREE || tfree.delayed == MI_DELAYED_FREEING);
+ if (tfree.delayed != MI_NEVER_DELAYED_FREE) tfreex.delayed = MI_NO_DELAYED_FREE;
} while (!mi_atomic_compare_exchange((volatile uintptr_t*)&page->thread_free, tfreex.value, tfree.value));
}
}
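Both hunks above use the same lock-free idiom: read the packed thread_free word, build the desired new value, and retry the compare-and-swap if another thread changed the word in the meantime. A minimal standalone sketch of that idiom using C11 atomics (simplified layout and names, not the mi_atomic_* API):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the packed word: low 2 bits = delay state,
   upper bits = right-shifted head pointer (unused in this sketch). */
static _Atomic uintptr_t thread_free;

/* Atomically overwrite just the 2-bit delay state, keeping the rest.
   The loop retries whenever a concurrent update wins the race. */
static void set_delayed(uintptr_t delay) {
  uintptr_t old = atomic_load(&thread_free);
  uintptr_t desired;
  do {
    desired = (old & ~(uintptr_t)0x3) | (delay & 0x3);
  } while (!atomic_compare_exchange_weak(&thread_free, &old, desired));
}

int main(void) {
  set_delayed(2);   /* e.g. MI_DELAYED_FREEING */
  printf("delay bits: %lu\n", (unsigned long)(atomic_load(&thread_free) & 0x3));
  return 0;
}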

View File

@@ -97,6 +97,14 @@ static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t
return true; // don't break
}
+ static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
+ UNUSED(arg1);
+ UNUSED(arg2);
+ UNUSED(heap);
+ UNUSED(pq);
+ _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE);
+ return true; // don't break
+ }
static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
{
@@ -119,11 +127,12 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
#endif
}
- // if abandoning, mark all full pages to no longer add to delayed_free
+ // if abandoning, mark all pages to no longer add to delayed_free
if (collect == ABANDON) {
- for (mi_page_t* page = heap->pages[MI_BIN_FULL].first; page != NULL; page = page->next) {
- _mi_page_use_delayed_free(page, false); // set thread_free.delayed to MI_NO_DELAYED_FREE
- }
+ //for (mi_page_t* page = heap->pages[MI_BIN_FULL].first; page != NULL; page = page->next) {
+ // _mi_page_use_delayed_free(page, false); // set thread_free.delayed to MI_NO_DELAYED_FREE
+ //}
+ mi_heap_visit_pages(heap, &mi_heap_page_never_delayed_free, NULL, NULL);
}
// free thread delayed blocks.
@@ -228,7 +237,7 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_
UNUSED(pq);
// ensure no more thread_delayed_free will be added
- _mi_page_use_delayed_free(page, false);
+ _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE);
// stats
if (page->block_size > MI_LARGE_SIZE_MAX) {
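The heap.c changes above swap a loop over just the MI_BIN_FULL queue for mi_heap_visit_pages plus a callback, so that on abandon every page (not only the full ones) gets marked MI_NEVER_DELAYED_FREE. A reduced mock of that visitor idiom, with simplified stand-in types (the real mi_heap_visit_pages walks all size bins and passes two user arguments):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct page_s { struct page_s* next; int delayed; } page_t;
typedef struct heap_s { page_t* bins[4]; } heap_t;
typedef bool (*page_visit_fun)(heap_t* heap, page_t* page, void* arg);

/* Walk every page in every bin; the callback returns true to keep going. */
static bool heap_visit_pages(heap_t* heap, page_visit_fun fn, void* arg) {
  for (size_t bin = 0; bin < 4; bin++)
    for (page_t* p = heap->bins[bin]; p != NULL; p = p->next)
      if (!fn(heap, p, arg)) return false;
  return true;
}

/* Analogue of mi_heap_page_never_delayed_free: mark the page, don't break. */
static bool mark_never_delayed(heap_t* heap, page_t* page, void* arg) {
  (void)heap; (void)arg;
  page->delayed = 3;   /* MI_NEVER_DELAYED_FREE in the real code */
  return true;
}

int main(void) {
  page_t a = { NULL, 0 }, b = { &a, 0 };
  heap_t heap = { { &b, NULL, NULL, NULL } };
  heap_visit_pages(&heap, &mark_never_delayed, NULL);
  printf("a=%d b=%d\n", a.delayed, b.delayed);   /* both marked: 3 3 */
  return 0;
}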

View File

@@ -34,8 +34,8 @@ typedef struct mi_option_desc_s {
static mi_option_desc_t options[_mi_option_last] = {
{ 0, UNINIT, "page_reset" },
{ 0, UNINIT, "cache_reset" },
- { 1, UNINIT, "eager_commit" },
- { 0, UNINIT, "eager_region_commit" },
+ { 0, UNINIT, "eager_commit" },
+ { 0, UNINIT, "eager_region_commit" },
{ 0, UNINIT, "large_os_pages" }, // use large OS pages, use only with eager commit to prevent fragmentation of VMA's
{ 0, UNINIT, "reset_decommits" },
{ 0, UNINIT, "reset_discards" },

View File

@@ -267,8 +267,8 @@ static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) {
mi_assert_internal(page->heap == NULL);
mi_assert_internal(!mi_page_queue_contains(queue, page));
- mi_assert_internal(page->block_size == queue->block_size ||
- (page->block_size > MI_LARGE_SIZE_MAX && mi_page_queue_is_huge(queue)) ||
+ mi_assert_internal(page->block_size == queue->block_size ||
+ (page->block_size > MI_LARGE_SIZE_MAX && mi_page_queue_is_huge(queue)) ||
(page->flags.in_full && mi_page_queue_is_full(queue)));
page->flags.in_full = mi_page_queue_is_full(queue);

View File

@@ -109,17 +109,19 @@ bool _mi_page_is_valid(mi_page_t* page) {
#endif
- void _mi_page_use_delayed_free(mi_page_t* page, bool enable) {
+ void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay ) {
mi_thread_free_t tfree;
mi_thread_free_t tfreex;
do {
tfreex.value = tfree.value = page->thread_free.value;
- tfreex.delayed = (enable ? MI_USE_DELAYED_FREE : MI_NO_DELAYED_FREE);
- if (mi_unlikely(tfree.delayed == MI_DELAYED_FREEING)) {
+ if (mi_unlikely(tfree.delayed < MI_DELAYED_FREEING)) {
+ tfreex.delayed = delay;
+ }
+ else if (mi_unlikely(tfree.delayed == MI_DELAYED_FREEING)) {
mi_atomic_yield(); // delay until outstanding MI_DELAYED_FREEING are done.
continue; // and try again
}
}
}
while(tfreex.delayed != tfree.delayed && // avoid atomic operation if already equal
!mi_atomic_compare_exchange((volatile uintptr_t*)&page->thread_free, tfreex.value, tfree.value));
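The rewritten loop above makes the delay flag behave like a small state machine: states below MI_DELAYED_FREEING (MI_NO_DELAYED_FREE and MI_USE_DELAYED_FREE) can simply be overwritten with the requested value, MI_DELAYED_FREEING makes the caller yield and retry until the in-flight delayed free completes, and MI_NEVER_DELAYED_FREE is sticky because tfreex.delayed stays equal to tfree.delayed and the compare-exchange is skipped. A small sketch of that transition rule (illustrative names, not the library's API):

#include <assert.h>

/* Values mirror the new mi_delayed_e: 0=no, 1=use, 2=freeing, 3=never. */
typedef enum { NO_DF = 0, USE_DF = 1, FREEING_DF = 2, NEVER_DF = 3 } delay_t;

/* Returns the state the loop would write, or -1 when it must yield and
   retry because a concurrent delayed free is still in progress. */
static int next_delay_state(delay_t current, delay_t requested) {
  if (current < FREEING_DF)  return (int)requested;   /* NO/USE: overwrite */
  if (current == FREEING_DF) return -1;               /* wait, then retry */
  return (int)NEVER_DF;                               /* NEVER stays NEVER */
}

int main(void) {
  assert(next_delay_state(NO_DF, NEVER_DF) == NEVER_DF);   /* abandon path */
  assert(next_delay_state(FREEING_DF, NO_DF) == -1);       /* must wait */
  assert(next_delay_state(NEVER_DF, USE_DF) == NEVER_DF);  /* never undone */
  return 0;
}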
@@ -272,7 +274,7 @@ void _mi_page_unfull(mi_page_t* page) {
mi_assert_expensive(_mi_page_is_valid(page));
mi_assert_internal(page->flags.in_full);
- _mi_page_use_delayed_free(page, false);
+ _mi_page_use_delayed_free(page, MI_NO_DELAYED_FREE);
if (!page->flags.in_full) return;
mi_heap_t* heap = page->heap;
@@ -288,7 +290,7 @@ static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) {
mi_assert_internal(!mi_page_immediate_available(page));
mi_assert_internal(!page->flags.in_full);
- _mi_page_use_delayed_free(page, true);
+ _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE);
if (page->flags.in_full) return;
mi_page_queue_enqueue_from(&page->heap->pages[MI_BIN_FULL], pq, page);
@@ -305,8 +307,8 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
mi_assert_expensive(_mi_page_is_valid(page));
mi_assert_internal(pq == mi_page_queue_of(page));
mi_assert_internal(page->heap != NULL);
- mi_assert_internal(page->thread_free.delayed == MI_NO_DELAYED_FREE);
+ _mi_page_use_delayed_free(page,MI_NEVER_DELAYED_FREE);
#if MI_DEBUG>1
// check there are no references left..
for (mi_block_t* block = (mi_block_t*)page->heap->thread_delayed_free; block != NULL; block = mi_block_nextx(page->heap->cookie,block)) {
@@ -330,7 +332,14 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
mi_assert_expensive(_mi_page_is_valid(page));
mi_assert_internal(pq == mi_page_queue_of(page));
mi_assert_internal(mi_page_all_free(page));
- mi_assert_internal(page->thread_free.delayed != MI_DELAYED_FREEING);
+ #if MI_DEBUG>1
+ // check if we can safely free
+ mi_thread_free_t free;
+ free.value = page->thread_free.value;
+ free.delayed = MI_NEVER_DELAYED_FREE;
+ free.value = mi_atomic_exchange(&page->thread_free.value, free.value);
+ mi_assert_internal(free.delayed != MI_DELAYED_FREEING);
+ #endif
page->flags.has_aligned = false;
@@ -537,6 +546,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
mi_assert(mi_page_immediate_available(page));
}
/* -----------------------------------------------------------
Find pages with free blocks
-------------------------------------------------------------*/
@@ -613,7 +623,6 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
// Find a page with free blocks of `size`.
static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {
- _mi_heap_delayed_free(heap);
mi_page_queue_t* pq = mi_page_queue(heap,size);
mi_page_t* page = pq->first;
if (page != NULL) {
@@ -669,7 +678,7 @@ static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size) {
mi_assert_internal(mi_page_immediate_available(page));
mi_assert_internal(page->block_size == block_size);
mi_heap_stat_increase( heap, huge, block_size);
- }
+ }
return page;
}
@@ -689,6 +698,9 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept
// call potential deferred free routines
_mi_deferred_free(heap, false);
+ // free delayed frees from other threads
+ _mi_heap_delayed_free(heap);
// huge allocation?
mi_page_t* page;
if (mi_unlikely(size > MI_LARGE_SIZE_MAX)) {
@@ -710,4 +722,11 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept
// and try again, this time succeeding! (i.e. this should never recurse)
return _mi_page_malloc(heap, page, size);
+ /*
+ if (page->used == page->reserved) {
+ // needed for huge pages to free reliably from other threads.
+ mi_page_to_full(page,mi_page_queue_of(page));
+ }
+ return p;
+ */
}

View File

@@ -683,13 +683,21 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, mi_segments_tld_t* tld
mi_page_t* _mi_segment_page_alloc(size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
mi_page_t* page;
- if (block_size <= (MI_SMALL_PAGE_SIZE / 8))
+ if (block_size <= (MI_SMALL_PAGE_SIZE / 8)) {
+ // smaller blocks than 8kb (assuming MI_SMALL_PAGE_SIZE == 64kb)
page = mi_segment_small_page_alloc(tld,os_tld);
- else if (block_size < (MI_LARGE_SIZE_MAX - sizeof(mi_segment_t)))
- page = mi_segment_large_page_alloc(tld, os_tld);
- else
+ }
+ else if (block_size <= (MI_SMALL_PAGE_SIZE/2) && (MI_SMALL_PAGE_SIZE % block_size) <= (MI_SMALL_PAGE_SIZE / 8)) {
+ // use small page too if it happens to fit well
+ page = mi_segment_small_page_alloc(tld, os_tld);
+ }
+ else if (block_size < (MI_LARGE_SIZE_MAX - sizeof(mi_segment_t))) {
+ // otherwise use a large page
+ page = mi_segment_large_page_alloc(tld, os_tld);
+ }
+ else {
page = mi_segment_huge_page_alloc(block_size,tld,os_tld);
+ }
mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page)));
return page;
}
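The reworked chain in _mi_segment_page_alloc above picks a page kind purely from the block size: a small page for blocks up to an eighth of a small page, a small page again when a larger block still divides a small page with little waste, a large page below (roughly) MI_LARGE_SIZE_MAX, and a dedicated huge-page segment beyond that. A standalone sketch of that selection rule; the constants are illustrative stand-ins for the real MI_* values:

#include <stddef.h>
#include <stdio.h>

/* Illustrative 64-bit-ish sizes, not taken verbatim from the headers. */
#define SMALL_PAGE_SIZE   (64 * 1024)
#define LARGE_SIZE_MAX    (512 * 1024)
#define SEGMENT_OVERHEAD  (4 * 1024)      /* stand-in for sizeof(mi_segment_t) */

typedef enum { PAGE_SMALL, PAGE_LARGE, PAGE_HUGE } page_kind_t;

/* Mirrors the selection logic of the new _mi_segment_page_alloc. */
static page_kind_t page_kind_for(size_t block_size) {
  if (block_size <= SMALL_PAGE_SIZE / 8)
    return PAGE_SMALL;                    /* small blocks: small page */
  if (block_size <= SMALL_PAGE_SIZE / 2 &&
      SMALL_PAGE_SIZE % block_size <= SMALL_PAGE_SIZE / 8)
    return PAGE_SMALL;                    /* still packs a small page with little waste */
  if (block_size < LARGE_SIZE_MAX - SEGMENT_OVERHEAD)
    return PAGE_LARGE;                    /* otherwise a large page */
  return PAGE_HUGE;                       /* oversized blocks get their own segment */
}

int main(void) {
  size_t sizes[] = { 64, 8 * 1024, 16 * 1024, 200 * 1024, 8u << 20 };
  for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
    printf("%8zu -> %d\n", sizes[i], (int)page_kind_for(sizes[i]));
  return 0;
}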