improve page flags handling

This commit is contained in:
daan 2019-08-08 17:18:49 -07:00
parent de57686dac
commit 5e56b40fe6
5 changed files with 21 additions and 15 deletions

View File

@ -315,7 +315,13 @@ static inline mi_page_queue_t* mi_page_queue(const mi_heap_t* heap, size_t size)
}
// Return the id of the thread that owns this page.
// The id is stored shifted right by MI_PAGE_FLAGS_BITS in the `xthread_id`
// bit-field (the low bits of the word hold the `in_full`/`has_aligned` flags),
// so shifting left reconstructs the original thread id.
// NOTE(review): this assumes the low MI_PAGE_FLAGS_BITS bits of a real thread
// id are always zero — confirm against how _mi_thread_id() values are formed.
static inline uintptr_t mi_page_thread_id(const mi_page_t* page) {
return (page->flags.xthread_id << MI_PAGE_FLAGS_BITS);
}
// Initialize the flags word of a freshly claimed/initialized page:
// clear all flag bits (`in_full`, `has_aligned`) and store the owning
// thread id in the upper `xthread_id` bits.
static inline void mi_page_init_flags(mi_page_t* page, uintptr_t thread_id) {
// zero first so the two low flag bits are cleared
page->flags.value = 0;
// store the id shifted down into the high bit-field
page->flags.xthread_id = (thread_id >> MI_PAGE_FLAGS_BITS);
// NOTE(review): this assert only holds if the low MI_PAGE_FLAGS_BITS bits of
// `thread_id` are zero (thread ids sufficiently aligned) — TODO confirm.
mi_assert(page->flags.value == thread_id);
}
// -------------------------------------------------------------------

View File

@ -126,18 +126,18 @@ typedef enum mi_delayed_e {
// Use the lowest two bits of a thread id for the `in_full` and `has_aligned` flags
// This allows a single test in `mi_free` to check for unlikely cases
// (namely, non-local free, aligned free, or freeing in a full page)
#define MI_PAGE_FLAGS_BITS     (2)
#define MI_PAGE_FLAGS_TID_BITS (MI_INTPTR_SIZE*8 - MI_PAGE_FLAGS_BITS)

// Page flags packed into a single machine word together with the owning
// thread id: the low MI_PAGE_FLAGS_BITS bits are the flag bits and the
// remaining upper bits hold the (right-shifted) thread id, so `value`
// can be compared against a thread id in one test (see `mi_free`).
// NOTE(review): bit-field allocation order is implementation-defined; the
// endian-conditional layout below assumes the common ABI convention where
// little-endian allocates bit-fields from the low bits upward (and
// big-endian from the high bits), keeping `xthread_id` in the high bits.
typedef union mi_page_flags_u {
  uintptr_t value;  // raw access to all bits at once
  struct {
    #ifdef MI_BIG_ENDIAN
    uintptr_t xthread_id : MI_PAGE_FLAGS_TID_BITS;
    #endif
    uintptr_t in_full : 1;
    uintptr_t has_aligned : 1;
    #ifndef MI_BIG_ENDIAN
    uintptr_t xthread_id : MI_PAGE_FLAGS_TID_BITS;
    #endif
  };
} mi_page_flags_t;

View File

@ -237,7 +237,7 @@ void mi_free(void* p) mi_attr_noexcept
// adjust if it might be an un-aligned block
uintptr_t tid = _mi_thread_id();
-if (mi_likely(tid == page->flags.threadidx)) { // local, and not full or aligned
+if (mi_likely(tid == page->flags.value)) { // local, and not full or aligned
mi_block_t* block = (mi_block_t*)p;
mi_block_set_next(page, block, page->local_free); // note: moving this write earlier does not matter for performance
page->local_free = block;

View File

@ -75,7 +75,7 @@ static bool mi_page_is_valid_init(mi_page_t* page) {
mi_segment_t* segment = _mi_page_segment(page);
uint8_t* start = _mi_page_start(segment,page,NULL);
mi_assert_internal(start == _mi_segment_page_start(segment,page,page->block_size,NULL));
-mi_assert_internal(segment->thread_id == mi_page_thread_id(page));
+mi_assert_internal(segment->thread_id==0 || segment->thread_id == mi_page_thread_id(page));
//mi_assert_internal(start + page->capacity*page->block_size == page->top);
mi_assert_internal(mi_page_list_is_valid(page,page->free));
@ -387,7 +387,7 @@ void _mi_page_retire(mi_page_t* page) {
// if its neighbours are almost fully used.
if (mi_likely(page->block_size <= MI_SMALL_SIZE_MAX)) {
if (mi_page_mostly_used(page->prev) && mi_page_mostly_used(page->next)) {
-_mi_stat_counter_increase(&page->heap->tld->stats.page_no_retire,1);
+_mi_stat_counter_increase(&_mi_stats_main.page_no_retire,1);
return; // dont't retire after all
}
}

View File

@ -618,7 +618,7 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen
}
else {
// otherwise reclaim it
-page->flags.threadidx = segment->thread_id;
+mi_page_init_flags(page,segment->thread_id);
_mi_page_reclaim(heap,page);
}
}
@ -649,7 +649,7 @@ static mi_page_t* mi_segment_page_alloc_in(mi_segment_t* segment, mi_segments_tl
mi_assert_internal(mi_segment_has_free(segment));
mi_page_t* page = mi_segment_find_free(segment, tld->stats);
page->segment_in_use = true;
-page->flags.threadidx = segment->thread_id;
+mi_page_init_flags(page,segment->thread_id);
segment->used++;
mi_assert_internal(segment->used <= segment->capacity);
if (segment->used == segment->capacity) {
@ -689,7 +689,7 @@ static mi_page_t* mi_segment_large_page_alloc(mi_segments_tld_t* tld, mi_os_tld_
segment->used = 1;
mi_page_t* page = &segment->pages[0];
page->segment_in_use = true;
-page->flags.threadidx = segment->thread_id;
+mi_page_init_flags(page,segment->thread_id);
return page;
}
@ -701,7 +701,7 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, mi_segments_tld_t* tld
segment->used = 1;
mi_page_t* page = &segment->pages[0];
page->segment_in_use = true;
-page->flags.threadidx = segment->thread_id;
+mi_page_init_flags(page,segment->thread_id);
return page;
}