restructure the page flags to use explicit masks
parent c0258b2d29
commit ed785253bf
@@ -314,16 +314,37 @@ static inline mi_page_queue_t* mi_page_queue(const mi_heap_t* heap, size_t size)
   return &((mi_heap_t*)heap)->pages[_mi_bin(size)];
 }
 
 
 //-----------------------------------------------------------
 // Page flags
 //-----------------------------------------------------------
 static inline uintptr_t mi_page_thread_id(const mi_page_t* page) {
-  return (page->flags.xthread_id << MI_PAGE_FLAGS_BITS);
+  return (page->flags & ~MI_PAGE_FLAGS_MASK);
 }
 
 static inline void mi_page_init_flags(mi_page_t* page, uintptr_t thread_id) {
-  page->flags.value = 0;
-  page->flags.xthread_id = (thread_id >> MI_PAGE_FLAGS_BITS);
-  mi_assert(page->flags.value == thread_id);
+  page->flags = thread_id;
 }
 
+static inline bool mi_page_is_in_full(const mi_page_t* page) {
+  return ((page->flags & 0x01) != 0);
+}
+
+static inline void mi_page_set_in_full(mi_page_t* page, bool in_full) {
+  if (in_full) page->flags |= 0x01;
+          else page->flags &= ~0x01;
+}
+
+static inline bool mi_page_has_aligned(const mi_page_t* page) {
+  return ((page->flags & 0x02) != 0);
+}
+
+static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) {
+  if (has_aligned) page->flags |= 0x02;
+              else page->flags &= ~0x02;
+}
+
 
 // -------------------------------------------------------------------
 // Encoding/Decoding the free list next pointers
 // -------------------------------------------------------------------
@@ -123,25 +123,12 @@ typedef enum mi_delayed_e {
 } mi_delayed_t;
 
 
-// Use the lowest two bits of a thread id for the `in_full` and `has_aligned` flags
+// Use the bottom 2 bits for the `in_full` and `has_aligned` flags
 // and the rest for the threadid (we assume tid's never use those lower 2 bits).
 // This allows a single test in `mi_free` to check for unlikely cases
 // (namely, non-local free, aligned free, or freeing in a full page)
-#define MI_PAGE_FLAGS_BITS (2)
-#define MI_PAGE_FLAGS_TID_BITS (MI_INTPTR_SIZE*8 - MI_PAGE_FLAGS_BITS)
-typedef union mi_page_flags_u {
-  uintptr_t value;
-  struct {
-#ifdef MI_BIG_ENDIAN
-    uintptr_t xthread_id : MI_PAGE_FLAGS_TID_BITS;
-#endif
-    uintptr_t in_full : 1;
-    uintptr_t has_aligned : 1;
-#ifndef MI_BIG_ENDIAN
-    uintptr_t xthread_id : MI_PAGE_FLAGS_TID_BITS;
-#endif
-  };
-} mi_page_flags_t;
+#define MI_PAGE_FLAGS_MASK  ((uintptr_t)0x03)
+typedef uintptr_t mi_page_flags_t;
 
 // Thread free list.
 // We use the bottom 2 bits of the pointer for mi_delayed_t flags
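The comment in the hunk above states the invariant that makes the whole change work: thread ids are assumed to never use their lowest two bits, so those bits can carry the `in_full` and `has_aligned` flags while the full word still compares equal to the plain thread id on the common path. The following standalone sketch is illustrative only and is not part of the patch; the `demo_*` names are made up, and only the mask value and the single-compare idea come from the diff:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define DEMO_FLAGS_MASK  ((uintptr_t)0x03)   /* bit 0: in_full, bit 1: has_aligned */

typedef uintptr_t demo_flags_t;              /* thread id with the flag bits packed in */

/* One compare: true only when the tid matches AND both flag bits are zero. */
static inline bool demo_fast_path(demo_flags_t flags, uintptr_t tid) {
  return (flags == tid);
}

static inline uintptr_t demo_thread_id(demo_flags_t flags) {
  return (flags & ~DEMO_FLAGS_MASK);         /* mirrors the new mi_page_thread_id */
}

int main(void) {
  uintptr_t tid = (uintptr_t)0xDEADBEE0;     /* assumption: a tid's low 2 bits are 0 */
  demo_flags_t flags = tid;                  /* what mi_page_init_flags now does */

  assert(demo_fast_path(flags, tid));        /* local free, not full, not aligned */
  assert(demo_thread_id(flags) == tid);

  flags |= 0x01;                             /* like mi_page_set_in_full(page, true) */
  assert(!demo_fast_path(flags, tid));       /* full page -> take the generic path */
  assert(demo_thread_id(flags) == tid);      /* the owning thread is still recoverable */

  flags = (flags & ~(uintptr_t)0x01) | 0x02; /* clear in_full, set has_aligned */
  assert(!demo_fast_path(flags, tid));       /* aligned block -> generic path */

  assert(!demo_fast_path(tid, tid + 4));     /* another thread's tid -> generic path */
  return 0;
}

When the single comparison fails, `mi_free` falls through to the generic path, which the hunks below update to use `mi_page_has_aligned` and `mi_page_is_in_full` instead of the old bitfield accesses.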
@@ -43,7 +43,7 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* heap, size_t size, size_t
   if (p == NULL) return NULL;
 
   // .. and align within the allocation
-  _mi_ptr_page(p)->flags.has_aligned = true;
+  mi_page_set_has_aligned( _mi_ptr_page(p), true );
   uintptr_t adjust = alignment - (((uintptr_t)p + offset) % alignment);
   mi_assert_internal(adjust % sizeof(uintptr_t) == 0);
   void* aligned_p = (adjust == alignment ? p : (void*)((uintptr_t)p + adjust));
@@ -174,7 +174,7 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block
   if (mi_unlikely(mi_page_all_free(page))) {
     _mi_page_retire(page);
   }
-  else if (mi_unlikely(page->flags.in_full)) {
+  else if (mi_unlikely(mi_page_is_in_full(page))) {
     _mi_page_unfull(page);
   }
 }
@@ -194,7 +194,7 @@ mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* p
 
 
 static void mi_decl_noinline mi_free_generic(const mi_segment_t* segment, mi_page_t* page, bool local, void* p) {
-  mi_block_t* block = (page->flags.has_aligned ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p);
+  mi_block_t* block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p);
   _mi_free_block(page, local, block);
 }
 
@@ -237,7 +237,7 @@ void mi_free(void* p) mi_attr_noexcept
 #endif
 
   uintptr_t tid = _mi_thread_id();
-  if (mi_likely(tid == page->flags.value)) {
+  if (mi_likely(tid == page->flags)) {
     // local, and not full or aligned
     mi_block_t* block = (mi_block_t*)p;
     mi_block_set_next(page, block, page->local_free);
@@ -273,7 +273,7 @@ size_t mi_usable_size(const void* p) mi_attr_noexcept {
   const mi_segment_t* segment = _mi_ptr_segment(p);
   const mi_page_t* page = _mi_segment_page_of(segment,p);
   size_t size = page->block_size;
-  if (mi_unlikely(page->flags.has_aligned)) {
+  if (mi_unlikely(mi_page_has_aligned(page))) {
     ptrdiff_t adjust = (uint8_t*)p - (uint8_t*)_mi_page_ptr_unalign(segment,page,p);
     mi_assert_internal(adjust >= 0 && (size_t)adjust <= size);
     return (size - adjust);
@@ -177,7 +177,7 @@ static bool mi_heap_contains_queue(const mi_heap_t* heap, const mi_page_queue_t*
 #endif
 
 static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) {
-  uint8_t bin = (page->flags.in_full ? MI_BIN_FULL : _mi_bin(page->block_size));
+  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : _mi_bin(page->block_size));
   mi_heap_t* heap = page->heap;
   mi_assert_internal(heap != NULL && bin <= MI_BIN_FULL);
   mi_page_queue_t* pq = &heap->pages[bin];
@@ -187,10 +187,10 @@ static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) {
 }
 
 static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t* page) {
-  uint8_t bin = (page->flags.in_full ? MI_BIN_FULL : _mi_bin(page->block_size));
+  uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : _mi_bin(page->block_size));
   mi_assert_internal(bin <= MI_BIN_FULL);
   mi_page_queue_t* pq = &heap->pages[bin];
-  mi_assert_internal(page->flags.in_full || page->block_size == pq->block_size);
+  mi_assert_internal(mi_page_is_in_full(page) || page->block_size == pq->block_size);
   return pq;
 }
 
@@ -245,7 +245,7 @@ static bool mi_page_queue_is_empty(mi_page_queue_t* queue) {
 static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
   mi_assert_internal(page != NULL);
   mi_assert_expensive(mi_page_queue_contains(queue, page));
-  mi_assert_internal(page->block_size == queue->block_size || (page->block_size > MI_LARGE_SIZE_MAX && mi_page_queue_is_huge(queue)) || (page->flags.in_full && mi_page_queue_is_full(queue)));
+  mi_assert_internal(page->block_size == queue->block_size || (page->block_size > MI_LARGE_SIZE_MAX && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
   if (page->prev != NULL) page->prev->next = page->next;
   if (page->next != NULL) page->next->prev = page->prev;
   if (page == queue->last) queue->last = page->prev;
@@ -260,7 +260,7 @@ static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
   page->next = NULL;
   page->prev = NULL;
   page->heap = NULL;
-  page->flags.in_full = false;
+  mi_page_set_in_full(page,false);
 }
 
 
@@ -269,9 +269,9 @@ static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_
   mi_assert_internal(!mi_page_queue_contains(queue, page));
   mi_assert_internal(page->block_size == queue->block_size ||
                      (page->block_size > MI_LARGE_SIZE_MAX && mi_page_queue_is_huge(queue)) ||
-                     (page->flags.in_full && mi_page_queue_is_full(queue)));
+                     (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
 
-  page->flags.in_full = mi_page_queue_is_full(queue);
+  mi_page_set_in_full(page, mi_page_queue_is_full(queue));
   page->heap = heap;
   page->next = queue->first;
   page->prev = NULL;
@@ -324,7 +324,7 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro
     mi_heap_queue_first_update(page->heap, to);
   }
 
-  page->flags.in_full = mi_page_queue_is_full(to);
+  mi_page_set_in_full(page, mi_page_queue_is_full(to));
 }
 
 size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append) {
src/page.c (22 changed lines)
@@ -102,7 +102,7 @@ bool _mi_page_is_valid(mi_page_t* page) {
     mi_assert_internal(!_mi_process_is_initialized || segment->thread_id == page->heap->thread_id);
     mi_page_queue_t* pq = mi_page_queue_of(page);
     mi_assert_internal(mi_page_queue_contains(pq, page));
-    mi_assert_internal(pq->block_size==page->block_size || page->block_size > MI_LARGE_SIZE_MAX || page->flags.in_full);
+    mi_assert_internal(pq->block_size==page->block_size || page->block_size > MI_LARGE_SIZE_MAX || mi_page_is_in_full(page));
     mi_assert_internal(mi_heap_contains_queue(page->heap,pq));
   }
   return true;
@@ -282,26 +282,26 @@ void _mi_heap_delayed_free(mi_heap_t* heap) {
 void _mi_page_unfull(mi_page_t* page) {
   mi_assert_internal(page != NULL);
   mi_assert_expensive(_mi_page_is_valid(page));
-  mi_assert_internal(page->flags.in_full);
+  mi_assert_internal(mi_page_is_in_full(page));
 
   _mi_page_use_delayed_free(page, MI_NO_DELAYED_FREE);
-  if (!page->flags.in_full) return;
+  if (!mi_page_is_in_full(page)) return;
 
   mi_heap_t* heap = page->heap;
   mi_page_queue_t* pqfull = &heap->pages[MI_BIN_FULL];
-  page->flags.in_full = false; // to get the right queue
+  mi_page_set_in_full(page, false); // to get the right queue
   mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page);
-  page->flags.in_full = true;
+  mi_page_set_in_full(page, true);
   mi_page_queue_enqueue_from(pq, pqfull, page);
 }
 
 static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) {
   mi_assert_internal(pq == mi_page_queue_of(page));
   mi_assert_internal(!mi_page_immediate_available(page));
-  mi_assert_internal(!page->flags.in_full);
+  mi_assert_internal(!mi_page_is_in_full(page));
 
   _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE);
-  if (page->flags.in_full) return;
+  if (mi_page_is_in_full(page)) return;
 
   mi_page_queue_enqueue_from(&page->heap->pages[MI_BIN_FULL], pq, page);
   mi_page_thread_free_collect(page); // try to collect right away in case another thread freed just before MI_USE_DELAYED_FREE was set
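The `// to get the right queue` comment in the hunk above hides a small subtlety: the page-to-queue mapping itself consults the `in_full` bit (see the `mi_page_queue_of` hunk earlier), so `_mi_page_unfull` clears the bit to locate the regular size-bin queue and then sets it back before the move, so the page is still taken off the full queue. A minimal model of that lookup is sketched below; it is not part of the patch, and the `demo_*` names and the trivial size-to-bin mapping are made up stand-ins for `_mi_bin` and `MI_BIN_FULL`:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define DEMO_BIN_FULL  73              /* stand-in for MI_BIN_FULL */

typedef struct demo_page_s {
  bool   in_full;
  size_t block_size;
} demo_page_t;

/* Stand-in for _mi_bin: any purely size-based bin mapping works for the demo. */
static uint8_t demo_size_bin(size_t block_size) {
  return (uint8_t)(block_size / 8);
}

/* Mirrors the shape of mi_page_queue_of: full pages map to the full bin. */
static uint8_t demo_bin_of(const demo_page_t* page) {
  return (page->in_full ? DEMO_BIN_FULL : demo_size_bin(page->block_size));
}

int main(void) {
  demo_page_t page = { true, 64 };              /* a page currently in the full queue */

  assert(demo_bin_of(&page) == DEMO_BIN_FULL);  /* lookup with the flag set: full bin */

  page.in_full = false;                         /* clear "to get the right queue" */
  assert(demo_bin_of(&page) == demo_size_bin(64));

  page.in_full = true;                          /* restore before moving the page */
  return 0;
}

In the real code, `mi_page_queue_enqueue_from` then performs the actual move from `heap->pages[MI_BIN_FULL]` to the computed size-bin queue.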
@@ -349,7 +349,7 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
   mi_assert_internal(mi_tf_delayed(free) != MI_DELAYED_FREEING);
   #endif
 
-  page->flags.has_aligned = false;
+  mi_page_set_has_aligned(page, false);
 
   // account for huge pages here
   if (page->block_size > MI_LARGE_SIZE_MAX) {
@@ -377,7 +377,7 @@ void _mi_page_retire(mi_page_t* page) {
   mi_assert_expensive(_mi_page_is_valid(page));
   mi_assert_internal(mi_page_all_free(page));
 
-  page->flags.has_aligned = false;
+  mi_page_set_has_aligned(page, false);
 
   // don't retire too often..
   // (or we end up retiring and re-allocating most of the time)
@@ -560,7 +560,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   mi_assert_internal(page->thread_freed == 0);
   mi_assert_internal(page->next == NULL);
   mi_assert_internal(page->prev == NULL);
-  mi_assert_internal(page->flags.has_aligned == false);
+  mi_assert_internal(!mi_page_has_aligned(page));
   #if MI_SECURE
   mi_assert_internal(page->cookie != 0);
   #endif
@@ -619,7 +619,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
 
     // 3. If the page is completely full, move it to the `mi_pages_full`
     // queue so we don't visit long-lived pages too often.
-    mi_assert_internal(!page->flags.in_full && !mi_page_immediate_available(page));
+    mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page));
     mi_page_to_full(page,pq);
 
     page = next;