make huge pages part of the regular page queues again
commit 969d8bc5fe (parent fed883c81f)
include/mimalloc-internal.h

@@ -82,6 +82,7 @@ void* _mi_os_alloc(size_t size, mi_stats_t* stats); // to allocat
 void _mi_os_free(void* p, size_t size, mi_stats_t* stats); // to free thread local data
 size_t _mi_os_good_alloc_size(size_t size);
 bool _mi_os_has_overcommit(void);
+bool _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats);
 
 void* _mi_os_alloc_aligned_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool* large, mi_stats_t* tld_stats);
 void _mi_os_free_aligned(void* p, size_t size, size_t alignment, size_t align_offset, bool was_committed, mi_stats_t* tld_stats);
@@ -105,7 +106,12 @@ mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t pag
 void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld);
 void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld);
 uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t block_size, size_t* page_size, size_t* pre_size); // page start for any page
 
+#if MI_HUGE_PAGE_ABANDON
 void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
+#else
+void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
+#endif
+
 void _mi_segment_thread_collect(mi_segments_tld_t* tld);
 void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld);
include/mimalloc-types.h

@@ -71,6 +71,13 @@ terms of the MIT license. A copy of the license can be found in the file
 #endif
 
 
+// We used to abandon huge pages but to eagerly deallocate if freed from another thread,
+// but that makes it not possible to visit them during a heap walk or include them in a
+// `mi_heap_destroy`. We therefore instead reset/decommit the huge blocks if freed from
+// another thread so most memory is available until it gets properly freed by the owning thread.
+// #define MI_HUGE_PAGE_ABANDON 1
+
+
 // ------------------------------------------------------
 // Platform specific values
 // ------------------------------------------------------
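The new comment explains the default: with `MI_HUGE_PAGE_ABANDON` off, a cross-thread free only resets the huge block's memory instead of freeing an abandoned segment. A standalone sketch of the two strategies (plain C, not mimalloc source; `os_free` and `os_reset` are hypothetical stand-ins for unmapping and decommit):

#include <stdio.h>
#include <stddef.h>

#define HUGE_PAGE_ABANDON 0   // mirrors MI_HUGE_PAGE_ABANDON (off by default)

void os_free(void* p, size_t n)  { (void)p; printf("unmap %zu bytes\n", n); }
void os_reset(void* p, size_t n) { (void)p; printf("reset %zu bytes\n", n); }

// called when a thread frees a huge block it does not own
void huge_free_mt(void* block, size_t size) {
#if HUGE_PAGE_ABANDON
  os_free(block, size);   // abandoned segment: free immediately, but the owner
                          // can no longer visit the block in a heap walk
#else
  os_reset(block, size);  // keep ownership; return physical memory now, the
                          // owning thread frees the virtual range later
#endif
}

int main(void) { huge_free_mt(NULL, 1u << 22); return 0; }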
src/alloc-aligned.c

@@ -59,7 +59,9 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
   uintptr_t adjust = alignment - (((uintptr_t)p + offset) & align_mask);
   mi_assert_internal(adjust <= alignment);
   void* aligned_p = (adjust == alignment ? p : (void*)((uintptr_t)p + adjust));
-  if (aligned_p != p) { mi_page_set_has_aligned(_mi_ptr_page(p), true); }
+  if (aligned_p != p) {
+    mi_page_set_has_aligned(_mi_ptr_page(p), true);
+  }
 
   mi_assert_internal(mi_page_usable_block_size(_mi_ptr_page(p)) >= adjust + size);
   mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_segment(aligned_p), _mi_ptr_page(aligned_p), aligned_p));
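For reference, the `adjust` computation above with concrete, made-up numbers; this is an illustration, not mimalloc code:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  uintptr_t p = 0x10008;          // hypothetical block start
  uintptr_t alignment = 0x1000;   // requested 4 KiB alignment
  uintptr_t offset = 0;
  uintptr_t align_mask = alignment - 1;

  uintptr_t adjust = alignment - ((p + offset) & align_mask);
  assert(adjust <= alignment);
  // when p is already aligned, adjust == alignment and p is used as-is
  uintptr_t aligned_p = (adjust == alignment ? p : p + adjust);

  printf("p=%#zx aligned_p=%#zx adjust=%#zx\n", (size_t)p, (size_t)aligned_p, (size_t)adjust);
  assert((aligned_p + offset) % alignment == 0);
  return 0;
}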
src/alloc.c
@@ -334,6 +334,17 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
     mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], 1);
 #endif
   }
+#if !MI_HUGE_PAGE_ABANDON
+  else {
+    const size_t bpsize = mi_page_block_size(page);
+    if (bpsize <= MI_HUGE_OBJ_SIZE_MAX) {
+      mi_heap_stat_decrease(heap, huge, bpsize);
+    }
+    else {
+      mi_heap_stat_decrease(heap, giant, bpsize);
+    }
+  }
+#endif
 }
 #else
 static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
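The new `else` branch mirrors the `huge`/`giant` increase done at allocation time in `mi_huge_page_alloc`; both sides must use the same `MI_HUGE_OBJ_SIZE_MAX` threshold or the counters drift. A hedged sketch of that symmetry (the threshold value is made up):

#include <stdio.h>
#include <stddef.h>

#define HUGE_OBJ_SIZE_MAX (16u << 20)   // hypothetical stand-in for MI_HUGE_OBJ_SIZE_MAX

typedef struct { long huge; long giant; } stats_t;

static void stat_alloc(stats_t* s, size_t bsize) {
  if (bsize <= HUGE_OBJ_SIZE_MAX) s->huge  += (long)bsize;
  else                            s->giant += (long)bsize;
}

static void stat_free(stats_t* s, size_t bsize) {
  // mirror of stat_alloc: the same threshold must be applied on free
  if (bsize <= HUGE_OBJ_SIZE_MAX) s->huge  -= (long)bsize;
  else                            s->giant -= (long)bsize;
}

int main(void) {
  stats_t s = {0, 0};
  stat_alloc(&s, 8u << 20);
  stat_free(&s, 8u << 20);
  printf("huge=%ld giant=%ld\n", s.huge, s.giant);  // both back to 0
  return 0;
}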
@@ -341,6 +352,7 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
 }
 #endif
 
+#if MI_HUGE_PAGE_ABANDON
 #if (MI_STAT>0)
 // maintain stats for huge objects
 static void mi_stat_huge_free(const mi_page_t* page) {
@@ -358,12 +370,13 @@ static void mi_stat_huge_free(const mi_page_t* page) {
   MI_UNUSED(page);
 }
 #endif
+#endif
 
 // ------------------------------------------------------
 // Free
 // ------------------------------------------------------
 
-// multi-threaded free (or free in huge block)
+// multi-threaded free (or free in huge block if compiled with MI_HUGE_PAGE_ABANDON)
 static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block)
 {
   // The padding check may access the non-thread-owned page for the key values.
@@ -371,13 +384,21 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
   mi_check_padding(page, block);
   mi_padding_shrink(page, block, sizeof(mi_block_t)); // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
 
-  // huge page segments are always abandoned and can be freed immediately
   mi_segment_t* const segment = _mi_page_segment(page);
-  if (segment->page_kind==MI_PAGE_HUGE) {
+  if (segment->page_kind == MI_PAGE_HUGE) {
+  #if MI_HUGE_PAGE_ABANDON
+    // huge page segments are always abandoned and can be freed immediately
     mi_stat_huge_free(page);
     _mi_segment_huge_page_free(segment, page, block);
     return;
+  #else
+    // huge pages are special as they occupy the entire segment
+    // as these are large we reset the memory occupied by the page so it is available to other threads
+    // (as the owning thread needs to actually free the memory later).
+    _mi_segment_huge_page_reset(segment, page, block);
+  #endif
   }
+
 
   #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED      // note: when tracking, cannot use mi_usable_size with multi-threading
     memset(block, MI_DEBUG_FREED, mi_usable_size(block));
@@ -453,6 +474,9 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block
 // Adjust a block that was allocated aligned, to the actual start of the block in the page.
 mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p) {
   mi_assert_internal(page!=NULL && p!=NULL);
+  #if !MI_HUGE_PAGE_ABANDON
+  // if (segment->mem_align_offset != 0) return (mi_block_t*)p; // don't unalign blocks that have huge alignment
+  #endif
   const size_t diff = (uint8_t*)p - _mi_page_start(segment, page, NULL);
   const size_t adjust = (diff % mi_page_block_size(page));
   return (mi_block_t*)((uintptr_t)p - adjust);
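`_mi_page_ptr_unalign` recovers the block start from an interior pointer by rounding the pointer's offset within the page down to a multiple of the block size. A standalone illustration with hypothetical numbers:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  uintptr_t page_start = 0x20000;            // hypothetical page start
  size_t block_size = 0x300;                 // hypothetical block size
  uintptr_t p = 0x20000 + 2*0x300 + 0x40;    // interior pointer into block 2

  size_t diff = p - page_start;
  size_t adjust = diff % block_size;         // distance past the block start
  uintptr_t block = p - adjust;

  printf("p=%#zx block=%#zx\n", (size_t)p, (size_t)block);
  assert(block == page_start + 2*block_size);
  return 0;
}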
src/os.c
@@ -1007,7 +1007,7 @@ bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* tld_stats) {
   return mi_os_commitx(addr, size, false, true /* conservative */, &is_zero, stats);
 }
 
-static bool mi_os_commit_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* stats) {
+bool _mi_os_commit_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* stats) {
   return mi_os_commitx(addr, size, true, true /* conservative */, is_zero, stats);
 }
 
@@ -1072,24 +1072,14 @@ static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats)
 bool _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats) {
   MI_UNUSED(tld_stats);
   mi_stats_t* stats = &_mi_stats_main;
-  if (mi_option_is_enabled(mi_option_reset_decommits)) {
-    return _mi_os_decommit(addr, size, stats);
-  }
-  else {
-    return mi_os_resetx(addr, size, true, stats);
-  }
+  return mi_os_resetx(addr, size, true, stats);
 }
 
 bool _mi_os_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats) {
   MI_UNUSED(tld_stats);
   mi_stats_t* stats = &_mi_stats_main;
-  if (mi_option_is_enabled(mi_option_reset_decommits)) {
-    return mi_os_commit_unreset(addr, size, is_zero, stats);  // re-commit it (conservatively!)
-  }
-  else {
-    *is_zero = false;
-    return mi_os_resetx(addr, size, false, stats);
-  }
+  *is_zero = false;
+  return mi_os_resetx(addr, size, false, stats);
 }
 
 
src/page-queue.c

@@ -228,7 +228,9 @@ static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
 static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) {
   mi_assert_internal(mi_page_heap(page) == heap);
   mi_assert_internal(!mi_page_queue_contains(queue, page));
+  #if MI_HUGE_PAGE_ABANDON
   mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
+  #endif
   mi_assert_internal(page->xblock_size == queue->block_size ||
                      (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) ||
                      (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
src/page.c
@@ -111,7 +111,10 @@ bool _mi_page_is_valid(mi_page_t* page) {
   if (mi_page_heap(page)!=NULL) {
     mi_segment_t* segment = _mi_page_segment(page);
     mi_assert_internal(!_mi_process_is_initialized || segment->thread_id == mi_page_heap(page)->thread_id || segment->thread_id==0);
-    if (segment->page_kind != MI_PAGE_HUGE) {
+    #if MI_HUGE_PAGE_ABANDON
+    if (segment->page_kind != MI_PAGE_HUGE)
+    #endif
+    {
       mi_page_queue_t* pq = mi_page_queue_of(page);
       mi_assert_internal(mi_page_queue_contains(pq, page));
       mi_assert_internal(pq->block_size==mi_page_block_size(page) || mi_page_block_size(page) > MI_LARGE_OBJ_SIZE_MAX || mi_page_is_in_full(page));
@@ -243,7 +246,9 @@ void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
   mi_assert_expensive(mi_page_is_valid_init(page));
   mi_assert_internal(mi_page_heap(page) == heap);
   mi_assert_internal(mi_page_thread_free_flag(page) != MI_NEVER_DELAYED_FREE);
+  #if MI_HUGE_PAGE_ABANDON
   mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
+  #endif
   mi_assert_internal(!page->is_reset);
   // TODO: push on full queue immediately if it is full?
   mi_page_queue_t* pq = mi_page_queue(heap, mi_page_block_size(page));
@@ -253,22 +258,27 @@ void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
 
 // allocate a fresh page from a segment
 static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size_t block_size, size_t page_alignment) {
-  mi_assert_internal(pq==NULL||mi_heap_contains_queue(heap, pq));
-  mi_assert_internal(pq==NULL||block_size == pq->block_size);
+  #if !MI_HUGE_PAGE_ABANDON
+  mi_assert_internal(pq != NULL);
+  mi_assert_internal(mi_heap_contains_queue(heap, pq));
+  mi_assert_internal(page_alignment > 0 || block_size > MI_LARGE_OBJ_SIZE_MAX || block_size == pq->block_size);
+  #endif
   mi_page_t* page = _mi_segment_page_alloc(heap, block_size, page_alignment, &heap->tld->segments, &heap->tld->os);
   if (page == NULL) {
     // this may be out-of-memory, or an abandoned page was reclaimed (and in our queue)
     return NULL;
   }
+  #if MI_HUGE_PAGE_ABANDON
   mi_assert_internal(pq==NULL || _mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
+  #endif
   mi_assert_internal(pq!=NULL || page->xblock_size != 0);
   mi_assert_internal(pq!=NULL || mi_page_block_size(page) >= block_size);
   // a fresh page was found, initialize it
-  const size_t full_block_size = (pq == NULL ? mi_page_block_size(page) : block_size); // see also: mi_segment_huge_page_alloc
+  const size_t full_block_size = ((pq == NULL || mi_page_queue_is_huge(pq)) ? mi_page_block_size(page) : block_size); // see also: mi_segment_huge_page_alloc
   mi_assert_internal(full_block_size >= block_size);
   mi_page_init(heap, page, full_block_size, heap->tld);
   mi_heap_stat_increase(heap, pages, 1);
-  if (pq!=NULL) mi_page_queue_push(heap, pq, page); // huge pages use pq==NULL
+  if (pq != NULL) { mi_page_queue_push(heap, pq, page); }
   mi_assert_expensive(_mi_page_is_valid(page));
   return page;
 }
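With huge pages in a real queue, `pq` is no longer `NULL` for them, so the `full_block_size` selection now also treats the huge queue specially: when `page_alignment > 0` the requested `block_size` can be smaller than the block the segment actually carved out, and the page's own block size must win. A hedged model with made-up sizes:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

int main(void) {
  bool pq_is_huge = true;                   // the page came in via the huge queue
  size_t requested_block_size = 5u << 20;   // caller asked for 5 MiB
  size_t page_block_size = 8u << 20;        // the fresh segment carved an 8 MiB block

  // mirrors the ternary above: huge (or queue-less) pages use the page's size
  size_t full_block_size = (pq_is_huge ? page_block_size : requested_block_size);
  assert(full_block_size >= requested_block_size);
  return 0;
}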
@@ -799,15 +809,23 @@ void mi_register_deferred_free(mi_deferred_free_fun* fn, void* arg) mi_attr_noex
 static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_alignment) {
   size_t block_size = _mi_os_good_alloc_size(size);
   mi_assert_internal(mi_bin(block_size) == MI_BIN_HUGE || page_alignment > 0);
-  mi_page_t* page = mi_page_fresh_alloc(heap,NULL,block_size,page_alignment);
+  #if MI_HUGE_PAGE_ABANDON
+  mi_page_queue_t* pq = NULL;
+  #else
+  mi_page_queue_t* pq = mi_page_queue(heap, MI_HUGE_OBJ_SIZE_MAX); // not block_size as that can be low if the page_alignment > 0
+  mi_assert_internal(mi_page_queue_is_huge(pq));
+  #endif
+  mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size,page_alignment);
   if (page != NULL) {
     const size_t bsize = mi_page_block_size(page);  // note: not `mi_page_usable_block_size` as `size` includes padding already
     mi_assert_internal(bsize >= size);
     mi_assert_internal(mi_page_immediate_available(page));
     mi_assert_internal(_mi_page_segment(page)->page_kind==MI_PAGE_HUGE);
     mi_assert_internal(_mi_page_segment(page)->used==1);
+    #if MI_HUGE_PAGE_ABANDON
     mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue
     mi_page_set_heap(page, NULL);
+    #endif
 
     if (bsize > MI_HUGE_OBJ_SIZE_MAX) {
       mi_heap_stat_increase(heap, giant, bsize);
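Note the queue is looked up with `MI_HUGE_OBJ_SIZE_MAX` rather than `block_size`: an aligned huge allocation can have a nominal `block_size` small enough to map to a normal bin, yet the page must land in the huge queue. A hedged model of that bin selection (bin numbers and sizes are illustrative, not mimalloc's):

#include <assert.h>
#include <stddef.h>

#define BIN_HUGE 73
#define LARGE_OBJ_SIZE_MAX (2u << 20)

static int bin_of(size_t size) {
  if (size > LARGE_OBJ_SIZE_MAX) return BIN_HUGE;
  // ... normal size-class computation elided ...
  return 1;
}

int main(void) {
  // with page_alignment > 0 the nominal block_size can be small,
  // so the queue is chosen with the maximum huge size instead:
  size_t block_size = 64 * 1024;
  int pq = bin_of((size_t)LARGE_OBJ_SIZE_MAX + 1);  // stand-in for MI_HUGE_OBJ_SIZE_MAX
  assert(pq == BIN_HUGE);
  assert(bin_of(block_size) != BIN_HUGE);  // block_size alone would pick the wrong queue
  return 0;
}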
src/region.c
@@ -47,6 +47,7 @@ bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
 bool _mi_os_decommit(void* p, size_t size, mi_stats_t* stats);
 bool _mi_os_reset(void* p, size_t size, mi_stats_t* stats);
 bool _mi_os_unreset(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
+bool _mi_os_commit_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* stats);
 
 // arena.c
 mi_arena_id_t _mi_arena_id_none(void);
@@ -481,11 +482,21 @@ void _mi_mem_collect(mi_os_tld_t* tld) {
 -----------------------------------------------------------------------------*/
 
 bool _mi_mem_reset(void* p, size_t size, mi_os_tld_t* tld) {
-  return _mi_os_reset(p, size, tld->stats);
+  if (mi_option_is_enabled(mi_option_reset_decommits)) {
+    return _mi_os_decommit(p, size, tld->stats);
+  }
+  else {
+    return _mi_os_reset(p, size, tld->stats);
+  }
 }
 
 bool _mi_mem_unreset(void* p, size_t size, bool* is_zero, mi_os_tld_t* tld) {
-  return _mi_os_unreset(p, size, is_zero, tld->stats);
+  if (mi_option_is_enabled(mi_option_reset_decommits)) {
+    return _mi_os_commit(p, size, is_zero, tld->stats);
+  }
+  else {
+    return _mi_os_unreset(p, size, is_zero, tld->stats);
+  }
 }
 
 bool _mi_mem_commit(void* p, size_t size, bool* is_zero, mi_os_tld_t* tld) {
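The `reset_decommits` policy thus moves out of `os.c` and into the region layer, and the OS-level `_mi_os_reset`/`_mi_os_unreset` become unconditional primitives. Callers can still steer the behavior through the public option API; a small usage sketch (output is illustrative):

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  // make _mi_mem_reset decommit instead of doing an OS-level reset
  mi_option_set(mi_option_reset_decommits, 1);
  printf("reset_decommits enabled: %s\n",
         mi_option_is_enabled(mi_option_reset_decommits) ? "yes" : "no");

  void* p = mi_malloc(1024);
  mi_free(p);
  return 0;
}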
src/segment.c

@@ -1217,8 +1217,10 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment,
   mi_segment_t* segment = mi_segment_alloc(size, MI_PAGE_HUGE, MI_SEGMENT_SHIFT + 1, page_alignment, tld, os_tld);
   if (segment == NULL) return NULL;
   mi_assert_internal(mi_segment_page_size(segment) - segment->segment_info_size - (2*(MI_SECURE == 0 ? 0 : _mi_os_page_size())) >= size);
+  #if MI_HUGE_PAGE_ABANDON
   segment->thread_id = 0; // huge pages are immediately abandoned
   mi_segments_track_size(-(long)segment->segment_size, tld);
+  #endif
   mi_page_t* page = mi_segment_find_free(segment, tld);
   mi_assert_internal(page != NULL);
 
@@ -1230,10 +1232,10 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment,
     mi_assert_internal(_mi_is_aligned(aligned_p, page_alignment));
     mi_assert_internal(psize - (aligned_p - p) >= size);
     if (!segment->mem_is_pinned && page->is_committed) {
       // decommit the part of the page that is unused; this can be quite large (close to MI_SEGMENT_SIZE)
       uint8_t* decommit_start = p + sizeof(mi_block_t); // for the free list
       ptrdiff_t decommit_size = aligned_p - decommit_start;
-      _mi_mem_decommit(decommit_start, decommit_size, os_tld);
+      _mi_os_reset(decommit_start, decommit_size, os_tld->stats);
     }
   }
 
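When `page_alignment` is large, everything between the first block header and `aligned_p` is never touched, so it can be reset. A standalone sketch of the span arithmetic (addresses are hypothetical, `block_t` stands in for `mi_block_t`):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct block_s { struct block_s* next; } block_t;  // stand-in for mi_block_t

int main(void) {
  // hypothetical addresses: page data at 64 KiB, aligned block at 4 MiB
  uintptr_t p = 0x10000;
  uintptr_t aligned_p = 0x400000;

  uintptr_t reset_start = p + sizeof(block_t);     // keep the free-list header intact
  size_t reset_size = (size_t)(aligned_p - reset_start);

  printf("reset %zu bytes of the unused gap\n", reset_size);
  assert(reset_size > 0 && reset_size < 0x400000);
  return 0;
}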
@@ -1245,6 +1247,7 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment,
   return page;
 }
 
+#if MI_HUGE_PAGE_ABANDON
 // free huge block from another thread
 void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block) {
   // huge page segments are always abandoned and can be freed immediately by any thread
@@ -1273,6 +1276,18 @@ void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block
   #endif
 }
 
+#else
+// reset memory of a huge block from another thread
+void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block) {
+  mi_assert_internal(segment->page_kind == MI_PAGE_HUGE);
+  mi_assert_internal(segment == _mi_page_segment(page));
+  mi_assert_internal(page->used == 1); // this is called just before the free
+  mi_assert_internal(page->free == NULL);
+  const size_t bsize = mi_page_block_size(page);
+  _mi_os_reset(block + 1, bsize - sizeof(mi_block_t), &_mi_stats_main);
+}
+#endif
+
 /* -----------------------------------------------------------
    Page allocation
 ----------------------------------------------------------- */
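`block + 1` steps one `mi_block_t` past the block header, so the reset skips the free-list link that the owning thread still needs. A standalone illustration of that pointer arithmetic (plain C, `block_t` stands in for `mi_block_t`):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct block_s { struct block_s* next; } block_t;  // stand-in for mi_block_t

int main(void) {
  block_t blk;               // pretend this sits at the start of a huge block
  size_t bsize = 8u << 20;   // hypothetical 8 MiB block size

  void* reset_start = &blk + 1;                 // skips sizeof(block_t) bytes
  size_t reset_size = bsize - sizeof(block_t);  // reset only the payload

  assert((uintptr_t)reset_start == (uintptr_t)&blk + sizeof(block_t));
  assert(reset_size < bsize);
  return 0;
}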
@@ -1292,7 +1307,7 @@ mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t pag
   else if (block_size <= MI_MEDIUM_OBJ_SIZE_MAX) {
     page = mi_segment_medium_page_alloc(heap, block_size, tld, os_tld);
   }
-  else if (block_size <= MI_LARGE_OBJ_SIZE_MAX) {
+  else if (block_size <= MI_LARGE_OBJ_SIZE_MAX /* || mi_is_good_fit(block_size, MI_LARGE_PAGE_SIZE - sizeof(mi_segment_t)) */ ) {
     page = mi_segment_large_page_alloc(heap, block_size, tld, os_tld);
   }
   else {
@@ -205,7 +205,7 @@ static bool test_visit(const mi_heap_t* heap, const mi_heap_area_t* area, void*
 
 static void test_heap_walk(void) {
   mi_heap_t* heap = mi_heap_new();
-  //mi_heap_malloc(heap, 2097152);
+  mi_heap_malloc(heap, 16*2097152);
   mi_heap_malloc(heap, 2067152);
   mi_heap_malloc(heap, 2097160);
   mi_heap_malloc(heap, 24576);
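The enlarged allocation gives the walk a block well above `MI_HUGE_OBJ_SIZE_MAX`, which this commit makes visible again. A hedged usage sketch of the heap walk the test exercises, using the public `mi_heap_visit_blocks` API (output is illustrative):

#include <mimalloc.h>
#include <stdio.h>

static bool print_block(const mi_heap_t* heap, const mi_heap_area_t* area,
                        void* block, size_t block_size, void* arg) {
  (void)heap; (void)area; (void)arg;
  if (block != NULL) printf("block %p of size %zu\n", block, block_size);
  return true;  // keep walking
}

int main(void) {
  mi_heap_t* heap = mi_heap_new();
  mi_heap_malloc(heap, 16 * 2097152);  // huge block, now reported by the walk
  mi_heap_malloc(heap, 24576);
  mi_heap_visit_blocks(heap, true /* visit individual blocks */, &print_block, NULL);
  mi_heap_destroy(heap);
  return 0;
}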