more fine grained commit tracking per MiB

daan 2019-11-24 19:06:30 -08:00
parent 128cdd1dfb
commit ec0005b919
4 changed files with 5 additions and 5 deletions

@@ -227,7 +227,7 @@ typedef enum mi_segment_kind_e {
MI_SEGMENT_HUGE, // > MI_LARGE_SIZE_MAX segment with just one huge page inside.
} mi_segment_kind_t;
-#define MI_COMMIT_SIZE (2UL<<20) // OS large page size
+#define MI_COMMIT_SIZE (1UL<<20) // OS large page size
#if ((1 << MI_SEGMENT_SHIFT)/MI_COMMIT_SIZE > 8*MI_INTPTR_SIZE)
#error "not enough commit bits to cover the segment size"

@@ -203,7 +203,7 @@ static void mi_cache_purge(mi_os_tld_t* tld) {
void* p = mi_atomic_read_ptr_relaxed(&slot->p);
if (p > MI_SLOT_IN_USE && !slot->is_committed && !slot->is_large) {
mi_msecs_t expire = slot->expire;
-if (now >= expire) {
+if (expire != 0 && now >= expire) {
// expired, try to claim it
if (mi_atomic_cas_ptr_weak(&slot->p, MI_SLOT_IN_USE, p)) {
// claimed! test again
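
The added expire != 0 test keeps the purge from treating an unscheduled slot (expire left at 0) as already expired. A minimal, self-contained sketch of that guard follows, using a plain struct instead of mimalloc's atomic slot type, so the names here are illustrative only:

#include <stdbool.h>
#include <stdio.h>

typedef long long msecs_t;

// Illustrative slot: only the fields needed to show the expiration guard.
typedef struct cache_slot_s {
  void*   p;            // cached memory block (NULL if the slot is empty)
  bool    is_committed; // committed slots are not decommit-purged here
  msecs_t expire;       // 0 means "no expiration scheduled"
} cache_slot_t;

// Returns true if the slot should be purged (decommitted) at time `now`.
// The `expire != 0` test is the point of the change above: a zero expire
// would otherwise always compare as "already expired".
static bool slot_should_purge(const cache_slot_t* slot, msecs_t now) {
  if (slot->p == NULL || slot->is_committed) return false;
  return (slot->expire != 0 && now >= slot->expire);
}

int main(void) {
  cache_slot_t never_scheduled = { (void*)1, false, 0 };    // fake non-NULL pointer
  cache_slot_t scheduled       = { (void*)1, false, 500 };
  printf("never scheduled: %d\n", slot_should_purge(&never_scheduled, 1000)); // 0
  printf("scheduled:       %d\n", slot_should_purge(&scheduled, 1000));       // 1
  return 0;
}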

@@ -64,7 +64,7 @@ static mi_option_desc_t options[_mi_option_last] =
{ 0, UNINIT, MI_OPTION(segment_reset) }, // reset segment memory on free (needs eager commit)
{ 1, UNINIT, MI_OPTION(reset_decommits) }, // reset decommits memory
{ 0, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed
-{ 0, UNINIT, MI_OPTION(allow_decommit) }, // decommit pages when not eager committed
+{ 1, UNINIT, MI_OPTION(allow_decommit) }, // decommit pages when not eager committed
{ 1000, UNINIT, MI_OPTION(reset_delay) }, // reset delay in milli-seconds
{ 1000, UNINIT, MI_OPTION(arena_reset_delay) }, // reset delay in milli-seconds
{ 0, UNINIT, MI_OPTION(use_numa_nodes) }, // 0 = use available numa nodes, otherwise use at most N nodes.
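
With allow_decommit now defaulting to 1, decommitting not-eagerly-committed pages is on by default; a program that wants the previous behavior can switch it back off through the regular options API before the first allocation (or via the matching MIMALLOC_ALLOW_DECOMMIT environment variable). A small sketch, assuming a build of this branch where mi_option_allow_decommit is exposed in mimalloc.h:

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  // Revert to the old default (no decommit) before any allocation happens.
  // mi_option_allow_decommit is the option changed above, so this assumes a
  // mimalloc build from this branch where it exists.
  mi_option_set(mi_option_allow_decommit, 0);
  // Equivalent from the outside: set MIMALLOC_ALLOW_DECOMMIT=0 in the environment.
  void* p = mi_malloc(1024);
  printf("allow_decommit = %ld\n", mi_option_get(mi_option_allow_decommit));
  mi_free(p);
  return 0;
}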

@@ -381,6 +381,7 @@ static uintptr_t mi_segment_commit_mask(mi_segment_t* segment, bool conservative
mi_assert_internal(start % MI_COMMIT_SIZE==0 && end % MI_COMMIT_SIZE == 0);
*start_p = (uint8_t*)segment + start;
*full_size = (end > start ? end - start : 0);
+if (*full_size == 0) return 0;
uintptr_t bitidx = start / MI_COMMIT_SIZE;
mi_assert_internal(bitidx < (MI_INTPTR_SIZE*8));
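
The added early return avoids computing a mask for an empty range. The sketch below shows, with plain integers and illustrative names (not mimalloc's internals), how a chunk-aligned (start, size) range inside a segment maps to a run of per-MiB bits in a uintptr_t mask, which is the finer-grained commit tracking this commit introduces:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define COMMIT_SIZE (1UL << 20)   // 1 MiB commit chunks, matching the new MI_COMMIT_SIZE

// Build a mask with one bit per 1 MiB chunk covered by [start, start+size),
// where start and size are chunk-aligned offsets into the segment.
// Returns 0 for an empty range, mirroring the early return added above.
static uintptr_t commit_mask_for_range(size_t start, size_t size) {
  if (size == 0) return 0;
  size_t bitidx   = start / COMMIT_SIZE;   // first covered chunk
  size_t bitcount = size  / COMMIT_SIZE;   // number of covered chunks
  assert(bitidx + bitcount <= 8 * sizeof(uintptr_t));
  uintptr_t mask = 0;
  for (size_t i = 0; i < bitcount; i++) {
    mask |= (uintptr_t)1 << (bitidx + i);
  }
  return mask;
}

int main(void) {
  // Chunks 2..5 of a segment: offset 2 MiB, length 4 MiB -> bits 2,3,4,5.
  uintptr_t m = commit_mask_for_range(2 * COMMIT_SIZE, 4 * COMMIT_SIZE);
  printf("mask = 0x%llx\n", (unsigned long long)m);  // prints 0x3c
  return 0;
}
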
@@ -931,8 +932,7 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen
mi_segments_track_size((long)mi_segment_size(segment),tld);
mi_assert_internal(segment->next == NULL);
_mi_stat_decrease(&tld->stats->segments_abandoned,1);
-mi_assert_internal(segment->decommit_mask == 0);
+//mi_assert_internal(segment->decommit_mask == 0);
mi_slice_t* slice = &segment->slices[0];
const mi_slice_t* end = mi_segment_slices_end(segment);