wip: full decommit delay, for arena cache as well

parent 321e18777e
commit 7da00c1220
@@ -275,6 +275,7 @@ typedef enum mi_option_e {
   mi_option_eager_commit_delay,
   mi_option_allow_decommit,
   mi_option_reset_delay,
+  mi_option_arena_reset_delay,
   mi_option_use_numa_nodes,
   mi_option_os_tag,
   mi_option_max_errors,
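The new enum value behaves like any other mi_option_t, so it is visible through mimalloc's public option API. A minimal sketch (not part of this commit) of reading the configured delay from application code:

#include <stdio.h>
#include <mimalloc.h>

// Minimal sketch, not part of this commit: query the arena decommit delay
// through the public option interface once the enum value above exists.
int main(void) {
  long delay_ms = mi_option_get(mi_option_arena_reset_delay);
  printf("arena reset delay: %ld ms\n", delay_ms);
  return 0;
}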
src/arena.c  (55 changed lines)
@@ -124,16 +124,17 @@ static bool mi_arena_alloc(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t*

 /* -----------------------------------------------------------
   Arena cache
 ----------------------------------------------------------- */
-#define MI_CACHE_MAX (64)
+#define MI_CACHE_MAX (64)   // ~4GiB
 #define MI_MAX_NUMA  (16)

 #define MI_SLOT_IN_USE ((void*)1)

 typedef struct mi_cache_slot_s {
   volatile _Atomic(void*) p;
-  volatile size_t memid;
-  volatile bool is_committed;
-  volatile bool is_large;
+  volatile size_t memid;
+  volatile mi_msecs_t expire;
+  volatile bool is_committed;
+  volatile bool is_large;
 } mi_cache_slot_t;

 static mi_cache_slot_t cache[MI_MAX_NUMA][MI_CACHE_MAX];
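For context on the new "~4GiB" note: with MI_CACHE_MAX slots per NUMA node and one segment per slot, the cache bounds how much memory a node can retain. A small sanity-check sketch, assuming MI_SEGMENT_SIZE is 64 MiB in this branch (an assumption read off the comment, not stated by the diff):

#include <stdint.h>

// Sketch only; the 64 MiB segment size is an assumption implied by the
// "~4GiB" comment above, not something this diff states explicitly.
_Static_assert((uint64_t)64 * ((uint64_t)64 << 20) == ((uint64_t)4 << 30),
               "64 cached segments of 64 MiB each add up to 4 GiB per NUMA node");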
@@ -188,7 +189,43 @@ static void* mi_cache_pop(int numa_node, size_t size, size_t alignment, bool* co
   return NULL;
 }

-static bool mi_cache_push(void* start, size_t size, size_t memid, bool is_committed, bool is_large, mi_os_tld_t* tld) {
+static void mi_cache_purge(mi_os_tld_t* tld) {
+  // TODO: for each numa node instead?
+  // if (mi_option_get(mi_option_arena_reset_delay) == 0) return;
+
+  mi_msecs_t now = _mi_clock_now();
+  int numa_node = _mi_os_numa_node(NULL);
+  if (numa_node > MI_MAX_NUMA) numa_node %= MI_MAX_NUMA;
+  mi_cache_slot_t* slot;
+  int purged = 0;
+  for (int i = 0; i < MI_CACHE_MAX; i++) {
+    slot = &cache[numa_node][i];
+    void* p = mi_atomic_read_ptr_relaxed(&slot->p);
+    if (p > MI_SLOT_IN_USE && !slot->is_committed && !slot->is_large) {
+      mi_msecs_t expire = slot->expire;
+      if (now >= expire) {
+        // expired, try to claim it
+        if (mi_atomic_cas_ptr_weak(&slot->p, MI_SLOT_IN_USE, p)) {
+          // claimed! test again
+          if (!slot->is_committed && !slot->is_large && now >= slot->expire) {
+            _mi_os_decommit(p, MI_SEGMENT_SIZE, tld->stats);
+            slot->is_committed = false;
+          }
+          // and unclaim again
+          mi_atomic_write_ptr(&slot->p, p);
+          purged++;
+          if (purged >= 4) break;  // limit to at most 4 decommits per push
+        }
+      }
+    }
+  }
+}
+
+
+static bool mi_cache_push(void* start, size_t size, size_t memid, bool is_committed, bool is_large, mi_os_tld_t* tld)
+{
+  mi_cache_purge(tld);
+
   // only for segment blocks
   if (size != MI_SEGMENT_SIZE || ((uintptr_t)start % MI_SEGMENT_ALIGN) != 0) return false;

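The purge loop relies on a claim/unclaim protocol: a slot is taken out of circulation by CAS-ing its pointer to the MI_SLOT_IN_USE sentinel, the slow decommit runs while the slot is claimed, and the original pointer is written back to release it. A stand-alone sketch of that pattern using C11 atomics (illustrative names only, not mimalloc's internal API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define SLOT_IN_USE ((void*)1)   // sentinel: slot holds a block but is temporarily claimed

typedef struct slot_s {
  _Atomic(void*) p;              // NULL = empty, SLOT_IN_USE = claimed, otherwise = cached block
} slot_t;

// Try to claim the slot, run `work` on its block with exclusive access,
// then publish the block pointer again so other threads can use the slot.
static bool slot_try_work(slot_t* slot, void (*work)(void* block)) {
  void* p = atomic_load_explicit(&slot->p, memory_order_relaxed);
  if ((uintptr_t)p <= (uintptr_t)SLOT_IN_USE) return false;                     // empty or already claimed
  if (!atomic_compare_exchange_weak(&slot->p, &p, SLOT_IN_USE)) return false;   // lost the race
  work(p);                        // exclusive: no other thread can pop or purge this slot now
  atomic_store(&slot->p, p);      // unclaim: make the cached block visible again
  return true;
}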
@@ -202,7 +239,12 @@ static bool mi_cache_push(void* start, size_t size, size_t memid, bool is_commit
     if (p == NULL) { // free slot
       if (mi_atomic_cas_ptr_weak(&slot->p, MI_SLOT_IN_USE, NULL)) {
         // claimed!
-        // _mi_os_decommit(start, size, tld->stats);
+        long delay = mi_option_get(mi_option_arena_reset_delay);
+        if (delay == 0 && !is_large) {
+          _mi_os_decommit(start, size, tld->stats);
+          is_committed = false;
+        }
+        slot->expire = (is_committed ? 0 : _mi_clock_now() + delay);
         slot->is_committed = is_committed;
         slot->memid = memid;
         slot->is_large = is_large;
@@ -214,6 +256,7 @@ static bool mi_cache_push(void* start, size_t size, size_t memid, bool is_commit
   return false;
 }

+
 /* -----------------------------------------------------------
   Arena Allocation
 ----------------------------------------------------------- */
@@ -64,8 +64,9 @@ static mi_option_desc_t options[_mi_option_last] =
   { 0, UNINIT, MI_OPTION(segment_reset) },        // reset segment memory on free (needs eager commit)
   { 1, UNINIT, MI_OPTION(reset_decommits) },      // reset decommits memory
   { 0, UNINIT, MI_OPTION(eager_commit_delay) },   // the first N segments per thread are not eagerly committed
-  { 0, UNINIT, MI_OPTION(allow_decommit) },       // decommit pages when not eager committed
-  { 1000, UNINIT, MI_OPTION(reset_delay) },       // reset delay in milli-seconds
+  { 1, UNINIT, MI_OPTION(allow_decommit) },       // decommit pages when not eager committed
+  { 1000, UNINIT, MI_OPTION(reset_delay) },       // reset delay in milli-seconds
+  { 1000, UNINIT, MI_OPTION(arena_reset_delay) }, // reset delay in milli-seconds
   { 0, UNINIT, MI_OPTION(use_numa_nodes) },       // 0 = use available numa nodes, otherwise use at most N nodes.
   { 100, UNINIT, MI_OPTION(os_tag) },             // only apple specific for now but might serve more or less related purpose
   { 16, UNINIT, MI_OPTION(max_errors) }           // maximum errors that are output
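With a 1000 ms default, applications that want a longer (or zero) arena decommit delay can override it at startup. A small sketch (not part of this commit) using the public option setter; the same value should also be reachable through a MIMALLOC_ARENA_RESET_DELAY environment variable, following mimalloc's usual option-naming convention.

#include <mimalloc.h>

int main(void) {
  // Sketch: keep decommit of cached arena blocks delayed for 2 seconds instead of 1.
  // mi_option_set is the public setter; call it before the first allocation.
  mi_option_set(mi_option_arena_reset_delay, 2000);

  void* p = mi_malloc(256);
  mi_free(p);
  return 0;
}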
@@ -286,8 +286,8 @@ static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
     _mi_os_unprotect(segment, mi_segment_size(segment)); // ensure no more guard pages are set
   }

-  // purge delayed decommits now
-  mi_segment_delayed_decommit(segment,true,tld->stats);
+  // purge delayed decommits now? (no, leave it to the cache)
+  // mi_segment_delayed_decommit(segment,true,tld->stats);

   // _mi_os_free(segment, mi_segment_size(segment), /*segment->memid,*/ tld->stats);
   _mi_arena_free(segment, mi_segment_size(segment), segment->memid,
@@ -335,8 +335,7 @@ static bool mi_segment_cache_push(mi_segment_t* segment, mi_segments_tld_t* tld)
   if (segment->segment_slices != MI_SLICES_PER_SEGMENT || mi_segment_cache_full(tld)) {
     return false;
   }
-  // mi_segment_delayed_decommit(segment, true, tld->stats);
-  // segment->decommit_mask = 0;
+  // mi_segment_delayed_decommit(segment, true, tld->stats);
   mi_assert_internal(segment->segment_slices == MI_SLICES_PER_SEGMENT);
   mi_assert_internal(segment->next == NULL);
   segment->next = tld->cache;
@@ -876,9 +875,8 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
     slice = slice + slice->slice_count;
   }

-  // force delayed decommits
-  mi_segment_delayed_decommit(segment, true, tld->stats);
-  //segment->decommit_mask = 0;
+  // force delayed decommits instead?
+  mi_segment_delayed_decommit(segment, false, tld->stats);

   // add it to the abandoned list
   _mi_stat_increase(&tld->stats->segments_abandoned, 1);