wip: initial work on purgeable arenas

Daan Leijen 2023-03-31 20:51:35 -07:00
parent 7cf60deb12
commit 595add5e3d
7 changed files with 65 additions and 42 deletions
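
For orientation, the idea this work-in-progress moves toward: instead of decommitting arena blocks as soon as they are freed, freed blocks are marked in a per-arena purge bitmap together with an expiration time, and the actual decommit happens later (on a subsequent free, or when forced by a collect). Below is a minimal standalone sketch of that schedule/try-purge pattern, using made-up names and a single 64-bit field rather than mimalloc's real data structures:

#include <stdbool.h>
#include <stdint.h>

// Hypothetical, simplified state; not mimalloc's actual arena type.
typedef struct demo_arena_s {
  uint64_t blocks_purge;  // one bit per block that is waiting to be purged
  int64_t  purge_expire;  // deadline in msecs; 0 means nothing is pending
} demo_arena_t;

// Mark blocks as purgeable and arm the expiration timer if it is not set yet.
void demo_schedule_purge(demo_arena_t* arena, uint64_t mask, int64_t now, int64_t delay) {
  arena->blocks_purge |= mask;
  if (arena->purge_expire == 0) { arena->purge_expire = now + delay; }
}

// Called opportunistically: decommit the marked blocks once the delay has
// passed, or right away when `force` is set (e.g. at heap collect or exit).
bool demo_try_purge(demo_arena_t* arena, int64_t now, bool force, void (*decommit)(uint64_t mask)) {
  if (arena->blocks_purge == 0) return false;
  if (!force && now < arena->purge_expire) return false;
  decommit(arena->blocks_purge);
  arena->blocks_purge = 0;
  arena->purge_expire = 0;
  return true;
}

The hunks below add roughly these two halves: draining in mi_arena_try_purge and the new _mi_arena_collect, plus opportunistic purging from _mi_arena_free.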

View File

@@ -117,7 +117,7 @@ void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinn
void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
bool _mi_arena_memid_is_suitable(size_t arena_memid, mi_arena_id_t request_arena_id);
bool _mi_arena_is_os_allocated(size_t arena_memid);
void _mi_arena_collect(bool free_arenas, bool force_decommit, mi_stats_t* stats);
// memory.c
void* _mi_mem_alloc_aligned(size_t size, size_t alignment, size_t offset, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* id, mi_os_tld_t* tld);

View File

@@ -287,14 +287,15 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset
if (p != NULL) return p;
// otherwise, try to first eagerly reserve a new arena
size_t eager_reserve = mi_option_get_size(mi_option_arena_reserve);
eager_reserve = _mi_align_up(eager_reserve, MI_ARENA_BLOCK_SIZE);
if (eager_reserve > 0 && eager_reserve >= size && // eager reserve enabled and large enough?
size_t arena_reserve = mi_option_get_size(mi_option_arena_reserve);
arena_reserve = _mi_align_up(arena_reserve, MI_ARENA_BLOCK_SIZE);
if (arena_reserve > 0 && arena_reserve >= size && // eager reserve enabled and large enough?
req_arena_id == _mi_arena_id_none() && // not exclusive?
mi_atomic_load_relaxed(&mi_arena_count) < 3*(MI_MAX_ARENAS/4) ) // not too many arenas already?
{
mi_arena_id_t arena_id = 0;
if (mi_reserve_os_memory_ex(eager_reserve, false /* commit */, *large /* allow large*/, false /* exclusive */, &arena_id) == 0) {
const bool arena_commit = _mi_os_has_overcommit();
if (mi_reserve_os_memory_ex(arena_reserve, arena_commit /* commit */, *large /* allow large*/, false /* exclusive */, &arena_id) == 0) {
p = mi_arena_alloc_in(arena_id, numa_node, size, alignment, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
if (p != NULL) return p;
}
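
In the hunk above, the eager reservation size is rounded up to a whole number of arena blocks before the arena is reserved, and the reservation is now committed up front when the OS overcommits memory (where committed-but-untouched pages are essentially free). As a small worked example of the rounding step, here is a sketch of an align-up helper with the usual power-of-two semantics; the helper name and the 64 MiB block size are illustrative, not taken from the source:

#include <assert.h>
#include <stddef.h>

// Round `size` up to a multiple of `alignment`; assumes `alignment` is a power of two.
size_t demo_align_up(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main(void) {
  const size_t block_size = 64u * 1024 * 1024;              // illustrative block size
  // a 100 MiB reserve request rounds up to 128 MiB, i.e. two whole blocks
  assert(demo_align_up(100u * 1024 * 1024, block_size) == 128u * 1024 * 1024);
  return 0;
}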
@@ -383,7 +384,7 @@ static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t
}
}
// return true if the full range was purged
// return true if the full range was purged.
static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx, size_t bitlen, size_t purge, mi_stats_t* stats) {
const size_t endidx = startidx + bitlen;
size_t bitidx = startidx;
@@ -401,12 +402,12 @@ static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx,
all_purged = true;
}
}
bitidx += (count+1);
bitidx += (count+1); // +1 to skip the zero bit (or end)
}
return all_purged;
}
// returns true if anything was decommitted
// returns true if anything was purged
static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi_stats_t* stats)
{
if (!arena->allow_decommit || arena->blocks_purge == NULL) return false;
@@ -425,35 +426,33 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi
if (purge != 0) {
size_t bitidx = 0;
while (bitidx < MI_BITMAP_FIELD_BITS) {
size_t bitlen = 1;
if ((purge & ((size_t)1 << bitidx)) != 0) {
while ((bitidx + bitlen < MI_BITMAP_FIELD_BITS) &&
((purge & ((size_t)1 << (bitidx + bitlen))) != 0)) { bitlen++; }
// found range of purgeable blocks
// try to claim the longest range of corresponding in_use bits
const mi_bitmap_index_t bitmap_index = mi_bitmap_index_create(i, bitidx);
while( bitlen > 0 ) {
if (_mi_bitmap_try_claim(arena->blocks_inuse, arena->field_count, bitlen, bitmap_index)) {
break;
}
bitlen--;
}
// claimed count bits at in_use
if (bitlen > 0) {
// read purge again now that we have the in_use bits
purge = mi_atomic_load_acquire(&arena->blocks_purge[i]);
if (!mi_arena_purge_range(arena, i, bitidx, bitlen, purge, stats)) {
full_purge = false;
}
any_purged = true;
}
else {
bitlen = 1; // make progress
}
// find length 1 bit range
size_t bitlen = 0;
while (bitidx + bitlen < MI_BITMAP_FIELD_BITS && (purge & ((size_t)1 << (bitidx + bitlen))) != 0) {
bitlen++;
}
bitidx += bitlen;
}
}
// try to claim the longest range of corresponding in_use bits
const mi_bitmap_index_t bitmap_index = mi_bitmap_index_create(i, bitidx);
while( bitlen > 0 ) {
if (_mi_bitmap_try_claim(arena->blocks_inuse, arena->field_count, bitlen, bitmap_index)) {
break;
}
bitlen--;
}
// claimed count bits at in_use
if (bitlen > 0) {
// read purge again now that we have the in_use bits
purge = mi_atomic_load_acquire(&arena->blocks_purge[i]);
if (!mi_arena_purge_range(arena, i, bitidx, bitlen, purge, stats)) {
full_purge = false;
}
any_purged = true;
// release claimed in_use bits again
_mi_bitmap_unclaim(arena->blocks_inuse, arena->field_count, bitlen, bitmap_index);
}
bitidx += (bitlen+1); // +1 to skip the zero (or end)
} // while bitidx
} // purge != 0
}
return any_purged;
}
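
The rewritten loop above walks one bitmap field at a time, finds each maximal run of set purge bits, claims the matching in_use bits, purges the range, releases the claim, and then advances bitidx past the zero bit (or field end) that terminated the run. To make the bitidx/bitlen bookkeeping easier to follow, here is a self-contained sketch of just the run-scanning part over a single 64-bit word; the names are made up and plain printf stands in for the purge:

#include <stdint.h>
#include <stdio.h>

// For each maximal run of 1-bits in `purge`, report its start and length,
// then skip the 0-bit (or the end) that terminated the run. Illustrative only.
void demo_scan_runs(uint64_t purge) {
  size_t bitidx = 0;
  while (bitidx < 64) {
    size_t bitlen = 0;
    if ((purge & ((uint64_t)1 << bitidx)) != 0) {
      // extend the run while the following bits are also set
      while (bitidx + bitlen < 64 && (purge & ((uint64_t)1 << (bitidx + bitlen))) != 0) {
        bitlen++;
      }
      printf("run at bit %zu, length %zu\n", bitidx, bitlen);
    }
    bitidx += (bitlen + 1);  // +1 skips the zero bit (or steps past the end)
  }
}

int main(void) {
  demo_scan_runs(0x0F06u);  // bits 1-2 and 8-11 set: prints runs (1,2) and (8,4)
  return 0;
}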
@@ -532,9 +531,17 @@ void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset,
return;
};
}
// purge expired decommits
mi_arenas_try_purge(false, false, stats);
}
void _mi_arena_collect(bool free_arenas, bool force_decommit, mi_stats_t* stats) {
MI_UNUSED(free_arenas); // todo
mi_arenas_try_purge(force_decommit, true, stats);
}
/* -----------------------------------------------------------
Add an arena.
----------------------------------------------------------- */
@@ -566,7 +573,7 @@ bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is
is_committed = true;
}
const bool allow_decommit = !is_large && !is_committed; // only allow decommit for initially uncommitted memory
const bool allow_decommit = !is_large; // && !is_committed; // only allow decommit for initially uncommitted memory
const size_t bcount = size / MI_ARENA_BLOCK_SIZE;
const size_t fields = _mi_divide_up(bcount, MI_BITMAP_FIELD_BITS);
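
The two arena.c changes above interact: a freshly reserved arena may now be committed up front (when the OS overcommits), and allow_decommit no longer excludes initially committed memory, so those eagerly committed blocks can still be purged later. A hedged sketch of how the two flags end up being chosen, paraphrasing the hunks with invented names:

#include <stdbool.h>

// Illustrative only: how the two flags in the hunks above relate.
typedef struct demo_arena_opts_s {
  bool commit_eagerly;   // commit the whole reservation up front?
  bool allow_decommit;   // may purged blocks be decommitted later?
} demo_arena_opts_t;

demo_arena_opts_t demo_arena_options(bool os_has_overcommit, bool is_large_pages) {
  demo_arena_opts_t opts;
  // was: commit = false; now: commit when the OS overcommits (cheap until touched)
  opts.commit_eagerly = os_has_overcommit;
  // was: !is_large && !is_committed; now large pages are the only exclusion,
  // so even an eagerly committed arena can be purged (decommitted) again
  opts.allow_decommit = !is_large_pages;
  return opts;
}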

View File

@@ -118,7 +118,7 @@ bool _mi_bitmap_try_find_claim(mi_bitmap_t bitmap, const size_t bitmap_fields, c
// Set `count` bits at `bitmap_idx` to 0 atomically
// Returns `true` if all `count` bits were 1 previously.
bool mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
const size_t idx = mi_bitmap_index_field(bitmap_idx);
const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
const size_t mask = mi_bitmap_mask_(count, bitidx);
@@ -153,6 +153,20 @@ static bool mi_bitmap_is_claimedx(mi_bitmap_t bitmap, size_t bitmap_fields, size
return ((field & mask) == mask);
}
// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically.
// Returns `true` if successful when all previous `count` bits were 0.
bool _mi_bitmap_try_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
const size_t idx = mi_bitmap_index_field(bitmap_idx);
const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
const size_t mask = mi_bitmap_mask_(count, bitidx);
mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields);
size_t expected = 0;
if (mi_atomic_cas_strong_acq_rel(&bitmap[idx], &expected, mask)) return true;
if ((expected & mask) != 0) return false;
return mi_atomic_cas_strong_acq_rel(&bitmap[idx], &expected, expected | mask);
}
bool _mi_bitmap_is_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
return mi_bitmap_is_claimedx(bitmap, bitmap_fields, count, bitmap_idx, NULL);
}
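
The new _mi_bitmap_try_claim above attempts an atomic 0-to-1 transition of a contiguous mask within one bitmap field, failing when any of the targeted bits is already set. Below is a standalone C11 sketch of the same try-claim idea on a single atomic 64-bit word; it uses a compare-and-swap retry loop (the committed code instead makes at most two strong CAS attempts), and all names are made up:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

// Atomically set the bits in `mask` from 0 to 1, failing if any of them is
// already 1. Retries only when the word changed under us. Illustrative only;
// not the mimalloc implementation shown above.
bool demo_try_claim(_Atomic uint64_t* field, uint64_t mask) {
  uint64_t expected = atomic_load_explicit(field, memory_order_relaxed);
  do {
    if ((expected & mask) != 0) return false;   // some targeted bit is already claimed
  } while (!atomic_compare_exchange_weak_explicit(
               field, &expected, expected | mask,
               memory_order_acq_rel, memory_order_relaxed));
  return true;
}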

View File

@@ -69,7 +69,7 @@ bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fiel
// Set `count` bits at `bitmap_idx` to 0 atomically
// Returns `true` if all `count` bits were 1 previously.
bool mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically.
// Returns `true` if successful when all previous `count` bits were 0.

View File

@@ -159,6 +159,7 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
// collect regions on program-exit (or shared library unload)
if (collect >= MI_FORCE && _mi_is_main_thread() && mi_heap_is_backing(heap)) {
_mi_mem_collect(&heap->tld->os);
_mi_arena_collect(false,true,&heap->tld->stats);
}
}

View File

@@ -591,6 +591,7 @@ static void mi_cdecl mi_process_done(void) {
if (mi_option_is_enabled(mi_option_destroy_on_exit)) {
_mi_heap_destroy_all(); // forcefully release all memory held by all heaps (of this thread only!)
_mi_mem_collect(&_mi_heap_main_get()->tld->os); // release all regions
_mi_arena_collect(true,true,&_mi_heap_main_get()->tld->stats);
}
if (mi_option_is_enabled(mi_option_show_stats) || mi_option_is_enabled(mi_option_verbose)) {

View File

@@ -289,7 +289,7 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* large, bool*
bool commit_zero = false;
if (!_mi_mem_commit(p, blocks * MI_SEGMENT_SIZE, &commit_zero, tld)) {
// failed to commit! unclaim and return
mi_bitmap_unclaim(&region->in_use, 1, blocks, bit_idx);
_mi_bitmap_unclaim(&region->in_use, 1, blocks, bit_idx);
return NULL;
}
if (commit_zero) *is_zero = true;
@@ -306,7 +306,7 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* large, bool*
// some blocks are still reset
mi_assert_internal(!info.x.is_large && !info.x.is_pinned);
mi_assert_internal(!mi_option_is_enabled(mi_option_eager_commit) || *commit || mi_option_get(mi_option_eager_commit_delay) > 0);
mi_bitmap_unclaim(&region->reset, 1, blocks, bit_idx);
_mi_bitmap_unclaim(&region->reset, 1, blocks, bit_idx);
if (*commit || !mi_option_is_enabled(mi_option_reset_decommits)) { // only if needed
bool reset_zero = false;
_mi_mem_unreset(p, blocks * MI_SEGMENT_SIZE, &reset_zero, tld);
@@ -426,7 +426,7 @@ void _mi_mem_free(void* p, size_t size, size_t alignment, size_t align_offset, s
}
// and unclaim
bool all_unclaimed = mi_bitmap_unclaim(&region->in_use, 1, blocks, bit_idx);
bool all_unclaimed = _mi_bitmap_unclaim(&region->in_use, 1, blocks, bit_idx);
mi_assert_internal(all_unclaimed); MI_UNUSED(all_unclaimed);
}
}