only reclaim for exclusive heaps in their associated arena

parent 635d626c82
commit 5501f59f6c
@@ -300,6 +300,11 @@ mi_decl_export void mi_subproc_add_current_thread(mi_subproc_id_t sub
 // Experimental: visit abandoned heap areas (from threads that have been terminated)
 mi_decl_export bool mi_abandoned_visit_blocks(mi_subproc_id_t subproc_id, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);
 
+// Experimental: create a new heap with a specified heap tag. Set `allow_destroy` to false to allow the thread
+// to reclaim abandoned memory (with a compatible heap_tag and arena_id) but in that case `mi_heap_destroy` will
+// fall back to `mi_heap_delete`.
+mi_decl_export mi_decl_nodiscard mi_heap_t* mi_heap_new_ex(int heap_tag, bool allow_destroy, mi_arena_id_t arena_id);
+
 // deprecated
 mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept;
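For context, a minimal sketch of how the new API could be used, assuming the existing `mi_reserve_os_memory_ex` to create an exclusive arena (illustrative only, not part of this commit):

#include <mimalloc.h>

// Sketch: create an exclusive arena and bind a tagged heap to it.
// With `allow_destroy=false` the heap may reclaim abandoned segments,
// but after this commit only from its own arena.
void example_exclusive_heap(void) {
  mi_arena_id_t arena_id;
  if (mi_reserve_os_memory_ex(64 * 1024 * 1024 /* 64 MiB */, false /* commit */,
                              false /* allow_large */, true /* exclusive */, &arena_id) != 0) return;
  mi_heap_t* heap = mi_heap_new_ex(1 /* heap tag */, false /* allow_destroy */, arena_id);
  if (heap == NULL) return;
  void* p = mi_heap_malloc(heap, 128);
  mi_free(p);
  mi_heap_delete(heap);  // mi_heap_destroy would fall back to mi_heap_delete here
}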
@@ -131,8 +131,8 @@ void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid);
 void _mi_arena_meta_free(void* p, mi_memid_t memid, size_t size);
 
 typedef struct mi_arena_field_cursor_s { // abstract struct
-  mi_arena_id_t  start;
-  int            count;
+  size_t         start;
+  size_t         end;
   size_t         bitmap_idx;
   mi_subproc_t*  subproc;
 } mi_arena_field_cursor_t;
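The cursor now tracks a half-open range [start, end) of arena indices instead of a start id plus count; indices at or past the arena count wrap around. A small standalone sketch of that traversal order (hypothetical helper, not mimalloc code):

#include <stdio.h>
#include <stddef.h>

// Hypothetical helper: visit arena indices [start, end) with wrap-around,
// mirroring how the new cursor fields are interpreted.
static void visit_range(size_t start, size_t end, size_t max_arena) {
  for (size_t i = start; i < end; i++) {
    size_t arena_idx = (i >= max_arena ? i % max_arena : i);
    printf("visit arena %zu\n", arena_idx);
  }
}

int main(void) {
  visit_range(3, 3 + 5, 5);  // random start at 3 over 5 arenas: 3, 4, 0, 1, 2
  visit_range(2, 2 + 1, 5);  // exclusive heap bound to arena 2: just 2
  return 0;
}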
src/arena.c (35 changed lines)
@@ -850,11 +850,20 @@ void _mi_arena_segment_mark_abandoned(mi_segment_t* segment)
 // start a cursor at a randomized arena
 void _mi_arena_field_cursor_init(mi_heap_t* heap, mi_subproc_t* subproc, mi_arena_field_cursor_t* current) {
   mi_assert_internal(heap == NULL || heap->tld->segments.subproc == subproc);
-  const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
-  current->start = (heap == NULL || max_arena == 0 ? 0 : (mi_arena_id_t)( _mi_heap_random_next(heap) % max_arena));
-  current->count = 0;
-  current->bitmap_idx = 0;
+  current->bitmap_idx = 0;
   current->subproc = subproc;
+  const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
+  if (heap != NULL && heap->arena_id != _mi_arena_id_none()) {
+    // for a heap that is bound to one arena, only visit that arena
+    current->start = mi_arena_id_index(heap->arena_id);
+    current->end = current->start + 1;
+  }
+  else {
+    // otherwise visit all starting at a random location
+    current->start = (heap == NULL || max_arena == 0 ? 0 : (mi_arena_id_t)(_mi_heap_random_next(heap) % max_arena));
+    current->end = current->start + max_arena;
+  }
+  mi_assert_internal(current->start < max_arena);
 }
 
 static mi_segment_t* mi_arena_segment_clear_abandoned_at(mi_arena_t* arena, mi_subproc_t* subproc, mi_bitmap_index_t bitmap_idx) {
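For reference, the cursor is consumed in an init-then-iterate pattern elsewhere in the allocator; a sketch of that pattern (requires mimalloc's internal headers; not the exact call site in this commit):

// Sketch: for an exclusive heap, _mi_arena_field_cursor_init now restricts
// the range to that heap's arena, so this loop never touches other arenas.
static void reclaim_all_abandoned(mi_heap_t* heap, mi_subproc_t* subproc) {
  mi_arena_field_cursor_t current;
  _mi_arena_field_cursor_init(heap, subproc, &current);
  mi_segment_t* segment;
  while ((segment = _mi_arena_segment_clear_abandoned_next(&current, true /* visit_all */)) != NULL) {
    // ... reclaim or free the abandoned segment ...
  }
}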
@@ -884,16 +893,15 @@ static mi_segment_t* mi_arena_segment_clear_abandoned_at(mi_arena_t* arena, mi_s
 // this does not set the thread id (so it appears as still abandoned)
 mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_field_cursor_t* previous, bool visit_all )
 {
-  const int max_arena = (int)mi_atomic_load_relaxed(&mi_arena_count);
+  const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
   if (max_arena <= 0 || mi_atomic_load_relaxed(&previous->subproc->abandoned_count) == 0) return NULL;
 
-  int count = previous->count;
   size_t field_idx = mi_bitmap_index_field(previous->bitmap_idx);
   size_t bit_idx = mi_bitmap_index_bit_in_field(previous->bitmap_idx) + 1;
   // visit arena's (from the previous cursor)
-  for (; count < max_arena; count++, field_idx = 0, bit_idx = 0) {
-    mi_arena_id_t arena_idx = previous->start + count;
-    if (arena_idx >= max_arena) { arena_idx = arena_idx % max_arena; } // wrap around
+  for ( ; previous->start < previous->end; previous->start++, field_idx = 0, bit_idx = 0) {
+    // index wraps around
+    size_t arena_idx = (previous->start >= max_arena ? previous->start % max_arena : previous->start);
     mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_idx]);
     if (arena != NULL) {
       bool has_lock = false;
@@ -918,11 +926,9 @@ mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_field_cursor_t* pr
           // pre-check if the bit is set
           size_t mask = ((size_t)1 << bit_idx);
           if mi_unlikely((field & mask) == mask) {
-            const mi_bitmap_index_t bitmap_idx = mi_bitmap_index_create(field_idx, bit_idx);
-            mi_segment_t* const segment = mi_arena_segment_clear_abandoned_at(arena, previous->subproc, bitmap_idx);
+            previous->bitmap_idx = mi_bitmap_index_create(field_idx, bit_idx);
+            mi_segment_t* const segment = mi_arena_segment_clear_abandoned_at(arena, previous->subproc, previous->bitmap_idx);
             if (segment != NULL) {
-              previous->bitmap_idx = bitmap_idx;
-              previous->count = count;
               //mi_assert_internal(arena->blocks_committed == NULL || _mi_bitmap_is_claimed(arena->blocks_committed, arena->field_count, 1, bitmap_idx));
               if (has_lock) { mi_lock_release(&arena->abandoned_visit_lock); }
               return segment;
@@ -935,8 +941,9 @@ mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_field_cursor_t* pr
     }
   }
   // no more found
+  mi_assert(previous->start == previous->end);
   previous->bitmap_idx = 0;
-  previous->count = 0;
+  previous->start = previous->end = 0;
   return NULL;
 }
 
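The cursor resumes from `bitmap_idx`, which packs a field index and a bit position into one value; on resume, scanning continues at the next bit. A simplified standalone sketch of that encoding (the real macros are internal and the field width depends on sizeof(size_t)):

#include <assert.h>
#include <stddef.h>

// Simplified sketch of the bitmap index packing used by the cursor.
#define FIELD_BITS 64

static size_t bitmap_index_create(size_t field_idx, size_t bit_idx) {
  return field_idx * FIELD_BITS + bit_idx;
}
static size_t bitmap_index_field(size_t bitmap_idx) { return bitmap_idx / FIELD_BITS; }
static size_t bitmap_index_bit(size_t bitmap_idx)   { return bitmap_idx % FIELD_BITS; }

int main(void) {
  size_t idx = bitmap_index_create(2, 17);
  assert(bitmap_index_field(idx) == 2 && bitmap_index_bit(idx) == 17);
  // On resume the scan starts at the stored bit plus one, matching
  // `mi_bitmap_index_bit_in_field(previous->bitmap_idx) + 1` above.
  return 0;
}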
src/heap.c (13 changed lines)
@@ -226,17 +226,22 @@ void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool
   heap->tld->heaps = heap;
 }
 
-mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) {
+mi_decl_nodiscard mi_heap_t* mi_heap_new_ex(int heap_tag, bool allow_destroy, mi_arena_id_t arena_id) {
   mi_heap_t* bheap = mi_heap_get_backing();
   mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t);  // todo: OS allocate in secure mode?
   if (heap == NULL) return NULL;
-  // don't reclaim abandoned pages or otherwise destroy is unsafe
-  _mi_heap_init(heap, bheap->tld, arena_id, true /* no reclaim */, 0 /* default tag */);
+  mi_assert(heap_tag >= 0 && heap_tag < 256);
+  _mi_heap_init(heap, bheap->tld, arena_id, allow_destroy /* no reclaim? */, (uint8_t)heap_tag /* heap tag */);
   return heap;
 }
 
+mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) {
+  return mi_heap_new_ex(0 /* default heap tag */, false /* don't allow `mi_heap_destroy` */, arena_id);
+}
+
 mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
-  return mi_heap_new_in_arena(_mi_arena_id_none());
+  // don't reclaim abandoned memory or otherwise destroy is unsafe
+  return mi_heap_new_ex(0 /* default heap tag */, true /* no reclaim */, _mi_arena_id_none());
 }
 
 bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid) {
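A sketch of the `allow_destroy=true` case, which matches the new `mi_heap_new` behavior; passing arena id 0 is assumed here to mean "no particular arena" (internally `_mi_arena_id_none()`):

#include <mimalloc.h>

// Sketch: a destroyable tagged heap. It never reclaims abandoned memory,
// so mi_heap_destroy stays safe.
void example_destroyable_heap(void) {
  mi_heap_t* heap = mi_heap_new_ex(0 /* default tag */, true /* allow_destroy */, 0);
  if (heap == NULL) return;
  (void)mi_heap_malloc(heap, 32);
  mi_heap_destroy(heap);  // frees everything in the heap at once
}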
@@ -905,7 +905,7 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
   mi_heap_t* target_heap = _mi_heap_by_tag(heap, page->heap_tag);  // allow custom heaps to separate objects
   if (target_heap == NULL) {
     target_heap = heap;
-    _mi_error_message(EINVAL, "page with tag %u cannot be reclaimed by a heap with the same tag (using tag %u instead)\n", page->heap_tag, heap->tag );
+    _mi_error_message(EINVAL, "page with tag %u cannot be reclaimed by a heap with the same tag (using heap tag %u instead)\n", page->heap_tag, heap->tag );
   }
   // associate the heap with this page, and allow heap thread delayed free again.
   mi_page_set_heap(page, target_heap);
@@ -948,6 +948,7 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
 bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment) {
   if (mi_atomic_load_relaxed(&segment->thread_id) != 0) return false;  // it is not abandoned
   if (segment->subproc != heap->tld->segments.subproc) return false;   // only reclaim within the same subprocess
+  if (!_mi_heap_memid_is_suitable(heap,segment->memid)) return false;  // don't reclaim between exclusive and non-exclusive arena's
   // don't reclaim more from a `free` call than half the current segments
   // this is to prevent a pure free-ing thread to start owning too many segments
   if (heap->tld->segments.reclaim_count * 2 > heap->tld->segments.count) return false;
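This added guard is what the commit title refers to: an exclusive heap only reclaims segments whose memory came from its own arena, and generic heaps skip exclusive arenas. A simplified sketch of that suitability rule (the real check is `_mi_heap_memid_is_suitable`; these parameters are illustrative):

#include <mimalloc.h>
#include <stdbool.h>

// Simplified sketch, not the actual implementation.
static bool memid_is_suitable_sketch(mi_arena_id_t heap_arena /* 0 = none */,
                                     mi_arena_id_t mem_arena, bool mem_is_exclusive) {
  if (heap_arena == 0) return !mem_is_exclusive;  // generic heaps skip exclusive arenas
  return (heap_arena == mem_arena);               // exclusive heaps stay in their own arena
}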
@@ -255,6 +255,12 @@ static void test_leak(void) {
 #endif
 
 int main(int argc, char** argv) {
+#ifdef HEAP_WALK
+  mi_option_enable(mi_option_visit_abandoned);
+#endif
+#ifndef NDEBUG
+  mi_option_set(mi_option_arena_reserve, 32 * 1024 /* in KiB = 32MiB */);
+#endif
 #ifndef USE_STD_MALLOC
   mi_stats_reset();
 #endif
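With `mi_option_visit_abandoned` enabled, abandoned areas can then be walked via the visitor API declared above; a sketch (assuming `mi_subproc_main()` for the default subprocess, and that a negative heap tag means "any tag"):

#include <mimalloc.h>
#include <stdio.h>

// Sketch: print every live block in abandoned heap areas of the main subprocess.
static bool print_block(const mi_heap_t* heap, const mi_heap_area_t* area,
                        void* block, size_t block_size, void* arg) {
  (void)heap; (void)area; (void)arg;
  if (block != NULL) printf("abandoned block %p of size %zu\n", block, block_size);
  return true;  // keep visiting
}

static void walk_abandoned(void) {
  mi_abandoned_visit_blocks(mi_subproc_main(), -1 /* heap tag (assumed: any) */,
                            true /* visit_blocks */, &print_block, NULL);
}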