merge from dev (with is_pinned/is_large separation)

daan 2020-09-08 17:54:58 -07:00
commit fa01875eb2
12 changed files with 114 additions and 62 deletions
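In short: this merge separates "pinned" memory, memory that can never be decommitted, reset, or protected, from memory backed by large/huge OS pages. Large pages are always pinned, but memory can be pinned for other reasons too (for example, an arena created fully committed that has no per-block commit bitmap). A minimal sketch of the resulting invariants, using illustrative names that are not part of the commit:

#include <stdbool.h>

typedef struct example_mem_s {
  bool is_large;    // backed by large/huge OS pages?
  bool is_pinned;   // decommit/reset/protect forbidden?
} example_mem_t;

static bool example_allow_decommit(const example_mem_t* m) {
  // mirrors `segment->allow_decommit = ... && !mem_is_pinned && !mem_is_large`
  return !m->is_pinned && !m->is_large;
}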


@@ -71,13 +71,13 @@ bool _mi_os_unreset(void* p, size_t size, bool* is_zero, mi_stats_t* stats
size_t _mi_os_good_alloc_size(size_t size);
// arena.c
void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
void _mi_arena_free(void* p, size_t size, size_t memid, bool is_committed, mi_os_tld_t* tld);
// "segment-cache.c"
void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, bool* large, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
bool _mi_segment_cache_push(void* start, size_t size, size_t memid, mi_commit_mask_t commit_mask, bool is_large, mi_os_tld_t* tld);
void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
bool _mi_segment_cache_push(void* start, size_t size, size_t memid, mi_commit_mask_t commit_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld);
void _mi_segment_map_allocated_at(const mi_segment_t* segment);
void _mi_segment_map_freed_at(const mi_segment_t* segment);


@@ -288,10 +288,11 @@ typedef uintptr_t mi_commit_mask_t;
// contain blocks.
typedef struct mi_segment_s {
size_t memid; // memory id for arena allocation
bool mem_is_fixed; // `true` if we cannot decommit/reset/protect in this memory (i.e. when allocated using large OS pages)
bool mem_is_pinned; // `true` if we cannot decommit/reset/protect in this memory (i.e. when allocated using large OS pages)
bool mem_is_large; // in large/huge os pages?
bool mem_is_committed; // `true` if the whole segment is eagerly committed
bool allow_decommit;
mi_msecs_t decommit_expire;
mi_commit_mask_t decommit_mask;
mi_commit_mask_t commit_mask;


@@ -304,6 +304,7 @@ typedef enum mi_option_e {
mi_option_reset_decommits,
mi_option_large_os_pages, // implies eager commit
mi_option_reserve_huge_os_pages,
mi_option_reserve_os_memory,
mi_option_segment_cache,
mi_option_page_reset,
mi_option_abandoned_page_reset,
@@ -313,6 +314,7 @@ typedef enum mi_option_e {
mi_option_reset_delay,
mi_option_arena_reset_delay,
mi_option_use_numa_nodes,
mi_option_limit_os_alloc,
mi_option_os_tag,
mi_option_max_errors,
_mi_option_last


@@ -38,6 +38,7 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_sec
void _mi_os_free_huge_pages(void* p, size_t size, mi_stats_t* stats);
bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
/* -----------------------------------------------------------
@@ -59,8 +60,8 @@ typedef struct mi_arena_s {
size_t field_count; // number of bitmap fields (where `field_count * MI_BITMAP_FIELD_BITS >= block_count`)
int numa_node; // associated NUMA node
bool is_zero_init; // is the arena zero initialized?
bool is_committed; // is the memory committed
bool is_large; // large OS page allocated
bool is_committed; // is the memory fully committed? (if so, blocks_committed == NULL)
bool is_large; // large- or huge OS pages (always committed)
_Atomic(uintptr_t) search_idx; // optimization to start the search for free blocks
mi_bitmap_field_t* blocks_dirty; // are the blocks potentially non-zero?
mi_bitmap_field_t* blocks_committed; // if `!is_committed`, are the blocks committed?
@@ -116,16 +117,17 @@ static bool mi_arena_alloc(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t*
----------------------------------------------------------- */
static mi_decl_noinline void* mi_arena_alloc_from(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
bool* commit, bool* large, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
{
mi_bitmap_index_t bitmap_index;
if (!mi_arena_alloc(arena, needed_bcount, &bitmap_index)) return NULL;
// claimed it! set the dirty bits (todo: no need for an atomic op here?)
void* p = arena->start + (mi_bitmap_index_bit(bitmap_index)*MI_ARENA_BLOCK_SIZE);
*memid = mi_arena_id_create(arena_index, bitmap_index);
*is_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
*large = arena->is_large;
*is_pinned = (arena->is_large || arena->is_committed);
if (arena->is_committed) {
// always committed
*commit = true;
@@ -147,7 +149,7 @@ static mi_decl_noinline void* mi_arena_alloc_from(mi_arena_t* arena, size_t aren
return p;
}
static mi_decl_noinline void* mi_arena_allocate(int numa_node, size_t size, size_t alignment, bool* commit, bool* large, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
static mi_decl_noinline void* mi_arena_allocate(int numa_node, size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
{
UNUSED_RELEASE(alignment);
mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
@@ -163,7 +165,7 @@ static mi_decl_noinline void* mi_arena_allocate(int numa_node, size_t size, size
if ((arena->numa_node<0 || arena->numa_node==numa_node) && // numa local?
(*large || !arena->is_large)) // large OS pages allowed, or arena is not large OS pages
{
void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_zero, memid, tld);
void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_pinned, is_zero, memid, tld);
mi_assert_internal((uintptr_t)p % alignment == 0);
if (p != NULL) {
return p;
@@ -178,7 +180,7 @@ static mi_decl_noinline void* mi_arena_allocate(int numa_node, size_t size, size
if ((arena->numa_node>=0 && arena->numa_node!=numa_node) && // not numa local!
(*large || !arena->is_large)) // large OS pages allowed, or arena is not large OS pages
{
void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_zero, memid, tld);
void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_pinned, is_zero, memid, tld);
mi_assert_internal((uintptr_t)p % alignment == 0);
if (p != NULL) {
return p;
@@ -189,13 +191,14 @@ static mi_decl_noinline void* mi_arena_allocate(int numa_node, size_t size, size
}
void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_zero,
void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero,
size_t* memid, mi_os_tld_t* tld)
{
mi_assert_internal(commit != NULL && large != NULL && is_zero != NULL && memid != NULL && tld != NULL);
mi_assert_internal(commit != NULL && is_pinned != NULL && is_zero != NULL && memid != NULL && tld != NULL);
mi_assert_internal(size > 0);
*memid = MI_MEMID_OS;
*is_zero = false;
*is_pinned = false;
bool default_large = false;
if (large==NULL) large = &default_large; // ensure `large != NULL`
@@ -203,19 +206,22 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool*
// try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN) {
void* p = mi_arena_allocate(numa_node, size, alignment, commit, large, is_zero, memid, tld);
void* p = mi_arena_allocate(numa_node, size, alignment, commit, large, is_pinned, is_zero, memid, tld);
if (p != NULL) return p;
}
// finally, fall back to the OS
if (mi_option_is_enabled(mi_option_limit_os_alloc)) return NULL;
*is_zero = true;
*memid = MI_MEMID_OS;
return _mi_os_alloc_aligned(size, alignment, *commit, large, tld->stats);
void* p = _mi_os_alloc_aligned(size, alignment, *commit, large, tld->stats);
if (p != NULL) *is_pinned = *large;
return p;
}
void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
{
return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, commit, large, is_zero, memid, tld);
return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, commit, large, is_pinned, is_zero, memid, tld);
}
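For reference, a hedged sketch of how a caller supplies the widened out-parameter set (assuming an `mi_os_tld_t* tld` is in scope; `large` may be NULL, the other pointers may not, per the assertion in `_mi_arena_alloc_aligned`):

bool commit = true;      // in: request committed memory; out: actual commit state
bool large = false;      // in: allow large OS pages; out: whether we got them
bool is_pinned = false;  // out: true if decommit/reset is not allowed
bool is_zero = false;    // out: true if the memory is known to be zeroed
size_t memid = 0;        // out: arena block id, or MI_MEMID_OS on the OS fallback
void* p = _mi_arena_alloc(MI_ARENA_BLOCK_SIZE, &commit, &large, &is_pinned, &is_zero, &memid, tld);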
/* -----------------------------------------------------------
@@ -239,6 +245,8 @@ void _mi_arena_free(void* p, size_t size, size_t memid, bool is_committed, mi_os
mi_assert_internal(arena_idx < MI_MAX_ARENAS);
mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t,&mi_arenas[arena_idx]);
mi_assert_internal(arena != NULL);
const size_t blocks = mi_block_count_of_size(size);
// checks
if (arena == NULL) {
_mi_error_message(EINVAL, "trying to free from non-existent arena: %p, size %zu, memid: 0x%zx\n", p, size, memid);
return;
@@ -248,9 +256,18 @@ void _mi_arena_free(void* p, size_t size, size_t memid, bool is_committed, mi_os
_mi_error_message(EINVAL, "trying to free from non-existent arena block: %p, size %zu, memid: 0x%zx\n", p, size, memid);
return;
}
const size_t blocks = mi_block_count_of_size(size);
bool ones = _mi_bitmap_unclaim_across(arena->blocks_inuse, arena->field_count, blocks, bitmap_idx);
if (!ones) {
// potentially decommit
if (arena->is_committed) {
mi_assert_internal(all_committed);
}
else {
mi_assert_internal(arena->blocks_committed != NULL);
_mi_os_decommit(p, blocks * MI_ARENA_BLOCK_SIZE, tld->stats); // ok if this fails
_mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
}
// and make it available to others again
bool all_inuse = _mi_bitmap_unclaim_across(arena->blocks_inuse, arena->field_count, blocks, bitmap_idx);
if (!all_inuse) {
_mi_error_message(EAGAIN, "trying to free an already freed block: %p, size %zu\n", p, size);
return;
};
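The double-free check above relies on `_mi_bitmap_unclaim_across` reporting whether every bit in the run was set before clearing. A simplified single-field sketch of that semantics (hypothetical name; the real variant also handles runs that span multiple bitmap fields):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool sketch_bitmap_unclaim(_Atomic(uintptr_t)* field, size_t count, size_t bit_idx) {
  // assumes 0 < count and bit_idx + count < the number of bits in uintptr_t
  const uintptr_t mask = (((uintptr_t)1 << count) - 1) << bit_idx;
  const uintptr_t prev = atomic_fetch_and(field, ~mask);   // atomically clear the run
  return ((prev & mask) == mask);  // true iff every bit was set, i.e. the block was in use
}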
@@ -277,9 +294,14 @@ static bool mi_arena_add(mi_arena_t* arena) {
bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept
{
if (is_large) {
mi_assert_internal(is_committed);
is_committed = true;
}
const size_t bcount = mi_block_count_of_size(size);
const size_t fields = _mi_divide_up(bcount, MI_BITMAP_FIELD_BITS);
const size_t bitmaps = (is_committed ? 3 : 2);
const size_t bitmaps = (is_committed ? 2 : 3);
const size_t asize = sizeof(mi_arena_t) + (bitmaps*fields*sizeof(mi_bitmap_field_t));
mi_arena_t* arena = (mi_arena_t*)_mi_os_alloc(asize, &_mi_stats_main); // TODO: can we avoid allocating from the OS?
if (arena == NULL) return false;
@@ -315,12 +337,12 @@ int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noe
bool large = allow_large;
void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, &large, &_mi_stats_main);
if (start==NULL) return ENOMEM;
if (!mi_manage_os_memory(start, size, commit, large, true, -1)) {
if (!mi_manage_os_memory(start, size, (large || commit), large, true, -1)) {
_mi_os_free_ex(start, size, commit, &_mi_stats_main);
_mi_verbose_message("failed to reserve %zu k memory\n", _mi_divide_up(size,1024));
return ENOMEM;
}
_mi_verbose_message("reserved %zu kb memory\n", _mi_divide_up(size,1024));
_mi_verbose_message("reserved %zu kb memory%s\n", _mi_divide_up(size,1024), large ? " (in large os pages)" : "");
return 0;
}
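A hedged usage sketch of the new reservation path together with `mi_option_limit_os_alloc` (both introduced in this commit); this is illustrative, not code from the repository:

#include <mimalloc.h>

int main(void) {
  // reserve 1 GiB up front as a mimalloc arena (committed, large OS pages allowed)
  if (mi_reserve_os_memory((size_t)1024 * 1024 * 1024, /*commit*/ true, /*allow_large*/ true) != 0) {
    return 1;  // ENOMEM: the reservation failed
  }
  // optionally forbid further OS allocation, so all memory comes from the arena
  mi_option_enable(mi_option_limit_os_alloc);
  void* p = mi_malloc(64);
  mi_free(p);
  return 0;
}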


@@ -387,10 +387,8 @@ bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size
return mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, NULL);
}
/*
bool _mi_bitmap_is_any_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
bool any_ones;
mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, &any_ones);
return any_ones;
}
*/


@@ -102,5 +102,6 @@ bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t
bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero);
bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
bool _mi_bitmap_is_any_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
#endif
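These `_across` variants operate on a run of bits that may span multiple bitmap fields; the claim's return value is what feeds `*is_zero` in the arena changes earlier in this diff (the blocks are known-zero exactly when none of their dirty bits were set before). A simplified single-field sketch of the claim semantics (hypothetical name; the real function's exact contract may differ):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool sketch_bitmap_claim(_Atomic(uintptr_t)* field, size_t count, size_t bit_idx, bool* pany_zero) {
  const size_t bits = sizeof(uintptr_t) * 8;
  // assumes 0 < count and bit_idx + count <= bits
  const uintptr_t mask = ((count == bits) ? ~(uintptr_t)0 : (((uintptr_t)1 << count) - 1)) << bit_idx;
  const uintptr_t prev = atomic_fetch_or(field, mask);          // atomically set the run
  if (pany_zero != NULL) *pany_zero = ((prev & mask) != mask);  // some bit was still clear
  return ((prev & mask) == 0);                                  // all bits were clear before
}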


@@ -503,6 +503,10 @@ void mi_process_init(void) mi_attr_noexcept {
if (mi_option_is_enabled(mi_option_reserve_huge_os_pages)) {
size_t pages = mi_option_get(mi_option_reserve_huge_os_pages);
mi_reserve_huge_os_pages_interleave(pages, 0, pages*500);
}
if (mi_option_is_enabled(mi_option_reserve_os_memory)) {
long ksize = mi_option_get(mi_option_reserve_os_memory);
if (ksize > 0) mi_reserve_os_memory((size_t)ksize*KiB, true, true);
}
}


@@ -74,7 +74,8 @@ static mi_option_desc_t options[_mi_option_last] =
{ 0, UNINIT, MI_OPTION(reset_decommits) }, // reset uses MADV_FREE/MADV_DONTNEED
#endif
{ 0, UNINIT, MI_OPTION(large_os_pages) }, // use large OS pages, use only with eager commit to prevent fragmentation of VMA's
{ 0, UNINIT, MI_OPTION(reserve_huge_os_pages) },
{ 0, UNINIT, MI_OPTION(reserve_huge_os_pages) }, // per 1GiB huge pages
{ 0, UNINIT, MI_OPTION(reserve_os_memory) },
{ 0, UNINIT, MI_OPTION(segment_cache) }, // cache N segments per thread
{ 0, UNINIT, MI_OPTION(page_reset) }, // reset page memory on free
{ 0, UNINIT, MI_OPTION(abandoned_page_reset) },// reset free page memory when a thread terminates
@@ -90,6 +91,7 @@ static mi_option_desc_t options[_mi_option_last] =
{ 500, UNINIT, MI_OPTION(reset_delay) }, // reset delay in milli-seconds
{ 1000, UNINIT, MI_OPTION(arena_reset_delay) }, // reset delay in milli-seconds for freed segments
{ 0, UNINIT, MI_OPTION(use_numa_nodes) }, // 0 = use available numa nodes, otherwise use at most N nodes.
{ 0, UNINIT, MI_OPTION(limit_os_alloc) }, // 1 = do not use OS memory for allocation (but only reserved arenas)
{ 100, UNINIT, MI_OPTION(os_tag) }, // Apple-specific for now, but may serve a similar purpose on other platforms
{ 16, UNINIT, MI_OPTION(max_errors) } // maximum errors that are output
};
@@ -507,6 +509,14 @@ static void mi_option_init(mi_option_desc_t* desc) {
else {
char* end = buf;
long value = strtol(buf, &end, 10);
if (desc->option == mi_option_reserve_os_memory) {
// this option is interpreted in KiB to prevent overflow of `long`
if (*end == 'K') { end++; }
else if (*end == 'M') { value *= KiB; end++; }
else if (*end == 'G') { value *= MiB; end++; }
else { value = (value + KiB - 1) / KiB; }
if (*end == 'B') { end++; }
}
if (*end == 0) {
desc->value = value;
desc->init = INITIALIZED;
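Worked examples of the suffix handling above, assuming `KiB` is 1024 and `MiB` is 1024*1024 as elsewhere in mimalloc (the stored value is always in KiB and is scaled back up in `mi_process_init`; it typically arrives via the `MIMALLOC_RESERVE_OS_MEMORY` environment variable):

"1000"  ->  (1000 + 1023) / 1024 = 1 KiB    (plain numbers are bytes, rounded up to KiB)
"64K"   ->  64 KiB                          (already in KiB)
"16M"   ->  16 * 1024    = 16384 KiB
"1G"    ->  1  * 1048576 = 1048576 KiB
"2GB"   ->  same as "2G": a trailing 'B' is consumed after the K/M/G scale character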


@@ -50,8 +50,8 @@ bool _mi_os_unreset(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
// arena.c
void _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_stats_t* stats);
void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
@@ -77,7 +77,8 @@ typedef union mi_region_info_u {
uintptr_t value;
struct {
bool valid; // initialized?
bool is_large; // allocated in fixed large/huge OS pages
bool is_large:1; // allocated in fixed large/huge OS pages
bool is_pinned:1; // pinned memory cannot be decommitted
short numa_node; // the associated NUMA node (where -1 means no associated node)
} x;
} mi_region_info_t;
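Making `is_large` and `is_pinned` single-bit fields keeps the flag struct small, so the whole record still fits in one `uintptr_t` and can be published with a single atomic store (see `mi_atomic_store_release(&r->info, info.value)` below). A hedged sketch of that size invariant, with illustrative names:

#include <stdbool.h>
#include <stdint.h>

typedef union example_region_info_u {
  uintptr_t value;
  struct {
    bool  valid;
    bool  is_large  : 1;   // single-bit flags: both fit in one byte,
    bool  is_pinned : 1;   // leaving room for the numa node
    short numa_node;
  } x;
} example_region_info_t;

// the record must stay word-sized so it can be stored/loaded atomically
_Static_assert(sizeof(example_region_info_t) == sizeof(uintptr_t),
               "region info must fit in a single word");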
@@ -177,8 +178,9 @@ static bool mi_region_try_alloc_os(size_t blocks, bool commit, bool allow_large,
bool region_commit = (commit && mi_option_is_enabled(mi_option_eager_region_commit));
bool region_large = (commit && allow_large);
bool is_zero = false;
bool is_pinned = false;
size_t arena_memid = 0;
void* const start = _mi_arena_alloc_aligned(MI_REGION_SIZE, MI_SEGMENT_ALIGN, &region_commit, &region_large, &is_zero, &arena_memid, tld);
void* const start = _mi_arena_alloc_aligned(MI_REGION_SIZE, MI_SEGMENT_ALIGN, &region_commit, &region_large, &is_pinned, &is_zero, &arena_memid, tld);
if (start == NULL) return false;
mi_assert_internal(!(region_large && !allow_large));
mi_assert_internal(!region_large || region_commit);
@@ -208,6 +210,7 @@ static bool mi_region_try_alloc_os(size_t blocks, bool commit, bool allow_large,
info.value = 0; // initialize the full union to zero
info.x.valid = true;
info.x.is_large = region_large;
info.x.is_pinned = is_pinned;
info.x.numa_node = (short)_mi_os_numa_node(tld);
mi_atomic_store_release(&r->info, info.value); // now make it available to others
*region = r;
@@ -259,16 +262,16 @@ static bool mi_region_try_claim(int numa_node, size_t blocks, bool allow_large,
}
static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* is_large, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
{
mi_assert_internal(blocks <= MI_BITMAP_FIELD_BITS);
mem_region_t* region;
mi_bitmap_index_t bit_idx;
const int numa_node = (_mi_os_numa_node_count() <= 1 ? -1 : _mi_os_numa_node(tld));
// try to claim in existing regions
if (!mi_region_try_claim(numa_node, blocks, *is_large, &region, &bit_idx, tld)) {
if (!mi_region_try_claim(numa_node, blocks, *large, &region, &bit_idx, tld)) {
// otherwise try to allocate a fresh region and claim in there
if (!mi_region_try_alloc_os(blocks, *commit, *is_large, &region, &bit_idx, tld)) {
if (!mi_region_try_alloc_os(blocks, *commit, *large, &region, &bit_idx, tld)) {
// out of regions or memory
return NULL;
}
@@ -282,12 +285,13 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* is_large, bo
mi_region_info_t info;
info.value = mi_atomic_load_acquire(&region->info);
uint8_t* start = (uint8_t*)mi_atomic_load_ptr_acquire(uint8_t,&region->start);
mi_assert_internal(!(info.x.is_large && !*is_large));
mi_assert_internal(!(info.x.is_large && !*large));
mi_assert_internal(start != NULL);
*is_zero = _mi_bitmap_claim(&region->dirty, 1, blocks, bit_idx, NULL);
*is_large = info.x.is_large;
*memid = mi_memid_create(region, bit_idx);
*is_zero = _mi_bitmap_claim(&region->dirty, 1, blocks, bit_idx, NULL);
*large = info.x.is_large;
*is_pinned = info.x.is_pinned;
*memid = mi_memid_create(region, bit_idx);
void* p = start + (mi_bitmap_index_bit_in_field(bit_idx) * MI_SEGMENT_SIZE);
// commit
@@ -296,7 +300,7 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* is_large, bo
bool any_uncommitted;
_mi_bitmap_claim(&region->commit, 1, blocks, bit_idx, &any_uncommitted);
if (any_uncommitted) {
mi_assert_internal(!info.x.is_large);
mi_assert_internal(!info.x.is_large && !info.x.is_pinned);
bool commit_zero;
_mi_mem_commit(p, blocks * MI_SEGMENT_SIZE, &commit_zero, tld);
if (commit_zero) *is_zero = true;
@@ -311,7 +315,7 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* is_large, bo
// unreset reset blocks
if (_mi_bitmap_is_any_claimed(&region->reset, 1, blocks, bit_idx)) {
// some blocks are still reset
mi_assert_internal(!info.x.is_large);
mi_assert_internal(!info.x.is_large && !info.x.is_pinned);
mi_assert_internal(!mi_option_is_enabled(mi_option_eager_commit) || *commit || mi_option_get(mi_option_eager_commit_delay) > 0);
mi_bitmap_unclaim(&region->reset, 1, blocks, bit_idx);
if (*commit || !mi_option_is_enabled(mi_option_reset_decommits)) { // only if needed
@@ -338,12 +342,13 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* is_large, bo
// Allocate `size` memory aligned at `alignment`. Return non NULL on success, with a given memory `id`.
// (`id` is abstract, but `id = idx*MI_REGION_MAP_BITS + bitidx`)
void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
{
mi_assert_internal(memid != NULL && tld != NULL);
mi_assert_internal(size > 0);
*memid = 0;
*is_zero = false;
*is_pinned = false;
bool default_large = false;
if (large==NULL) large = &default_large; // ensure `large != NULL`
if (size == 0) return NULL;
@@ -354,14 +359,14 @@ void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* l
size_t arena_memid;
const size_t blocks = mi_region_block_count(size);
if (blocks <= MI_REGION_MAX_OBJ_BLOCKS && alignment <= MI_SEGMENT_ALIGN) {
p = mi_region_try_alloc(blocks, commit, large, is_zero, memid, tld);
p = mi_region_try_alloc(blocks, commit, large, is_pinned, is_zero, memid, tld);
if (p == NULL) {
_mi_warning_message("unable to allocate from region: size %zu\n", size);
}
}
if (p == NULL) {
// and otherwise fall back to the OS
p = _mi_arena_alloc_aligned(size, alignment, commit, large, is_zero, &arena_memid, tld);
p = _mi_arena_alloc_aligned(size, alignment, commit, large, is_pinned, is_zero, &arena_memid, tld);
*memid = mi_memid_create_from_arena(arena_memid);
}
@@ -418,7 +423,7 @@ void _mi_mem_free(void* p, size_t size, size_t id, bool full_commit, bool any_re
}
// reset the blocks to reduce the working set.
if (!info.x.is_large && mi_option_is_enabled(mi_option_segment_reset)
if (!info.x.is_large && !info.x.is_pinned && mi_option_is_enabled(mi_option_segment_reset)
&& (mi_option_is_enabled(mi_option_eager_commit) ||
mi_option_is_enabled(mi_option_reset_decommits))) // cannot reset halfway committed segments, use only `option_page_reset` instead
{


@@ -25,6 +25,7 @@ terms of the MIT license. A copy of the license can be found in the file
typedef struct mi_cache_slot_s {
void* p;
size_t memid;
bool is_pinned;
mi_commit_mask_t commit_mask;
_Atomic(mi_msecs_t) expire;
} mi_cache_slot_t;
@@ -36,7 +37,7 @@ static mi_decl_cache_align mi_bitmap_field_t cache_available_large[MI_CACHE_FIEL
static mi_decl_cache_align mi_bitmap_field_t cache_inuse[MI_CACHE_FIELDS]; // zero bit = free
mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, bool* large, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
{
// only segment blocks
if (size != MI_SEGMENT_SIZE) return NULL;
@@ -67,6 +68,7 @@ mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* comm
mi_cache_slot_t* slot = &cache[mi_bitmap_index_bit(bitidx)];
void* p = slot->p;
*memid = slot->memid;
*is_pinned = slot->is_pinned;
*is_zero = false;
mi_commit_mask_t cmask = slot->commit_mask; // copy
slot->p = NULL;
@@ -139,7 +141,7 @@ static mi_decl_noinline void mi_segment_cache_purge(mi_os_tld_t* tld)
}
}
mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t memid, mi_commit_mask_t commit_mask, bool is_large, mi_os_tld_t* tld)
mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t memid, mi_commit_mask_t commit_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld)
{
// only for normal segment blocks
if (size != MI_SEGMENT_SIZE || ((uintptr_t)start % MI_SEGMENT_ALIGN) != 0) return false;
@@ -162,14 +164,20 @@ mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t me
mi_assert_internal(_mi_bitmap_is_claimed(cache_available, MI_CACHE_FIELDS, 1, bitidx));
mi_assert_internal(_mi_bitmap_is_claimed(cache_available_large, MI_CACHE_FIELDS, 1, bitidx));
#if MI_DEBUG>1
if (is_pinned || is_large) {
mi_assert_internal(mi_commit_mask_is_full(commit_mask));
}
#endif
// set the slot
mi_cache_slot_t* slot = &cache[mi_bitmap_index_bit(bitidx)];
slot->p = start;
slot->memid = memid;
slot->is_pinned = is_pinned;
mi_atomic_storei64_relaxed(&slot->expire,(mi_msecs_t)0);
slot->commit_mask = commit_mask;
if (!mi_commit_mask_is_empty(commit_mask) && !is_large) {
if (!mi_commit_mask_is_empty(commit_mask) && !is_large && !is_pinned) {
long delay = mi_option_get(mi_option_arena_reset_delay);
if (delay == 0) {
_mi_abandoned_await_readers(); // wait until safe to decommit

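A hedged round-trip sketch of the cache API as extended here: the pinned state travels with the slot, so a popped segment knows decommit is forbidden, and pinned (or large) slots are cached with a full commit mask and skip the delayed-decommit path (assuming an `mi_os_tld_t* tld` is in scope):

bool large = false, is_pinned = false, is_zero = false;
mi_commit_mask_t commit_mask;
size_t memid;
void* p = _mi_segment_cache_pop(MI_SEGMENT_SIZE, &commit_mask, &large, &is_pinned, &is_zero, &memid, tld);
if (p == NULL) {
  // cache miss: fall back to _mi_arena_alloc_aligned (see mi_segment_init below)
}
// ... later, when freeing a full segment (see mi_segment_os_free below):
_mi_segment_cache_push(p, MI_SEGMENT_SIZE, memid, commit_mask, large, is_pinned, tld);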

@@ -117,7 +117,6 @@ static bool mi_slice_is_used(const mi_slice_t* slice) {
}
#if (MI_DEBUG>=3)
static bool mi_span_queue_contains(mi_span_queue_t* sq, mi_slice_t* slice) {
for (mi_slice_t* s = sq->first; s != NULL; s = s->next) {
@@ -258,11 +257,11 @@ static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
// _mi_os_free(segment, mi_segment_size(segment), /*segment->memid,*/ tld->stats);
const size_t size = mi_segment_size(segment);
if (size != MI_SEGMENT_SIZE || !_mi_segment_cache_push(segment, size, segment->memid, segment->commit_mask, segment->mem_is_fixed, tld->os)) {
if (size != MI_SEGMENT_SIZE || !_mi_segment_cache_push(segment, size, segment->memid, segment->commit_mask, segment->mem_is_large, segment->mem_is_pinned, tld->os)) {
const size_t csize = mi_commit_mask_committed_size(segment->commit_mask, size);
if (csize > 0 && !segment->mem_is_fixed) _mi_stat_decrease(&_mi_stats_main.committed, csize);
if (csize > 0 && !segment->mem_is_pinned) _mi_stat_decrease(&_mi_stats_main.committed, csize);
_mi_abandoned_await_readers(); // wait until safe to free
_mi_arena_free(segment, mi_segment_size(segment), segment->memid, segment->mem_is_fixed /* pretend not committed to not double count decommits */, tld->os);
_mi_arena_free(segment, mi_segment_size(segment), segment->memid, segment->mem_is_pinned /* pretend not committed to not double count decommits */, tld->os);
}
}
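`mi_commit_mask_committed_size` above computes how many bytes of the segment are actually committed; since `mi_commit_mask_t` is a plain `uintptr_t` (one bit per equal-sized slice of the segment, see the typedef earlier in this diff), that is essentially a population count. A sketch of the idea (the real helper may differ in detail):

#include <stddef.h>
#include <stdint.h>

static size_t example_committed_size(uintptr_t mask, size_t total_size) {
  const size_t bits = sizeof(uintptr_t) * 8;
  size_t count = 0;
  for (size_t i = 0; i < bits; i++) {   // population count of the commit mask
    count += (size_t)((mask >> i) & 1);
  }
  return (total_size / bits) * count;   // committed bytes = set bits * slice size
}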
@@ -655,10 +654,11 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_
if (segment==NULL) {
// Allocate the segment from the OS
bool mem_large = (!eager_delay && (MI_SECURE==0)); // only allow large OS pages once we are no longer lazy
bool is_pinned = false;
size_t memid = 0;
segment = (mi_segment_t*)_mi_segment_cache_pop(segment_size, &commit_mask, &mem_large, &is_zero, &memid, os_tld);
segment = (mi_segment_t*)_mi_segment_cache_pop(segment_size, &commit_mask, &mem_large, &is_pinned, &is_zero, &memid, os_tld);
if (segment==NULL) {
segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, MI_SEGMENT_SIZE, &commit, &mem_large, &is_zero, &memid, os_tld);
segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, MI_SEGMENT_SIZE, &commit, &mem_large, &is_pinned, &is_zero, &memid, os_tld);
if (segment == NULL) return NULL; // failed to allocate
commit_mask = (commit ? mi_commit_mask_full() : mi_commit_mask_empty());
}
@@ -674,7 +674,8 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_
mi_commit_mask_set(&commit_mask,mi_commit_mask_create(0, commit_needed));
}
segment->memid = memid;
segment->mem_is_fixed = mem_large;
segment->mem_is_pinned = is_pinned;
segment->mem_is_large = mem_large;
segment->mem_is_committed = mi_commit_mask_is_full(commit_mask);
mi_segments_track_size((long)(segment_size), tld);
_mi_segment_map_allocated_at(segment);
@@ -690,7 +691,7 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_
if (!commit_info_still_good) {
segment->commit_mask = commit_mask; // on lazy commit, the initial part is always committed
segment->allow_decommit = (mi_option_is_enabled(mi_option_allow_decommit) && !segment->mem_is_fixed);
segment->allow_decommit = (mi_option_is_enabled(mi_option_allow_decommit) && !segment->mem_is_pinned && !segment->mem_is_large);
segment->decommit_expire = 0;
segment->decommit_mask = mi_commit_mask_empty();
}
@@ -801,7 +802,7 @@ static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld
_mi_stat_decrease(&tld->stats->pages, 1);
// reset the page memory to reduce memory pressure?
if (!segment->mem_is_fixed && !page->is_reset && mi_option_is_enabled(mi_option_page_reset)) {
if (!segment->mem_is_pinned && !page->is_reset && mi_option_is_enabled(mi_option_page_reset)) {
size_t psize;
uint8_t* start = _mi_page_start(segment, page, &psize);
page->is_reset = true;


@@ -235,7 +235,7 @@ int main(int argc, char** argv) {
if (n > 0) ITER = n;
}
printf("Using %d threads with a %d%% load-per-thread and %d iterations\n", THREADS, SCALE, ITER);
//mi_reserve_os_memory(512*1024*1024ULL, true, true);
//mi_reserve_os_memory(1024*1024*1024ULL, false, true);
//int res = mi_reserve_huge_os_pages(4,1);
//printf("(reserve huge: %i)\n", res);