merge arena_id from dev

commit e961ef705e
include/mimalloc-internal.h
@@ -89,9 +89,10 @@ size_t _mi_os_good_alloc_size(size_t size);
 bool  _mi_os_has_overcommit(void);
 
 // arena.c
-void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
-void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
+void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
+void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
 void  _mi_arena_free(void* p, size_t size, size_t memid, bool is_committed, mi_os_tld_t* tld);
+mi_arena_id_t _mi_arena_id_none(void);
 
 // "segment-cache.c"
 void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
include/mimalloc.h
@@ -280,6 +280,16 @@ mi_decl_export bool mi_manage_os_memory(void* start, size_t size, bool is_commit
 
 mi_decl_export void mi_debug_show_arenas(void) mi_attr_noexcept;
 
+// Experimental: heaps associated with specific memory arena's
+typedef int mi_arena_id_t;
+mi_decl_export int  mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
+mi_decl_export int  mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
+mi_decl_export bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
+
+#if MI_MALLOC_VERSION >= 200
+mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id, bool exclusive);
+#endif
+
 // deprecated
 mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept;
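For illustration only (not part of this commit): a minimal sketch of how the new experimental API fits together, assuming a mimalloc 2.x build (MI_MALLOC_VERSION >= 200) so that `mi_heap_new_in_arena` is declared as above; its implementation is not shown in this diff, and error handling is abbreviated.

// sketch: reserve an exclusive arena and bind a heap to it
#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  mi_arena_id_t arena_id;
  // Reserve 256 MiB of OS memory as an *exclusive* arena: only
  // allocations that request this arena id may be served from it.
  if (mi_reserve_os_memory_ex(256 * 1024 * 1024, /*commit*/ false,
                              /*allow_large*/ true, /*exclusive*/ true,
                              &arena_id) != 0) {
    fprintf(stderr, "failed to reserve arena\n");
    return 1;
  }
#if MI_MALLOC_VERSION >= 200
  // Create a heap whose segments come from that arena only.
  mi_heap_t* heap = mi_heap_new_in_arena(arena_id, /*exclusive*/ true);
  void* p = mi_heap_malloc(heap, 1024);
  mi_free(p);
  mi_heap_delete(heap);
#endif
  return 0;
}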
src/arena.c
@@ -1,5 +1,5 @@
 /* ----------------------------------------------------------------------------
-Copyright (c) 2019-2021, Microsoft Research, Daan Leijen
+Copyright (c) 2019-2022, Microsoft Research, Daan Leijen
 This is free software; you can redistribute it and/or modify it under the
 terms of the MIT license. A copy of the license can be found in the file
 "LICENSE" at the root of this distribution.
@@ -18,7 +18,7 @@ which is sometimes needed for embedded devices or shared memory for example.
 (We can also employ this with WASI or `sbrk` systems to reserve large arenas
 on demand and be able to reuse them efficiently).
 
-The arena allocation needs to be thread safe and we use an atomic bitmap to allocate. 
+The arena allocation needs to be thread safe and we use an atomic bitmap to allocate.
 -----------------------------------------------------------------------------*/
 #include "mimalloc.h"
 #include "mimalloc-internal.h"
@@ -45,16 +45,17 @@ bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
   Arena allocation
 ----------------------------------------------------------- */
 
 // Block info: bit 0 contains the `in_use` bit, the upper bits the
 // size in count of arena blocks.
 typedef uintptr_t mi_block_info_t;
 #define MI_ARENA_BLOCK_SIZE   (MI_SEGMENT_SIZE)        // 8MiB (must be at least MI_SEGMENT_ALIGN)
 #define MI_ARENA_MIN_OBJ_SIZE (MI_ARENA_BLOCK_SIZE/2)  // 4MiB
-#define MI_MAX_ARENAS         (64)                     // not more than 256 (since we use 8 bits in the memid)
+#define MI_MAX_ARENAS         (64)                     // not more than 126 (since we use 7 bits in the memid and an arena index + 1)
 
 // A memory arena descriptor
 typedef struct mi_arena_s {
+  mi_arena_id_t id;                       // arena id; 0 for non-specific
+  bool     exclusive;                     // only allow allocations if specifically for this arena
   _Atomic(uint8_t*) start;                // the start of the memory area
   size_t   block_count;                   // size of the area in arena blocks (of `MI_ARENA_BLOCK_SIZE`)
   size_t   field_count;                   // number of bitmap fields (where `field_count * MI_BITMAP_FIELD_BITS >= block_count`)
@@ -74,24 +75,60 @@ static mi_decl_cache_align _Atomic(mi_arena_t*) mi_arenas[MI_MAX_ARENAS];
 static mi_decl_cache_align _Atomic(size_t)      mi_arena_count; // = 0
 
 
+/* -----------------------------------------------------------
+  Arena id's
+  0 is used for non-arena's (like OS memory)
+  id = arena_index + 1
+----------------------------------------------------------- */
+
+static size_t mi_arena_id_index(mi_arena_id_t id) {
+  return (size_t)(id <= 0 ? MI_MAX_ARENAS : id - 1);
+}
+
+static mi_arena_id_t mi_arena_id_create(size_t arena_index) {
+  mi_assert_internal(arena_index >= 0 && arena_index < MI_MAX_ARENAS);
+  mi_assert_internal(MI_MAX_ARENAS <= 126);
+  int id = (int)arena_index + 1;
+  mi_assert_internal(id >= 1 && id <= 127);
+  return id;
+}
+
+mi_arena_id_t _mi_arena_id_none(void) {
+  return 0;
+}
+
+static bool mi_arena_id_suitable(mi_arena_id_t arena_id, bool exclusive, mi_arena_id_t req_arena_id) {
+  return (!exclusive || arena_id == req_arena_id);
+}
+
+
 /* -----------------------------------------------------------
   Arena allocations get a memory id where the lower 8 bits are
-  the arena index +1, and the upper bits the block index.
+  the arena id, and the upper bits the block index.
 ----------------------------------------------------------- */
 
 // Use `0` as a special id for direct OS allocated memory.
 #define MI_MEMID_OS   0
 
-static size_t mi_arena_id_create(size_t arena_index, mi_bitmap_index_t bitmap_index) {
-  mi_assert_internal(arena_index < 0xFE);
+static size_t mi_arena_memid_create(mi_arena_id_t id, bool exclusive, mi_bitmap_index_t bitmap_index) {
   mi_assert_internal(((bitmap_index << 8) >> 8) == bitmap_index); // no overflow?
-  return ((bitmap_index << 8) | ((arena_index+1) & 0xFF));
+  mi_assert_internal(id >= 0 && id <= 0x7F);
+  return ((bitmap_index << 8) | ((uint8_t)id & 0x7F) | (exclusive ? 0x80 : 0));
 }
 
-static void mi_arena_id_indices(size_t memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) {
-  mi_assert_internal(memid != MI_MEMID_OS);
-  *arena_index = (memid & 0xFF) - 1;
-  *bitmap_index = (memid >> 8);
+static bool mi_arena_memid_indices(size_t arena_memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) {
+  mi_assert_internal(arena_memid != MI_MEMID_OS);
+  *bitmap_index = (arena_memid >> 8);
+  mi_arena_id_t id = (int)(arena_memid & 0x7F);
+  *arena_index = mi_arena_id_index(id);
+  return ((arena_memid & 0x80) != 0);
 }
 
+bool _mi_arena_memid_suitable(size_t arena_memid, mi_arena_id_t request_arena_id) {
+  mi_assert_internal(arena_memid != MI_MEMID_OS);
+  mi_arena_id_t id = (int)(arena_memid & 0x7F);
+  bool exclusive = ((arena_memid & 0x80) != 0);
+  return mi_arena_id_suitable(id, exclusive, request_arena_id);
+}
+
 static size_t mi_block_count_of_size(size_t size) {
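As a worked example of the memid layout above (illustration only, not part of the commit): bits 0-6 hold the arena id (0 for OS memory, otherwise arena index + 1), bit 7 holds the exclusive flag, and bits 8 and up hold the block (bitmap) index. A standalone sketch mirroring `mi_arena_memid_create` and `mi_arena_memid_indices`:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

int main(void) {
  int    id           = 3;   // arena id, i.e. arena index 2 + 1
  int    exclusive    = 1;
  size_t bitmap_index = 42;  // block position inside the arena

  // encode, as in mi_arena_memid_create
  size_t memid = (bitmap_index << 8) | ((size_t)id & 0x7F) | (exclusive ? 0x80 : 0);

  // decode, as in mi_arena_memid_indices
  size_t decoded_index = (memid >> 8);
  int    decoded_id    = (int)(memid & 0x7F);
  int    was_exclusive = ((memid & 0x80) != 0);

  assert(decoded_index == 42 && decoded_id == 3 && was_exclusive == 1);
  printf("memid = 0x%zx\n", memid);  // prints memid = 0x2a83
  return 0;
}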
@@ -117,14 +154,19 @@ static bool mi_arena_alloc(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t*
 ----------------------------------------------------------- */
 
 static mi_decl_noinline void* mi_arena_alloc_from(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
-                                                  bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
+                                                  bool* commit, bool* large, bool* is_pinned, bool* is_zero,
+                                                  mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld)
 {
   MI_UNUSED(arena_index);
+  mi_assert_internal(mi_arena_id_index(arena->id) == arena_index);
+  if (!mi_arena_id_suitable(arena->id, arena->exclusive, req_arena_id)) return NULL;
+
   mi_bitmap_index_t bitmap_index;
   if (!mi_arena_alloc(arena, needed_bcount, &bitmap_index)) return NULL;
 
   // claimed it! set the dirty bits (todo: no need for an atomic op here?)
   void* p  = arena->start + (mi_bitmap_index_bit(bitmap_index)*MI_ARENA_BLOCK_SIZE);
-  *memid   = mi_arena_id_create(arena_index, bitmap_index);
+  *memid   = mi_arena_memid_create(arena->id, arena->exclusive, bitmap_index);
   *is_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
   *large   = arena->is_large;
   *is_pinned = (arena->is_large || !arena->allow_decommit);
@@ -149,7 +191,9 @@ static mi_decl_noinline void* mi_arena_alloc_from(mi_arena_t* arena, size_t aren
   return p;
 }
 
-static mi_decl_noinline void* mi_arena_allocate(int numa_node, size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
+static mi_decl_noinline void* mi_arena_allocate(int numa_node, size_t size, size_t alignment, bool* commit, bool* large,
+                                                bool* is_pinned, bool* is_zero,
+                                                mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld)
 {
   MI_UNUSED_RELEASE(alignment);
   mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
@@ -158,32 +202,43 @@ static mi_decl_noinline void* mi_arena_allocate(int numa_node, size_t size, size
   if mi_likely(max_arena == 0) return NULL;
   mi_assert_internal(size <= bcount*MI_ARENA_BLOCK_SIZE);
 
-  // try numa affine allocation
-  for (size_t i = 0; i < max_arena; i++) {
-    mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
-    if (arena==NULL) break; // end reached
-    if ((arena->numa_node<0 || arena->numa_node==numa_node) &&  // numa local?
-        (*large || !arena->is_large))                           // large OS pages allowed, or arena is not large OS pages
-    {
-      void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_pinned, is_zero, memid, tld);
-      mi_assert_internal((uintptr_t)p % alignment == 0);
-      if (p != NULL) {
-        return p;
-      }
-    }
-  }
-
-  // try from another numa node instead..
-  for (size_t i = 0; i < max_arena; i++) {
-    mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
-    if (arena==NULL) break; // end reached
-    if ((arena->numa_node>=0 && arena->numa_node!=numa_node) && // not numa local!
-        (*large || !arena->is_large))                           // large OS pages allowed, or arena is not large OS pages
-    {
-      void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_pinned, is_zero, memid, tld);
-      mi_assert_internal((uintptr_t)p % alignment == 0);
-      if (p != NULL) {
-        return p;
-      }
-    }
-  }
+  size_t arena_index = mi_arena_id_index(req_arena_id);
+  if (arena_index < MI_MAX_ARENAS) {
+    // try a specific arena if requested
+    mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[arena_index]);
+    if (arena != NULL &&
+        (arena->numa_node < 0 || arena->numa_node == numa_node) && // numa local?
+        (*large || !arena->is_large))                              // large OS pages allowed, or arena is not large OS pages
+    {
+      void* p = mi_arena_alloc_from(arena, arena_index, bcount, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
+      mi_assert_internal((uintptr_t)p % alignment == 0);
+      if (p != NULL) return p;
+    }
+  }
+  else {
+    // try numa affine allocation
+    for (size_t i = 0; i < max_arena; i++) {
+      mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
+      if (arena == NULL) break; // end reached
+      if ((arena->numa_node < 0 || arena->numa_node == numa_node) && // numa local?
+          (*large || !arena->is_large))                              // large OS pages allowed, or arena is not large OS pages
+      {
+        void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
+        mi_assert_internal((uintptr_t)p % alignment == 0);
+        if (p != NULL) return p;
+      }
+    }
+
+    // try from another numa node instead..
+    for (size_t i = 0; i < max_arena; i++) {
+      mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
+      if (arena == NULL) break; // end reached
+      if ((arena->numa_node >= 0 && arena->numa_node != numa_node) && // not numa local!
+          (*large || !arena->is_large))                               // large OS pages allowed, or arena is not large OS pages
+      {
+        void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
+        mi_assert_internal((uintptr_t)p % alignment == 0);
+        if (p != NULL) return p;
+      }
+    }
+  }
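The restructured control flow gives a requested arena strict priority: a specific request is served only from its own arena (and otherwise falls through to the OS), while a generic request scans numa-local arenas before remote ones. A toy model of that selection order (illustration only, not the commit's code; arena ids and node numbers are made up):

#include <stdbool.h>
#include <stdio.h>

typedef struct { int id; int numa_node; bool exclusive; } toy_arena_t;

static toy_arena_t arenas[] = {
  { 1, 0, true  },   // exclusive arena on numa node 0
  { 2, 1, false },   // shared arena on node 1
  { 3, 0, false },   // shared arena on node 0
};
#define N ((int)(sizeof(arenas)/sizeof(arenas[0])))

// mirrors mi_arena_id_suitable: exclusive arenas only serve their own id
static bool suitable(const toy_arena_t* a, int req_id) {
  return (!a->exclusive || a->id == req_id);
}

static int select_arena(int numa_node, int req_id) {
  if (req_id != 0) {                            // a specific arena was requested:
    const toy_arena_t* a = &arenas[req_id - 1]; // id = arena index + 1
    return suitable(a, req_id) ? a->id : 0;     // ...only that arena is tried
  }
  for (int pass = 0; pass < 2; pass++) {        // pass 0: numa-local, pass 1: remote
    for (int i = 0; i < N; i++) {
      bool local = (arenas[i].numa_node == numa_node);
      if ((pass == 0) == local && suitable(&arenas[i], req_id)) return arenas[i].id;
    }
  }
  return 0;                                     // no arena: fall back to the OS
}

int main(void) {
  printf("generic on node 0 -> arena %d\n", select_arena(0, 0)); // arena 3: local, not exclusive
  printf("specific id 1     -> arena %d\n", select_arena(0, 1)); // arena 1: its own exclusive arena
  return 0;
}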
@@ -192,7 +247,7 @@ static mi_decl_noinline void* mi_arena_allocate(int numa_node, size_t size, size
 
 
 void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero,
-                              size_t* memid, mi_os_tld_t* tld)
+                              mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld)
 {
   mi_assert_internal(commit != NULL && is_pinned != NULL && is_zero != NULL && memid != NULL && tld != NULL);
   mi_assert_internal(size > 0);
@@ -206,7 +261,7 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool*
 
   // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
   if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN) {
-    void* p = mi_arena_allocate(numa_node, size, alignment, commit, large, is_pinned, is_zero, memid, tld);
+    void* p = mi_arena_allocate(numa_node, size, alignment, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
     if (p != NULL) return p;
   }
@@ -222,9 +277,9 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool*
   return p;
 }
 
-void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
+void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld)
 {
-  return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, commit, large, is_pinned, is_zero, memid, tld);
+  return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
 }
 
 /* -----------------------------------------------------------
@@ -244,7 +299,7 @@ void _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_o
     // allocated in an arena
     size_t arena_idx;
     size_t bitmap_idx;
-    mi_arena_id_indices(memid, &arena_idx, &bitmap_idx);
+    mi_arena_memid_indices(memid, &arena_idx, &bitmap_idx);
     mi_assert_internal(arena_idx < MI_MAX_ARENAS);
     mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t,&mi_arenas[arena_idx]);
     mi_assert_internal(arena != NULL);
@@ -281,10 +336,11 @@ void _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_o
   Add an arena.
 ----------------------------------------------------------- */
 
-static bool mi_arena_add(mi_arena_t* arena) {
+static bool mi_arena_add(mi_arena_t* arena, mi_arena_id_t* arena_id) {
   mi_assert_internal(arena != NULL);
   mi_assert_internal((uintptr_t)mi_atomic_load_ptr_relaxed(uint8_t,&arena->start) % MI_SEGMENT_ALIGN == 0);
   mi_assert_internal(arena->block_count > 0);
+  if (arena_id != NULL) *arena_id = -1;
 
   size_t i = mi_atomic_increment_acq_rel(&mi_arena_count);
   if (i >= MI_MAX_ARENAS) {
@@ -292,11 +348,14 @@ static bool mi_arena_add(mi_arena_t* arena) {
     return false;
   }
   mi_atomic_store_ptr_release(mi_arena_t,&mi_arenas[i], arena);
+  arena->id = mi_arena_id_create(i);
+  if (arena_id != NULL) *arena_id = arena->id;
   return true;
 }
 
-bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept
+bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept
 {
+  if (arena_id != NULL) *arena_id = _mi_arena_id_none();
   if (size < MI_ARENA_BLOCK_SIZE) return false;
 
   if (is_large) {
@@ -311,6 +370,8 @@ bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_la
   mi_arena_t* arena = (mi_arena_t*)_mi_os_alloc(asize, &_mi_stats_main); // TODO: can we avoid allocating from the OS?
   if (arena == NULL) return false;
 
+  arena->id = _mi_arena_id_none();
+  arena->exclusive = exclusive;
   arena->block_count = bcount;
   arena->field_count = fields;
   arena->start = (uint8_t*)start;
@@ -335,18 +396,19 @@ bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_la
     _mi_bitmap_claim(arena->blocks_inuse, fields, post, postidx, NULL);
   }
 
-  mi_arena_add(arena);
-  return true;
+  return mi_arena_add(arena, arena_id);
 
 }
 
 // Reserve a range of regular OS memory
-int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept
+int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept
 {
+  if (arena_id != NULL) *arena_id = _mi_arena_id_none();
   size = _mi_align_up(size, MI_ARENA_BLOCK_SIZE); // at least one block
   bool large = allow_large;
   void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, &large, &_mi_stats_main);
   if (start==NULL) return ENOMEM;
-  if (!mi_manage_os_memory(start, size, (large || commit), large, true, -1)) {
+  if (!mi_manage_os_memory_ex(start, size, (large || commit), large, true, -1, exclusive, arena_id)) {
     _mi_os_free_ex(start, size, commit, &_mi_stats_main);
     _mi_verbose_message("failed to reserve %zu k memory\n", _mi_divide_up(size,1024));
     return ENOMEM;
@@ -355,6 +417,19 @@ int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noe
   return 0;
 }
 
+bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept {
+  return mi_manage_os_memory_ex(start, size, is_committed, is_large, is_zero, numa_node, false, NULL);
+}
+
+int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept {
+  return mi_reserve_os_memory_ex(size, commit, allow_large, false, NULL);
+}
+
+
 /* -----------------------------------------------------------
   Debugging
 ----------------------------------------------------------- */
 
 static size_t mi_debug_show_bitmap(const char* prefix, mi_bitmap_field_t* fields, size_t field_count ) {
   size_t inuse_count = 0;
   for (size_t i = 0; i < field_count; i++) {
@@ -383,11 +458,13 @@ void mi_debug_show_arenas(void) mi_attr_noexcept {
   }
 }
 
+
 /* -----------------------------------------------------------
   Reserve a huge page arena.
 ----------------------------------------------------------- */
 // reserve at a specific numa node
-int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept {
+int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
+  if (arena_id != NULL) *arena_id = -1;
   if (pages==0) return 0;
   if (numa_node < -1) numa_node = -1;
   if (numa_node >= 0) numa_node = numa_node % _mi_os_numa_node_count();
@@ -400,13 +477,16 @@ int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msec
   }
   _mi_verbose_message("numa node %i: reserved %zu GiB huge pages (of the %zu GiB requested)\n", numa_node, pages_reserved, pages);
 
-  if (!mi_manage_os_memory(p, hsize, true, true, true, numa_node)) {
+  if (!mi_manage_os_memory_ex(p, hsize, true, true, true, numa_node, exclusive, arena_id)) {
     _mi_os_free_huge_pages(p, hsize, &_mi_stats_main);
     return ENOMEM;
   }
   return 0;
 }
 
+int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept {
+  return mi_reserve_huge_os_pages_at_ex(pages, numa_node, timeout_msecs, false, NULL);
+}
+
 // reserve huge pages evenly among the given number of numa nodes (or use the available ones as detected)
 int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept {
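Illustration only (not part of the commit): reserving huge pages into an exclusive arena through the new `_ex` entry point; the page count, numa node, and timeout below are arbitrary.

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  mi_arena_id_t arena_id;
  // Try to reserve four 1 GiB huge pages on numa node 0 as an
  // exclusive arena, waiting up to 10 seconds for the OS.
  int err = mi_reserve_huge_os_pages_at_ex(/*pages*/ 4, /*numa_node*/ 0,
                                           /*timeout_msecs*/ 10000,
                                           /*exclusive*/ true, &arena_id);
  if (err != 0) {
    fprintf(stderr, "could not reserve huge pages (err %d)\n", err);
    return 1;
  }
  printf("huge-page arena id: %d\n", arena_id);
  return 0;
}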
src/region.c
@@ -49,9 +49,10 @@ bool _mi_os_reset(void* p, size_t size, mi_stats_t* stats);
 bool _mi_os_unreset(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
 
 // arena.c
+mi_arena_id_t _mi_arena_id_none(void);
 void  _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_stats_t* stats);
-void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
-void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
+void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
+void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
@@ -180,7 +181,7 @@ static bool mi_region_try_alloc_os(size_t blocks, bool commit, bool allow_large,
   bool is_zero = false;
   bool is_pinned = false;
   size_t arena_memid = 0;
-  void* const start = _mi_arena_alloc_aligned(MI_REGION_SIZE, MI_SEGMENT_ALIGN, &region_commit, &region_large, &is_pinned, &is_zero, &arena_memid, tld);
+  void* const start = _mi_arena_alloc_aligned(MI_REGION_SIZE, MI_SEGMENT_ALIGN, &region_commit, &region_large, &is_pinned, &is_zero, _mi_arena_id_none(), &arena_memid, tld);
   if (start == NULL) return false;
   mi_assert_internal(!(region_large && !allow_large));
   mi_assert_internal(!region_large || region_commit);
@@ -370,7 +371,7 @@ void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* l
   }
   if (p == NULL) {
     // and otherwise fall back to the OS
-    p = _mi_arena_alloc_aligned(size, alignment, commit, large, is_pinned, is_zero, &arena_memid, tld);
+    p = _mi_arena_alloc_aligned(size, alignment, commit, large, is_pinned, is_zero, _mi_arena_id_none(), &arena_memid, tld);
     *memid = mi_memid_create_from_arena(arena_memid);
   }
src/segment.c
@@ -795,7 +795,7 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_
     size_t memid = 0;
     segment = (mi_segment_t*)_mi_segment_cache_pop(segment_size, &commit_mask, &decommit_mask, &mem_large, &is_pinned, &is_zero, &memid, os_tld);
     if (segment==NULL) {
      segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, MI_SEGMENT_SIZE, &commit, &mem_large, &is_pinned, &is_zero, &memid, os_tld);
+      segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, MI_SEGMENT_SIZE, &commit, &mem_large, &is_pinned, &is_zero, _mi_arena_id_none(), &memid, os_tld);
       if (segment == NULL) return NULL;  // failed to allocate
       if (commit) {
         mi_commit_mask_create_full(&commit_mask);