Use rich memids to simplify the internal APIs and invariants

commit 0fc4de1440 (parent 0174d19af3)
@@ -116,9 +116,9 @@ void _mi_os_free_huge_pages(void* p, size_t size, mi_stats_t* stats);
 
 // arena.c
 mi_arena_id_t _mi_arena_id_none(void);
-void  _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset, mi_memid_t memid, size_t committed, mi_stats_t* stats);
-void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
-void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
+void  _mi_arena_free(void* p, size_t size, size_t still_committed_size, mi_memid_t memid, mi_stats_t* stats);
+void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
+void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
 bool  _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id);
 bool  _mi_arena_contains(const void* p);
 void  _mi_arena_collect(bool force_purge, mi_stats_t* stats);
@@ -322,16 +322,33 @@ typedef enum mi_page_kind_e {
 
 // Memory can reside in arena's, direct OS allocated, or statically allocated. The memid keeps track of this.
 typedef enum mi_memkind_e {
-  MI_MEM_NONE,
-  MI_MEM_OS,
-  MI_MEM_STATIC,
-  MI_MEM_ARENA
+  MI_MEM_NONE,      // not allocated
+  MI_MEM_EXTERNAL,  // not owned by mimalloc but provided externally (via `mi_manage_os_memory` for example)
+  MI_MEM_STATIC,    // allocated in a static area and should not be freed (for arena meta data for example)
+  MI_MEM_OS,        // allocated from the OS
+  MI_MEM_ARENA      // allocated from an arena (the usual case)
 } mi_memkind_t;
 
+typedef struct mi_memid_os_info {
+  size_t alignment;     // allocated with the given alignment
+  size_t align_offset;  // the offset that was aligned (used only for huge aligned pages)
+} mi_memid_os_info_t;
+
+typedef struct mi_memid_arena_info {
+  size_t block_index;   // index in the arena
+  mi_arena_id_t id;     // arena id (>= 1)
+  bool is_exclusive;    // the arena can only be used for specific arena allocations
+} mi_memid_arena_info_t;
+
 typedef struct mi_memid_s {
-  size_t arena_idx;
-  mi_arena_id_t arena_id;
-  bool arena_is_exclusive;
+  union {
+    mi_memid_os_info_t os;       // only used for MI_MEM_OS
+    mi_memid_arena_info_t arena; // only used for MI_MEM_ARENA
+  } mem;
+  bool is_pinned;     // `true` if we cannot decommit/reset/protect in this memory (e.g. when allocated using large OS pages)
+  bool is_large;      // `true` if the memory is in OS large (2MiB) or huge (1GiB) pages. (`is_pinned` will be true)
+  bool was_committed; // `true` if the memory was originally allocated as committed
+  bool was_zero;      // `true` if the memory was originally zero initialized
   mi_memkind_t memkind;
 } mi_memid_t;
 
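Note: the rich `mi_memid_t` bundles what previously traveled through separate `bool*` out-parameters and loose `arena_id`/`arena_idx` fields. A minimal sketch of how such a memid can be inspected; the `print_memid` helper is illustrative only (not part of the commit) and assumes the internal mimalloc headers plus `<stdio.h>`, with `mi_arena_id_t` being an `int` as in the current headers:

#include <stdio.h>

// Illustrative helper (not in the commit): decode a rich memid.
static void print_memid(mi_memid_t memid) {
  switch (memid.memkind) {
    case MI_MEM_ARENA:
      printf("arena %d, block %zu%s\n", memid.mem.arena.id,
             memid.mem.arena.block_index,
             memid.mem.arena.is_exclusive ? " (exclusive)" : "");
      break;
    case MI_MEM_OS:
      printf("OS memory, alignment %zu (offset %zu), %scommitted\n",
             memid.mem.os.alignment, memid.mem.os.align_offset,
             memid.was_committed ? "" : "not ");
      break;
    default:  // MI_MEM_NONE, MI_MEM_EXTERNAL, MI_MEM_STATIC
      printf("memkind %d (not freed through the arena layer)\n", (int)memid.memkind);
      break;
  }
}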
@@ -340,17 +357,12 @@ typedef struct mi_memid_s {
 // the OS. Inside segments we allocated fixed size _pages_ that
 // contain blocks.
 typedef struct mi_segment_s {
-  // memory fields
+  // constant fields
   mi_memid_t memid;        // id for the os-level memory manager
-  bool mem_is_pinned;      // `true` if we cannot decommit/reset/protect in this memory (i.e. when allocated using large OS pages)
-  bool mem_is_large;       // `true` if the memory is in OS large or huge pages. (`is_pinned` will be true)
-  bool mem_is_committed;   // `true` if the whole segment is eagerly committed
-  size_t mem_alignment;    // page alignment for huge pages (only used for alignment > MI_ALIGNMENT_MAX)
-  size_t mem_align_offset; // offset for huge page alignment (only used for alignment > MI_ALIGNMENT_MAX)
 
   bool allow_decommit;
   bool allow_purge;
+  size_t segment_size;     // for huge pages this may be different from `MI_SEGMENT_SIZE`
 
   // segment fields
   _Atomic(struct mi_segment_s*) abandoned_next;
   struct mi_segment_s* next; // must be the first segment field after abandoned_next -- see `segment.c:segment_init`
@@ -361,7 +373,6 @@ typedef struct mi_segment_s {
 
   size_t used;              // count of pages in use (`used <= capacity`)
   size_t capacity;          // count of available pages (`#free + used`)
-  size_t segment_size;      // for huge pages this may be different from `MI_SEGMENT_SIZE`
   size_t segment_info_size; // space we are using from the first page for segment meta-data and possible guard pages.
   uintptr_t cookie;         // verify addresses in secure mode: `_mi_ptr_cookie(segment) == segment->cookie`
 

src/arena.c (198 changed lines)
@@ -46,7 +46,7 @@ typedef struct mi_arena_s {
   _Atomic(uint8_t*) start;  // the start of the memory area
   size_t block_count;       // size of the area in arena blocks (of `MI_ARENA_BLOCK_SIZE`)
   size_t field_count;       // number of bitmap fields (where `field_count * MI_BITMAP_FIELD_BITS >= block_count`)
-  size_t meta_size;         // size of the arena structure itself including the bitmaps
+  size_t meta_size;         // size of the arena structure itself (including its bitmaps)
   mi_memid_t meta_memid;    // memid of the arena structure itself (OS or static allocation)
   int numa_node;            // associated NUMA node
   bool is_zero_init;        // is the arena zero initialized?
@@ -96,55 +96,42 @@ static bool mi_arena_id_is_suitable(mi_arena_id_t arena_id, bool arena_is_exclus
   memory id's
 ----------------------------------------------------------- */
 
-static mi_memid_t mi_arena_memid_none(void) {
+static mi_memid_t mi_memid_none(void) {
   mi_memid_t memid;
+  _mi_memzero(&memid, sizeof(memid));
   memid.memkind = MI_MEM_NONE;
-  memid.arena_id = 0;
-  memid.arena_idx = 0;
-  memid.arena_is_exclusive = false;
   return memid;
 }
 
-static mi_memid_t mi_arena_memid_os(void) {
-  mi_memid_t memid = mi_arena_memid_none();
+static mi_memid_t mi_memid_create(mi_memkind_t memkind) {
+  mi_memid_t memid = mi_memid_none();
+  memid.memkind = memkind;
+  return memid;
+}
+
+static mi_memid_t mi_memid_create_os(bool committed) {
+  mi_memid_t memid = mi_memid_none();
   memid.memkind = MI_MEM_OS;
+  memid.was_committed = committed;
   return memid;
 }
 
-static mi_memid_t mi_arena_memid_static(void) {
-  mi_memid_t memid = mi_arena_memid_none();
-  memid.memkind = MI_MEM_STATIC;
-  return memid;
-}
-
 
 bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id) {
-  // note: works also for OS and STATIC memory with a zero arena_id.
-  return mi_arena_id_is_suitable(memid.arena_id, memid.arena_is_exclusive, request_arena_id);
+  if (memid.memkind == MI_MEM_ARENA) {
+    return mi_arena_id_is_suitable(memid.mem.arena.id, memid.mem.arena.is_exclusive, request_arena_id);
+  }
+  else {
+    return mi_arena_id_is_suitable(0, false, request_arena_id);
+  }
 }
 
 
 /* -----------------------------------------------------------
   Arena allocations get a (currently) 16-bit memory id where the
   lower 8 bits are the arena id, and the upper bits the block index.
 ----------------------------------------------------------- */
 
-static mi_memid_t mi_arena_memid_create(mi_arena_id_t id, bool is_exclusive, mi_bitmap_index_t bitmap_index) {
-  mi_memid_t memid;
-  memid.memkind = MI_MEM_ARENA;
-  memid.arena_id = id;
-  memid.arena_idx = bitmap_index;
-  memid.arena_is_exclusive = is_exclusive;
-  return memid;
-}
-
-static bool mi_arena_memid_indices(mi_memid_t memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) {
-  mi_assert_internal(memid.memkind == MI_MEM_ARENA);
-  *arena_index = mi_arena_id_index(memid.arena_id);
-  *bitmap_index = memid.arena_idx;
-  return memid.arena_is_exclusive;
-}
-
 static size_t mi_block_count_of_size(size_t size) {
   return _mi_divide_up(size, MI_ARENA_BLOCK_SIZE);
 }
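Note: because `mi_memid_none` now zeroes the whole struct with `_mi_memzero` (instead of clearing a few fields by hand), a fresh memid has all flags false and a zeroed union, so zero-initialized storage is also a valid "none" memid. A hedged sketch of that invariant (illustrative, not code from the commit):

// Illustrative invariant check (not in the commit): a `none` memid is all zeros.
mi_memid_t memid = mi_memid_none();
mi_assert_internal(memid.memkind == MI_MEM_NONE);  // MI_MEM_NONE is the zero value
mi_assert_internal(!memid.is_pinned && !memid.is_large);
mi_assert_internal(!memid.was_committed && !memid.was_zero);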
@@ -157,6 +144,22 @@ static size_t mi_arena_size(mi_arena_t* arena) {
   return mi_arena_block_size(arena->block_count);
 }
 
+static mi_memid_t mi_memid_create_arena(mi_arena_id_t id, bool is_exclusive, mi_bitmap_index_t bitmap_index) {
+  mi_memid_t memid = mi_memid_create(MI_MEM_ARENA);
+  memid.mem.arena.id = id;
+  memid.mem.arena.block_index = bitmap_index;
+  memid.mem.arena.is_exclusive = is_exclusive;
+  return memid;
+}
+
+static bool mi_arena_memid_indices(mi_memid_t memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) {
+  mi_assert_internal(memid.memkind == MI_MEM_ARENA);
+  *arena_index = mi_arena_id_index(memid.mem.arena.id);
+  *bitmap_index = memid.mem.arena.block_index;
+  return memid.mem.arena.is_exclusive;
+}
+
 
 /* -----------------------------------------------------------
   Special static area for mimalloc internal structures
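Note: an arena memid now round-trips through the constructor and accessor pair above. A hedged sketch of the round trip (illustrative, not code from the commit; it assumes a claimed `arena` in scope and the internal `mi_bitmap_index_create(field, bit)` helper from bitmap.h):

// Illustrative round-trip (not in the commit): encode then decode an arena memid.
mi_bitmap_index_t bitmap_index = mi_bitmap_index_create(2, 5);  // bitmap field 2, bit 5
mi_memid_t memid = mi_memid_create_arena(arena->id, arena->exclusive, bitmap_index);

size_t arena_index; mi_bitmap_index_t bidx;
bool is_exclusive = mi_arena_memid_indices(memid, &arena_index, &bidx);
mi_assert_internal(arena_index == mi_arena_id_index(arena->id));
mi_assert_internal(bidx == bitmap_index);
mi_assert_internal(is_exclusive == arena->exclusive);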
@@ -170,7 +173,7 @@ static uint8_t mi_arena_static[MI_ARENA_STATIC_MAX];
 static _Atomic(size_t) mi_arena_static_top;
 
 static void* mi_arena_static_zalloc(size_t size, size_t alignment, mi_memid_t* memid) {
-  *memid = mi_arena_memid_static();
+  *memid = mi_memid_none();
   if (size == 0 || size > MI_ARENA_STATIC_MAX) return NULL;
   if (mi_atomic_load_relaxed(&mi_arena_static_top) >= MI_ARENA_STATIC_MAX) return NULL;
 
@@ -187,7 +190,7 @@ static void* mi_arena_static_zalloc(size_t size, size_t alignment, mi_memid_t* m
   }
 
   // success
-  *memid = mi_arena_memid_static();
+  *memid = mi_memid_create(MI_MEM_STATIC);
   const size_t start = _mi_align_up(oldtop, alignment);
   uint8_t* const p = &mi_arena_static[start];
   _mi_memzero(p, size);
@@ -195,20 +198,17 @@ static void* mi_arena_static_zalloc(size_t size, size_t alignment, mi_memid_t* m
 }
 
 static void* mi_arena_meta_zalloc(size_t size, mi_memid_t* memid, mi_stats_t* stats) {
-  *memid = mi_arena_memid_none();
+  *memid = mi_memid_none();
 
   // try static
   void* p = mi_arena_static_zalloc(size, MI_ALIGNMENT_MAX, memid);
-  if (p != NULL) {
-    *memid = mi_arena_memid_static();
-    return p;
-  }
+  if (p != NULL) return p;
 
   // or fall back to the OS
   bool is_zero = false;
   p = _mi_os_alloc(size, &is_zero, stats);
   if (p != NULL) {
-    *memid = mi_arena_memid_os();
+    *memid = mi_memid_create_os(true);
     if (!is_zero) { _mi_memzero(p, size); }
     return p;
   }
@@ -216,7 +216,7 @@ static void* mi_arena_meta_zalloc(size_t size, mi_memid_t* memid, mi_stats_t* st
   return NULL;
 }
 
-static void mi_arena_meta_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats) {
+static void mi_arena_meta_free(void* p, mi_memid_t memid, size_t size, mi_stats_t* stats) {
   if (memid.memkind == MI_MEM_OS) {
     _mi_os_free(p, size, stats);
   }
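Note: metadata allocation and free are now paired purely through the memid; the caller no longer needs to remember whether the block came from the static area or the OS. A minimal usage sketch (illustrative, not code from the commit; `_mi_stats_main` is the internal statistics object already used in this file):

// Illustrative pairing (not in the commit): the memid decides how to free.
mi_memid_t meta_memid;
void* meta = mi_arena_meta_zalloc(256, &meta_memid, &_mi_stats_main);
if (meta != NULL) {
  // ... use the zero-initialized metadata block ...
  mi_arena_meta_free(meta, meta_memid, 256, &_mi_stats_main);  // note: new argument order
}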
@@ -244,8 +244,7 @@ static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index
 ----------------------------------------------------------- */
 
 static mi_decl_noinline void* mi_arena_alloc_at(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
-                                                bool* commit, bool* large, bool* is_pinned, bool* is_zero,
-                                                mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
+                                                bool commit, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
 {
   MI_UNUSED(arena_index);
   mi_assert_internal(mi_arena_id_index(arena->id) == arena_index);
@@ -255,10 +254,10 @@ static mi_decl_noinline void* mi_arena_alloc_at(mi_arena_t* arena, size_t arena_
   if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index)) return NULL;
 
   // claimed it!
   void* p = arena->start + mi_arena_block_size(mi_bitmap_index_bit(bitmap_index));
-  *memid = mi_arena_memid_create(arena->id, arena->exclusive, bitmap_index);
-  *large = arena->is_large;
-  *is_pinned = (arena->is_large || !arena->allow_decommit);
+  *memid = mi_memid_create_arena(arena->id, arena->exclusive, bitmap_index);
+  memid->is_large = arena->is_large;
+  memid->is_pinned = (arena->is_large || !arena->allow_decommit);
 
   // none of the claimed blocks should be scheduled for a decommit
   if (arena->blocks_purge != NULL) {
@@ -267,26 +266,31 @@ static mi_decl_noinline void* mi_arena_alloc_at(mi_arena_t* arena, size_t arena_
   }
 
   // set the dirty bits (todo: no need for an atomic op here?)
-  *is_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
+  memid->was_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
 
   // set commit state
   if (arena->blocks_committed == NULL) {
     // always committed
-    *commit = true;
+    memid->was_committed = true;
   }
-  else if (*commit) {
+  else if (commit) {
     // commit requested, but the range may not be committed as a whole: ensure it is committed now
+    memid->was_committed = true;
     bool any_uncommitted;
     _mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted);
     if (any_uncommitted) {
-      bool commit_zero;
-      _mi_os_commit(p, mi_arena_block_size(needed_bcount), &commit_zero, tld->stats);
-      if (commit_zero) { *is_zero = true; }
+      bool commit_zero = false;
+      if (!_mi_os_commit(p, mi_arena_block_size(needed_bcount), &commit_zero, tld->stats)) {
+        memid->was_committed = false;
+      }
+      else {
+        if (commit_zero) { memid->was_zero = true; }
+      }
     }
   }
   else {
     // no need to commit, but check if already fully committed
-    *commit = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
+    memid->was_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
   }
 
   // mi_track_mem_undefined(p,mi_arena_block_size(needed_bcount));
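Note the changed contract here: `commit` is now a pure request passed by value, and the actual commit state comes back in `memid->was_committed`, which can be false even when commit was requested (when `_mi_os_commit` fails). A hedged caller-side fragment (illustrative, not code from the commit; `arena`, `arena_index`, `bcount`, `req_arena_id`, and `tld` are assumed in scope as in the surrounding file):

// Illustrative check (not in the commit): consult the memid, not the
// requested `commit` flag, before touching the memory.
mi_memid_t memid;
void* p = mi_arena_alloc_at(arena, arena_index, bcount, /*commit*/ true,
                            req_arena_id, &memid, tld);
if (p != NULL && !memid.was_committed) {
  // commit was requested but did not fully succeed:
  // the caller must commit (at least) the portion it needs before first use.
}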
@@ -295,7 +299,7 @@ static mi_decl_noinline void* mi_arena_alloc_at(mi_arena_t* arena, size_t arena_
 
 // allocate in a speficic arena
 static void* mi_arena_alloc_at_id(mi_arena_id_t arena_id, int numa_node, size_t size, size_t alignment,
-                                  bool* commit, bool* large, bool* is_pinned, bool* is_zero,
+                                  bool commit, bool allow_large,
                                   mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
 {
   MI_UNUSED_RELEASE(alignment);
@@ -310,14 +314,14 @@ static void* mi_arena_alloc_at_id(mi_arena_id_t arena_id, int numa_node, size_t
   mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[arena_index]);
   if (arena == NULL) return NULL;
   if (arena->numa_node >= 0 && arena->numa_node != numa_node) return NULL;
-  if (!(*large) && arena->is_large) return NULL;
-  return mi_arena_alloc_at(arena, arena_index, bcount, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
+  if (!allow_large && arena->is_large) return NULL;
+  return mi_arena_alloc_at(arena, arena_index, bcount, commit, req_arena_id, memid, tld);
 }
 
 
 // allocate from an arena with fallback to the OS
-static mi_decl_noinline void* mi_arenas_alloc(int numa_node, size_t size, size_t alignment, bool* commit, bool* large,
-                                              bool* is_pinned, bool* is_zero,
+static mi_decl_noinline void* mi_arenas_alloc(int numa_node, size_t size, size_t alignment,
+                                              bool commit, bool allow_large,
                                               mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
 {
   MI_UNUSED(alignment);
@@ -333,9 +337,9 @@ static mi_decl_noinline void* mi_arenas_alloc(int numa_node, size_t size, size_t
     mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_index]);
     if ((arena != NULL) &&
         // (arena->numa_node < 0 || arena->numa_node == numa_node) && // numa local?
-        (*large || !arena->is_large)) // large OS pages allowed, or the arena does not consist of large OS pages
+        (allow_large || !arena->is_large)) // large OS pages allowed, or the arena does not consist of large OS pages
     {
-      void* p = mi_arena_alloc_at(arena, arena_index, bcount, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
+      void* p = mi_arena_alloc_at(arena, arena_index, bcount, commit, req_arena_id, memid, tld);
       mi_assert_internal((uintptr_t)p % alignment == 0);
       if (p != NULL) return p;
     }
@@ -346,9 +350,9 @@ static mi_decl_noinline void* mi_arenas_alloc(int numa_node, size_t size, size_t
     mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
     if (arena != NULL &&
         (arena->numa_node < 0 || arena->numa_node == numa_node) && // numa local?
-        (*large || !arena->is_large)) // large OS pages allowed, or the arena does not consist of large OS pages
+        (allow_large || !arena->is_large)) // large OS pages allowed, or the arena does not consist of large OS pages
     {
-      void* p = mi_arena_alloc_at(arena, i, bcount, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
+      void* p = mi_arena_alloc_at(arena, i, bcount, commit, req_arena_id, memid, tld);
       mi_assert_internal((uintptr_t)p % alignment == 0);
       if (p != NULL) return p;
     }
@@ -359,9 +363,9 @@ static mi_decl_noinline void* mi_arenas_alloc(int numa_node, size_t size, size_t
     mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
     if (arena != NULL &&
         (arena->numa_node >= 0 && arena->numa_node != numa_node) && // not numa local!
-        (*large || !arena->is_large)) // large OS pages allowed, or the arena does not consist of large OS pages
+        (allow_large || !arena->is_large)) // large OS pages allowed, or the arena does not consist of large OS pages
     {
-      void* p = mi_arena_alloc_at(arena, i, bcount, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
+      void* p = mi_arena_alloc_at(arena, i, bcount, commit, req_arena_id, memid, tld);
       mi_assert_internal((uintptr_t)p % alignment == 0);
       if (p != NULL) return p;
     }
@@ -400,48 +404,53 @@ static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t re
 }
 
 
-void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool* commit, bool* large, bool* is_pinned, bool* is_zero,
+void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large,
                               mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
 {
-  mi_assert_internal(commit != NULL && is_pinned != NULL && is_zero != NULL && memid != NULL && tld != NULL);
+  mi_assert_internal(memid != NULL && tld != NULL);
   mi_assert_internal(size > 0);
-  *memid = mi_arena_memid_none();
-  *is_zero = false;
-  *is_pinned = false;
-
-  bool default_large = false;
-  if (large == NULL) large = &default_large; // ensure `large != NULL`
+  *memid = mi_memid_none();
+
   const int numa_node = _mi_os_numa_node(tld); // current numa node
 
   // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
   if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) {
-    void* p = mi_arenas_alloc(numa_node, size, alignment, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
+    void* p = mi_arenas_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
    if (p != NULL) return p;
 
     // otherwise, try to first eagerly reserve a new arena
     mi_arena_id_t arena_id = 0;
-    if (mi_arena_reserve(size,*large,req_arena_id,&arena_id)) {
+    if (mi_arena_reserve(size,allow_large,req_arena_id,&arena_id)) {
       // and try allocate in there
-      p = mi_arena_alloc_at_id(arena_id, numa_node, size, alignment, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
+      p = mi_arena_alloc_at_id(arena_id, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
       if (p != NULL) return p;
     }
   }
 
-  // finally, fall back to the OS
+  // if we cannot use OS allocation, return NULL
  if (mi_option_is_enabled(mi_option_limit_os_alloc) || req_arena_id != _mi_arena_id_none()) {
     errno = ENOMEM;
     return NULL;
   }
 
-  *memid = mi_arena_memid_os();
-  void* p = _mi_os_alloc_aligned_offset(size, alignment, align_offset, *commit, large, is_zero, tld->stats);
-  if (p != NULL) { *is_pinned = *large; }
+  // finally, fall back to the OS
+  bool os_large = allow_large;
+  bool os_is_zero = false;
+  void* p = _mi_os_alloc_aligned_offset(size, alignment, align_offset, commit, &os_large, &os_is_zero, tld->stats);
+  if (p != NULL) {
+    *memid = mi_memid_create_os(commit);
+    memid->is_large = os_large;
+    memid->is_pinned = os_large;
+    memid->was_zero = os_is_zero;
+    memid->mem.os.alignment = alignment;
+    memid->mem.os.align_offset = align_offset;
+  }
   return p;
 }
 
-void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
+void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
 {
-  return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, large, is_pinned, is_zero, req_arena_id, memid, tld);
+  return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, allow_large, req_arena_id, memid, tld);
 }
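With this change the allocation entry points take `commit`/`allow_large` by value and report everything back through the memid. A hedged sketch of a caller under the new convention (illustrative, not code from the commit; `os_tld` and `stats` are assumed in scope):

// Illustrative caller (not in the commit): allocate, then read the results
// from the returned memid instead of from bool* out-parameters.
mi_memid_t memid;
size_t size = MI_ARENA_MIN_OBJ_SIZE;
void* p = _mi_arena_alloc(size, /*commit*/ true, /*allow_large*/ false,
                          _mi_arena_id_none(), &memid, os_tld);
if (p != NULL) {
  bool zeroed = memid.was_zero;   // replaces the old `is_zero` out-parameter
  bool pinned = memid.is_pinned;  // replaces the old `is_pinned` out-parameter
  MI_UNUSED(zeroed); MI_UNUSED(pinned);
  // ... use the memory, then free with the same memid; the still-committed
  // size depends on whether the commit actually succeeded:
  _mi_arena_free(p, size, (memid.was_committed ? size : 0), memid, stats);
}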
@@ -621,27 +630,28 @@ static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats )
   Arena free
 ----------------------------------------------------------- */
 
-void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset, mi_memid_t memid, size_t committed_size, mi_stats_t* stats) {
+void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memid, mi_stats_t* stats) {
   mi_assert_internal(size > 0 && stats != NULL);
   mi_assert_internal(committed_size <= size);
   if (p==NULL) return;
   if (size==0) return;
   const bool all_committed = (committed_size == size);
 
-  if (memid.memkind == MI_MEM_STATIC) {
-    // nothing to do
-  }
-  else if (memid.memkind == MI_MEM_OS) {
+  if (memid.memkind == MI_MEM_OS) {
     // was a direct OS allocation, pass through
     if (!all_committed && committed_size > 0) {
       // if partially committed, adjust the committed stats
       _mi_stat_decrease(&stats->committed, committed_size);
     }
-    _mi_os_free_aligned(p, size, alignment, align_offset, all_committed, stats);
+    if (memid.mem.os.align_offset != 0) {
+      _mi_os_free_aligned(p, size, memid.mem.os.alignment, memid.mem.os.align_offset, all_committed, stats);
+    }
+    else {
+      _mi_os_free(p, size, stats);
+    }
   }
-  else {
+  else if (memid.memkind == MI_MEM_ARENA) {
     // allocated in an arena
-    mi_assert_internal(align_offset == 0);
     size_t arena_idx;
     size_t bitmap_idx;
     mi_arena_memid_indices(memid, &arena_idx, &bitmap_idx);
@@ -696,6 +706,10 @@ void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset,
       return;
     };
   }
+  else {
+    // arena was none, external, or static; nothing to do
+    mi_assert_internal(memid.memkind <= MI_MEM_STATIC);
+  }
 
   // purge expired decommits
   mi_arenas_try_purge(false, false, stats);
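This catch-all leans on the new enum ordering (`MI_MEM_NONE < MI_MEM_EXTERNAL < MI_MEM_STATIC < MI_MEM_OS < MI_MEM_ARENA`): every kind up to `MI_MEM_STATIC` is memory the arena layer must not release. A hypothetical predicate making that invariant explicit (not in the commit):

// Hypothetical helper (not in the commit): true when _mi_arena_free
// actually needs to release memory for this memid.
static inline bool mi_memid_needs_free(mi_memid_t memid) {
  return (memid.memkind == MI_MEM_OS || memid.memkind == MI_MEM_ARENA);
}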
@@ -716,12 +730,12 @@ static void mi_arenas_unsafe_destroy(void) {
       }
       else {
        _mi_os_free(arena->start, mi_arena_size(arena), &_mi_stats_main);
      }
-      mi_arena_meta_free(arena, arena->meta_size, arena->meta_memid, &_mi_stats_main);
    }
    else {
      new_max_arena = i;
    }
+    mi_arena_meta_free(arena, arena->meta_memid, arena->meta_size, &_mi_stats_main);
  }
 }
 

src/segment.c
@@ -201,7 +201,7 @@ static void mi_segment_protect(mi_segment_t* segment, bool protect, mi_os_tld_t*
     // and protect the last (or only) page too
     mi_assert_internal(MI_SECURE <= 1 || segment->page_kind >= MI_PAGE_LARGE);
     uint8_t* start = (uint8_t*)segment + segment->segment_size - os_psize;
-    if (protect && !segment->mem_is_committed) {
+    if (protect && !segment->memid.was_committed) {
       if (protect) {
         // ensure secure page is committed
         if (_mi_os_commit(start, os_psize, NULL, tld->stats)) { // if this fails that is ok (as it is an unaccessible page)
@@ -331,7 +331,7 @@ static void mi_page_purge_remove(mi_page_t* page, mi_segments_tld_t* tld) {
 }
 
 static void mi_segment_remove_all_purges(mi_segment_t* segment, bool force_purge, mi_segments_tld_t* tld) {
-  if (segment->mem_is_pinned) return; // never reset in huge OS pages
+  if (segment->memid.is_pinned) return; // never reset in huge OS pages
   for (size_t i = 0; i < segment->capacity; i++) {
     mi_page_t* page = &segment->pages[i];
     if (!page->segment_in_use) {
@@ -474,23 +474,23 @@ static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_se
   _mi_segment_map_freed_at(segment);
   mi_segments_track_size(-((long)segment_size),tld);
   if (MI_SECURE != 0) {
-    mi_assert_internal(!segment->mem_is_pinned);
+    mi_assert_internal(!segment->memid.is_pinned);
     mi_segment_protect(segment, false, tld->os); // ensure no more guard pages are set
   }
 
   bool fully_committed = true;
-  size_t committed = 0;
+  size_t committed_size = 0;
   const size_t page_size = mi_segment_raw_page_size(segment);
   for (size_t i = 0; i < segment->capacity; i++) {
     mi_page_t* page = &segment->pages[i];
-    if (page->is_committed)  { committed += page_size; }
+    if (page->is_committed)  { committed_size += page_size; }
     if (!page->is_committed) { fully_committed = false; }
   }
   MI_UNUSED(fully_committed);
-  mi_assert_internal((fully_committed && committed == segment_size) || (!fully_committed && committed < segment_size));
+  mi_assert_internal((fully_committed && committed_size == segment_size) || (!fully_committed && committed_size < segment_size));
 
   _mi_abandoned_await_readers(); // prevent ABA issue if concurrent readers try to access our memory (that might be purged)
-  _mi_arena_free(segment, segment_size, segment->mem_alignment, segment->mem_align_offset, segment->memid, committed, tld->stats);
+  _mi_arena_free(segment, segment_size, committed_size, segment->memid, tld->stats);
 }
 
 // called by threads that are terminating to free cached segments
@@ -509,47 +509,42 @@ void _mi_segment_thread_collect(mi_segments_tld_t* tld) {
   Segment allocation
 ----------------------------------------------------------- */
 
 static mi_segment_t* mi_segment_os_alloc(bool eager_delayed, size_t page_alignment, mi_arena_id_t req_arena_id,
-                                         size_t pre_size, size_t info_size,
-                                         size_t* segment_size, bool* is_zero, bool* commit, mi_segments_tld_t* tld, mi_os_tld_t* tld_os)
+                                         size_t pre_size, size_t info_size, bool commit, size_t segment_size,
+                                         mi_segments_tld_t* tld, mi_os_tld_t* tld_os)
 {
   mi_memid_t memid;
-  bool mem_large = (!eager_delayed && (MI_SECURE == 0)); // only allow large OS pages once we are no longer lazy
-  bool is_pinned = false;
+  bool allow_large = (!eager_delayed && (MI_SECURE == 0)); // only allow large OS pages once we are no longer lazy
   size_t align_offset = 0;
   size_t alignment = MI_SEGMENT_SIZE;
   if (page_alignment > 0) {
     alignment = page_alignment;
     align_offset = _mi_align_up(pre_size, MI_SEGMENT_SIZE);
-    *segment_size = *segment_size + (align_offset - pre_size);
+    segment_size = segment_size + (align_offset - pre_size); // adjust the segment size
   }
 
-  // mi_segment_t* segment = (mi_segment_t*)_mi_mem_alloc_aligned(*segment_size, alignment, align_offset, commit, &mem_large, &is_pinned, is_zero, &memid, tld_os);
-  mi_segment_t* segment = (mi_segment_t*)_mi_arena_alloc_aligned(*segment_size, alignment, align_offset, commit, &mem_large, &is_pinned, is_zero, req_arena_id, &memid, tld_os);
-  if (segment == NULL) return NULL;  // failed to allocate
-  if (!(*commit)) {
+  mi_segment_t* segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, alignment, align_offset, commit, allow_large, req_arena_id, &memid, tld_os);
+  if (segment == NULL) {
+    return NULL;  // failed to allocate
+  }
+
+  if (!memid.was_committed) {
     // ensure the initial info is committed
-    mi_assert_internal(!mem_large && !is_pinned);
-    bool commit_zero = false;
-    bool ok = _mi_os_commit(segment, pre_size, &commit_zero, tld_os->stats);
-    if (commit_zero) { *is_zero = true; }
+    mi_assert_internal(!memid.is_large && !memid.is_pinned);
+    bool ok = _mi_os_commit(segment, pre_size, NULL, tld_os->stats);
     if (!ok) {
       // commit failed; we cannot touch the memory: free the segment directly and return `NULL`
-      _mi_arena_free(segment, *segment_size, alignment, align_offset, memid, false, tld_os->stats);
+      _mi_arena_free(segment, segment_size, 0, memid, tld_os->stats);
       return NULL;
     }
   }
 
   mi_track_mem_undefined(segment, info_size); MI_UNUSED(info_size);
   segment->memid = memid;
-  segment->mem_is_pinned = is_pinned;
-  segment->mem_is_large = mem_large;
-  segment->mem_is_committed = commit;
-  segment->mem_alignment = alignment;
-  segment->mem_align_offset = align_offset;
-  segment->allow_decommit = !segment->mem_is_pinned && !segment->mem_is_large;
+  segment->allow_decommit = !memid.is_pinned && !memid.is_large;
   segment->allow_purge = segment->allow_decommit && mi_option_is_enabled(mi_option_allow_purge);
-  mi_segments_track_size((long)(*segment_size), tld);
+  segment->segment_size = segment_size;
+  mi_segments_track_size((long)(segment_size), tld);
   _mi_segment_map_allocated_at(segment);
   return segment;
 }
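The five `mem_*` segment fields collapse into the embedded memid plus derived policy bits. A hedged summary of the mapping (illustrative, not code from the commit; `segment` is assumed in scope):

// Illustrative mapping (not in the commit) from old fields to the rich memid:
//   segment->mem_is_pinned    -> segment->memid.is_pinned
//   segment->mem_is_large     -> segment->memid.is_large
//   segment->mem_is_committed -> segment->memid.was_committed
//   segment->mem_alignment    -> segment->memid.mem.os.alignment
//   segment->mem_align_offset -> segment->memid.mem.os.align_offset
bool may_purge = !segment->memid.is_pinned && !segment->memid.is_large
                 && mi_option_is_enabled(mi_option_allow_purge);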
@@ -576,8 +571,8 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
   }
   size_t info_size;
   size_t pre_size;
-  size_t segment_size = mi_segment_calculate_sizes(capacity, required, &pre_size, &info_size);
-  mi_assert_internal(segment_size >= required);
+  const size_t init_segment_size = mi_segment_calculate_sizes(capacity, required, &pre_size, &info_size);
+  mi_assert_internal(init_segment_size >= required);
 
   // Initialize parameters
   const bool eager_delayed = (page_kind <= MI_PAGE_MEDIUM &&          // don't delay for large objects
|
|||||||
_mi_current_thread_count() > 1 && // do not delay for the first N threads
|
_mi_current_thread_count() > 1 && // do not delay for the first N threads
|
||||||
tld->peak_count < (size_t)mi_option_get(mi_option_eager_commit_delay));
|
tld->peak_count < (size_t)mi_option_get(mi_option_eager_commit_delay));
|
||||||
const bool eager = !eager_delayed && mi_option_is_enabled(mi_option_eager_commit);
|
const bool eager = !eager_delayed && mi_option_is_enabled(mi_option_eager_commit);
|
||||||
bool commit = eager; // || (page_kind >= MI_PAGE_LARGE);
|
const bool init_commit = eager; // || (page_kind >= MI_PAGE_LARGE);
|
||||||
bool is_zero = false;
|
|
||||||
|
|
||||||
// Allocate the segment from the OS (segment_size can change due to alignment)
|
// Allocate the segment from the OS (segment_size can change due to alignment)
|
||||||
mi_segment_t* segment = mi_segment_os_alloc(eager_delayed, page_alignment, req_arena_id, pre_size, info_size, &segment_size, &is_zero, &commit, tld, os_tld);
|
mi_segment_t* segment = mi_segment_os_alloc(eager_delayed, page_alignment, req_arena_id, pre_size, info_size, init_commit, init_segment_size, tld, os_tld);
|
||||||
if (segment == NULL) return NULL;
|
if (segment == NULL) return NULL;
|
||||||
mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0);
|
mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0);
|
||||||
mi_assert_internal(segment->mem_is_pinned ? segment->mem_is_committed : true);
|
mi_assert_internal(segment->memid.is_pinned ? segment->memid.was_committed : true);
|
||||||
|
|
||||||
mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL); // tsan
|
mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL); // tsan
|
||||||
|
|
||||||
// zero the segment info (but not the `mem` fields)
|
// zero the segment info (but not the `mem` fields)
|
||||||
ptrdiff_t ofs = offsetof(mi_segment_t, next);
|
ptrdiff_t ofs = offsetof(mi_segment_t, next);
|
||||||
memset((uint8_t*)segment + ofs, 0, info_size - ofs);
|
_mi_memzero((uint8_t*)segment + ofs, info_size - ofs);
|
||||||
|
|
||||||
// initialize pages info
|
// initialize pages info
|
||||||
for (size_t i = 0; i < capacity; i++) {
|
for (size_t i = 0; i < capacity; i++) {
|
||||||
mi_assert_internal(i <= 255);
|
mi_assert_internal(i <= 255);
|
||||||
segment->pages[i].segment_idx = (uint8_t)i;
|
segment->pages[i].segment_idx = (uint8_t)i;
|
||||||
segment->pages[i].is_committed = commit;
|
segment->pages[i].is_committed = segment->memid.was_committed;
|
||||||
segment->pages[i].is_zero_init = is_zero;
|
segment->pages[i].is_zero_init = segment->memid.was_zero;
|
||||||
}
|
}
|
||||||
|
|
||||||
// initialize
|
// initialize
|
||||||
segment->page_kind = page_kind;
|
segment->page_kind = page_kind;
|
||||||
segment->capacity = capacity;
|
segment->capacity = capacity;
|
||||||
segment->page_shift = page_shift;
|
segment->page_shift = page_shift;
|
||||||
segment->segment_size = segment_size;
|
|
||||||
segment->segment_info_size = pre_size;
|
segment->segment_info_size = pre_size;
|
||||||
segment->thread_id = _mi_thread_id();
|
segment->thread_id = _mi_thread_id();
|
||||||
segment->cookie = _mi_ptr_cookie(segment);
|
segment->cookie = _mi_ptr_cookie(segment);
|
||||||
// _mi_stat_increase(&tld->stats->page_committed, segment->segment_info_size);
|
|
||||||
|
|
||||||
// set protection
|
// set protection
|
||||||
mi_segment_protect(segment, true, tld->os);
|
mi_segment_protect(segment, true, tld->os);
|
||||||
|
|
||||||
@@ -626,7 +618,6 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
     mi_segment_insert_in_free_queue(segment, tld);
   }
 
-  //fprintf(stderr,"mimalloc: alloc segment at %p\n", (void*)segment);
   return segment;
 }