ensure huge pages are part of the segment cache to avoid mmap calls
parent e5e2b789ba
commit 43568fa422
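This commit replaces the per-thread segment cache, previously a singly-linked list that only held fixed-size (`MI_SEGMENT_SIZE`) segments, with a doubly-linked `mi_segment_queue_t` kept ordered by ascending segment size. That lets huge-page segments of varying sizes be cached and reused via a best-fit scan instead of always being unmapped. Below is a minimal stand-alone sketch of that idea; the names (`segment_t`, `seg_queue_t`, `cache_find`, `cache_insert`) are illustrative stand-ins, not mimalloc's actual types.

```c
// Sketch of a size-ordered segment cache (illustrative types and names, not mimalloc's).
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct segment_s {
  size_t size;               // segment size in bytes
  struct segment_s* next;    // doubly-linked queue links
  struct segment_s* prev;
} segment_t;

typedef struct seg_queue_s {
  segment_t* first;
  segment_t* last;
} seg_queue_t;

// Link `seg` in front of `elem` (or at the tail when `elem` is NULL).
static void queue_insert_before(seg_queue_t* q, segment_t* elem, segment_t* seg) {
  seg->prev = (elem == NULL ? q->last : elem->prev);
  if (seg->prev != NULL) seg->prev->next = seg; else q->first = seg;
  seg->next = elem;
  if (seg->next != NULL) seg->next->prev = seg; else q->last = seg;
}

static void queue_remove(seg_queue_t* q, segment_t* seg) {
  if (seg->prev != NULL) seg->prev->next = seg->next; else q->first = seg->next;
  if (seg->next != NULL) seg->next->prev = seg->prev; else q->last = seg->prev;
  seg->prev = seg->next = NULL;
}

// Return (and unlink) the first cached segment of at least `required` bytes.
// Scanning forward from the smallest gives a best fit; scanning backward from
// the largest (with required == 0) gives an eviction victim.
static segment_t* cache_find(seg_queue_t* q, size_t required, bool from_back) {
  segment_t* s = (from_back ? q->last : q->first);
  while (s != NULL) {
    if (s->size >= required) { queue_remove(q, s); return s; }
    s = (from_back ? s->prev : s->next);
  }
  return NULL;
}

// Insert a freed segment at its sorted position so the queue stays ordered by size.
static void cache_insert(seg_queue_t* q, segment_t* seg) {
  segment_t* pos = q->first;
  while (pos != NULL && pos->size < seg->size) pos = pos->next;
  queue_insert_before(q, pos, seg);
}

int main(void) {
  seg_queue_t cache = { NULL, NULL };
  segment_t a = { 4u << 20, NULL, NULL };   // 4 MiB
  segment_t b = { 32u << 20, NULL, NULL };  // 32 MiB (e.g. a huge-page segment)
  segment_t c = { 8u << 20, NULL, NULL };   // 8 MiB
  cache_insert(&cache, &a);
  cache_insert(&cache, &b);
  cache_insert(&cache, &c);                 // queue order: 4 MiB, 8 MiB, 32 MiB
  segment_t* hit = cache_find(&cache, 6u << 20, false);
  printf("%zu\n", (hit != NULL ? hit->size : (size_t)0));  // prints 8388608 (the 8 MiB best fit)
  return 0;
}
```

With `required == 0` the scan returns the first segment it reaches, so the forward scan drains the cache from the small end (as the new `_mi_segment_thread_collect` does) while the backward scan pops the largest segment (as `mi_segment_cache_evict` does).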
@@ -25,7 +25,7 @@ terms of the MIT license. A copy of the license can be found in the file
// Define MI_SECURE as 1 to encode free lists
// #define MI_SECURE 1

#if !defined(MI_SECURE)
#define MI_SECURE 0
#endif

@@ -104,11 +104,11 @@ terms of the MIT license. A copy of the license can be found in the file
#error "define more bins"
#endif

typedef uintptr_t mi_encoded_t;

// free lists contain blocks
typedef struct mi_block_s {
  mi_encoded_t next;
} mi_block_t;

@@ -175,7 +175,7 @@ typedef struct mi_page_s {
  mi_block_t* free;     // list of available free blocks (`malloc` allocates from this list)
  uintptr_t cookie;     // random cookie to encode the free lists
  size_t used;          // number of blocks in use (including blocks in `local_free` and `thread_free`)

  mi_block_t* local_free;                  // list of deferred free blocks by this thread (migrates to `free`)
  volatile uintptr_t thread_freed;         // at least this number of blocks are in `thread_free`
  volatile mi_thread_free_t thread_free;   // list of deferred free blocks freed by other threads
@@ -373,7 +373,7 @@ typedef struct mi_segments_tld_s {
  size_t count;         // current number of segments
  size_t peak;          // peak number of segments
  size_t cache_count;   // number of segments in the cache
-  mi_segment_t* cache;       // small cache of segments (to avoid repeated mmap calls)
+  mi_segment_queue_t cache;  // (small) cache of segments for small and large pages (to avoid repeated mmap calls)
  mi_stats_t* stats;    // points to tld stats
} mi_segments_tld_t;

@@ -390,7 +390,7 @@ typedef struct mi_os_tld_s {
struct mi_tld_s {
  unsigned long long heartbeat;   // monotonic heartbeat count
  mi_heap_t* heap_backing;        // backing heap of this thread (cannot be deleted)
  mi_segments_tld_t segments;     // segment tld
  mi_os_tld_t os;                 // os tld
  mi_stats_t stats;               // statistics
};
@@ -90,7 +90,7 @@ mi_decl_thread mi_heap_t* _mi_heap_default = (mi_heap_t*)&_mi_heap_empty;
static mi_tld_t tld_main = {
  0,
  &_mi_heap_main,
-  { { NULL, NULL }, 0, 0, 0, NULL, tld_main_stats },         // segments
+  { { NULL, NULL }, 0, 0, 0, {NULL,NULL}, tld_main_stats },  // segments
  { 0, NULL, NULL, 0, tld_main_stats },  // os
  { MI_STATS_NULL }                      // stats
};

src/segment.c (111 changed lines)
@@ -112,13 +112,25 @@ static void mi_segment_enqueue(mi_segment_queue_t* queue, mi_segment_t* segment)
}

+static void mi_segment_queue_insert_before(mi_segment_queue_t* queue, mi_segment_t* elem, mi_segment_t* segment) {
+  mi_assert_expensive(elem==NULL || mi_segment_queue_contains(queue, elem));
+  mi_assert_expensive(segment != NULL && !mi_segment_queue_contains(queue, segment));
+
+  segment->prev = (elem == NULL ? queue->last : elem->prev);
+  if (segment->prev != NULL) segment->prev->next = segment;
+  else queue->first = segment;
+  segment->next = elem;
+  if (segment->next != NULL) segment->next->prev = segment;
+  else queue->last = segment;
+}
+
// Start of the page available memory
uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size)
{
  size_t psize = (segment->page_kind == MI_PAGE_HUGE ? segment->segment_size : (size_t)1 << segment->page_shift);
  uint8_t* p = (uint8_t*)segment + page->segment_idx*psize;

  if (page->segment_idx == 0) {
    // the first page starts after the segment info (and possible guard page)
    p += segment->segment_info_size;
@@ -130,7 +142,7 @@ uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* pa
    // secure > 1: every page has an os guard page
    psize -= _mi_os_page_size();
  }

  if (page_size != NULL) *page_size = psize;
  mi_assert_internal(_mi_ptr_page(p) == page);
  mi_assert_internal(_mi_ptr_segment(p) == segment);
@@ -140,9 +152,9 @@ uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* pa
static size_t mi_segment_size(size_t capacity, size_t required, size_t* pre_size, size_t* info_size) {
/*
  if (mi_option_is_enabled(mi_option_secure)) {
    // always reserve maximally so the protection falls on
    // the same address area, as we need to reuse them from the caches interchangably.
    capacity = MI_SMALL_PAGES_PER_SEGMENT;
  }
*/
  size_t minsize = sizeof(mi_segment_t) + ((capacity - 1) * sizeof(mi_page_t)) + 16 /* padding */;
@@ -194,19 +206,34 @@ static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_se
  _mi_os_free(segment, segment_size,tld->stats);
}

-// The segment cache is limited to be at most 1/2 of the peak
+// The segment cache is limited to be at most 1/6 of the peak
// number of segments in use (and no more than 32)
-#define MI_SEGMENT_CACHE_MAX (16)
+#define MI_SEGMENT_CACHE_MAX (32)
#define MI_SEGMENT_CACHE_FRACTION (6)

-static mi_segment_t* mi_segment_cache_pop(mi_segments_tld_t* tld) {
-  mi_segment_t* segment = tld->cache;
-  if (segment == NULL) return NULL;
-  tld->cache_count--;
-  tld->cache = segment->next;
-  segment->next = NULL;
-  return segment;
-}
+// Get a segment of at least `required` size.
+static mi_segment_t* _mi_segment_cache_findx(mi_segments_tld_t* tld, size_t required, bool reverse) {
+  mi_segment_t* segment = (reverse ? tld->cache.last : tld->cache.first);
+  while (segment != NULL) {
+    if (segment->segment_size >= required) {
+      tld->cache_count--;
+      mi_segment_queue_remove(&tld->cache, segment);
+      // TODO: unmap excess memory if larger than N%
+      return segment;
+    }
+    segment = (reverse ? segment->prev : segment->next);
+  }
+  return NULL;
+}
+
+static mi_segment_t* mi_segment_cache_find(mi_segments_tld_t* tld, size_t required) {
+  return _mi_segment_cache_findx(tld,required,false);
+}
+
+static mi_segment_t* mi_segment_cache_evict(mi_segments_tld_t* tld) {
+  // TODO: random eviction instead?
+  return _mi_segment_cache_findx(tld, 0, true /* from the end */);
+}

static bool mi_segment_cache_full(mi_segments_tld_t* tld) {
@@ -214,24 +241,27 @@ static bool mi_segment_cache_full(mi_segments_tld_t* tld) {
      tld->cache_count*MI_SEGMENT_CACHE_FRACTION < mi_segments_peak(tld)) return false;
  // take the opportunity to reduce the segment cache if it is too large (now)
  while (tld->cache_count*MI_SEGMENT_CACHE_FRACTION >= mi_segments_peak(tld) + 1) {
-    mi_segment_t* segment = mi_segment_cache_pop(tld);
+    mi_segment_t* segment = mi_segment_cache_evict(tld);
    mi_assert_internal(segment != NULL);
    if (segment != NULL) mi_segment_os_free(segment, MI_SEGMENT_SIZE, tld);
  }
  return true;
}

-static bool mi_segment_cache_push(mi_segment_t* segment, mi_segments_tld_t* tld) {
+static bool mi_segment_cache_insert(mi_segment_t* segment, mi_segments_tld_t* tld) {
+  mi_assert_internal(segment->next==NULL && segment->prev==NULL);
  mi_assert_internal(!mi_segment_is_in_free_queue(segment,tld));
-  mi_assert_internal(segment->next==NULL);
+  mi_assert_expensive(!mi_segment_queue_contains(&tld->cache, segment));
  if (mi_segment_cache_full(tld)) return false;

  mi_assert_internal(segment->segment_size == MI_SEGMENT_SIZE);
  if (mi_option_is_enabled(mi_option_cache_reset) && !mi_option_is_enabled(mi_option_page_reset)) {
    _mi_os_reset((uint8_t*)segment + segment->segment_info_size, segment->segment_size - segment->segment_info_size);
  }
-  segment->next = tld->cache;
-  tld->cache = segment;
+  // insert ordered
+  mi_segment_t* seg = tld->cache.first;
+  while (seg != NULL && seg->segment_size < segment->segment_size) {
+    seg = seg->next;
+  }
+  mi_segment_queue_insert_before( &tld->cache, seg, segment );
  tld->cache_count++;
  return true;
}
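A worked example of the cache bound used by `mi_segment_cache_full` above: with `MI_SEGMENT_CACHE_FRACTION` at 6 and `MI_SEGMENT_CACHE_MAX` at 32, a thread whose peak was 120 live segments may cache at most 20 of them, while a thread with a peak of 600 is capped at 32. The predicate below is a simplified stand-in for that check, not mimalloc's exact code:

```c
// Simplified sketch of the segment-cache growth bound (illustrative, not mimalloc's code).
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define CACHE_MAX      32   // mirrors MI_SEGMENT_CACHE_MAX
#define CACHE_FRACTION 6    // mirrors MI_SEGMENT_CACHE_FRACTION

// The cache may still grow while it holds fewer than CACHE_MAX segments and
// less than 1/CACHE_FRACTION of the peak number of segments ever in use.
static bool cache_may_grow(size_t cache_count, size_t peak) {
  return cache_count < CACHE_MAX && cache_count * CACHE_FRACTION < peak;
}

int main(void) {
  printf("%d\n", cache_may_grow(19, 120));  // 1: 19*6 = 114 < 120
  printf("%d\n", cache_may_grow(20, 120));  // 0: 20*6 = 120 is not < 120
  printf("%d\n", cache_may_grow(32, 600));  // 0: hit the CACHE_MAX cap
  return 0;
}
```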
@@ -239,11 +269,11 @@ static bool mi_segment_cache_push(mi_segment_t* segment, mi_segments_tld_t* tld)
// called by ending threads to free cached segments
void _mi_segment_thread_collect(mi_segments_tld_t* tld) {
  mi_segment_t* segment;
-  while ((segment = mi_segment_cache_pop(tld)) != NULL) {
+  while ((segment = mi_segment_cache_find(tld,0)) != NULL) {
    mi_segment_os_free(segment, MI_SEGMENT_SIZE, tld);
  }
  mi_assert_internal(tld->cache_count == 0);
-  mi_assert_internal(tld->cache == NULL);
+  mi_assert_internal(mi_segment_queue_is_empty(&tld->cache));
}

/* -----------------------------------------------------------
@@ -252,14 +282,14 @@ void _mi_segment_thread_collect(mi_segments_tld_t* tld) {

// Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE` .
static mi_segment_t* mi_segment_alloc( size_t required, mi_page_kind_t page_kind, size_t page_shift, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
{
  // calculate needed sizes first

  size_t capacity;
  if (page_kind == MI_PAGE_HUGE) {
    mi_assert_internal(page_shift==MI_SEGMENT_SHIFT && required > 0);
    capacity = 1;
  }
  else {
    mi_assert_internal(required==0);
@@ -278,11 +308,9 @@ static mi_segment_t* mi_segment_alloc( size_t required, mi_page_kind_t page_kind
  mi_segment_t* segment = NULL;

  // try to get it from our caches
-  if (segment_size == MI_SEGMENT_SIZE) {
-    segment = mi_segment_cache_pop(tld);
-    if (segment != NULL && mi_option_is_enabled(mi_option_secure) && segment->page_kind != page_kind) {
-      _mi_os_unprotect(segment,segment->segment_size);
-    }
+  segment = mi_segment_cache_find(tld,segment_size);
+  if (segment != NULL && mi_option_is_enabled(mi_option_secure) && (segment->page_kind != page_kind || segment->segment_size != segment_size)) {
+    _mi_os_unprotect(segment,segment->segment_size);
  }

  // and otherwise allocate it from the OS
@@ -293,8 +321,8 @@ static mi_segment_t* mi_segment_alloc( size_t required, mi_page_kind_t page_kind
  }

  mi_assert_internal((uintptr_t)segment % MI_SEGMENT_SIZE == 0);

  memset(segment, 0, info_size);
  if (mi_option_is_enabled(mi_option_secure)) {
    // in secure mode, we set up a protected page in between the segment info
    // and the page data
@@ -302,7 +330,7 @@ static mi_segment_t* mi_segment_alloc( size_t required, mi_page_kind_t page_kind
    _mi_os_protect( (uint8_t*)segment + info_size, (pre_size - info_size) );
    size_t os_page_size = _mi_os_page_size();
    if (mi_option_get(mi_option_secure) <= 1) {
      // and protect the last page too
      _mi_os_protect( (uint8_t*)segment + segment_size - os_page_size, os_page_size );
    }
    else {
@@ -312,7 +340,7 @@ static mi_segment_t* mi_segment_alloc( size_t required, mi_page_kind_t page_kind
      }
    }
  }

  segment->page_kind = page_kind;
  segment->capacity = capacity;
  segment->page_shift = page_shift;
@@ -366,15 +394,12 @@ static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t
    }
  }

-  if (segment->page_kind == MI_PAGE_HUGE) {
-    mi_segment_os_free(segment, segment->segment_size, tld);
-  }
-  else if (!force && mi_segment_cache_push(segment, tld)) {
+  if (!force && mi_segment_cache_insert(segment, tld)) {
    // it is put in our cache
  }
  else {
    // otherwise return it to the OS
-    mi_segment_os_free(segment, MI_SEGMENT_SIZE,tld);
+    mi_segment_os_free(segment, segment->segment_size, tld);
  }
}

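The `mi_segment_free` change above carries the commit's main point: the huge-page special case is gone, so huge segments are offered to the cache like any other, and a segment that does go back to the OS is freed with its own `segment_size` rather than the fixed `MI_SEGMENT_SIZE`. A compressed sketch of that policy, with hypothetical stand-ins for the cache and OS helpers:

```c
// Sketch of the freeing policy after this change (hypothetical helper names).
#include <stdbool.h>
#include <stddef.h>

typedef struct segment_s { size_t segment_size; } segment_t;

// Stand-ins for mi_segment_cache_insert and mi_segment_os_free.
static bool cache_try_insert(segment_t* seg) { (void)seg; return true; }
static void os_free_segment(segment_t* seg, size_t size) { (void)seg; (void)size; }

static void segment_free_sketch(segment_t* seg, bool force) {
  // Every segment, huge or not, is first offered to the cache; only when the
  // cache refuses it (or the free is forced) does it go back to the OS,
  // and then with its actual size.
  if (!force && cache_try_insert(seg)) {
    return;                                  // kept for later reuse; no munmap
  }
  os_free_segment(seg, seg->segment_size);
}

int main(void) {
  segment_t huge = { (size_t)1 << 26 };      // e.g. a 64 MiB huge segment
  segment_free_sketch(&huge, false);         // cached instead of unmapped
  return 0;
}
```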
@@ -618,8 +643,8 @@ static mi_page_t* mi_segment_large_page_alloc(mi_segments_tld_t* tld, mi_os_tld_
  return page;
}

static mi_page_t* mi_segment_huge_page_alloc(size_t size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
{
  mi_segment_t* segment = mi_segment_alloc(size, MI_PAGE_HUGE, MI_SEGMENT_SHIFT,tld,os_tld);
  if (segment == NULL) return NULL;
  mi_assert_internal(segment->segment_size - segment->segment_info_size >= size);