remove thread local segment cache
commit 6c91c75b14
parent 016b2ad535
@@ -521,9 +521,6 @@ typedef struct mi_segments_tld_s {
   size_t        peak_count;    // peak number of segments
   size_t        current_size;  // current size of all segments
   size_t        peak_size;     // peak size of all segments
-  size_t        cache_count;   // number of segments in the cache
-  size_t        cache_size;    // total size of all segments in the cache
-  mi_segment_t* cache;         // (small) cache of segments
   mi_stats_t*   stats;         // points to tld stats
   mi_os_tld_t*  os;            // points to os stats
 } mi_segments_tld_t;
@@ -313,7 +313,7 @@ typedef enum mi_option_e {
   mi_option_reserve_huge_os_pages,     // reserve N huge OS pages (1GiB) at startup
   mi_option_reserve_huge_os_pages_at,  // reserve huge OS pages at a specific NUMA node
   mi_option_reserve_os_memory,         // reserve specified amount of OS memory at startup
-  mi_option_segment_cache,
+  mi_option_deprecated_segment_cache,
   mi_option_page_reset,
   mi_option_abandoned_page_reset,
   mi_option_segment_reset,
@@ -112,7 +112,7 @@ static mi_tld_t tld_main = {
   0, false,
   &_mi_heap_main, &_mi_heap_main,
   { { NULL, NULL }, {NULL ,NULL}, {NULL ,NULL, 0},
-    0, 0, 0, 0, 0, 0, NULL,
+    0, 0, 0, 0,
     &tld_main.stats, &tld_main.os
   }, // segments
   { 0, &tld_main.stats },  // os
@@ -78,7 +78,7 @@ static mi_option_desc_t options[_mi_option_last] =
   { 0, UNINIT, MI_OPTION(reserve_huge_os_pages) },      // per 1GiB huge pages
   { -1, UNINIT, MI_OPTION(reserve_huge_os_pages_at) },  // reserve huge pages at node N
   { 0, UNINIT, MI_OPTION(reserve_os_memory)     },
-  { 0, UNINIT, MI_OPTION(segment_cache) },              // cache N segments per thread
+  { 0, UNINIT, MI_OPTION(deprecated_segment_cache) },   // cache N segments per thread
   { 1, UNINIT, MI_OPTION(page_reset) },                 // reset page memory on free
   { 0, UNINIT, MI_OPTION(abandoned_page_reset) },       // reset free page memory when a thread terminates
   { 0, UNINIT, MI_OPTION(segment_reset) },              // reset segment memory on free (needs eager commit)
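The option is renamed rather than dropped because the descriptor table above appears to be indexed by the `mi_option_e` enum value (note the `_mi_option_last` bound); keeping a `deprecated_` placeholder in the same slot leaves every later option at its old index. A minimal standalone sketch of that pattern, with hypothetical names rather than the mimalloc sources:

// Standalone sketch (hypothetical names): why a retired option keeps its enum
// slot. The descriptor table is indexed by the enum value, so renaming the
// entry to a "deprecated_" placeholder keeps every later option at the same
// index instead of shifting them all by one.
#include <stdio.h>

typedef enum option_e {
  option_reserve_os_memory,
  option_deprecated_segment_cache,   // slot kept so the indices below stay stable
  option_page_reset,
  option_last
} option_t;

typedef struct option_desc_s { long value; const char* name; } option_desc_t;

static option_desc_t options[option_last] = {
  { 0, "reserve_os_memory" },
  { 0, "deprecated_segment_cache" },
  { 1, "page_reset" },
};

static long option_get(option_t opt) {
  return options[opt].value;   // the enum value doubles as the table index
}

int main(void) {
  printf("page_reset = %ld\n", option_get(option_page_reset));
  return 0;
}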
@@ -110,17 +110,6 @@ static void mi_segment_insert_in_free_queue(mi_segment_t* segment, mi_segments_t
   Invariant checking
 ----------------------------------------------------------- */

-#if (MI_DEBUG>=2)
-static bool mi_segment_is_in_free_queue(const mi_segment_t* segment, mi_segments_tld_t* tld) {
-  mi_segment_queue_t* queue = mi_segment_free_queue(segment, tld);
-  bool in_queue = (queue!=NULL && (segment->next != NULL || segment->prev != NULL || queue->first == segment));
-  if (in_queue) {
-    mi_assert_expensive(mi_segment_queue_contains(queue, segment));
-  }
-  return in_queue;
-}
-#endif
-
 static size_t mi_segment_page_size(const mi_segment_t* segment) {
   if (segment->capacity > 1) {
     mi_assert_internal(segment->page_kind <= MI_PAGE_MEDIUM);
@@ -483,64 +472,8 @@ static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_se
   _mi_mem_free(segment, segment_size, segment->memid, fully_committed, any_reset, tld->os);
 }


-// The thread local segment cache is limited to be at most 1/8 of the peak size of segments in use,
-#define MI_SEGMENT_CACHE_FRACTION (8)
-
-// note: returned segment may be partially reset
-static mi_segment_t* mi_segment_cache_pop(size_t segment_size, mi_segments_tld_t* tld) {
-  if (segment_size != 0 && segment_size != MI_SEGMENT_SIZE) return NULL;
-  mi_segment_t* segment = tld->cache;
-  if (segment == NULL) return NULL;
-  tld->cache_count--;
-  tld->cache = segment->next;
-  segment->next = NULL;
-  mi_assert_internal(segment->segment_size == MI_SEGMENT_SIZE);
-  _mi_stat_decrease(&tld->stats->segments_cache, 1);
-  return segment;
-}
-
-static bool mi_segment_cache_full(mi_segments_tld_t* tld)
-{
-  // if (tld->count == 1 && tld->cache_count==0) return false; // always cache at least the final segment of a thread
-  size_t max_cache = mi_option_get(mi_option_segment_cache);
-  if (tld->cache_count < max_cache
-      && tld->cache_count < (1 + (tld->peak_count / MI_SEGMENT_CACHE_FRACTION)) // at least allow a 1 element cache
-     ) {
-    return false;
-  }
-  // take the opportunity to reduce the segment cache if it is too large (now)
-  // TODO: this never happens as we check against peak usage, should we use current usage instead?
-  while (tld->cache_count > max_cache) { //(1 + (tld->peak_count / MI_SEGMENT_CACHE_FRACTION))) {
-    mi_segment_t* segment = mi_segment_cache_pop(0,tld);
-    mi_assert_internal(segment != NULL);
-    if (segment != NULL) mi_segment_os_free(segment, segment->segment_size, tld);
-  }
-  return true;
-}
-
-static bool mi_segment_cache_push(mi_segment_t* segment, mi_segments_tld_t* tld) {
-  mi_assert_internal(!mi_segment_is_in_free_queue(segment, tld));
-  mi_assert_internal(segment->next == NULL);
-  if (segment->segment_size != MI_SEGMENT_SIZE || mi_segment_cache_full(tld)) {
-    return false;
-  }
-  mi_assert_internal(segment->segment_size == MI_SEGMENT_SIZE);
-  segment->next = tld->cache;
-  tld->cache = segment;
-  tld->cache_count++;
-  _mi_stat_increase(&tld->stats->segments_cache,1);
-  return true;
-}
-
-// called by threads that are terminating to free cached segments
-void _mi_segment_thread_collect(mi_segments_tld_t* tld) {
-  mi_segment_t* segment;
-  while ((segment = mi_segment_cache_pop(0,tld)) != NULL) {
-    mi_segment_os_free(segment, segment->segment_size, tld);
-  }
-  mi_assert_internal(tld->cache_count == 0);
-  mi_assert_internal(tld->cache == NULL);
+void _mi_segment_thread_collect(mi_segments_tld_t* tld) {
 #if MI_DEBUG>=2
   if (!_mi_is_main_thread()) {
     mi_assert_internal(tld->pages_reset.first == NULL);
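For reference, the deleted block implemented a small per-thread LIFO of fixed-size (`MI_SEGMENT_SIZE`) segments, capped by the `segment_cache` option and by roughly 1/8 of the thread's peak segment count, and drained on thread exit. A minimal standalone sketch of that mechanism, with hypothetical names rather than the mimalloc API:

// Standalone sketch (hypothetical names): a bounded per-thread LIFO cache of
// fixed-size blocks, in the spirit of the deleted mi_segment_cache_push/pop.
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

#define CACHE_FRACTION 8         // cache at most ~1/8 of the peak count
#define BLOCK_SIZE (4u << 20)    // fixed block size, like MI_SEGMENT_SIZE

typedef struct block_s { struct block_s* next; } block_t;

typedef struct cache_s {
  block_t* head;         // intrusive LIFO of cached blocks
  size_t   count;        // blocks currently cached
  size_t   peak_in_use;  // peak number of blocks the thread had in use
  size_t   max_cache;    // user-configured cap (the old segment_cache option)
} cache_t;

static block_t* cache_pop(cache_t* c) {
  block_t* b = c->head;
  if (b == NULL) return NULL;
  c->head = b->next;
  b->next = NULL;
  c->count--;
  return b;
}

static bool cache_push(cache_t* c, block_t* b) {
  // allow at least one entry, but never exceed the cap or 1/8 of the peak
  if (c->count >= c->max_cache ||
      c->count >= 1 + c->peak_in_use / CACHE_FRACTION) {
    return false;  // caller frees the block to the OS instead
  }
  b->next = c->head;
  c->head = b;
  c->count++;
  return true;
}

// on thread exit, drain the cache back to the OS
static void cache_collect(cache_t* c) {
  block_t* b;
  while ((b = cache_pop(c)) != NULL) free(b);
}

int main(void) {
  cache_t c = { NULL, 0, /*peak_in_use*/ 16, /*max_cache*/ 4 };
  block_t* b = malloc(BLOCK_SIZE);
  if (b != NULL && !cache_push(&c, b)) free(b);  // cache it, or give it back
  cache_collect(&c);
  return 0;
}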
@@ -712,13 +645,8 @@ static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t
   mi_assert(segment->prev == NULL);
   _mi_stat_decrease(&tld->stats->page_committed, segment->segment_info_size);

-  if (!force && mi_segment_cache_push(segment, tld)) {
-    // it is put in our cache
-  }
-  else {
-    // otherwise return it to the OS
-    mi_segment_os_free(segment, segment->segment_size, tld);
-  }
+  // return it to the OS
+  mi_segment_os_free(segment, segment->segment_size, tld);
 }

 /* -----------------------------------------------------------
@@ -1217,15 +1145,10 @@ static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t block_s
 {
   mi_assert_internal(page_kind <= MI_PAGE_LARGE);
   mi_assert_internal(block_size < MI_HUGE_BLOCK_SIZE);
-  // 1. try to get a segment from our cache
-  mi_segment_t* segment = mi_segment_cache_pop(MI_SEGMENT_SIZE, tld);
-  if (segment != NULL) {
-    mi_segment_init(segment, 0, page_kind, page_shift, tld, os_tld);
-    return segment;
-  }
-  // 2. try to reclaim an abandoned segment
+
+  // 1. try to reclaim an abandoned segment
   bool reclaimed;
-  segment = mi_segment_try_reclaim(heap, block_size, page_kind, &reclaimed, tld);
+  mi_segment_t* segment = mi_segment_try_reclaim(heap, block_size, page_kind, &reclaimed, tld);
   if (reclaimed) {
     // reclaimed the right page right into the heap
     mi_assert_internal(segment != NULL && segment->page_kind == page_kind && page_kind <= MI_PAGE_LARGE);
@@ -1235,7 +1158,7 @@ static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t block_s
     // reclaimed a segment with empty pages (of `page_kind`) in it
     return segment;
   }
-  // 3. otherwise allocate a fresh segment
+  // 2. otherwise allocate a fresh segment
   return mi_segment_alloc(0, page_kind, page_shift, tld, os_tld);
 }

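With the cache pop gone, segment acquisition reduces to a two-step strategy: try to reclaim an abandoned segment, otherwise allocate a fresh one. A minimal standalone sketch of that control flow, with hypothetical names rather than the mimalloc API:

// Standalone sketch (hypothetical names): the shape of the simplified
// allocation path -- first try to adopt an abandoned segment, otherwise
// fall back to a fresh allocation.
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct segment_s { size_t size; } segment_t;

// Pretend reclaim: returns NULL when no abandoned segment is available.
// (The real code also reports, via the flag, whether the exact page was
// reclaimed directly into the heap.)
static segment_t* try_reclaim(size_t size, bool* reclaimed) {
  (void)size;          // no abandoned segments in this toy example
  *reclaimed = false;
  return NULL;
}

static segment_t* alloc_fresh(size_t size) {
  segment_t* s = malloc(sizeof(segment_t));
  if (s != NULL) s->size = size;
  return s;
}

static segment_t* reclaim_or_alloc(size_t size) {
  bool reclaimed;
  segment_t* s = try_reclaim(size, &reclaimed);  // 1. try to reclaim
  if (s != NULL) return s;
  return alloc_fresh(size);                      // 2. otherwise allocate fresh
}

int main(void) {
  segment_t* s = reclaim_or_alloc(4 * 1024 * 1024);
  printf("got segment of %zu bytes\n", s ? s->size : 0);
  free(s);
  return 0;
}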