wip: first implementation of page free list on segments for efficient delayed page reset

This commit is contained in:
daan 2020-01-14 21:47:18 -08:00
parent 65f4f5144b
commit 941c55ee42
4 changed files with 251 additions and 67 deletions

View File

@ -417,6 +417,8 @@ typedef struct mi_os_tld_s {
typedef struct mi_segments_tld_s {
mi_segment_queue_t small_free; // queue of segments with free small pages
mi_segment_queue_t medium_free; // queue of segments with free medium pages
mi_page_queue_t small_pages_free; // page queue of free small pages
mi_page_queue_t medium_pages_free; // page queue of free medium pages
size_t count; // current number of segments;
size_t peak_count; // peak number of segments
size_t current_size; // current size of all segments

View File

@ -105,9 +105,12 @@ mi_decl_thread mi_heap_t* _mi_heap_default = (mi_heap_t*)&_mi_heap_empty;
static mi_tld_t tld_main = {
0, false,
&_mi_heap_main,
{ { NULL, NULL }, {NULL ,NULL}, 0, 0, 0, 0, 0, 0, NULL, tld_main_stats, tld_main_os }, // segments
{ { NULL, NULL }, {NULL ,NULL}, {NULL ,NULL, 0}, {NULL ,NULL, 0},
0, 0, 0, 0, 0, 0, NULL,
tld_main_stats, tld_main_os
}, // segments
{ 0, tld_main_stats }, // os
{ MI_STATS_NULL } // stats
{ MI_STATS_NULL } // stats
};
#if MI_INTPTR_SIZE==8

View File

@ -70,7 +70,7 @@ static mi_option_desc_t options[_mi_option_last] =
{ 0, UNINIT, MI_OPTION(page_reset) }, // reset pages on free
{ 0, UNINIT, MI_OPTION(segment_reset) }, // reset segment memory on free (needs eager commit)
{ 0, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed
{ 500, UNINIT, MI_OPTION(reset_delay) }, // reset delay in milli-seconds
{ 100, UNINIT, MI_OPTION(reset_delay) }, // reset delay in milli-seconds
{ 0, UNINIT, MI_OPTION(use_numa_nodes) }, // 0 = use available numa nodes, otherwise use at most N nodes.
{ 100, UNINIT, MI_OPTION(os_tag) }, // only apple specific for now but might serve more or less related purpose
{ 16, UNINIT, MI_OPTION(max_errors) } // maximum errors that are output

View File

@ -43,7 +43,7 @@ static uint8_t* mi_segment_raw_page_start(const mi_segment_t* segment, const mi_
----------------------------------------------------------- */
#if (MI_DEBUG>=3)
static bool mi_segment_queue_contains(const mi_segment_queue_t* queue, mi_segment_t* segment) {
static bool mi_segment_queue_contains(const mi_segment_queue_t* queue, const mi_segment_t* segment) {
mi_assert_internal(segment != NULL);
mi_segment_t* list = queue->first;
while (list != NULL) {
@ -90,7 +90,7 @@ static mi_segment_queue_t* mi_segment_free_queue_of_kind(mi_page_kind_t kind, mi
else return NULL;
}
static mi_segment_queue_t* mi_segment_free_queue(mi_segment_t* segment, mi_segments_tld_t* tld) {
static mi_segment_queue_t* mi_segment_free_queue(const mi_segment_t* segment, mi_segments_tld_t* tld) {
return mi_segment_free_queue_of_kind(segment->page_kind, tld);
}
@ -113,7 +113,7 @@ static void mi_segment_insert_in_free_queue(mi_segment_t* segment, mi_segments_t
----------------------------------------------------------- */
#if (MI_DEBUG>=2)
static bool mi_segment_is_in_free_queue(mi_segment_t* segment, mi_segments_tld_t* tld) {
static bool mi_segment_is_in_free_queue(const mi_segment_t* segment, mi_segments_tld_t* tld) {
mi_segment_queue_t* queue = mi_segment_free_queue(segment, tld);
bool in_queue = (queue!=NULL && (segment->next != NULL || segment->prev != NULL || queue->first == segment));
if (in_queue) {
@ -123,7 +123,7 @@ static bool mi_segment_is_in_free_queue(mi_segment_t* segment, mi_segments_tld_t
}
#endif
static size_t mi_segment_page_size(mi_segment_t* segment) {
static size_t mi_segment_page_size(const mi_segment_t* segment) {
if (segment->capacity > 1) {
mi_assert_internal(segment->page_kind <= MI_PAGE_MEDIUM);
return ((size_t)1 << segment->page_shift);
@ -134,15 +134,39 @@ static size_t mi_segment_page_size(mi_segment_t* segment) {
}
}
// Return the thread-local queue of free pages for the given page kind:
// small and medium kinds each have their own queue; any other kind
// (large/huge) has no delayed-reset queue and yields NULL.
static mi_page_queue_t* mi_segment_page_free_queue(mi_page_kind_t kind, mi_segments_tld_t* tld) {
if (kind==MI_PAGE_SMALL) return &tld->small_pages_free;
else if (kind==MI_PAGE_MEDIUM) return &tld->medium_pages_free;
else return NULL;
}
#if (MI_DEBUG>=3)
static bool mi_segment_is_valid(mi_segment_t* segment) {
// Debug-only: linear scan of the free-page queue for `kind` to check
// whether `page` is linked into it. Returns false for kinds that have
// no free-page queue (large/huge).
static bool mi_segment_page_free_contains(mi_page_kind_t kind, const mi_page_t* page, mi_segments_tld_t* tld) {
const mi_page_queue_t* const pq = mi_segment_page_free_queue(kind, tld);
if (pq == NULL) return false;
mi_page_t* p = pq->first;
while (p != NULL) {
if (p == page) return true;
p = p->next;
}
return false;
}
static bool mi_segment_is_valid(const mi_segment_t* segment, mi_segments_tld_t* tld) {
mi_assert_internal(segment != NULL);
mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
mi_assert_internal(segment->used <= segment->capacity);
mi_assert_internal(segment->abandoned <= segment->used);
size_t nfree = 0;
for (size_t i = 0; i < segment->capacity; i++) {
if (!segment->pages[i].segment_in_use) nfree++;
const mi_page_t* const page = &segment->pages[i];
if (!page->segment_in_use) {
nfree++;
}
else {
mi_assert_expensive(!mi_segment_page_free_contains(segment->page_kind, page, tld));
}
}
mi_assert_internal(nfree + segment->used == segment->capacity);
mi_assert_internal(segment->thread_id == _mi_thread_id() || (segment->thread_id==0)); // or 0
@ -152,6 +176,20 @@ static bool mi_segment_is_valid(mi_segment_t* segment) {
}
#endif
// Return true iff `page` is NOT linked in its kind's free-page queue.
// A non-NULL next or prev link implies membership; when both are NULL the
// page may still be a singleton queue entry, so the queue's first/last are
// checked explicitly. Large/huge pages are never queued, hence always true.
static bool mi_segment_page_free_not_in_queue(const mi_page_t* page, mi_segments_tld_t* tld) {
mi_page_kind_t kind = _mi_page_segment(page)->page_kind;
if (page->next != NULL || page->prev != NULL) {
mi_assert_internal(mi_segment_page_free_contains(kind, page, tld));
return false;
}
if (kind > MI_PAGE_MEDIUM) return true;
// both next and prev are NULL, check for singleton list
const mi_page_queue_t* const pq = mi_segment_page_free_queue(kind, tld);
mi_assert_internal(pq!=NULL);
return (pq->first != page && pq->last != page);
}
/* -----------------------------------------------------------
Guard pages
----------------------------------------------------------- */
@ -232,6 +270,102 @@ static void mi_page_unreset(mi_segment_t* segment, mi_page_t* page, size_t size,
}
/* -----------------------------------------------------------
The free page queue
----------------------------------------------------------- */
// Stamp the page with its reset deadline (now + reset_delay ms).
// The otherwise-unused `heap` pointer field of a free page is repurposed
// to hold the expiry timestamp via a pointer-to-intptr_t cast.
// NOTE(review): this is a type-punned store; assumes sizeof(heap pointer)
// >= sizeof(intptr_t) and that `heap` is dead while the page is free — confirm.
static void mi_segment_page_free_set_expire(mi_page_t* page) {
*((intptr_t*)(&page->heap)) = _mi_clock_now() + mi_option_get(mi_option_reset_delay);
}
// Read back the expiry timestamp stored by mi_segment_page_free_set_expire
// (type-punned from the free page's `heap` field).
static mi_msecs_t mi_segment_page_free_get_expire(mi_page_t* page) {
return *((intptr_t*)(&page->heap));
}
// Push a freed small/medium page on the front of its kind's free-page queue,
// stamping it with a reset deadline first. Newest pages are thus at the head;
// the tail holds the oldest (soonest-to-expire) pages.
// Pre: page is free, belongs to `segment`, and is not already queued.
static void mi_segment_page_free_add(mi_segment_t* segment, mi_page_t* page, mi_segments_tld_t* tld) {
mi_assert_internal(segment->page_kind <= MI_PAGE_MEDIUM);
mi_assert_internal(!page->segment_in_use);
mi_assert_internal(_mi_page_segment(page) == segment);
mi_assert_internal(mi_segment_page_free_not_in_queue(page,tld));
mi_assert_expensive(!mi_segment_page_free_contains(segment->page_kind, page, tld));
mi_page_queue_t* pq = mi_segment_page_free_queue(segment->page_kind, tld);
// push on top
mi_segment_page_free_set_expire(page);
page->next = pq->first;
page->prev = NULL;
if (pq->first == NULL) {
mi_assert_internal(pq->last == NULL);
pq->first = pq->last = page;
}
else {
pq->first->prev = page;
pq->first = page;
}
}
// Unlink `page` from its kind's free-page queue, if present.
// No-op for large/huge segments (never queued) and for pages not in a queue.
// Clears the links and the `heap` field (which held the expiry stamp).
static void mi_segment_page_free_remove(mi_segment_t* segment, mi_page_t* page, mi_segments_tld_t* tld) {
if (segment->page_kind > MI_PAGE_MEDIUM) return;
if (mi_segment_page_free_not_in_queue(page,tld)) return;
mi_page_queue_t* pq = mi_segment_page_free_queue(segment->page_kind, tld);
mi_assert_internal(pq!=NULL);
mi_assert_internal(_mi_page_segment(page)==segment);
mi_assert_internal(!page->segment_in_use);
mi_assert_internal(mi_segment_page_free_contains(segment->page_kind, page, tld));
if (page->prev != NULL) page->prev->next = page->next;
if (page->next != NULL) page->next->prev = page->prev;
if (page == pq->last) pq->last = page->prev;
if (page == pq->first) pq->first = page->next;
page->next = page->prev = NULL;
page->heap = NULL;
}
// Remove every free page of `segment` from the free-page queues,
// e.g. before freeing or abandoning the segment. No-op for large/huge.
static void mi_segment_page_free_remove_all(mi_segment_t* segment, mi_segments_tld_t* tld) {
if (segment->page_kind > MI_PAGE_MEDIUM) return;
for (size_t i = 0; i < segment->capacity; i++) {
mi_page_t* page = &segment->pages[i];
if (!page->segment_in_use) {
mi_segment_page_free_remove(segment, page, tld);
}
}
}
// Peek at the most recently freed page of the given kind (queue head),
// or NULL if the queue is empty. Does not dequeue.
static mi_page_t* mi_segment_page_free_top(mi_page_kind_t kind, mi_segments_tld_t* tld) {
mi_assert_internal(kind <= MI_PAGE_MEDIUM);
mi_page_queue_t* pq = mi_segment_page_free_queue(kind, tld);
return pq->first;
}
// Walk the free-page queue of `kind` from the tail (oldest entries) and
// reset every page whose deadline has passed, unlinking it as we go.
// Stops at the first unexpired page; since pages are pushed at the head,
// the tail-to-head order is oldest-first, so the scan can stop early.
static void mi_segment_page_free_reset_delayedx(mi_msecs_t now, mi_page_kind_t kind, mi_segments_tld_t* tld) {
mi_page_queue_t* pq = mi_segment_page_free_queue(kind, tld);
mi_assert_internal(pq != NULL);
mi_page_t* page = pq->last;
while (page != NULL && (now - mi_segment_page_free_get_expire(page)) >= 0) {
mi_page_t* const prev = page->prev;
// reset the page's memory, then clear its queue links and expiry stamp
mi_page_reset(_mi_page_segment(page), page, 0, tld);
page->heap = NULL;
page->prev = page->next = NULL;
page = prev;
}
// `page` is now the new tail (last unexpired page), or NULL if all expired
pq->last = page;
if (page != NULL){
page->next = NULL;
}
else {
pq->first = NULL;
}
}
// Reset all expired free pages (small and medium queues) against one
// clock sample. Disabled entirely when the page_reset option is off.
static void mi_segment_page_free_reset_delayed(mi_segments_tld_t* tld) {
if (!mi_option_is_enabled(mi_option_page_reset)) return;
mi_msecs_t now = _mi_clock_now();
mi_segment_page_free_reset_delayedx(now, MI_PAGE_SMALL, tld);
mi_segment_page_free_reset_delayedx(now, MI_PAGE_MEDIUM, tld);
}
/* -----------------------------------------------------------
Segment size calculations
----------------------------------------------------------- */
@ -407,6 +541,10 @@ void _mi_segment_thread_collect(mi_segments_tld_t* tld) {
}
mi_assert_internal(tld->cache_count == 0);
mi_assert_internal(tld->cache == NULL);
mi_assert_internal(tld->small_pages_free.first == NULL);
mi_assert_internal(tld->medium_pages_free.first == NULL);
mi_assert_internal(tld->small_free.first == NULL);
mi_assert_internal(tld->medium_free.first == NULL);
}
@ -532,9 +670,9 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) {
UNUSED(force);
//fprintf(stderr,"mimalloc: free segment at %p\n", (void*)segment);
UNUSED(force);
mi_assert(segment != NULL);
mi_segment_page_free_remove_all(segment, tld);
mi_segment_remove_from_free_queue(segment,tld);
mi_assert_expensive(!mi_segment_queue_contains(&tld->small_free, segment));
@ -561,37 +699,38 @@ static bool mi_segment_has_free(const mi_segment_t* segment) {
return (segment->used < segment->capacity);
}
static mi_page_t* mi_segment_find_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
mi_assert_internal(mi_segment_has_free(segment));
mi_assert_expensive(mi_segment_is_valid(segment));
for (size_t i = 0; i < segment->capacity; i++) {
mi_page_t* page = &segment->pages[i];
if (!page->segment_in_use) {
// set in-use before doing unreset to prevent delayed reset
page->segment_in_use = true;
segment->used++;
if (!page->is_committed) {
mi_assert_internal(!segment->mem_is_fixed);
mi_assert_internal(!page->is_reset);
page->is_committed = true;
if (segment->page_kind < MI_PAGE_LARGE || !mi_option_is_enabled(mi_option_eager_page_commit)) {
size_t psize;
uint8_t* start = mi_segment_raw_page_start(segment, page, &psize);
bool is_zero = false;
const size_t gsize = (MI_SECURE >= 2 ? _mi_os_page_size() : 0);
_mi_mem_commit(start, psize + gsize, &is_zero, tld->os);
if (gsize > 0) { mi_segment_protect_range(start + psize, gsize, true); }
if (is_zero) { page->is_zero_init = true; }
}
}
if (page->is_reset) {
mi_page_unreset(segment, page, 0, tld); // todo: only unreset the part that was reset?
}
return page;
// Claim a free page of `segment` for use: dequeue it from the free-page
// queue, mark it in-use, commit/unreset its memory as needed, and remove
// the segment from the segment free queue when it becomes full.
static void mi_segment_page_claim(mi_segment_t* segment, mi_page_t* page, mi_segments_tld_t* tld) {
mi_assert_internal(_mi_page_segment(page) == segment);
mi_assert_internal(!page->segment_in_use);
// set in-use before doing unreset to prevent delayed reset
mi_segment_page_free_remove(segment, page, tld);
page->segment_in_use = true;
segment->used++;
if (!page->is_committed) {
mi_assert_internal(!segment->mem_is_fixed);
mi_assert_internal(!page->is_reset);
page->is_committed = true;
// small/medium pages (or delayed-commit mode) commit lazily per page
if (segment->page_kind < MI_PAGE_LARGE
|| !mi_option_is_enabled(mi_option_eager_page_commit)) {
size_t psize;
uint8_t* start = mi_segment_raw_page_start(segment, page, &psize);
bool is_zero = false;
// in secure mode also commit (and protect) the trailing guard page
const size_t gsize = (MI_SECURE >= 2 ? _mi_os_page_size() : 0);
_mi_mem_commit(start, psize + gsize, &is_zero, tld->os);
if (gsize > 0) { mi_segment_protect_range(start + psize, gsize, true); }
if (is_zero) { page->is_zero_init = true; }
}
}
// NOTE(review): the next two lines are deleted-code residue from the old
// mi_segment_find_free shown in this diff view; they are not part of
// mi_segment_page_claim (which returns void).
mi_assert(false);
return NULL;
if (page->is_reset) {
mi_page_unreset(segment, page, 0, tld); // todo: only unreset the part that was reset?
}
mi_assert_internal(page->segment_in_use);
mi_assert_internal(segment->used <= segment->capacity);
if (segment->used == segment->capacity && segment->page_kind <= MI_PAGE_MEDIUM) {
// if no more free pages, remove from the queue
mi_assert_internal(!mi_segment_has_free(segment));
mi_segment_remove_from_free_queue(segment, tld);
}
}
@ -605,6 +744,7 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, mi_seg
mi_assert_internal(page->segment_in_use);
mi_assert_internal(mi_page_all_free(page));
mi_assert_internal(page->is_committed);
mi_assert_internal(mi_segment_page_free_not_in_queue(page, tld));
size_t inuse = page->capacity * page->block_size;
_mi_stat_decrease(&tld->stats->page_committed, inuse);
_mi_stat_decrease(&tld->stats->pages, 1);
@ -619,19 +759,27 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, mi_seg
// reset the page memory to reduce memory pressure?
// note: must come after setting `segment_in_use` to false but before block_size becomes 0
mi_page_reset(segment, page, 0 /*used_size*/, tld);
//mi_page_reset(segment, page, 0 /*used_size*/, tld);
// zero the page data, but not the segment fields
// zero the page data, but not the segment fields and block_size (for page size calculations)
size_t block_size = page->block_size;
ptrdiff_t ofs = offsetof(mi_page_t,capacity);
memset((uint8_t*)page + ofs, 0, sizeof(*page) - ofs);
page->block_size = block_size;
segment->used--;
// add to the free page list for reuse/reset
if (segment->page_kind <= MI_PAGE_MEDIUM) {
mi_segment_page_free_add(segment, page, tld);
}
}
void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld)
{
mi_assert(page != NULL);
mi_segment_t* segment = _mi_page_segment(page);
mi_assert_expensive(mi_segment_is_valid(segment));
mi_assert_expensive(mi_segment_is_valid(segment,tld));
mi_segment_page_free_reset_delayed(tld);
// mark it as free now
mi_segment_page_clear(segment, page, tld);
@ -690,10 +838,12 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
mi_assert_internal(segment->used == segment->abandoned);
mi_assert_internal(segment->used > 0);
mi_assert_internal(segment->abandoned_next == NULL);
mi_assert_expensive(mi_segment_is_valid(segment));
mi_assert_expensive(mi_segment_is_valid(segment,tld));
// remove the segment from the free page queue if needed
mi_segment_remove_from_free_queue(segment,tld);
mi_segment_page_free_reset_delayed(tld);
mi_segment_page_free_remove_all(segment, tld);
mi_segment_remove_from_free_queue(segment, tld);
mi_assert_internal(segment->next == NULL && segment->prev == NULL);
// all pages in the segment are abandoned; add it to the abandoned list
@ -708,7 +858,8 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) {
mi_assert(page != NULL);
mi_segment_t* segment = _mi_page_segment(page);
mi_assert_expensive(mi_segment_is_valid(segment));
mi_assert_expensive(!mi_segment_page_free_contains(segment->page_kind, page, tld));
mi_assert_expensive(mi_segment_is_valid(segment,tld));
segment->abandoned++;
_mi_stat_increase(&tld->stats->pages_abandoned, 1);
mi_assert_internal(segment->abandoned <= segment->used);
@ -755,7 +906,7 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen
segment->abandoned_next = NULL;
mi_segments_track_size((long)segment->segment_size,tld);
mi_assert_internal(segment->next == NULL && segment->prev == NULL);
mi_assert_expensive(mi_segment_is_valid(segment));
mi_assert_expensive(mi_segment_is_valid(segment,tld));
_mi_stat_decrease(&tld->stats->segments_abandoned,1);
// add its abandoned pages to the current thread
@ -765,6 +916,7 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen
if (page->segment_in_use) {
mi_assert_internal(!page->is_reset);
mi_assert_internal(page->is_committed);
mi_assert_internal(mi_segment_page_free_not_in_queue(page, tld));
segment->abandoned--;
mi_assert(page->next == NULL);
_mi_stat_decrease(&tld->stats->pages_abandoned, 1);
@ -801,30 +953,55 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen
Small page allocation
----------------------------------------------------------- */
// Allocate a small page inside a segment.
// Requires that the page has free pages
static mi_page_t* mi_segment_page_alloc_in(mi_segment_t* segment, mi_segments_tld_t* tld) {
// Find the first free page in `segment`, claim it, and return it.
// Pre: the segment has at least one free page, so the scan must succeed.
// NOTE(review): this diff hunk interleaves deleted lines of the old
// mi_segment_page_alloc_in with the new mi_segment_find_free body.
static mi_page_t* mi_segment_find_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
mi_assert_internal(mi_segment_has_free(segment));
mi_page_t* page = mi_segment_find_free(segment, tld);
mi_assert_internal(page->segment_in_use);
mi_assert_internal(segment->used <= segment->capacity);
if (segment->used == segment->capacity) {
// if no more free pages, remove from the queue
mi_assert_internal(!mi_segment_has_free(segment));
mi_segment_remove_from_free_queue(segment,tld);
mi_assert_expensive(mi_segment_is_valid(segment, tld));
for (size_t i = 0; i < segment->capacity; i++) {
mi_page_t* page = &segment->pages[i];
if (!page->segment_in_use) {
mi_segment_page_claim(segment, page, tld);
return page;
}
}
// unreachable given the precondition that a free page exists
return page;
mi_assert(false);
return NULL;
}
static mi_page_t* mi_segment_page_alloc(mi_page_kind_t kind, size_t page_shift, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
mi_segment_queue_t* free_queue = mi_segment_free_queue_of_kind(kind,tld);
if (mi_segment_queue_is_empty(free_queue)) {
mi_segment_t* segment = mi_segment_alloc(0,kind,page_shift,tld,os_tld);
if (segment == NULL) return NULL;
mi_segment_enqueue(free_queue, segment);
// Allocate a page inside a segment. Requires that the segment has free pages.
static mi_page_t* mi_segment_page_alloc_in(mi_segment_t* segment, mi_segments_tld_t* tld) {
mi_assert_internal(mi_segment_has_free(segment));
return mi_segment_find_free(segment, tld);
}
static mi_page_t* mi_segment_page_alloc(mi_page_kind_t kind, size_t page_shift, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
mi_page_t* page = NULL;
mi_segment_queue_t* const free_queue = mi_segment_free_queue_of_kind(kind, tld);
if (free_queue->first != NULL && free_queue->first->used < free_queue->first->capacity) {
// prefer to allocate from an available segment
// (to allow more chance of other segments to become completely freed)
page = mi_segment_page_alloc_in(free_queue->first, tld);
}
mi_assert_internal(free_queue->first != NULL);
mi_page_t* page = mi_segment_page_alloc_in(free_queue->first,tld);
else {
// otherwise try to pop from the page free list
page = mi_segment_page_free_top(kind, tld);
if (page != NULL) {
mi_segment_page_claim(_mi_page_segment(page), page, tld);
}
else {
// if that failed, search the segment free queue for an available segment
if (mi_segment_queue_is_empty(free_queue)) {
// possibly allocate a fresh segment
mi_segment_t* segment = mi_segment_alloc(0, kind, page_shift, tld, os_tld);
if (segment == NULL) return NULL; // return NULL if out-of-memory
mi_segment_enqueue(free_queue, segment);
}
mi_assert_internal(free_queue->first != NULL);
page = mi_segment_page_alloc_in(free_queue->first, tld);
}
}
mi_assert_internal(page != NULL);
#if MI_DEBUG>=2
_mi_segment_page_start(_mi_page_segment(page), page, sizeof(void*), NULL, NULL)[0] = 0;
#endif
@ -883,7 +1060,9 @@ mi_page_t* _mi_segment_page_alloc(size_t block_size, mi_segments_tld_t* tld, mi_
else {
page = mi_segment_huge_page_alloc(block_size,tld,os_tld);
}
mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page)));
mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld));
mi_assert_internal(page == NULL || (mi_segment_page_size(_mi_page_segment(page)) - (MI_SECURE == 0 ? 0 : _mi_os_page_size())) >= block_size);
mi_segment_page_free_reset_delayed(tld);
mi_assert_internal(mi_segment_page_free_not_in_queue(page, tld));
return page;
}