add reset_decommit, reset_discard options; maintain more statistics

daan 2019-07-09 20:24:00 -07:00
parent 2cfdbc2cbb
commit 0314373d93
7 changed files with 142 additions and 96 deletions
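
For orientation before the per-file diffs: the commit adds mi_option_reset_decommits (make reset/unreset decommit and re-commit memory outright) and mi_option_reset_discards (use DiscardVirtualMemory instead of MEM_RESET on Windows). A minimal sketch of toggling them through the option API declared below; the MIMALLOC_* environment spellings are an assumption based on mimalloc's usual option naming, not something this diff shows:

#include <mimalloc.h>

int main(void) {
  // have _mi_os_reset decommit memory instead of merely resetting it
  mi_option_enable(mi_option_reset_decommits, true);
  // on Windows, prefer DiscardVirtualMemory over MEM_RESET when resetting
  mi_option_enable(mi_option_reset_discards, true);
  // alternatively (assumed): MIMALLOC_RESET_DECOMMITS=1 / MIMALLOC_RESET_DISCARDS=1

  void* p = mi_malloc(1024);
  mi_free(p);  // freed pages can now be decommitted/discarded when reset
  return 0;
}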

include/mimalloc-internal.h

@@ -38,6 +38,7 @@ uintptr_t _mi_random_init(uintptr_t seed /* can be zero */);
// "os.c"
bool _mi_os_reset(void* p, size_t size, mi_stats_t* stats);
bool _mi_os_unreset(void* addr, size_t size, mi_stats_t* stats);
void* _mi_os_alloc(size_t size, mi_stats_t* stats);
bool _mi_os_shrink(void* p, size_t oldsize, size_t newsize, mi_stats_t* stats);
void _mi_os_free(void* p, size_t size, mi_stats_t* stats);

include/mimalloc.h

@@ -221,7 +221,10 @@ typedef enum mi_option_e {
mi_option_page_reset,
mi_option_cache_reset,
mi_option_pool_commit,
mi_option_eager_commit,
mi_option_large_os_pages,
mi_option_reset_decommits,
mi_option_reset_discards,
mi_option_secure,
mi_option_show_stats,
mi_option_show_errors,
@@ -229,6 +232,7 @@ typedef enum mi_option_e {
_mi_option_last
} mi_option_t;
mi_decl_export bool mi_option_is_enabled(mi_option_t option);
mi_decl_export void mi_option_enable(mi_option_t option, bool enable);
mi_decl_export void mi_option_enable_default(mi_option_t option, bool enable);

src/options.c

@@ -35,7 +35,10 @@ static mi_option_desc_t options[_mi_option_last] = {
{ 0, UNINIT, "page_reset" },
{ 0, UNINIT, "cache_reset" },
{ 0, UNINIT, "pool_commit" },
{ 1, UNINIT, "eager_commit" }, // secure must have eager commit
{ 0, UNINIT, "large_os_pages" }, // use large OS pages
{ 0, UNINIT, "reset_decommits" },
{ 0, UNINIT, "reset_discards" },
#if MI_SECURE
{ MI_SECURE, INITIALIZED, "secure" }, // in secure build the environment setting is ignored
#else

src/os.c

@@ -391,29 +391,74 @@ static void* mi_os_page_align_area_conservative(void* addr, size_t size, size_t*
return mi_os_page_align_areax(true, addr, size, newsize);
}
// Commit/Decommit memory. Commit is aligned liberal, while decommit is aligned conservative.
static bool mi_os_commitx(void* addr, size_t size, bool commit, mi_stats_t* stats) {
// page align in the range, commit liberally, decommit conservative
size_t csize;
void* start = mi_os_page_align_areax(!commit, addr, size, &csize);
if (csize == 0) return true;
int err = 0;
if (commit) {
_mi_stat_increase(&stats->committed, csize);
_mi_stat_increase(&stats->commit_calls, 1);
}
else {
_mi_stat_decrease(&stats->committed, csize);
}
#if defined(_WIN32)
if (commit) {
void* p = VirtualAlloc(start, csize, MEM_COMMIT, PAGE_READWRITE);
err = (p == start ? 0 : GetLastError());
}
else {
BOOL ok = VirtualFree(start, csize, MEM_DECOMMIT);
err = (ok ? 0 : GetLastError());
}
#else
err = mprotect(start, csize, (commit ? (PROT_READ | PROT_WRITE) : PROT_NONE));
#endif
if (err != 0) {
_mi_warning_message("commit/decommit error: start: 0x%8p, csize: 0x%8zux, err: %i\n", start, csize, err);
}
mi_assert_internal(err == 0);
return (err == 0);
}
bool _mi_os_commit(void* addr, size_t size, mi_stats_t* stats) {
return mi_os_commitx(addr, size, true, stats);
}
bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats) {
return mi_os_commitx(addr, size, false, stats);
}
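
To make "commit is aligned liberal, while decommit is aligned conservative" concrete: committing rounds the range outward to whole OS pages (never commit less than requested), while decommitting rounds inward (never touch bytes outside the range). An illustrative stand-in for the page-align helpers, assuming 4 KiB pages (not the internal mi_os_page_align_areax):

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 0x1000  // assumed 4 KiB OS pages

// illustrative only: align [addr, addr+size) outward (liberal) or inward (conservative)
static void* page_align_area(int conservative, void* addr, size_t size, size_t* newsize) {
  uintptr_t p = (uintptr_t)addr;
  uintptr_t start, end;
  if (conservative) {  // round start up, end down: stay strictly inside the range
    start = (p + PAGE_SIZE - 1) & ~(uintptr_t)(PAGE_SIZE - 1);
    end   = (p + size) & ~(uintptr_t)(PAGE_SIZE - 1);
  }
  else {               // round start down, end up: cover the whole range
    start = p & ~(uintptr_t)(PAGE_SIZE - 1);
    end   = (p + size + PAGE_SIZE - 1) & ~(uintptr_t)(PAGE_SIZE - 1);
  }
  *newsize = (end > start ? (size_t)(end - start) : 0);
  return (void*)start;
}

// e.g. addr=0x1003, size=0x2000: liberal      -> [0x1000,0x4000), 0x3000 bytes
//                                conservative -> [0x2000,0x3000), 0x1000 bytes
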
// Signal to the OS that the address range is no longer in use
// but may be used later again. This will release physical memory
// pages and reduce swapping while keeping the memory committed.
// We page align to a conservative area inside the range to reset.
bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats) {
// page align conservatively within the range
size_t csize;
void* start = mi_os_page_align_area_conservative(addr, size, &csize);
if (csize == 0) return true;
_mi_stat_increase(&stats->reset, csize);
if (reset) _mi_stat_increase(&stats->reset, csize);
else _mi_stat_decrease(&stats->reset, csize);
if (!reset) return true; // nothing to do on unreset!
#if defined(_WIN32)
// Testing shows that for us (on `malloc-large`) MEM_RESET is 2x faster than DiscardVirtualMemory
// (but this is for an access pattern that immediately reuses the memory)
/*
DWORD ok = DiscardVirtualMemory(start, csize);
return (ok != 0);
*/
void* p = VirtualAlloc(start, csize, MEM_RESET, PAGE_READWRITE);
mi_assert(p == start);
if (p != start) return false;
if (mi_option_is_enabled(mi_option_reset_discards)) {
DWORD ok = DiscardVirtualMemory(start, csize);
mi_assert_internal(ok == 0);
if (ok != 0) return false;
}
else {
void* p = VirtualAlloc(start, csize, MEM_RESET, PAGE_READWRITE);
mi_assert_internal(p == start);
if (p != start) return false;
}
/*
// VirtualUnlock removes the memory eagerly from the current working set (which MEM_RESET does lazily on demand)
// TODO: put this behind an option?
@@ -441,6 +486,29 @@ bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
#endif
}
// Signal to the OS that the address range is no longer in use
// but may be used later again. This will release physical memory
// pages and reduce swapping while keeping the memory committed.
// We page align to a conservative area inside the range to reset.
bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
if (mi_option_is_enabled(mi_option_reset_decommits)) {
return _mi_os_decommit(addr,size,stats);
}
else {
return mi_os_resetx(addr, size, true, stats);
}
}
bool _mi_os_unreset(void* addr, size_t size, mi_stats_t* stats) {
if (mi_option_is_enabled(mi_option_reset_decommits)) {
return _mi_os_commit(addr, size, stats); // re-commit it
}
else {
return mi_os_resetx(addr, size, false, stats);
}
}
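
The dispatch above keeps one contract at every call site while the option picks the mechanism. A sketch of the typical pairing as the allocator uses it (plumbing simplified):

// when a page falls out of use (sketch):
_mi_os_reset(start, psize, stats);    // MEM_RESET/discard -- or a true decommit
                                      // when reset_decommits is enabled
// before the memory may be touched again:
_mi_os_unreset(start, psize, stats);  // only adjusts statistics for a plain reset,
                                      // but re-commits when reset_decommits is enabled
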
// Protect a region in memory to be not accessible.
static bool mi_os_protectx(void* addr, size_t size, bool protect) {
// page align conservatively within the range
@@ -470,47 +538,7 @@ bool _mi_os_unprotect(void* addr, size_t size) {
return mi_os_protectx(addr, size, false);
}
// Commit/Decommit memory. Commit is aligned liberal, while decommit is aligned conservative.
static bool mi_os_commitx(void* addr, size_t size, bool commit, mi_stats_t* stats) {
// page align in the range, commit liberally, decommit conservative
size_t csize;
void* start = mi_os_page_align_areax(!commit, addr, size, &csize);
if (csize == 0) return true;
int err = 0;
if (commit) {
_mi_stat_increase(&stats->committed, csize);
_mi_stat_increase(&stats->commit_calls, 1);
}
else {
_mi_stat_decrease(&stats->committed, csize);
}
#if defined(_WIN32)
if (commit) {
void* p = VirtualAlloc(start, csize, MEM_COMMIT, PAGE_READWRITE);
err = (p == start ? 0 : GetLastError());
}
else {
BOOL ok = VirtualFree(start, csize, MEM_DECOMMIT);
err = (ok ? 0 : GetLastError());
}
#else
err = mprotect(start, csize, (commit ? (PROT_READ | PROT_WRITE) : PROT_NONE));
#endif
if (err != 0) {
_mi_warning_message("commit/decommit error: start: 0x%8p, csize: 0x%8zux, err: %i\n", start, csize, err);
}
mi_assert_internal(err == 0);
return (err == 0);
}
bool _mi_os_commit(void* addr, size_t size, mi_stats_t* stats) {
return mi_os_commitx(addr, size, true, stats);
}
bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats) {
return mi_os_commitx(addr, size, false, stats);
}
bool _mi_os_shrink(void* p, size_t oldsize, size_t newsize, mi_stats_t* stats) {
// page align conservatively within the range

src/page.c

@@ -456,7 +456,7 @@ static void mi_page_free_list_extend( mi_heap_t* heap, mi_page_t* page, size_t e
}
// enable the new free list
page->capacity += (uint16_t)extend;
mi_stat_increase(stats->page_committed, extend * page->block_size);
_mi_stat_increase(&stats->page_committed, extend * page->block_size);
}
/* -----------------------------------------------------------
@@ -484,13 +484,8 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_stats_t* st
if (page->capacity >= page->reserved) return;
size_t page_size;
_mi_page_start(_mi_page_segment(page), page, &page_size);
if (page->is_reset) {
page->is_reset = false;
mi_stat_decrease( stats->reset, page_size);
}
mi_stat_increase( stats->pages_extended, 1);
_mi_stat_increase(&stats->pages_extended, 1);
// calculate the extend count
size_t extend = page->reserved - page->capacity;
@@ -595,7 +590,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
page = next;
} // for each page
mi_stat_counter_increase(heap->tld->stats.searches,count);
_mi_stat_counter_increase(&heap->tld->stats.searches,count);
if (page == NULL) {
page = rpage;

src/segment.c

@@ -194,8 +194,8 @@ proves to be too small for certain workloads).
----------------------------------------------------------- */
static void mi_segments_track_size(long segment_size, mi_segments_tld_t* tld) {
if (segment_size>=0) mi_stat_increase(tld->stats->segments,1);
else mi_stat_decrease(tld->stats->segments,1);
if (segment_size>=0) _mi_stat_increase(&tld->stats->segments,1);
else _mi_stat_decrease(&tld->stats->segments,1);
tld->current_size += segment_size;
if (tld->current_size > tld->peak_size) tld->peak_size = tld->current_size;
}
@@ -254,7 +254,15 @@ static mi_segment_t* _mi_segment_cache_findx(mi_segments_tld_t* tld, size_t requ
}
static mi_segment_t* mi_segment_cache_find(mi_segments_tld_t* tld, size_t required) {
return _mi_segment_cache_findx(tld,required,false);
mi_segment_t* segment = _mi_segment_cache_findx(tld,required,false);
if (segment != NULL &&
mi_option_is_enabled(mi_option_eager_commit) &&
(mi_option_is_enabled(mi_option_cache_reset) || mi_option_is_enabled(mi_option_page_reset)))
{
// ensure the memory is available
_mi_os_unreset((uint8_t*)segment + segment->segment_info_size, segment->segment_size - segment->segment_info_size, tld->stats);
}
return segment;
}
static mi_segment_t* mi_segment_cache_evict(mi_segments_tld_t* tld) {
@@ -279,7 +287,8 @@ static bool mi_segment_cache_insert(mi_segment_t* segment, mi_segments_tld_t* tl
mi_assert_internal(!mi_segment_is_in_free_queue(segment,tld));
mi_assert_expensive(!mi_segment_queue_contains(&tld->cache, segment));
if (mi_segment_cache_full(tld)) return false;
if (mi_option_is_enabled(mi_option_cache_reset) && !mi_option_is_enabled(mi_option_page_reset)) {
if (mi_option_is_enabled(mi_option_cache_reset)) { // && !mi_option_is_enabled(mi_option_page_reset)) {
// note: not good if large OS pages are enabled
_mi_os_reset((uint8_t*)segment + segment->segment_info_size, segment->segment_size - segment->segment_info_size, tld->stats);
}
// insert ordered
@@ -297,7 +306,7 @@ static bool mi_segment_cache_insert(mi_segment_t* segment, mi_segments_tld_t* tl
void _mi_segment_thread_collect(mi_segments_tld_t* tld) {
mi_segment_t* segment;
while ((segment = mi_segment_cache_find(tld,0)) != NULL) {
mi_segment_os_free(segment, MI_SEGMENT_SIZE, tld);
mi_segment_os_free(segment, segment->segment_size, tld);
}
mi_assert_internal(tld->cache_count == 0 && tld->cache_size == 0);
mi_assert_internal(mi_segment_queue_is_empty(&tld->cache));
@@ -334,26 +343,32 @@ static mi_segment_t* mi_segment_alloc( size_t required, mi_page_kind_t page_kind
// Allocate the segment
mi_segment_t* segment = NULL;
// try to get it from our caches
bool protection_still_good = false;
segment = mi_segment_cache_find(tld,segment_size);
mi_assert_internal(segment == NULL ||
(segment_size==MI_SEGMENT_SIZE && segment_size == segment->segment_size) ||
(segment_size!=MI_SEGMENT_SIZE && segment_size <= segment->segment_size));
if (segment != NULL && mi_option_is_enabled(mi_option_secure) && (segment->page_kind != page_kind || segment->segment_size != segment_size)) {
_mi_os_unprotect(segment,segment->segment_size);
if (segment != NULL) {
if (mi_option_is_enabled(mi_option_secure)) {
if (segment->page_kind != page_kind || segment->segment_size != segment_size) {
_mi_os_unprotect(segment, segment->segment_size);
}
else {
protection_still_good = true; // otherwise, the guard pages are still in place
}
}
}
// and otherwise allocate it from the OS
if (segment == NULL) {
else {
segment = (mi_segment_t*)_mi_os_alloc_aligned(segment_size, MI_SEGMENT_SIZE, true, os_tld);
if (segment == NULL) return NULL;
mi_segments_track_size((long)segment_size,tld);
}
mi_assert_internal((uintptr_t)segment % MI_SEGMENT_SIZE == 0);
mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0);
memset(segment, 0, info_size);
if (mi_option_is_enabled(mi_option_secure)) {
if (mi_option_is_enabled(mi_option_secure) && !protection_still_good) {
// in secure mode, we set up a protected page in between the segment info
// and the page data
mi_assert_internal( info_size == pre_size - _mi_os_page_size() && info_size % _mi_os_page_size() == 0);
@@ -381,7 +396,7 @@ static mi_segment_t* mi_segment_alloc( size_t required, mi_page_kind_t page_kind
for (uint8_t i = 0; i < segment->capacity; i++) {
segment->pages[i].segment_idx = i;
}
mi_stat_increase(tld->stats->page_committed, segment->segment_info_size);
_mi_stat_increase(&tld->stats->page_committed, segment->segment_info_size);
//fprintf(stderr,"mimalloc: alloc segment at %p\n", (void*)segment);
return segment;
}
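
The secure-mode setup above amounts to a PROT_NONE guard page wedged between the segment metadata and the page data (the full code also guards boundaries not shown in this hunk). A standalone POSIX sketch of the idea; install_guard_page is a made-up name, and the real code goes through its own protection helpers:

#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

// sketch: make the OS page right after the segment info inaccessible, so an
// overrun out of the metadata faults instead of silently corrupting page data
static int install_guard_page(void* segment, size_t info_size) {
  size_t psize = (size_t)sysconf(_SC_PAGESIZE);
  void* guard = (char*)segment + info_size;  // info_size ends on a page boundary
  return mprotect(guard, psize, PROT_NONE);  // returns 0 on success
}
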
@@ -412,10 +427,11 @@ static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t
mi_assert_expensive(!mi_segment_queue_contains(&tld->small_free, segment));
mi_assert(segment->next == NULL);
mi_assert(segment->prev == NULL);
mi_stat_decrease( tld->stats->page_committed, segment->segment_info_size);
_mi_stat_decrease(&tld->stats->page_committed, segment->segment_info_size);
segment->thread_id = 0;
// update reset memory statistics
/*
for (uint8_t i = 0; i < segment->capacity; i++) {
mi_page_t* page = &segment->pages[i];
if (page->is_reset) {
@@ -423,6 +439,7 @@ static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t
mi_stat_decrease( tld->stats->reset,mi_page_size(page));
}
}
*/
if (!force && mi_segment_cache_insert(segment, tld)) {
// it is put in our cache
@@ -445,12 +462,18 @@ static bool mi_segment_has_free(const mi_segment_t* segment) {
return (segment->used < segment->capacity);
}
static mi_page_t* mi_segment_find_free(mi_segment_t* segment) {
static mi_page_t* mi_segment_find_free(mi_segment_t* segment, mi_stats_t* stats) {
mi_assert_internal(mi_segment_has_free(segment));
mi_assert_expensive(mi_segment_is_valid(segment));
for (size_t i = 0; i < segment->capacity; i++) {
mi_page_t* page = &segment->pages[i];
if (!page->segment_in_use) {
if (page->is_reset) {
size_t psize;
uint8_t* start = _mi_page_start(segment, page, &psize);
page->is_reset = false;
_mi_os_unreset(start, psize, stats);
}
return page;
}
}
@@ -470,18 +493,15 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, mi_sta
mi_assert_internal(page->segment_in_use);
mi_assert_internal(mi_page_all_free(page));
size_t inuse = page->capacity * page->block_size;
mi_stat_decrease( stats->page_committed, inuse);
mi_stat_decrease( stats->pages, 1);
_mi_stat_decrease(&stats->page_committed, inuse);
_mi_stat_decrease(&stats->pages, 1);
// reset the page memory to reduce memory pressure?
if (!page->is_reset && mi_option_is_enabled(mi_option_page_reset)) {
size_t psize;
uint8_t* start = _mi_page_start(segment, page, &psize);
mi_stat_increase( stats->reset, psize); // for stats we assume resetting the full page
page->is_reset = true;
if (inuse > 0) {
_mi_os_reset(start, inuse, stats);
}
_mi_os_reset(start, psize, stats);
}
// zero the page data
@@ -550,7 +570,8 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
segment->abandoned_next = (mi_segment_t*)abandoned;
} while (!mi_atomic_compare_exchange_ptr((volatile void**)&abandoned, segment, segment->abandoned_next));
mi_atomic_increment(&abandoned_count);
mi_stat_increase( tld->stats->segments_abandoned,1);
_mi_stat_increase(&tld->stats->segments_abandoned,1);
mi_segments_track_size((long)segment->segment_size, tld);
}
void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) {
@@ -558,7 +579,7 @@ void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) {
mi_segment_t* segment = _mi_page_segment(page);
mi_assert_expensive(mi_segment_is_valid(segment));
segment->abandoned++;
mi_stat_increase( tld->stats->pages_abandoned, 1);
_mi_stat_increase(&tld->stats->pages_abandoned, 1);
mi_assert_internal(segment->abandoned <= segment->used);
if (segment->used == segment->abandoned) {
// all pages are abandoned, abandon the entire segment
@@ -593,7 +614,7 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen
mi_segments_track_size((long)segment->segment_size,tld);
mi_assert_internal(segment->next == NULL && segment->prev == NULL);
mi_assert_expensive(mi_segment_is_valid(segment));
mi_stat_decrease(tld->stats->segments_abandoned,1);
_mi_stat_decrease(&tld->stats->segments_abandoned,1);
// add its free pages to the current thread
if (segment->page_kind == MI_PAGE_SMALL && mi_segment_has_free(segment)) {
mi_segment_enqueue(&tld->small_free, segment);
@@ -605,7 +626,7 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen
if (page->segment_in_use) {
segment->abandoned--;
mi_assert(page->next == NULL);
mi_stat_decrease( tld->stats->pages_abandoned, 1);
_mi_stat_decrease(&tld->stats->pages_abandoned, 1);
if (mi_page_all_free(page)) {
// if everything free by now, free the page
mi_segment_page_clear(segment,page,tld->stats);
@@ -636,7 +657,7 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen
// Requires that the page has free pages
static mi_page_t* mi_segment_small_page_alloc_in(mi_segment_t* segment, mi_segments_tld_t* tld) {
mi_assert_internal(mi_segment_has_free(segment));
mi_page_t* page = mi_segment_find_free(segment);
mi_page_t* page = mi_segment_find_free(segment, tld->stats);
page->segment_in_use = true;
segment->used++;
mi_assert_internal(segment->used <= segment->capacity);

src/stats.c

@@ -141,7 +141,7 @@ static void mi_printf_amount(int64_t n, int64_t unit, FILE* out, const char* fmt
_mi_fprintf(out, (fmt==NULL ? "%11s" : fmt), buf);
}
#if MI_STAT>0
static void mi_print_amount(int64_t n, int64_t unit, FILE* out) {
mi_printf_amount(n,unit,out,NULL);
}
@@ -175,7 +175,8 @@ static void mi_stat_counter_print(const mi_stat_counter_t* stat, const char* msg
double avg = (stat->count == 0 ? 0.0 : (double)stat->total / (double)stat->count);
_mi_fprintf(out,"%10s: %7.1f avg\n", msg, avg);
}
#endif
static void mi_print_header( FILE* out ) {
_mi_fprintf(out,"%10s: %10s %10s %10s %10s %10s\n", "heap stats", "peak ", "total ", "freed ", "unit ", "count ");
@@ -209,28 +210,22 @@ static void mi_process_info(double* utime, double* stime, size_t* peak_rss, size
static void _mi_stats_print(mi_stats_t* stats, double secs, FILE* out) mi_attr_noexcept {
if (out == NULL) out = stderr;
mi_print_header(out);
#if !defined(MI_STAT) || (MI_STAT==0)
UNUSED(stats);
//_mi_fprintf(out,"(mimalloc built without statistics)\n");
#else
#if MI_STAT>1
mi_stat_count_t normal = { 0,0,0,0 };
mi_stats_print_bins(&normal, stats->normal, MI_BIN_HUGE, "normal",out);
mi_stat_print(&normal, "normal", 1, out);
#endif
mi_stat_print(&stats->huge, "huge", 1, out);
#if MI_STAT>1
mi_stat_count_t total = { 0,0,0,0 };
mi_stat_add(&total, &normal, 1);
mi_stat_add(&total, &stats->huge, 1);
mi_stat_print(&total, "total", 1, out);
#endif
_mi_fprintf(out, "malloc requested: ");
mi_print_amount(stats->malloc.allocated, 1, out);
_mi_fprintf(out, "\n\n");
#endif
mi_stat_print(&stats->reserved, "reserved", 1, out);
mi_stat_print(&stats->committed, "committed", 1, out);
mi_stat_print(&stats->reset, "reset", -1, out);
mi_stat_print(&stats->reset, "reset", 1, out);
mi_stat_print(&stats->page_committed, "touched", 1, out);
mi_stat_print(&stats->segments, "segments", -1, out);
mi_stat_print(&stats->segments_abandoned, "-abandoned", -1, out);
@@ -243,7 +238,6 @@ static void _mi_stats_print(mi_stats_t* stats, double secs, FILE* out) mi_attr_n
mi_stat_print(&stats->commit_calls, "commits", 0, out);
mi_stat_print(&stats->threads, "threads", 0, out);
mi_stat_counter_print(&stats->searches, "searches", out);
#endif
if (secs >= 0.0) _mi_fprintf(out, "%10s: %9.3f s\n", "elapsed", secs);
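
To observe the counters this commit touches (reserved, committed, reset, touched, commits, ...), enable the stats printout; a sketch assuming the public mi_stats_print(FILE*) entry point and at-exit printing under mi_option_show_stats:

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  mi_option_enable(mi_option_show_stats, true);  // also printed at process exit
  void* p = mi_malloc(64 * 1024);
  mi_free(p);
  mi_stats_print(stderr);  // explicit dump of the table printed by _mi_stats_print
  return 0;
}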