diff --git a/include/mimalloc-atomic.h b/include/mimalloc-atomic.h
index d504634c..b20f47b6 100644
--- a/include/mimalloc-atomic.h
+++ b/include/mimalloc-atomic.h
@@ -68,6 +68,9 @@ static inline void* mi_atomic_exchange_ptr(volatile void** p, void* exchange) {
   return (void*)mi_atomic_exchange((volatile uintptr_t*)p, (uintptr_t)exchange);
 }
 
+static inline intptr_t mi_atomic_iread(volatile intptr_t* p) {
+  return (intptr_t)mi_atomic_read( (volatile uintptr_t*)p );
+}
 
 #ifdef _MSC_VER
 #define WIN32_LEAN_AND_MEAN
diff --git a/include/mimalloc-internal.h b/include/mimalloc-internal.h
index f6f2e2ae..9fc6ed5d 100644
--- a/include/mimalloc-internal.h
+++ b/include/mimalloc-internal.h
@@ -91,6 +91,8 @@ uintptr_t _mi_heap_random(mi_heap_t* heap);
 
 // "stats.c"
 void _mi_stats_done(mi_stats_t* stats);
+double _mi_clock_end(double start);
+double _mi_clock_start(void);
 
 // "alloc.c"
 void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept; // called from `_mi_malloc_generic`
diff --git a/include/mimalloc.h b/include/mimalloc.h
index c6b7b5f8..4e82548a 100644
--- a/include/mimalloc.h
+++ b/include/mimalloc.h
@@ -195,7 +195,7 @@ typedef bool (mi_cdecl mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_
 mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg);
 
 mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept;
-
+mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs) mi_attr_noexcept;
 
 // ------------------------------------------------------
 // Convenience
@@ -227,7 +227,7 @@ typedef enum mi_option_e {
   mi_option_secure,
   mi_option_eager_commit,
   mi_option_eager_region_commit,
-  mi_option_large_os_pages, // implies eager commit
+  mi_option_large_os_pages,     // implies eager commit
   mi_option_page_reset,
   mi_option_cache_reset,
   mi_option_reset_decommits,
diff --git a/src/options.c b/src/options.c
index cd7e5da1..339a7546 100644
--- a/src/options.c
+++ b/src/options.c
@@ -53,7 +53,7 @@ static mi_option_desc_t options[_mi_option_last] =
   // the following options are experimental and not all combinations make sense.
   { 1, UNINIT, "eager_commit" },        // note: if eager_region_commit is on, this should be on too.
   #ifdef _WIN32   // and BSD?
-  { 0, UNINIT, "eager_region_commit" }, // don't commit too eagerly on windows (just for looks...)
+  { 1, UNINIT, "eager_region_commit" },
   #else
   { 1, UNINIT, "eager_region_commit" },
   #endif
diff --git a/src/os.c b/src/os.c
index bee5ac64..83dd37d7 100644
--- a/src/os.c
+++ b/src/os.c
@@ -34,6 +34,9 @@ terms of the MIT license. A copy of the license can be found in the file
 ----------------------------------------------------------- */
 bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
 
+static bool mi_os_is_huge_reserved(void* p);
+static void* mi_os_alloc_from_huge_reserved(size_t size, size_t try_alignment, bool commit);
+
 static void* mi_align_up_ptr(void* p, size_t alignment) {
   return (void*)_mi_align_up((uintptr_t)p, alignment);
 }
@@ -161,7 +164,7 @@ void _mi_os_init() {
 
 
 static bool mi_os_mem_free(void* addr, size_t size, mi_stats_t* stats) {
-  if (addr == NULL || size == 0) return true;
+  if (addr == NULL || size == 0 || mi_os_is_huge_reserved(addr)) return true;
   bool err = false;
 #if defined(_WIN32)
   err = (VirtualFree(addr, 0, MEM_RELEASE) == 0);
@@ -237,12 +240,13 @@ static void* mi_wasm_heap_grow(size_t size, size_t try_alignment) {
   return (void*)aligned_base;
 }
 #else
-static void* mi_unix_mmapx(size_t size, size_t try_alignment, int protect_flags, int flags, int fd) {
+#define MI_OS_USE_MMAP
+static void* mi_unix_mmapx(void* addr, size_t size, size_t try_alignment, int protect_flags, int flags, int fd) {
   void* p = NULL;
 #if (MI_INTPTR_SIZE >= 8) && !defined(MAP_ALIGNED)
   // on 64-bit systems, use the virtual address area after 4TiB for 4MiB aligned allocations
   static volatile intptr_t aligned_base = ((intptr_t)1 << 42); // starting at 4TiB
-  if (try_alignment <= MI_SEGMENT_SIZE && (size%MI_SEGMENT_SIZE)==0) {
+  if (addr==NULL && try_alignment <= MI_SEGMENT_SIZE && (size%MI_SEGMENT_SIZE)==0) {
     intptr_t hint = mi_atomic_add(&aligned_base,size) - size;
     if (hint%try_alignment == 0) {
       p = mmap((void*)hint,size,protect_flags,flags,fd,0);
@@ -251,7 +255,8 @@ static void* mi_unix_mmapx(size_t size, size_t try_alignment, int protect_flags,
   }
 #endif
   if (p==NULL) {
-    p = mmap(NULL,size,protect_flags,flags,fd,0);
+    p = mmap(addr,size,protect_flags,flags,fd,0);
+    if (p==MAP_FAILED) p = NULL;
   }
   return p;
 }
@@ -305,19 +310,15 @@ static void* mi_unix_mmap(size_t size, size_t try_alignment, int protect_flags)
 #endif
       if (lflags != flags) {
         // try large OS page allocation
-        p = mi_unix_mmapx(size, try_alignment, protect_flags, lflags, lfd);
-        if (p == MAP_FAILED) {
+        p = mi_unix_mmapx(NULL, size, try_alignment, protect_flags, lflags, lfd);
+        if (p == NULL) {
          mi_atomic_write(&large_page_try_ok, 10); // on error, don't try again for the next N allocations
-          p = NULL; // and fall back to regular mmap
        }
      }
    }
  }
  if (p == NULL) {
-    p = mi_unix_mmapx(size, try_alignment, protect_flags, flags, fd);
-    if (p == MAP_FAILED) {
-      p = NULL;
-    }
+    p = mi_unix_mmapx(NULL, size, try_alignment, protect_flags, flags, fd);
    #if defined(MADV_HUGEPAGE)
    // Many Linux systems don't allow MAP_HUGETLB but they support instead
    // transparent huge pages (THP). It is not required to call `madvise` with MADV_HUGEPAGE
@@ -325,7 +326,7 @@ static void* mi_unix_mmap(size_t size, size_t try_alignment, int protect_flags)
    // in that case -- in particular for our large regions (in `memory.c`).
    // However, some systems only allow THP if called with explicit `madvise`, so
    // when large OS pages are enabled for mimalloc, we call `madvise` anyway.
-    else if (use_large_os_page(size, try_alignment)) {
+    if (p != NULL && use_large_os_page(size, try_alignment)) {
      madvise(p, size, MADV_HUGEPAGE);
    }
    #endif
@@ -340,17 +341,19 @@ static void* mi_os_mem_alloc(size_t size, size_t try_alignment, bool commit, mi_
  mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
  if (size == 0) return NULL;
 
-  void* p = NULL;
-#if defined(_WIN32)
-  int flags = MEM_RESERVE;
-  if (commit) flags |= MEM_COMMIT;
-  p = mi_win_virtual_alloc(NULL, size, try_alignment, flags);
-#elif defined(__wasi__)
-  p = mi_wasm_heap_grow(size, try_alignment);
-#else
-  int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE);
-  p = mi_unix_mmap(size, try_alignment, protect_flags);
-#endif
+  void* p = mi_os_alloc_from_huge_reserved(size, try_alignment, commit);
+  if (p != NULL) return p;
+
+  #if defined(_WIN32)
+  int flags = MEM_RESERVE;
+  if (commit) flags |= MEM_COMMIT;
+  p = mi_win_virtual_alloc(NULL, size, try_alignment, flags);
+  #elif defined(__wasi__)
+  p = mi_wasm_heap_grow(size, try_alignment);
+  #else
+  int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE);
+  p = mi_unix_mmap(size, try_alignment, protect_flags);
+  #endif
  _mi_stat_increase(&stats->mmap_calls, 1);
  if (p != NULL) {
    _mi_stat_increase(&stats->reserved, size);
@@ -664,3 +667,128 @@ bool _mi_os_shrink(void* p, size_t oldsize, size_t newsize, mi_stats_t* stats) {
  return mi_os_mem_free(start, size, stats);
 #endif
 }
+
+
+/* ----------------------------------------------------------------------------
+Support for reserving huge OS pages (1GiB) up-front, from which large aligned allocations are served.
+-----------------------------------------------------------------------------*/
+#define MI_HUGE_OS_PAGE_SIZE  ((size_t)1 << 30)  // 1GiB
+
+typedef struct mi_huge_info_s {
+  uint8_t*           start;
+  ptrdiff_t          reserved;
+  volatile ptrdiff_t used;
+} mi_huge_info_t;
+
+static mi_huge_info_t os_huge_reserved = { NULL, 0, 0 };
+
+static bool mi_os_is_huge_reserved(void* p) {
+  return (os_huge_reserved.start != NULL &&
+          (uint8_t*)p >= os_huge_reserved.start &&
+          (uint8_t*)p < os_huge_reserved.start + os_huge_reserved.reserved);
+}
+
+static void* mi_os_alloc_from_huge_reserved(size_t size, size_t try_alignment, bool commit)
+{
+  // only allow large aligned allocations
+  if (size < MI_SEGMENT_SIZE || (size % MI_SEGMENT_SIZE) != 0) return NULL;
+  if (try_alignment > MI_SEGMENT_SIZE) return NULL;
+  if (!commit) return NULL;
+  if (os_huge_reserved.start==NULL) return NULL;
+  if (mi_atomic_iread(&os_huge_reserved.used) >= os_huge_reserved.reserved) return NULL; // already full
+
+  // always aligned
+  mi_assert_internal( os_huge_reserved.used % MI_SEGMENT_SIZE == 0 );
+  mi_assert_internal( (uintptr_t)os_huge_reserved.start % MI_SEGMENT_SIZE == 0 );
+
+  // try to reserve space by atomically bumping `used`
+  ptrdiff_t next = mi_atomic_add( &os_huge_reserved.used, (ptrdiff_t)size );
+  if (next > os_huge_reserved.reserved) {
+    // "free" our over-allocation
+    mi_atomic_add( &os_huge_reserved.used, -((ptrdiff_t)size) );
+    return NULL;
+  }
+
+  // success!
+  uint8_t* p = os_huge_reserved.start + next - (ptrdiff_t)size;
+  mi_assert_internal( (uintptr_t)p % MI_SEGMENT_SIZE == 0 );
+  return p;
+}
+
+/*
+static void mi_os_free_huge_reserved() {
+  uint8_t* addr = os_huge_reserved.start;
+  size_t total = os_huge_reserved.reserved;
+  os_huge_reserved.reserved = 0;
+  os_huge_reserved.start = NULL;
+  for( size_t current = 0; current < total; current += MI_HUGE_OS_PAGE_SIZE) {
+    _mi_os_free(addr + current, MI_HUGE_OS_PAGE_SIZE, &_mi_stats_main);
+  }
+}
+*/
+
+#if !(MI_INTPTR_SIZE >= 8 && (defined(_WIN32) || defined(MI_OS_USE_MMAP)))
+int mi_reserve_huge_os_pages(size_t pages, double max_secs) mi_attr_noexcept {
+  return -2; // cannot allocate
+}
+#else
+int mi_reserve_huge_os_pages( size_t pages, double max_secs ) mi_attr_noexcept
+{
+  if (max_secs==0) return -1; // timeout
+  if (pages==0) return 0;     // ok
+
+  // Allocate one page at a time, but try to place them contiguously;
+  // allocating per page also lets us abort if reservation takes too long.
+  double start_t = _mi_clock_start();
+  uint8_t* start = (uint8_t*)((uintptr_t)1 << 43); // 8TiB virtual start address
+  uint8_t* addr = start; // current top of the allocations
+  for (size_t page = 0; page < pages; page++, addr += MI_HUGE_OS_PAGE_SIZE ) {
+    void* p = NULL;
+    // OS specific calls to allocate huge OS pages
+    #ifdef _WIN32
+    p = mi_win_virtual_allocx(addr, MI_HUGE_OS_PAGE_SIZE, 0, MEM_LARGE_PAGES | MEM_COMMIT | MEM_RESERVE);
+    #elif defined(MI_OS_USE_MMAP) && defined(MAP_HUGETLB)
+    int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB;
+    #ifdef MAP_HUGE_1GB
+    flags |= MAP_HUGE_1GB;
+    #elif defined(MAP_HUGE_2MB)
+    flags |= MAP_HUGE_2MB;
+    #endif
+    p = mi_unix_mmapx(addr, MI_HUGE_OS_PAGE_SIZE, 0, PROT_WRITE|PROT_READ, flags, -1);
+    #endif
+    // Did we succeed at a contiguous address?
+    if (p != addr) {
+      if (p != NULL) {
+        _mi_warning_message("could not allocate contiguous huge page at 0x%p\n", addr);
+        _mi_os_free(p, MI_HUGE_OS_PAGE_SIZE, &_mi_stats_main);
+      }
+      else {
+        #ifdef _WIN32
+        int err = GetLastError();
+        #else
+        int err = errno;
+        #endif
+        _mi_warning_message("could not allocate huge page at 0x%p, error: %i\n", addr, err);
+      }
+      return -2;
+    }
+    // success, record it
+    if (page==0) {
+      os_huge_reserved.start = addr;
+    }
+    os_huge_reserved.reserved += MI_HUGE_OS_PAGE_SIZE;
+    _mi_stat_increase(&_mi_stats_main.reserved, MI_HUGE_OS_PAGE_SIZE);
+    _mi_stat_increase(&_mi_stats_main.committed, MI_HUGE_OS_PAGE_SIZE);
+
+    // check for timeout
+    double elapsed = _mi_clock_end(start_t);
+    if (elapsed > max_secs) return -1; // timeout
+    if (page >= 1) {
+      double estimate = ((elapsed / (double)(page+1)) * (double)pages);
+      if (estimate > 1.5*max_secs) return -1; // seems like we are going to timeout
+    }
+  }
+  return 0;
+}
+#endif
+
diff --git a/src/stats.c b/src/stats.c
index e7d398b2..ba06b10c 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -276,8 +276,8 @@ static void _mi_stats_print(mi_stats_t* stats, double secs, FILE* out) mi_attr_n
   _mi_fprintf(out,"\n");
 }
 
-static double mi_clock_end(double start);
-static double mi_clock_start(void);
+double _mi_clock_end(double start);
+double _mi_clock_start(void);
 static double mi_time_start = 0.0;
 
 static mi_stats_t* mi_stats_get_default(void) {
@@ -289,7 +289,7 @@ void mi_stats_reset(void) mi_attr_noexcept {
   mi_stats_t* stats = mi_stats_get_default();
   if (stats != &_mi_stats_main) { memset(stats, 0, sizeof(mi_stats_t)); }
   memset(&_mi_stats_main, 0, sizeof(mi_stats_t));
-  mi_time_start = mi_clock_start();
+  mi_time_start = _mi_clock_start();
 }
 
 static void mi_stats_print_ex(mi_stats_t* stats, double secs, FILE* out) {
@@ -301,11 +301,11 @@ static void mi_stats_print_ex(mi_stats_t* stats, double secs, FILE* out) {
 }
 
 void mi_stats_print(FILE* out) mi_attr_noexcept {
-  mi_stats_print_ex(mi_stats_get_default(),mi_clock_end(mi_time_start),out);
+  mi_stats_print_ex(mi_stats_get_default(),_mi_clock_end(mi_time_start),out);
 }
 
 void mi_thread_stats_print(FILE* out) mi_attr_noexcept {
-  _mi_stats_print(mi_stats_get_default(), mi_clock_end(mi_time_start), out);
+  _mi_stats_print(mi_stats_get_default(), _mi_clock_end(mi_time_start), out);
 }
 
@@ -350,7 +350,7 @@ static double mi_clock_now(void) {
 
 static double mi_clock_diff = 0.0;
 
-static double mi_clock_start(void) {
+double _mi_clock_start(void) {
   if (mi_clock_diff == 0.0) {
     double t0 = mi_clock_now();
     mi_clock_diff = mi_clock_now() - t0;
@@ -358,7 +358,7 @@ static double mi_clock_start(void) {
   return mi_clock_now();
 }
 
-static double mi_clock_end(double start) {
+double _mi_clock_end(double start) {
   double end = mi_clock_now();
   return (end - start - mi_clock_diff);
 }
diff --git a/test/test-stress.c b/test/test-stress.c
index 511679ac..2b799f33 100644
--- a/test/test-stress.c
+++ b/test/test-stress.c
@@ -154,6 +154,9 @@ int main(int argc, char** argv) {
     if (n > 0) N = n;
   }
   printf("start with %i threads with a %i%% load-per-thread\n", THREADS, N);
+  int res = mi_reserve_huge_os_pages(4, 1.0);
+  printf("(reserve huge: %i)\n", res);
+
   //bench_start_program();
   memset((void*)transfer, 0, TRANSFERS*sizeof(void*));
   run_os_threads(THREADS);
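
The new entry point reserves `pages` huge OS pages of 1GiB each within a time budget of `max_secs`: it returns `0` on success, `-1` on timeout (pages reserved before the timeout remain in use), and `-2` when huge pages cannot be allocated at all. A minimal usage sketch; the page count and time budget below are illustrative and not part of this change:

```c
#include <stdio.h>
#include <mimalloc.h>

int main(void) {
  // Try to reserve 4 huge (1GiB) OS pages, giving up after ~5 seconds.
  int res = mi_reserve_huge_os_pages(4, 5.0);
  if (res == 0)       printf("reserved 4 GiB of huge OS pages\n");
  else if (res == -1) printf("timed out; pages reserved so far are still used\n");
  else                printf("huge OS pages unavailable (res=%i)\n", res);
  // Large, segment-aligned, committed allocations are now served from the
  // reserved area; everything else falls back to the regular OS paths.
  return 0;
}
```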
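`mi_os_alloc_from_huge_reserved` carves segments out of the reserved area with a lock-free bump pointer: atomically add the request size to `used`, and if the new value overshoots `reserved`, atomically subtract it again and fail. A standalone sketch of that pattern in C11 atomics (all names here are illustrative; note that mimalloc's `mi_atomic_add` returns the new value, while C11 `atomic_fetch_add` returns the old one):

```c
#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct arena_s {
  uint8_t*          start;     // base of the reserved area
  ptrdiff_t         reserved;  // total bytes reserved (set once, up-front)
  _Atomic ptrdiff_t used;      // bytes handed out so far
} arena_t;

static void* arena_alloc(arena_t* a, size_t size) {
  // optimistic claim: fetch_add returns the old value, so adding `size`
  // again yields the end of the range we just claimed
  ptrdiff_t next = atomic_fetch_add(&a->used, (ptrdiff_t)size) + (ptrdiff_t)size;
  if (next > a->reserved) {
    // overflow: undo the claim so later (smaller) requests can still succeed
    atomic_fetch_sub(&a->used, (ptrdiff_t)size);
    return NULL;
  }
  return a->start + (next - (ptrdiff_t)size);  // start of the claimed range
}

int main(void) {
  static uint8_t buffer[1 << 16];
  arena_t a = { buffer, sizeof(buffer), 0 };
  void* p1 = arena_alloc(&a, 1 << 12);  // succeeds
  void* p2 = arena_alloc(&a, 1 << 16);  // overshoots: fails and rolls back
  printf("p1=%p p2=%p used=%td\n", p1, p2, atomic_load(&a.used));
  return 0;
}
```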
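On the Linux path, each 1GiB page is requested via `mmap` with `MAP_HUGETLB` at a fixed address hint so that successive pages form one contiguous range. A self-contained sketch of a single such mapping, assuming Linux: `MAP_HUGE_1GB` may be absent from older headers, and the kernel must have 1GiB pages set aside (e.g. via `hugepagesz=1G hugepages=4` boot parameters) or the call fails and the caller must fall back to regular pages:

```c
#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  size_t size = (size_t)1 << 30;              // 1GiB
  void*  hint = (void*)((uintptr_t)1 << 43);  // same 8TiB hint as the patch
  int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB;
#ifdef MAP_HUGE_1GB
  flags |= MAP_HUGE_1GB;
#endif
  // without MAP_FIXED the address is only a hint; a caller that needs
  // contiguity must check whether the returned pointer equals the hint
  void* p = mmap(hint, size, PROT_READ | PROT_WRITE, flags, -1, 0);
  if (p == MAP_FAILED) { perror("mmap(MAP_HUGETLB)"); return 1; }
  printf("huge page mapped at %p%s\n", p,
         (p == hint ? " (contiguous hint honored)" : ""));
  munmap(p, size);
  return 0;
}
```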