merge with dev-win

commit e7bd8c5d14
include/mimalloc-atomic.h

@@ -68,6 +68,9 @@ static inline void* mi_atomic_exchange_ptr(volatile void** p, void* exchange) {
   return (void*)mi_atomic_exchange((volatile uintptr_t*)p, (uintptr_t)exchange);
 }
 
+static inline intptr_t mi_atomic_iread(volatile intptr_t* p) {
+  return (intptr_t)mi_atomic_read( (volatile uintptr_t*)p );
+}
+
 #ifdef _MSC_VER
 #define WIN32_LEAN_AND_MEAN
include/mimalloc-internal.h

@@ -78,7 +78,7 @@ void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay);
 size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append);
 void _mi_deferred_free(mi_heap_t* heap, bool force);
 
-void _mi_page_free_collect(mi_page_t* page);
+void _mi_page_free_collect(mi_page_t* page, bool force);
 void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page); // callback from segments
 
 size_t _mi_bin_size(uint8_t bin); // for stats
@@ -92,6 +92,8 @@ uintptr_t _mi_heap_random(mi_heap_t* heap);
 
 // "stats.c"
 void _mi_stats_done(mi_stats_t* stats);
+double _mi_clock_end(double start);
+double _mi_clock_start(void);
 
 // "alloc.c"
 void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept; // called from `_mi_malloc_generic`
include/mimalloc.h

@@ -195,7 +195,7 @@ typedef bool (mi_cdecl mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_
 mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg);
 
 mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept;
+mi_decl_export int  mi_reserve_huge_os_pages(size_t pages, double max_secs) mi_attr_noexcept;
 
 // ------------------------------------------------------
 // Convenience
@@ -227,7 +227,8 @@ typedef enum mi_option_e {
   mi_option_secure,
   mi_option_eager_commit,
   mi_option_eager_region_commit,
   mi_option_large_os_pages, // implies eager commit
+  mi_option_reserve_huge_os_pages,
   mi_option_page_reset,
   mi_option_cache_reset,
   mi_option_reset_decommits,
src/heap.c

@@ -85,7 +85,7 @@ static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t
   UNUSED(arg2);
   UNUSED(heap);
   mi_collect_t collect = *((mi_collect_t*)arg_collect);
-  _mi_page_free_collect(page);
+  _mi_page_free_collect(page, collect >= ABANDON);
   if (mi_page_all_free(page)) {
     // no more used blocks, free the page. TODO: should we retire here and be less aggressive?
     _mi_page_free(page, pq, collect != NORMAL);
@@ -428,7 +428,7 @@ static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_v
   mi_assert(page != NULL);
   if (page == NULL) return true;
 
-  _mi_page_free_collect(page);
+  _mi_page_free_collect(page,true);
   mi_assert_internal(page->local_free == NULL);
   if (page->used == 0) return true;
src/init.c

@@ -424,6 +424,12 @@ static void mi_process_load(void) {
   if (msg != NULL && (mi_option_is_enabled(mi_option_verbose) || mi_option_is_enabled(mi_option_show_errors))) {
     _mi_fputs(stderr,NULL,msg);
   }
+
+  if (mi_option_is_enabled(mi_option_reserve_huge_os_pages)) {
+    size_t pages = mi_option_get(mi_option_reserve_huge_os_pages);
+    double max_secs = (double)pages / 5.0; // 0.2s per page
+    mi_reserve_huge_os_pages(pages, max_secs);
+  }
 }
 
 // Initialize the process; called by thread_init or the process loader
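Aside: with this wiring in place, huge pages can also be reserved explicitly from user code via the new API declared in mimalloc.h. A minimal sketch using the same 0.2s-per-page budget as mi_process_load above (mi_reserve_huge_os_pages and its return codes come from this diff; everything else is ordinary C):

#include <stdio.h>
#include <mimalloc.h>

int main(void) {
  size_t pages = 4;
  double max_secs = (double)pages / 5.0;    // 0.2s per page, as above
  int res = mi_reserve_huge_os_pages(pages, max_secs);
  printf("reserve huge pages: %d\n", res);  // 0 = ok, -1 = timeout, -2 = cannot allocate
  return 0;
}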
src/options.c

@@ -57,11 +57,12 @@ static mi_option_desc_t options[_mi_option_last] =
   // the following options are experimental and not all combinations make sense.
   { 1, UNINIT, MI_OPTION(eager_commit) },        // note: if eager_region_commit is on, this should be on too.
   #ifdef _WIN32   // and BSD?
-  { 0, UNINIT, MI_OPTION(eager_region_commit) }, // don't commit too eagerly on windows (just for looks...)
+  { 1, UNINIT, MI_OPTION(eager_region_commit) }, // don't commit too eagerly on windows (just for looks...)
   #else
   { 1, UNINIT, MI_OPTION(eager_region_commit) },
   #endif
   { 0, UNINIT, MI_OPTION(large_os_pages) },      // use large OS pages, use only with eager commit to prevent fragmentation of VMA's
+  { 0, UNINIT, MI_OPTION(reserve_huge_os_pages) },
   { 0, UNINIT, MI_OPTION(page_reset) },
   { 0, UNINIT, MI_OPTION(cache_reset) },
   { 0, UNINIT, MI_OPTION(reset_decommits) }      // note: cannot enable this if secure is on
src/os.c (248 lines changed)
@@ -37,6 +37,9 @@ terms of the MIT license. A copy of the license can be found in the file
----------------------------------------------------------- */
 bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
 
+static bool  mi_os_is_huge_reserved(void* p);
+static void* mi_os_alloc_from_huge_reserved(size_t size, size_t try_alignment, bool commit);
+
 static void* mi_align_up_ptr(void* p, size_t alignment) {
   return (void*)_mi_align_up((uintptr_t)p, alignment);
 }
@@ -84,9 +87,13 @@ static size_t mi_os_good_alloc_size(size_t size, size_t alignment) {
 #if defined(_WIN32)
 // We use VirtualAlloc2 for aligned allocation, but it is only supported on Windows 10 and Windows Server 2016.
 // So, we need to look it up dynamically to run on older systems. (use __stdcall for 32-bit compatibility)
-// (hide MEM_EXTENDED_PARAMETER to compile with older SDK's)
-typedef PVOID(__stdcall *PVirtualAlloc2)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, /* MEM_EXTENDED_PARAMETER* */ void*, ULONG);
+// NtAllocateVirtualMemoryEx is used for huge OS page allocation (1GiB).
+// We hide MEM_EXTENDED_PARAMETER to compile with older SDK's.
+#include <winternl.h>
+typedef PVOID    (__stdcall *PVirtualAlloc2)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, /* MEM_EXTENDED_PARAMETER* */ void*, ULONG);
+typedef NTSTATUS (__stdcall *PNtAllocateVirtualMemoryEx)(HANDLE, PVOID*, SIZE_T*, ULONG, ULONG, /* MEM_EXTENDED_PARAMETER* */ PVOID, ULONG);
 static PVirtualAlloc2 pVirtualAlloc2 = NULL;
+static PNtAllocateVirtualMemoryEx pNtAllocateVirtualMemoryEx = NULL;
 
 void _mi_os_init(void) {
   // get the page size
@@ -103,9 +110,14 @@ void _mi_os_init(void) {
     if (pVirtualAlloc2==NULL) pVirtualAlloc2 = (PVirtualAlloc2)GetProcAddress(hDll, "VirtualAlloc2");
     FreeLibrary(hDll);
   }
+  hDll = LoadLibrary(TEXT("ntdll.dll"));
+  if (hDll != NULL) {
+    pNtAllocateVirtualMemoryEx = (PNtAllocateVirtualMemoryEx)GetProcAddress(hDll, "NtAllocateVirtualMemoryEx");
+    FreeLibrary(hDll);
+  }
   // Try to see if large OS pages are supported
   unsigned long err = 0;
-  bool ok = mi_option_is_enabled(mi_option_large_os_pages);
+  bool ok = mi_option_is_enabled(mi_option_large_os_pages) || mi_option_is_enabled(mi_option_reserve_huge_os_pages);
   if (ok) {
     // To use large pages on Windows, we first need access permission
     // Set "Lock pages in memory" permission in the group policy editor
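Aside: the NtAllocateVirtualMemoryEx lookup follows the same run-time-resolution idiom as the existing VirtualAlloc2 lookup: resolve the API with GetProcAddress and leave the pointer NULL if the OS is too old. A self-contained sketch of the idiom (assuming kernelbase.dll hosts VirtualAlloc2, as in the surrounding code):

#include <windows.h>

typedef PVOID (__stdcall *PVirtualAlloc2)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, void*, ULONG);
static PVirtualAlloc2 pVirtualAlloc2 = NULL;

static void resolve_virtual_alloc2(void) {
  // VirtualAlloc2 only exists on Windows 10 / Server 2016 and later,
  // so resolve it dynamically instead of linking against it
  HINSTANCE hDll = LoadLibrary(TEXT("kernelbase.dll"));
  if (hDll != NULL) {
    pVirtualAlloc2 = (PVirtualAlloc2)GetProcAddress(hDll, "VirtualAlloc2");
    FreeLibrary(hDll);
  }
  // pVirtualAlloc2 stays NULL on older systems; callers must check before use
}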
@@ -161,7 +173,7 @@ void _mi_os_init() {
 
 static bool mi_os_mem_free(void* addr, size_t size, mi_stats_t* stats)
 {
-  if (addr == NULL || size == 0) return true;
+  if (addr == NULL || size == 0 || mi_os_is_huge_reserved(addr)) return true;
   bool err = false;
 #if defined(_WIN32)
   err = (VirtualFree(addr, 0, MEM_RELEASE) == 0);
@@ -185,25 +197,49 @@ static bool mi_os_mem_free(void* addr, size_t size, mi_stats_t* stats)
 #ifdef _WIN32
 static void* mi_win_virtual_allocx(void* addr, size_t size, size_t try_alignment, DWORD flags) {
 #if defined(MEM_EXTENDED_PARAMETER_TYPE_BITS)
+  // on modern Windows try to use NtAllocateVirtualMemoryEx for 1GiB huge pages
+  if ((size % ((uintptr_t)1 << 30)) == 0 /* 1GiB multiple */
+      && (flags & MEM_LARGE_PAGES) != 0 && (flags & MEM_COMMIT) != 0
+      && (addr != NULL || try_alignment == 0 || try_alignment % _mi_os_page_size() == 0)
+      && pNtAllocateVirtualMemoryEx != NULL)
+  {
+    #ifndef MEM_EXTENDED_PARAMETER_NONPAGED_HUGE
+    #define MEM_EXTENDED_PARAMETER_NONPAGED_HUGE (0x10)
+    #endif
+    MEM_EXTENDED_PARAMETER param = { 0, 0 };
+    param.Type = 5; // == MemExtendedParameterAttributeFlags;
+    param.ULong64 = MEM_EXTENDED_PARAMETER_NONPAGED_HUGE;
+    SIZE_T psize = size;
+    void* base = addr;
+    NTSTATUS err = (*pNtAllocateVirtualMemoryEx)(GetCurrentProcess(), &base, &psize, flags | MEM_RESERVE, PAGE_READWRITE, &param, 1);
+    if (err == 0) {
+      return base;
+    }
+    else {
+      // else fall back to regular large OS pages
+      _mi_warning_message("unable to allocate huge (1GiB) page, trying large (2MiB) page instead (error %lx)\n", err);
+    }
+  }
+
+  // on modern Windows try to use VirtualAlloc2 for aligned allocation
   if (try_alignment > 0 && (try_alignment % _mi_os_page_size()) == 0 && pVirtualAlloc2 != NULL) {
-    // on modern Windows try to use VirtualAlloc2 for aligned allocation
     MEM_ADDRESS_REQUIREMENTS reqs = { 0 };
     reqs.Alignment = try_alignment;
     MEM_EXTENDED_PARAMETER param = { 0 };
     param.Type = MemExtendedParameterAddressRequirements;
     param.Pointer = &reqs;
-    return (*pVirtualAlloc2)(addr, NULL, size, flags, PAGE_READWRITE, &param, 1);
+    return (*pVirtualAlloc2)(GetCurrentProcess(), addr, size, flags, PAGE_READWRITE, &param, 1);
   }
 #endif
   return VirtualAlloc(addr, size, flags, PAGE_READWRITE);
 }
 
-static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DWORD flags) {
+static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DWORD flags, bool large_only) {
   static volatile uintptr_t large_page_try_ok = 0;
   void* p = NULL;
-  if (use_large_os_page(size, try_alignment)) {
+  if (large_only || use_large_os_page(size, try_alignment)) {
     uintptr_t try_ok = mi_atomic_read(&large_page_try_ok);
-    if (try_ok > 0) {
+    if (!large_only && try_ok > 0) {
       // if a large page allocation fails, it seems the calls to VirtualAlloc get very expensive.
       // therefore, once a large page allocation failed, we don't try again for `large_page_try_ok` times.
       mi_atomic_compare_exchange(&large_page_try_ok, try_ok - 1, try_ok);
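Aside: the 1GiB-multiple test needs its shift parenthesized: in C, % binds tighter than <<, so size % (uintptr_t)1 << 30 would parse as (size % 1) << 30 and always compare equal to zero. A tiny check illustrating the pitfall:

#include <stdio.h>
#include <stdint.h>

int main(void) {
  size_t size = (size_t)3 << 20;  // 3MiB, not a multiple of 1GiB
  // '%' binds tighter than '<<': ((size % 1) << 30) == 0, so this prints 1
  printf("unparenthesized: %d\n", (size % (uintptr_t)1 << 30) == 0);
  // parenthesized as intended: prints 0, since 3MiB is not a 1GiB multiple
  printf("parenthesized:   %d\n", (size % ((uintptr_t)1 << 30)) == 0);
  return 0;
}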
@@ -211,6 +247,7 @@ static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment,
     else {
       // large OS pages must always reserve and commit.
       p = mi_win_virtual_allocx(addr, size, try_alignment, MEM_LARGE_PAGES | MEM_COMMIT | MEM_RESERVE | flags);
+      if (large_only) return p;
       // fall back to non-large page allocation on error (`p == NULL`).
       if (p == NULL) {
         mi_atomic_write(&large_page_try_ok,10); // on error, don't try again for the next N allocations
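Aside: large_page_try_ok implements a shared back-off: after a failed large-page attempt, the expensive path is skipped for the next N allocations, and concurrent threads decrement the counter with a compare-and-swap so a stale read cannot resurrect it. A sketch of the scheme in portable C11 atomics (hypothetical names; the diff uses mimalloc's own atomic wrappers):

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic(unsigned) large_page_try_ok = 0;

// returns true if a large-page attempt should be made
static bool should_try_large_page(void) {
  unsigned try_ok = atomic_load(&large_page_try_ok);
  if (try_ok > 0) {
    // best-effort decrement: if the CAS loses a race, another thread
    // consumed this skip slot, which is fine
    atomic_compare_exchange_strong(&large_page_try_ok, &try_ok, try_ok - 1);
    return false;  // still backing off
  }
  return true;
}

// call after a failed large-page allocation
static void note_large_page_failure(void) {
  atomic_store(&large_page_try_ok, 10);  // skip the next ~10 attempts
}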
@@ -237,12 +274,13 @@ static void* mi_wasm_heap_grow(size_t size, size_t try_alignment) {
   return (void*)aligned_base;
 }
 #else
-static void* mi_unix_mmapx(size_t size, size_t try_alignment, int protect_flags, int flags, int fd) {
+#define MI_OS_USE_MMAP
+static void* mi_unix_mmapx(void* addr, size_t size, size_t try_alignment, int protect_flags, int flags, int fd) {
   void* p = NULL;
   #if (MI_INTPTR_SIZE >= 8) && !defined(MAP_ALIGNED)
   // on 64-bit systems, use the virtual address area after 4TiB for 4MiB aligned allocations
-  static volatile intptr_t aligned_base = ((intptr_t)1 << 42); // starting at 4TiB
-  if (try_alignment <= MI_SEGMENT_SIZE && (size%MI_SEGMENT_SIZE)==0) {
+  static volatile intptr_t aligned_base = ((intptr_t)4 << 40); // starting at 4TiB
+  if (addr==NULL && try_alignment <= MI_SEGMENT_SIZE && (size%MI_SEGMENT_SIZE)==0) {
     intptr_t hint = mi_atomic_add(&aligned_base,size) - size;
     if (hint%try_alignment == 0) {
       p = mmap((void*)hint,size,protect_flags,flags,fd,0);
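Aside: the aligned_base hint exploits that mmap usually honors an address hint in an otherwise-unused region, so segment-multiple requests come back segment-aligned without an over-allocate-and-trim dance. A minimal sketch under the same assumptions (64-bit address space, 4MiB segments; hypothetical names):

#include <stdatomic.h>
#include <stdint.h>
#include <sys/mman.h>

#define SEGMENT_SIZE ((size_t)4 << 20)  // stand-in for MI_SEGMENT_SIZE

// monotonically increasing hint, starting at 4TiB as in the diff
static _Atomic(intptr_t) aligned_base = (intptr_t)4 << 40;

static void* alloc_segment_aligned(size_t size) {
  // size must be a multiple of SEGMENT_SIZE so every hint stays aligned
  intptr_t hint = atomic_fetch_add(&aligned_base, (intptr_t)size);
  void* p = mmap((void*)hint, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  // the kernel may still place the mapping elsewhere; callers re-check alignment
  return (p == MAP_FAILED) ? NULL : p;
}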
@@ -251,12 +289,13 @@ static void* mi_unix_mmapx(size_t size, size_t try_alignment, int protect_flags,
   }
   #endif
   if (p==NULL) {
-    p = mmap(NULL,size,protect_flags,flags,fd,0);
+    p = mmap(addr,size,protect_flags,flags,fd,0);
     if (p==MAP_FAILED) p = NULL;
   }
   return p;
 }
 
-static void* mi_unix_mmap(size_t size, size_t try_alignment, int protect_flags) {
+static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int protect_flags, bool large_only) {
   void* p = NULL;
   #if !defined(MAP_ANONYMOUS)
   #define MAP_ANONYMOUS MAP_ANON
@@ -278,10 +317,10 @@ static void* mi_unix_mmap(size_t size, size_t try_alignment, int protect_flags)
   // macOS: tracking anonymous page with a specific ID. (All up to 98 are taken officially but LLVM sanitizers had taken 99)
   fd = VM_MAKE_TAG(100);
   #endif
-  if (use_large_os_page(size, try_alignment)) {
+  if (large_only || use_large_os_page(size, try_alignment)) {
     static volatile uintptr_t large_page_try_ok = 0;
     uintptr_t try_ok = mi_atomic_read(&large_page_try_ok);
-    if (try_ok > 0) {
+    if (!large_only && try_ok > 0) {
       // If the OS is not configured for large OS pages, or the user does not have
       // enough permission, the `mmap` will always fail (but it might also fail for other reasons).
       // Therefore, once a large page allocation failed, we don't try again for `large_page_try_ok` times
@@ -297,27 +336,32 @@ static void* mi_unix_mmap(size_t size, size_t try_alignment, int protect_flags)
       #ifdef MAP_HUGETLB
       lflags |= MAP_HUGETLB;
       #endif
-      #ifdef MAP_HUGE_2MB
-      lflags |= MAP_HUGE_2MB;
-      #endif
+      #ifdef MAP_HUGE_1GB
+      if ((size % ((uintptr_t)1 << 30)) == 0) {
+        lflags |= MAP_HUGE_1GB;
+      }
+      else
+      #endif
+      {
+        #ifdef MAP_HUGE_2MB
+        lflags |= MAP_HUGE_2MB;
+        #endif
+      }
       #ifdef VM_FLAGS_SUPERPAGE_SIZE_2MB
       lfd |= VM_FLAGS_SUPERPAGE_SIZE_2MB;
       #endif
-      if (lflags != flags) {
+      if (large_only || lflags != flags) {
         // try large OS page allocation
-        p = mi_unix_mmapx(size, try_alignment, protect_flags, lflags, lfd);
-        if (p == MAP_FAILED) {
+        p = mi_unix_mmapx(addr, size, try_alignment, protect_flags, lflags, lfd);
+        if (large_only) return p;
+        if (p == NULL) {
           mi_atomic_write(&large_page_try_ok, 10); // on error, don't try again for the next N allocations
-          p = NULL; // and fall back to regular mmap
         }
       }
     }
   }
   if (p == NULL) {
-    p = mi_unix_mmapx(size, try_alignment, protect_flags, flags, fd);
-    if (p == MAP_FAILED) {
-      p = NULL;
-    }
+    p = mi_unix_mmapx(addr, size, try_alignment, protect_flags, flags, fd);
     #if defined(MADV_HUGEPAGE)
     // Many Linux systems don't allow MAP_HUGETLB but they support instead
     // transparent huge pages (TPH). It is not required to call `madvise` with MADV_HUGE
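Aside: MAP_HUGETLB requests explicit huge pages, which fail outright unless the system has them configured (e.g. vm.nr_hugepages > 0 on Linux) -- exactly why the code above falls back to a regular mmap. A standalone probe:

#include <stdio.h>
#include <sys/mman.h>

int main(void) {
  size_t size = (size_t)2 << 20;  // one 2MiB huge page
  void* p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
  if (p == MAP_FAILED) {
    perror("mmap(MAP_HUGETLB)");  // typically ENOMEM when no huge pages are reserved
    return 1;
  }
  munmap(p, size);
  puts("huge page allocation succeeded");
  return 0;
}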
@@ -325,7 +369,7 @@ static void* mi_unix_mmap(size_t size, size_t try_alignment, int protect_flags)
     // in that case -- in particular for our large regions (in `memory.c`).
     // However, some systems only allow TPH if called with explicit `madvise`, so
     // when large OS pages are enabled for mimalloc, we call `madvise` anyway.
-    else if (use_large_os_page(size, try_alignment)) {
+    if (use_large_os_page(size, try_alignment)) {
       madvise(p, size, MADV_HUGEPAGE);
     }
     #endif
@@ -340,17 +384,19 @@ static void* mi_os_mem_alloc(size_t size, size_t try_alignment, bool commit, mi_
   mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
   if (size == 0) return NULL;
 
-  void* p = NULL;
-#if defined(_WIN32)
-  int flags = MEM_RESERVE;
-  if (commit) flags |= MEM_COMMIT;
-  p = mi_win_virtual_alloc(NULL, size, try_alignment, flags);
-#elif defined(__wasi__)
-  p = mi_wasm_heap_grow(size, try_alignment);
-#else
-  int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE);
-  p = mi_unix_mmap(size, try_alignment, protect_flags);
-#endif
+  void* p = mi_os_alloc_from_huge_reserved(size, try_alignment, commit);
+  if (p != NULL) return p;
+
+#if defined(_WIN32)
+  int flags = MEM_RESERVE;
+  if (commit) flags |= MEM_COMMIT;
+  p = mi_win_virtual_alloc(NULL, size, try_alignment, flags, false);
+#elif defined(__wasi__)
+  p = mi_wasm_heap_grow(size, try_alignment);
+#else
+  int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE);
+  p = mi_unix_mmap(NULL, size, try_alignment, protect_flags, false);
+#endif
   _mi_stat_increase(&stats->mmap_calls, 1);
   if (p != NULL) {
     _mi_stat_increase(&stats->reserved, size);
@@ -399,7 +445,7 @@ static void* mi_os_mem_alloc_aligned(size_t size, size_t alignment, bool commit,
       // otherwise free and allocate at an aligned address in there
       mi_os_mem_free(p, over_size, stats);
       void* aligned_p = mi_align_up_ptr(p, alignment);
-      p = mi_win_virtual_alloc(aligned_p, size, alignment, flags);
+      p = mi_win_virtual_alloc(aligned_p, size, alignment, flags, false);
       if (p == aligned_p) break; // success!
       if (p != NULL) { // should not happen?
         mi_os_mem_free(p, size, stats);
@@ -656,3 +702,127 @@ bool _mi_os_shrink(void* p, size_t oldsize, size_t newsize, mi_stats_t* stats) {
   return mi_os_mem_free(start, size, stats);
 #endif
 }
+
+
+/* ----------------------------------------------------------------------------
+Reserve huge OS pages (1GiB)
+-----------------------------------------------------------------------------*/
+#define MI_HUGE_OS_PAGE_SIZE ((size_t)1 << 30) // 1GiB
+
+typedef struct mi_huge_info_s {
+  uint8_t*  start;
+  ptrdiff_t reserved;
+  volatile ptrdiff_t used;
+} mi_huge_info_t;
+
+static mi_huge_info_t os_huge_reserved = { NULL, 0, 0 };
+
+static bool mi_os_is_huge_reserved(void* p) {
+  return (os_huge_reserved.start != NULL &&
+          (uint8_t*)p >= os_huge_reserved.start &&
+          (uint8_t*)p < os_huge_reserved.start + os_huge_reserved.reserved);
+}
+
+static void* mi_os_alloc_from_huge_reserved(size_t size, size_t try_alignment, bool commit)
+{
+  // only allow large aligned allocations
+  if (size < MI_SEGMENT_SIZE || (size % MI_SEGMENT_SIZE) != 0) return NULL;
+  if (try_alignment > MI_SEGMENT_SIZE) return NULL;
+  if (!commit) return NULL;
+  if (os_huge_reserved.start==NULL) return NULL;
+  if (mi_atomic_iread(&os_huge_reserved.used) >= os_huge_reserved.reserved) return NULL; // already full
+
+  // always aligned
+  mi_assert_internal( os_huge_reserved.used % MI_SEGMENT_SIZE == 0 );
+  mi_assert_internal( (uintptr_t)os_huge_reserved.start % MI_SEGMENT_SIZE == 0 );
+
+  // try to reserve space
+  ptrdiff_t next = mi_atomic_add( &os_huge_reserved.used, (ptrdiff_t)size );
+  if (next > os_huge_reserved.reserved) {
+    // "free" our over-allocation
+    mi_atomic_add( &os_huge_reserved.used, -((ptrdiff_t)size) );
+    return NULL;
+  }
+
+  // success!
+  uint8_t* p = os_huge_reserved.start + next - (ptrdiff_t)size;
+  mi_assert_internal( (uintptr_t)p % MI_SEGMENT_SIZE == 0 );
+  return p;
+}
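Aside: mi_os_alloc_from_huge_reserved is a lock-free bump allocator over the reserved range: an atomic add claims space, and a claim that overshoots the reservation is rolled back. The same scheme in isolation (C11 atomics, hypothetical names):

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

typedef struct arena_s {
  uint8_t*          start;     // base of the reserved range
  ptrdiff_t         reserved;  // total bytes reserved
  _Atomic ptrdiff_t used;      // bump cursor
} arena_t;

static void* arena_alloc(arena_t* a, size_t size) {
  // claim `size` bytes; fetch_add returns the old cursor value
  ptrdiff_t old = atomic_fetch_add(&a->used, (ptrdiff_t)size);
  ptrdiff_t next = old + (ptrdiff_t)size;
  if (next > a->reserved) {
    atomic_fetch_sub(&a->used, (ptrdiff_t)size);  // roll back the over-claim
    return NULL;
  }
  return a->start + old;
}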
+/*
+static void mi_os_free_huge_reserved() {
+  uint8_t* addr = os_huge_reserved.start;
+  size_t total  = os_huge_reserved.reserved;
+  os_huge_reserved.reserved = 0;
+  os_huge_reserved.start = NULL;
+  for( size_t current = 0; current < total; current += MI_HUGE_OS_PAGE_SIZE) {
+    _mi_os_free(addr + current, MI_HUGE_OS_PAGE_SIZE, &_mi_stats_main);
+  }
+}
+*/
+#if !(MI_INTPTR_SIZE >= 8 && (defined(_WIN32) || defined(MI_OS_USE_MMAP)))
+int mi_reserve_huge_os_pages(size_t pages, double max_secs) mi_attr_noexcept {
+  return -2; // cannot allocate
+}
+#else
+int mi_reserve_huge_os_pages( size_t pages, double max_secs ) mi_attr_noexcept
+{
+  if (max_secs==0) return -1; // timeout
+  if (pages==0) return 0;     // ok
+  if (os_huge_reserved.start != NULL) return -2; // already reserved
+
+  // Allocate one page at a time but try to place them contiguously.
+  // We allocate one page at a time to be able to abort if it takes too long.
+  double start_t = _mi_clock_start();
+  uint8_t* start = (uint8_t*)((uintptr_t)8 << 40); // 8TiB virtual start address
+  uint8_t* addr  = start; // current top of the allocations
+  for (size_t page = 0; page < pages; page++, addr += MI_HUGE_OS_PAGE_SIZE ) {
+    // allocate a huge page
+    void* p = NULL;
+    #ifdef _WIN32
+    p = mi_win_virtual_alloc(addr, MI_HUGE_OS_PAGE_SIZE, 0, MEM_LARGE_PAGES | MEM_COMMIT | MEM_RESERVE, true);
+    #elif defined(MI_OS_USE_MMAP)
+    p = mi_unix_mmap(addr, MI_HUGE_OS_PAGE_SIZE, 0, PROT_READ | PROT_WRITE, true);
+    #else
+    // always fail
+    #endif
+
+    // Did we succeed at a contiguous address?
+    if (p != addr) {
+      if (p != NULL) {
+        _mi_warning_message("could not allocate contiguous huge page %zu at 0x%p\n", page, addr);
+        _mi_os_free(p, MI_HUGE_OS_PAGE_SIZE, &_mi_stats_main );
+      }
+      else {
+        #ifdef _WIN32
+        int err = GetLastError();
+        #else
+        int err = errno;
+        #endif
+        _mi_warning_message("could not allocate huge page %zu at 0x%p, error: %i\n", page, addr, err);
+      }
+      return -2;
+    }
+    // success, record it
+    if (page==0) {
+      os_huge_reserved.start = addr;
+    }
+    os_huge_reserved.reserved += MI_HUGE_OS_PAGE_SIZE;
+    _mi_stat_increase(&_mi_stats_main.committed, MI_HUGE_OS_PAGE_SIZE);
+    _mi_stat_increase(&_mi_stats_main.reserved, MI_HUGE_OS_PAGE_SIZE);
+
+    // check for timeout
+    double elapsed = _mi_clock_end(start_t);
+    if (elapsed > max_secs) return (-1); // timeout
+    if (page >= 1) {
+      double estimate = ((elapsed / (double)(page+1)) * (double)pages);
+      if (estimate > 1.5*max_secs) return (-1); // seems like we are going to timeout
+    }
+  }
+  _mi_verbose_message("reserved %zu huge pages\n", pages);
+  return 0;
+}
+#endif
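Aside: the loop's early abort extrapolates from progress so far: after k pages in `elapsed` seconds, the full run is projected at elapsed * pages / k, and the reservation gives up once the projection exceeds the budget by 50%. The check on its own (hypothetical helper):

#include <stdbool.h>
#include <stddef.h>

static bool reserve_would_timeout(double elapsed, size_t pages_done,
                                  size_t pages_total, double max_secs) {
  if (elapsed > max_secs) return true;   // hard timeout
  if (pages_done == 0) return false;     // nothing to extrapolate from yet
  double estimate = (elapsed / (double)pages_done) * (double)pages_total;
  return estimate > 1.5 * max_secs;      // projected to blow the budget
}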
src/page.c (58 lines changed)
@@ -71,7 +71,7 @@ static bool mi_page_is_valid_init(mi_page_t* page) {
   mi_assert_internal(page->block_size > 0);
   mi_assert_internal(page->used <= page->capacity);
   mi_assert_internal(page->capacity <= page->reserved);
 
   mi_segment_t* segment = _mi_page_segment(page);
   uint8_t* start = _mi_page_start(segment,page,NULL);
   mi_assert_internal(start == _mi_segment_page_start(segment,page,page->block_size,NULL));
@@ -137,7 +137,7 @@ void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay ) {
 // Note: The exchange must be done atomically as this is used right after
 // moving to the full list in `mi_page_collect_ex` and we need to
 // ensure that there was no race where the page became unfull just before the move.
-static void mi_page_thread_free_collect(mi_page_t* page)
+static void _mi_page_thread_free_collect(mi_page_t* page)
 {
   mi_block_t* head;
   mi_thread_free_t tfree;
@@ -152,47 +152,51 @@ static void mi_page_thread_free_collect(mi_page_t* page)
   if (head == NULL) return;
 
   // find the tail
-  uint16_t count = 1;
+  uintptr_t count = 1;
   mi_block_t* tail = head;
   mi_block_t* next;
   while ((next = mi_block_next(page,tail)) != NULL) {
     count++;
     tail = next;
   }
 
-  // and prepend to the free list
-  mi_block_set_next(page,tail, page->free);
-  page->free = head;
+  // and append the current local free list
+  mi_block_set_next(page,tail, page->local_free);
+  page->local_free = head;
 
   // update counts now
   mi_atomic_subtract(&page->thread_freed, count);
   page->used -= count;
 }
 
-void _mi_page_free_collect(mi_page_t* page) {
+void _mi_page_free_collect(mi_page_t* page, bool force) {
   mi_assert_internal(page!=NULL);
-  //if (page->free != NULL) return; // avoid expensive append
 
-  // free the local free list
+  // collect the thread free list
+  if (force || mi_tf_block(page->thread_free) != NULL) { // quick test to avoid an atomic operation
+    _mi_page_thread_free_collect(page);
+  }
+
+  // and the local free list
   if (page->local_free != NULL) {
-    if (mi_likely(page->free == NULL)) {
+    if (mi_unlikely(page->free == NULL)) {
       // usual case
       page->free = page->local_free;
       page->local_free = NULL;
     }
-    else {
-      mi_block_t* tail = page->free;
+    else if (force) {
+      // append -- only on shutdown (force) as this is a linear operation
+      mi_block_t* tail = page->local_free;
       mi_block_t* next;
       while ((next = mi_block_next(page, tail)) != NULL) {
        tail = next;
      }
-      mi_block_set_next(page, tail, page->local_free);
-      page->local_free = NULL;
+      mi_block_set_next(page, tail, page->free);
+      page->free = page->local_free;
+      page->local_free = NULL;
    }
  }
-  // and the thread free list
-  if (mi_tf_block(page->thread_free) != NULL) { // quick test to avoid an atomic operation
-    mi_page_thread_free_collect(page);
-  }
+
+  mi_assert_internal(!force || page->local_free == NULL);
 }
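Aside: the reworked collect is about splice costs on singly-linked lists: moving local_free into an empty free list is O(1), while appending behind an existing list means walking to its tail, so the new code only pays that walk when force is set (heap collection / shutdown). The two splices in isolation (hypothetical names):

#include <stddef.h>

typedef struct block_s { struct block_s* next; } block_t;

// O(1): move `src` into an empty destination
static void splice_into_empty(block_t** dst, block_t** src) {
  *dst = *src;
  *src = NULL;
}

// O(n): hang `*dst` behind the tail of `*src`, then take over the result;
// this mirrors the force-only path of _mi_page_free_collect
static void splice_append(block_t** dst, block_t** src) {
  block_t* tail = *src;
  if (tail == NULL) return;
  while (tail->next != NULL) tail = tail->next;  // the linear walk
  tail->next = *dst;
  *dst = *src;
  *src = NULL;
}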
@@ -205,7 +209,7 @@ void _mi_page_free_collect(mi_page_t* page) {
 void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
   mi_assert_expensive(mi_page_is_valid_init(page));
   mi_assert_internal(page->heap == NULL);
-  _mi_page_free_collect(page);
+  _mi_page_free_collect(page,false);
   mi_page_queue_t* pq = mi_page_queue(heap, page->block_size);
   mi_page_queue_push(heap, pq, page);
   mi_assert_expensive(_mi_page_is_valid(page));
@@ -304,7 +308,7 @@ static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) {
   if (mi_page_is_in_full(page)) return;
 
   mi_page_queue_enqueue_from(&page->heap->pages[MI_BIN_FULL], pq, page);
-  mi_page_thread_free_collect(page); // try to collect right away in case another thread freed just before MI_USE_DELAYED_FREE was set
+  _mi_page_free_collect(page,false); // try to collect right away in case another thread freed just before MI_USE_DELAYED_FREE was set
 }
@@ -390,7 +394,7 @@ void _mi_page_retire(mi_page_t* page) {
   // is the only page left with free blocks. It is not clear
   // how to check this efficiently though... for now we just check
   // if its neighbours are almost fully used.
-  if (mi_likely(page->block_size <= MI_MEDIUM_OBJ_SIZE_MAX)) {
+  if (mi_likely(page->block_size <= MI_SMALL_SIZE_MAX)) {
     if (mi_page_mostly_used(page->prev) && mi_page_mostly_used(page->next)) {
       _mi_stat_counter_increase(&_mi_stats_main.page_no_retire,1);
       return; // don't retire after all
@@ -595,7 +599,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
       count++;
 
       // 0. collect freed blocks by us and other threads
-      _mi_page_free_collect(page);
+      _mi_page_free_collect(page,false);
 
       // 1. if the page contains free blocks, we are done
       if (mi_page_immediate_available(page)) {
@@ -662,7 +666,7 @@ static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {
     mi_assert_internal(mi_page_immediate_available(page));
   }
   else {
-    _mi_page_free_collect(page);
+    _mi_page_free_collect(page,false);
   }
   if (mi_page_immediate_available(page)) {
     return page; // fast path
@@ -734,10 +738,10 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept
 
   // call potential deferred free routines
   _mi_deferred_free(heap, false);
 
+  // free delayed frees from other threads
+  _mi_heap_delayed_free(heap);
+
   // huge allocation?
   mi_page_t* page;
   if (mi_unlikely(size > MI_LARGE_OBJ_SIZE_MAX)) {
src/stats.c (34 lines changed)
@@ -28,11 +28,14 @@ void _mi_stats_done(mi_stats_t* stats) {
   Statistics operations
----------------------------------------------------------- */
 
+static bool mi_is_in_main(void* stat) {
+  return ((uint8_t*)stat >= (uint8_t*)&_mi_stats_main
+          && (uint8_t*)stat < ((uint8_t*)&_mi_stats_main + sizeof(mi_stats_t)));
+}
+
 static void mi_stat_update(mi_stat_count_t* stat, int64_t amount) {
   if (amount == 0) return;
-  bool in_main = ((uint8_t*)stat >= (uint8_t*)&_mi_stats_main
-                  && (uint8_t*)stat < ((uint8_t*)&_mi_stats_main + sizeof(mi_stats_t)));
-  if (in_main)
+  if (mi_is_in_main(stat))
   {
     // add atomically (for abandoned pages)
     int64_t current = mi_atomic_add(&stat->current,amount);
@@ -58,11 +61,16 @@ static void mi_stat_update(mi_stat_count_t* stat, int64_t amount) {
 }
 
 void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount) {
-  mi_atomic_add( &stat->count, 1 );
-  mi_atomic_add( &stat->total, (int64_t)amount );
+  if (mi_is_in_main(stat)) {
+    mi_atomic_add( &stat->count, 1 );
+    mi_atomic_add( &stat->total, (int64_t)amount );
+  }
+  else {
+    stat->count++;
+    stat->total += amount;
+  }
 }
 
 void _mi_stat_increase(mi_stat_count_t* stat, size_t amount) {
   mi_stat_update(stat, (int64_t)amount);
 }
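Aside: the mi_is_in_main split is a general pattern: counters that live in the shared main statistics object can be hit from several threads (e.g. for abandoned pages) and need atomic updates, while counters embedded in a thread-local heap are only ever touched by their owner and can use plain arithmetic. A generic sketch (hypothetical names):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct counter_s { _Atomic int64_t value; } counter_t;

static counter_t main_counter;  // shared across threads

static bool is_in_main(const void* c) {
  return (const uint8_t*)c >= (const uint8_t*)&main_counter
      && (const uint8_t*)c <  (const uint8_t*)&main_counter + sizeof(main_counter);
}

static void counter_add(counter_t* c, int64_t amount) {
  if (is_in_main(c)) {
    atomic_fetch_add(&c->value, amount);  // shared: atomic read-modify-write
  }
  else {
    // thread-local: no contention, so a relaxed load/store pair is enough
    int64_t v = atomic_load_explicit(&c->value, memory_order_relaxed);
    atomic_store_explicit(&c->value, v + amount, memory_order_relaxed);
  }
}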
@@ -276,8 +284,8 @@ static void _mi_stats_print(mi_stats_t* stats, double secs, FILE* out) mi_attr_n
   _mi_fprintf(out,"\n");
 }
 
-static double mi_clock_end(double start);
-static double mi_clock_start(void);
+double _mi_clock_end(double start);
+double _mi_clock_start(void);
 
 static double mi_time_start = 0.0;
 
 static mi_stats_t* mi_stats_get_default(void) {
@@ -289,7 +297,7 @@ void mi_stats_reset(void) mi_attr_noexcept {
   mi_stats_t* stats = mi_stats_get_default();
   if (stats != &_mi_stats_main) { memset(stats, 0, sizeof(mi_stats_t)); }
   memset(&_mi_stats_main, 0, sizeof(mi_stats_t));
-  mi_time_start = mi_clock_start();
+  mi_time_start = _mi_clock_start();
 }
 
 static void mi_stats_print_ex(mi_stats_t* stats, double secs, FILE* out) {
@@ -301,11 +309,11 @@ static void mi_stats_print_ex(mi_stats_t* stats, double secs, FILE* out) {
 }
 
 void mi_stats_print(FILE* out) mi_attr_noexcept {
-  mi_stats_print_ex(mi_stats_get_default(),mi_clock_end(mi_time_start),out);
+  mi_stats_print_ex(mi_stats_get_default(),_mi_clock_end(mi_time_start),out);
 }
 
 void mi_thread_stats_print(FILE* out) mi_attr_noexcept {
-  _mi_stats_print(mi_stats_get_default(), mi_clock_end(mi_time_start), out);
+  _mi_stats_print(mi_stats_get_default(), _mi_clock_end(mi_time_start), out);
 }
@@ -350,7 +358,7 @@ static double mi_clock_now(void) {
 
 static double mi_clock_diff = 0.0;
 
-static double mi_clock_start(void) {
+double _mi_clock_start(void) {
   if (mi_clock_diff == 0.0) {
     double t0 = mi_clock_now();
     mi_clock_diff = mi_clock_now() - t0;
@@ -358,7 +366,7 @@ static double mi_clock_start(void) {
   return mi_clock_now();
 }
 
-static double mi_clock_end(double start) {
+double _mi_clock_end(double start) {
   double end = mi_clock_now();
   return (end - start - mi_clock_diff);
 }
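Aside: the clock helpers calibrate once for the cost of reading the clock itself (two back-to-back reads) and subtract that overhead from every measured interval. A POSIX sketch of the same idea (clock_gettime assumed available):

#include <stdio.h>
#include <time.h>

static double clock_now(void) {
  struct timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return (double)ts.tv_sec + 1e-9 * (double)ts.tv_nsec;
}

static double clock_overhead = 0.0;

double clock_start(void) {
  if (clock_overhead == 0.0) {
    double t0 = clock_now();
    clock_overhead = clock_now() - t0;  // cost of a single clock read
  }
  return clock_now();
}

double clock_end(double start) {
  return clock_now() - start - clock_overhead;
}

int main(void) {
  double t = clock_start();
  for (volatile int i = 0; i < 1000000; i++) { /* busy work */ }
  printf("elapsed: %.9fs\n", clock_end(t));
  return 0;
}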
test/test-stress.c

@@ -154,6 +154,9 @@ int main(int argc, char** argv) {
     if (n > 0) N = n;
   }
   printf("start with %i threads with a %i%% load-per-thread\n", THREADS, N);
+  //int res = mi_reserve_huge_os_pages(4,1);
+  //printf("(reserve huge: %i\n)", res);
+
   //bench_start_program();
   memset((void*)transfer, 0, TRANSFERS*sizeof(void*));
   run_os_threads(THREADS);