use atomic ops to guard large page tries on windows

daan 2019-07-21 17:13:36 -07:00
parent 7b7c36c8c7
commit b611c7fb34
1 changed file with 9 additions and 3 deletions

@@ -206,20 +206,26 @@ static void* mi_win_virtual_allocx(void* addr, size_t size, size_t try_alignment
 }
 static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DWORD flags) {
-  static size_t large_page_try_ok = 0;
+  static volatile uintptr_t large_page_try_ok = 0;
   void* p = NULL;
   if (use_large_os_page(size, try_alignment)) {
-    if (large_page_try_ok > 0) {
+    uintptr_t try_ok = mi_atomic_read(&large_page_try_ok);
+    if (try_ok > 0) {
       // if a large page allocation fails, it seems the calls to VirtualAlloc get very expensive.
       // therefore, once a large page allocation failed, we don't try again for `large_page_try_ok` times.
-      large_page_try_ok--;
+      mi_atomic_compare_exchange(&large_page_try_ok, try_ok - 1, try_ok);
     }
     else {
       // large OS pages must always reserve and commit.
       p = mi_win_virtual_allocx(addr, size, try_alignment, MEM_LARGE_PAGES | MEM_COMMIT | MEM_RESERVE | flags);
       // fall back to non-large page allocation on error (`p == NULL`).
       if (p == NULL) {
-        large_page_try_ok = 10; // on error, don't try again for the next N allocations
+        mi_atomic_write(&large_page_try_ok, 10); // on error, don't try again for the next N allocations
       }
     }
   }
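
For context, here is a minimal, self-contained sketch of the same guard pattern using standard C11 atomics in place of mimalloc's mi_atomic_* wrappers. It is not mimalloc's code: the names alloc_with_large_page_guard, try_large_page_alloc, and normal_alloc are hypothetical stand-ins, and only the structure mirrors the diff above: read the back-off counter atomically, decrement it with a compare-exchange so racing threads do not lose updates, and reset it with an atomic store when a large-page allocation fails.

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

/* hypothetical stand-ins for the real allocation paths */
extern void* try_large_page_alloc(size_t size);
extern void* normal_alloc(size_t size);

/* back-off counter: > 0 means "skip large pages for that many more allocations" */
static _Atomic uintptr_t large_page_try_ok = 0;

void* alloc_with_large_page_guard(size_t size) {
  uintptr_t try_ok = atomic_load(&large_page_try_ok);
  if (try_ok > 0) {
    // a large-page allocation failed recently: skip the expensive attempt and count down.
    // the compare-exchange (rather than a plain decrement) avoids lost updates when
    // several threads race on the counter; if another thread changed it first, the
    // exchange simply fails and that thread's value stands.
    atomic_compare_exchange_strong(&large_page_try_ok, &try_ok, try_ok - 1);
  }
  else {
    void* p = try_large_page_alloc(size);
    if (p != NULL) return p;
    // on error, back off: don't try large pages again for the next 10 allocations
    atomic_store(&large_page_try_ok, 10);
  }
  return normal_alloc(size);
}

This mirrors the change in the diff: the unsynchronized size_t counter becomes an atomic value, the `--` becomes a compare-exchange, and the reset to 10 on failure becomes an atomic write, so concurrent allocating threads see a consistent back-off count.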