Merge branch 'dev' into dev-slice

commit 7c2b79bef0
Author: daan
Date:   2020-09-05 09:17:59 -07:00

5 changed files with 26 additions and 15 deletions

==========================================================================

@@ -61,10 +61,10 @@ terms of the MIT license. A copy of the license can be found in the file
 #define mi_atomic_and_acq_rel(p,x)      mi_atomic(fetch_and_explicit)(p,x,mi_memory_order(acq_rel))
 #define mi_atomic_or_acq_rel(p,x)       mi_atomic(fetch_or_explicit)(p,x,mi_memory_order(acq_rel))
-#define mi_atomic_increment_relaxed(p)  mi_atomic_add_relaxed(p,1)
-#define mi_atomic_decrement_relaxed(p)  mi_atomic_sub_relaxed(p,1)
-#define mi_atomic_increment_acq_rel(p)  mi_atomic_add_acq_rel(p,1)
-#define mi_atomic_decrement_acq_rel(p)  mi_atomic_sub_acq_rel(p,1)
+#define mi_atomic_increment_relaxed(p)  mi_atomic_add_relaxed(p,(uintptr_t)1)
+#define mi_atomic_decrement_relaxed(p)  mi_atomic_sub_relaxed(p,(uintptr_t)1)
+#define mi_atomic_increment_acq_rel(p)  mi_atomic_add_acq_rel(p,(uintptr_t)1)
+#define mi_atomic_decrement_acq_rel(p)  mi_atomic_sub_acq_rel(p,(uintptr_t)1)
 static inline void mi_atomic_yield(void);
 static inline intptr_t mi_atomic_addi(_Atomic(intptr_t)* p, intptr_t add);
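Note: the bare literal 1 has type int; the new (uintptr_t)1 makes the operand
match the atomic's value type exactly, keeping the polymorphic C11/C++ forms
and the typed MSVC wrappers consistent and avoiding implicit sign/width
conversions. A minimal sketch of the same idea with std::atomic directly
(illustrative only, not mimalloc's actual macros):

    #include <atomic>
    #include <cstdint>

    std::atomic<uintptr_t> counter{0};

    uintptr_t increment_relaxed() {
      // equivalent in spirit to mi_atomic_increment_relaxed(&counter):
      // the explicit cast keeps the fetch_add operand at full pointer width
      return counter.fetch_add((uintptr_t)1, std::memory_order_relaxed);
    }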
@@ -73,11 +73,21 @@ static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)* p, intptr_t sub);
 #if defined(__cplusplus) || !defined(_MSC_VER)
-// In C++/C11 atomics we have polymorpic atomics so can use the typed `ptr` variants
-// (where `tp` is the type of atomic value)
+// In C++/C11 atomics we have polymorphic atomics so can use the typed `ptr` variants (where `tp` is the type of atomic value)
 // We use these macros so we can provide a typed wrapper in MSVC in C compilation mode as well
 #define mi_atomic_load_ptr_acquire(tp,p)                mi_atomic_load_acquire(p)
 #define mi_atomic_load_ptr_relaxed(tp,p)                mi_atomic_load_relaxed(p)
+// In C++ we need to add casts to help resolve templates if NULL is passed
+#if defined(__cplusplus)
+#define mi_atomic_store_ptr_release(tp,p,x)             mi_atomic_store_release(p,(tp*)x)
+#define mi_atomic_store_ptr_relaxed(tp,p,x)             mi_atomic_store_relaxed(p,(tp*)x)
+#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des)    mi_atomic_cas_weak_release(p,exp,(tp*)des)
+#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des)    mi_atomic_cas_weak_acq_rel(p,exp,(tp*)des)
+#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des)  mi_atomic_cas_strong_release(p,exp,(tp*)des)
+#define mi_atomic_exchange_ptr_release(tp,p,x)          mi_atomic_exchange_release(p,(tp*)x)
+#define mi_atomic_exchange_ptr_acq_rel(tp,p,x)          mi_atomic_exchange_acq_rel(p,(tp*)x)
+#else
 #define mi_atomic_store_ptr_release(tp,p,x)             mi_atomic_store_release(p,x)
 #define mi_atomic_store_ptr_relaxed(tp,p,x)             mi_atomic_store_relaxed(p,x)
 #define mi_atomic_cas_ptr_weak_release(tp,p,exp,des)    mi_atomic_cas_weak_release(p,exp,des)
@@ -85,6 +95,7 @@ static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)* p, intptr_t sub);
 #define mi_atomic_cas_ptr_strong_release(tp,p,exp,des)  mi_atomic_cas_strong_release(p,exp,des)
 #define mi_atomic_exchange_ptr_release(tp,p,x)          mi_atomic_exchange_release(p,x)
 #define mi_atomic_exchange_ptr_acq_rel(tp,p,x)          mi_atomic_exchange_acq_rel(p,x)
+#endif
 // These are used by the statistics
 static inline int64_t mi_atomic_addi64_relaxed(volatile int64_t* p, int64_t add) {
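The new C++-only branch exists because the C++ path maps these macros onto the
free functions std::atomic_store_explicit and friends, whose pre-C++17
signatures deduce T from both the atomic object and the desired value; passing
NULL then deduces conflicting types and fails to compile. A sketch of the
failure and of the fix (mi_page_t here is just an illustrative pointee type):

    #include <atomic>
    #include <cstddef>

    struct mi_page_t;                        // illustrative; any pointee type works
    std::atomic<mi_page_t*> field{nullptr};

    void reset_field() {
      // Pre-C++17, atomic_store_explicit(atomic<T>*, T, memory_order) deduces
      // T = mi_page_t* from &field but T = long (or int) from NULL -- a
      // deduction conflict, so this line would not compile:
      //   std::atomic_store_explicit(&field, NULL, std::memory_order_release);

      // The (tp*) cast inserted by the macro makes both deductions agree:
      std::atomic_store_explicit(&field, (mi_page_t*)NULL, std::memory_order_release);
    }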

==========================================================================

@@ -172,7 +172,7 @@ static void mi_out_stderr(const char* msg, void* arg) {
 // an output function is registered it is called immediately with
 // the output up to that point.
 #ifndef MI_MAX_DELAY_OUTPUT
-#define MI_MAX_DELAY_OUTPUT (32*1024)
+#define MI_MAX_DELAY_OUTPUT ((uintptr_t)(32*1024))
 #endif
 static char out_buf[MI_MAX_DELAY_OUTPUT+1];
 static _Atomic(uintptr_t) out_len;
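MI_MAX_DELAY_OUTPUT becomes a uintptr_t constant because it is compared
against the _Atomic(uintptr_t) out_len; the cast keeps that arithmetic
same-typed. A rough sketch (my simplification, not the actual mi_out_buf
code) of how such a delayed-output buffer appends:

    #include <atomic>
    #include <cstdint>
    #include <cstring>

    #define MAX_DELAY_OUTPUT ((uintptr_t)(32*1024))
    static char buf[MAX_DELAY_OUTPUT+1];       // zero-initialized: always NUL-terminated
    static std::atomic<uintptr_t> len{0};

    static void delayed_out(const char* msg) {
      uintptr_t n = strlen(msg);
      // reserve a slot; uintptr_t arithmetic matches the constant's type
      uintptr_t start = len.fetch_add(n, std::memory_order_relaxed);
      if (start >= MAX_DELAY_OUTPUT) return;   // buffer exhausted: drop the output
      if (start + n > MAX_DELAY_OUTPUT) n = MAX_DELAY_OUTPUT - start;  // clamp the tail
      memcpy(&buf[start], msg, n);
    }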

==========================================================================

@@ -296,7 +296,7 @@ static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment,
       if (large_only) return p;
       // fall back to non-large page allocation on error (`p == NULL`).
       if (p == NULL) {
-        mi_atomic_store_release(&large_page_try_ok,10); // on error, don't try again for the next N allocations
+        mi_atomic_store_release(&large_page_try_ok,10UL); // on error, don't try again for the next N allocations
       }
     }
   }
@@ -420,7 +420,7 @@ static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int pro
       #endif
       if (large_only) return p;
       if (p == NULL) {
-        mi_atomic_store_release(&large_page_try_ok, 10); // on error, don't try again for the next N allocations
+        mi_atomic_store_release(&large_page_try_ok, 10UL); // on error, don't try again for the next N allocations
       }
     }
   }
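Both call sites (Windows and Unix) use the same heuristic: after a failed
large-page attempt, an atomic countdown disables further attempts for the
next N allocations, since probing for huge OS pages is expensive. Sketched in
isolation (function names and stubs are mine, not from the diff):

    #include <atomic>
    #include <cstdint>
    #include <cstdlib>

    static std::atomic<uintptr_t> large_page_try_ok{0};

    void* try_large_page_alloc(size_t size) { return nullptr; }  // stub: pretend it failed
    void* regular_alloc(size_t size)        { return malloc(size); }

    void* alloc(size_t size, bool allow_large) {
      uintptr_t try_ok = large_page_try_ok.load(std::memory_order_acquire);
      if (allow_large && try_ok == 0) {
        void* p = try_large_page_alloc(size);
        if (p != nullptr) return p;
        // failure is expensive; skip the large-page path for the next 10 calls
        large_page_try_ok.store(10UL, std::memory_order_release);
      }
      else if (try_ok > 0) {
        // best-effort countdown: a lost CAS race just means one extra skip
        large_page_try_ok.compare_exchange_weak(try_ok, try_ok - 1,
                                                std::memory_order_acq_rel);
      }
      return regular_alloc(size);
    }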
@@ -776,7 +776,7 @@ static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats)
   int err = madvise(start, csize, (int)mi_atomic_load_relaxed(&advice));
   if (err != 0 && errno == EINVAL && advice == MADV_FREE) {
     // if MADV_FREE is not supported, fall back to MADV_DONTNEED from now on
-    mi_atomic_store_release(&advice, MADV_DONTNEED);
+    mi_atomic_store_release(&advice, (uintptr_t)MADV_DONTNEED);
     err = madvise(start, csize, MADV_DONTNEED);
   }
 #elif defined(__wasi__)
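MADV_FREE is only available on Linux 4.5+; older kernels reject it with
EINVAL, so the cached advice is downgraded to MADV_DONTNEED once,
process-wide. The cast is needed because the advice lives in an
_Atomic(uintptr_t) while MADV_DONTNEED is a plain int constant. A Linux-only
sketch of the pattern (simplified, not the exact diffed function):

    #include <atomic>
    #include <cerrno>
    #include <cstddef>
    #include <cstdint>
    #include <sys/mman.h>

    static std::atomic<uintptr_t> advice{MADV_FREE};

    bool reset_pages(void* start, size_t csize) {
      int err = madvise(start, csize, (int)advice.load(std::memory_order_relaxed));
      if (err != 0 && errno == EINVAL &&
          advice.load(std::memory_order_relaxed) == MADV_FREE) {
        // kernel predates MADV_FREE (Linux < 4.5): downgrade once for all threads
        advice.store((uintptr_t)MADV_DONTNEED, std::memory_order_release);
        err = madvise(start, csize, MADV_DONTNEED);
      }
      return (err == 0);
    }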

==========================================================================

@@ -214,7 +214,7 @@ static bool os_random_buf(void* buf, size_t buf_len) {
   ssize_t ret = syscall(SYS_getrandom, buf, buf_len, GRND_NONBLOCK);
   if (ret >= 0) return (buf_len == (size_t)ret);
   if (ret != ENOSYS) return false;
-  mi_atomic_store_release(&no_getrandom,1); // don't call again, and fall back to /dev/urandom
+  mi_atomic_store_release(&no_getrandom, 1UL); // don't call again, and fall back to /dev/urandom
   }
 #endif
   int flags = O_RDONLY;
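The flag makes the SYS_getrandom probe one-shot: once a kernel without the
syscall is detected, every later call goes straight to /dev/urandom. A
Linux-only sketch (my simplification of the surrounding function):

    #include <atomic>
    #include <cstdint>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef GRND_NONBLOCK
    #define GRND_NONBLOCK 0x0001
    #endif

    static std::atomic<uintptr_t> no_getrandom{0};

    bool random_buf(void* buf, size_t buf_len) {
      if (no_getrandom.load(std::memory_order_acquire) == 0) {
        ssize_t ret = syscall(SYS_getrandom, buf, buf_len, GRND_NONBLOCK);
        if (ret >= 0) return (buf_len == (size_t)ret);
        // remember the failure so we never pay for the syscall probe again
        no_getrandom.store(1UL, std::memory_order_release);
      }
      int fd = open("/dev/urandom", O_RDONLY);
      if (fd < 0) return false;
      ssize_t n = read(fd, buf, buf_len);
      close(fd);
      return (n >= 0 && (size_t)n == buf_len);
    }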

==========================================================================

@@ -195,13 +195,13 @@ static bool mi_region_try_alloc_os(size_t blocks, bool commit, bool allow_large,
   // allocated, initialize and claim the initial blocks
   mem_region_t* r = &regions[idx];
   r->arena_memid = arena_memid;
-  mi_atomic_store_release(&r->in_use, 0);
+  mi_atomic_store_release(&r->in_use, (uintptr_t)0);
   mi_atomic_store_release(&r->dirty, (is_zero ? 0 : MI_BITMAP_FIELD_FULL));
   mi_atomic_store_release(&r->commit, (region_commit ? MI_BITMAP_FIELD_FULL : 0));
-  mi_atomic_store_release(&r->reset, 0);
+  mi_atomic_store_release(&r->reset, (uintptr_t)0);
   *bit_idx = 0;
   mi_bitmap_claim(&r->in_use, 1, blocks, *bit_idx, NULL);
-  mi_atomic_store_ptr_release(uint8_t*,&r->start, start);
+  mi_atomic_store_ptr_release(void,&r->start, start);
   // and share it
   mi_region_info_t info;
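The tp argument change from uint8_t* to void follows from the new C++ macros
above: they append a * to tp, so tp must name the pointee type. With an
atomic void* field, passing void yields the intended (void*) cast, while the
old uint8_t* would now cast to (uint8_t**). A compilable illustration (the
macro mirrors the C++ branch above; start_field is my stand-in for r->start):

    #include <atomic>
    #include <cstdint>

    #define mi_atomic_store_ptr_release(tp,p,x) \
      std::atomic_store_explicit(p, (tp*)(x), std::memory_order_release)

    std::atomic<void*> start_field{nullptr};   // stand-in for r->start

    void set_start(uint8_t* start) {
      // tp = void, so the macro casts to (void*): exactly the field's type.
      // tp = uint8_t* would expand to a (uint8_t**) cast and a type mismatch.
      mi_atomic_store_ptr_release(void, &start_field, start);
    }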
@@ -456,7 +456,7 @@ void _mi_mem_collect(mi_os_tld_t* tld) {
       uintptr_t commit = mi_atomic_load_relaxed(&regions[i].commit);
       memset(&regions[i], 0, sizeof(mem_region_t));
       // and release the whole region
-      mi_atomic_store_release(&region->info, 0);
+      mi_atomic_store_release(&region->info, (uintptr_t)0);
       if (start != NULL) { // && !_mi_os_is_huge_reserved(start)) {
         _mi_abandoned_await_readers(); // ensure no pending reads
         _mi_arena_free(start, MI_REGION_SIZE, arena_memid, (~commit == 0), tld->stats);
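The (~commit == 0) argument tests whether every bit of the commit bitmap
field is set, i.e. whether the whole region is committed. A tiny
self-contained check of the idiom:

    #include <cassert>
    #include <cstdint>

    int main() {
      uintptr_t commit = ~(uintptr_t)0;   // all blocks committed: every bit set
      assert(~commit == 0);               // the idiom holds only in that case
      commit &= ~(uintptr_t)1;            // decommit one block
      assert(~commit != 0);               // partially committed now
      return 0;
    }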