merge from dev
commit 413141ae29
@@ -119,7 +119,7 @@
 <PreprocessorDefinitions>MI_DEBUG=3;%(PreprocessorDefinitions);</PreprocessorDefinitions>
 <CompileAs>CompileAsCpp</CompileAs>
 <SupportJustMyCode>false</SupportJustMyCode>
-<LanguageStandard>Default</LanguageStandard>
+<LanguageStandard>stdcpp20</LanguageStandard>
 </ClCompile>
 <PostBuildEvent>
 <Command>
@@ -179,7 +179,7 @@
 <InlineFunctionExpansion>Default</InlineFunctionExpansion>
 <CompileAs>CompileAsCpp</CompileAs>
 <IntrinsicFunctions>true</IntrinsicFunctions>
-<LanguageStandard>Default</LanguageStandard>
+<LanguageStandard>stdcpp20</LanguageStandard>
 </ClCompile>
 <Link>
 <EnableCOMDATFolding>true</EnableCOMDATFolding>
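Setting <LanguageStandard>stdcpp20</LanguageStandard> asks MSVC to compile with /std:c++20 (these projects already force CompileAsCpp), which is what the C++20 branch of the new mi_likely/mi_unlikely definitions further down keys on. A minimal sketch of that detection; the version-macro values are standard, the comments are illustrative only:

// Illustration only: how a translation unit can tell whether the C++20
// [[likely]]/[[unlikely]] attributes are usable. This mirrors the #elif test
// that appears in the macro hunk later in this diff.
#if (defined(__cplusplus) && (__cplusplus >= 202002L)) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L)
  // /std:c++20 (or -std=c++20) is in effect: [[likely]]/[[unlikely]] are available.
#else
  // pre-C++20 (or plain C): fall back to __builtin_expect, or no hint at all.
#endif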
@@ -163,8 +163,11 @@ bool _mi_page_is_valid(mi_page_t* page);
 // ------------------------------------------------------

 #if defined(__GNUC__) || defined(__clang__)
-#define mi_unlikely(x) __builtin_expect(!!(x),false)
-#define mi_likely(x) __builtin_expect(!!(x),true)
+#define mi_unlikely(x) (__builtin_expect(!!(x),false))
+#define mi_likely(x) (__builtin_expect(!!(x),true))
+#elif (defined(__cplusplus) && (__cplusplus >= 202002L)) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L)
+#define mi_unlikely(x) (x) [[unlikely]]
+#define mi_likely(x) (x) [[likely]]
 #else
 #define mi_unlikely(x) (x)
 #define mi_likely(x) (x)
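Why the call sites throughout the rest of this diff change from `if (mi_unlikely(x))` to `if mi_unlikely(x)`: the macro now supplies its own parentheses around the condition and, in the C++20 case, appends an attribute that must sit between the condition and the statement. A minimal expansion sketch, illustrative only:

// A call site written as
//   if mi_unlikely(p == NULL) { return; }
// expands per branch roughly as:
//
//   GCC/clang:  if (__builtin_expect(!!(p == NULL),false)) { return; }
//   C++20:      if (p == NULL) [[unlikely]] { return; }
//   otherwise:  if (p == NULL) { return; }
//
// Keeping the old outer parentheses, `if (mi_unlikely(p == NULL))`, would expand in the
// C++20 case to `if ((p == NULL) [[unlikely]])`, which is ill-formed, hence the call-site rewrite.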
@@ -299,7 +302,7 @@ static inline bool mi_count_size_overflow(size_t count, size_t size, size_t* tot
 *total = size;
 return false;
 }
-else if (mi_unlikely(mi_mul_overflow(count, size, total))) {
+else if mi_unlikely(mi_mul_overflow(count, size, total)) {
 #if MI_DEBUG > 0
 _mi_error_message(EOVERFLOW, "allocation request is too large (%zu * %zu bytes)\n", count, size);
 #endif
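For context, the later hunks in this same diff show how this overflow check is consumed by callers; a condensed usage sketch, taken from the mi_new_n hunk below and illustrative only:

// Condensed from the mi_new_n hunk later in this diff: the caller computes
// count*size with overflow detection before allocating.
size_t total;
if mi_unlikely(mi_count_size_overflow(count, size, &total)) {
  mi_try_new_handler(false); // on overflow, give the new-handler one chance to throw std::bad_alloc
  return NULL;
}
// ... allocate `total` bytes ...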
@@ -373,7 +376,7 @@ extern mi_decl_thread mi_heap_t* _mi_heap_default; // default heap to allocate
 static inline mi_heap_t* mi_get_default_heap(void) {
 #if defined(MI_TLS_SLOT)
 mi_heap_t* heap = (mi_heap_t*)mi_tls_slot(MI_TLS_SLOT);
-if (mi_unlikely(heap == NULL)) {
+if mi_unlikely(heap == NULL) {
 #ifdef __GNUC__
 __asm(""); // prevent conditional load of the address of _mi_heap_empty
 #endif
@@ -487,7 +490,7 @@ static inline mi_page_t* _mi_ptr_page(void* p) {
 static inline size_t mi_page_block_size(const mi_page_t* page) {
 const size_t bsize = page->xblock_size;
 mi_assert_internal(bsize > 0);
-if (mi_likely(bsize < MI_HUGE_BLOCK_SIZE)) {
+if mi_likely(bsize < MI_HUGE_BLOCK_SIZE) {
 return bsize;
 }
 else {
@@ -650,11 +653,11 @@ static inline uintptr_t mi_rotr(uintptr_t x, uintptr_t shift) {

 static inline void* mi_ptr_decode(const void* null, const mi_encoded_t x, const uintptr_t* keys) {
 void* p = (void*)(mi_rotr(x - keys[0], keys[0]) ^ keys[1]);
-return (mi_unlikely(p==null) ? NULL : p);
+return (p==null ? NULL : p);
 }

 static inline mi_encoded_t mi_ptr_encode(const void* null, const void* p, const uintptr_t* keys) {
-uintptr_t x = (uintptr_t)(mi_unlikely(p==NULL) ? null : p);
+uintptr_t x = (uintptr_t)(p==NULL ? null : p);
 return mi_rotl(x ^ keys[1], keys[0]) + keys[0];
 }

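The mi_unlikely hints disappear from these ternary expressions because, under the new C++20 definition, the macro can no longer appear inside an expression; [[likely]]/[[unlikely]] apply only to statements and labels. A sketch of the problem, illustrative only:

// With `#define mi_unlikely(x) (x) [[unlikely]]`, an expression use such as
//   return (mi_unlikely(p==null) ? NULL : p);
// would expand to
//   return ((p==null) [[unlikely]] ? NULL : p);   // not valid C or C++
// so the hint is simply dropped inside ternaries (here, and in the zsize
// computation in _mi_page_malloc further down), while statement-level
// `if mi_unlikely(...)` uses keep it.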
@@ -681,7 +684,7 @@ static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t*
 mi_block_t* next = mi_block_nextx(page,block,page->keys);
 // check for free list corruption: is `next` at least in the same page?
 // TODO: check if `next` is `page->block_size` aligned?
-if (mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next))) {
+if mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next)) {
 _mi_error_message(EFAULT, "corrupted free list entry of size %zub at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next);
 next = NULL;
 }
@@ -780,12 +783,12 @@ size_t _mi_os_numa_node_count_get(void);

 extern _Atomic(size_t) _mi_numa_node_count;
 static inline int _mi_os_numa_node(mi_os_tld_t* tld) {
-if (mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1)) return 0;
+if mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1) { return 0; }
 else return _mi_os_numa_node_get(tld);
 }
 static inline size_t _mi_os_numa_node_count(void) {
 const size_t count = mi_atomic_load_relaxed(&_mi_numa_node_count);
-if (mi_likely(count>0)) return count;
+if mi_likely(count > 0) { return count; }
 else return _mi_os_numa_node_count_get();
 }

@@ -49,19 +49,19 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
 {
 // note: we don't require `size > offset`, we just guarantee that the address at offset is aligned regardless of the allocated size.
 mi_assert(alignment > 0);
-if (mi_unlikely(alignment==0 || !_mi_is_power_of_two(alignment))) { // require power-of-two (see <https://en.cppreference.com/w/c/memory/aligned_alloc>)
+if mi_unlikely(alignment==0 || !_mi_is_power_of_two(alignment)) { // require power-of-two (see <https://en.cppreference.com/w/c/memory/aligned_alloc>)
 #if MI_DEBUG > 0
 _mi_error_message(EOVERFLOW, "aligned allocation requires the alignment to be a power-of-two (size %zu, alignment %zu)\n", size, alignment);
 #endif
 return NULL;
 }
-if (mi_unlikely(alignment > MI_ALIGNMENT_MAX)) { // we cannot align at a boundary larger than this (or otherwise we cannot find segment headers)
+if mi_unlikely(alignment > MI_ALIGNMENT_MAX) { // we cannot align at a boundary larger than this (or otherwise we cannot find segment headers)
 #if MI_DEBUG > 0
 _mi_error_message(EOVERFLOW, "aligned allocation has a maximum alignment of %zu (size %zu, alignment %zu)\n", MI_ALIGNMENT_MAX, size, alignment);
 #endif
 return NULL;
 }
-if (mi_unlikely(size > PTRDIFF_MAX)) { // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
+if mi_unlikely(size > PTRDIFF_MAX) { // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
 #if MI_DEBUG > 0
 _mi_error_message(EOVERFLOW, "aligned allocation request is too large (size %zu, alignment %zu)\n", size, alignment);
 #endif
@@ -71,10 +71,10 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
 const size_t padsize = size + MI_PADDING_SIZE; // note: cannot overflow due to earlier size > PTRDIFF_MAX check

 // try first if there happens to be a small block available with just the right alignment
-if (mi_likely(padsize <= MI_SMALL_SIZE_MAX)) {
+if mi_likely(padsize <= MI_SMALL_SIZE_MAX) {
 mi_page_t* page = _mi_heap_get_free_small_page(heap, padsize);
 const bool is_aligned = (((uintptr_t)page->free+offset) & align_mask)==0;
-if (mi_likely(page->free != NULL && is_aligned))
+if mi_likely(page->free != NULL && is_aligned)
 {
 #if MI_STAT>1
 mi_heap_stat_increase(heap, malloc, size);
@@ -102,7 +102,7 @@ mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap,
 #if !MI_PADDING
 // without padding, any small sized allocation is naturally aligned (see also `_mi_segment_page_start`)
 if (!_mi_is_power_of_two(alignment)) return NULL;
-if (mi_likely(_mi_is_power_of_two(size) && size >= alignment && size <= MI_SMALL_SIZE_MAX))
+if mi_likely(_mi_is_power_of_two(size) && size >= alignment && size <= MI_SMALL_SIZE_MAX)
 #else
 // with padding, we can only guarantee this for fixed alignments
 if (mi_likely((alignment == sizeof(void*) || (alignment == MI_MAX_ALIGN_SIZE && size > (MI_MAX_ALIGN_SIZE/2)))
@@ -254,7 +254,7 @@ static malloc_zone_t mi_malloc_zone = {
 static inline malloc_zone_t* mi_get_default_zone(void)
 {
 static bool init;
-if (mi_unlikely(!init)) {
+if mi_unlikely(!init) {
 init = true;
 malloc_zone_register(&mi_malloc_zone); // by calling register we avoid a zone error on free (see <http://eatmyrandom.blogspot.com/2010/03/mallocfree-interception-on-mac-os-x.html>)
 }
@@ -83,7 +83,7 @@ mi_decl_nodiscard mi_decl_restrict void* mi_pvalloc(size_t size) mi_attr_noexcep
 }

 mi_decl_nodiscard mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept {
-if (mi_unlikely((size&(alignment-1)) != 0)) { // C11 requires alignment>0 && integral multiple, see <https://en.cppreference.com/w/c/memory/aligned_alloc>
+if mi_unlikely((size&(alignment-1)) != 0) { // C11 requires alignment>0 && integral multiple, see <https://en.cppreference.com/w/c/memory/aligned_alloc>
 #if MI_DEBUG > 0
 _mi_error_message(EOVERFLOW, "(mi_)aligned_alloc requires the size to be an integral multiple of the alignment (size %zu, alignment %zu)\n", size, alignment);
 #endif
@@ -109,7 +109,7 @@ mi_decl_nodiscard int mi_reallocarr( void* p, size_t count, size_t size ) mi_att
 }
 void** op = (void**)p;
 void* newp = mi_reallocarray(*op, count, size);
-if (mi_unlikely(newp == NULL)) return errno;
+if mi_unlikely(newp == NULL) { return errno; }
 *op = newp;
 return 0;
 }
src/alloc.c (54 changed lines)
@@ -28,7 +28,7 @@ terms of the MIT license. A copy of the license can be found in the file
 extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept {
 mi_assert_internal(page->xblock_size==0||mi_page_block_size(page) >= size);
 mi_block_t* const block = page->free;
-if (mi_unlikely(block == NULL)) {
+if mi_unlikely(block == NULL) {
 return _mi_malloc_generic(heap, size, zero);
 }
 mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
@@ -38,9 +38,9 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
 mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);

 // zero the block?
-if (mi_unlikely(zero)) {
+if mi_unlikely(zero) {
 mi_assert_internal(page->xblock_size != 0); // do not call with zero'ing for huge blocks
-const size_t zsize = (mi_unlikely(page->is_zero) ? sizeof(block->next) : page->xblock_size);
+const size_t zsize = (page->is_zero ? sizeof(block->next) : page->xblock_size);
 _mi_memzero_aligned(block, zsize);
 }

@@ -108,7 +108,7 @@ mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t si

 // The main allocation function
 mi_decl_nodiscard extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
-if (mi_likely(size <= MI_SMALL_SIZE_MAX)) {
+if mi_likely(size <= MI_SMALL_SIZE_MAX) {
 return mi_heap_malloc_small_zero(heap, size, zero);
 }
 else {
@@ -356,7 +356,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
 mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free);
 do {
 use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE);
-if (mi_unlikely(use_delayed)) {
+if mi_unlikely(use_delayed) {
 // unlikely: this only happens on the first concurrent free in a page that is in the full list
 tfreex = mi_tf_set_delayed(tfree,MI_DELAYED_FREEING);
 }
@@ -367,7 +367,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
 }
 } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));

-if (mi_unlikely(use_delayed)) {
+if mi_unlikely(use_delayed) {
 // racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`)
 mi_heap_t* const heap = (mi_heap_t*)(mi_atomic_load_acquire(&page->xheap)); //mi_page_heap(page);
 mi_assert_internal(heap != NULL);
@@ -393,9 +393,9 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
 static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block)
 {
 // and push it on the free list
-if (mi_likely(local)) {
+if mi_likely(local) {
 // owning thread can free a block directly
-if (mi_unlikely(mi_check_is_double_free(page, block))) return;
+if mi_unlikely(mi_check_is_double_free(page, block)) return;
 mi_check_padding(page, block);
 #if (MI_DEBUG!=0)
 memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
@@ -403,10 +403,10 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block
 mi_block_set_next(page, block, page->local_free);
 page->local_free = block;
 page->used--;
-if (mi_unlikely(mi_page_all_free(page))) {
+if mi_unlikely(mi_page_all_free(page)) {
 _mi_page_retire(page);
 }
-else if (mi_unlikely(mi_page_is_in_full(page))) {
+else if mi_unlikely(mi_page_is_in_full(page)) {
 _mi_page_unfull(page);
 }
 }
@@ -439,26 +439,26 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms
 {
 MI_UNUSED(msg);
 #if (MI_DEBUG>0)
-if (mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0)) {
+if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) {
 _mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p);
 return NULL;
 }
 #endif

 mi_segment_t* const segment = _mi_ptr_segment(p);
-if (mi_unlikely(segment == NULL)) return NULL; // checks also for (p==NULL)
+if mi_unlikely(segment == NULL) return NULL; // checks also for (p==NULL)

 #if (MI_DEBUG>0)
-if (mi_unlikely(!mi_is_in_heap_region(p))) {
+if mi_unlikely(!mi_is_in_heap_region(p)) {
 _mi_warning_message("%s: pointer might not point to a valid heap region: %p\n"
 "(this may still be a valid very large allocation (over 64MiB))\n", msg, p);
-if (mi_likely(_mi_ptr_cookie(segment) == segment->cookie)) {
+if mi_likely(_mi_ptr_cookie(segment) == segment->cookie) {
 _mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
 }
 }
 #endif
 #if (MI_DEBUG>0 || MI_SECURE>=4)
-if (mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie)) {
+if mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie) {
 _mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p);
 return NULL;
 }
@@ -470,15 +470,15 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms
 void mi_free(void* p) mi_attr_noexcept
 {
 mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free");
-if (mi_unlikely(segment == NULL)) return;
+if mi_unlikely(segment == NULL) return;

 mi_threadid_t tid = _mi_thread_id();
 mi_page_t* const page = _mi_segment_page_of(segment, p);

-if (mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0)) { // the thread id matches and it is not a full page, nor has aligned blocks
+if mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0) { // the thread id matches and it is not a full page, nor has aligned blocks
 // local, and not full or aligned
 mi_block_t* block = (mi_block_t*)(p);
-if (mi_unlikely(mi_check_is_double_free(page,block))) return;
+if mi_unlikely(mi_check_is_double_free(page,block)) return;
 mi_check_padding(page, block);
 mi_stat_free(page, block);
 #if (MI_DEBUG!=0)
@@ -486,7 +486,7 @@ void mi_free(void* p) mi_attr_noexcept
 #endif
 mi_block_set_next(page, block, page->local_free);
 page->local_free = block;
-if (mi_unlikely(--page->used == 0)) { // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
+if mi_unlikely(--page->used == 0) { // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
 _mi_page_retire(page);
 }
 }
@@ -532,7 +532,7 @@ static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noe
 const mi_segment_t* const segment = mi_checked_ptr_segment(p, msg);
 if (segment==NULL) return 0; // also returns 0 if `p == NULL`
 const mi_page_t* const page = _mi_segment_page_of(segment, p);
-if (mi_likely(!mi_page_has_aligned(page))) {
+if mi_likely(!mi_page_has_aligned(page)) {
 const mi_block_t* block = (const mi_block_t*)p;
 return mi_page_usable_size_of(page, block);
 }
@@ -627,18 +627,18 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero)
 // else if size == 0 then reallocate to a zero-sized block (and don't return NULL, just as mi_malloc(0)).
 // (this means that returning NULL always indicates an error, and `p` will not have been freed in that case.)
 const size_t size = _mi_usable_size(p,"mi_realloc"); // also works if p == NULL (with size 0)
-if (mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0)) { // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0)
+if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) { // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0)
 // todo: adjust potential padding to reflect the new size?
 return p; // reallocation still fits and not more than 50% waste
 }
 void* newp = mi_heap_malloc(heap,newsize);
-if (mi_likely(newp != NULL)) {
+if mi_likely(newp != NULL) {
 if (zero && newsize > size) {
 // also set last word in the previous allocation to zero to ensure any padding is zero-initialized
 const size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
 memset((uint8_t*)newp + start, 0, newsize - start);
 }
-if (mi_likely(p != NULL)) {
+if mi_likely(p != NULL) {
 _mi_memcpy_aligned(newp, p, (newsize > size ? size : newsize));
 mi_free(p); // only free the original pointer if successful
 }
@@ -863,13 +863,13 @@ static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow ) {

 mi_decl_nodiscard mi_decl_restrict void* mi_new(size_t size) {
 void* p = mi_malloc(size);
-if (mi_unlikely(p == NULL)) return mi_try_new(size,false);
+if mi_unlikely(p == NULL) return mi_try_new(size,false);
 return p;
 }

 mi_decl_nodiscard mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept {
 void* p = mi_malloc(size);
-if (mi_unlikely(p == NULL)) return mi_try_new(size, true);
+if mi_unlikely(p == NULL) return mi_try_new(size, true);
 return p;
 }

@@ -893,7 +893,7 @@ mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, siz

 mi_decl_nodiscard mi_decl_restrict void* mi_new_n(size_t count, size_t size) {
 size_t total;
-if (mi_unlikely(mi_count_size_overflow(count, size, &total))) {
+if mi_unlikely(mi_count_size_overflow(count, size, &total)) {
 mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
 return NULL;
 }
@@ -912,7 +912,7 @@ mi_decl_nodiscard void* mi_new_realloc(void* p, size_t newsize) {

 mi_decl_nodiscard void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
 size_t total;
-if (mi_unlikely(mi_count_size_overflow(newcount, size, &total))) {
+if mi_unlikely(mi_count_size_overflow(newcount, size, &total)) {
 mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
 return NULL;
 }
@@ -155,7 +155,7 @@ static mi_decl_noinline void* mi_arena_allocate(int numa_node, size_t size, size
 mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
 const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
 const size_t bcount = mi_block_count_of_size(size);
-if (mi_likely(max_arena == 0)) return NULL;
+if mi_likely(max_arena == 0) return NULL;
 mi_assert_internal(size <= bcount*MI_ARENA_BLOCK_SIZE);

 // try numa affine allocation
@@ -283,7 +283,7 @@ bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitm
 static size_t mi_bitmap_mask_across(mi_bitmap_index_t bitmap_idx, size_t bitmap_fields, size_t count, size_t* pre_mask, size_t* mid_mask, size_t* post_mask) {
 MI_UNUSED_RELEASE(bitmap_fields);
 const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
-if (mi_likely(bitidx + count <= MI_BITMAP_FIELD_BITS)) {
+if mi_likely(bitidx + count <= MI_BITMAP_FIELD_BITS) {
 *pre_mask = mi_bitmap_mask_(count, bitidx);
 *mid_mask = 0;
 *post_mask = 0;
@@ -421,7 +421,7 @@ static mi_heap_t* mi_heap_of_block(const void* p) {
 mi_segment_t* segment = _mi_ptr_segment(p);
 bool valid = (_mi_ptr_cookie(segment) == segment->cookie);
 mi_assert_internal(valid);
-if (mi_unlikely(!valid)) return NULL;
+if mi_unlikely(!valid) return NULL;
 return mi_page_heap(_mi_segment_page_of(segment,p));
 }

@@ -120,7 +120,7 @@ mi_decl_nodiscard long mi_option_get(mi_option_t option) {
 if (option < 0 || option >= _mi_option_last) return 0;
 mi_option_desc_t* desc = &options[option];
 mi_assert(desc->option == option); // index should match the option
-if (mi_unlikely(desc->init == UNINIT)) {
+if mi_unlikely(desc->init == UNINIT) {
 mi_option_init(desc);
 }
 return desc->value;
src/os.c (2 changed lines)
@@ -122,7 +122,7 @@ size_t _mi_os_good_alloc_size(size_t size) {
 else if (size < 8*MI_MiB) align_size = 256*MI_KiB;
 else if (size < 32*MI_MiB) align_size = 1*MI_MiB;
 else align_size = 4*MI_MiB;
-if (mi_unlikely(size >= (SIZE_MAX - align_size))) return size; // possible overflow?
+if mi_unlikely(size >= (SIZE_MAX - align_size)) return size; // possible overflow?
 return _mi_align_up(size, align_size);
 }

src/page.c (20 changed lines)
@@ -131,7 +131,7 @@ void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool overrid
 tfree = mi_atomic_load_acquire(&page->xthread_free); // note: must acquire as we can break/repeat this loop and not do a CAS;
 tfreex = mi_tf_set_delayed(tfree, delay);
 old_delay = mi_tf_delayed(tfree);
-if (mi_unlikely(old_delay == MI_DELAYED_FREEING)) {
+if mi_unlikely(old_delay == MI_DELAYED_FREEING) {
 mi_atomic_yield(); // delay until outstanding MI_DELAYED_FREEING are done.
 // tfree = mi_tf_set_delayed(tfree, MI_NO_DELAYED_FREE); // will cause CAS to busy fail
 }
@@ -199,7 +199,7 @@ void _mi_page_free_collect(mi_page_t* page, bool force) {

 // and the local free list
 if (page->local_free != NULL) {
-if (mi_likely(page->free == NULL)) {
+if mi_likely(page->free == NULL) {
 // usual case
 page->free = page->local_free;
 page->local_free = NULL;
@@ -403,7 +403,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
 // how to check this efficiently though...
 // for now, we don't retire if it is the only page left of this size class.
 mi_page_queue_t* pq = mi_page_queue_of(page);
-if (mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_is_in_full(page))) {
+if mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_is_in_full(page)) {
 if (pq->last==page && pq->first==page) { // the only page in the queue?
 mi_stat_counter_increase(_mi_stats_main.page_no_retire,1);
 page->retire_expire = 1 + (page->xblock_size <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
@@ -812,8 +812,8 @@ static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size) {
 static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size) mi_attr_noexcept {
 // huge allocation?
 const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size`
-if (mi_unlikely(req_size > (MI_MEDIUM_OBJ_SIZE_MAX - MI_PADDING_SIZE) )) {
-if (mi_unlikely(req_size > PTRDIFF_MAX)) { // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
+if mi_unlikely(req_size > (MI_MEDIUM_OBJ_SIZE_MAX - MI_PADDING_SIZE)) {
+if mi_unlikely(req_size > PTRDIFF_MAX) { // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
 _mi_error_message(EOVERFLOW, "allocation request is too large (%zu bytes)\n", req_size);
 return NULL;
 }
@@ -835,10 +835,10 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexce
 mi_assert_internal(heap != NULL);

 // initialize if necessary
-if (mi_unlikely(!mi_heap_is_initialized(heap))) {
+if mi_unlikely(!mi_heap_is_initialized(heap)) {
 mi_thread_init(); // calls `_mi_heap_init` in turn
 heap = mi_get_default_heap();
-if (mi_unlikely(!mi_heap_is_initialized(heap))) { return NULL; }
+if mi_unlikely(!mi_heap_is_initialized(heap)) { return NULL; }
 }
 mi_assert_internal(mi_heap_is_initialized(heap));

@@ -850,12 +850,12 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexce

 // find (or allocate) a page of the right size
 mi_page_t* page = mi_find_page(heap, size);
-if (mi_unlikely(page == NULL)) { // first time out of memory, try to collect and retry the allocation once more
+if mi_unlikely(page == NULL) { // first time out of memory, try to collect and retry the allocation once more
 mi_heap_collect(heap, true /* force */);
 page = mi_find_page(heap, size);
 }

-if (mi_unlikely(page == NULL)) { // out of memory
+if mi_unlikely(page == NULL) { // out of memory
 const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size`
 _mi_error_message(ENOMEM, "unable to allocate memory (%zu bytes)\n", req_size);
 return NULL;
@@ -865,7 +865,7 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexce
 mi_assert_internal(mi_page_block_size(page) >= size);

 // and try again, this time succeeding! (i.e. this should never recurse through _mi_page_malloc)
-if (mi_unlikely(zero && page->xblock_size == 0)) {
+if mi_unlikely(zero && page->xblock_size == 0) {
 // note: we cannot call _mi_page_malloc with zeroing for huge blocks; we zero it afterwards in that case.
 void* p = _mi_page_malloc(heap, page, size, false);
 mi_assert_internal(p != NULL);
@@ -283,7 +283,7 @@ static mi_segment_t* _mi_segment_of(const void* p) {
 size_t index = mi_segment_map_index_of(segment, &bitidx);
 // fast path: for any pointer to valid small/medium/large object or first MI_SEGMENT_SIZE in huge
 const uintptr_t mask = mi_atomic_load_relaxed(&mi_segment_map[index]);
-if (mi_likely((mask & ((uintptr_t)1 << bitidx)) != 0)) {
+if mi_likely((mask & ((uintptr_t)1 << bitidx)) != 0) {
 return segment; // yes, allocated by us
 }
 if (index==MI_SEGMENT_MAP_WSIZE) return NULL;
@@ -324,7 +324,7 @@ static mi_segment_t* _mi_segment_of(const void* p) {
 mi_assert_internal((void*)segment < p);
 bool cookie_ok = (_mi_ptr_cookie(segment) == segment->cookie);
 mi_assert_internal(cookie_ok);
-if (mi_unlikely(!cookie_ok)) return NULL;
+if mi_unlikely(!cookie_ok) return NULL;
 if (((uint8_t*)segment + mi_segment_size(segment)) <= (uint8_t*)p) return NULL; // outside the range
 mi_assert_internal(p >= (void*)segment && (uint8_t*)p < (uint8_t*)segment + mi_segment_size(segment));
 return segment;
@@ -1149,8 +1149,8 @@ static mi_segment_t* mi_abandoned_pop(void) {
 // Check efficiently if it is empty (or if the visited list needs to be moved)
 mi_tagged_segment_t ts = mi_atomic_load_relaxed(&abandoned);
 segment = mi_tagged_segment_ptr(ts);
-if (mi_likely(segment == NULL)) {
-if (mi_likely(!mi_abandoned_visited_revisit())) { // try to swap in the visited list on NULL
+if mi_likely(segment == NULL) {
+if mi_likely(!mi_abandoned_visited_revisit()) { // try to swap in the visited list on NULL
 return NULL;
 }
 }