/* ----------------------------------------------------------------------------
Copyright (c) 2018-2022, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#pragma once
#ifndef MIMALLOC_INTERNAL_H
#define MIMALLOC_INTERNAL_H

#include "mimalloc-types.h"
#include "mimalloc-track.h"

#if (MI_DEBUG>0)
#define mi_trace_message(...)  _mi_trace_message(__VA_ARGS__)
#else
#define mi_trace_message(...)
#endif

#define MI_CACHE_LINE  64
#if defined(_MSC_VER)
#pragma warning(disable:4127)   // suppress constant conditional warning (due to MI_SECURE paths)
#pragma warning(disable:26812)  // unscoped enum warning
#define mi_decl_noinline        __declspec(noinline)
#define mi_decl_thread          __declspec(thread)
#define mi_decl_cache_align     __declspec(align(MI_CACHE_LINE))
#elif (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__)  // includes clang and icc
#define mi_decl_noinline        __attribute__((noinline))
#define mi_decl_thread          __thread
#define mi_decl_cache_align     __attribute__((aligned(MI_CACHE_LINE)))
#else
#define mi_decl_noinline
#define mi_decl_thread          __thread  // hope for the best :-)
#define mi_decl_cache_align
#endif

#if defined(__EMSCRIPTEN__) && !defined(__wasi__)
#define __wasi__
#endif

#if defined(__cplusplus)
#define mi_decl_externc   extern "C"
#else
#define mi_decl_externc
#endif

#if !defined(_WIN32) && !defined(__wasi__)
#define MI_USE_PTHREADS
#include <pthread.h>
#endif

// "options.c"
void       _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message);
void       _mi_fprintf(mi_output_fun* out, void* arg, const char* fmt, ...);
void       _mi_warning_message(const char* fmt, ...);
void       _mi_verbose_message(const char* fmt, ...);
void       _mi_trace_message(const char* fmt, ...);
void       _mi_options_init(void);
void       _mi_error_message(int err, const char* fmt, ...);

// random.c
void       _mi_random_init(mi_random_ctx_t* ctx);
void       _mi_random_init_weak(mi_random_ctx_t* ctx);
void       _mi_random_reinit_if_weak(mi_random_ctx_t* ctx);
void       _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* new_ctx);
uintptr_t  _mi_random_next(mi_random_ctx_t* ctx);
uintptr_t  _mi_heap_random_next(mi_heap_t* heap);
uintptr_t  _mi_os_random_weak(uintptr_t extra_seed);
static inline uintptr_t _mi_random_shuffle(uintptr_t x);

// init.c
extern mi_decl_cache_align mi_stats_t       _mi_stats_main;
extern mi_decl_cache_align const mi_page_t  _mi_page_empty;
bool       _mi_is_main_thread(void);
size_t     _mi_current_thread_count(void);
bool       _mi_preloading(void);  // true while the C runtime is not ready

// os.c
size_t     _mi_os_page_size(void);
void       _mi_os_init(void);                                     // called from process init
void*      _mi_os_alloc(size_t size, mi_stats_t* stats);          // to allocate thread local data
void       _mi_os_free(void* p, size_t size, mi_stats_t* stats);  // to free thread local data
size_t     _mi_os_good_alloc_size(size_t size);
bool       _mi_os_has_overcommit(void);
bool       _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats);

void*      _mi_os_alloc_aligned_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool* large, mi_stats_t* tld_stats);
void       _mi_os_free_aligned(void* p, size_t size, size_t alignment, size_t align_offset, bool was_committed, mi_stats_t* tld_stats);

// memory.c
void*      _mi_mem_alloc_aligned(size_t size, size_t alignment, size_t offset, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* id, mi_os_tld_t* tld);
void       _mi_mem_free(void* p, size_t size, size_t alignment, size_t align_offset, size_t id, bool fully_committed, bool any_reset, mi_os_tld_t* tld);

bool       _mi_mem_reset(void* p, size_t size, mi_os_tld_t* tld);
bool       _mi_mem_unreset(void* p, size_t size, bool* is_zero, mi_os_tld_t* tld);
bool       _mi_mem_commit(void* p, size_t size, bool* is_zero, mi_os_tld_t* tld);
bool       _mi_mem_decommit(void* p, size_t size, mi_os_tld_t* tld);
bool       _mi_mem_protect(void* addr, size_t size);
bool       _mi_mem_unprotect(void* addr, size_t size);

void       _mi_mem_collect(mi_os_tld_t* tld);

// "segment.c"
mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld);
void       _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld);
void       _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld);
uint8_t*   _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t block_size, size_t* page_size, size_t* pre_size);  // page start for any page

#if MI_HUGE_PAGE_ABANDON
void       _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
#else
void       _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
#endif

void       _mi_segment_thread_collect(mi_segments_tld_t* tld);
void       _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld);
void       _mi_abandoned_await_readers(void);

// "page.c"
void*      _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc;

void       _mi_page_retire(mi_page_t* page) mi_attr_noexcept;                // free the page if there are no other pages with many free blocks
void       _mi_page_unfull(mi_page_t* page);
void       _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force);  // free the page
void       _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq);           // abandon the page, to be picked up by another thread...
void       _mi_heap_delayed_free_all(mi_heap_t* heap);
bool       _mi_heap_delayed_free_partial(mi_heap_t* heap);
void       _mi_heap_collect_retired(mi_heap_t* heap, bool force);

void       _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
bool       _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
size_t     _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append);
void       _mi_deferred_free(mi_heap_t* heap, bool force);
void       _mi_page_free_collect(mi_page_t* page, bool force);
void       _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page);  // callback from segments

size_t     _mi_bin_size(uint8_t bin);  // for stats
uint8_t    _mi_bin(size_t size);       // for stats
// "heap.c"
void _mi_heap_destroy_pages ( mi_heap_t * heap ) ;
void _mi_heap_collect_abandon ( mi_heap_t * heap ) ;
2019-11-14 04:22:03 +03:00
void _mi_heap_set_default_direct ( mi_heap_t * heap ) ;
2022-11-18 22:00:23 +03:00
void _mi_heap_destroy_all ( void ) ;
2019-06-20 02:26:12 +03:00
// "stats.c"
void _mi_stats_done ( mi_stats_t * stats ) ;
2019-11-04 19:44:40 +03:00
mi_msecs_t _mi_clock_now ( void ) ;
mi_msecs_t _mi_clock_end ( mi_msecs_t start ) ;
mi_msecs_t _mi_clock_start ( void ) ;

// "alloc.c"
void*       _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept;  // called from `_mi_malloc_generic`
void*       _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
void*       _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept;  // called from `_mi_heap_malloc_aligned`
void*       _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept;
mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p);
bool        _mi_free_delayed_block(mi_block_t* block);
void        _mi_free_generic(const mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept;  // for runtime integration

#if MI_DEBUG>1
bool        _mi_page_is_valid(mi_page_t* page);
#endif

// ------------------------------------------------------
// Branches
// ------------------------------------------------------

#if defined(__GNUC__) || defined(__clang__)
#define mi_unlikely(x)     (__builtin_expect(!!(x),false))
#define mi_likely(x)       (__builtin_expect(!!(x),true))
#elif (defined(__cplusplus) && (__cplusplus >= 202002L)) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L)
#define mi_unlikely(x)     (x) [[unlikely]]
#define mi_likely(x)       (x) [[likely]]
#else
#define mi_unlikely(x)     (x)
#define mi_likely(x)       (x)
#endif

#ifndef __has_builtin
#define __has_builtin(x)  0
#endif
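
// Usage note (illustrative, not from the original source): the C++20 variant
// expands to `(x) [[likely]]`, so these macros are written directly after `if`
// without wrapping the macro in extra parentheses, as used throughout this
// header, e.g.:
//
//   if mi_unlikely(p == NULL) { return NULL; }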

/* -----------------------------------------------------------
  Error codes passed to `_mi_fatal_error`
  All are recoverable but EFAULT is a serious error and aborts by default in secure mode.
  For portability define undefined error codes using common Unix codes:
  <https://www-numi.fnal.gov/offline_software/srt_public_context/WebDocs/Errors/unix_system_errors.html>
----------------------------------------------------------- */
#include <errno.h>
#ifndef EAGAIN      // double free
#define EAGAIN (11)
#endif
#ifndef ENOMEM      // out of memory
#define ENOMEM (12)
#endif
#ifndef EFAULT      // corrupted free-list or meta-data
#define EFAULT (14)
#endif
#ifndef EINVAL      // trying to free an invalid pointer
#define EINVAL (22)
#endif
#ifndef EOVERFLOW   // count*size overflow
#define EOVERFLOW (75)
#endif

/* -----------------------------------------------------------
  Inlined definitions
----------------------------------------------------------- */
#define MI_UNUSED(x)     (void)(x)
#if (MI_DEBUG>0)
#define MI_UNUSED_RELEASE(x)
#else
#define MI_UNUSED_RELEASE(x)  MI_UNUSED(x)
#endif

#define MI_INIT4(x)   x(),x(),x(),x()
#define MI_INIT8(x)   MI_INIT4(x),MI_INIT4(x)
#define MI_INIT16(x)  MI_INIT8(x),MI_INIT8(x)
#define MI_INIT32(x)  MI_INIT16(x),MI_INIT16(x)
#define MI_INIT64(x)  MI_INIT32(x),MI_INIT32(x)
#define MI_INIT128(x) MI_INIT64(x),MI_INIT64(x)
#define MI_INIT256(x) MI_INIT128(x),MI_INIT128(x)

// Is `x` a power of two? (0 is considered a power of two)
static inline bool _mi_is_power_of_two(uintptr_t x) {
  return ((x & (x - 1)) == 0);
}

// Is a pointer aligned?
static inline bool _mi_is_aligned(void* p, size_t alignment) {
  mi_assert_internal(alignment != 0);
  return (((uintptr_t)p % alignment) == 0);
}

// Align upwards
static inline uintptr_t _mi_align_up(uintptr_t sz, size_t alignment) {
  mi_assert_internal(alignment != 0);
  uintptr_t mask = alignment - 1;
  if ((alignment & mask) == 0) {  // power of two?
    return ((sz + mask) & ~mask);
  }
  else {
    return (((sz + mask)/alignment)*alignment);
  }
}

// Divide upwards: `s <= _mi_divide_up(s,d)*d < s+d`.
static inline uintptr_t _mi_divide_up(uintptr_t size, size_t divider) {
  mi_assert_internal(divider != 0);
  return (divider == 0 ? size : ((size + divider - 1)/divider));
}
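
// Worked examples (illustrative): a power-of-two alignment takes the masked
// fast path, any other alignment the generic divide; e.g.
// `_mi_align_up(13,8) == 16`, `_mi_align_up(13,6) == 18`, and
// `_mi_divide_up(13,6) == 3`.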

// Is memory zero initialized?
static inline bool mi_mem_is_zero(void* p, size_t size) {
  for (size_t i = 0; i < size; i++) {
    if (((uint8_t*)p)[i] != 0) return false;
  }
  return true;
}

// Align a byte size to a size in _machine words_,
// i.e. byte size == `wsize*sizeof(void*)`.
static inline size_t _mi_wsize_from_size(size_t size) {
  mi_assert_internal(size <= SIZE_MAX - sizeof(uintptr_t));
  return (size + sizeof(uintptr_t) - 1) / sizeof(uintptr_t);
}
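
// For example (illustrative, on a 64-bit platform): `_mi_wsize_from_size(1)`
// through `_mi_wsize_from_size(8)` are all 1 machine word, and
// `_mi_wsize_from_size(9)` is 2.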

// Overflow detecting multiply
#if __has_builtin(__builtin_umul_overflow) || (defined(__GNUC__) && (__GNUC__ >= 5))
#include <limits.h>    // UINT_MAX, ULONG_MAX
#if defined(_CLOCK_T)  // for Illumos
#undef _CLOCK_T
#endif
static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) {
  #if (SIZE_MAX == ULONG_MAX)
  return __builtin_umull_overflow(count, size, (unsigned long*)total);
  #elif (SIZE_MAX == UINT_MAX)
  return __builtin_umul_overflow(count, size, (unsigned int*)total);
  #else
  return __builtin_umulll_overflow(count, size, (unsigned long long*)total);
  #endif
}
#else /* __builtin_umul_overflow is unavailable */
static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) {
  #define MI_MUL_NO_OVERFLOW ((size_t)1 << (4*sizeof(size_t)))  // sqrt(SIZE_MAX)
  *total = count * size;
  // note: gcc/clang optimize this to directly check the overflow flag
  return ((size >= MI_MUL_NO_OVERFLOW || count >= MI_MUL_NO_OVERFLOW) && size > 0 && (SIZE_MAX / size) < count);
}
#endif

// Safe multiply `count*size` into `total`; return `true` on overflow.
static inline bool mi_count_size_overflow(size_t count, size_t size, size_t* total) {
  if (count == 1) {  // quick check for the case where count is one (common for C++ allocators)
    *total = size;
    return false;
  }
  else if mi_unlikely(mi_mul_overflow(count, size, total)) {
    #if MI_DEBUG > 0
    _mi_error_message(EOVERFLOW, "allocation request is too large (%zu * %zu bytes)\n", count, size);
    #endif
    *total = SIZE_MAX;
    return true;
  }
  else return false;
}
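
// Illustrative sketch (not mimalloc code): a calloc-style wrapper would guard
// its size computation with `mi_count_size_overflow`; `my_alloc_zeroed` is a
// hypothetical zeroing allocation function:
//
//   void* my_calloc(size_t count, size_t size) {
//     size_t total;
//     if (mi_count_size_overflow(count, size, &total)) return NULL;
//     return my_alloc_zeroed(total);
//   }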

/* ----------------------------------------------------------------------------------------
The thread local default heap: `_mi_get_default_heap` returns the thread local heap.
On most platforms (Windows, Linux, FreeBSD, NetBSD, etc) this just returns a
__thread local variable (`_mi_heap_default`). With the initial-exec TLS model this ensures
that the storage will always be available (allocated on the thread stacks).
On some platforms though we cannot use that when overriding `malloc` since the underlying
TLS implementation (or the loader) will call itself `malloc` on a first access and recurse.
We try to circumvent this in an efficient way:
- macOSX: we use an unused TLS slot from the OS allocated slots (MI_TLS_SLOT). On OSX, the
  loader itself calls `malloc` even before the modules are initialized.
- OpenBSD: we use an unused slot from the pthread block (MI_TLS_PTHREAD_SLOT_OFS).
- DragonFly: defaults are working but seem slow compared to freeBSD (see PR #323)
------------------------------------------------------------------------------------------- */

extern const mi_heap_t _mi_heap_empty;  // read-only empty heap, initial value of the thread local default heap
extern bool _mi_process_is_initialized;
mi_heap_t*  _mi_heap_main_get(void);    // statically allocated main backing heap

#if defined(MI_MALLOC_OVERRIDE)
#if defined(__APPLE__)  // macOS
#define MI_TLS_SLOT   89  // seems unused?
// #define MI_TLS_RECURSE_GUARD 1
// other possible unused ones are 9, 29, __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY4 (94), __PTK_FRAMEWORK_GC_KEY9 (112) and __PTK_FRAMEWORK_OLDGC_KEY9 (89)
// see <https://github.com/rweichler/substrate/blob/master/include/pthread_machdep.h>
#elif defined(__OpenBSD__)
// use end bytes of a name; goes wrong if anyone uses names > 23 characters (pthread specifies 16)
// see <https://github.com/openbsd/src/blob/master/lib/libc/include/thread_private.h#L371>
#define MI_TLS_PTHREAD_SLOT_OFS   (6*sizeof(int) + 4*sizeof(void*) + 24)
// #elif defined(__DragonFly__)
// #warning "mimalloc is not working correctly on DragonFly yet."
// #define MI_TLS_PTHREAD_SLOT_OFS   (4 + 1*sizeof(void*))  // offset `uniqueid` (also used by gdb?) <https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/lib/libthread_xu/thread/thr_private.h#L458>
#elif defined(__ANDROID__)
// See issue #381
#define MI_TLS_PTHREAD
#endif
#endif

#if defined(MI_TLS_SLOT)
static inline void* mi_tls_slot(size_t slot) mi_attr_noexcept;  // forward declaration
#elif defined(MI_TLS_PTHREAD_SLOT_OFS)
static inline mi_heap_t** mi_tls_pthread_heap_slot(void) {
  pthread_t self = pthread_self();
  #if defined(__DragonFly__)
  if (self == NULL) {
    mi_heap_t* pheap_main = _mi_heap_main_get();
    return &pheap_main;
  }
  #endif
  return (mi_heap_t**)((uint8_t*)self + MI_TLS_PTHREAD_SLOT_OFS);
}
#elif defined(MI_TLS_PTHREAD)
extern pthread_key_t _mi_heap_default_key;
#endif

// Default heap to allocate from (if not using TLS- or pthread slots).
// Do not use this directly but use through `mi_heap_get_default()` (or the unchecked `mi_get_default_heap`).
// This thread local variable is only used when neither MI_TLS_SLOT, MI_TLS_PTHREAD, nor MI_TLS_PTHREAD_SLOT_OFS is defined.
// However, on the Apple M1 we do use the address of this variable as the unique thread-id (issue #356).
extern mi_decl_thread mi_heap_t* _mi_heap_default;  // default heap to allocate from

static inline mi_heap_t* mi_get_default_heap(void) {
#if defined(MI_TLS_SLOT)
  mi_heap_t* heap = (mi_heap_t*)mi_tls_slot(MI_TLS_SLOT);
  if mi_unlikely(heap == NULL) {
    #ifdef __GNUC__
    __asm("");  // prevent conditional load of the address of _mi_heap_empty
    #endif
    heap = (mi_heap_t*)&_mi_heap_empty;
  }
  return heap;
#elif defined(MI_TLS_PTHREAD_SLOT_OFS)
  mi_heap_t* heap = *mi_tls_pthread_heap_slot();
  return (mi_unlikely(heap == NULL) ? (mi_heap_t*)&_mi_heap_empty : heap);
#elif defined(MI_TLS_PTHREAD)
  mi_heap_t* heap = (mi_unlikely(_mi_heap_default_key == (pthread_key_t)(-1)) ? _mi_heap_main_get() : (mi_heap_t*)pthread_getspecific(_mi_heap_default_key));
  return (mi_unlikely(heap == NULL) ? (mi_heap_t*)&_mi_heap_empty : heap);
#else
  #if defined(MI_TLS_RECURSE_GUARD)
  if (mi_unlikely(!_mi_process_is_initialized)) return _mi_heap_main_get();
  #endif
  return _mi_heap_default;
#endif
}

static inline bool mi_heap_is_default(const mi_heap_t* heap) {
  return (heap == mi_get_default_heap());
}

static inline bool mi_heap_is_backing(const mi_heap_t* heap) {
  return (heap->tld->heap_backing == heap);
}

static inline bool mi_heap_is_initialized(mi_heap_t* heap) {
  mi_assert_internal(heap != NULL);
  return (heap != &_mi_heap_empty);
}

static inline uintptr_t _mi_ptr_cookie(const void* p) {
  extern mi_heap_t _mi_heap_main;
  mi_assert_internal(_mi_heap_main.cookie != 0);
  return ((uintptr_t)p ^ _mi_heap_main.cookie);
}

/* -----------------------------------------------------------
  Pages
----------------------------------------------------------- */

static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t size) {
  mi_assert_internal(size <= (MI_SMALL_SIZE_MAX + MI_PADDING_SIZE));
  const size_t idx = _mi_wsize_from_size(size);
  mi_assert_internal(idx < MI_PAGES_DIRECT);
  return heap->pages_free_direct[idx];
}

// Get the page belonging to a certain size class
static inline mi_page_t* _mi_get_free_small_page(size_t size) {
  return _mi_heap_get_free_small_page(mi_get_default_heap(), size);
}

// Segment that contains the pointer.
// Large aligned blocks may be aligned at N*MI_SEGMENT_SIZE (inside a huge segment > MI_SEGMENT_SIZE),
// and we need to align "down" to the segment info which is `MI_SEGMENT_SIZE` bytes before it;
// therefore we align one byte before `p`.
static inline mi_segment_t* _mi_ptr_segment(const void* p) {
  mi_assert_internal(p != NULL);
  return (mi_segment_t*)(((uintptr_t)p - 1) & ~MI_SEGMENT_MASK);
}

// Segment belonging to a page
static inline mi_segment_t* _mi_page_segment(const mi_page_t* page) {
  mi_segment_t* segment = _mi_ptr_segment(page);
  mi_assert_internal(segment == NULL || page == &segment->pages[page->segment_idx]);
  return segment;
}
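
// Illustrative example (not from the original source; assumes the default
// 4 MiB segments, i.e. MI_SEGMENT_MASK == 0x3FFFFF): for `p == (void*)0x59C340`,
// `_mi_ptr_segment(p)` masks `p-1` down and returns the segment at `0x400000`;
// masking `p-1` instead of `p` makes a block that starts exactly at an
// N*MI_SEGMENT_SIZE boundary still resolve to the segment info just before it.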

// used internally
static inline size_t _mi_segment_page_idx_of(const mi_segment_t* segment, const void* p) {
  // if (segment->page_size > MI_SEGMENT_SIZE) return &segment->pages[0];  // huge pages
  ptrdiff_t diff = (uint8_t*)p - (uint8_t*)segment;
  mi_assert_internal(diff >= 0 && (size_t)diff <= MI_SEGMENT_SIZE /* for huge alignment it can be equal */);
  size_t idx = (size_t)diff >> segment->page_shift;
  mi_assert_internal(idx < segment->capacity);
  mi_assert_internal(segment->page_kind <= MI_PAGE_MEDIUM || idx == 0);
  return idx;
}

// Get the page containing the pointer
static inline mi_page_t* _mi_segment_page_of(const mi_segment_t* segment, const void* p) {
  size_t idx = _mi_segment_page_idx_of(segment, p);
  return &((mi_segment_t*)segment)->pages[idx];
}

// Quick page start for initialized pages
static inline uint8_t* _mi_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size) {
  const size_t bsize = page->xblock_size;
  mi_assert_internal(bsize > 0 && (bsize % sizeof(void*)) == 0);
  return _mi_segment_page_start(segment, page, bsize, page_size, NULL);
}

// Get the page containing the pointer
static inline mi_page_t* _mi_ptr_page(void* p) {
  return _mi_segment_page_of(_mi_ptr_segment(p), p);
}

// Get the block size of a page (special case for huge objects)
static inline size_t mi_page_block_size(const mi_page_t* page) {
  const size_t bsize = page->xblock_size;
  mi_assert_internal(bsize > 0);
  if mi_likely(bsize < MI_HUGE_BLOCK_SIZE) {
    return bsize;
  }
  else {
    size_t psize;
    _mi_segment_page_start(_mi_page_segment(page), page, bsize, &psize, NULL);
    return psize;
  }
}

static inline bool mi_page_is_huge(const mi_page_t* page) {
  return (_mi_page_segment(page)->page_kind == MI_PAGE_HUGE);
}

// Get the usable block size of a page without fixed padding.
// This may still include internal padding due to alignment and rounding up size classes.
static inline size_t mi_page_usable_block_size(const mi_page_t* page) {
  return mi_page_block_size(page) - MI_PADDING_SIZE;
}

// Thread free access
static inline mi_block_t* mi_page_thread_free(const mi_page_t* page) {
  return (mi_block_t*)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free) & ~3);
}

static inline mi_delayed_t mi_page_thread_free_flag(const mi_page_t* page) {
  return (mi_delayed_t)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free) & 3);
}

// Heap access
static inline mi_heap_t* mi_page_heap(const mi_page_t* page) {
  return (mi_heap_t*)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xheap));
}

static inline void mi_page_set_heap(mi_page_t* page, mi_heap_t* heap) {
  mi_assert_internal(mi_page_thread_free_flag(page) != MI_DELAYED_FREEING);
  mi_atomic_store_release(&page->xheap, (uintptr_t)heap);
}

// Thread free flag helpers
static inline mi_block_t* mi_tf_block(mi_thread_free_t tf) {
  return (mi_block_t*)(tf & ~0x03);
}
static inline mi_delayed_t mi_tf_delayed(mi_thread_free_t tf) {
  return (mi_delayed_t)(tf & 0x03);
}
static inline mi_thread_free_t mi_tf_make(mi_block_t* block, mi_delayed_t delayed) {
  return (mi_thread_free_t)((uintptr_t)block | (uintptr_t)delayed);
}
static inline mi_thread_free_t mi_tf_set_delayed(mi_thread_free_t tf, mi_delayed_t delayed) {
  return mi_tf_make(mi_tf_block(tf), delayed);
}
static inline mi_thread_free_t mi_tf_set_block(mi_thread_free_t tf, mi_block_t* block) {
  return mi_tf_make(block, mi_tf_delayed(tf));
}
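
// Illustrative (not from the original source): `mi_thread_free_t` packs a
// block pointer together with a 2-bit `mi_delayed_t` flag in one word (which
// works since blocks are at least 4-byte aligned), and the helpers round-trip:
//
//   mi_thread_free_t tf = mi_tf_make(block, MI_USE_DELAYED_FREE);
//   mi_assert_internal(mi_tf_block(tf) == block);
//   mi_assert_internal(mi_tf_delayed(tf) == MI_USE_DELAYED_FREE);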

// are all blocks in a page freed?
// note: needs an up-to-date `used` count (as the `xthread_free` list may not be empty); see `_mi_page_collect_free`.
static inline bool mi_page_all_free(const mi_page_t* page) {
  mi_assert_internal(page != NULL);
  return (page->used == 0);
}

// are there any available blocks?
static inline bool mi_page_has_any_available(const mi_page_t* page) {
  mi_assert_internal(page != NULL && page->reserved > 0);
  return (page->used < page->reserved || (mi_page_thread_free(page) != NULL));
}

// are there immediately available blocks, i.e. blocks available on the free list.
static inline bool mi_page_immediate_available(const mi_page_t* page) {
  mi_assert_internal(page != NULL);
  return (page->free != NULL);
}

// is more than 7/8th of a page in use?
static inline bool mi_page_mostly_used(const mi_page_t* page) {
  if (page == NULL) return true;
  uint16_t frac = page->reserved / 8U;
  return (page->reserved - page->used <= frac);
}

static inline mi_page_queue_t* mi_page_queue(const mi_heap_t* heap, size_t size) {
  return &((mi_heap_t*)heap)->pages[_mi_bin(size)];
}

//-----------------------------------------------------------
// Page flags
//-----------------------------------------------------------
static inline bool mi_page_is_in_full(const mi_page_t* page) {
  return page->flags.x.in_full;
}

static inline void mi_page_set_in_full(mi_page_t* page, bool in_full) {
  page->flags.x.in_full = in_full;
}

static inline bool mi_page_has_aligned(const mi_page_t* page) {
  return page->flags.x.has_aligned;
}

static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) {
  page->flags.x.has_aligned = has_aligned;
}

/* -------------------------------------------------------------------
Encoding/Decoding the free list next pointers

This is to protect against buffer overflow exploits where the
free list is mutated. Many hardened allocators xor the next pointer `p`
with a secret key `k1`, as `p^k1`. This prevents overwriting with known
values but might be still too weak: if the attacker can guess
the pointer `p` this can reveal `k1` (since `p^k1^p == k1`).
Moreover, if multiple blocks can be read as well, the attacker can
xor both as `(p1^k1)^(p2^k1) == p1^p2` which may reveal a lot
about the pointers (and subsequently `k1`).

Instead mimalloc uses an extra key `k2` and encodes as `((p^k2)<<<k1)+k1`.
Since these operations are not associative, the above approaches do not
work so well any more even if the `p` can be guesstimated. For example,
for the read case we can subtract two entries to discard the `+k1` term,
but that leads to `((p1^k2)<<<k1) - ((p2^k2)<<<k1)` at best.
We include the left-rotation since xor and addition are otherwise linear
in the lowest bit. Finally, both keys are unique per page which reduces
the re-use of keys by a large factor.

We also pass a separate `null` value to be used as `NULL` or otherwise
`(k2<<<k1)+k1` would appear (too) often as a sentinel value.
------------------------------------------------------------------- */

static inline bool mi_is_in_same_segment(const void* p, const void* q) {
  return (_mi_ptr_segment(p) == _mi_ptr_segment(q));
}

static inline bool mi_is_in_same_page(const void* p, const void* q) {
  mi_segment_t* segmentp = _mi_ptr_segment(p);
  mi_segment_t* segmentq = _mi_ptr_segment(q);
  if (segmentp != segmentq) return false;
  size_t idxp = _mi_segment_page_idx_of(segmentp, p);
  size_t idxq = _mi_segment_page_idx_of(segmentq, q);
  return (idxp == idxq);
}

static inline uintptr_t mi_rotl(uintptr_t x, uintptr_t shift) {
  shift %= MI_INTPTR_BITS;
  return (shift == 0 ? x : ((x << shift) | (x >> (MI_INTPTR_BITS - shift))));
}
static inline uintptr_t mi_rotr(uintptr_t x, uintptr_t shift) {
  shift %= MI_INTPTR_BITS;
  return (shift == 0 ? x : ((x >> shift) | (x << (MI_INTPTR_BITS - shift))));
}

static inline void* mi_ptr_decode(const void* null, const mi_encoded_t x, const uintptr_t* keys) {
  void* p = (void*)(mi_rotr(x - keys[0], keys[0]) ^ keys[1]);
  return (p == null ? NULL : p);
}

static inline mi_encoded_t mi_ptr_encode(const void* null, const void* p, const uintptr_t* keys) {
  uintptr_t x = (uintptr_t)(p == NULL ? null : p);
  return mi_rotl(x ^ keys[1], keys[0]) + keys[0];
}
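
// Illustrative round-trip (not from the original source; with `MI_ENCODE_FREELIST`
// enabled so that the per-page `page->keys` field is available): encoding and
// then decoding with the same `null` sentinel and keys returns the original pointer:
//
//   mi_encoded_t enc = mi_ptr_encode(page, p, page->keys);
//   mi_assert_internal(mi_ptr_decode(page, enc, page->keys) == p);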

static inline mi_block_t* mi_block_nextx(const void* null, const mi_block_t* block, const uintptr_t* keys) {
  mi_track_mem_defined(block, sizeof(mi_block_t));
  mi_block_t* next;
  #ifdef MI_ENCODE_FREELIST
  next = (mi_block_t*)mi_ptr_decode(null, block->next, keys);
  #else
  MI_UNUSED(keys); MI_UNUSED(null);
  next = (mi_block_t*)block->next;
  #endif
  mi_track_mem_noaccess(block, sizeof(mi_block_t));
  return next;
}

static inline void mi_block_set_nextx(const void* null, mi_block_t* block, const mi_block_t* next, const uintptr_t* keys) {
  mi_track_mem_undefined(block, sizeof(mi_block_t));
  #ifdef MI_ENCODE_FREELIST
  block->next = mi_ptr_encode(null, next, keys);
  #else
  MI_UNUSED(keys); MI_UNUSED(null);
  block->next = (mi_encoded_t)next;
  #endif
  mi_track_mem_noaccess(block, sizeof(mi_block_t));
}

static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t* block) {
  #ifdef MI_ENCODE_FREELIST
  mi_block_t* next = mi_block_nextx(page, block, page->keys);
  // check for free list corruption: is `next` at least in the same page?
  // TODO: check if `next` is `page->block_size` aligned?
  if mi_unlikely(next != NULL && !mi_is_in_same_page(block, next)) {
    _mi_error_message(EFAULT, "corrupted free list entry of size %zub at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next);
    next = NULL;
  }
  return next;
  #else
  MI_UNUSED(page);
  return mi_block_nextx(page, block, NULL);
  #endif
}
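
// Illustrative sketch (not mimalloc code): `mi_block_next` is how a page-local
// free list is walked, e.g. to count the blocks on it:
//
//   size_t count = 0;
//   for (mi_block_t* b = page->free; b != NULL; b = mi_block_next(page, b)) {
//     count++;
//   }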

static inline void mi_block_set_next(const mi_page_t* page, mi_block_t* block, const mi_block_t* next) {
  #ifdef MI_ENCODE_FREELIST
  mi_block_set_nextx(page, block, next, page->keys);
  #else
  MI_UNUSED(page);
  mi_block_set_nextx(page, block, next, NULL);
  #endif
}
2019-12-23 04:07:01 +03:00
// -------------------------------------------------------------------
// Fast "random" shuffle
// -------------------------------------------------------------------
static inline uintptr_t _mi_random_shuffle ( uintptr_t x ) {
2019-12-29 02:17:49 +03:00
if ( x = = 0 ) { x = 17 ; } // ensure we don't get stuck in generating zeros
2019-12-23 04:07:01 +03:00
# if (MI_INTPTR_SIZE==8)
// by Sebastiano Vigna, see: <http://xoshiro.di.unimi.it/splitmix64.c>
x ^ = x > > 30 ;
x * = 0xbf58476d1ce4e5b9UL ;
x ^ = x > > 27 ;
x * = 0x94d049bb133111ebUL ;
x ^ = x > > 31 ;
# elif (MI_INTPTR_SIZE==4)
// by Chris Wellons, see: <https://nullprogram.com/blog/2018/07/31/>
x ^ = x > > 16 ;
x * = 0x7feb352dUL ;
x ^ = x > > 15 ;
x * = 0x846ca68bUL ;
x ^ = x > > 16 ;
# endif
return x ;
}

// -------------------------------------------------------------------
// Optimize numa node access for the common case (= one node)
// -------------------------------------------------------------------

int    _mi_os_numa_node_get(mi_os_tld_t* tld);
size_t _mi_os_numa_node_count_get(void);

extern _Atomic(size_t) _mi_numa_node_count;
static inline int _mi_os_numa_node(mi_os_tld_t* tld) {
  if mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1) { return 0; }
  else return _mi_os_numa_node_get(tld);
}
static inline size_t _mi_os_numa_node_count(void) {
  const size_t count = mi_atomic_load_relaxed(&_mi_numa_node_count);
  if mi_likely(count > 0) { return count; }
  else return _mi_os_numa_node_count_get();
}

// -------------------------------------------------------------------
// Getting the thread id should be performant as it is called in the
// fast path of `_mi_free` and we specialize for various platforms.
// We only require `_mi_thread_id()` to return a unique id for each thread.
// -------------------------------------------------------------------
#if defined(_WIN32)

#define WIN32_LEAN_AND_MEAN
#include <windows.h>
static inline mi_threadid_t _mi_thread_id(void) mi_attr_noexcept {
  // Windows: works on Intel and ARM in both 32- and 64-bit
  return (uintptr_t)NtCurrentTeb();
}

// We use assembly for a fast thread id on the main platforms. The TLS layout depends on
// both the OS and libc implementation so we use specific tests for each main platform.
// If you test on another platform and it works please send a PR :-)
// see also https://akkadia.org/drepper/tls.pdf for more info on the TLS register.
#elif defined(__GNUC__) && ( \
           (defined(__GLIBC__)   && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \
        || (defined(__APPLE__)   && (defined(__x86_64__) || defined(__aarch64__))) \
        || (defined(__BIONIC__)  && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \
        || (defined(__FreeBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \
        || (defined(__OpenBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \
      )

static inline void* mi_tls_slot(size_t slot) mi_attr_noexcept {
  void* res;
  const size_t ofs = (slot*sizeof(void*));
  #if defined(__i386__)
  __asm__("movl %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : );  // x86 32-bit always uses GS
  #elif defined(__APPLE__) && defined(__x86_64__)
  __asm__("movq %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : );  // x86_64 macOSX uses GS
  #elif defined(__x86_64__) && (MI_INTPTR_SIZE==4)
  __asm__("movl %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : );  // x32 ABI
  #elif defined(__x86_64__)
  __asm__("movq %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : );  // x86_64 Linux, BSD uses FS
  #elif defined(__arm__)
  void** tcb; MI_UNUSED(ofs);
  __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
  res = tcb[slot];
  #elif defined(__aarch64__)
  void** tcb; MI_UNUSED(ofs);
  #if defined(__APPLE__)  // M1, issue #343
  __asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb));
  #else
  __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
  #endif
  res = tcb[slot];
  #endif
  return res;
}

// setting a tls slot is only used on macOS for now
static inline void mi_tls_slot_set(size_t slot, void* value) mi_attr_noexcept {
  const size_t ofs = (slot*sizeof(void*));
  #if defined(__i386__)
  __asm__("movl %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : );  // 32-bit always uses GS
  #elif defined(__APPLE__) && defined(__x86_64__)
  __asm__("movq %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : );  // x86_64 macOS uses GS
  #elif defined(__x86_64__) && (MI_INTPTR_SIZE==4)
  __asm__("movl %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : );  // x32 ABI
  #elif defined(__x86_64__)
  __asm__("movq %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : );  // x86_64 Linux, BSD uses FS
  #elif defined(__arm__)
  void** tcb; MI_UNUSED(ofs);
  __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
  tcb[slot] = value;
  #elif defined(__aarch64__)
  void** tcb; MI_UNUSED(ofs);
  #if defined(__APPLE__)  // M1, issue #343
  __asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb));
  #else
  __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
  #endif
  tcb[slot] = value;
  #endif
}

static inline mi_threadid_t _mi_thread_id(void) mi_attr_noexcept {
  #if defined(__BIONIC__)
  // issue #384, #495: on the Bionic libc (Android), slot 1 is the thread id
  // see: https://github.com/aosp-mirror/platform_bionic/blob/c44b1d0676ded732df4b3b21c5f798eacae93228/libc/platform/bionic/tls_defines.h#L86
  return (uintptr_t)mi_tls_slot(1);
  #else
  // in all our other targets, slot 0 is the thread id
  // glibc: https://sourceware.org/git/?p=glibc.git;a=blob_plain;f=sysdeps/x86_64/nptl/tls.h
  // apple: https://github.com/apple/darwin-xnu/blob/main/libsyscall/os/tsd.h#L36
  return (uintptr_t)mi_tls_slot(0);
  #endif
}

#else

// otherwise use portable C, taking the address of a thread local variable (this is still very fast on most platforms).
static inline mi_threadid_t _mi_thread_id(void) mi_attr_noexcept {
  return (uintptr_t)&_mi_heap_default;
}

#endif

// -----------------------------------------------------------------------
// Count bits: trailing or leading zeros (with MI_INTPTR_BITS on all zero)
// -----------------------------------------------------------------------

#if defined(__GNUC__)

#include <limits.h>  // LONG_MAX
#define MI_HAVE_FAST_BITSCAN
static inline size_t mi_clz(uintptr_t x) {
  if (x == 0) return MI_INTPTR_BITS;
#if (INTPTR_MAX == LONG_MAX)
  return __builtin_clzl(x);
#else
  return __builtin_clzll(x);
#endif
}
static inline size_t mi_ctz(uintptr_t x) {
  if (x == 0) return MI_INTPTR_BITS;
#if (INTPTR_MAX == LONG_MAX)
  return __builtin_ctzl(x);
#else
  return __builtin_ctzll(x);
#endif
}

#elif defined(_MSC_VER)

#include <limits.h>  // LONG_MAX
#define MI_HAVE_FAST_BITSCAN
static inline size_t mi_clz(uintptr_t x) {
  if (x == 0) return MI_INTPTR_BITS;
  unsigned long idx;
#if (INTPTR_MAX == LONG_MAX)
  _BitScanReverse(&idx, x);
#else
  _BitScanReverse64(&idx, x);
#endif
  return ((MI_INTPTR_BITS - 1) - idx);
}
static inline size_t mi_ctz(uintptr_t x) {
  if (x == 0) return MI_INTPTR_BITS;
  unsigned long idx;
#if (INTPTR_MAX == LONG_MAX)
  _BitScanForward(&idx, x);
#else
  _BitScanForward64(&idx, x);
#endif
  return idx;
}

#else
static inline size_t mi_ctz32(uint32_t x) {
  // de Bruijn multiplication, see <http://supertech.csail.mit.edu/papers/debruijn.pdf>
  static const unsigned char debruijn[32] = {
    0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
    31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
  };
  if (x == 0) return 32;
  return debruijn[((x & -(int32_t)x) * 0x077CB531UL) >> 27];
}
static inline size_t mi_clz32(uint32_t x) {
  // de Bruijn multiplication, see <http://supertech.csail.mit.edu/papers/debruijn.pdf>
  static const uint8_t debruijn[32] = {
    31, 22, 30, 21, 18, 10, 29, 2, 20, 17, 15, 13, 9, 6, 28, 1,
    23, 19, 11, 3, 16, 14, 7, 24, 12, 4, 8, 25, 5, 26, 27, 0
  };
  if (x == 0) return 32;
  x |= x >> 1;
  x |= x >> 2;
  x |= x >> 4;
  x |= x >> 8;
  x |= x >> 16;
  return debruijn[(uint32_t)(x * 0x07C4ACDDUL) >> 27];
}

static inline size_t mi_clz(uintptr_t x) {
  if (x == 0) return MI_INTPTR_BITS;
#if (MI_INTPTR_BITS <= 32)
  return mi_clz32((uint32_t)x);
#else
  size_t count = mi_clz32((uint32_t)(x >> 32));
  if (count < 32) return count;
  return (32 + mi_clz32((uint32_t)x));
#endif
}
static inline size_t mi_ctz(uintptr_t x) {
  if (x == 0) return MI_INTPTR_BITS;
#if (MI_INTPTR_BITS <= 32)
  return mi_ctz32((uint32_t)x);
#else
  size_t count = mi_ctz32((uint32_t)x);
  if (count < 32) return count;
  return (32 + mi_ctz32((uint32_t)(x >> 32)));
#endif
}

#endif

// "bit scan reverse": Return index of the highest bit (or MI_INTPTR_BITS if `x` is zero)
static inline size_t mi_bsr(uintptr_t x) {
  return (x == 0 ? MI_INTPTR_BITS : MI_INTPTR_BITS - 1 - mi_clz(x));
}
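
// For example (illustrative): for `x == 12` (binary 1100), `mi_ctz(12) == 2`,
// `mi_clz(12) == MI_INTPTR_BITS - 4`, and `mi_bsr(12) == 3`.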

// ---------------------------------------------------------------------------------
// Provide our own `_mi_memcpy` for potential performance optimizations.
//
// For now, only on Windows with msvc/clang-cl we optimize to `rep movsb` if
// we happen to run on x86/x64 cpu's that have "fast short rep movsb" (FSRM) support
// (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017)). See also issue #201 and pr #253.
// ---------------------------------------------------------------------------------

#if !MI_TRACK_ENABLED && defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64))
#include <intrin.h>
#include <string.h>
extern bool _mi_cpu_has_fsrm;
static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
  if (_mi_cpu_has_fsrm) {
    __movsb((unsigned char*)dst, (const unsigned char*)src, n);
  }
  else {
    memcpy(dst, src, n);
  }
}
static inline void _mi_memzero(void* dst, size_t n) {
  if (_mi_cpu_has_fsrm) {
    __stosb((unsigned char*)dst, 0, n);
  }
  else {
    memset(dst, 0, n);
  }
}
#else
#include <string.h>
static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
  memcpy(dst, src, n);
}
static inline void _mi_memzero(void* dst, size_t n) {
  memset(dst, 0, n);
}
#endif

// -------------------------------------------------------------------------------
// The `_mi_memcpy_aligned` can be used if the pointers are machine-word aligned.
// This is used for example in `mi_realloc`.
// -------------------------------------------------------------------------------

#if (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__)
// On GCC/CLang we provide a hint that the pointers are word aligned.
#include <string.h>
static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) {
  mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0));
  void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE);
  const void* asrc = __builtin_assume_aligned(src, MI_INTPTR_SIZE);
  _mi_memcpy(adst, asrc, n);
}

static inline void _mi_memzero_aligned(void* dst, size_t n) {
  mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0);
  void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE);
  _mi_memzero(adst, n);
}
#else
// Default fallback on `_mi_memcpy`
static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) {
  mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0));
  _mi_memcpy(dst, src, n);
}

static inline void _mi_memzero_aligned(void* dst, size_t n) {
  mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0);
  _mi_memzero(dst, n);
}
#endif

#endif  // MIMALLOC_INTERNAL_H