commit 505ea78cae
daan, 2021-10-19 15:07:19 -07:00
15 changed files with 104 additions and 77 deletions

View File

@@ -43,7 +43,7 @@ set(mi_sources
   src/init.c)
 # -----------------------------------------------------------------------------
-# Converience: set default build type depending on the build directory
+# Convenience: set default build type depending on the build directory
 # -----------------------------------------------------------------------------
 if (NOT CMAKE_BUILD_TYPE)
@@ -165,7 +165,7 @@ endif()
 # Compiler flags
 if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang|GNU")
-  list(APPEND mi_cflags -Wall -Wextra -Wno-unknown-pragmas -fvisibility=hidden)
+  list(APPEND mi_cflags -Wall -Wextra -Wno-unknown-pragmas -Wstrict-prototypes -fvisibility=hidden)
   if(CMAKE_C_COMPILER_ID MATCHES "GNU")
     list(APPEND mi_cflags -Wno-invalid-memory-model)
   endif()
@@ -224,7 +224,7 @@ else()
 endif()
 string(TOLOWER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE_LC)
-if(NOT(CMAKE_BUILD_TYPE_LC MATCHES "^(release|relwithdebinfo|minsizerel)$"))
+if(NOT(CMAKE_BUILD_TYPE_LC MATCHES "^(release|relwithdebinfo|minsizerel|none)$"))
   set(mi_basename "${mi_basename}-${CMAKE_BUILD_TYPE_LC}") #append build type (e.g. -debug) if not a release version
 endif()
 if(MI_BUILD_SHARED)
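
Note: the new -Wstrict-prototypes flag is what drives the many "()" to "(void)" signature changes in the C sources further down in this commit. A minimal sketch (illustration only, not part of the commit) of what the warning catches:

// foo.c -- compile with: cc -Wall -Wstrict-prototypes -c foo.c
int bad_decl();       // warning: not a prototype; in C17 and earlier the parameter list
                      // is unspecified, so a call like bad_decl(1, 2) is not type-checked
int good_decl(void);  // a real prototype: the function takes no arguments

int bad_decl()      { return 0; }
int good_decl(void) { return 0; }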

View File

@@ -25,7 +25,7 @@ terms of the MIT license. A copy of the license can be found in the file
 #define mi_memory_order(name) std::memory_order_##name
 #elif defined(_MSC_VER)
 // Use MSVC C wrapper for C11 atomics
 #define _Atomic(tp) tp
 #define ATOMIC_VAR_INIT(x) x
 #define mi_atomic(name) mi_atomic_##name
 #define mi_memory_order(name) mi_memory_order_##name
@@ -173,7 +173,7 @@ static inline uintptr_t mi_atomic_exchange_explicit(_Atomic(uintptr_t)*p, uintpt
 }
 static inline void mi_atomic_thread_fence(mi_memory_order mo) {
   (void)(mo);
-  _Atomic(uintptr_t)x = 0;
+  _Atomic(uintptr_t) x = 0;
   mi_atomic_exchange_explicit(&x, 1, mo);
 }
 static inline uintptr_t mi_atomic_load_explicit(_Atomic(uintptr_t) const* p, mi_memory_order mo) {

View File

@@ -57,7 +57,7 @@ static inline uintptr_t _mi_random_shuffle(uintptr_t x);
 extern mi_decl_cache_align mi_stats_t _mi_stats_main;
 extern mi_decl_cache_align const mi_page_t _mi_page_empty;
 bool _mi_is_main_thread(void);
-bool _mi_preloading();     // true while the C runtime is not ready
+bool _mi_preloading(void); // true while the C runtime is not ready
 // os.c
 size_t _mi_os_page_size(void);
@@ -431,7 +431,7 @@ static inline mi_page_t* _mi_ptr_page(void* p) {
   return _mi_segment_page_of(_mi_ptr_segment(p), p);
 }
-// Get the block size of a page (special cased for huge objects)
+// Get the block size of a page (special case for huge objects)
 static inline size_t mi_page_block_size(const mi_page_t* page) {
   const size_t bsize = page->xblock_size;
   mi_assert_internal(bsize > 0);

View File

@@ -105,10 +105,10 @@ terms of the MIT license. A copy of the license can be found in the file
 // Main tuning parameters for segment and page sizes
 // Sizes for 64-bit, divide by two for 32-bit
-#define MI_SMALL_PAGE_SHIFT   (13 + MI_INTPTR_SHIFT)   // 64kb
-#define MI_MEDIUM_PAGE_SHIFT  ( 3 + MI_SMALL_PAGE_SHIFT)  // 512kb
-#define MI_LARGE_PAGE_SHIFT   ( 3 + MI_MEDIUM_PAGE_SHIFT) // 4mb
-#define MI_SEGMENT_SHIFT      ( MI_LARGE_PAGE_SHIFT)   // 4mb
+#define MI_SMALL_PAGE_SHIFT   (13 + MI_INTPTR_SHIFT)   // 64KiB
+#define MI_MEDIUM_PAGE_SHIFT  ( 3 + MI_SMALL_PAGE_SHIFT)  // 512KiB
+#define MI_LARGE_PAGE_SHIFT   ( 3 + MI_MEDIUM_PAGE_SHIFT) // 4MiB
+#define MI_SEGMENT_SHIFT      ( MI_LARGE_PAGE_SHIFT)   // 4MiB
 // Derived constants
 #define MI_SEGMENT_SIZE       (1UL<<MI_SEGMENT_SHIFT)
@@ -124,9 +124,9 @@ terms of the MIT license. A copy of the license can be found in the file
 // The max object size are checked to not waste more than 12.5% internally over the page sizes.
 // (Except for large pages since huge objects are allocated in 4MiB chunks)
-#define MI_SMALL_OBJ_SIZE_MAX   (MI_SMALL_PAGE_SIZE/4)   // 16kb
-#define MI_MEDIUM_OBJ_SIZE_MAX  (MI_MEDIUM_PAGE_SIZE/4)  // 128kb
-#define MI_LARGE_OBJ_SIZE_MAX   (MI_LARGE_PAGE_SIZE/2)   // 2mb
+#define MI_SMALL_OBJ_SIZE_MAX   (MI_SMALL_PAGE_SIZE/4)   // 16KiB
+#define MI_MEDIUM_OBJ_SIZE_MAX  (MI_MEDIUM_PAGE_SIZE/4)  // 128KiB
+#define MI_LARGE_OBJ_SIZE_MAX   (MI_LARGE_PAGE_SIZE/2)   // 2MiB
 #define MI_LARGE_OBJ_WSIZE_MAX  (MI_LARGE_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
 #define MI_HUGE_OBJ_SIZE_MAX    (2*MI_INTPTR_SIZE*MI_SEGMENT_SIZE) // (must match MI_REGION_MAX_ALLOC_SIZE in memory.c)
@@ -249,13 +249,13 @@ typedef struct mi_page_s {
 typedef enum mi_page_kind_e {
-  MI_PAGE_SMALL,   // small blocks go into 64kb pages inside a segment
-  MI_PAGE_MEDIUM,  // medium blocks go into 512kb pages inside a segment
+  MI_PAGE_SMALL,   // small blocks go into 64KiB pages inside a segment
+  MI_PAGE_MEDIUM,  // medium blocks go into 512KiB pages inside a segment
   MI_PAGE_LARGE,   // larger blocks go into a single page spanning a whole segment
-  MI_PAGE_HUGE     // huge blocks (>512kb) are put into a single page in a segment of the exact size (but still 2mb aligned)
+  MI_PAGE_HUGE     // huge blocks (>512KiB) are put into a single page in a segment of the exact size (but still 2MiB aligned)
 } mi_page_kind_t;
-// Segments are large allocated memory blocks (2mb on 64 bit) from
+// Segments are large allocated memory blocks (2MiB on 64 bit) from
 // the OS. Inside segments we allocated fixed size _pages_ that
 // contain blocks.
 typedef struct mi_segment_s {
@@ -319,7 +319,7 @@ typedef struct mi_random_cxt_s {
 } mi_random_ctx_t;
-// In debug mode there is a padding stucture at the end of the blocks to check for buffer overflows
+// In debug mode there is a padding structure at the end of the blocks to check for buffer overflows
 #if (MI_PADDING)
 typedef struct mi_padding_s {
   uint32_t canary; // encoded block value to check validity of the padding (in case of overflow)
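
For reference, the sizes behind the updated comments can be spelled out directly; a small standalone sketch (not part of the commit) assuming a 64-bit target where MI_INTPTR_SHIFT is 3:

#include <assert.h>  // static_assert (C11)

#define MI_INTPTR_SHIFT        3                           // assumed: 64-bit target
#define MI_SMALL_PAGE_SHIFT   (13 + MI_INTPTR_SHIFT)       // 1 << 16 = 64 KiB
#define MI_MEDIUM_PAGE_SHIFT  ( 3 + MI_SMALL_PAGE_SHIFT)   // 1 << 19 = 512 KiB
#define MI_LARGE_PAGE_SHIFT   ( 3 + MI_MEDIUM_PAGE_SHIFT)  // 1 << 22 = 4 MiB
#define MI_SEGMENT_SHIFT      ( MI_LARGE_PAGE_SHIFT)       // segments are 4 MiB

static_assert((1UL << MI_SMALL_PAGE_SHIFT)      ==  64UL * 1024,        "small pages: 64 KiB");
static_assert((1UL << MI_MEDIUM_PAGE_SHIFT)     == 512UL * 1024,        "medium pages: 512 KiB");
static_assert((1UL << MI_LARGE_PAGE_SHIFT)      ==   4UL * 1024 * 1024, "large pages / segments: 4 MiB");
static_assert((1UL << MI_SMALL_PAGE_SHIFT) / 4  ==  16UL * 1024,        "small objects up to 16 KiB");
static_assert((1UL << MI_MEDIUM_PAGE_SHIFT) / 4 == 128UL * 1024,        "medium objects up to 128 KiB");
static_assert((1UL << MI_LARGE_PAGE_SHIFT) / 2  ==   2UL * 1024 * 1024, "large objects up to 2 MiB");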

View File

@@ -4,6 +4,10 @@ This is free software; you can redistribute it and/or modify it under the
 terms of the MIT license. A copy of the license can be found in the file
 "LICENSE" at the root of this distribution.
 -----------------------------------------------------------------------------*/
+#ifndef _DEFAULT_SOURCE
+#define _DEFAULT_SOURCE // for realpath() on Linux
+#endif
 #include "mimalloc.h"
 #include "mimalloc-internal.h"
 #include "mimalloc-atomic.h"
@@ -465,7 +469,7 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms
 #endif
 #if (MI_DEBUG>0 || MI_SECURE>=4)
   if (mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie)) {
-    _mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", p);
+    _mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p);
   }
 #endif
   return segment;
@@ -747,7 +751,7 @@ mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char
 }
 #else
 #include <unistd.h> // pathconf
-static size_t mi_path_max() {
+static size_t mi_path_max(void) {
   static size_t path_max = 0;
   if (path_max <= 0) {
     long m = pathconf("/",_PC_PATH_MAX);
@@ -807,13 +811,13 @@ static bool mi_try_new_handler(bool nothrow) {
   }
 }
 #else
-typedef void (*std_new_handler_t)();
+typedef void (*std_new_handler_t)(void);
 #if (defined(__GNUC__) || defined(__clang__))
-std_new_handler_t __attribute((weak)) _ZSt15get_new_handlerv() {
+std_new_handler_t __attribute((weak)) _ZSt15get_new_handlerv(void) {
   return NULL;
 }
-static std_new_handler_t mi_get_new_handler() {
+static std_new_handler_t mi_get_new_handler(void) {
   return _ZSt15get_new_handlerv();
 }
 #else

View File

@@ -62,11 +62,11 @@ typedef struct mi_arena_s {
   size_t field_count;                  // number of bitmap fields (where `field_count * MI_BITMAP_FIELD_BITS >= block_count`)
   int numa_node;                       // associated NUMA node
   bool is_zero_init;                   // is the arena zero initialized?
-  bool is_committed;                   // is the memory fully committed? (if so, block_committed == NULL)
+  bool allow_decommit;                 // is decommit allowed? if true, is_large should be false and blocks_committed != NULL
   bool is_large;                       // large- or huge OS pages (always committed)
   _Atomic(uintptr_t) search_idx;       // optimization to start the search for free blocks
   mi_bitmap_field_t* blocks_dirty;     // are the blocks potentially non-zero?
-  mi_bitmap_field_t* blocks_committed; // if `!is_committed`, are the blocks committed?
+  mi_bitmap_field_t* blocks_committed; // are the blocks committed? (can be NULL for memory that cannot be decommitted)
   mi_bitmap_field_t blocks_inuse[1];   // in-place bitmap of in-use blocks (of size `field_count`)
 } mi_arena_t;
@@ -129,8 +129,8 @@ static void* mi_arena_alloc_from(mi_arena_t* arena, size_t arena_index, size_t n
   *memid = mi_arena_id_create(arena_index, bitmap_index);
   *is_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
   *large = arena->is_large;
-  *is_pinned = (arena->is_large || arena->is_committed);
-  if (arena->is_committed) {
+  *is_pinned = (arena->is_large || !arena->allow_decommit);
+  if (arena->blocks_committed == NULL) {
     // always committed
     *commit = true;
   }
@@ -245,12 +245,13 @@ void _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_s
     return;
   }
   // potentially decommit
-  if (arena->is_committed) {
-    mi_assert_internal(all_committed);
+  if (!arena->allow_decommit || arena->blocks_committed == NULL) {
+    mi_assert_internal(all_committed); // note: may be not true as we may "pretend" to be not committed (in segment.c)
   }
   else {
     mi_assert_internal(arena->blocks_committed != NULL);
     _mi_os_decommit(p, blocks * MI_ARENA_BLOCK_SIZE, stats); // ok if this fails
+    // todo: use reset instead of decommit on windows?
     _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
   }
   // and make it available to others again
@@ -302,12 +303,16 @@ bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_la
   arena->numa_node = numa_node; // TODO: or get the current numa node if -1? (now it allows anyone to allocate on -1)
   arena->is_large = is_large;
   arena->is_zero_init = is_zero;
-  arena->is_committed = is_committed;
+  arena->allow_decommit = !is_large && !is_committed; // only allow decommit for initially uncommitted memory
   arena->search_idx = 0;
   arena->blocks_dirty = &arena->blocks_inuse[fields]; // just after inuse bitmap
-  arena->blocks_committed = (is_committed ? NULL : &arena->blocks_inuse[2*fields]); // just after dirty bitmap
+  arena->blocks_committed = (!arena->allow_decommit ? NULL : &arena->blocks_inuse[2*fields]); // just after dirty bitmap
   // the bitmaps are already zero initialized due to os_alloc
-  // just claim leftover blocks if needed
+  // initialize committed bitmap?
+  if (arena->blocks_committed != NULL && is_committed) {
+    memset(arena->blocks_committed, 0xFF, fields*sizeof(mi_bitmap_field_t));
+  }
+  // and claim leftover blocks if needed (so we never allocate there)
   ptrdiff_t post = (fields * MI_BITMAP_FIELD_BITS) - bcount;
   mi_assert_internal(post >= 0);
   if (post > 0) {
@@ -332,7 +337,7 @@ int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noe
     _mi_verbose_message("failed to reserve %zu k memory\n", _mi_divide_up(size,1024));
     return ENOMEM;
   }
-  _mi_verbose_message("reserved %zu kb memory%s\n", _mi_divide_up(size,1024), large ? " (in large os pages)" : "");
+  _mi_verbose_message("reserved %zu KiB memory%s\n", _mi_divide_up(size,1024), large ? " (in large os pages)" : "");
   return 0;
 }
@@ -349,10 +354,10 @@ int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msec
   size_t pages_reserved = 0;
   void* p = _mi_os_alloc_huge_os_pages(pages, numa_node, timeout_msecs, &pages_reserved, &hsize);
   if (p==NULL || pages_reserved==0) {
-    _mi_warning_message("failed to reserve %zu gb huge pages\n", pages);
+    _mi_warning_message("failed to reserve %zu GiB huge pages\n", pages);
     return ENOMEM;
   }
-  _mi_verbose_message("numa node %i: reserved %zu gb huge pages (of the %zu gb requested)\n", numa_node, pages_reserved, pages);
+  _mi_verbose_message("numa node %i: reserved %zu GiB huge pages (of the %zu GiB requested)\n", numa_node, pages_reserved, pages);
   if (!mi_manage_os_memory(p, hsize, true, true, true, numa_node)) {
     _mi_os_free_huge_pages(p, hsize, &_mi_stats_main);
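
The arena changes above replace the single is_committed flag with allow_decommit plus an optional blocks_committed bitmap. A standalone sketch (hypothetical helper, not in the commit) restating the invariant described in the struct comments:

#include <stdbool.h>
#include <stddef.h>

// Simplified view of the fields involved; names mirror mi_arena_t in the diff above.
typedef struct arena_view_s {
  bool  allow_decommit;    // may blocks be decommitted later?
  bool  is_large;          // large/huge OS pages are always committed
  void* blocks_committed;  // per-block commit bitmap, or NULL
} arena_view_t;

// Hypothetical check: if decommit is allowed the arena must not use large pages and
// must track per-block commit state; otherwise the memory stays fully committed and
// no bitmap is kept (blocks_committed == NULL).
bool arena_invariant_holds(const arena_view_t* a) {
  if (a->allow_decommit) return (!a->is_large && a->blocks_committed != NULL);
  return (a->blocks_committed == NULL);
}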

View File

@@ -333,7 +333,7 @@ void mi_heap_destroy(mi_heap_t* heap) {
   Safe Heap delete
 ----------------------------------------------------------- */
-// Tranfer the pages from one heap to the other
+// Transfer the pages from one heap to the other
 static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
   mi_assert_internal(heap!=NULL);
   if (from==NULL || from->page_count == 0) return;

View File

@@ -22,7 +22,7 @@ terms of the MIT license. A copy of the license can be found in the file
 static uintptr_t mi_max_error_count = 16;   // stop outputting errors after this
 static uintptr_t mi_max_warning_count = 16; // stop outputting warnings after this
-static void mi_add_stderr_output();
+static void mi_add_stderr_output(void);
 int mi_version(void) mi_attr_noexcept {
   return MI_MALLOC_VERSION;
@@ -409,6 +409,14 @@ static void mi_strlcat(char* dest, const char* src, size_t dest_size) {
   dest[dest_size - 1] = 0;
 }
+#ifdef MI_NO_GETENV
+static bool mi_getenv(const char* name, char* result, size_t result_size) {
+  UNUSED(name);
+  UNUSED(result);
+  UNUSED(result_size);
+  return false;
+}
+#else
 static inline int mi_strnicmp(const char* s, const char* t, size_t n) {
   if (n==0) return 0;
   for (; *s != 0 && *t != 0 && n > 0; s++, t++, n--) {
@@ -416,7 +424,6 @@ static inline int mi_strnicmp(const char* s, const char* t, size_t n) {
   }
   return (n==0 ? 0 : *s - *t);
 }
-
 #if defined _WIN32
 // On Windows use GetEnvironmentVariable instead of getenv to work
 // reliably even when this is invoked before the C runtime is initialized.
@@ -484,7 +491,8 @@ static bool mi_getenv(const char* name, char* result, size_t result_size) {
     return false;
   }
 }
-#endif
+#endif // !MI_USE_ENVIRON
+#endif // !MI_NO_GETENV
 static void mi_option_init(mi_option_desc_t* desc) {
   // Read option value from the environment

View File

@@ -98,7 +98,7 @@ size_t _mi_os_page_size() {
 }
 // if large OS pages are supported (2 or 4MiB), then return the size, otherwise return the small page size (4KiB)
-size_t _mi_os_large_page_size() {
+size_t _mi_os_large_page_size(void) {
   return (large_os_page_size != 0 ? large_os_page_size : _mi_os_page_size());
 }
@@ -215,7 +215,7 @@ void _mi_os_init(void) {
 }
 #elif defined(__wasi__)
 void _mi_os_init() {
-  os_page_size = 0x10000; // WebAssembly has a fixed page size: 64KB
+  os_page_size = 0x10000; // WebAssembly has a fixed page size: 64KiB
   os_alloc_granularity = 16;
 }
 #else
@@ -641,6 +641,10 @@ static void* mi_os_mem_alloc_aligned(size_t size, size_t alignment, bool commit,
       mi_os_mem_free(p, over_size, commit, stats);
       void* aligned_p = mi_align_up_ptr(p, alignment);
       p = mi_win_virtual_alloc(aligned_p, size, alignment, flags, false, allow_large, is_large);
+      if (p != NULL) {
+        _mi_stat_increase(&stats->reserved, size);
+        if (commit) { _mi_stat_increase(&stats->committed, size); }
+      }
       if (p == aligned_p) break; // success!
       if (p != NULL) { // should not happen?
         mi_os_mem_free(p, size, commit, stats);
@@ -792,9 +796,9 @@ static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservativ
     // for commit, just change the protection
     err = mprotect(start, csize, (PROT_READ | PROT_WRITE));
     if (err != 0) { err = errno; }
-    #if defined(MADV_FREE_REUSE)
-    while ((err = madvise(start, csize, MADV_FREE_REUSE)) != 0 && errno == EAGAIN) { errno = 0; }
-    #endif
+    //#if defined(MADV_FREE_REUSE)
+    // while ((err = madvise(start, csize, MADV_FREE_REUSE)) != 0 && errno == EAGAIN) { errno = 0; }
+    //#endif
   }
 #else
   err = mprotect(start, csize, (commit ? (PROT_READ | PROT_WRITE) : PROT_NONE));
@@ -856,17 +860,12 @@ static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats)
   if (p != start) return false;
 #else
 #if defined(MADV_FREE)
-#if defined(MADV_FREE_REUSABLE)
-#define KK_MADV_FREE_INITIAL MADV_FREE_REUSABLE
-#else
-#define KK_MADV_FREE_INITIAL MADV_FREE
-#endif
-  static _Atomic(uintptr_t) advice = ATOMIC_VAR_INIT(KK_MADV_FREE_INITIAL);
+  static _Atomic(uintptr_t) advice = ATOMIC_VAR_INIT(MADV_FREE);
   int oadvice = (int)mi_atomic_load_relaxed(&advice);
   int err;
   while ((err = madvise(start, csize, oadvice)) != 0 && errno == EAGAIN) { errno = 0; };
-  if (err != 0 && errno == EINVAL && oadvice == KK_MADV_FREE_INITIAL) {
-    // if MADV_FREE/MADV_FREE_REUSABLE is not supported, fall back to MADV_DONTNEED from now on
+  if (err != 0 && errno == EINVAL && oadvice == MADV_FREE) {
+    // if MADV_FREE is not supported, fall back to MADV_DONTNEED from now on
     mi_atomic_store_release(&advice, (uintptr_t)MADV_DONTNEED);
     err = madvise(start, csize, MADV_DONTNEED);
   }
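
The reset path above drops the MADV_FREE_REUSABLE experiment: it starts with MADV_FREE and, on the first EINVAL (MADV_FREE needs Linux 4.5 or later), permanently downgrades the shared advice to MADV_DONTNEED. A condensed sketch of that fallback pattern (illustration only), with plain C11 atomics standing in for the mi_atomic wrappers:

#include <errno.h>
#include <stdatomic.h>
#include <stddef.h>
#include <sys/mman.h>

#if defined(MADV_FREE)
// Shared advice value: MADV_FREE until the kernel rejects it once, then
// MADV_DONTNEED for the rest of the process lifetime.
static _Atomic int reset_advice = MADV_FREE;

int os_reset_sketch(void* start, size_t size) {
  int advice = atomic_load_explicit(&reset_advice, memory_order_relaxed);
  int err;
  while ((err = madvise(start, size, advice)) != 0 && errno == EAGAIN) { errno = 0; }
  if (err != 0 && errno == EINVAL && advice == MADV_FREE) {
    // MADV_FREE unsupported on this kernel: fall back permanently.
    atomic_store_explicit(&reset_advice, MADV_DONTNEED, memory_order_release);
    err = madvise(start, size, MADV_DONTNEED);
  }
  return err;
}
#endif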
@@ -1012,7 +1011,7 @@ static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node)
     else {
       // fall back to regular large pages
       mi_huge_pages_available = false; // don't try further huge pages
-      _mi_warning_message("unable to allocate using huge (1gb) pages, trying large (2mb) pages instead (status 0x%lx)\n", err);
+      _mi_warning_message("unable to allocate using huge (1GiB) pages, trying large (2MiB) pages instead (status 0x%lx)\n", err);
     }
   }
   // on modern Windows try use VirtualAlloc2 for numa aware large OS page allocation
@@ -1055,7 +1054,7 @@ static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node)
     // see: <https://lkml.org/lkml/2017/2/9/875>
     long err = mi_os_mbind(p, size, MPOL_PREFERRED, &numa_mask, 8*MI_INTPTR_SIZE, 0);
     if (err != 0) {
-      _mi_warning_message("failed to bind huge (1gb) pages to numa node %d: %s\n", numa_node, strerror(errno));
+      _mi_warning_message("failed to bind huge (1GiB) pages to numa node %d: %s\n", numa_node, strerror(errno));
     }
   }
   return p;

View File

@@ -7,7 +7,7 @@ terms of the MIT license. A copy of the license can be found in the file
 /* -----------------------------------------------------------
   The core of the allocator. Every segment contains
-  pages of a {certain block size. The main function
+  pages of a certain block size. The main function
   exported is `mi_malloc_generic`.
 ----------------------------------------------------------- */

View File

@@ -4,6 +4,10 @@ This is free software; you can redistribute it and/or modify it under the
 terms of the MIT license. A copy of the license can be found in the file
 "LICENSE" at the root of this distribution.
 -----------------------------------------------------------------------------*/
+#ifndef _DEFAULT_SOURCE
+#define _DEFAULT_SOURCE // for syscall() on Linux
+#endif
 #include "mimalloc.h"
 #include "mimalloc-internal.h"
@@ -194,8 +198,10 @@ static bool os_random_buf(void* buf, size_t buf_len) {
   arc4random_buf(buf, buf_len);
   return true;
 }
-#elif defined(__linux__)
+#elif defined(__linux__) || defined(__HAIKU__)
+#if defined(__linux__)
 #include <sys/syscall.h>
+#endif
 #include <unistd.h>
 #include <sys/types.h>
 #include <sys/stat.h>

View File

@@ -40,7 +40,7 @@ Possible issues:
 #include "bitmap.h"
 // Internal raw OS interface
-size_t _mi_os_large_page_size();
+size_t _mi_os_large_page_size(void);
 bool _mi_os_protect(void* addr, size_t size);
 bool _mi_os_unprotect(void* addr, size_t size);
 bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats);

View File

@@ -17,14 +17,14 @@ static uint8_t* mi_segment_raw_page_start(const mi_segment_t* segment, const mi_
 /* --------------------------------------------------------------------------------
   Segment allocation
-  We allocate pages inside bigger "segments" (4mb on 64-bit). This is to avoid
+  We allocate pages inside bigger "segments" (4MiB on 64-bit). This is to avoid
   splitting VMA's on Linux and reduce fragmentation on other OS's.
   Each thread owns its own segments.
   Currently we have:
-  - small pages (64kb), 64 in one segment
-  - medium pages (512kb), 8 in one segment
-  - large pages (4mb), 1 in one segment
+  - small pages (64KiB), 64 in one segment
+  - medium pages (512KiB), 8 in one segment
+  - large pages (4MiB), 1 in one segment
   - huge blocks > MI_LARGE_OBJ_SIZE_MAX become large segment with 1 page
   In any case the memory for a segment is virtual and usually committed on demand.

View File

@@ -133,25 +133,29 @@ static void mi_stats_add(mi_stats_t* stats, const mi_stats_t* src) {
 // unit == 0: count as decimal
 // unit < 0 : count in binary
 static void mi_printf_amount(int64_t n, int64_t unit, mi_output_fun* out, void* arg, const char* fmt) {
-  char buf[32];
+  char buf[32]; buf[0] = 0;
   int len = 32;
-  const char* suffix = (unit <= 0 ? " " : "b");
+  const char* suffix = (unit <= 0 ? " " : "B");
   const int64_t base = (unit == 0 ? 1000 : 1024);
   if (unit>0) n *= unit;
   const int64_t pos = (n < 0 ? -n : n);
   if (pos < base) {
-    snprintf(buf, len, "%d %s ", (int)n, suffix);
+    if (n!=1 || suffix[0] != 'B') { // skip printing 1 B for the unit column
+      snprintf(buf, len, "%d %-3s", (int)n, (n==0 ? "" : suffix));
+    }
   }
   else {
     int64_t divider = base;
-    const char* magnitude = "k";
-    if (pos >= divider*base) { divider *= base; magnitude = "m"; }
-    if (pos >= divider*base) { divider *= base; magnitude = "g"; }
+    const char* magnitude = "K";
+    if (pos >= divider*base) { divider *= base; magnitude = "M"; }
+    if (pos >= divider*base) { divider *= base; magnitude = "G"; }
     const int64_t tens = (n / (divider/10));
     const long whole = (long)(tens/10);
     const long frac1 = (long)(tens%10);
-    snprintf(buf, len, "%ld.%ld %s%s", whole, (frac1 < 0 ? -frac1 : frac1), magnitude, suffix);
+    char unitdesc[16];
+    snprintf(unitdesc, 16, "%s%s%s", magnitude, (base==1024 ? "i" : ""), suffix);
+    snprintf(buf, len, "%ld.%ld %-3s", whole, (frac1 < 0 ? -frac1 : frac1), unitdesc);
   }
   _mi_fprintf(out, arg, (fmt==NULL ? "%11s" : fmt), buf);
 }
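
The reworked mi_printf_amount labels binary quantities as KiB/MiB/GiB (the extra "i" comes from the base==1024 check) and left-pads the unit with %-3s. A standalone example (not from the commit) walking through the same arithmetic for one value:

#include <stdio.h>

int main(void) {
  long long n = 2359296;                  // 2.25 MiB, binary path (base == 1024)
  long long divider = 1024;
  const char* magnitude = "K";
  if (n >= divider * 1024) { divider *= 1024; magnitude = "M"; }  // divider = 1048576
  if (n >= divider * 1024) { divider *= 1024; magnitude = "G"; }  // not taken
  long long tens = n / (divider / 10);    // 2359296 / 104857 = 22
  long whole = (long)(tens / 10);         // 2
  long frac1 = (long)(tens % 10);         // 2
  printf("%ld.%ld %s%s%s\n", whole, frac1, magnitude, "i", "B");  // prints "2.2 MiB"
  return 0;
}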
@@ -221,7 +225,7 @@ static void mi_stat_counter_print_avg(const mi_stat_counter_t* stat, const char*
 static void mi_print_header(mi_output_fun* out, void* arg ) {
   _mi_fprintf(out, arg, "%10s: %10s %10s %10s %10s %10s %10s\n", "heap stats", "peak ", "total ", "freed ", "current ", "unit ", "count ");
 }
 #if MI_STAT>1
@@ -524,6 +528,7 @@ static void mi_stat_process_info(mi_msecs_t* elapsed, mi_msecs_t* utime, mi_msec
   while (get_next_area_info(tid.team, &c, &mem) == B_OK) {
     *peak_rss += mem.ram_size;
   }
+  *page_faults = 0;
 #elif defined(__APPLE__)
   *peak_rss = rusage.ru_maxrss; // BSD reports in bytes
   struct mach_task_basic_info info;

View File

@@ -64,15 +64,15 @@ static int failed = 0;
 // ---------------------------------------------------------------------------
 // Test functions
 // ---------------------------------------------------------------------------
-bool test_heap1();
-bool test_heap2();
-bool test_stl_allocator1();
-bool test_stl_allocator2();
+bool test_heap1(void);
+bool test_heap2(void);
+bool test_stl_allocator1(void);
+bool test_stl_allocator2(void);
 // ---------------------------------------------------------------------------
 // Main testing
 // ---------------------------------------------------------------------------
-int main() {
+int main(void) {
   mi_option_disable(mi_option_verbose);
   // ---------------------------------------------------
@@ -83,7 +83,7 @@ int main() {
     void* p = mi_malloc(0); mi_free(p);
   });
   CHECK_BODY("malloc-nomem1",{
-    result = (mi_malloc(SIZE_MAX/2) == NULL);
+    result = (mi_malloc((size_t)PTRDIFF_MAX + (size_t)1) == NULL);
   });
   CHECK_BODY("malloc-null",{
     mi_free(NULL);
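
The updated malloc-nomem1 check pins the overflow boundary down: a request above PTRDIFF_MAX is expected to fail with NULL. A minimal usage sketch of the same behavior (not part of the test suite):

#include <stdint.h>
#include <stdio.h>
#include "mimalloc.h"

int main(void) {
  // Anything larger than PTRDIFF_MAX must be rejected.
  void* p = mi_malloc((size_t)PTRDIFF_MAX + (size_t)1);
  printf("huge request %s\n", (p == NULL) ? "rejected as expected" : "unexpectedly succeeded");
  mi_free(p);  // mi_free(NULL) is a no-op, so this is safe either way
  return 0;
}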