From 49acc88924c7afd6a00c0836231e8923769fbe26 Mon Sep 17 00:00:00 2001 From: Daan Date: Tue, 24 Dec 2019 10:38:13 -0800 Subject: [PATCH 01/46] Update readme.md --- readme.md | 1 + 1 file changed, 1 insertion(+) diff --git a/readme.md b/readme.md index 9d3974c9..32332c08 100644 --- a/readme.md +++ b/readme.md @@ -56,6 +56,7 @@ Enjoy! ### Releases +* 2019-12-22, `v1.2.2`: stable release 1.2: minor updates. * 2019-11-22, `v1.2.0`: stable release 1.2: bug fixes, improved secure mode (free list corruption checks, double free mitigation). Improved dynamic overriding on Windows. * 2019-10-07, `v1.1.0`: stable release 1.1. * 2019-09-01, `v1.0.8`: pre-release 8: more robust windows dynamic overriding, initial huge page support. From 743e89173819a9fe3283fb94f4f6830d2f648186 Mon Sep 17 00:00:00 2001 From: Kirsten Lee Date: Mon, 6 Jan 2020 16:18:22 -0800 Subject: [PATCH 02/46] add stl mimalloc wrapper --- CMakeLists.txt | 3 +- ide/vs2017/mimalloc-override.vcxproj | 3 +- ide/vs2017/mimalloc.vcxproj | 3 +- ide/vs2019/mimalloc-override.vcxproj | 3 +- ide/vs2019/mimalloc.vcxproj | 3 +- include/mimalloc-stl-allocator.h | 44 ++++++++++++++++++++++++++++ test/test-api.c | 31 ++++++++++++++++++++ 7 files changed, 85 insertions(+), 5 deletions(-) create mode 100644 include/mimalloc-stl-allocator.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 467fad95..dcbdefef 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -187,6 +187,7 @@ install(TARGETS mimalloc-static EXPORT mimalloc DESTINATION ${mi_install_dir}) install(FILES include/mimalloc.h DESTINATION ${mi_install_dir}/include) install(FILES include/mimalloc-override.h DESTINATION ${mi_install_dir}/include) install(FILES include/mimalloc-new-delete.h DESTINATION ${mi_install_dir}/include) +install(FILES include/mimalloc-stl-allocator.h DESTINATION ${mi_install_dir}/include) install(FILES cmake/mimalloc-config.cmake DESTINATION ${mi_install_dir}/cmake) install(FILES cmake/mimalloc-config-version.cmake DESTINATION ${mi_install_dir}/cmake) install(EXPORT mimalloc DESTINATION ${mi_install_dir}/cmake) @@ -233,7 +234,7 @@ if (MI_BUILD_TESTS MATCHES "ON") target_compile_options(mimalloc-test-stress PRIVATE ${mi_cflags}) target_include_directories(mimalloc-test-stress PRIVATE include) target_link_libraries(mimalloc-test-stress PRIVATE mimalloc-static ${mi_libraries}) - + enable_testing() add_test(test_api, mimalloc-test-api) add_test(test_stress, mimalloc-test-stress) diff --git a/ide/vs2017/mimalloc-override.vcxproj b/ide/vs2017/mimalloc-override.vcxproj index 511c0fab..e0a6d85b 100644 --- a/ide/vs2017/mimalloc-override.vcxproj +++ b/ide/vs2017/mimalloc-override.vcxproj @@ -1,4 +1,4 @@ - + @@ -214,6 +214,7 @@ + diff --git a/ide/vs2017/mimalloc.vcxproj b/ide/vs2017/mimalloc.vcxproj index 6147c349..ff6c8edb 100644 --- a/ide/vs2017/mimalloc.vcxproj +++ b/ide/vs2017/mimalloc.vcxproj @@ -1,4 +1,4 @@ - + @@ -239,6 +239,7 @@ + diff --git a/ide/vs2019/mimalloc-override.vcxproj b/ide/vs2019/mimalloc-override.vcxproj index 96a8924f..e6416e05 100644 --- a/ide/vs2019/mimalloc-override.vcxproj +++ b/ide/vs2019/mimalloc-override.vcxproj @@ -1,4 +1,4 @@ - + @@ -214,6 +214,7 @@ + diff --git a/ide/vs2019/mimalloc.vcxproj b/ide/vs2019/mimalloc.vcxproj index 28e96d71..ffede6ca 100644 --- a/ide/vs2019/mimalloc.vcxproj +++ b/ide/vs2019/mimalloc.vcxproj @@ -1,4 +1,4 @@ - + @@ -239,6 +239,7 @@ + diff --git a/include/mimalloc-stl-allocator.h b/include/mimalloc-stl-allocator.h new file mode 100644 index 00000000..11ba30fb --- /dev/null +++ b/include/mimalloc-stl-allocator.h @@ -0,0 +1,44 
@@ +#pragma once +#ifndef MIMALLOC_STL_ALLOCATOR_H +#define MIMALLOC_STL_ALLOCATOR_H + +#ifdef __cplusplus +/* ---------------------------------------------------------------------------- This header can be used to hook mimalloc into STL containers in place of std::allocator. -----------------------------------------------------------------------------*/ +#include <mimalloc.h> +#include <type_traits> // true_type + +#pragma warning(disable: 4100) + +template<class T> +struct mi_stl_allocator { + typedef T value_type; + + using propagate_on_container_copy_assignment = std::true_type; + using propagate_on_container_move_assignment = std::true_type; + using propagate_on_container_swap = std::true_type; + using is_always_equal = std::true_type; + + mi_stl_allocator() noexcept {} + mi_stl_allocator(const mi_stl_allocator& other) noexcept {} + template<class U> + mi_stl_allocator(const mi_stl_allocator<U>& other) noexcept {} + + T* allocate(size_t n, const void* hint = 0) { + return (T*)mi_mallocn(n, sizeof(T)); + } + + void deallocate(T* p, size_t n) { + mi_free(p); + } +}; + +template<class T1, class T2> +bool operator==(const mi_stl_allocator<T1>& lhs, const mi_stl_allocator<T2>& rhs) noexcept { return true; } +template<class T1, class T2> +bool operator!=(const mi_stl_allocator<T1>& lhs, const mi_stl_allocator<T2>& rhs) noexcept { return false; } + +#endif // __cplusplus +#endif // MIMALLOC_STL_ALLOCATOR_H diff --git a/test/test-api.c b/test/test-api.c index bd2291da..7a9ee785 100644 --- a/test/test-api.c +++ b/test/test-api.c @@ -25,8 +25,10 @@ we therefore test the API over various inputs. Please add more tests :-) #include #include #include +#include <vector> #include "mimalloc.h" #include "mimalloc-internal.h" +#include "mimalloc-stl-allocator.h" // --------------------------------------------------------------------------- // Test macros: CHECK(name,predicate) and CHECK_BODY(name,body) // --------------------------------------------------------------------------- @@ -61,6 +63,8 @@ static int failed = 0; // --------------------------------------------------------------------------- bool test_heap1(); bool test_heap2(); +bool test_stl_allocator1(); +bool test_stl_allocator2(); // --------------------------------------------------------------------------- // Main testing // --------------------------------------------------------------------------- @@ -150,6 +154,9 @@ int main() { mi_free(s); }); + CHECK("stl_allocator1", test_stl_allocator1()); + CHECK("stl_allocator2", test_stl_allocator2()); + // --------------------------------------------------- // Done // ---------------------------------------------------[] @@ -182,3 +189,27 @@ bool test_heap2() { mi_free(p2); return true; } + +bool test_stl_allocator1() { +#ifdef __cplusplus + std::vector<int, mi_stl_allocator<int>> vec; + vec.push_back(1); + vec.pop_back(); + return vec.size() == 0; +#else + return true; +#endif +} + +bool test_stl_allocator2() { +#ifdef __cplusplus + struct some_struct { int i; int j; double z; }; + + std::vector<some_struct, mi_stl_allocator<some_struct>> vec; + vec.push_back(some_struct()); + vec.pop_back(); + return vec.size() == 0; +#else + return true; +#endif +} From d97c56d4c10d9161d7d5b8bec43f67b8f291b67f Mon Sep 17 00:00:00 2001 From: Kirsten Lee Date: Mon, 6 Jan 2020 16:25:21 -0800 Subject: [PATCH 03/46] fix unintended diff --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index dcbdefef..93560951 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -234,7 +234,7 @@ if (MI_BUILD_TESTS MATCHES "ON") target_compile_options(mimalloc-test-stress PRIVATE ${mi_cflags}) target_include_directories(mimalloc-test-stress PRIVATE include) target_link_libraries(mimalloc-test-stress PRIVATE mimalloc-static ${mi_libraries}) - + enable_testing() add_test(test_api, mimalloc-test-api) add_test(test_stress, mimalloc-test-stress)
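A brief usage sketch for the mi_stl_allocator introduced in PATCH 02 above. This example is illustrative only and not part of the patch series itself; it assumes mimalloc is built with its include directory on the compiler's include path, and it mirrors the test_stl_allocator tests added to test-api.c:

#include <vector>
#include <mimalloc.h>
#include <mimalloc-stl-allocator.h>

int main() {
  // vector storage is allocated through mimalloc (mi_mallocn) and released with mi_free
  std::vector<int, mi_stl_allocator<int>> vec;
  for (int i = 0; i < 1000; i++) vec.push_back(i);
  vec.clear();
  return 0;
}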
From 0a2520490b951d791ebc9b34e8eae69e65fbeda6 Mon Sep 17 00:00:00 2001 From: Kirsten Lee Date: Mon, 6 Jan 2020 16:44:55 -0800 Subject: [PATCH 04/46] only include vector header when compiling c++ --- test/test-api.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/test-api.c b/test/test-api.c index 7a9ee785..f93884d0 100644 --- a/test/test-api.c +++ b/test/test-api.c @@ -25,7 +25,11 @@ we therefore test the API over various inputs. Please add more tests :-) #include #include #include + +#ifdef __cplusplus +#include <vector> +#endif + #include "mimalloc.h" #include "mimalloc-internal.h" #include "mimalloc-stl-allocator.h" From 941c55ee42e1b3a14b27a1df1ceab3ebfcbcf46d Mon Sep 17 00:00:00 2001 From: daan Date: Tue, 14 Jan 2020 21:47:18 -0800 Subject: [PATCH 05/46] wip: first implementation of page free list on segments for efficient delayed page reset --- include/mimalloc-types.h | 2 + src/init.c | 7 +- src/options.c | 2 +- src/segment.c | 307 +++++++++++++++++++++++++++++++-------- 4 files changed, 251 insertions(+), 67 deletions(-) diff --git a/include/mimalloc-types.h b/include/mimalloc-types.h index da9bfbac..51306808 100644 --- a/include/mimalloc-types.h +++ b/include/mimalloc-types.h @@ -417,6 +417,8 @@ typedef struct mi_os_tld_s { typedef struct mi_segments_tld_s { mi_segment_queue_t small_free; // queue of segments with free small pages mi_segment_queue_t medium_free; // queue of segments with free medium pages + mi_page_queue_t small_pages_free; // page queue of free small pages + mi_page_queue_t medium_pages_free; // page queue of free medium pages size_t count; // current number of segments; size_t peak_count; // peak number of segments size_t current_size; // current size of all segments diff --git a/src/init.c b/src/init.c index 3df854cf..085a5011 100644 --- a/src/init.c +++ b/src/init.c @@ -105,9 +105,12 @@ mi_decl_thread mi_heap_t* _mi_heap_default = (mi_heap_t*)&_mi_heap_empty; static mi_tld_t tld_main = { 0, false, &_mi_heap_main, - { { NULL, NULL }, {NULL ,NULL}, 0, 0, 0, 0, 0, 0, NULL, tld_main_stats, tld_main_os }, // segments + { { NULL, NULL }, {NULL ,NULL}, {NULL ,NULL, 0}, {NULL ,NULL, 0}, + 0, 0, 0, 0, 0, 0, NULL, + tld_main_stats, tld_main_os + }, // segments { 0, tld_main_stats }, // os - { MI_STATS_NULL } // stats + { MI_STATS_NULL } // stats }; #if MI_INTPTR_SIZE==8 diff --git a/src/options.c b/src/options.c index 0d3bd393..77205713 100644 --- a/src/options.c +++ b/src/options.c @@ -70,7 +70,7 @@ static mi_option_desc_t options[_mi_option_last] = { 0, UNINIT, MI_OPTION(page_reset) }, // reset pages on free { 0, UNINIT, MI_OPTION(segment_reset) }, // reset segment memory on free (needs eager commit) { 0, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed - { 500, UNINIT, MI_OPTION(reset_delay) }, // reset delay in milli-seconds + { 100, UNINIT, MI_OPTION(reset_delay) }, // reset delay in milli-seconds { 0, UNINIT, MI_OPTION(use_numa_nodes) }, // 0 = use available numa nodes, otherwise use at most N nodes.
{ 100, UNINIT, MI_OPTION(os_tag) }, // only apple specific for now but might serve more or less related purpose { 16, UNINIT, MI_OPTION(max_errors) } // maximum errors that are output diff --git a/src/segment.c b/src/segment.c index 97859fa9..fb5ea0ec 100644 --- a/src/segment.c +++ b/src/segment.c @@ -43,7 +43,7 @@ static uint8_t* mi_segment_raw_page_start(const mi_segment_t* segment, const mi_ ----------------------------------------------------------- */ #if (MI_DEBUG>=3) -static bool mi_segment_queue_contains(const mi_segment_queue_t* queue, mi_segment_t* segment) { +static bool mi_segment_queue_contains(const mi_segment_queue_t* queue, const mi_segment_t* segment) { mi_assert_internal(segment != NULL); mi_segment_t* list = queue->first; while (list != NULL) { @@ -90,7 +90,7 @@ static mi_segment_queue_t* mi_segment_free_queue_of_kind(mi_page_kind_t kind, mi else return NULL; } -static mi_segment_queue_t* mi_segment_free_queue(mi_segment_t* segment, mi_segments_tld_t* tld) { +static mi_segment_queue_t* mi_segment_free_queue(const mi_segment_t* segment, mi_segments_tld_t* tld) { return mi_segment_free_queue_of_kind(segment->page_kind, tld); } @@ -113,7 +113,7 @@ static void mi_segment_insert_in_free_queue(mi_segment_t* segment, mi_segments_t ----------------------------------------------------------- */ #if (MI_DEBUG>=2) -static bool mi_segment_is_in_free_queue(mi_segment_t* segment, mi_segments_tld_t* tld) { +static bool mi_segment_is_in_free_queue(const mi_segment_t* segment, mi_segments_tld_t* tld) { mi_segment_queue_t* queue = mi_segment_free_queue(segment, tld); bool in_queue = (queue!=NULL && (segment->next != NULL || segment->prev != NULL || queue->first == segment)); if (in_queue) { @@ -123,7 +123,7 @@ static bool mi_segment_is_in_free_queue(mi_segment_t* segment, mi_segments_tld_t } #endif -static size_t mi_segment_page_size(mi_segment_t* segment) { +static size_t mi_segment_page_size(const mi_segment_t* segment) { if (segment->capacity > 1) { mi_assert_internal(segment->page_kind <= MI_PAGE_MEDIUM); return ((size_t)1 << segment->page_shift); @@ -134,15 +134,39 @@ static size_t mi_segment_page_size(mi_segment_t* segment) { } } +static mi_page_queue_t* mi_segment_page_free_queue(mi_page_kind_t kind, mi_segments_tld_t* tld) { + if (kind==MI_PAGE_SMALL) return &tld->small_pages_free; + else if (kind==MI_PAGE_MEDIUM) return &tld->medium_pages_free; + else return NULL; +} + + #if (MI_DEBUG>=3) -static bool mi_segment_is_valid(mi_segment_t* segment) { +static bool mi_segment_page_free_contains(mi_page_kind_t kind, const mi_page_t* page, mi_segments_tld_t* tld) { + const mi_page_queue_t* const pq = mi_segment_page_free_queue(kind, tld); + if (pq == NULL) return false; + mi_page_t* p = pq->first; + while (p != NULL) { + if (p == page) return true; + p = p->next; + } + return false; +} + +static bool mi_segment_is_valid(const mi_segment_t* segment, mi_segments_tld_t* tld) { mi_assert_internal(segment != NULL); mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie); mi_assert_internal(segment->used <= segment->capacity); mi_assert_internal(segment->abandoned <= segment->used); size_t nfree = 0; for (size_t i = 0; i < segment->capacity; i++) { - if (!segment->pages[i].segment_in_use) nfree++; + const mi_page_t* const page = &segment->pages[i]; + if (!page->segment_in_use) { + nfree++; + } + else { + mi_assert_expensive(!mi_segment_page_free_contains(segment->page_kind, page, tld)); + } } mi_assert_internal(nfree + segment->used == segment->capacity); 
mi_assert_internal(segment->thread_id == _mi_thread_id() || (segment->thread_id==0)); // or 0 @@ -152,6 +176,20 @@ static bool mi_segment_is_valid(mi_segment_t* segment) { } #endif +static bool mi_segment_page_free_not_in_queue(const mi_page_t* page, mi_segments_tld_t* tld) { + mi_page_kind_t kind = _mi_page_segment(page)->page_kind; + if (page->next != NULL || page->prev != NULL) { + mi_assert_internal(mi_segment_page_free_contains(kind, page, tld)); + return false; + } + if (kind > MI_PAGE_MEDIUM) return true; + // both next and prev are NULL, check for singleton list + const mi_page_queue_t* const pq = mi_segment_page_free_queue(kind, tld); + mi_assert_internal(pq!=NULL); + return (pq->first != page && pq->last != page); +} + + /* ----------------------------------------------------------- Guard pages ----------------------------------------------------------- */ @@ -232,6 +270,102 @@ static void mi_page_unreset(mi_segment_t* segment, mi_page_t* page, size_t size, } +/* ----------------------------------------------------------- + The free page queue +----------------------------------------------------------- */ + +static void mi_segment_page_free_set_expire(mi_page_t* page) { + *((intptr_t*)(&page->heap)) = _mi_clock_now() + mi_option_get(mi_option_reset_delay); +} + +static mi_msecs_t mi_segment_page_free_get_expire(mi_page_t* page) { + return *((intptr_t*)(&page->heap)); +} + +static void mi_segment_page_free_add(mi_segment_t* segment, mi_page_t* page, mi_segments_tld_t* tld) { + mi_assert_internal(segment->page_kind <= MI_PAGE_MEDIUM); + mi_assert_internal(!page->segment_in_use); + mi_assert_internal(_mi_page_segment(page) == segment); + mi_assert_internal(mi_segment_page_free_not_in_queue(page,tld)); + mi_assert_expensive(!mi_segment_page_free_contains(segment->page_kind, page, tld)); + mi_page_queue_t* pq = mi_segment_page_free_queue(segment->page_kind, tld); + // push on top + mi_segment_page_free_set_expire(page); + page->next = pq->first; + page->prev = NULL; + if (pq->first == NULL) { + mi_assert_internal(pq->last == NULL); + pq->first = pq->last = page; + } + else { + pq->first->prev = page; + pq->first = page; + } +} + +static void mi_segment_page_free_remove(mi_segment_t* segment, mi_page_t* page, mi_segments_tld_t* tld) { + if (segment->page_kind > MI_PAGE_MEDIUM) return; + if (mi_segment_page_free_not_in_queue(page,tld)) return; + + mi_page_queue_t* pq = mi_segment_page_free_queue(segment->page_kind, tld); + mi_assert_internal(pq!=NULL); + mi_assert_internal(_mi_page_segment(page)==segment); + mi_assert_internal(!page->segment_in_use); + mi_assert_internal(mi_segment_page_free_contains(segment->page_kind, page, tld)); + if (page->prev != NULL) page->prev->next = page->next; + if (page->next != NULL) page->next->prev = page->prev; + if (page == pq->last) pq->last = page->prev; + if (page == pq->first) pq->first = page->next; + page->next = page->prev = NULL; + page->heap = NULL; +} + +static void mi_segment_page_free_remove_all(mi_segment_t* segment, mi_segments_tld_t* tld) { + if (segment->page_kind > MI_PAGE_MEDIUM) return; + for (size_t i = 0; i < segment->capacity; i++) { + mi_page_t* page = &segment->pages[i]; + if (!page->segment_in_use) { + mi_segment_page_free_remove(segment, page, tld); + } + } +} + +static mi_page_t* mi_segment_page_free_top(mi_page_kind_t kind, mi_segments_tld_t* tld) { + mi_assert_internal(kind <= MI_PAGE_MEDIUM); + mi_page_queue_t* pq = mi_segment_page_free_queue(kind, tld); + return pq->first; +} + +static void 
mi_segment_page_free_reset_delayedx(mi_msecs_t now, mi_page_kind_t kind, mi_segments_tld_t* tld) { + mi_page_queue_t* pq = mi_segment_page_free_queue(kind, tld); + mi_assert_internal(pq != NULL); + mi_page_t* page = pq->last; + while (page != NULL && (now - mi_segment_page_free_get_expire(page)) >= 0) { + mi_page_t* const prev = page->prev; + mi_page_reset(_mi_page_segment(page), page, 0, tld); + page->heap = NULL; + page->prev = page->next = NULL; + page = prev; + } + pq->last = page; + if (page != NULL){ + page->next = NULL; + } + else { + pq->first = NULL; + } +} + +static void mi_segment_page_free_reset_delayed(mi_segments_tld_t* tld) { + if (!mi_option_is_enabled(mi_option_page_reset)) return; + mi_msecs_t now = _mi_clock_now(); + mi_segment_page_free_reset_delayedx(now, MI_PAGE_SMALL, tld); + mi_segment_page_free_reset_delayedx(now, MI_PAGE_MEDIUM, tld); +} + + + + /* ----------------------------------------------------------- Segment size calculations ----------------------------------------------------------- */ @@ -407,6 +541,10 @@ void _mi_segment_thread_collect(mi_segments_tld_t* tld) { } mi_assert_internal(tld->cache_count == 0); mi_assert_internal(tld->cache == NULL); + mi_assert_internal(tld->small_pages_free.first == NULL); + mi_assert_internal(tld->medium_pages_free.first == NULL); + mi_assert_internal(tld->small_free.first == NULL); + mi_assert_internal(tld->medium_free.first == NULL); } @@ -532,9 +670,9 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind, static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) { - UNUSED(force); - //fprintf(stderr,"mimalloc: free segment at %p\n", (void*)segment); + UNUSED(force); mi_assert(segment != NULL); + mi_segment_page_free_remove_all(segment, tld); mi_segment_remove_from_free_queue(segment,tld); mi_assert_expensive(!mi_segment_queue_contains(&tld->small_free, segment)); @@ -561,37 +699,38 @@ static bool mi_segment_has_free(const mi_segment_t* segment) { return (segment->used < segment->capacity); } -static mi_page_t* mi_segment_find_free(mi_segment_t* segment, mi_segments_tld_t* tld) { - mi_assert_internal(mi_segment_has_free(segment)); - mi_assert_expensive(mi_segment_is_valid(segment)); - for (size_t i = 0; i < segment->capacity; i++) { - mi_page_t* page = &segment->pages[i]; - if (!page->segment_in_use) { - // set in-use before doing unreset to prevent delayed reset - page->segment_in_use = true; - segment->used++; - if (!page->is_committed) { - mi_assert_internal(!segment->mem_is_fixed); - mi_assert_internal(!page->is_reset); - page->is_committed = true; - if (segment->page_kind < MI_PAGE_LARGE || !mi_option_is_enabled(mi_option_eager_page_commit)) { - size_t psize; - uint8_t* start = mi_segment_raw_page_start(segment, page, &psize); - bool is_zero = false; - const size_t gsize = (MI_SECURE >= 2 ? _mi_os_page_size() : 0); - _mi_mem_commit(start, psize + gsize, &is_zero, tld->os); - if (gsize > 0) { mi_segment_protect_range(start + psize, gsize, true); } - if (is_zero) { page->is_zero_init = true; } - } - } - if (page->is_reset) { - mi_page_unreset(segment, page, 0, tld); // todo: only unreset the part that was reset? 
- } - return page; +static void mi_segment_page_claim(mi_segment_t* segment, mi_page_t* page, mi_segments_tld_t* tld) { + mi_assert_internal(_mi_page_segment(page) == segment); + mi_assert_internal(!page->segment_in_use); + // set in-use before doing unreset to prevent delayed reset + mi_segment_page_free_remove(segment, page, tld); + page->segment_in_use = true; + segment->used++; + if (!page->is_committed) { + mi_assert_internal(!segment->mem_is_fixed); + mi_assert_internal(!page->is_reset); + page->is_committed = true; + if (segment->page_kind < MI_PAGE_LARGE + || !mi_option_is_enabled(mi_option_eager_page_commit)) { + size_t psize; + uint8_t* start = mi_segment_raw_page_start(segment, page, &psize); + bool is_zero = false; + const size_t gsize = (MI_SECURE >= 2 ? _mi_os_page_size() : 0); + _mi_mem_commit(start, psize + gsize, &is_zero, tld->os); + if (gsize > 0) { mi_segment_protect_range(start + psize, gsize, true); } + if (is_zero) { page->is_zero_init = true; } } } - mi_assert(false); - return NULL; + if (page->is_reset) { + mi_page_unreset(segment, page, 0, tld); // todo: only unreset the part that was reset? + } + mi_assert_internal(page->segment_in_use); + mi_assert_internal(segment->used <= segment->capacity); + if (segment->used == segment->capacity && segment->page_kind <= MI_PAGE_MEDIUM) { + // if no more free pages, remove from the queue + mi_assert_internal(!mi_segment_has_free(segment)); + mi_segment_remove_from_free_queue(segment, tld); + } } @@ -605,6 +744,7 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, mi_seg mi_assert_internal(page->segment_in_use); mi_assert_internal(mi_page_all_free(page)); mi_assert_internal(page->is_committed); + mi_assert_internal(mi_segment_page_free_not_in_queue(page, tld)); size_t inuse = page->capacity * page->block_size; _mi_stat_decrease(&tld->stats->page_committed, inuse); _mi_stat_decrease(&tld->stats->pages, 1); @@ -619,19 +759,27 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, mi_seg // reset the page memory to reduce memory pressure? 
// note: must come after setting `segment_in_use` to false but before block_size becomes 0 - mi_page_reset(segment, page, 0 /*used_size*/, tld); + //mi_page_reset(segment, page, 0 /*used_size*/, tld); - // zero the page data, but not the segment fields + // zero the page data, but not the segment fields and block_size (for page size calculations) + size_t block_size = page->block_size; ptrdiff_t ofs = offsetof(mi_page_t,capacity); memset((uint8_t*)page + ofs, 0, sizeof(*page) - ofs); + page->block_size = block_size; segment->used--; + + // add to the free page list for reuse/reset + if (segment->page_kind <= MI_PAGE_MEDIUM) { + mi_segment_page_free_add(segment, page, tld); + } } void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld) { mi_assert(page != NULL); mi_segment_t* segment = _mi_page_segment(page); - mi_assert_expensive(mi_segment_is_valid(segment)); + mi_assert_expensive(mi_segment_is_valid(segment,tld)); + mi_segment_page_free_reset_delayed(tld); // mark it as free now mi_segment_page_clear(segment, page, tld); @@ -690,10 +838,12 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) { mi_assert_internal(segment->used == segment->abandoned); mi_assert_internal(segment->used > 0); mi_assert_internal(segment->abandoned_next == NULL); - mi_assert_expensive(mi_segment_is_valid(segment)); + mi_assert_expensive(mi_segment_is_valid(segment,tld)); // remove the segment from the free page queue if needed - mi_segment_remove_from_free_queue(segment,tld); + mi_segment_page_free_reset_delayed(tld); + mi_segment_page_free_remove_all(segment, tld); + mi_segment_remove_from_free_queue(segment, tld); mi_assert_internal(segment->next == NULL && segment->prev == NULL); // all pages in the segment are abandoned; add it to the abandoned list @@ -708,7 +858,8 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) { void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) { mi_assert(page != NULL); mi_segment_t* segment = _mi_page_segment(page); - mi_assert_expensive(mi_segment_is_valid(segment)); + mi_assert_expensive(!mi_segment_page_free_contains(segment->page_kind, page, tld)); + mi_assert_expensive(mi_segment_is_valid(segment,tld)); segment->abandoned++; _mi_stat_increase(&tld->stats->pages_abandoned, 1); mi_assert_internal(segment->abandoned <= segment->used); @@ -755,7 +906,7 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen segment->abandoned_next = NULL; mi_segments_track_size((long)segment->segment_size,tld); mi_assert_internal(segment->next == NULL && segment->prev == NULL); - mi_assert_expensive(mi_segment_is_valid(segment)); + mi_assert_expensive(mi_segment_is_valid(segment,tld)); _mi_stat_decrease(&tld->stats->segments_abandoned,1); // add its abandoned pages to the current thread @@ -765,6 +916,7 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen if (page->segment_in_use) { mi_assert_internal(!page->is_reset); mi_assert_internal(page->is_committed); + mi_assert_internal(mi_segment_page_free_not_in_queue(page, tld)); segment->abandoned--; mi_assert(page->next == NULL); _mi_stat_decrease(&tld->stats->pages_abandoned, 1); @@ -801,30 +953,55 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen Small page allocation ----------------------------------------------------------- */ -// Allocate a small page inside a segment. 
-// Requires that the page has free pages -static mi_page_t* mi_segment_page_alloc_in(mi_segment_t* segment, mi_segments_tld_t* tld) { + +static mi_page_t* mi_segment_find_free(mi_segment_t* segment, mi_segments_tld_t* tld) { mi_assert_internal(mi_segment_has_free(segment)); - mi_page_t* page = mi_segment_find_free(segment, tld); - mi_assert_internal(page->segment_in_use); - mi_assert_internal(segment->used <= segment->capacity); - if (segment->used == segment->capacity) { - // if no more free pages, remove from the queue - mi_assert_internal(!mi_segment_has_free(segment)); - mi_segment_remove_from_free_queue(segment,tld); + mi_assert_expensive(mi_segment_is_valid(segment, tld)); + for (size_t i = 0; i < segment->capacity; i++) { + mi_page_t* page = &segment->pages[i]; + if (!page->segment_in_use) { + mi_segment_page_claim(segment, page, tld); + return page; + } } - return page; + mi_assert(false); + return NULL; } -static mi_page_t* mi_segment_page_alloc(mi_page_kind_t kind, size_t page_shift, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) { - mi_segment_queue_t* free_queue = mi_segment_free_queue_of_kind(kind,tld); - if (mi_segment_queue_is_empty(free_queue)) { - mi_segment_t* segment = mi_segment_alloc(0,kind,page_shift,tld,os_tld); - if (segment == NULL) return NULL; - mi_segment_enqueue(free_queue, segment); + +// Allocate a page inside a segment. Requires that the page has free pages +static mi_page_t* mi_segment_page_alloc_in(mi_segment_t* segment, mi_segments_tld_t* tld) { + mi_assert_internal(mi_segment_has_free(segment)); + return mi_segment_find_free(segment, tld); +} + +static mi_page_t* mi_segment_page_alloc(mi_page_kind_t kind, size_t page_shift, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) { + mi_page_t* page = NULL; + mi_segment_queue_t* const free_queue = mi_segment_free_queue_of_kind(kind, tld); + if (free_queue->first != NULL && free_queue->first->used < free_queue->first->capacity) { + // prefer to allocate from an available segment + // (to allow more chance of other segments to become completely freed) + page = mi_segment_page_alloc_in(free_queue->first, tld); } - mi_assert_internal(free_queue->first != NULL); - mi_page_t* page = mi_segment_page_alloc_in(free_queue->first,tld); + else { + // otherwise try to pop from the page free list + page = mi_segment_page_free_top(kind, tld); + if (page != NULL) { + mi_segment_page_claim(_mi_page_segment(page), page, tld); + } + else { + // if that failed, find an available segment the segment free queue again + if (mi_segment_queue_is_empty(free_queue)) { + // possibly allocate a fresh segment + mi_segment_t* segment = mi_segment_alloc(0, kind, page_shift, tld, os_tld); + if (segment == NULL) return NULL; // return NULL if out-of-memory + mi_segment_enqueue(free_queue, segment); + } + mi_assert_internal(free_queue->first != NULL); + page = mi_segment_page_alloc_in(free_queue->first, tld); + } + } + mi_assert_internal(page != NULL); #if MI_DEBUG>=2 _mi_segment_page_start(_mi_page_segment(page), page, sizeof(void*), NULL, NULL)[0] = 0; #endif @@ -883,7 +1060,9 @@ mi_page_t* _mi_segment_page_alloc(size_t block_size, mi_segments_tld_t* tld, mi_ else { page = mi_segment_huge_page_alloc(block_size,tld,os_tld); } - mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page))); + mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld)); mi_assert_internal(page == NULL || (mi_segment_page_size(_mi_page_segment(page)) - (MI_SECURE == 0 ? 
0 : _mi_os_page_size())) >= block_size); + mi_segment_page_free_reset_delayed(tld); + mi_assert_internal(mi_segment_page_free_not_in_queue(page, tld)); return page; } From f92a2a72649b568a7d359f6b05f315c2919bc8c8 Mon Sep 17 00:00:00 2001 From: daan Date: Wed, 15 Jan 2020 10:18:32 -0800 Subject: [PATCH 06/46] add argument pointer to the register output routine --- include/mimalloc.h | 4 ++-- src/options.c | 37 ++++++++++++++++++++++--------------- 2 files changed, 24 insertions(+), 17 deletions(-) diff --git a/include/mimalloc.h b/include/mimalloc.h index 94d9edfc..08af2eb9 100644 --- a/include/mimalloc.h +++ b/include/mimalloc.h @@ -111,8 +111,8 @@ mi_decl_export size_t mi_good_size(size_t size) mi_attr_noexcept; typedef void (mi_deferred_free_fun)(bool force, unsigned long long heartbeat); mi_decl_export void mi_register_deferred_free(mi_deferred_free_fun* deferred_free) mi_attr_noexcept; -typedef void (mi_output_fun)(const char* msg); -mi_decl_export void mi_register_output(mi_output_fun* out) mi_attr_noexcept; +typedef void (mi_output_fun)(const char* msg, void* arg); +mi_decl_export void mi_register_output(mi_output_fun* out, void* arg) mi_attr_noexcept; mi_decl_export void mi_collect(bool force) mi_attr_noexcept; mi_decl_export int mi_version(void) mi_attr_noexcept; diff --git a/src/options.c b/src/options.c index 0d3bd393..ed1237d1 100644 --- a/src/options.c +++ b/src/options.c @@ -140,7 +140,8 @@ void mi_option_disable(mi_option_t option) { } -static void mi_out_stderr(const char* msg) { +static void mi_out_stderr(const char* msg, void* arg) { + UNUSED(arg); #ifdef _WIN32 // on windows with redirection, the C runtime cannot handle locale dependent output // after the main thread closes so we use direct console output. @@ -160,7 +161,8 @@ static void mi_out_stderr(const char* msg) { static char out_buf[MI_MAX_DELAY_OUTPUT+1]; static _Atomic(uintptr_t) out_len; -static void mi_out_buf(const char* msg) { +static void mi_out_buf(const char* msg, void* arg) { + UNUSED(arg); if (msg==NULL) return; if (mi_atomic_read_relaxed(&out_len)>=MI_MAX_DELAY_OUTPUT) return; size_t n = strlen(msg); @@ -175,14 +177,14 @@ static void mi_out_buf(const char* msg) { memcpy(&out_buf[start], msg, n); } -static void mi_out_buf_flush(mi_output_fun* out, bool no_more_buf) { +static void mi_out_buf_flush(mi_output_fun* out, bool no_more_buf, void* arg) { if (out==NULL) return; // claim (if `no_more_buf == true`, no more output will be added after this point) size_t count = mi_atomic_addu(&out_len, (no_more_buf ? MI_MAX_DELAY_OUTPUT : 1)); // and output the current contents if (count>MI_MAX_DELAY_OUTPUT) count = MI_MAX_DELAY_OUTPUT; out_buf[count] = 0; - out(out_buf); + out(out_buf,arg); if (!no_more_buf) { out_buf[count] = '\n'; // if continue with the buffer, insert a newline } @@ -191,9 +193,9 @@ static void mi_out_buf_flush(mi_output_fun* out, bool no_more_buf) { // Once this module is loaded, switch to this routine // which outputs to stderr and the delayed output buffer. -static void mi_out_buf_stderr(const char* msg) { - mi_out_stderr(msg); - mi_out_buf(msg); +static void mi_out_buf_stderr(const char* msg, void* arg) { + mi_out_stderr(msg,arg); + mi_out_buf(msg,arg); } @@ -206,21 +208,25 @@ static void mi_out_buf_stderr(const char* msg) { // For now, don't register output from multiple threads. 
#pragma warning(suppress:4180) static mi_output_fun* volatile mi_out_default; // = NULL +static volatile _Atomic(void*) mi_out_arg; // = NULL -static mi_output_fun* mi_out_get_default(void) { +static mi_output_fun* mi_out_get_default(void** parg) { + if (parg != NULL) { *parg = mi_atomic_read_ptr(&mi_out_arg); } mi_output_fun* out = mi_out_default; return (out == NULL ? &mi_out_buf : out); } -void mi_register_output(mi_output_fun* out) mi_attr_noexcept { +void mi_register_output(mi_output_fun* out, void* arg) mi_attr_noexcept { mi_out_default = (out == NULL ? &mi_out_stderr : out); // stop using the delayed output buffer - if (out!=NULL) mi_out_buf_flush(out,true); // output all the delayed output now + mi_atomic_write_ptr(&mi_out_arg, arg); + if (out!=NULL) mi_out_buf_flush(out,true,arg); // output all the delayed output now } // add stderr to the delayed output after the module is loaded static void mi_add_stderr_output() { - mi_out_buf_flush(&mi_out_stderr, false); // flush current contents to stderr - mi_out_default = &mi_out_buf_stderr; // and add stderr to the delayed output + mi_assert_internal(mi_out_default == NULL); + mi_out_buf_flush(&mi_out_stderr, false, NULL); // flush current contents to stderr + mi_out_default = &mi_out_buf_stderr; // and add stderr to the delayed output } // -------------------------------------------------------- @@ -234,10 +240,11 @@ static mi_decl_thread bool recurse = false; void _mi_fputs(mi_output_fun* out, const char* prefix, const char* message) { if (recurse) return; - if (out==NULL || (FILE*)out==stdout || (FILE*)out==stderr) out = mi_out_get_default(); + void* arg = NULL; + if (out==NULL || (FILE*)out==stdout || (FILE*)out==stderr) out = mi_out_get_default(&arg); recurse = true; - if (prefix != NULL) out(prefix); - out(message); + if (prefix != NULL) out(prefix,arg); + out(message,arg); recurse = false; return; } From 0956a05bf6fc731e811a8696364caffd5b7e6da3 Mon Sep 17 00:00:00 2001 From: daan Date: Wed, 15 Jan 2020 10:21:32 -0800 Subject: [PATCH 07/46] add argument pointer to the register deferred free callback --- include/mimalloc.h | 4 ++-- src/page.c | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/include/mimalloc.h b/include/mimalloc.h index 08af2eb9..1f6f1ef7 100644 --- a/include/mimalloc.h +++ b/include/mimalloc.h @@ -108,8 +108,8 @@ mi_decl_export mi_decl_allocator void* mi_reallocf(void* p, size_t newsize) mi_decl_export size_t mi_usable_size(const void* p) mi_attr_noexcept; mi_decl_export size_t mi_good_size(size_t size) mi_attr_noexcept; -typedef void (mi_deferred_free_fun)(bool force, unsigned long long heartbeat); -mi_decl_export void mi_register_deferred_free(mi_deferred_free_fun* deferred_free) mi_attr_noexcept; +typedef void (mi_deferred_free_fun)(bool force, unsigned long long heartbeat, void* arg); +mi_decl_export void mi_register_deferred_free(mi_deferred_free_fun* deferred_free, void* arg) mi_attr_noexcept; typedef void (mi_output_fun)(const char* msg, void* arg); mi_decl_export void mi_register_output(mi_output_fun* out, void* arg) mi_attr_noexcept; diff --git a/src/page.c b/src/page.c index 7491bd61..6a6e09d6 100644 --- a/src/page.c +++ b/src/page.c @@ -764,18 +764,20 @@ static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) { ----------------------------------------------------------- */ static mi_deferred_free_fun* volatile deferred_free = NULL; +static volatile _Atomic(void*) deferred_arg; // = NULL void _mi_deferred_free(mi_heap_t* heap, bool force) { heap->tld->heartbeat++; if 
(deferred_free != NULL && !heap->tld->recurse) { heap->tld->recurse = true; - deferred_free(force, heap->tld->heartbeat); + deferred_free(force, heap->tld->heartbeat, mi_atomic_read_ptr_relaxed(&deferred_arg)); heap->tld->recurse = false; } } -void mi_register_deferred_free(mi_deferred_free_fun* fn) mi_attr_noexcept { +void mi_register_deferred_free(mi_deferred_free_fun* fn, void* arg) mi_attr_noexcept { deferred_free = fn; + mi_atomic_write_ptr(&deferred_arg, arg); } From 783e3377f79ee82af43a0793910a9f2d01ac7863 Mon Sep 17 00:00:00 2001 From: daan Date: Wed, 15 Jan 2020 10:53:54 -0800 Subject: [PATCH 08/46] add output argument to stat printing --- include/mimalloc-internal.h | 4 +- include/mimalloc.h | 5 +- src/init.c | 2 +- src/options.c | 27 +++--- src/stats.c | 160 ++++++++++++++++++------------------ test/main-override-static.c | 4 +- test/test-stress.c | 4 +- 7 files changed, 104 insertions(+), 102 deletions(-) diff --git a/include/mimalloc-internal.h b/include/mimalloc-internal.h index 3042e6f9..d5ce9f59 100644 --- a/include/mimalloc-internal.h +++ b/include/mimalloc-internal.h @@ -33,8 +33,8 @@ terms of the MIT license. A copy of the license can be found in the file // "options.c" -void _mi_fputs(mi_output_fun* out, const char* prefix, const char* message); -void _mi_fprintf(mi_output_fun* out, const char* fmt, ...); +void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message); +void _mi_fprintf(mi_output_fun* out, void* arg, const char* fmt, ...); void _mi_error_message(const char* fmt, ...); void _mi_warning_message(const char* fmt, ...); void _mi_verbose_message(const char* fmt, ...); diff --git a/include/mimalloc.h b/include/mimalloc.h index 1f6f1ef7..51d96609 100644 --- a/include/mimalloc.h +++ b/include/mimalloc.h @@ -118,12 +118,13 @@ mi_decl_export void mi_collect(bool force) mi_attr_noexcept; mi_decl_export int mi_version(void) mi_attr_noexcept; mi_decl_export void mi_stats_reset(void) mi_attr_noexcept; mi_decl_export void mi_stats_merge(void) mi_attr_noexcept; -mi_decl_export void mi_stats_print(mi_output_fun* out) mi_attr_noexcept; +mi_decl_export void mi_stats_print(void* out) mi_attr_noexcept; // backward compatibility: `out` is ignored and should be NULL +mi_decl_export void mi_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept; mi_decl_export void mi_process_init(void) mi_attr_noexcept; mi_decl_export void mi_thread_init(void) mi_attr_noexcept; mi_decl_export void mi_thread_done(void) mi_attr_noexcept; -mi_decl_export void mi_thread_stats_print(mi_output_fun* out) mi_attr_noexcept; +mi_decl_export void mi_thread_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept; // ------------------------------------------------------------------------------------- diff --git a/src/init.c b/src/init.c index 3df854cf..79e1e044 100644 --- a/src/init.c +++ b/src/init.c @@ -390,7 +390,7 @@ static void mi_process_load(void) { const char* msg = NULL; mi_allocator_init(&msg); if (msg != NULL && (mi_option_is_enabled(mi_option_verbose) || mi_option_is_enabled(mi_option_show_errors))) { - _mi_fputs(NULL,NULL,msg); + _mi_fputs(NULL,NULL,NULL,msg); } } diff --git a/src/options.c b/src/options.c index ed1237d1..017b9d59 100644 --- a/src/options.c +++ b/src/options.c @@ -238,10 +238,11 @@ static volatile _Atomic(uintptr_t) error_count; // = 0; // when MAX_ERROR_COUNT // inside the C runtime causes another message. 
static mi_decl_thread bool recurse = false; -void _mi_fputs(mi_output_fun* out, const char* prefix, const char* message) { +void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message) { if (recurse) return; - void* arg = NULL; - if (out==NULL || (FILE*)out==stdout || (FILE*)out==stderr) out = mi_out_get_default(&arg); + if (out==NULL || (FILE*)out==stdout || (FILE*)out==stderr) { // TODO: use mi_out_stderr for stderr? + out = mi_out_get_default(&arg); + } recurse = true; if (prefix != NULL) out(prefix,arg); out(message,arg); @@ -251,21 +252,21 @@ void _mi_fputs(mi_output_fun* out, const char* prefix, const char* message) { // Define our own limited `fprintf` that avoids memory allocation. // We do this using `snprintf` with a limited buffer. -static void mi_vfprintf( mi_output_fun* out, const char* prefix, const char* fmt, va_list args ) { +static void mi_vfprintf( mi_output_fun* out, void* arg, const char* prefix, const char* fmt, va_list args ) { char buf[512]; if (fmt==NULL) return; if (recurse) return; recurse = true; vsnprintf(buf,sizeof(buf)-1,fmt,args); recurse = false; - _mi_fputs(out,prefix,buf); + _mi_fputs(out,arg,prefix,buf); } -void _mi_fprintf( mi_output_fun* out, const char* fmt, ... ) { +void _mi_fprintf( mi_output_fun* out, void* arg, const char* fmt, ... ) { va_list args; va_start(args,fmt); - mi_vfprintf(out,NULL,fmt,args); + mi_vfprintf(out,arg,NULL,fmt,args); va_end(args); } @@ -273,7 +274,7 @@ void _mi_trace_message(const char* fmt, ...) { if (mi_option_get(mi_option_verbose) <= 1) return; // only with verbose level 2 or higher va_list args; va_start(args, fmt); - mi_vfprintf(NULL, "mimalloc: ", fmt, args); + mi_vfprintf(NULL, NULL, "mimalloc: ", fmt, args); va_end(args); } @@ -281,7 +282,7 @@ void _mi_verbose_message(const char* fmt, ...) { if (!mi_option_is_enabled(mi_option_verbose)) return; va_list args; va_start(args,fmt); - mi_vfprintf(NULL, "mimalloc: ", fmt, args); + mi_vfprintf(NULL, NULL, "mimalloc: ", fmt, args); va_end(args); } @@ -290,7 +291,7 @@ void _mi_error_message(const char* fmt, ...) { if (mi_atomic_increment(&error_count) > mi_max_error_count) return; va_list args; va_start(args,fmt); - mi_vfprintf(NULL, "mimalloc: error: ", fmt, args); + mi_vfprintf(NULL, NULL, "mimalloc: error: ", fmt, args); va_end(args); mi_assert(false); } @@ -300,14 +301,14 @@ void _mi_warning_message(const char* fmt, ...) { if (mi_atomic_increment(&error_count) > mi_max_error_count) return; va_list args; va_start(args,fmt); - mi_vfprintf(NULL, "mimalloc: warning: ", fmt, args); + mi_vfprintf(NULL, NULL, "mimalloc: warning: ", fmt, args); va_end(args); } #if MI_DEBUG void _mi_assert_fail(const char* assertion, const char* fname, unsigned line, const char* func ) { - _mi_fprintf(NULL,"mimalloc: assertion failed: at \"%s\":%u, %s\n assertion: \"%s\"\n", fname, line, (func==NULL?"":func), assertion); + _mi_fprintf(NULL, NULL, "mimalloc: assertion failed: at \"%s\":%u, %s\n assertion: \"%s\"\n", fname, line, (func==NULL?"":func), assertion); abort(); } #endif @@ -315,7 +316,7 @@ void _mi_assert_fail(const char* assertion, const char* fname, unsigned line, co mi_attr_noreturn void _mi_fatal_error(const char* fmt, ...) 
{ va_list args; va_start(args, fmt); - mi_vfprintf(NULL, "mimalloc: fatal: ", fmt, args); + mi_vfprintf(NULL, NULL, "mimalloc: fatal: ", fmt, args); va_end(args); #if (MI_SECURE>=0) abort(); diff --git a/src/stats.c b/src/stats.c index cb6d8866..57599821 100644 --- a/src/stats.c +++ b/src/stats.c @@ -126,7 +126,7 @@ static void mi_stats_add(mi_stats_t* stats, const mi_stats_t* src) { // unit > 0 : size in binary bytes // unit == 0: count as decimal // unit < 0 : count in binary -static void mi_printf_amount(int64_t n, int64_t unit, mi_output_fun* out, const char* fmt) { +static void mi_printf_amount(int64_t n, int64_t unit, mi_output_fun* out, void* arg, const char* fmt) { char buf[32]; int len = 32; const char* suffix = (unit <= 0 ? " " : "b"); @@ -147,75 +147,75 @@ static void mi_printf_amount(int64_t n, int64_t unit, mi_output_fun* out, const const long frac1 = (long)(tens%10); snprintf(buf, len, "%ld.%ld %s%s", whole, frac1, magnitude, suffix); } - _mi_fprintf(out, (fmt==NULL ? "%11s" : fmt), buf); + _mi_fprintf(out, arg, (fmt==NULL ? "%11s" : fmt), buf); } -static void mi_print_amount(int64_t n, int64_t unit, mi_output_fun* out) { - mi_printf_amount(n,unit,out,NULL); +static void mi_print_amount(int64_t n, int64_t unit, mi_output_fun* out, void* arg) { + mi_printf_amount(n,unit,out,arg,NULL); } -static void mi_print_count(int64_t n, int64_t unit, mi_output_fun* out) { - if (unit==1) _mi_fprintf(out,"%11s"," "); - else mi_print_amount(n,0,out); +static void mi_print_count(int64_t n, int64_t unit, mi_output_fun* out, void* arg) { + if (unit==1) _mi_fprintf(out, arg, "%11s"," "); + else mi_print_amount(n,0,out,arg); } -static void mi_stat_print(const mi_stat_count_t* stat, const char* msg, int64_t unit, mi_output_fun* out ) { - _mi_fprintf(out,"%10s:", msg); +static void mi_stat_print(const mi_stat_count_t* stat, const char* msg, int64_t unit, mi_output_fun* out, void* arg ) { + _mi_fprintf(out, arg,"%10s:", msg); if (unit>0) { - mi_print_amount(stat->peak, unit, out); - mi_print_amount(stat->allocated, unit, out); - mi_print_amount(stat->freed, unit, out); - mi_print_amount(unit, 1, out); - mi_print_count(stat->allocated, unit, out); + mi_print_amount(stat->peak, unit, out, arg); + mi_print_amount(stat->allocated, unit, out, arg); + mi_print_amount(stat->freed, unit, out, arg); + mi_print_amount(unit, 1, out, arg); + mi_print_count(stat->allocated, unit, out, arg); if (stat->allocated > stat->freed) - _mi_fprintf(out, " not all freed!\n"); + _mi_fprintf(out, arg, " not all freed!\n"); else - _mi_fprintf(out, " ok\n"); + _mi_fprintf(out, arg, " ok\n"); } else if (unit<0) { - mi_print_amount(stat->peak, -1, out); - mi_print_amount(stat->allocated, -1, out); - mi_print_amount(stat->freed, -1, out); + mi_print_amount(stat->peak, -1, out, arg); + mi_print_amount(stat->allocated, -1, out, arg); + mi_print_amount(stat->freed, -1, out, arg); if (unit==-1) { - _mi_fprintf(out, "%22s", ""); + _mi_fprintf(out, arg, "%22s", ""); } else { - mi_print_amount(-unit, 1, out); - mi_print_count((stat->allocated / -unit), 0, out); + mi_print_amount(-unit, 1, out, arg); + mi_print_count((stat->allocated / -unit), 0, out, arg); } if (stat->allocated > stat->freed) - _mi_fprintf(out, " not all freed!\n"); + _mi_fprintf(out, arg, " not all freed!\n"); else - _mi_fprintf(out, " ok\n"); + _mi_fprintf(out, arg, " ok\n"); } else { - mi_print_amount(stat->peak, 1, out); - mi_print_amount(stat->allocated, 1, out); - _mi_fprintf(out, "\n"); + mi_print_amount(stat->peak, 1, out, arg); + 
mi_print_amount(stat->allocated, 1, out, arg); + _mi_fprintf(out, arg, "\n"); } } -static void mi_stat_counter_print(const mi_stat_counter_t* stat, const char* msg, mi_output_fun* out ) { - _mi_fprintf(out, "%10s:", msg); - mi_print_amount(stat->total, -1, out); - _mi_fprintf(out, "\n"); +static void mi_stat_counter_print(const mi_stat_counter_t* stat, const char* msg, mi_output_fun* out, void* arg ) { + _mi_fprintf(out, arg, "%10s:", msg); + mi_print_amount(stat->total, -1, out, arg); + _mi_fprintf(out, arg, "\n"); } -static void mi_stat_counter_print_avg(const mi_stat_counter_t* stat, const char* msg, mi_output_fun* out) { +static void mi_stat_counter_print_avg(const mi_stat_counter_t* stat, const char* msg, mi_output_fun* out, void* arg) { const int64_t avg_tens = (stat->count == 0 ? 0 : (stat->total*10 / stat->count)); const long avg_whole = (long)(avg_tens/10); const long avg_frac1 = (long)(avg_tens%10); - _mi_fprintf(out, "%10s: %5ld.%ld avg\n", msg, avg_whole, avg_frac1); + _mi_fprintf(out, arg, "%10s: %5ld.%ld avg\n", msg, avg_whole, avg_frac1); } -static void mi_print_header(mi_output_fun* out ) { - _mi_fprintf(out,"%10s: %10s %10s %10s %10s %10s\n", "heap stats", "peak ", "total ", "freed ", "unit ", "count "); +static void mi_print_header(mi_output_fun* out, void* arg ) { + _mi_fprintf(out, arg, "%10s: %10s %10s %10s %10s %10s\n", "heap stats", "peak ", "total ", "freed ", "unit ", "count "); } #if MI_STAT>1 -static void mi_stats_print_bins(mi_stat_count_t* all, const mi_stat_count_t* bins, size_t max, const char* fmt, mi_output_fun* out) { +static void mi_stats_print_bins(mi_stat_count_t* all, const mi_stat_count_t* bins, size_t max, const char* fmt, mi_output_fun* out, void* arg) { bool found = false; char buf[64]; for (size_t i = 0; i <= max; i++) { @@ -224,14 +224,14 @@ static void mi_stats_print_bins(mi_stat_count_t* all, const mi_stat_count_t* bin int64_t unit = _mi_bin_size((uint8_t)i); snprintf(buf, 64, "%s %3zu", fmt, i); mi_stat_add(all, &bins[i], unit); - mi_stat_print(&bins[i], buf, unit, out); + mi_stat_print(&bins[i], buf, unit, out, arg); } } //snprintf(buf, 64, "%s all", fmt); //mi_stat_print(all, buf, 1); if (found) { - _mi_fprintf(out, "\n"); - mi_print_header(out); + _mi_fprintf(out, arg, "\n"); + mi_print_header(out, arg); } } #endif @@ -239,40 +239,40 @@ static void mi_stats_print_bins(mi_stat_count_t* all, const mi_stat_count_t* bin static void mi_process_info(mi_msecs_t* utime, mi_msecs_t* stime, size_t* peak_rss, size_t* page_faults, size_t* page_reclaim, size_t* peak_commit); -static void _mi_stats_print(mi_stats_t* stats, mi_msecs_t elapsed, mi_output_fun* out) mi_attr_noexcept { - mi_print_header(out); +static void _mi_stats_print(mi_stats_t* stats, mi_msecs_t elapsed, mi_output_fun* out, void* arg) mi_attr_noexcept { + mi_print_header(out,arg); #if MI_STAT>1 mi_stat_count_t normal = { 0,0,0,0 }; - mi_stats_print_bins(&normal, stats->normal, MI_BIN_HUGE, "normal",out); - mi_stat_print(&normal, "normal", 1, out); - mi_stat_print(&stats->huge, "huge", (stats->huge_count.count == 0 ? 1 : -(stats->huge.allocated / stats->huge_count.count)), out); - mi_stat_print(&stats->giant, "giant", (stats->giant_count.count == 0 ? 1 : -(stats->giant.allocated / stats->giant_count.count)), out); + mi_stats_print_bins(&normal, stats->normal, MI_BIN_HUGE, "normal",out,arg); + mi_stat_print(&normal, "normal", 1, out, arg); + mi_stat_print(&stats->huge, "huge", (stats->huge_count.count == 0 ? 
1 : -(stats->huge.allocated / stats->huge_count.count)), out, arg); + mi_stat_print(&stats->giant, "giant", (stats->giant_count.count == 0 ? 1 : -(stats->giant.allocated / stats->giant_count.count)), out, arg); mi_stat_count_t total = { 0,0,0,0 }; mi_stat_add(&total, &normal, 1); mi_stat_add(&total, &stats->huge, 1); mi_stat_add(&total, &stats->giant, 1); - mi_stat_print(&total, "total", 1, out); - _mi_fprintf(out, "malloc requested: "); - mi_print_amount(stats->malloc.allocated, 1, out); - _mi_fprintf(out, "\n\n"); + mi_stat_print(&total, "total", 1, out, arg); + _mi_fprintf(out, arg, "malloc requested: "); + mi_print_amount(stats->malloc.allocated, 1, out, arg); + _mi_fprintf(out, arg, "\n\n"); #endif - mi_stat_print(&stats->reserved, "reserved", 1, out); - mi_stat_print(&stats->committed, "committed", 1, out); - mi_stat_print(&stats->reset, "reset", 1, out); - mi_stat_print(&stats->page_committed, "touched", 1, out); - mi_stat_print(&stats->segments, "segments", -1, out); - mi_stat_print(&stats->segments_abandoned, "-abandoned", -1, out); - mi_stat_print(&stats->segments_cache, "-cached", -1, out); - mi_stat_print(&stats->pages, "pages", -1, out); - mi_stat_print(&stats->pages_abandoned, "-abandoned", -1, out); - mi_stat_counter_print(&stats->pages_extended, "-extended", out); - mi_stat_counter_print(&stats->page_no_retire, "-noretire", out); - mi_stat_counter_print(&stats->mmap_calls, "mmaps", out); - mi_stat_counter_print(&stats->commit_calls, "commits", out); - mi_stat_print(&stats->threads, "threads", -1, out); - mi_stat_counter_print_avg(&stats->searches, "searches", out); - _mi_fprintf(out, "%10s: %7i\n", "numa nodes", _mi_os_numa_node_count()); - if (elapsed > 0) _mi_fprintf(out, "%10s: %7ld.%03ld s\n", "elapsed", elapsed/1000, elapsed%1000); + mi_stat_print(&stats->reserved, "reserved", 1, out, arg); + mi_stat_print(&stats->committed, "committed", 1, out, arg); + mi_stat_print(&stats->reset, "reset", 1, out, arg); + mi_stat_print(&stats->page_committed, "touched", 1, out, arg); + mi_stat_print(&stats->segments, "segments", -1, out, arg); + mi_stat_print(&stats->segments_abandoned, "-abandoned", -1, out, arg); + mi_stat_print(&stats->segments_cache, "-cached", -1, out, arg); + mi_stat_print(&stats->pages, "pages", -1, out, arg); + mi_stat_print(&stats->pages_abandoned, "-abandoned", -1, out, arg); + mi_stat_counter_print(&stats->pages_extended, "-extended", out, arg); + mi_stat_counter_print(&stats->page_no_retire, "-noretire", out, arg); + mi_stat_counter_print(&stats->mmap_calls, "mmaps", out, arg); + mi_stat_counter_print(&stats->commit_calls, "commits", out, arg); + mi_stat_print(&stats->threads, "threads", -1, out, arg); + mi_stat_counter_print_avg(&stats->searches, "searches", out, arg); + _mi_fprintf(out, arg, "%10s: %7i\n", "numa nodes", _mi_os_numa_node_count()); + if (elapsed > 0) _mi_fprintf(out, arg, "%10s: %7ld.%03ld s\n", "elapsed", elapsed/1000, elapsed%1000); mi_msecs_t user_time; mi_msecs_t sys_time; @@ -281,13 +281,13 @@ static void _mi_stats_print(mi_stats_t* stats, mi_msecs_t elapsed, mi_output_fun size_t page_reclaim; size_t peak_commit; mi_process_info(&user_time, &sys_time, &peak_rss, &page_faults, &page_reclaim, &peak_commit); - _mi_fprintf(out,"%10s: user: %ld.%03ld s, system: %ld.%03ld s, faults: %lu, reclaims: %lu, rss: ", "process", user_time/1000, user_time%1000, sys_time/1000, sys_time%1000, (unsigned long)page_faults, (unsigned long)page_reclaim ); - mi_printf_amount((int64_t)peak_rss, 1, out, "%s"); + _mi_fprintf(out, arg, "%10s: user: %ld.%03ld s, 
system: %ld.%03ld s, faults: %lu, reclaims: %lu, rss: ", "process", user_time/1000, user_time%1000, sys_time/1000, sys_time%1000, (unsigned long)page_faults, (unsigned long)page_reclaim ); + mi_printf_amount((int64_t)peak_rss, 1, out, arg, "%s"); if (peak_commit > 0) { - _mi_fprintf(out,", commit charge: "); - mi_printf_amount((int64_t)peak_commit, 1, out, "%s"); + _mi_fprintf(out, arg, ", commit charge: "); + mi_printf_amount((int64_t)peak_commit, 1, out, arg, "%s"); } - _mi_fprintf(out,"\n"); + _mi_fprintf(out, arg, "\n"); } static mi_msecs_t mi_time_start; // = 0 @@ -319,20 +319,20 @@ void _mi_stats_done(mi_stats_t* stats) { // called from `mi_thread_done` mi_stats_merge_from(stats); } - -static void mi_stats_print_ex(mi_stats_t* stats, mi_msecs_t elapsed, mi_output_fun* out) { - mi_stats_merge_from(stats); - _mi_stats_print(&_mi_stats_main, elapsed, out); +void mi_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept { + mi_msecs_t elapsed = _mi_clock_end(mi_time_start); + mi_stats_merge_from(mi_stats_get_default()); + _mi_stats_print(&_mi_stats_main, elapsed, out, arg); } -void mi_stats_print(mi_output_fun* out) mi_attr_noexcept { - mi_msecs_t elapsed = _mi_clock_end(mi_time_start); - mi_stats_print_ex(mi_stats_get_default(),elapsed,out); +void mi_stats_print(void* out) mi_attr_noexcept { + // for compatibility there is an `out` parameter (which can be `stdout` or `stderr`) + mi_stats_print_out((mi_output_fun*)out, NULL); } -void mi_thread_stats_print(mi_output_fun* out) mi_attr_noexcept { +void mi_thread_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept { mi_msecs_t elapsed = _mi_clock_end(mi_time_start); - _mi_stats_print(mi_stats_get_default(), elapsed, out); + _mi_stats_print(mi_stats_get_default(), elapsed, out, arg); } diff --git a/test/main-override-static.c b/test/main-override-static.c index b04bfeef..54a5ea66 100644 --- a/test/main-override-static.c +++ b/test/main-override-static.c @@ -13,7 +13,7 @@ static void corrupt_free(); int main() { mi_version(); - + // detect double frees and heap corruption // double_free1(); // double_free2(); @@ -106,4 +106,4 @@ static void corrupt_free() { for (int i = 0; i < 4096; i++) { malloc(SZ); } -} \ No newline at end of file +} diff --git a/test/test-stress.c b/test/test-stress.c index d295f741..42628d7c 100644 --- a/test/test-stress.c +++ b/test/test-stress.c @@ -119,7 +119,7 @@ static void free_items(void* p) { static void stress(intptr_t tid) { //bench_start_thread(); uintptr_t r = tid * 43; - const size_t max_item_shift = 5; // 128 + const size_t max_item_shift = 5; // 128 const size_t max_item_retained_shift = max_item_shift + 2; size_t allocs = 100 * ((size_t)SCALE) * (tid % 8 + 1); // some threads do more size_t retain = allocs / 2; @@ -135,7 +135,7 @@ static void stress(intptr_t tid) { allocs--; if (data_top >= data_size) { data_size += 100000; - data = (void**)custom_realloc(data, data_size * sizeof(void*)); + data = (void**)custom_realloc(data, data_size * sizeof(void*)); } data[data_top++] = alloc_items(1ULL << (pick(&r) % max_item_shift), &r); } From c9b5ac80b3a22a2456035651afcae1966ce6d3ee Mon Sep 17 00:00:00 2001 From: daan Date: Wed, 15 Jan 2020 12:00:44 -0800 Subject: [PATCH 09/46] update page reset queue to just do delayed page resets --- include/mimalloc-types.h | 3 +- src/init.c | 2 +- src/options.c | 2 +- src/segment.c | 192 ++++++++++++++++++--------------------- 4 files changed, 89 insertions(+), 110 deletions(-) diff --git a/include/mimalloc-types.h b/include/mimalloc-types.h index 
51306808..5d5f6dfc 100644 --- a/include/mimalloc-types.h +++ b/include/mimalloc-types.h @@ -417,8 +417,7 @@ typedef struct mi_os_tld_s { typedef struct mi_segments_tld_s { mi_segment_queue_t small_free; // queue of segments with free small pages mi_segment_queue_t medium_free; // queue of segments with free medium pages - mi_page_queue_t small_pages_free; // page queue of free small pages - mi_page_queue_t medium_pages_free; // page queue of free medium pages + mi_page_queue_t pages_reset; // queue of freed pages that can be reset size_t count; // current number of segments; size_t peak_count; // peak number of segments size_t current_size; // current size of all segments diff --git a/src/init.c b/src/init.c index 085a5011..debc2517 100644 --- a/src/init.c +++ b/src/init.c @@ -105,7 +105,7 @@ mi_decl_thread mi_heap_t* _mi_heap_default = (mi_heap_t*)&_mi_heap_empty; static mi_tld_t tld_main = { 0, false, &_mi_heap_main, - { { NULL, NULL }, {NULL ,NULL}, {NULL ,NULL, 0}, {NULL ,NULL, 0}, + { { NULL, NULL }, {NULL ,NULL}, {NULL ,NULL, 0}, 0, 0, 0, 0, 0, 0, NULL, tld_main_stats, tld_main_os }, // segments diff --git a/src/options.c b/src/options.c index 77205713..17e3a836 100644 --- a/src/options.c +++ b/src/options.c @@ -67,7 +67,7 @@ static mi_option_desc_t options[_mi_option_last] = { 0, UNINIT, MI_OPTION(large_os_pages) }, // use large OS pages, use only with eager commit to prevent fragmentation of VMA's { 0, UNINIT, MI_OPTION(reserve_huge_os_pages) }, { 0, UNINIT, MI_OPTION(segment_cache) }, // cache N segments per thread - { 0, UNINIT, MI_OPTION(page_reset) }, // reset pages on free + { 1, UNINIT, MI_OPTION(page_reset) }, // reset pages on free { 0, UNINIT, MI_OPTION(segment_reset) }, // reset segment memory on free (needs eager commit) { 0, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed { 100, UNINIT, MI_OPTION(reset_delay) }, // reset delay in milli-seconds diff --git a/src/segment.c b/src/segment.c index fb5ea0ec..a2cd945c 100644 --- a/src/segment.c +++ b/src/segment.c @@ -134,18 +134,10 @@ static size_t mi_segment_page_size(const mi_segment_t* segment) { } } -static mi_page_queue_t* mi_segment_page_free_queue(mi_page_kind_t kind, mi_segments_tld_t* tld) { - if (kind==MI_PAGE_SMALL) return &tld->small_pages_free; - else if (kind==MI_PAGE_MEDIUM) return &tld->medium_pages_free; - else return NULL; -} - #if (MI_DEBUG>=3) -static bool mi_segment_page_free_contains(mi_page_kind_t kind, const mi_page_t* page, mi_segments_tld_t* tld) { - const mi_page_queue_t* const pq = mi_segment_page_free_queue(kind, tld); - if (pq == NULL) return false; - mi_page_t* p = pq->first; +static bool mi_pages_reset_contains(const mi_page_t* page, mi_segments_tld_t* tld) { + mi_page_t* p = tld->pages_reset.first; while (p != NULL) { if (p == page) return true; p = p->next; @@ -164,8 +156,8 @@ static bool mi_segment_is_valid(const mi_segment_t* segment, mi_segments_tld_t* if (!page->segment_in_use) { nfree++; } - else { - mi_assert_expensive(!mi_segment_page_free_contains(segment->page_kind, page, tld)); + if (page->segment_in_use || page->is_reset) { + mi_assert_expensive(!mi_pages_reset_contains(page, tld)); } } mi_assert_internal(nfree + segment->used == segment->capacity); @@ -176,17 +168,15 @@ static bool mi_segment_is_valid(const mi_segment_t* segment, mi_segments_tld_t* } #endif -static bool mi_segment_page_free_not_in_queue(const mi_page_t* page, mi_segments_tld_t* tld) { - mi_page_kind_t kind = _mi_page_segment(page)->page_kind; +static bool 
mi_page_not_in_queue(const mi_page_t* page, mi_segments_tld_t* tld) { if (page->next != NULL || page->prev != NULL) { - mi_assert_internal(mi_segment_page_free_contains(kind, page, tld)); + mi_assert_internal(mi_pages_reset_contains(page, tld)); return false; } - if (kind > MI_PAGE_MEDIUM) return true; - // both next and prev are NULL, check for singleton list - const mi_page_queue_t* const pq = mi_segment_page_free_queue(kind, tld); - mi_assert_internal(pq!=NULL); - return (pq->first != page && pq->last != page); + else { + // both next and prev are NULL, check for singleton list + return (tld->pages_reset.first != page && tld->pages_reset.last != page); + } } @@ -274,44 +264,57 @@ static void mi_page_unreset(mi_segment_t* segment, mi_page_t* page, size_t size, The free page queue ----------------------------------------------------------- */ -static void mi_segment_page_free_set_expire(mi_page_t* page) { - *((intptr_t*)(&page->heap)) = _mi_clock_now() + mi_option_get(mi_option_reset_delay); +// we re-use the heap field for the expiration counter. Since this is a +// pointer, it can be 32-bit while the clock is always 64-bit. To guard +// against overflow, we use subtraction to check for expiry which works +// as long as the reset delay is under (2^30 - 1) milliseconds (~12 days) +static void mi_page_reset_set_expire(mi_page_t* page) { + intptr_t expire = (intptr_t)(_mi_clock_now() + mi_option_get(mi_option_reset_delay)); + page->heap = (mi_heap_t*)expire; } -static mi_msecs_t mi_segment_page_free_get_expire(mi_page_t* page) { - return *((intptr_t*)(&page->heap)); +static bool mi_page_reset_is_expired(mi_page_t* page, mi_msecs_t now) { + intptr_t expire = (intptr_t)(page->heap); + return (((intptr_t)now - expire) >= 0); } -static void mi_segment_page_free_add(mi_segment_t* segment, mi_page_t* page, mi_segments_tld_t* tld) { - mi_assert_internal(segment->page_kind <= MI_PAGE_MEDIUM); +static void mi_pages_reset_add(mi_segment_t* segment, mi_page_t* page, mi_segments_tld_t* tld) { mi_assert_internal(!page->segment_in_use); - mi_assert_internal(_mi_page_segment(page) == segment); - mi_assert_internal(mi_segment_page_free_not_in_queue(page,tld)); - mi_assert_expensive(!mi_segment_page_free_contains(segment->page_kind, page, tld)); - mi_page_queue_t* pq = mi_segment_page_free_queue(segment->page_kind, tld); - // push on top - mi_segment_page_free_set_expire(page); - page->next = pq->first; - page->prev = NULL; - if (pq->first == NULL) { - mi_assert_internal(pq->last == NULL); - pq->first = pq->last = page; + mi_assert_internal(mi_page_not_in_queue(page,tld)); + mi_assert_expensive(!mi_pages_reset_contains(page, tld)); + mi_assert_internal(_mi_page_segment(page)==segment); + if (!mi_option_is_enabled(mi_option_page_reset)) return; + if (segment->mem_is_fixed || page->segment_in_use || page->is_reset) return; + + if (mi_option_get(mi_option_reset_delay) == 0) { + // reset immediately?
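// (a reset delay of 0 disables the delayed queue: the page is reset right away here
//  instead of being pushed on `tld->pages_reset` below)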
+ mi_page_reset(segment, page, 0, tld); } else { - pq->first->prev = page; - pq->first = page; + // otherwise push on the delayed page reset queue + mi_page_queue_t* pq = &tld->pages_reset; + // push on top + mi_page_reset_set_expire(page); + page->next = pq->first; + page->prev = NULL; + if (pq->first == NULL) { + mi_assert_internal(pq->last == NULL); + pq->first = pq->last = page; + } + else { + pq->first->prev = page; + pq->first = page; + } } } -static void mi_segment_page_free_remove(mi_segment_t* segment, mi_page_t* page, mi_segments_tld_t* tld) { - if (segment->page_kind > MI_PAGE_MEDIUM) return; - if (mi_segment_page_free_not_in_queue(page,tld)) return; +static void mi_pages_reset_remove(mi_page_t* page, mi_segments_tld_t* tld) { + if (mi_page_not_in_queue(page,tld)) return; - mi_page_queue_t* pq = mi_segment_page_free_queue(segment->page_kind, tld); + mi_page_queue_t* pq = &tld->pages_reset; mi_assert_internal(pq!=NULL); - mi_assert_internal(_mi_page_segment(page)==segment); mi_assert_internal(!page->segment_in_use); - mi_assert_internal(mi_segment_page_free_contains(segment->page_kind, page, tld)); + mi_assert_internal(mi_pages_reset_contains(page, tld)); if (page->prev != NULL) page->prev->next = page->next; if (page->next != NULL) page->next->prev = page->prev; if (page == pq->last) pq->last = page->prev; @@ -320,33 +323,33 @@ static void mi_segment_page_free_remove(mi_segment_t* segment, mi_page_t* page, page->heap = NULL; } -static void mi_segment_page_free_remove_all(mi_segment_t* segment, mi_segments_tld_t* tld) { - if (segment->page_kind > MI_PAGE_MEDIUM) return; +static void mi_pages_reset_remove_all_in_segment(mi_segment_t* segment, mi_segments_tld_t* tld) { + if (segment->mem_is_fixed) return; for (size_t i = 0; i < segment->capacity; i++) { mi_page_t* page = &segment->pages[i]; - if (!page->segment_in_use) { - mi_segment_page_free_remove(segment, page, tld); + if (!page->segment_in_use && !page->is_reset) { + mi_pages_reset_remove(page, tld); } + else { + mi_assert_internal(mi_page_not_in_queue(page,tld)); + } } } -static mi_page_t* mi_segment_page_free_top(mi_page_kind_t kind, mi_segments_tld_t* tld) { - mi_assert_internal(kind <= MI_PAGE_MEDIUM); - mi_page_queue_t* pq = mi_segment_page_free_queue(kind, tld); - return pq->first; -} - -static void mi_segment_page_free_reset_delayedx(mi_msecs_t now, mi_page_kind_t kind, mi_segments_tld_t* tld) { - mi_page_queue_t* pq = mi_segment_page_free_queue(kind, tld); - mi_assert_internal(pq != NULL); +static void mi_reset_delayed(mi_segments_tld_t* tld) { + if (!mi_option_is_enabled(mi_option_page_reset)) return; + mi_msecs_t now = _mi_clock_now(); + mi_page_queue_t* pq = &tld->pages_reset; + // from oldest up to the first that has not expired yet mi_page_t* page = pq->last; - while (page != NULL && (now - mi_segment_page_free_get_expire(page)) >= 0) { - mi_page_t* const prev = page->prev; + while (page != NULL && mi_page_reset_is_expired(page,now)) { + mi_page_t* const prev = page->prev; // save previous field mi_page_reset(_mi_page_segment(page), page, 0, tld); page->heap = NULL; page->prev = page->next = NULL; page = prev; } + // discard the reset pages from the queue pq->last = page; if (page != NULL){ page->next = NULL; @@ -356,12 +359,6 @@ static void mi_segment_page_free_reset_delayedx(mi_msecs_t now, mi_page_kind_t k } } -static void mi_segment_page_free_reset_delayed(mi_segments_tld_t* tld) { - if (!mi_option_is_enabled(mi_option_page_reset)) return; - mi_msecs_t now = _mi_clock_now(); - 
mi_segment_page_free_reset_delayedx(now, MI_PAGE_SMALL, tld); - mi_segment_page_free_reset_delayedx(now, MI_PAGE_MEDIUM, tld); -} @@ -541,10 +538,8 @@ void _mi_segment_thread_collect(mi_segments_tld_t* tld) { } mi_assert_internal(tld->cache_count == 0); mi_assert_internal(tld->cache == NULL); - mi_assert_internal(tld->small_pages_free.first == NULL); - mi_assert_internal(tld->medium_pages_free.first == NULL); - mi_assert_internal(tld->small_free.first == NULL); - mi_assert_internal(tld->medium_free.first == NULL); + mi_assert_internal(tld->pages_reset.first == NULL); + mi_assert_internal(tld->pages_reset.last == NULL); } @@ -672,7 +667,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind, static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) { UNUSED(force); mi_assert(segment != NULL); - mi_segment_page_free_remove_all(segment, tld); + mi_pages_reset_remove_all_in_segment(segment, tld); mi_segment_remove_from_free_queue(segment,tld); mi_assert_expensive(!mi_segment_queue_contains(&tld->small_free, segment)); @@ -703,7 +698,7 @@ static void mi_segment_page_claim(mi_segment_t* segment, mi_page_t* page, mi_seg mi_assert_internal(_mi_page_segment(page) == segment); mi_assert_internal(!page->segment_in_use); // set in-use before doing unreset to prevent delayed reset - mi_segment_page_free_remove(segment, page, tld); + mi_pages_reset_remove(page, tld); page->segment_in_use = true; segment->used++; if (!page->is_committed) { @@ -744,7 +739,7 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, mi_seg mi_assert_internal(page->segment_in_use); mi_assert_internal(mi_page_all_free(page)); mi_assert_internal(page->is_committed); - mi_assert_internal(mi_segment_page_free_not_in_queue(page, tld)); + mi_assert_internal(mi_page_not_in_queue(page, tld)); size_t inuse = page->capacity * page->block_size; _mi_stat_decrease(&tld->stats->page_committed, inuse); _mi_stat_decrease(&tld->stats->pages, 1); @@ -770,7 +765,7 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, mi_seg // add to the free page list for reuse/reset if (segment->page_kind <= MI_PAGE_MEDIUM) { - mi_segment_page_free_add(segment, page, tld); + mi_pages_reset_add(segment, page, tld); } } @@ -779,7 +774,7 @@ void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld) mi_assert(page != NULL); mi_segment_t* segment = _mi_page_segment(page); mi_assert_expensive(mi_segment_is_valid(segment,tld)); - mi_segment_page_free_reset_delayed(tld); + mi_reset_delayed(tld); // mark it as free now mi_segment_page_clear(segment, page, tld); @@ -841,8 +836,8 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) { mi_assert_expensive(mi_segment_is_valid(segment,tld)); // remove the segment from the free page queue if needed - mi_segment_page_free_reset_delayed(tld); - mi_segment_page_free_remove_all(segment, tld); + mi_reset_delayed(tld); + mi_pages_reset_remove_all_in_segment(segment, tld); // do not force reset on free pages in an abandoned segment, as it is already done in segment_thread_collect mi_segment_remove_from_free_queue(segment, tld); mi_assert_internal(segment->next == NULL && segment->prev == NULL); @@ -858,7 +853,7 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) { void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) { mi_assert(page != NULL); mi_segment_t* segment = _mi_page_segment(page); - 
mi_assert_expensive(!mi_segment_page_free_contains(segment->page_kind, page, tld)); + mi_assert_expensive(!mi_pages_reset_contains(page, tld)); mi_assert_expensive(mi_segment_is_valid(segment,tld)); segment->abandoned++; _mi_stat_increase(&tld->stats->pages_abandoned, 1); @@ -916,7 +911,7 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen if (page->segment_in_use) { mi_assert_internal(!page->is_reset); mi_assert_internal(page->is_committed); - mi_assert_internal(mi_segment_page_free_not_in_queue(page, tld)); + mi_assert_internal(mi_page_not_in_queue(page, tld)); segment->abandoned--; mi_assert(page->next == NULL); _mi_stat_decrease(&tld->stats->pages_abandoned, 1); @@ -957,7 +952,7 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen static mi_page_t* mi_segment_find_free(mi_segment_t* segment, mi_segments_tld_t* tld) { mi_assert_internal(mi_segment_has_free(segment)); mi_assert_expensive(mi_segment_is_valid(segment, tld)); - for (size_t i = 0; i < segment->capacity; i++) { + for (size_t i = 0; i < segment->capacity; i++) { // TODO: use a bitmap instead of search? mi_page_t* page = &segment->pages[i]; if (!page->segment_in_use) { mi_segment_page_claim(segment, page, tld); @@ -968,7 +963,6 @@ static mi_page_t* mi_segment_find_free(mi_segment_t* segment, mi_segments_tld_t* return NULL; } - // Allocate a page inside a segment. Requires that the page has free pages static mi_page_t* mi_segment_page_alloc_in(mi_segment_t* segment, mi_segments_tld_t* tld) { mi_assert_internal(mi_segment_has_free(segment)); @@ -976,33 +970,19 @@ static mi_page_t* mi_segment_page_alloc_in(mi_segment_t* segment, mi_segments_tl } static mi_page_t* mi_segment_page_alloc(mi_page_kind_t kind, size_t page_shift, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) { - mi_page_t* page = NULL; + // find an available segment the segment free queue mi_segment_queue_t* const free_queue = mi_segment_free_queue_of_kind(kind, tld); - if (free_queue->first != NULL && free_queue->first->used < free_queue->first->capacity) { - // prefer to allocate from an available segment - // (to allow more chance of other segments to become completely freed) - page = mi_segment_page_alloc_in(free_queue->first, tld); - } - else { - // otherwise try to pop from the page free list - page = mi_segment_page_free_top(kind, tld); - if (page != NULL) { - mi_segment_page_claim(_mi_page_segment(page), page, tld); - } - else { - // if that failed, find an available segment the segment free queue again - if (mi_segment_queue_is_empty(free_queue)) { - // possibly allocate a fresh segment - mi_segment_t* segment = mi_segment_alloc(0, kind, page_shift, tld, os_tld); - if (segment == NULL) return NULL; // return NULL if out-of-memory - mi_segment_enqueue(free_queue, segment); - } - mi_assert_internal(free_queue->first != NULL); - page = mi_segment_page_alloc_in(free_queue->first, tld); - } + if (mi_segment_queue_is_empty(free_queue)) { + // possibly allocate a fresh segment + mi_segment_t* segment = mi_segment_alloc(0, kind, page_shift, tld, os_tld); + if (segment == NULL) return NULL; // return NULL if out-of-memory + mi_segment_enqueue(free_queue, segment); } + mi_assert_internal(free_queue->first != NULL); + mi_page_t* const page = mi_segment_page_alloc_in(free_queue->first, tld); mi_assert_internal(page != NULL); #if MI_DEBUG>=2 + // verify it is committed _mi_segment_page_start(_mi_page_segment(page), page, sizeof(void*), NULL, NULL)[0] = 0; #endif return page; @@ -1062,7 +1042,7 @@ mi_page_t* 
_mi_segment_page_alloc(size_t block_size, mi_segments_tld_t* tld, mi_ } mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld)); mi_assert_internal(page == NULL || (mi_segment_page_size(_mi_page_segment(page)) - (MI_SECURE == 0 ? 0 : _mi_os_page_size())) >= block_size); - mi_segment_page_free_reset_delayed(tld); - mi_assert_internal(mi_segment_page_free_not_in_queue(page, tld)); + mi_reset_delayed(tld); + mi_assert_internal(mi_page_not_in_queue(page, tld)); return page; } From 202246425b5c0f2f0dc68a6de9fc9fa6628d7822 Mon Sep 17 00:00:00 2001 From: daan Date: Wed, 15 Jan 2020 12:16:01 -0800 Subject: [PATCH 10/46] bump version to 1.4 for further development --- cmake/mimalloc-config-version.cmake | 2 +- include/mimalloc.h | 2 +- readme.md | 3 +++ test/CMakeLists.txt | 2 +- 4 files changed, 6 insertions(+), 3 deletions(-) diff --git a/cmake/mimalloc-config-version.cmake b/cmake/mimalloc-config-version.cmake index f64948d3..0a982bdf 100644 --- a/cmake/mimalloc-config-version.cmake +++ b/cmake/mimalloc-config-version.cmake @@ -1,5 +1,5 @@ set(mi_version_major 1) -set(mi_version_minor 3) +set(mi_version_minor 4) set(mi_version ${mi_version_major}.${mi_version_minor}) set(PACKAGE_VERSION ${mi_version}) diff --git a/include/mimalloc.h b/include/mimalloc.h index 51d96609..fe09c7f2 100644 --- a/include/mimalloc.h +++ b/include/mimalloc.h @@ -8,7 +8,7 @@ terms of the MIT license. A copy of the license can be found in the file #ifndef MIMALLOC_H #define MIMALLOC_H -#define MI_MALLOC_VERSION 130 // major + 2 digits minor +#define MI_MALLOC_VERSION 140 // major + 2 digits minor // ------------------------------------------------------ // Compiler specific attributes diff --git a/readme.md b/readme.md index 9d3974c9..0a096b5e 100644 --- a/readme.md +++ b/readme.md @@ -56,6 +56,9 @@ Enjoy! ### Releases + +* 2020-01-15, `v1.3.0`: stable release 1.3: bug fixes, improved randomness and stronger +free list encoding in secure mode. * 2019-11-22, `v1.2.0`: stable release 1.2: bug fixes, improved secure mode (free list corruption checks, double free mitigation). Improved dynamic overriding on Windows. * 2019-10-07, `v1.1.0`: stable release 1.1. * 2019-09-01, `v1.0.8`: pre-release 8: more robust windows dynamic overriding, initial huge page support. 
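For downstream users, a minimal sketch of guarding on the bumped version (assuming `mi_version()` keeps returning the `MI_MALLOC_VERSION` encoding, i.e. major plus a 2-digit minor):

#include <stdio.h>
#include <mimalloc.h>

int main(void) {
#if MI_MALLOC_VERSION < 140
#error "this sketch expects mimalloc v1.4 or later"
#endif
  // prints e.g. "header 140, library 140" when header and linked library match
  printf("header %d, library %d\n", MI_MALLOC_VERSION, mi_version());
  return 0;
}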
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index ed204888..4862c0ec 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -13,7 +13,7 @@ if (NOT CMAKE_BUILD_TYPE) endif() # Import mimalloc (if installed) -find_package(mimalloc 1.3 REQUIRED NO_SYSTEM_ENVIRONMENT_PATH) +find_package(mimalloc 1.4 REQUIRED NO_SYSTEM_ENVIRONMENT_PATH) message(STATUS "Found mimalloc installed at: ${MIMALLOC_TARGET_DIR}") # overriding with a dynamic library From 0099707af905cddaab3d51a5639a1a2ae21ecf3c Mon Sep 17 00:00:00 2001 From: daan Date: Wed, 15 Jan 2020 17:19:01 -0800 Subject: [PATCH 11/46] use delayed free for all pages; reduce size of the page structure for improved address calculation --- include/mimalloc-internal.h | 45 +++++++-- include/mimalloc-types.h | 57 ++++++----- src/alloc.c | 131 +++++++++++++------------ src/heap.c | 67 +++++++------ src/init.c | 11 +-- src/page-queue.c | 44 ++++----- src/page.c | 190 +++++++++++++++--------------------- src/segment.c | 19 ++-- 8 files changed, 296 insertions(+), 268 deletions(-) diff --git a/include/mimalloc-internal.h b/include/mimalloc-internal.h index d5ce9f59..a9391a40 100644 --- a/include/mimalloc-internal.h +++ b/include/mimalloc-internal.h @@ -308,7 +308,7 @@ static inline mi_page_t* _mi_segment_page_of(const mi_segment_t* segment, const // Quick page start for initialized pages static inline uint8_t* _mi_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size) { - const size_t bsize = page->block_size; + const size_t bsize = page->xblock_size; mi_assert_internal(bsize > 0 && (bsize%sizeof(void*)) == 0); return _mi_segment_page_start(segment, page, bsize, page_size, NULL); } @@ -318,7 +318,40 @@ static inline mi_page_t* _mi_ptr_page(void* p) { return _mi_segment_page_of(_mi_ptr_segment(p), p); } +// Get the block size of a page (special cased for huge objects) +static inline size_t mi_page_block_size(const mi_page_t* page) { + const size_t bsize = page->xblock_size; + mi_assert_internal(bsize > 0); + if (mi_likely(bsize < MI_HUGE_BLOCK_SIZE)) { + return bsize; + } + else { + size_t psize; + _mi_segment_page_start(_mi_page_segment(page), page, bsize, &psize, NULL); + return psize; + } +} + // Thread free access +static inline mi_block_t* mi_page_thread_free(const mi_page_t* page) { + return (mi_block_t*)(mi_atomic_read_relaxed(&page->xthread_free) & ~3); +} + +static inline mi_delayed_t mi_page_thread_free_flag(const mi_page_t* page) { + return (mi_delayed_t)(mi_atomic_read_relaxed(&page->xthread_free) & 3); +} + +// Heap access +static inline mi_heap_t* mi_page_heap(const mi_page_t* page) { + return (mi_heap_t*)(mi_atomic_read_relaxed(&page->xheap)); +} + +static inline void mi_page_set_heap(mi_page_t* page, mi_heap_t* heap) { + mi_assert_internal(mi_page_thread_free_flag(page) != MI_DELAYED_FREEING); + mi_atomic_write(&page->xheap,(uintptr_t)heap); +} + +// Thread free flag helpers static inline mi_block_t* mi_tf_block(mi_thread_free_t tf) { return (mi_block_t*)(tf & ~0x03); } @@ -338,7 +371,7 @@ static inline mi_thread_free_t mi_tf_set_block(mi_thread_free_t tf, mi_block_t* // are all blocks in a page freed? static inline bool mi_page_all_free(const mi_page_t* page) { mi_assert_internal(page != NULL); - return (page->used - page->thread_freed == 0); + return (page->used == 0); } // are there immediately available blocks @@ -349,8 +382,8 @@ static inline bool mi_page_immediate_available(const mi_page_t* page) { // are there free blocks in this page? 
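// (reading aid: `xthread_free` above packs the mi_delayed_t flag into its low 2 bits
//  and the block pointer into the remaining bits, hence the `& ~3` in mi_page_thread_free
//  and the `& 3` in mi_page_thread_free_flag; mi_page_has_free below consults this list
//  in addition to `free` and `local_free`)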
static inline bool mi_page_has_free(mi_page_t* page) { mi_assert_internal(page != NULL); - bool hasfree = (mi_page_immediate_available(page) || page->local_free != NULL || (mi_tf_block(page->thread_free) != NULL)); - mi_assert_internal(hasfree || page->used - page->thread_freed == page->capacity); + bool hasfree = (mi_page_immediate_available(page) || page->local_free != NULL || (mi_page_thread_free(page) != NULL)); + mi_assert_internal(hasfree || page->used == page->capacity); return hasfree; } @@ -364,7 +397,7 @@ static inline bool mi_page_all_used(mi_page_t* page) { static inline bool mi_page_mostly_used(const mi_page_t* page) { if (page==NULL) return true; uint16_t frac = page->reserved / 8U; - return (page->reserved - page->used + page->thread_freed <= frac); + return (page->reserved - page->used <= frac); } static inline mi_page_queue_t* mi_page_queue(const mi_heap_t* heap, size_t size) { @@ -467,7 +500,7 @@ static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t* // check for free list corruption: is `next` at least in the same page? // TODO: check if `next` is `page->block_size` aligned? if (mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next))) { - _mi_fatal_error("corrupted free list entry of size %zub at %p: value 0x%zx\n", page->block_size, block, (uintptr_t)next); + _mi_fatal_error("corrupted free list entry of size %zub at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next); next = NULL; } return next; diff --git a/include/mimalloc-types.h b/include/mimalloc-types.h index da9bfbac..bf288d60 100644 --- a/include/mimalloc-types.h +++ b/include/mimalloc-types.h @@ -124,6 +124,9 @@ terms of the MIT license. A copy of the license can be found in the file #error "define more bins" #endif +// Used as a special value to encode block sizes in 32 bits. +#define MI_HUGE_BLOCK_SIZE ((uint32_t)MI_HUGE_OBJ_SIZE_MAX) + // The free lists use encoded next fields // (Only actually encodes when MI_ENCODED_FREELIST is defined.) typedef uintptr_t mi_encoded_t; @@ -136,10 +139,10 @@ typedef struct mi_block_s { // The delayed flags are used for efficient multi-threaded free-ing typedef enum mi_delayed_e { - MI_NO_DELAYED_FREE = 0, - MI_USE_DELAYED_FREE = 1, - MI_DELAYED_FREEING = 2, - MI_NEVER_DELAYED_FREE = 3 + MI_USE_DELAYED_FREE = 0, // push on the owning heap thread delayed list + MI_DELAYED_FREEING = 1, // temporary: another thread is accessing the owning heap + MI_NO_DELAYED_FREE = 2, // optimize: push on page local thread free queue if another block is already in the heap thread delayed free list + MI_NEVER_DELAYED_FREE = 3 // sticky, only resets on page reclaim } mi_delayed_t; @@ -167,14 +170,28 @@ typedef uintptr_t mi_thread_free_t; // implement a monotonic heartbeat. The `thread_free` list is needed for // avoiding atomic operations in the common case. // -// `used - thread_freed` == actual blocks that are in use (alive) -// `used - thread_freed + |free| + |local_free| == capacity` // -// note: we don't count `freed` (as |free|) instead of `used` to reduce -// the number of memory accesses in the `mi_page_all_free` function(s). 
-// note: the funny layout here is due to: -// - access is optimized for `mi_free` and `mi_page_alloc` -// - using `uint16_t` does not seem to slow things down +// `used - |thread_free|` == actual blocks that are in use (alive) +// `used - |thread_free| + |free| + |local_free| == capacity` +// +// We don't count `freed` (as |free|) but use `used` to reduce +// the number of memory accesses in the `mi_page_all_free` function(s). +// +// Notes: +// - Access is optimized for `mi_free` and `mi_page_alloc` (in `alloc.c`) +// - Using `uint16_t` does not seem to slow things down +// - The size is 8 words on 64-bit which helps the page index calculations +// (and 10 words on 32-bit, and encoded free lists add 2 words. Sizes 10 +// and 12 are still good for address calculation) +// - To limit the structure size, the `xblock_size` is 32-bits only; for +// blocks > MI_HUGE_BLOCK_SIZE the size is determined from the segment page size +// - `thread_free` uses the bottom bits as a delayed-free flags to optimize +// concurrent frees where only the first concurrent free adds to the owning +// heap `thread_delayed_free` list (see `alloc.c:mi_free_block_mt`). +// The invariant is that no-delayed-free is only set if there is +// at least one block that will be added, or as already been added, to +// the owning heap `thread_delayed_free` list. This guarantees that pages +// will be freed correctly even if only other threads free blocks. typedef struct mi_page_s { // "owned" by the segment uint8_t segment_idx; // index in the segment `pages` array, `page == &segment->pages[page->segment_idx]` @@ -194,23 +211,15 @@ typedef struct mi_page_s { #ifdef MI_ENCODE_FREELIST uintptr_t key[2]; // two random keys to encode the free lists (see `_mi_block_next`) #endif - size_t used; // number of blocks in use (including blocks in `local_free` and `thread_free`) + uint32_t used; // number of blocks in use (including blocks in `local_free` and `thread_free`) + uint32_t xblock_size; // size available in each block (always `>0`) mi_block_t* local_free; // list of deferred free blocks by this thread (migrates to `free`) - volatile _Atomic(uintptr_t) thread_freed; // at least this number of blocks are in `thread_free` - volatile _Atomic(mi_thread_free_t) thread_free; // list of deferred free blocks freed by other threads - - // less accessed info - size_t block_size; // size available in each block (always `>0`) - mi_heap_t* heap; // the owning heap + volatile _Atomic(mi_thread_free_t) xthread_free; // list of deferred free blocks freed by other threads + volatile _Atomic(uintptr_t) xheap; + struct mi_page_s* next; // next page owned by this thread with the same `block_size` struct mi_page_s* prev; // previous page owned by this thread with the same `block_size` - - // improve page index calculation - // without padding: 10 words on 64-bit, 11 on 32-bit. Secure adds two words - #if (MI_INTPTR_SIZE==4) - void* padding[1]; // 12/14 words on 32-bit plain - #endif } mi_page_t; diff --git a/src/alloc.c b/src/alloc.c index bd81aba0..621fb0db 100644 --- a/src/alloc.c +++ b/src/alloc.c @@ -22,7 +22,7 @@ terms of the MIT license. A copy of the license can be found in the file // Fast allocation in a page: just pop from the free list. // Fall back to generic allocation only if the list is empty. 
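// (note: `xblock_size` is only 32 bits and capped at MI_HUGE_BLOCK_SIZE, so the size
//  assertion below goes through mi_page_block_size() which recomputes the real block
//  size from the segment page for huge blocks)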
extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept { - mi_assert_internal(page->block_size==0||page->block_size >= size); + mi_assert_internal(page->xblock_size==0||mi_page_block_size(page) >= size); mi_block_t* block = page->free; if (mi_unlikely(block == NULL)) { return _mi_malloc_generic(heap, size); // slow path @@ -94,16 +94,16 @@ void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size) { // or the recalloc/rezalloc functions cannot safely expand in place (see issue #63) UNUSED(size); mi_assert_internal(p != NULL); - mi_assert_internal(size > 0 && page->block_size >= size); + mi_assert_internal(size > 0 && mi_page_block_size(page) >= size); mi_assert_internal(_mi_ptr_page(p)==page); if (page->is_zero) { // already zero initialized memory? ((mi_block_t*)p)->next = 0; // clear the free list pointer - mi_assert_expensive(mi_mem_is_zero(p,page->block_size)); + mi_assert_expensive(mi_mem_is_zero(p, mi_page_block_size(page))); } else { // otherwise memset - memset(p, 0, page->block_size); + memset(p, 0, mi_page_block_size(page)); } } @@ -141,13 +141,12 @@ static bool mi_list_contains(const mi_page_t* page, const mi_block_t* list, cons static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, const mi_block_t* block) { // The decoded value is in the same page (or NULL). - // Walk the free lists to verify positively if it is already freed - mi_thread_free_t tf = (mi_thread_free_t)mi_atomic_read_relaxed(mi_atomic_cast(uintptr_t, &page->thread_free)); + // Walk the free lists to verify positively if it is already freed if (mi_list_contains(page, page->free, block) || mi_list_contains(page, page->local_free, block) || - mi_list_contains(page, mi_tf_block(tf), block)) + mi_list_contains(page, mi_page_thread_free(page), block)) { - _mi_fatal_error("double free detected of block %p with size %zu\n", block, page->block_size); + _mi_fatal_error("double free detected of block %p with size %zu\n", block, mi_page_block_size(page)); return true; } return false; @@ -177,44 +176,50 @@ static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block // Free // ------------------------------------------------------ +// free huge block from another thread +static mi_decl_noinline void mi_free_huge_block_mt(mi_segment_t* segment, mi_page_t* page, mi_block_t* block) { + // huge page segments are always abandoned and can be freed immediately + mi_assert_internal(segment->page_kind==MI_PAGE_HUGE); + mi_assert_internal(segment == _mi_page_segment(page)); + mi_assert_internal(mi_atomic_read_relaxed(&segment->thread_id)==0); + + // claim it and free + mi_heap_t* heap = mi_get_default_heap(); + // paranoia: if this it the last reference, the cas should always succeed + if (mi_atomic_cas_strong(&segment->thread_id, heap->thread_id, 0)) { + mi_block_set_next(page, block, page->free); + page->free = block; + page->used--; + page->is_zero = false; + mi_assert(page->used == 0); + mi_tld_t* tld = heap->tld; + const size_t bsize = mi_page_block_size(page); + if (bsize > MI_HUGE_OBJ_SIZE_MAX) { + _mi_stat_decrease(&tld->stats.giant, bsize); + } + else { + _mi_stat_decrease(&tld->stats.huge, bsize); + } + _mi_segment_page_free(page, true, &tld->segments); + } +} + // multi-threaded free static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block) { - mi_thread_free_t tfree; - mi_thread_free_t tfreex; - bool use_delayed; - + // huge page segments are always abandoned and can be freed immediately mi_segment_t* 
segment = _mi_page_segment(page); if (segment->page_kind==MI_PAGE_HUGE) { - // huge page segments are always abandoned and can be freed immediately - mi_assert_internal(mi_atomic_read_relaxed(&segment->thread_id)==0); - mi_assert_internal(mi_atomic_read_ptr_relaxed(mi_atomic_cast(void*,&segment->abandoned_next))==NULL); - // claim it and free - mi_heap_t* heap = mi_get_default_heap(); - // paranoia: if this it the last reference, the cas should always succeed - if (mi_atomic_cas_strong(&segment->thread_id,heap->thread_id,0)) { - mi_block_set_next(page, block, page->free); - page->free = block; - page->used--; - page->is_zero = false; - mi_assert(page->used == 0); - mi_tld_t* tld = heap->tld; - if (page->block_size > MI_HUGE_OBJ_SIZE_MAX) { - _mi_stat_decrease(&tld->stats.giant, page->block_size); - } - else { - _mi_stat_decrease(&tld->stats.huge, page->block_size); - } - _mi_segment_page_free(page,true,&tld->segments); - } + mi_free_huge_block_mt(segment, page, block); return; } + mi_thread_free_t tfree; + mi_thread_free_t tfreex; + bool use_delayed; do { - tfree = page->thread_free; - use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE || - (mi_tf_delayed(tfree) == MI_NO_DELAYED_FREE && page->used == mi_atomic_read_relaxed(&page->thread_freed)+1) // data-race but ok, just optimizes early release of the page - ); + tfree = mi_atomic_read_relaxed(&page->xthread_free); + use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE); if (mi_unlikely(use_delayed)) { // unlikely: this only happens on the first concurrent free in a page that is in the full list tfreex = mi_tf_set_delayed(tfree,MI_DELAYED_FREEING); @@ -224,15 +229,11 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc mi_block_set_next(page, block, mi_tf_block(tfree)); tfreex = mi_tf_set_block(tfree,block); } - } while (!mi_atomic_cas_weak(mi_atomic_cast(uintptr_t,&page->thread_free), tfreex, tfree)); + } while (!mi_atomic_cas_weak(&page->xthread_free, tfreex, tfree)); - if (mi_likely(!use_delayed)) { - // increment the thread free count and return - mi_atomic_increment(&page->thread_freed); - } - else { + if (mi_unlikely(use_delayed)) { // racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`) - mi_heap_t* heap = (mi_heap_t*)mi_atomic_read_ptr(mi_atomic_cast(void*, &page->heap)); + mi_heap_t* heap = mi_page_heap(page); mi_assert_internal(heap != NULL); if (heap != NULL) { // add to the delayed free list of this heap. 
(do this atomically as the lock only protects heap memory validity) @@ -245,10 +246,10 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc // and reset the MI_DELAYED_FREEING flag do { - tfreex = tfree = page->thread_free; - mi_assert_internal(mi_tf_delayed(tfree) == MI_NEVER_DELAYED_FREE || mi_tf_delayed(tfree) == MI_DELAYED_FREEING); - if (mi_tf_delayed(tfree) != MI_NEVER_DELAYED_FREE) tfreex = mi_tf_set_delayed(tfree,MI_NO_DELAYED_FREE); - } while (!mi_atomic_cas_weak(mi_atomic_cast(uintptr_t,&page->thread_free), tfreex, tfree)); + tfreex = tfree = mi_atomic_read_relaxed(&page->xthread_free); + mi_assert_internal(mi_tf_delayed(tfree) == MI_DELAYED_FREEING); + tfreex = mi_tf_set_delayed(tfree,MI_NO_DELAYED_FREE); + } while (!mi_atomic_cas_weak(&page->xthread_free, tfreex, tfree)); } } @@ -257,7 +258,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block) { #if (MI_DEBUG) - memset(block, MI_DEBUG_FREED, page->block_size); + memset(block, MI_DEBUG_FREED, mi_page_block_size(page)); #endif // and push it on the free list @@ -284,7 +285,7 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p) { mi_assert_internal(page!=NULL && p!=NULL); size_t diff = (uint8_t*)p - _mi_page_start(segment, page, NULL); - size_t adjust = (diff % page->block_size); + size_t adjust = (diff % mi_page_block_size(page)); return (mi_block_t*)((uintptr_t)p - adjust); } @@ -329,8 +330,8 @@ void mi_free(void* p) mi_attr_noexcept #if (MI_STAT>1) mi_heap_t* heap = mi_heap_get_default(); mi_heap_stat_decrease(heap, malloc, mi_usable_size(p)); - if (page->block_size <= MI_LARGE_OBJ_SIZE_MAX) { - mi_heap_stat_decrease(heap, normal[_mi_bin(page->block_size)], 1); + if (page->xblock_size <= MI_LARGE_OBJ_SIZE_MAX) { + mi_heap_stat_decrease(heap, normal[_mi_bin(page->xblock_size)], 1); } // huge page stat is accounted for in `_mi_page_retire` #endif @@ -342,7 +343,9 @@ void mi_free(void* p) mi_attr_noexcept mi_block_set_next(page, block, page->local_free); page->local_free = block; page->used--; - if (mi_unlikely(mi_page_all_free(page))) { _mi_page_retire(page); } + if (mi_unlikely(mi_page_all_free(page))) { + _mi_page_retire(page); + } } else { // non-local, aligned blocks, or a full page; use the more generic path @@ -356,13 +359,19 @@ bool _mi_free_delayed_block(mi_block_t* block) { mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie); mi_assert_internal(_mi_thread_id() == segment->thread_id); mi_page_t* page = _mi_segment_page_of(segment, block); - if (mi_tf_delayed(page->thread_free) == MI_DELAYED_FREEING) { - // we might already start delayed freeing while another thread has not yet - // reset the delayed_freeing flag; in that case don't free it quite yet if - // this is the last block remaining. - if (page->used - page->thread_freed == 1) return false; - } - _mi_free_block(page,true,block); + + // Clear the no-delayed flag so delayed freeing is used again for this page. + // This must be done before collecting the free lists on this page -- otherwise + // some blocks may end up in the page `thread_free` list with no blocks in the + // heap `thread_delayed_free` list which may cause the page to be never freed! 
+ // (it would only be freed if we happen to scan it in `mi_page_queue_find_free_ex`) + _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false /* dont overwrite never delayed */); + + // collect all other non-local frees to ensure up-to-date `used` count + _mi_page_free_collect(page, false); + + // and free the block (possibly freeing the page as well since used is updated) + _mi_free_block(page, true, block); return true; } @@ -371,7 +380,7 @@ size_t mi_usable_size(const void* p) mi_attr_noexcept { if (p==NULL) return 0; const mi_segment_t* segment = _mi_ptr_segment(p); const mi_page_t* page = _mi_segment_page_of(segment,p); - size_t size = page->block_size; + size_t size = mi_page_block_size(page); if (mi_unlikely(mi_page_has_aligned(page))) { ptrdiff_t adjust = (uint8_t*)p - (uint8_t*)_mi_page_ptr_unalign(segment,page,p); mi_assert_internal(adjust >= 0 && (size_t)adjust <= size); diff --git a/src/heap.c b/src/heap.c index 4a589e5c..9f2a4457 100644 --- a/src/heap.c +++ b/src/heap.c @@ -34,7 +34,7 @@ static bool mi_heap_visit_pages(mi_heap_t* heap, heap_page_visitor_fun* fn, void mi_page_t* page = pq->first; while(page != NULL) { mi_page_t* next = page->next; // save next in case the page gets removed from the queue - mi_assert_internal(page->heap == heap); + mi_assert_internal(mi_page_heap(page) == heap); count++; if (!fn(heap, pq, page, arg1, arg2)) return false; page = next; // and continue @@ -50,7 +50,7 @@ static bool mi_heap_page_is_valid(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_ UNUSED(arg1); UNUSED(arg2); UNUSED(pq); - mi_assert_internal(page->heap == heap); + mi_assert_internal(mi_page_heap(page) == heap); mi_segment_t* segment = _mi_page_segment(page); mi_assert_internal(segment->thread_id == heap->thread_id); mi_assert_expensive(_mi_page_is_valid(page)); @@ -118,13 +118,18 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect) // this may free some segments (but also take ownership of abandoned pages) _mi_segment_try_reclaim_abandoned(heap, false, &heap->tld->segments); } - #if MI_DEBUG - else if (collect == ABANDON && _mi_is_main_thread() && mi_heap_is_backing(heap)) { + else if ( + #ifdef NDEBUG + collect == FORCE + #else + collect >= FORCE + #endif + && _mi_is_main_thread() && mi_heap_is_backing(heap)) + { // the main thread is abandoned, try to free all abandoned segments. // if all memory is freed by now, all segments should be freed. 
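// (under NDEBUG only an explicit FORCE collect reaches this point; debug builds also
//  take this path on ABANDON, i.e. `collect >= FORCE`)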
_mi_segment_try_reclaim_abandoned(heap, true, &heap->tld->segments); } - #endif } // if abandoning, mark all pages to no longer add to delayed_free @@ -245,25 +250,27 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_ _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false); // stats - if (page->block_size > MI_LARGE_OBJ_SIZE_MAX) { - if (page->block_size > MI_HUGE_OBJ_SIZE_MAX) { - _mi_stat_decrease(&heap->tld->stats.giant,page->block_size); + const size_t bsize = mi_page_block_size(page); + if (bsize > MI_LARGE_OBJ_SIZE_MAX) { + if (bsize > MI_HUGE_OBJ_SIZE_MAX) { + _mi_stat_decrease(&heap->tld->stats.giant, bsize); } else { - _mi_stat_decrease(&heap->tld->stats.huge, page->block_size); + _mi_stat_decrease(&heap->tld->stats.huge, bsize); } } - #if (MI_STAT>1) - size_t inuse = page->used - page->thread_freed; - if (page->block_size <= MI_LARGE_OBJ_SIZE_MAX) { - mi_heap_stat_decrease(heap,normal[_mi_bin(page->block_size)], inuse); +#if (MI_STAT>1) + _mi_page_free_collect(page, false); // update used count + const size_t inuse = page->used; + if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { + mi_heap_stat_decrease(heap, normal[_mi_bin(bsize)], inuse); } - mi_heap_stat_decrease(heap,malloc, page->block_size * inuse); // todo: off for aligned blocks... - #endif + mi_heap_stat_decrease(heap, malloc, bsize * inuse); // todo: off for aligned blocks... +#endif - // pretend it is all free now - mi_assert_internal(page->thread_freed<=0xFFFF); - page->used = (uint16_t)page->thread_freed; + /// pretend it is all free now + mi_assert_internal(mi_page_thread_free(page) == NULL); + page->used = 0; // and free the page _mi_segment_page_free(page,false /* no force? */, &heap->tld->segments); @@ -374,7 +381,7 @@ static mi_heap_t* mi_heap_of_block(const void* p) { bool valid = (_mi_ptr_cookie(segment) == segment->cookie); mi_assert_internal(valid); if (mi_unlikely(!valid)) return NULL; - return _mi_segment_page_of(segment,p)->heap; + return mi_page_heap(_mi_segment_page_of(segment,p)); } bool mi_heap_contains_block(mi_heap_t* heap, const void* p) { @@ -390,7 +397,7 @@ static bool mi_heap_page_check_owned(mi_heap_t* heap, mi_page_queue_t* pq, mi_pa bool* found = (bool*)vfound; mi_segment_t* segment = _mi_page_segment(page); void* start = _mi_page_start(segment, page, NULL); - void* end = (uint8_t*)start + (page->capacity * page->block_size); + void* end = (uint8_t*)start + (page->capacity * mi_page_block_size(page)); *found = (p >= start && p < end); return (!*found); // continue if not found } @@ -432,13 +439,14 @@ static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_v mi_assert_internal(page->local_free == NULL); if (page->used == 0) return true; + const size_t bsize = mi_page_block_size(page); size_t psize; uint8_t* pstart = _mi_page_start(_mi_page_segment(page), page, &psize); if (page->capacity == 1) { // optimize page with one block mi_assert_internal(page->used == 1 && page->free == NULL); - return visitor(page->heap, area, pstart, page->block_size, arg); + return visitor(mi_page_heap(page), area, pstart, bsize, arg); } // create a bitmap of free blocks. @@ -451,8 +459,8 @@ static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_v free_count++; mi_assert_internal((uint8_t*)block >= pstart && (uint8_t*)block < (pstart + psize)); size_t offset = (uint8_t*)block - pstart; - mi_assert_internal(offset % page->block_size == 0); - size_t blockidx = offset / page->block_size; // Todo: avoid division? 
+ mi_assert_internal(offset % bsize == 0); + size_t blockidx = offset / bsize; // Todo: avoid division? mi_assert_internal( blockidx < MI_MAX_BLOCKS); size_t bitidx = (blockidx / sizeof(uintptr_t)); size_t bit = blockidx - (bitidx * sizeof(uintptr_t)); @@ -471,8 +479,8 @@ static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_v } else if ((m & ((uintptr_t)1 << bit)) == 0) { used_count++; - uint8_t* block = pstart + (i * page->block_size); - if (!visitor(page->heap, area, block, page->block_size, arg)) return false; + uint8_t* block = pstart + (i * bsize); + if (!visitor(mi_page_heap(page), area, block, bsize, arg)) return false; } } mi_assert_internal(page->used == used_count); @@ -487,12 +495,13 @@ static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_pa UNUSED(pq); mi_heap_area_visit_fun* fun = (mi_heap_area_visit_fun*)vfun; mi_heap_area_ex_t xarea; + const size_t bsize = mi_page_block_size(page); xarea.page = page; - xarea.area.reserved = page->reserved * page->block_size; - xarea.area.committed = page->capacity * page->block_size; + xarea.area.reserved = page->reserved * bsize; + xarea.area.committed = page->capacity * bsize; xarea.area.blocks = _mi_page_start(_mi_page_segment(page), page, NULL); - xarea.area.used = page->used - page->thread_freed; // race is ok - xarea.area.block_size = page->block_size; + xarea.area.used = page->used; + xarea.area.block_size = bsize; return fun(heap, &xarea, arg); } diff --git a/src/init.c b/src/init.c index 79e1e044..d81d7459 100644 --- a/src/init.c +++ b/src/init.c @@ -23,12 +23,11 @@ const mi_page_t _mi_page_empty = { { 0, 0 }, #endif 0, // used - NULL, - ATOMIC_VAR_INIT(0), ATOMIC_VAR_INIT(0), - 0, NULL, NULL, NULL - #if (MI_INTPTR_SIZE==4) - , { NULL } // padding - #endif + 0, // xblock_size + NULL, // local_free + ATOMIC_VAR_INIT(0), // xthread_free + ATOMIC_VAR_INIT(0), // xheap + NULL, NULL }; #define MI_PAGE_EMPTY() ((mi_page_t*)&_mi_page_empty) diff --git a/src/page-queue.c b/src/page-queue.c index 95443a69..68e2aaa4 100644 --- a/src/page-queue.c +++ b/src/page-queue.c @@ -178,20 +178,20 @@ static bool mi_heap_contains_queue(const mi_heap_t* heap, const mi_page_queue_t* #endif static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) { - uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : _mi_bin(page->block_size)); - mi_heap_t* heap = page->heap; + uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : _mi_bin(page->xblock_size)); + mi_heap_t* heap = mi_page_heap(page); mi_assert_internal(heap != NULL && bin <= MI_BIN_FULL); mi_page_queue_t* pq = &heap->pages[bin]; - mi_assert_internal(bin >= MI_BIN_HUGE || page->block_size == pq->block_size); + mi_assert_internal(bin >= MI_BIN_HUGE || page->xblock_size == pq->block_size); mi_assert_expensive(mi_page_queue_contains(pq, page)); return pq; } static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t* page) { - uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : _mi_bin(page->block_size)); + uint8_t bin = (mi_page_is_in_full(page) ? 
MI_BIN_FULL : _mi_bin(page->xblock_size)); mi_assert_internal(bin <= MI_BIN_FULL); mi_page_queue_t* pq = &heap->pages[bin]; - mi_assert_internal(mi_page_is_in_full(page) || page->block_size == pq->block_size); + mi_assert_internal(mi_page_is_in_full(page) || page->xblock_size == pq->block_size); return pq; } @@ -246,35 +246,35 @@ static bool mi_page_queue_is_empty(mi_page_queue_t* queue) { static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) { mi_assert_internal(page != NULL); mi_assert_expensive(mi_page_queue_contains(queue, page)); - mi_assert_internal(page->block_size == queue->block_size || (page->block_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue))); + mi_assert_internal(page->xblock_size == queue->block_size || (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue))); + mi_heap_t* heap = mi_page_heap(page); if (page->prev != NULL) page->prev->next = page->next; if (page->next != NULL) page->next->prev = page->prev; if (page == queue->last) queue->last = page->prev; if (page == queue->first) { queue->first = page->next; // update first - mi_heap_t* heap = page->heap; mi_assert_internal(mi_heap_contains_queue(heap, queue)); mi_heap_queue_first_update(heap,queue); } - page->heap->page_count--; + heap->page_count--; page->next = NULL; page->prev = NULL; - mi_atomic_write_ptr(mi_atomic_cast(void*, &page->heap), NULL); + // mi_atomic_write_ptr(mi_atomic_cast(void*, &page->heap), NULL); mi_page_set_in_full(page,false); } static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) { - mi_assert_internal(page->heap == NULL); + mi_assert_internal(mi_page_heap(page) == heap); mi_assert_internal(!mi_page_queue_contains(queue, page)); mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE); - mi_assert_internal(page->block_size == queue->block_size || - (page->block_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) || + mi_assert_internal(page->xblock_size == queue->block_size || + (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue))); mi_page_set_in_full(page, mi_page_queue_is_full(queue)); - mi_atomic_write_ptr(mi_atomic_cast(void*, &page->heap), heap); + // mi_atomic_write_ptr(mi_atomic_cast(void*, &page->heap), heap); page->next = queue->first; page->prev = NULL; if (queue->first != NULL) { @@ -296,19 +296,19 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro mi_assert_internal(page != NULL); mi_assert_expensive(mi_page_queue_contains(from, page)); mi_assert_expensive(!mi_page_queue_contains(to, page)); - mi_assert_internal((page->block_size == to->block_size && page->block_size == from->block_size) || - (page->block_size == to->block_size && mi_page_queue_is_full(from)) || - (page->block_size == from->block_size && mi_page_queue_is_full(to)) || - (page->block_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(to)) || - (page->block_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_full(to))); + mi_assert_internal((page->xblock_size == to->block_size && page->xblock_size == from->block_size) || + (page->xblock_size == to->block_size && mi_page_queue_is_full(from)) || + (page->xblock_size == from->block_size && mi_page_queue_is_full(to)) || + (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(to)) || + (page->xblock_size > 
MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_full(to))); + mi_heap_t* heap = mi_page_heap(page); if (page->prev != NULL) page->prev->next = page->next; if (page->next != NULL) page->next->prev = page->prev; if (page == from->last) from->last = page->prev; if (page == from->first) { from->first = page->next; // update first - mi_heap_t* heap = page->heap; mi_assert_internal(mi_heap_contains_queue(heap, from)); mi_heap_queue_first_update(heap, from); } @@ -316,14 +316,14 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro page->prev = to->last; page->next = NULL; if (to->last != NULL) { - mi_assert_internal(page->heap == to->last->heap); + mi_assert_internal(heap == mi_page_heap(to->last)); to->last->next = page; to->last = page; } else { to->first = page; to->last = page; - mi_heap_queue_first_update(page->heap, to); + mi_heap_queue_first_update(heap, to); } mi_page_set_in_full(page, mi_page_queue_is_full(to)); @@ -338,7 +338,7 @@ size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue // set append pages to new heap and count size_t count = 0; for (mi_page_t* page = append->first; page != NULL; page = page->next) { - mi_atomic_write_ptr(mi_atomic_cast(void*, &page->heap), heap); + mi_page_set_heap(page,heap); count++; } diff --git a/src/page.c b/src/page.c index 6a6e09d6..40aec0c6 100644 --- a/src/page.c +++ b/src/page.c @@ -29,10 +29,11 @@ terms of the MIT license. A copy of the license can be found in the file ----------------------------------------------------------- */ // Index a block in a page -static inline mi_block_t* mi_page_block_at(const mi_page_t* page, void* page_start, size_t i) { +static inline mi_block_t* mi_page_block_at(const mi_page_t* page, void* page_start, size_t block_size, size_t i) { + UNUSED(page); mi_assert_internal(page != NULL); mi_assert_internal(i <= page->reserved); - return (mi_block_t*)((uint8_t*)page_start + (i * page->block_size)); + return (mi_block_t*)((uint8_t*)page_start + (i * block_size)); } static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t size, mi_tld_t* tld); @@ -69,13 +70,14 @@ static bool mi_page_list_is_valid(mi_page_t* page, mi_block_t* p) { } static bool mi_page_is_valid_init(mi_page_t* page) { - mi_assert_internal(page->block_size > 0); + mi_assert_internal(page->xblock_size > 0); mi_assert_internal(page->used <= page->capacity); mi_assert_internal(page->capacity <= page->reserved); + const size_t bsize = mi_page_block_size(page); mi_segment_t* segment = _mi_page_segment(page); uint8_t* start = _mi_page_start(segment,page,NULL); - mi_assert_internal(start == _mi_segment_page_start(segment,page,page->block_size,NULL,NULL)); + mi_assert_internal(start == _mi_segment_page_start(segment,page,bsize,NULL,NULL)); //mi_assert_internal(start + page->capacity*page->block_size == page->top); mi_assert_internal(mi_page_list_is_valid(page,page->free)); @@ -89,10 +91,10 @@ static bool mi_page_is_valid_init(mi_page_t* page) { } #endif - mi_block_t* tfree = mi_tf_block(page->thread_free); + mi_block_t* tfree = mi_page_thread_free(page); mi_assert_internal(mi_page_list_is_valid(page, tfree)); - size_t tfree_count = mi_page_list_count(page, tfree); - mi_assert_internal(tfree_count <= page->thread_freed + 1); + //size_t tfree_count = mi_page_list_count(page, tfree); + //mi_assert_internal(tfree_count <= page->thread_freed + 1); size_t free_count = mi_page_list_count(page, page->free) + mi_page_list_count(page, page->local_free); mi_assert_internal(page->used + free_count == 
page->capacity); @@ -105,14 +107,14 @@ bool _mi_page_is_valid(mi_page_t* page) { #if MI_SECURE mi_assert_internal(page->key != 0); #endif - if (page->heap!=NULL) { + if (mi_page_heap(page)!=NULL) { mi_segment_t* segment = _mi_page_segment(page); - mi_assert_internal(!_mi_process_is_initialized || segment->thread_id == page->heap->thread_id || segment->thread_id==0); + mi_assert_internal(!_mi_process_is_initialized || segment->thread_id == mi_page_heap(page)->thread_id || segment->thread_id==0); if (segment->page_kind != MI_PAGE_HUGE) { mi_page_queue_t* pq = mi_page_queue_of(page); mi_assert_internal(mi_page_queue_contains(pq, page)); - mi_assert_internal(pq->block_size==page->block_size || page->block_size > MI_LARGE_OBJ_SIZE_MAX || mi_page_is_in_full(page)); - mi_assert_internal(mi_heap_contains_queue(page->heap,pq)); + mi_assert_internal(pq->block_size==mi_page_block_size(page) || mi_page_block_size(page) > MI_LARGE_OBJ_SIZE_MAX || mi_page_is_in_full(page)); + mi_assert_internal(mi_heap_contains_queue(mi_page_heap(page),pq)); } } return true; @@ -124,20 +126,20 @@ void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool overrid mi_thread_free_t tfreex; mi_delayed_t old_delay; do { - tfree = mi_atomic_read_relaxed(&page->thread_free); + tfree = mi_atomic_read(&page->xthread_free); tfreex = mi_tf_set_delayed(tfree, delay); old_delay = mi_tf_delayed(tfree); if (mi_unlikely(old_delay == MI_DELAYED_FREEING)) { - mi_atomic_yield(); // delay until outstanding MI_DELAYED_FREEING are done. + // mi_atomic_yield(); // delay until outstanding MI_DELAYED_FREEING are done. + tfree = mi_tf_set_delayed(tfree, MI_NO_DELAYED_FREE); // will cause CAS to busy fail } else if (delay == old_delay) { break; // avoid atomic operation if already equal } else if (!override_never && old_delay == MI_NEVER_DELAYED_FREE) { - break; // leave never set + break; // leave never-delayed flag set } - } while ((old_delay == MI_DELAYED_FREEING) || - !mi_atomic_cas_weak(mi_atomic_cast(uintptr_t, &page->thread_free), tfreex, tfree)); + } while (!mi_atomic_cas_weak(&page->xthread_free, tfreex, tfree)); } /* ----------------------------------------------------------- @@ -154,17 +156,17 @@ static void _mi_page_thread_free_collect(mi_page_t* page) mi_thread_free_t tfree; mi_thread_free_t tfreex; do { - tfree = page->thread_free; + tfree = mi_atomic_read_relaxed(&page->xthread_free); head = mi_tf_block(tfree); tfreex = mi_tf_set_block(tfree,NULL); - } while (!mi_atomic_cas_weak(mi_atomic_cast(uintptr_t,&page->thread_free), tfreex, tfree)); + } while (!mi_atomic_cas_weak(&page->xthread_free, tfreex, tfree)); // return if the list is empty if (head == NULL) return; // find the tail -- also to get a proper count (without data races) - uintptr_t max_count = page->capacity; // cannot collect more than capacity - uintptr_t count = 1; + uint32_t max_count = page->capacity; // cannot collect more than capacity + uint32_t count = 1; mi_block_t* tail = head; mi_block_t* next; while ((next = mi_block_next(page,tail)) != NULL && count <= max_count) { @@ -182,7 +184,6 @@ static void _mi_page_thread_free_collect(mi_page_t* page) page->local_free = head; // update counts now - mi_atomic_subu(&page->thread_freed, count); page->used -= count; } @@ -190,7 +191,7 @@ void _mi_page_free_collect(mi_page_t* page, bool force) { mi_assert_internal(page!=NULL); // collect the thread free list - if (force || mi_tf_block(page->thread_free) != NULL) { // quick test to avoid an atomic operation + if (force || mi_page_thread_free(page) != NULL) { 
// quick test to avoid an atomic operation _mi_page_thread_free_collect(page); } @@ -228,15 +229,16 @@ void _mi_page_free_collect(mi_page_t* page, bool force) { // called from segments when reclaiming abandoned pages void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) { mi_assert_expensive(mi_page_is_valid_init(page)); - mi_assert_internal(page->heap == NULL); + mi_assert_internal(mi_page_heap(page) == NULL); mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE); mi_assert_internal(!page->is_reset); - mi_assert_internal(mi_tf_delayed(page->thread_free) == MI_NEVER_DELAYED_FREE); - _mi_page_free_collect(page,false); - mi_page_queue_t* pq = mi_page_queue(heap, page->block_size); + mi_assert_internal(mi_page_thread_free_flag(page) == MI_NEVER_DELAYED_FREE); + mi_page_set_heap(page, heap); + mi_page_queue_t* pq = mi_page_queue(heap, mi_page_block_size(page)); mi_page_queue_push(heap, pq, page); - mi_assert_internal(page->heap != NULL); - _mi_page_use_delayed_free(page, MI_NO_DELAYED_FREE, true); // override never (after push so heap is set) + _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, true); // override never (after heap is set) + // _mi_page_free_collect(page,false); // no need, as it is just done before reclaim + mi_assert_internal(mi_page_heap(page)!= NULL); mi_assert_expensive(_mi_page_is_valid(page)); } @@ -270,8 +272,8 @@ static mi_page_t* mi_page_fresh(mi_heap_t* heap, mi_page_queue_t* pq) { // otherwise allocate the page page = mi_page_fresh_alloc(heap, pq, pq->block_size); if (page==NULL) return NULL; - mi_assert_internal(pq->block_size==page->block_size); - mi_assert_internal(pq==mi_page_queue(heap,page->block_size)); + mi_assert_internal(pq->block_size==mi_page_block_size(page)); + mi_assert_internal(pq==mi_page_queue(heap, mi_page_block_size(page))); return page; } @@ -312,11 +314,9 @@ void _mi_page_unfull(mi_page_t* page) { mi_assert_internal(page != NULL); mi_assert_expensive(_mi_page_is_valid(page)); mi_assert_internal(mi_page_is_in_full(page)); - - _mi_page_use_delayed_free(page, MI_NO_DELAYED_FREE, false); if (!mi_page_is_in_full(page)) return; - mi_heap_t* heap = page->heap; + mi_heap_t* heap = mi_page_heap(page); mi_page_queue_t* pqfull = &heap->pages[MI_BIN_FULL]; mi_page_set_in_full(page, false); // to get the right queue mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page); @@ -329,10 +329,8 @@ static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) { mi_assert_internal(!mi_page_immediate_available(page)); mi_assert_internal(!mi_page_is_in_full(page)); - _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false); if (mi_page_is_in_full(page)) return; - - mi_page_queue_enqueue_from(&page->heap->pages[MI_BIN_FULL], pq, page); + mi_page_queue_enqueue_from(&mi_page_heap(page)->pages[MI_BIN_FULL], pq, page); _mi_page_free_collect(page,false); // try to collect right away in case another thread freed just before MI_USE_DELAYED_FREE was set } @@ -345,18 +343,17 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) { mi_assert_internal(page != NULL); mi_assert_expensive(_mi_page_is_valid(page)); mi_assert_internal(pq == mi_page_queue_of(page)); - mi_assert_internal(page->heap != NULL); + mi_assert_internal(mi_page_heap(page) != NULL); -#if MI_DEBUG > 1 - mi_heap_t* pheap = (mi_heap_t*)mi_atomic_read_ptr(mi_atomic_cast(void*, &page->heap)); -#endif + mi_heap_t* pheap = mi_page_heap(page); // remove from our page list - mi_segments_tld_t* segments_tld = &page->heap->tld->segments; + mi_segments_tld_t* segments_tld = 
&pheap->tld->segments; mi_page_queue_remove(pq, page); // page is no longer associated with our heap - mi_atomic_write_ptr(mi_atomic_cast(void*, &page->heap), NULL); + mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE); + mi_page_set_heap(page, NULL); #if MI_DEBUG>1 // check there are no references left.. @@ -366,7 +363,7 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) { #endif // and abandon it - mi_assert_internal(page->heap == NULL); + mi_assert_internal(mi_page_heap(page) == NULL); _mi_segment_page_abandon(page,segments_tld); } @@ -377,33 +374,18 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) { mi_assert_expensive(_mi_page_is_valid(page)); mi_assert_internal(pq == mi_page_queue_of(page)); mi_assert_internal(mi_page_all_free(page)); - #if MI_DEBUG>1 - // check if we can safely free - mi_thread_free_t free = mi_tf_set_delayed(page->thread_free,MI_NEVER_DELAYED_FREE); - free = mi_atomic_exchange(&page->thread_free, free); - mi_assert_internal(mi_tf_delayed(free) != MI_DELAYED_FREEING); - #endif + mi_assert_internal(mi_page_thread_free_flag(page)!=MI_DELAYED_FREEING); + // no more aligned blocks in here mi_page_set_has_aligned(page, false); - // account for huge pages here - // (note: no longer necessary as huge pages are always abandoned) - if (page->block_size > MI_LARGE_OBJ_SIZE_MAX) { - if (page->block_size > MI_HUGE_OBJ_SIZE_MAX) { - _mi_stat_decrease(&page->heap->tld->stats.giant, page->block_size); - } - else { - _mi_stat_decrease(&page->heap->tld->stats.huge, page->block_size); - } - } - // remove from the page list // (no need to do _mi_heap_delayed_free first as all blocks are already free) - mi_segments_tld_t* segments_tld = &page->heap->tld->segments; + mi_segments_tld_t* segments_tld = &mi_page_heap(page)->tld->segments; mi_page_queue_remove(pq, page); // and free it - mi_assert_internal(page->heap == NULL); + mi_page_set_heap(page,NULL); _mi_segment_page_free(page, force, segments_tld); } @@ -427,7 +409,7 @@ void _mi_page_retire(mi_page_t* page) { // how to check this efficiently though... // for now, we don't retire if it is the only page left of this size class. mi_page_queue_t* pq = mi_page_queue_of(page); - if (mi_likely(page->block_size <= MI_SMALL_SIZE_MAX)) { + if (mi_likely(page->xblock_size <= MI_SMALL_SIZE_MAX && !mi_page_is_in_full(page))) { if (pq->last==page && pq->first==page) { // the only page in the queue? 
mi_stat_counter_increase(_mi_stats_main.page_no_retire,1); page->retire_expire = 4; @@ -469,15 +451,15 @@ void _mi_heap_collect_retired(mi_heap_t* heap, bool force) { #define MI_MAX_SLICES (1UL << MI_MAX_SLICE_SHIFT) #define MI_MIN_SLICES (2) -static void mi_page_free_list_extend_secure(mi_heap_t* const heap, mi_page_t* const page, const size_t extend, mi_stats_t* const stats) { +static void mi_page_free_list_extend_secure(mi_heap_t* const heap, mi_page_t* const page, const size_t bsize, const size_t extend, mi_stats_t* const stats) { UNUSED(stats); #if (MI_SECURE<=2) mi_assert_internal(page->free == NULL); mi_assert_internal(page->local_free == NULL); #endif mi_assert_internal(page->capacity + extend <= page->reserved); + mi_assert_internal(bsize == mi_page_block_size(page)); void* const page_area = _mi_page_start(_mi_page_segment(page), page, NULL); - const size_t bsize = page->block_size; // initialize a randomized free list // set up `slice_count` slices to alternate between @@ -491,7 +473,7 @@ static void mi_page_free_list_extend_secure(mi_heap_t* const heap, mi_page_t* co mi_block_t* blocks[MI_MAX_SLICES]; // current start of the slice size_t counts[MI_MAX_SLICES]; // available objects in the slice for (size_t i = 0; i < slice_count; i++) { - blocks[i] = mi_page_block_at(page, page_area, page->capacity + i*slice_extend); + blocks[i] = mi_page_block_at(page, page_area, bsize, page->capacity + i*slice_extend); counts[i] = slice_extend; } counts[slice_count-1] += (extend % slice_count); // final slice holds the modulus too (todo: distribute evenly?) @@ -526,7 +508,7 @@ static void mi_page_free_list_extend_secure(mi_heap_t* const heap, mi_page_t* co page->free = free_start; } -static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, const size_t extend, mi_stats_t* const stats) +static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, const size_t bsize, const size_t extend, mi_stats_t* const stats) { UNUSED(stats); #if (MI_SECURE <= 2) @@ -534,12 +516,13 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co mi_assert_internal(page->local_free == NULL); #endif mi_assert_internal(page->capacity + extend <= page->reserved); + mi_assert_internal(bsize == mi_page_block_size(page)); void* const page_area = _mi_page_start(_mi_page_segment(page), page, NULL ); - const size_t bsize = page->block_size; - mi_block_t* const start = mi_page_block_at(page, page_area, page->capacity); + + mi_block_t* const start = mi_page_block_at(page, page_area, bsize, page->capacity); // initialize a sequential free list - mi_block_t* const last = mi_page_block_at(page, page_area, page->capacity + extend - 1); + mi_block_t* const last = mi_page_block_at(page, page_area, bsize, page->capacity + extend - 1); mi_block_t* block = start; while(block <= last) { mi_block_t* next = (mi_block_t*)((uint8_t*)block + bsize); @@ -581,8 +564,9 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld) mi_stat_counter_increase(tld->stats.pages_extended, 1); // calculate the extend count + const size_t bsize = (page->xblock_size < MI_HUGE_BLOCK_SIZE ? page->xblock_size : page_size); size_t extend = page->reserved - page->capacity; - size_t max_extend = (page->block_size >= MI_MAX_EXTEND_SIZE ? MI_MIN_EXTEND : MI_MAX_EXTEND_SIZE/(uint32_t)page->block_size); + size_t max_extend = (bsize >= MI_MAX_EXTEND_SIZE ? 
MI_MIN_EXTEND : MI_MAX_EXTEND_SIZE/(uint32_t)bsize); if (max_extend < MI_MIN_EXTEND) max_extend = MI_MIN_EXTEND; if (extend > max_extend) { @@ -596,20 +580,20 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld) // commit on-demand for large and huge pages? if (_mi_page_segment(page)->page_kind >= MI_PAGE_LARGE && !mi_option_is_enabled(mi_option_eager_page_commit)) { - uint8_t* start = page_start + (page->capacity * page->block_size); - _mi_mem_commit(start, extend * page->block_size, NULL, &tld->os); + uint8_t* start = page_start + (page->capacity * bsize); + _mi_mem_commit(start, extend * bsize, NULL, &tld->os); } // and append the extend the free list if (extend < MI_MIN_SLICES || MI_SECURE==0) { //!mi_option_is_enabled(mi_option_secure)) { - mi_page_free_list_extend(page, extend, &tld->stats ); + mi_page_free_list_extend(page, bsize, extend, &tld->stats ); } else { - mi_page_free_list_extend_secure(heap, page, extend, &tld->stats); + mi_page_free_list_extend_secure(heap, page, bsize, extend, &tld->stats); } // enable the new free list page->capacity += (uint16_t)extend; - mi_stat_increase(tld->stats.page_committed, extend * page->block_size); + mi_stat_increase(tld->stats.page_committed, extend * bsize); // extension into zero initialized memory preserves the zero'd free list if (!page->is_zero_init) { @@ -625,9 +609,10 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi mi_assert(segment != NULL); mi_assert_internal(block_size > 0); // set fields + mi_page_set_heap(page, heap); size_t page_size; _mi_segment_page_start(segment, page, block_size, &page_size, NULL); - page->block_size = block_size; + page->xblock_size = (block_size < MI_HUGE_BLOCK_SIZE ? (uint32_t)block_size : MI_HUGE_BLOCK_SIZE); mi_assert_internal(page_size / block_size < (1L<<16)); page->reserved = (uint16_t)(page_size / block_size); #ifdef MI_ENCODE_FREELIST @@ -639,14 +624,14 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi mi_assert_internal(page->capacity == 0); mi_assert_internal(page->free == NULL); mi_assert_internal(page->used == 0); - mi_assert_internal(page->thread_free == 0); - mi_assert_internal(page->thread_freed == 0); + mi_assert_internal(page->xthread_free == 0); mi_assert_internal(page->next == NULL); mi_assert_internal(page->prev == NULL); mi_assert_internal(page->retire_expire == 0); mi_assert_internal(!mi_page_has_aligned(page)); #if (MI_ENCODE_FREELIST) - mi_assert_internal(page->key != 0); + mi_assert_internal(page->key[1] != 0); + mi_assert_internal(page->key[2] != 0); #endif mi_assert_expensive(mi_page_is_valid_init(page)); @@ -664,34 +649,19 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq) { // search through the pages in "next fit" order - mi_page_t* rpage = NULL; size_t count = 0; - size_t page_free_count = 0; mi_page_t* page = pq->first; - while( page != NULL) + while (page != NULL) { mi_page_t* next = page->next; // remember next count++; // 0. collect freed blocks by us and other threads - _mi_page_free_collect(page,false); + _mi_page_free_collect(page, false); // 1. if the page contains free blocks, we are done if (mi_page_immediate_available(page)) { - // If all blocks are free, we might retire this page instead. - // do this at most 8 times to bound allocation time. 
- // (note: this can happen if a page was earlier not retired due - // to having neighbours that were mostly full or due to concurrent frees) - if (page_free_count < 8 && mi_page_all_free(page)) { - page_free_count++; - if (rpage != NULL) _mi_page_free(rpage,pq,false); - rpage = page; - page = next; - continue; // and keep looking - } - else { - break; // pick this one - } + break; // pick this one } // 2. Try to extend @@ -704,20 +674,12 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p // 3. If the page is completely full, move it to the `mi_pages_full` // queue so we don't visit long-lived pages too often. mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page)); - mi_page_to_full(page,pq); + mi_page_to_full(page, pq); page = next; } // for each page - mi_stat_counter_increase(heap->tld->stats.searches,count); - - if (page == NULL) { - page = rpage; - rpage = NULL; - } - if (rpage != NULL) { - _mi_page_free(rpage,pq,false); - } + mi_stat_counter_increase(heap->tld->stats.searches, count); if (page == NULL) { page = mi_page_fresh(heap, pq); @@ -729,11 +691,12 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p mi_assert_internal(page == NULL || mi_page_immediate_available(page)); // finally collect retired pages - _mi_heap_collect_retired(heap,false); + _mi_heap_collect_retired(heap, false); return page; } + // Find a page with free blocks of `size`. static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) { mi_page_queue_t* pq = mi_page_queue(heap,size); @@ -794,14 +757,15 @@ static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size) { mi_assert_internal(_mi_bin(block_size) == MI_BIN_HUGE); mi_page_t* page = mi_page_fresh_alloc(heap,NULL,block_size); if (page != NULL) { + const size_t bsize = mi_page_block_size(page); mi_assert_internal(mi_page_immediate_available(page)); - mi_assert_internal(page->block_size == block_size); + mi_assert_internal(bsize >= size); mi_assert_internal(_mi_page_segment(page)->page_kind==MI_PAGE_HUGE); mi_assert_internal(_mi_page_segment(page)->used==1); mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue - mi_atomic_write_ptr(mi_atomic_cast(void*, &page->heap), NULL); + mi_page_set_heap(page, NULL); - if (page->block_size > MI_HUGE_OBJ_SIZE_MAX) { + if (bsize > MI_HUGE_OBJ_SIZE_MAX) { _mi_stat_increase(&heap->tld->stats.giant, block_size); _mi_stat_counter_increase(&heap->tld->stats.giant_count, 1); } @@ -849,7 +813,7 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept if (page == NULL) return NULL; // out of memory mi_assert_internal(mi_page_immediate_available(page)); - mi_assert_internal(page->block_size >= size); + mi_assert_internal(mi_page_block_size(page) >= size); // and try again, this time succeeding! (i.e. this should never recurse) return _mi_page_malloc(heap, page, size); diff --git a/src/segment.c b/src/segment.c index 97859fa9..4fb3e28b 100644 --- a/src/segment.c +++ b/src/segment.c @@ -208,8 +208,8 @@ static void mi_page_reset(mi_segment_t* segment, mi_page_t* page, size_t size, m mi_assert_internal(size <= psize); size_t reset_size = (size == 0 || size > psize ? 
psize : size); if (size == 0 && segment->page_kind >= MI_PAGE_LARGE && !mi_option_is_enabled(mi_option_eager_page_commit)) { - mi_assert_internal(page->block_size > 0); - reset_size = page->capacity * page->block_size; + mi_assert_internal(page->xblock_size > 0); + reset_size = page->capacity * mi_page_block_size(page); } _mi_mem_reset(start, reset_size, tld->os); } @@ -223,8 +223,8 @@ static void mi_page_unreset(mi_segment_t* segment, mi_page_t* page, size_t size, uint8_t* start = mi_segment_raw_page_start(segment, page, &psize); size_t unreset_size = (size == 0 || size > psize ? psize : size); if (size == 0 && segment->page_kind >= MI_PAGE_LARGE && !mi_option_is_enabled(mi_option_eager_page_commit)) { - mi_assert_internal(page->block_size > 0); - unreset_size = page->capacity * page->block_size; + mi_assert_internal(page->xblock_size > 0); + unreset_size = page->capacity * mi_page_block_size(page); } bool is_zero = false; _mi_mem_unreset(start, unreset_size, &is_zero, tld->os); @@ -255,7 +255,7 @@ static uint8_t* mi_segment_raw_page_start(const mi_segment_t* segment, const mi_ } if (page_size != NULL) *page_size = psize; - mi_assert_internal(page->block_size == 0 || _mi_ptr_page(p) == page); + mi_assert_internal(page->xblock_size == 0 || _mi_ptr_page(p) == page); mi_assert_internal(_mi_ptr_segment(p) == segment); return p; } @@ -278,7 +278,7 @@ uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* pa } if (page_size != NULL) *page_size = psize; - mi_assert_internal(page->block_size==0 || _mi_ptr_page(p) == page); + mi_assert_internal(page->xblock_size==0 || _mi_ptr_page(p) == page); mi_assert_internal(_mi_ptr_segment(p) == segment); return p; } @@ -605,7 +605,7 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, mi_seg mi_assert_internal(page->segment_in_use); mi_assert_internal(mi_page_all_free(page)); mi_assert_internal(page->is_committed); - size_t inuse = page->capacity * page->block_size; + size_t inuse = page->capacity * mi_page_block_size(page); _mi_stat_decrease(&tld->stats->page_committed, inuse); _mi_stat_decrease(&tld->stats->pages, 1); @@ -707,6 +707,8 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) { void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) { mi_assert(page != NULL); + mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE); + mi_assert_internal(mi_page_heap(page) == NULL); mi_segment_t* segment = _mi_page_segment(page); mi_assert_expensive(mi_segment_is_valid(segment)); segment->abandoned++; @@ -765,9 +767,12 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen if (page->segment_in_use) { mi_assert_internal(!page->is_reset); mi_assert_internal(page->is_committed); + mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE); + mi_assert_internal(mi_page_heap(page) == NULL); segment->abandoned--; mi_assert(page->next == NULL); _mi_stat_decrease(&tld->stats->pages_abandoned, 1); + _mi_page_free_collect(page, false); // ensure used count is up to date if (mi_page_all_free(page)) { // if everything free by now, free the page mi_segment_page_clear(segment,page,tld); From ad32eb1dfb2b73ed8eaecfdc14e01cbbf43d05b2 Mon Sep 17 00:00:00 2001 From: daan Date: Wed, 15 Jan 2020 17:57:19 -0800 Subject: [PATCH 12/46] eager collect on page reclamation --- src/page.c | 10 +++------- src/segment.c | 7 +++++-- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/src/page.c b/src/page.c index 40aec0c6..02f10238 
100644 --- a/src/page.c +++ b/src/page.c @@ -229,16 +229,12 @@ void _mi_page_free_collect(mi_page_t* page, bool force) { // called from segments when reclaiming abandoned pages void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) { mi_assert_expensive(mi_page_is_valid_init(page)); - mi_assert_internal(mi_page_heap(page) == NULL); + mi_assert_internal(mi_page_heap(page) == heap); + mi_assert_internal(mi_page_thread_free_flag(page) != MI_NEVER_DELAYED_FREE); mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE); mi_assert_internal(!page->is_reset); - mi_assert_internal(mi_page_thread_free_flag(page) == MI_NEVER_DELAYED_FREE); - mi_page_set_heap(page, heap); mi_page_queue_t* pq = mi_page_queue(heap, mi_page_block_size(page)); - mi_page_queue_push(heap, pq, page); - _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, true); // override never (after heap is set) - // _mi_page_free_collect(page,false); // no need, as it is just done before reclaim - mi_assert_internal(mi_page_heap(page)!= NULL); + mi_page_queue_push(heap, pq, page); mi_assert_expensive(_mi_page_is_valid(page)); } diff --git a/src/segment.c b/src/segment.c index 4fb3e28b..d27a7c13 100644 --- a/src/segment.c +++ b/src/segment.c @@ -772,13 +772,16 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen segment->abandoned--; mi_assert(page->next == NULL); _mi_stat_decrease(&tld->stats->pages_abandoned, 1); + // set the heap again and allow delayed free again + mi_page_set_heap(page, heap); + _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, true); // override never (after heap is set) _mi_page_free_collect(page, false); // ensure used count is up to date if (mi_page_all_free(page)) { - // if everything free by now, free the page + // if everything free already, clear the page directly mi_segment_page_clear(segment,page,tld); } else { - // otherwise reclaim it + // otherwise reclaim it into the heap _mi_page_reclaim(heap,page); } } From 9629d731888f64db99e43016c916268a73a5f02f Mon Sep 17 00:00:00 2001 From: daan Date: Wed, 15 Jan 2020 18:07:29 -0800 Subject: [PATCH 13/46] fix options --- src/options.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/options.c b/src/options.c index ce21309d..f1d8205f 100644 --- a/src/options.c +++ b/src/options.c @@ -67,6 +67,7 @@ static mi_option_desc_t options[_mi_option_last] = { 0, UNINIT, MI_OPTION(large_os_pages) }, // use large OS pages, use only with eager commit to prevent fragmentation of VMA's { 0, UNINIT, MI_OPTION(reserve_huge_os_pages) }, { 0, UNINIT, MI_OPTION(segment_cache) }, // cache N segments per thread + { 0, UNINIT, MI_OPTION(page_reset) }, // reset page memory on free { 0, UNINIT, MI_OPTION(segment_reset) }, // reset segment memory on free (needs eager commit) { 0, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed { 100, UNINIT, MI_OPTION(reset_delay) }, // reset delay in milli-seconds From b8072aaacb581b9655545b9960456c239b7c59af Mon Sep 17 00:00:00 2001 From: daan Date: Thu, 16 Jan 2020 03:54:51 -0800 Subject: [PATCH 14/46] fix debug build --- src/heap.c | 34 +++++++++++++++++----------------- src/segment.c | 7 +++++-- 2 files changed, 22 insertions(+), 19 deletions(-) diff --git a/src/heap.c b/src/heap.c index 9f2a4457..12aa0840 100644 --- a/src/heap.c +++ b/src/heap.c @@ -56,7 +56,8 @@ static bool mi_heap_page_is_valid(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_ mi_assert_expensive(_mi_page_is_valid(page)); return true; } - +#endif +#if MI_DEBUG>=3 static bool 
mi_heap_is_valid(mi_heap_t* heap) { mi_assert_internal(heap!=NULL); mi_heap_visit_pages(heap, &mi_heap_page_is_valid, NULL, NULL); @@ -111,7 +112,7 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect) { if (!mi_heap_is_initialized(heap)) return; _mi_deferred_free(heap, collect > NORMAL); - + // collect (some) abandoned pages if (collect >= NORMAL && !heap->no_reclaim) { if (collect == NORMAL) { @@ -123,8 +124,8 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect) collect == FORCE #else collect >= FORCE - #endif - && _mi_is_main_thread() && mi_heap_is_backing(heap)) + #endif + && _mi_is_main_thread() && mi_heap_is_backing(heap)) { // the main thread is abandoned, try to free all abandoned segments. // if all memory is freed by now, all segments should be freed. @@ -135,19 +136,19 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect) // if abandoning, mark all pages to no longer add to delayed_free if (collect == ABANDON) { //for (mi_page_t* page = heap->pages[MI_BIN_FULL].first; page != NULL; page = page->next) { - // _mi_page_use_delayed_free(page, false); // set thread_free.delayed to MI_NO_DELAYED_FREE - //} + // _mi_page_use_delayed_free(page, false); // set thread_free.delayed to MI_NO_DELAYED_FREE + //} mi_heap_visit_pages(heap, &mi_heap_page_never_delayed_free, NULL, NULL); } - // free thread delayed blocks. + // free thread delayed blocks. // (if abandoning, after this there are no more local references into the pages.) _mi_heap_delayed_free(heap); // collect all pages owned by this thread mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL); mi_assert_internal( collect != ABANDON || heap->thread_delayed_free == NULL ); - + // collect segment caches if (collect >= FORCE) { _mi_segment_thread_collect(&heap->tld->segments); @@ -177,7 +178,7 @@ void mi_collect(bool force) mi_attr_noexcept { ----------------------------------------------------------- */ mi_heap_t* mi_heap_get_default(void) { - mi_thread_init(); + mi_thread_init(); return mi_get_default_heap(); } @@ -198,7 +199,7 @@ mi_heap_t* mi_heap_new(void) { heap->tld = bheap->tld; heap->thread_id = _mi_thread_id(); _mi_random_split(&bheap->random, &heap->random); - heap->cookie = _mi_heap_random_next(heap) | 1; + heap->cookie = _mi_heap_random_next(heap) | 1; heap->key[0] = _mi_heap_random_next(heap); heap->key[1] = _mi_heap_random_next(heap); heap->no_reclaim = true; // don't reclaim abandoned pages or otherwise destroy is unsafe @@ -226,7 +227,7 @@ static void mi_heap_reset_pages(mi_heap_t* heap) { static void mi_heap_free(mi_heap_t* heap) { mi_assert_internal(mi_heap_is_initialized(heap)); if (mi_heap_is_backing(heap)) return; // dont free the backing heap - + // reset default if (mi_heap_is_default(heap)) { _mi_heap_set_default_direct(heap->tld->heap_backing); @@ -247,7 +248,7 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_ UNUSED(pq); // ensure no more thread_delayed_free will be added - _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false); + _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false); // stats const size_t bsize = mi_page_block_size(page); @@ -311,7 +312,7 @@ static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) { if (from==NULL || from->page_count == 0) return; // unfull all full pages in the `from` heap - mi_page_t* page = from->pages[MI_BIN_FULL].first; + mi_page_t* page = from->pages[MI_BIN_FULL].first; while (page != NULL) { mi_page_t* next = page->next; _mi_page_unfull(page); @@ 
-323,7 +324,7 @@ static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) { _mi_heap_delayed_free(from); // transfer all pages by appending the queues; this will set - // a new heap field which is ok as all pages are unfull'd and thus + // a new heap field which is ok as all pages are unfull'd and thus // other threads won't access this field anymore (see `mi_free_block_mt`) for (size_t i = 0; i < MI_BIN_FULL; i++) { mi_page_queue_t* pq = &heap->pages[i]; @@ -334,7 +335,7 @@ static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) { } mi_assert_internal(from->thread_delayed_free == NULL); mi_assert_internal(from->page_count == 0); - + // and reset the `from` heap mi_heap_reset_pages(from); } @@ -362,7 +363,7 @@ mi_heap_t* mi_heap_set_default(mi_heap_t* heap) { mi_assert(mi_heap_is_initialized(heap)); if (!mi_heap_is_initialized(heap)) return NULL; mi_assert_expensive(mi_heap_is_valid(heap)); - mi_heap_t* old = mi_get_default_heap(); + mi_heap_t* old = mi_get_default_heap(); _mi_heap_set_default_direct(heap); return old; } @@ -534,4 +535,3 @@ bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_vis mi_visit_blocks_args_t args = { visit_blocks, visitor, arg }; return mi_heap_visit_areas(heap, &mi_heap_area_visitor, &args); } - diff --git a/src/segment.c b/src/segment.c index ee1de005..3f99c790 100644 --- a/src/segment.c +++ b/src/segment.c @@ -135,7 +135,7 @@ static size_t mi_segment_page_size(const mi_segment_t* segment) { } -#if (MI_DEBUG>=3) +#if (MI_DEBUG>=2) static bool mi_pages_reset_contains(const mi_page_t* page, mi_segments_tld_t* tld) { mi_page_t* p = tld->pages_reset.first; while (p != NULL) { @@ -144,7 +144,9 @@ static bool mi_pages_reset_contains(const mi_page_t* page, mi_segments_tld_t* tl } return false; } +#endif +#if (MI_DEBUG>=3) static bool mi_segment_is_valid(const mi_segment_t* segment, mi_segments_tld_t* tld) { mi_assert_internal(segment != NULL); mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie); @@ -169,6 +171,7 @@ static bool mi_segment_is_valid(const mi_segment_t* segment, mi_segments_tld_t* #endif static bool mi_page_not_in_queue(const mi_page_t* page, mi_segments_tld_t* tld) { + mi_assert_internal(page != NULL); if (page->next != NULL || page->prev != NULL) { mi_assert_internal(mi_pages_reset_contains(page, tld)); return false; @@ -1052,6 +1055,6 @@ mi_page_t* _mi_segment_page_alloc(size_t block_size, mi_segments_tld_t* tld, mi_ mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld)); mi_assert_internal(page == NULL || (mi_segment_page_size(_mi_page_segment(page)) - (MI_SECURE == 0 ? 
0 : _mi_os_page_size())) >= block_size); mi_reset_delayed(tld); - mi_assert_internal(mi_page_not_in_queue(page, tld)); + mi_assert_internal(page == NULL || mi_page_not_in_queue(page, tld)); return page; } From 8d8f355ed0190702edcce7d16d9fdad7466ae2b7 Mon Sep 17 00:00:00 2001 From: daan Date: Thu, 16 Jan 2020 11:25:02 -0800 Subject: [PATCH 15/46] add option to reset eagerly when a segment is abandoned --- include/mimalloc.h | 1 + src/options.c | 1 + src/segment.c | 19 ++++++++++++------- 3 files changed, 14 insertions(+), 7 deletions(-) diff --git a/include/mimalloc.h b/include/mimalloc.h index fe09c7f2..e45b7e4d 100644 --- a/include/mimalloc.h +++ b/include/mimalloc.h @@ -273,6 +273,7 @@ typedef enum mi_option_e { mi_option_reserve_huge_os_pages, mi_option_segment_cache, mi_option_page_reset, + mi_option_abandoned_page_reset, mi_option_segment_reset, mi_option_eager_commit_delay, mi_option_reset_delay, diff --git a/src/options.c b/src/options.c index f1d8205f..c12c77e0 100644 --- a/src/options.c +++ b/src/options.c @@ -68,6 +68,7 @@ static mi_option_desc_t options[_mi_option_last] = { 0, UNINIT, MI_OPTION(reserve_huge_os_pages) }, { 0, UNINIT, MI_OPTION(segment_cache) }, // cache N segments per thread { 0, UNINIT, MI_OPTION(page_reset) }, // reset page memory on free + { 0, UNINIT, MI_OPTION(abandoned_page_reset) },// reset free page memory when a thread terminates { 0, UNINIT, MI_OPTION(segment_reset) }, // reset segment memory on free (needs eager commit) { 0, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed { 100, UNINIT, MI_OPTION(reset_delay) }, // reset delay in milli-seconds diff --git a/src/segment.c b/src/segment.c index 3f99c790..ea030d7a 100644 --- a/src/segment.c +++ b/src/segment.c @@ -326,12 +326,15 @@ static void mi_pages_reset_remove(mi_page_t* page, mi_segments_tld_t* tld) { page->used = 0; } -static void mi_pages_reset_remove_all_in_segment(mi_segment_t* segment, mi_segments_tld_t* tld) { - if (segment->mem_is_fixed) return; +static void mi_pages_reset_remove_all_in_segment(mi_segment_t* segment, bool force_reset, mi_segments_tld_t* tld) { + if (segment->mem_is_fixed) return; // never reset in huge OS pages for (size_t i = 0; i < segment->capacity; i++) { mi_page_t* page = &segment->pages[i]; if (!page->segment_in_use && !page->is_reset) { mi_pages_reset_remove(page, tld); + if (force_reset) { + mi_page_reset(segment, page, 0, tld); + } } else { mi_assert_internal(mi_page_not_in_queue(page,tld)); @@ -668,9 +671,11 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind, static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) { - UNUSED(force); - mi_assert(segment != NULL); - mi_pages_reset_remove_all_in_segment(segment, tld); + UNUSED(force); + mi_assert(segment != NULL); + // note: don't reset pages even on abandon as the whole segment is freed? 
(and ready for reuse) + bool force_reset = (force && mi_option_is_enabled(mi_option_abandoned_page_reset)); + mi_pages_reset_remove_all_in_segment(segment, force_reset, tld); mi_segment_remove_from_free_queue(segment,tld); mi_assert_expensive(!mi_segment_queue_contains(&tld->small_free, segment)); @@ -840,8 +845,8 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) { mi_assert_expensive(mi_segment_is_valid(segment,tld)); // remove the segment from the free page queue if needed - mi_reset_delayed(tld); - mi_pages_reset_remove_all_in_segment(segment, tld); // do not force reset on free pages in an abandoned segment, as it is already done in segment_thread_collect + mi_reset_delayed(tld); + mi_pages_reset_remove_all_in_segment(segment, mi_option_is_enabled(mi_option_abandoned_page_reset), tld); mi_segment_remove_from_free_queue(segment, tld); mi_assert_internal(segment->next == NULL && segment->prev == NULL); From 4e91eab8fca9dfa95f74a7205f8f216dd9f22f02 Mon Sep 17 00:00:00 2001 From: daan Date: Thu, 16 Jan 2020 14:12:37 -0800 Subject: [PATCH 16/46] specialize mi_mallocn for count=1 --- src/alloc.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/alloc.c b/src/alloc.c index 621fb0db..be63f86a 100644 --- a/src/alloc.c +++ b/src/alloc.c @@ -443,7 +443,12 @@ mi_decl_allocator void* mi_calloc(size_t count, size_t size) mi_attr_noexcept { // Uninitialized `calloc` extern mi_decl_allocator void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept { size_t total; - if (mi_mul_overflow(count,size,&total)) return NULL; + if (count==1) { + total = size; + } + else if (mi_mul_overflow(count, size, &total)) { + return NULL; + } return mi_heap_malloc(heap, total); } From 24f8bcbc8f4236b2bd37b1c8bfc169ec9a941942 Mon Sep 17 00:00:00 2001 From: daan Date: Thu, 16 Jan 2020 14:25:09 -0800 Subject: [PATCH 17/46] add explicit calling convention to registered functions --- include/mimalloc.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/mimalloc.h b/include/mimalloc.h index e45b7e4d..de4282da 100644 --- a/include/mimalloc.h +++ b/include/mimalloc.h @@ -108,10 +108,10 @@ mi_decl_export mi_decl_allocator void* mi_reallocf(void* p, size_t newsize) mi_decl_export size_t mi_usable_size(const void* p) mi_attr_noexcept; mi_decl_export size_t mi_good_size(size_t size) mi_attr_noexcept; -typedef void (mi_deferred_free_fun)(bool force, unsigned long long heartbeat, void* arg); +typedef void (mi_cdecl mi_deferred_free_fun)(bool force, unsigned long long heartbeat, void* arg); mi_decl_export void mi_register_deferred_free(mi_deferred_free_fun* deferred_free, void* arg) mi_attr_noexcept; -typedef void (mi_output_fun)(const char* msg, void* arg); +typedef void (mi_cdecl mi_output_fun)(const char* msg, void* arg); mi_decl_export void mi_register_output(mi_output_fun* out, void* arg) mi_attr_noexcept; mi_decl_export void mi_collect(bool force) mi_attr_noexcept; From 1b2b7404f7770022ec806a294fa35e145cb93849 Mon Sep 17 00:00:00 2001 From: Kirsten Lee Date: Thu, 16 Jan 2020 14:54:13 -0800 Subject: [PATCH 18/46] flip the order of includes for ease of use --- include/mimalloc-stl-allocator.h | 1 - include/mimalloc.h | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/include/mimalloc-stl-allocator.h b/include/mimalloc-stl-allocator.h index 11ba30fb..a98e398b 100644 --- a/include/mimalloc-stl-allocator.h +++ b/include/mimalloc-stl-allocator.h @@ -7,7 +7,6 @@ This header can be used to hook mimalloc into STL 
containers in place of std::allocator. -----------------------------------------------------------------------------*/ -#include #include // true_type #pragma warning(disable: 4100) diff --git a/include/mimalloc.h b/include/mimalloc.h index 7f26896c..988a080d 100644 --- a/include/mimalloc.h +++ b/include/mimalloc.h @@ -73,6 +73,8 @@ terms of the MIT license. A copy of the license can be found in the file #include // bool #ifdef __cplusplus +#include + extern "C" { #endif From 526bee6843e2d80a57671f68115c504138791cd0 Mon Sep 17 00:00:00 2001 From: Kirsten Lee Date: Thu, 16 Jan 2020 15:17:15 -0800 Subject: [PATCH 19/46] merge stl and main header --- CMakeLists.txt | 1 - ide/vs2017/mimalloc-override.vcxproj | 1 - ide/vs2017/mimalloc.vcxproj | 1 - ide/vs2019/mimalloc-override.vcxproj | 1 - ide/vs2019/mimalloc.vcxproj | 1 - include/mimalloc-stl-allocator.h | 43 ---------------------------- include/mimalloc.h | 38 +++++++++++++++++++++++- test/test-api.c | 1 - 8 files changed, 37 insertions(+), 50 deletions(-) delete mode 100644 include/mimalloc-stl-allocator.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 93560951..467fad95 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -187,7 +187,6 @@ install(TARGETS mimalloc-static EXPORT mimalloc DESTINATION ${mi_install_dir}) install(FILES include/mimalloc.h DESTINATION ${mi_install_dir}/include) install(FILES include/mimalloc-override.h DESTINATION ${mi_install_dir}/include) install(FILES include/mimalloc-new-delete.h DESTINATION ${mi_install_dir}/include) -install(FILES include/mimalloc-stl-allocator.h DESTINATION ${mi_install_dir}/include) install(FILES cmake/mimalloc-config.cmake DESTINATION ${mi_install_dir}/cmake) install(FILES cmake/mimalloc-config-version.cmake DESTINATION ${mi_install_dir}/cmake) install(EXPORT mimalloc DESTINATION ${mi_install_dir}/cmake) diff --git a/ide/vs2017/mimalloc-override.vcxproj b/ide/vs2017/mimalloc-override.vcxproj index e0a6d85b..863195a3 100644 --- a/ide/vs2017/mimalloc-override.vcxproj +++ b/ide/vs2017/mimalloc-override.vcxproj @@ -214,7 +214,6 @@ - diff --git a/ide/vs2017/mimalloc.vcxproj b/ide/vs2017/mimalloc.vcxproj index ff6c8edb..064a13dc 100644 --- a/ide/vs2017/mimalloc.vcxproj +++ b/ide/vs2017/mimalloc.vcxproj @@ -239,7 +239,6 @@ - diff --git a/ide/vs2019/mimalloc-override.vcxproj b/ide/vs2019/mimalloc-override.vcxproj index e6416e05..950a0a1a 100644 --- a/ide/vs2019/mimalloc-override.vcxproj +++ b/ide/vs2019/mimalloc-override.vcxproj @@ -214,7 +214,6 @@ - diff --git a/ide/vs2019/mimalloc.vcxproj b/ide/vs2019/mimalloc.vcxproj index ffede6ca..17adc958 100644 --- a/ide/vs2019/mimalloc.vcxproj +++ b/ide/vs2019/mimalloc.vcxproj @@ -239,7 +239,6 @@ - diff --git a/include/mimalloc-stl-allocator.h b/include/mimalloc-stl-allocator.h deleted file mode 100644 index a98e398b..00000000 --- a/include/mimalloc-stl-allocator.h +++ /dev/null @@ -1,43 +0,0 @@ -#pragma once -#ifndef MIMALLOC_STL_ALLOCATOR_H -#define MIMALLOC_STL_ALLOCATOR_H - -#ifdef __cplusplus -/* ---------------------------------------------------------------------------- -This header can be used to hook mimalloc into STL containers in place of -std::allocator. 
------------------------------------------------------------------------------*/ -#include // true_type - -#pragma warning(disable: 4100) - -template -struct mi_stl_allocator { - typedef T value_type; - - using propagate_on_container_copy_assignment = std::true_type; - using propagate_on_container_move_assignment = std::true_type; - using propagate_on_container_swap = std::true_type; - using is_always_equal = std::true_type; - - mi_stl_allocator() noexcept {} - mi_stl_allocator(const mi_stl_allocator& other) noexcept {} - template - mi_stl_allocator(const mi_stl_allocator& other) noexcept {} - - T* allocate(size_t n, const void* hint = 0) { - return (T*)mi_mallocn(n, sizeof(T)); - } - - void deallocate(T* p, size_t n) { - mi_free(p); - } -}; - -template -bool operator==(const mi_stl_allocator& lhs, const mi_stl_allocator& rhs) noexcept { return true; } -template -bool operator!=(const mi_stl_allocator& lhs, const mi_stl_allocator& rhs) noexcept { return false; } - -#endif // __cplusplus -#endif // MIMALLOC_STL_ALLOCATOR_H diff --git a/include/mimalloc.h b/include/mimalloc.h index 988a080d..e664b668 100644 --- a/include/mimalloc.h +++ b/include/mimalloc.h @@ -73,7 +73,7 @@ terms of the MIT license. A copy of the license can be found in the file #include // bool #ifdef __cplusplus -#include +#include // true_type extern "C" { #endif @@ -328,5 +328,41 @@ mi_decl_export void* mi_new_aligned_nothrow(size_t n, size_t alignment) mi_attr_ } #endif +#ifdef __cplusplus + +// ------------------------------------------------------ +// STL allocator - an extension to hook mimalloc into STL +// containers in place of std::allocator. +// ------------------------------------------------------ + +#pragma warning(disable: 4100) +template +struct mi_stl_allocator { + typedef T value_type; + + using propagate_on_container_copy_assignment = std::true_type; + using propagate_on_container_move_assignment = std::true_type; + using propagate_on_container_swap = std::true_type; + using is_always_equal = std::true_type; + + mi_stl_allocator() noexcept {} + mi_stl_allocator(const mi_stl_allocator& other) noexcept {} + template + mi_stl_allocator(const mi_stl_allocator& other) noexcept {} + + T* allocate(size_t n, const void* hint = 0) { + return (T*)mi_mallocn(n, sizeof(T)); + } + + void deallocate(T* p, size_t n) { + mi_free(p); + } +}; + +template +bool operator==(const mi_stl_allocator& lhs, const mi_stl_allocator& rhs) noexcept { return true; } +template +bool operator!=(const mi_stl_allocator& lhs, const mi_stl_allocator& rhs) noexcept { return false; } +#endif #endif diff --git a/test/test-api.c b/test/test-api.c index f93884d0..060efc44 100644 --- a/test/test-api.c +++ b/test/test-api.c @@ -32,7 +32,6 @@ we therefore test the API over various inputs. 
Please add more tests :-) #include "mimalloc.h" #include "mimalloc-internal.h" -#include "mimalloc-stl-allocator.h" // --------------------------------------------------------------------------- // Test macros: CHECK(name,predicate) and CHECK_BODY(name,body) From 7a9502973d4c20bd5ac962a9b6e5869494990025 Mon Sep 17 00:00:00 2001 From: daan Date: Thu, 16 Jan 2020 15:57:11 -0800 Subject: [PATCH 20/46] rearrange STL allocator code: remove pragma, ifdef for C++11 --- ide/vs2019/mimalloc.vcxproj | 2 +- include/mimalloc-internal.h | 5 ++++ include/mimalloc.h | 46 +++++++++++++++---------------------- src/alloc.c | 7 +----- 4 files changed, 25 insertions(+), 35 deletions(-) diff --git a/ide/vs2019/mimalloc.vcxproj b/ide/vs2019/mimalloc.vcxproj index f59de292..037e380d 100644 --- a/ide/vs2019/mimalloc.vcxproj +++ b/ide/vs2019/mimalloc.vcxproj @@ -111,7 +111,7 @@ - Level2 + Level4 Disabled true true diff --git a/include/mimalloc-internal.h b/include/mimalloc-internal.h index a9391a40..500764ed 100644 --- a/include/mimalloc-internal.h +++ b/include/mimalloc-internal.h @@ -167,6 +167,11 @@ bool _mi_page_is_valid(mi_page_t* page); // Overflow detecting multiply static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) { + // quick check for the case where count is one (common for C++ allocators) + if (count==1) { + *total = size; + return false; + } #if __has_builtin(__builtin_umul_overflow) || __GNUC__ >= 5 #include // UINT_MAX, ULONG_MAX #if (SIZE_MAX == UINT_MAX) diff --git a/include/mimalloc.h b/include/mimalloc.h index 29481c80..4c5b0cad 100644 --- a/include/mimalloc.h +++ b/include/mimalloc.h @@ -73,8 +73,6 @@ terms of the MIT license. A copy of the license can be found in the file #include // bool #ifdef __cplusplus -#include // true_type - extern "C" { #endif @@ -337,41 +335,33 @@ mi_decl_export void* mi_new_aligned_nothrow(size_t n, size_t alignment) mi_attr_ } #endif +// --------------------------------------------------------------------------------------------- +// Implement the C++ std::allocator interface for use in STL containers. +// (note: see `mimalloc-new-delete.h` for overriding the new/delete operators globally) +// --------------------------------------------------------------------------------------------- #ifdef __cplusplus -// ------------------------------------------------------ -// STL allocator - an extension to hook mimalloc into STL -// containers in place of std::allocator. 
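A minimal usage sketch (an editorial illustration, not part of the patch itself), assuming the `mi_stl_allocator` shown in the hunks above is reachable by including `mimalloc.h` as this patch arranges: a standard container is routed through mimalloc simply by naming `mi_stl_allocator` as its allocator type; the container and values below are illustrative only.
```
// hypothetical example: route an STL container's memory through mimalloc
#include <mimalloc.h>
#include <vector>

int main() {
  std::vector<int, mi_stl_allocator<int>> v;   // allocate() calls mi_mallocn, deallocate() calls mi_free_size
  for (int i = 0; i < 100; i++) v.push_back(i);
  return 0;                                    // memory is returned to mimalloc when v is destroyed
}
```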
-// ------------------------------------------------------ +#if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11 +#include // true_type +#endif -#pragma warning(disable: 4100) -template -struct mi_stl_allocator { +template struct mi_stl_allocator { typedef T value_type; - +#if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11 using propagate_on_container_copy_assignment = std::true_type; using propagate_on_container_move_assignment = std::true_type; using propagate_on_container_swap = std::true_type; using is_always_equal = std::true_type; - - mi_stl_allocator() noexcept {} - mi_stl_allocator(const mi_stl_allocator& other) noexcept {} - template - mi_stl_allocator(const mi_stl_allocator& other) noexcept {} - - T* allocate(size_t n, const void* hint = 0) { - return (T*)mi_mallocn(n, sizeof(T)); - } - - void deallocate(T* p, size_t n) { - mi_free(p); - } +#endif + mi_stl_allocator() mi_attr_noexcept {} + mi_stl_allocator(const mi_stl_allocator& other) mi_attr_noexcept { (void)other; } + template mi_stl_allocator(const mi_stl_allocator& other) mi_attr_noexcept { (void)other; } + T* allocate(size_t n, const void* hint = 0) { (void)hint; return (T*)mi_mallocn(n, sizeof(T)); } + void deallocate(T* p, size_t n) { mi_free_size(p,n); } }; -template -bool operator==(const mi_stl_allocator& lhs, const mi_stl_allocator& rhs) noexcept { return true; } -template -bool operator!=(const mi_stl_allocator& lhs, const mi_stl_allocator& rhs) noexcept { return false; } -#endif +template bool operator==(const mi_stl_allocator& lhs, const mi_stl_allocator& rhs) mi_attr_noexcept { (void)lhs; (void)rhs; return true; } +template bool operator!=(const mi_stl_allocator& lhs, const mi_stl_allocator& rhs) mi_attr_noexcept { (void)lhs; (void)rhs; return false; } +#endif // __cplusplus #endif diff --git a/src/alloc.c b/src/alloc.c index be63f86a..d66c629b 100644 --- a/src/alloc.c +++ b/src/alloc.c @@ -443,12 +443,7 @@ mi_decl_allocator void* mi_calloc(size_t count, size_t size) mi_attr_noexcept { // Uninitialized `calloc` extern mi_decl_allocator void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept { size_t total; - if (count==1) { - total = size; - } - else if (mi_mul_overflow(count, size, &total)) { - return NULL; - } + if (mi_mul_overflow(count, size, &total)) return NULL; return mi_heap_malloc(heap, total); } From a0bee081dde0ed7ef102f484c1436f9dd292522b Mon Sep 17 00:00:00 2001 From: daan Date: Thu, 16 Jan 2020 15:58:48 -0800 Subject: [PATCH 21/46] use proper C++11 check for noexcept attribute --- ide/vs2019/mimalloc.vcxproj | 2 +- include/mimalloc.h | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ide/vs2019/mimalloc.vcxproj b/ide/vs2019/mimalloc.vcxproj index 037e380d..f59de292 100644 --- a/ide/vs2019/mimalloc.vcxproj +++ b/ide/vs2019/mimalloc.vcxproj @@ -111,7 +111,7 @@ - Level4 + Level2 Disabled true true diff --git a/include/mimalloc.h b/include/mimalloc.h index 4c5b0cad..59f394a7 100644 --- a/include/mimalloc.h +++ b/include/mimalloc.h @@ -15,10 +15,10 @@ terms of the MIT license. 
A copy of the license can be found in the file // ------------------------------------------------------ #ifdef __cplusplus - #if (__GNUC__ <= 5) || (_MSC_VER <= 1900) - #define mi_attr_noexcept throw() - #else + #if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11 #define mi_attr_noexcept noexcept + #else + #define mi_attr_noexcept throw() #endif #else #define mi_attr_noexcept From 3447debf26e41e25e2f18908ebeeb3b99ca93fa0 Mon Sep 17 00:00:00 2001 From: daan Date: Thu, 16 Jan 2020 16:08:23 -0800 Subject: [PATCH 22/46] add Linux gcc C++ build to azure pipeline --- azure-pipelines.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index f88b2e1a..844a4d08 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -61,6 +61,11 @@ jobs: CXX: g++ BuildType: secure cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release -DMI_SECURE=ON + Debug++: + CC: gcc + CXX: g++ + BuildType: debug + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON -DMI_USE_CXX=ON Debug Clang: CC: clang CXX: clang++ From 7a98a461a333b195b8ab090484a4e4be4fcd05fb Mon Sep 17 00:00:00 2001 From: daan Date: Thu, 16 Jan 2020 16:21:59 -0800 Subject: [PATCH 23/46] fix type of bitmap field to avoid C++ error on older gcc --- src/bitmap.inc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/bitmap.inc.c b/src/bitmap.inc.c index 11ada472..c3813a44 100644 --- a/src/bitmap.inc.c +++ b/src/bitmap.inc.c @@ -113,7 +113,7 @@ static inline bool mi_bitmap_try_claim_field(mi_bitmap_t bitmap, size_t bitmap_f mi_assert_internal(bitmap_fields > idx); UNUSED(bitmap_fields); mi_assert_internal(bitidx + count <= MI_BITMAP_FIELD_BITS); - mi_bitmap_field_t field = mi_atomic_read_relaxed(&bitmap[idx]); + uintptr_t field = mi_atomic_read_relaxed(&bitmap[idx]); if ((field & mask) == 0) { // free? if (mi_atomic_cas_strong(&bitmap[idx], (field|mask), field)) { // claimed! @@ -221,7 +221,7 @@ static inline bool mi_bitmap_is_claimedx(mi_bitmap_t bitmap, size_t bitmap_field const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx); const uintptr_t mask = mi_bitmap_mask_(count, bitidx); mi_assert_internal(bitmap_fields > idx); UNUSED(bitmap_fields); - mi_bitmap_field_t field = mi_atomic_read_relaxed(&bitmap[idx]); + uintptr_t field = mi_atomic_read_relaxed(&bitmap[idx]); if (any_ones != NULL) *any_ones = ((field & mask) != 0); return ((field & mask) == mask); } From da709bcf110c74356ba5f0fc3267e7ccb47bf91c Mon Sep 17 00:00:00 2001 From: daan Date: Thu, 16 Jan 2020 16:30:44 -0800 Subject: [PATCH 24/46] test for gcc version for aligned new/delete overrides --- src/alloc-override.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/alloc-override.c b/src/alloc-override.c index 002374bb..89c5126a 100644 --- a/src/alloc-override.c +++ b/src/alloc-override.c @@ -98,7 +98,7 @@ terms of the MIT license. 
A copy of the license can be found in the file void operator delete[](void* p, std::size_t n) MI_FORWARD02(mi_free_size,p,n); #endif - #if (__cplusplus > 201402L || defined(__cpp_aligned_new)) + #if (__cplusplus > 201402L || defined(__cpp_aligned_new)) && (!defined(__GNUC__) || (__GNUC__ > 5)) void operator delete (void* p, std::align_val_t al) noexcept { mi_free_aligned(p, static_cast(al)); } void operator delete[](void* p, std::align_val_t al) noexcept { mi_free_aligned(p, static_cast(al)); } void operator delete (void* p, std::size_t n, std::align_val_t al) noexcept { mi_free_size_aligned(p, n, static_cast(al)); }; From 3f8b8b7a55b2113f75c9eb8173841f1d9c0de676 Mon Sep 17 00:00:00 2001 From: daan Date: Thu, 16 Jan 2020 16:45:34 -0800 Subject: [PATCH 25/46] initialize thread_delayed_free field atomically --- src/init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/init.c b/src/init.c index d99eeb40..b8422c2f 100644 --- a/src/init.c +++ b/src/init.c @@ -122,7 +122,7 @@ mi_heap_t _mi_heap_main = { &tld_main, MI_SMALL_PAGES_EMPTY, MI_PAGE_QUEUES_EMPTY, - NULL, + ATOMIC_VAR_INIT(NULL), 0, // thread id MI_INIT_COOKIE, // initial cookie { MI_INIT_COOKIE, MI_INIT_COOKIE }, // the key of the main heap can be fixed (unlike page keys that need to be secure!) From 5c82b459d6401dec4b92b4ae81248f35ca3fa1fb Mon Sep 17 00:00:00 2001 From: daan Date: Thu, 16 Jan 2020 16:49:59 -0800 Subject: [PATCH 26/46] enable debug clang++ compilation in azure pipelines --- azure-pipelines.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 844a4d08..8ff0b629 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -81,6 +81,11 @@ jobs: CXX: clang++ BuildType: secure-clang cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release -DMI_SECURE=ON + Debug++ Clang: + CC: clang + CXX: clang++ + BuildType: debug-clang + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON -DMI_USE_CXX=ON steps: - task: CMake@1 inputs: From f750e793ff60c6353d50ae106579d9317a17a54f Mon Sep 17 00:00:00 2001 From: daan Date: Thu, 16 Jan 2020 16:52:44 -0800 Subject: [PATCH 27/46] ensure unique build names for C++ azure pipeline jobs --- azure-pipelines.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 8ff0b629..6c7bad96 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -64,7 +64,7 @@ jobs: Debug++: CC: gcc CXX: g++ - BuildType: debug + BuildType: debug-cxx cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON -DMI_USE_CXX=ON Debug Clang: CC: clang @@ -84,7 +84,7 @@ jobs: Debug++ Clang: CC: clang CXX: clang++ - BuildType: debug-clang + BuildType: debug-clang-cxx cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON -DMI_USE_CXX=ON steps: - task: CMake@1 From c6037ac8f2c92cc07248051e58e88bf3aac7cf05 Mon Sep 17 00:00:00 2001 From: daan Date: Thu, 16 Jan 2020 16:56:02 -0800 Subject: [PATCH 28/46] fix assertion index --- src/page.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/page.c b/src/page.c index 02f10238..84baf306 100644 --- a/src/page.c +++ b/src/page.c @@ -626,8 +626,8 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi mi_assert_internal(page->retire_expire == 0); mi_assert_internal(!mi_page_has_aligned(page)); #if (MI_ENCODE_FREELIST) + mi_assert_internal(page->key[0] != 0); mi_assert_internal(page->key[1] != 0); - mi_assert_internal(page->key[2] != 0); #endif mi_assert_expensive(mi_page_is_valid_init(page)); From 
b84b11b6a90ab26d581c4a569748cef80be38276 Mon Sep 17 00:00:00 2001 From: daan Date: Thu, 16 Jan 2020 18:11:29 -0800 Subject: [PATCH 29/46] update readme --- readme.md | 37 +++++++++++++++++++++++++------------ 1 file changed, 25 insertions(+), 12 deletions(-) diff --git a/readme.md b/readme.md index 2cbd1baf..b6258cfc 100644 --- a/readme.md +++ b/readme.md @@ -56,8 +56,10 @@ Enjoy! ### Releases -* 2020-01-15, `v1.3.0`: stable release 1.3: bug fixes, improved randomness and stronger -free list encoding in secure mode. +* 2020-01-XX, `v1.4.0`: stable release 1.4: delayed OS page reset for (much) better performance + with page reset enabled, more eager concurrent free, addition of STL allocator. +* 2020-01-15, `v1.3.0`: stable release 1.3: bug fixes, improved randomness and [stronger +free list encoding](https://github.com/microsoft/mimalloc/blob/783e3377f79ee82af43a0793910a9f2d01ac7863/include/mimalloc-internal.h#L396) in secure mode. * 2019-12-22, `v1.2.2`: stable release 1.2: minor updates. * 2019-11-22, `v1.2.0`: stable release 1.2: bug fixes, improved secure mode (free list corruption checks, double free mitigation). Improved dynamic overriding on Windows. * 2019-10-07, `v1.1.0`: stable release 1.1. @@ -130,7 +132,7 @@ mimalloc uses only safe OS calls (`mmap` and `VirtualAlloc`) and can co-exist with other allocators linked to the same program. If you use `cmake`, you can simply use: ``` -find_package(mimalloc 1.0 REQUIRED) +find_package(mimalloc 1.4 REQUIRED) ``` in your `CMakeLists.txt` to find a locally installed mimalloc. Then use either: ``` @@ -144,7 +146,9 @@ to link with the static library. See `test\CMakeLists.txt` for an example. For best performance in C++ programs, it is also recommended to override the global `new` and `delete` operators. For convenience, mimalloc provides -[mimalloc-new-delete.h](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc-new-delete.h) which does this for you -- just include it in a single(!) source file in your project. +[`mimalloc-new-delete.h`](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc-new-delete.h) which does this for you -- just include it in a single(!) source file in your project. +In C++, mimalloc also provides the `mi_stl_allocator` struct which implements the `std::allocator` +interface. You can pass environment variables to print verbose messages (`MIMALLOC_VERBOSE=1`) and statistics (`MIMALLOC_SHOW_STATS=1`) (in the debug version): @@ -195,11 +199,15 @@ or via environment variables. - `MIMALLOC_SHOW_STATS=1`: show statistics when the program terminates. - `MIMALLOC_VERBOSE=1`: show verbose messages. - `MIMALLOC_SHOW_ERRORS=1`: show error and warning messages. +- `MIMALLOC_PAGE_RESET=1`: reset (or purge) OS pages when not in use. This can reduce + memory fragmentation in long running (server) programs. If performance is impacted, + `MIMALLOC_RESET_DELAY=` can be set higher (100ms by default) to make the page + reset occur less frequently. - `MIMALLOC_LARGE_OS_PAGES=1`: use large OS pages when available; for some workloads this can significantly improve performance. Use `MIMALLOC_VERBOSE` to check if the large OS pages are enabled -- usually one needs to explicitly allow large OS pages (as on [Windows][windows-huge] and [Linux][linux-huge]). However, sometimes the OS is very slow to reserve contiguous physical memory for large OS pages so use with care on systems that - can have fragmented memory.
+ can have fragmented memory (for that reason, we generally recommend to use `MIMALLOC_RESERVE_HUGE_OS_PAGES` instead when possible). - `MIMALLOC_EAGER_REGION_COMMIT=1`: on Windows, commit large (256MiB) regions eagerly. On Windows, these regions show in the working set even though usually just a small part is committed to physical memory. This is why it turned off by default on Windows as it looks not good in the task manager. However, in reality it is always better @@ -207,11 +215,16 @@ or via environment variables. - `MIMALLOC_RESERVE_HUGE_OS_PAGES=N`: where N is the number of 1GiB huge OS pages. This reserves the huge pages at startup and can give quite a performance improvement on long running workloads. Usually it is better to not use `MIMALLOC_LARGE_OS_PAGES` in combination with this setting. Just like large OS pages, use with care as reserving - contiguous physical memory can take a long time when memory is fragmented. Still experimental. + contiguous physical memory can take a long time when memory is fragmented. + Note that we usually need to explicitly enable huge OS pages (as on [Windows][windows-huge] and [Linux][linux-huge])). With huge OS pages, it may be beneficial to set the setting + `MIMALLOC_EAGER_COMMIT_DELAY=N` (with usually `N` as 1) to delay the initial `N` segments + of a thread to not allocate in the huge OS pages; this prevents threads that are short lived + and allocate just a little to take up space in the huge OS page area (which cannot be reset). [linux-huge]: https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/5/html/tuning_and_optimizing_red_hat_enterprise_linux_for_oracle_9i_and_10g_databases/sect-oracle_9i_and_10g_tuning_guide-large_memory_optimization_big_pages_and_huge_pages-configuring_huge_pages_in_red_hat_enterprise_linux_4_or_5 [windows-huge]: https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/enable-the-lock-pages-in-memory-option-windows?view=sql-server-2017 + # Overriding Malloc Overriding the standard `malloc` can be done either _dynamically_ or _statically_. @@ -251,13 +264,13 @@ resolved to the _mimalloc_ library. Note that certain security restrictions may apply when doing this from the [shell](https://stackoverflow.com/questions/43941322/dyld-insert-libraries-ignored-when-calling-application-through-bash). -Note: unfortunately, at this time, dynamic overriding on macOS seems broken but it is actively worked on to fix this -(see issue [`#50`](https://github.com/microsoft/mimalloc/issues/50)). +Note: unfortunately, at this time, dynamic overriding on macOS seems broken but it is +actively worked on to fix this (see issue [`#50`](https://github.com/microsoft/mimalloc/issues/50)). ### Windows -On Windows you need to link your program explicitly with the mimalloc -DLL and use the C-runtime library as a DLL (using the `/MD` or `/MDd` switch). +Overriding on Windows is robust but requires that you link your program explicitly with +the mimalloc DLL and use the C-runtime library as a DLL (using the `/MD` or `/MDd` switch). Moreover, you need to ensure the `mimalloc-redirect.dll` (or `mimalloc-redirect32.dll`) is available in the same folder as the main `mimalloc-override.dll` at runtime (as it is a dependency). 
The redirection DLL ensures that all calls to the C runtime malloc API get redirected to @@ -267,8 +280,8 @@ To ensure the mimalloc DLL is loaded at run-time it is easiest to insert some call to the mimalloc API in the `main` function, like `mi_version()` (or use the `/INCLUDE:mi_version` switch on the linker). See the `mimalloc-override-test` project for an example on how to use this. For best performance on Windows with C++, it -is highly recommended to also override the `new`/`delete` operations (as described -in the introduction). +is highly recommended to also override the `new`/`delete` operations (by including +[`mimalloc-new-delete.h`](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc-new-delete.h) a single(!) source file in your project). The environment variable `MIMALLOC_DISABLE_REDIRECT=1` can be used to disable dynamic overriding at run-time. Use `MIMALLOC_VERBOSE=1` to check if mimalloc was successfully redirected. From 6b7356a10aad3ff839689fbc2e50e11512d910b5 Mon Sep 17 00:00:00 2001 From: daan Date: Thu, 16 Jan 2020 18:12:01 -0800 Subject: [PATCH 30/46] make mimalloc compile with the highest warning level on msvc --- ide/vs2019/mimalloc.vcxproj | 2 +- include/mimalloc-internal.h | 1 + src/memory.c | 22 +++++++++++----------- 3 files changed, 13 insertions(+), 12 deletions(-) diff --git a/ide/vs2019/mimalloc.vcxproj b/ide/vs2019/mimalloc.vcxproj index f59de292..037e380d 100644 --- a/ide/vs2019/mimalloc.vcxproj +++ b/ide/vs2019/mimalloc.vcxproj @@ -111,7 +111,7 @@ - Level2 + Level4 Disabled true true diff --git a/include/mimalloc-internal.h b/include/mimalloc-internal.h index 500764ed..f039fc50 100644 --- a/include/mimalloc-internal.h +++ b/include/mimalloc-internal.h @@ -21,6 +21,7 @@ terms of the MIT license. A copy of the license can be found in the file #endif #if defined(_MSC_VER) +#pragma warning(disable:4127) // constant conditional due to MI_SECURE paths #define mi_decl_noinline __declspec(noinline) #define mi_attr_noreturn #elif defined(__GNUC__) || defined(__clang__) diff --git a/src/memory.c b/src/memory.c index ee84f755..9603a26f 100644 --- a/src/memory.c +++ b/src/memory.c @@ -80,7 +80,7 @@ typedef union mi_region_info_u { bool valid; bool is_large; short numa_node; - }; + } x; } mi_region_info_t; @@ -204,9 +204,9 @@ static bool mi_region_try_alloc_os(size_t blocks, bool commit, bool allow_large, // and share it mi_region_info_t info; - info.valid = true; - info.is_large = region_large; - info.numa_node = _mi_os_numa_node(tld); + info.x.valid = true; + info.x.is_large = region_large; + info.x.numa_node = (short)_mi_os_numa_node(tld); mi_atomic_write(&r->info, info.value); // now make it available to others *region = r; return true; @@ -224,12 +224,12 @@ static bool mi_region_is_suitable(const mem_region_t* region, int numa_node, boo // numa correct if (numa_node >= 0) { // use negative numa node to always succeed - int rnode = info.numa_node; + int rnode = info.x.numa_node; if (rnode >= 0 && rnode != numa_node) return false; } // check allow-large - if (!allow_large && info.is_large) return false; + if (!allow_large && info.x.is_large) return false; return true; } @@ -278,11 +278,11 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* is_large, bo mi_region_info_t info; info.value = mi_atomic_read(®ion->info); void* start = mi_atomic_read_ptr(®ion->start); - mi_assert_internal(!(info.is_large && !*is_large)); + mi_assert_internal(!(info.x.is_large && !*is_large)); mi_assert_internal(start != NULL); *is_zero = 
mi_bitmap_unclaim(®ion->dirty, 1, blocks, bit_idx); - *is_large = info.is_large; + *is_large = info.x.is_large; *memid = mi_memid_create(region, bit_idx); void* p = (uint8_t*)start + (mi_bitmap_index_bit_in_field(bit_idx) * MI_SEGMENT_SIZE); @@ -292,7 +292,7 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* is_large, bo bool any_uncommitted; mi_bitmap_claim(®ion->commit, 1, blocks, bit_idx, &any_uncommitted); if (any_uncommitted) { - mi_assert_internal(!info.is_large); + mi_assert_internal(!info.x.is_large); bool commit_zero; _mi_mem_commit(p, blocks * MI_SEGMENT_SIZE, &commit_zero, tld); if (commit_zero) *is_zero = true; @@ -307,7 +307,7 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* is_large, bo // unreset reset blocks if (mi_bitmap_is_any_claimed(®ion->reset, 1, blocks, bit_idx)) { // some blocks are still reset - mi_assert_internal(!info.is_large); + mi_assert_internal(!info.x.is_large); mi_assert_internal(!mi_option_is_enabled(mi_option_eager_commit) || *commit || mi_option_get(mi_option_eager_commit_delay) > 0); mi_bitmap_unclaim(®ion->reset, 1, blocks, bit_idx); if (*commit || !mi_option_is_enabled(mi_option_reset_decommits)) { // only if needed @@ -412,7 +412,7 @@ void _mi_mem_free(void* p, size_t size, size_t id, bool full_commit, bool any_re } // reset the blocks to reduce the working set. - if (!info.is_large && mi_option_is_enabled(mi_option_segment_reset) + if (!info.x.is_large && mi_option_is_enabled(mi_option_segment_reset) && (mi_option_is_enabled(mi_option_eager_commit) || mi_option_is_enabled(mi_option_reset_decommits))) // cannot reset halfway committed segments, use only `option_page_reset` instead { From dc94d25890e965fb317ee15f4bca6e7b30b8898f Mon Sep 17 00:00:00 2001 From: daan Date: Thu, 16 Jan 2020 19:39:14 -0800 Subject: [PATCH 31/46] update documentation --- doc/mimalloc-doc.h | 128 +++++-- docs/annotated.html | 2 +- docs/bench.html | 2 +- docs/build.html | 4 +- docs/classes.html | 2 +- docs/environment.html | 127 +++++++ docs/functions.html | 2 +- docs/functions_vars.html | 2 +- docs/group__aligned.html | 2 +- docs/group__analysis.html | 2 +- docs/group__extended.html | 349 ++++++++++++------ docs/group__extended.js | 16 +- docs/group__heap.html | 2 +- docs/group__malloc.html | 2 +- docs/group__options.html | 21 +- docs/group__options.js | 5 +- docs/group__posix.html | 2 +- docs/group__typed.html | 2 +- docs/group__zeroinit.html | 597 +++++++++++++++++++++++++++++++ docs/group__zeroinit.js | 14 + docs/index.html | 4 +- docs/mimalloc-doc_8h_source.html | 68 ++-- docs/modules.html | 2 +- docs/navtreeindex0.js | 41 ++- docs/overrides.html | 10 +- docs/pages.html | 2 +- docs/search/all_6.js | 18 +- docs/search/all_c.html | 30 ++ docs/search/all_c.js | 4 + docs/search/all_d.html | 30 ++ docs/search/all_d.js | 4 + docs/search/enumvalues_1.js | 3 +- docs/search/functions_0.js | 11 +- docs/search/functions_1.html | 30 ++ docs/search/functions_1.js | 4 + docs/search/groups_7.html | 30 ++ docs/search/groups_7.js | 4 + docs/search/pages_4.html | 30 ++ docs/search/pages_4.js | 4 + docs/search/typedefs_0.js | 4 +- docs/search/typedefs_1.html | 30 ++ docs/search/typedefs_1.js | 4 + docs/search/typedefs_2.html | 30 ++ docs/search/typedefs_2.js | 5 + docs/using.html | 8 +- 45 files changed, 1437 insertions(+), 256 deletions(-) create mode 100644 docs/environment.html create mode 100644 docs/group__zeroinit.html create mode 100644 docs/group__zeroinit.js create mode 100644 docs/search/all_c.html create mode 100644 docs/search/all_c.js 
create mode 100644 docs/search/all_d.html create mode 100644 docs/search/all_d.js create mode 100644 docs/search/functions_1.html create mode 100644 docs/search/functions_1.js create mode 100644 docs/search/groups_7.html create mode 100644 docs/search/groups_7.js create mode 100644 docs/search/pages_4.html create mode 100644 docs/search/pages_4.js create mode 100644 docs/search/typedefs_1.html create mode 100644 docs/search/typedefs_1.js create mode 100644 docs/search/typedefs_2.html create mode 100644 docs/search/typedefs_2.js diff --git a/doc/mimalloc-doc.h b/doc/mimalloc-doc.h index 5ad5a1e6..71cc1589 100644 --- a/doc/mimalloc-doc.h +++ b/doc/mimalloc-doc.h @@ -26,7 +26,7 @@ without code changes, for example, on Unix you can use it as: Notable aspects of the design include: -- __small and consistent__: the library is less than 3500 LOC using simple and +- __small and consistent__: the library is less than 6k LOC using simple and consistent data structures. This makes it very suitable to integrate and adapt in other projects. For runtime systems it provides hooks for a monotonic _heartbeat_ and deferred freeing (for @@ -297,10 +297,17 @@ size_t mi_good_size(size_t size); void mi_collect(bool force); /// Print the main statistics. -/// @param out Output function. Use \a NULL for outputting to \a stderr. +/// @param out Ignored, outputs to the registered output function or stderr by default. /// /// Most detailed when using a debug build. -void mi_stats_print(mi_output_fun* out); +void mi_stats_print(void* out); + +/// Print the main statistics. +/// @param out An output function or \a NULL for the default. +/// @param arg Optional argument passed to \a out (if not \a NULL) +/// +/// Most detailed when using a debug build. +void mi_stats_print(mi_output_fun* out, void* arg); /// Reset statistics. void mi_stats_reset(void); @@ -320,20 +327,23 @@ void mi_thread_init(void); void mi_thread_done(void); /// Print out heap statistics for this thread. -/// @param out Output function. Use \a NULL for outputting to \a stderr. +/// @param out An output function or \a NULL for the default. +/// @param arg Optional argument passed to \a out (if not \a NULL) /// /// Most detailed when using a debug build. -void mi_thread_stats_print(mi_output_fun* out); +void mi_thread_stats_print_out(mi_output_fun* out, void* arg); /// Type of deferred free functions. /// @param force If \a true all outstanding items should be freed. /// @param heartbeat A monotonically increasing count. +/// @param arg Argument that was passed at registration to hold extra state. /// /// @see mi_register_deferred_free -typedef void (mi_deferred_free_fun)(bool force, unsigned long long heartbeat); +typedef void (mi_deferred_free_fun)(bool force, unsigned long long heartbeat, void* arg); /// Register a deferred free function. /// @param deferred_free Address of a deferred free-ing function or \a NULL to unregister. +/// @param arg Argument that will be passed on to the deferred free function. /// /// Some runtime systems use deferred free-ing, for example when using /// reference counting to limit the worst case free time. @@ -346,20 +356,22 @@ typedef void (mi_deferred_free_fun)(bool force, unsigned long long heartbeat); /// to be called deterministically after some number of allocations /// (regardless of freeing or available free memory). /// At most one \a deferred_free function can be active. 
-void mi_register_deferred_free(mi_deferred_free_fun* deferred_free); +void mi_register_deferred_free(mi_deferred_free_fun* deferred_free, void* arg); /// Type of output functions. /// @param msg Message to output. +/// @param arg Argument that was passed at registration to hold extra state. /// /// @see mi_register_output() -typedef void (mi_output_fun)(const char* msg); +typedef void (mi_output_fun)(const char* msg, void* arg); /// Register an output function. -/// @param out The output function, use `NULL` to output to stdout. +/// @param out The output function, use `NULL` to output to stderr. +/// @param arg Argument that will be passed on to the output function. /// /// The `out` function is called to output any information from mimalloc, /// like verbose or warning messages. -void mi_register_output(mi_output_fun* out) mi_attr_noexcept; +void mi_register_output(mi_output_fun* out, void* arg); /// Is a pointer part of our heap? /// @param p The pointer to check. @@ -367,18 +379,35 @@ void mi_register_output(mi_output_fun* out) mi_attr_noexcept; /// This function is relatively fast. bool mi_is_in_heap_region(const void* p); -/// Reserve \a pages of huge OS pages (1GiB) but stops after at most `max_secs` seconds. + +/// Reserve \a pages of huge OS pages (1GiB) evenly divided over \a numa_nodes nodes, +/// but stops after at most `timeout_msecs` seconds. /// @param pages The number of 1GiB pages to reserve. -/// @param max_secs Maximum number of seconds to try reserving. -/// @param pages_reserved If not \a NULL, it is set to the actual number of pages that were reserved. +/// @param numa_nodes The number of nodes do evenly divide the pages over, or 0 for using the actual number of NUMA nodes. +/// @param timeout_msecs Maximum number of milli-seconds to try reserving, or 0 for no timeout. /// @returns 0 if successfull, \a ENOMEM if running out of memory, or \a ETIMEDOUT if timed out. /// /// The reserved memory is used by mimalloc to satisfy allocations. -/// May quit before \a max_secs are expired if it estimates it will take more than -/// 1.5 times \a max_secs. The time limit is needed because on some operating systems +/// May quit before \a timeout_msecs are expired if it estimates it will take more than +/// 1.5 times \a timeout_msecs. The time limit is needed because on some operating systems /// it can take a long time to reserve contiguous memory if the physical memory is /// fragmented. -int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved); +int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs); + +/// Reserve \a pages of huge OS pages (1GiB) at a specific \a numa_node, +/// but stops after at most `timeout_msecs` seconds. +/// @param pages The number of 1GiB pages to reserve. +/// @param numa_node The NUMA node where the memory is reserved (start at 0). +/// @param timeout_msecs Maximum number of milli-seconds to try reserving, or 0 for no timeout. +/// @returns 0 if successfull, \a ENOMEM if running out of memory, or \a ETIMEDOUT if timed out. +/// +/// The reserved memory is used by mimalloc to satisfy allocations. +/// May quit before \a timeout_msecs are expired if it estimates it will take more than +/// 1.5 times \a timeout_msecs. The time limit is needed because on some operating systems +/// it can take a long time to reserve contiguous memory if the physical memory is +/// fragmented. 
+int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs); + /// Is the C runtime \a malloc API redirected? /// @returns \a true if all malloc API calls are redirected to mimalloc. @@ -702,13 +731,14 @@ typedef enum mi_option_e { mi_option_eager_region_commit, ///< Eagerly commit large (256MiB) memory regions (enabled by default, except on Windows) mi_option_large_os_pages, ///< Use large OS pages (2MiB in size) if possible mi_option_reserve_huge_os_pages, ///< The number of huge OS pages (1GiB in size) to reserve at the start of the program. - mi_option_segment_cache, ///< The number of segments per thread to keep cached. - mi_option_page_reset, ///< Reset page memory when it becomes free. - mi_option_cache_reset, ///< Reset segment memory when a segment is cached. + mi_option_segment_cache, ///< The number of segments per thread to keep cached. + mi_option_page_reset, ///< Reset page memory after \a mi_option_reset_delay milliseconds when it becomes free. + mi_option_segment_reset, ///< Experimental + mi_option_reset_delay, ///< Delay in milli-seconds before resetting a page (100ms by default) + mi_option_use_numa_nodes, ///< Pretend there are at most N NUMA nodes mi_option_reset_decommits, ///< Experimental mi_option_eager_commit_delay, ///< Experimental - mi_option_segment_reset, ///< Experimental - mi_option_os_tag, ///< OS tag to assign to mimalloc'd memory + mi_option_os_tag, ///< OS tag to assign to mimalloc'd memory _mi_option_last } mi_option_t; @@ -774,7 +804,7 @@ git clone https://github.com/microsoft/mimalloc ## Windows -Open `ide/vs2017/mimalloc.sln` in Visual Studio 2017 and build. +Open `ide/vs2019/mimalloc.sln` in Visual Studio 2019 and build (or `ide/vs2017/mimalloc.sln`). The `mimalloc` project builds a static library (in `out/msvc-x64`), while the `mimalloc-override` project builds a DLL for overriding malloc in the entire program. @@ -826,6 +856,7 @@ Notes: /*! \page using Using the library +### Build The preferred usage is including ``, linking with the shared- or static library, and using the `mi_malloc` API exclusively for allocation. For example, @@ -849,6 +880,19 @@ target_link_libraries(myapp PUBLIC mimalloc-static) ``` to link with the static library. See `test\CMakeLists.txt` for an example. +### C++ +For best performance in C++ programs, it is also recommended to override the +global `new` and `delete` operators. For convience, mimalloc provides +[`mimalloc-new-delete.h`](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc-new-delete.h) which does this for you -- just include it in a single(!) source file in your project. + +In C++, mimalloc also provides the `mi_stl_allocator` struct which implements the `std::allocator` +interface. For example: +``` +std::vector> vec; +vec.push_back(some_struct()); +``` + +### Statistics You can pass environment variables to print verbose messages (`MIMALLOC_VERBOSE=1`) and statistics (`MIMALLOC_SHOW_STATS=1`) (in the debug version): @@ -897,20 +941,33 @@ See \ref overrides for more info. /*! \page environment Environment Options -You can set further options either programmatically -(using [`mi_option_set`](https://microsoft.github.io/mimalloc/group__options.html)), +You can set further options either programmatically (using [`mi_option_set`](https://microsoft.github.io/mimalloc/group__options.html)), or via environment variables. - `MIMALLOC_SHOW_STATS=1`: show statistics when the program terminates. - `MIMALLOC_VERBOSE=1`: show verbose messages. 
- `MIMALLOC_SHOW_ERRORS=1`: show error and warning messages. +- `MIMALLOC_PAGE_RESET=1`: reset (or purge) OS pages when not in use. This can reduce + memory fragmentation in long running (server) programs. If performance is impacted, + `MIMALLOC_RESET_DELAY=`_msecs_ can be set higher (100ms by default) to make the page + reset occur less frequently. - `MIMALLOC_LARGE_OS_PAGES=1`: use large OS pages when available; for some workloads this can significantly improve performance. Use `MIMALLOC_VERBOSE` to check if the large OS pages are enabled -- usually one needs - to explicitly allow large OS pages (as on [Windows][windows-huge] and [Linux][linux-huge]). + to explicitly allow large OS pages (as on [Windows][windows-huge] and [Linux][linux-huge]). However, sometimes + the OS is very slow to reserve contiguous physical memory for large OS pages so use with care on systems that + can have fragmented memory (for that reason, we generally recommend to use `MIMALLOC_RESERVE_HUGE_OS_PAGES` instead when possible). - `MIMALLOC_EAGER_REGION_COMMIT=1`: on Windows, commit large (256MiB) regions eagerly. On Windows, these regions show in the working set even though usually just a small part is committed to physical memory. This is why it turned off by default on Windows as it looks not good in the task manager. However, in reality it is always better to turn it on as it improves performance and has no other drawbacks. +- `MIMALLOC_RESERVE_HUGE_OS_PAGES=N`: where N is the number of 1GiB huge OS pages. This reserves the huge pages at + startup and can give quite a performance improvement on long running workloads. Usually it is better to not use + `MIMALLOC_LARGE_OS_PAGES` in combination with this setting. Just like large OS pages, use with care as reserving + contiguous physical memory can take a long time when memory is fragmented. + Note that we usually need to explicitly enable huge OS pages (as on [Windows][windows-huge] and [Linux][linux-huge])). With huge OS pages, it may be beneficial to set the setting + `MIMALLOC_EAGER_COMMIT_DELAY=N` (with usually `N` as 1) to delay the initial `N` segments + of a thread to not allocate in the huge OS pages; this prevents threads that are short lived + and allocate just a little to take up space in the huge OS page area (which cannot be reset). [linux-huge]: https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/5/html/tuning_and_optimizing_red_hat_enterprise_linux_for_oracle_9i_and_10g_databases/sect-oracle_9i_and_10g_tuning_guide-large_memory_optimization_big_pages_and_huge_pages-configuring_huge_pages_in_red_hat_enterprise_linux_4_or_5 [windows-huge]: https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/enable-the-lock-pages-in-memory-option-windows?view=sql-server-2017 @@ -960,25 +1017,28 @@ Note: unfortunately, at this time, dynamic overriding on macOS seems broken but ### Windows -On Windows you need to link your program explicitly with the mimalloc -DLL and use the C-runtime library as a DLL (using the `/MD` or `/MDd` switch). +Overriding on Windows is robust but requires that you link your program explicitly with +the mimalloc DLL and use the C-runtime library as a DLL (using the `/MD` or `/MDd` switch). Moreover, you need to ensure the `mimalloc-redirect.dll` (or `mimalloc-redirect32.dll`) is available -in the same folder as the mimalloc DLL at runtime (as it as referred to by the mimalloc DLL). -The redirection DLL's ensure all calls to the C runtime malloc API get redirected to mimalloc. 
+in the same folder as the main `mimalloc-override.dll` at runtime (as it is a dependency). +The redirection DLL ensures that all calls to the C runtime malloc API get redirected to +mimalloc (in `mimalloc-override.dll`). To ensure the mimalloc DLL is loaded at run-time it is easiest to insert some call to the mimalloc API in the `main` function, like `mi_version()` (or use the `/INCLUDE:mi_version` switch on the linker). See the `mimalloc-override-test` project -for an example on how to use this. +for an example on how to use this. For best performance on Windows with C++, it +is highly recommended to also override the `new`/`delete` operations (by including +[`mimalloc-new-delete.h`](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc-new-delete.h) a single(!) source file in your project). The environment variable `MIMALLOC_DISABLE_REDIRECT=1` can be used to disable dynamic -overriding at run-time. Use `MIMALLOC_VERBOSE=1` to check if mimalloc successfully redirected. +overriding at run-time. Use `MIMALLOC_VERBOSE=1` to check if mimalloc was successfully redirected. -(Note: in principle, it should be possible to patch existing executables -that are linked with the dynamic C runtime (`ucrtbase.dll`) by just putting the mimalloc DLL into -the import table (and putting `mimalloc-redirect.dll` in the same folder) +(Note: in principle, it is possible to patch existing executables +that are linked with the dynamic C runtime (`ucrtbase.dll`) by just putting the `mimalloc-override.dll` into the import table (and putting `mimalloc-redirect.dll` in the same folder) Such patching can be done for example with [CFF Explorer](https://ntcore.com/?page_id=388)). + ## Static override On Unix systems, you can also statically link with _mimalloc_ to override the standard diff --git a/docs/annotated.html b/docs/annotated.html index dcc2e74d..4d2a8bcc 100644 --- a/docs/annotated.html +++ b/docs/annotated.html @@ -37,7 +37,7 @@ Logo
mi-malloc -  1.0 +  1.4
diff --git a/docs/bench.html b/docs/bench.html index 11b18550..6b289c04 100644 --- a/docs/bench.html +++ b/docs/bench.html @@ -37,7 +37,7 @@ Logo
mi-malloc -  1.0 +  1.4
diff --git a/docs/build.html b/docs/build.html index 3e870697..755aad88 100644 --- a/docs/build.html +++ b/docs/build.html @@ -37,7 +37,7 @@ Logo
mi-malloc -  1.0 +  1.4
@@ -103,7 +103,7 @@ $(document).ready(function(){initNavTree('build.html','');});

Checkout the sources from Github:

git clone https://github.com/microsoft/mimalloc

Windows

-

Open ide/vs2017/mimalloc.sln in Visual Studio 2017 and build. The mimalloc project builds a static library (in out/msvc-x64), while the mimalloc-override project builds a DLL for overriding malloc in the entire program.

+

Open ide/vs2019/mimalloc.sln in Visual Studio 2019 and build (or ide/vs2017/mimalloc.sln). The mimalloc project builds a static library (in out/msvc-x64), while the mimalloc-override project builds a DLL for overriding malloc in the entire program.

macOS, Linux, BSD, etc.

We use cmake as the build system:

> mkdir -p out/release
> cd out/release
> cmake ../..
> make

This builds the library as a shared (dynamic) library (.so or .dylib), a static library (.a), and as a single object file (.o).
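After building, a short program can verify that the library works by calling the `mi_` API directly. The sketch below is illustrative only; it assumes the installed headers are on the include path and that the STL wrapper header added in this series is named `mimalloc-stl-allocator.h`:

```
#include <mimalloc.h>
#include <mimalloc-stl-allocator.h>  // assumed name of the STL wrapper header from this series
#include <vector>
#include <cstdio>

int main() {
  void* p = mi_malloc(64);     // allocate 64 bytes through mimalloc
  mi_free(p);                  // and free them again

  // use the std::allocator-compatible wrapper with a standard container
  std::vector<int, mi_stl_allocator<int>> v;
  v.push_back(42);

  mi_stats_print(NULL);        // print the main statistics (most detailed in a debug build)
  return 0;
}
```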

diff --git a/docs/classes.html b/docs/classes.html index 760b28de..e5ea3ea8 100644 --- a/docs/classes.html +++ b/docs/classes.html @@ -37,7 +37,7 @@ Logo
mi-malloc -  1.0 +  1.4
diff --git a/docs/environment.html b/docs/environment.html new file mode 100644 index 00000000..1063654e --- /dev/null +++ b/docs/environment.html @@ -0,0 +1,127 @@ + + + + + + + +mi-malloc: Environment Options + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+
mi-malloc +  1.4 +
+
+ + + + + + +
+
+
+ + + +
+
+ +
+
+
+ +
+ +
+
+ + +
+ +
+ +
+
+
Environment Options
+
+
+

You can set further options either programmatically (using mi_option_set), or via environment variables.

+
    +
  • MIMALLOC_SHOW_STATS=1: show statistics when the program terminates.
  • +
  • MIMALLOC_VERBOSE=1: show verbose messages.
  • +
  • MIMALLOC_SHOW_ERRORS=1: show error and warning messages.
  • +
  • MIMALLOC_PAGE_RESET=1: reset (or purge) OS pages when not in use. This can reduce memory fragmentation in long running (server) programs. If performance is impacted, MIMALLOC_RESET_DELAY=_msecs_ can be set higher (100ms by default) to make the page reset occur less frequently.
  • +
  • MIMALLOC_LARGE_OS_PAGES=1: use large OS pages when available; for some workloads this can significantly improve performance. Use MIMALLOC_VERBOSE to check if the large OS pages are enabled – usually one needs to explicitly allow large OS pages (as on Windows and Linux). However, sometimes the OS is very slow to reserve contiguous physical memory for large OS pages, so use with care on systems that can have fragmented memory (for that reason, we generally recommend using MIMALLOC_RESERVE_HUGE_OS_PAGES instead when possible).
  • +
  • MIMALLOC_EAGER_REGION_COMMIT=1: on Windows, commit large (256MiB) regions eagerly. On Windows, these regions show in the working set even though usually just a small part is committed to physical memory. This is why it is turned off by default on Windows, as it does not look good in the task manager. However, in reality it is always better to turn it on as it improves performance and has no other drawbacks.
  • +
  • MIMALLOC_RESERVE_HUGE_OS_PAGES=N: where N is the number of 1GiB huge OS pages. This reserves the huge pages at startup and can give quite a performance improvement on long running workloads. Usually it is better not to use MIMALLOC_LARGE_OS_PAGES in combination with this setting. Just like large OS pages, use with care as reserving contiguous physical memory can take a long time when memory is fragmented. Note that we usually need to explicitly enable huge OS pages (as on Windows and Linux). With huge OS pages, it may be beneficial to set MIMALLOC_EAGER_COMMIT_DELAY=N (usually with N=1) so that the initial N segments of a thread do not allocate in the huge OS pages; this prevents short-lived threads that allocate only a little from taking up space in the huge OS page area (which cannot be reset).
  • +
+
+
+
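The same options can also be set programmatically through mi_option_set (referenced at the top of this page), preferably before the first allocation. A minimal sketch, with illustrative values only:

```
#include <mimalloc.h>

int main() {
  // programmatic equivalents of the environment variables above
  mi_option_set(mi_option_page_reset, 1);             // MIMALLOC_PAGE_RESET=1
  mi_option_set(mi_option_reset_delay, 500);          // MIMALLOC_RESET_DELAY=500 (milliseconds)
  mi_option_set(mi_option_reserve_huge_os_pages, 4);  // MIMALLOC_RESERVE_HUGE_OS_PAGES=4
  // ... the rest of the program allocates as usual ...
  return 0;
}
```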
+ + + + diff --git a/docs/functions.html b/docs/functions.html index d2615a17..43e116ed 100644 --- a/docs/functions.html +++ b/docs/functions.html @@ -37,7 +37,7 @@ Logo
mi-malloc -  1.0 +  1.4
diff --git a/docs/functions_vars.html b/docs/functions_vars.html index b824832f..060a18d2 100644 --- a/docs/functions_vars.html +++ b/docs/functions_vars.html @@ -37,7 +37,7 @@ Logo
mi-malloc -  1.0 +  1.4
diff --git a/docs/group__aligned.html b/docs/group__aligned.html index 4980b45a..88c10eb4 100644 --- a/docs/group__aligned.html +++ b/docs/group__aligned.html @@ -37,7 +37,7 @@ Logo
mi-malloc -  1.0 +  1.4
diff --git a/docs/group__analysis.html b/docs/group__analysis.html index 3301fdef..b8d644aa 100644 --- a/docs/group__analysis.html +++ b/docs/group__analysis.html @@ -37,7 +37,7 @@ Logo
mi-malloc -  1.0 +  1.4
diff --git a/docs/group__extended.html b/docs/group__extended.html index 4d07f38d..85ea3624 100644 --- a/docs/group__extended.html +++ b/docs/group__extended.html @@ -37,7 +37,7 @@ Logo
mi-malloc -  1.0 +  1.4
@@ -118,12 +118,12 @@ Macros - - - - - - + + + + + +

Typedefs

typedef void() mi_deferred_free_fun(bool force, unsigned long long heartbeat)
 Type of deferred free functions. More...
 
typedef void() mi_output_fun(const char *msg)
 Type of output functions. More...
 
typedef void() mi_deferred_free_fun(bool force, unsigned long long heartbeat, void *arg)
 Type of deferred free functions. More...
 
typedef void() mi_output_fun(const char *msg, void *arg)
 Type of output functions. More...
 
@@ -142,9 +142,12 @@ Functions - - - + + + + + + @@ -157,21 +160,24 @@ Functions - - - - - - - - - + + + + + + + + + - - - + + + + + + @@ -196,14 +202,14 @@ Functions

Typedef Documentation

- -

◆ mi_deferred_free_fun

+ +

◆ mi_deferred_free_fun

Functions

void mi_collect (bool force)
 Eagerly free memory. More...
 
void mi_stats_print (mi_output_fun *out)
 Print the main statistics. More...
 
void mi_stats_print (void *out)
 Print the main statistics. More...
 
void mi_stats_print (mi_output_fun *out, void *arg)
 Print the main statistics. More...
 
void mi_stats_reset (void)
 Reset statistics. More...
 
void mi_thread_done (void)
 Uninitialize mimalloc on a thread. More...
 
void mi_thread_stats_print (mi_output_fun *out)
 Print out heap statistics for this thread. More...
 
void mi_register_deferred_free (mi_deferred_free_fun *deferred_free)
 Register a deferred free function. More...
 
void mi_register_output (mi_output_fun *out) mi_attr_noexcept
 Register an output function. More...
 
void mi_thread_stats_print_out (mi_output_fun *out, void *arg)
 Print out heap statistics for this thread. More...
 
void mi_register_deferred_free (mi_deferred_free_fun *deferred_free, void *arg)
 Register a deferred free function. More...
 
void mi_register_output (mi_output_fun *out, void *arg)
 Register an output function. More...
 
bool mi_is_in_heap_region (const void *p)
 Is a pointer part of our heap? More...
 
int mi_reserve_huge_os_pages (size_t pages, double max_secs, size_t *pages_reserved)
 Reserve pages of huge OS pages (1GiB) but stops after at most max_secs seconds. More...
 
int mi_reserve_huge_os_pages_interleave (size_t pages, size_t numa_nodes, size_t timeout_msecs)
 Reserve pages of huge OS pages (1GiB) evenly divided over numa_nodes nodes, but stops after at most timeout_msecs seconds. More...
 
int mi_reserve_huge_os_pages_at (size_t pages, int numa_node, size_t timeout_msecs)
 Reserve pages of huge OS pages (1GiB) at a specific numa_node, but stops after at most timeout_msecs seconds. More...
 
bool mi_is_redirected ()
 Is the C runtime malloc API redirected? More...
 
- +
typedef void() mi_deferred_free_fun(bool force, unsigned long long heartbeat)typedef void() mi_deferred_free_fun(bool force, unsigned long long heartbeat, void *arg)
@@ -212,22 +218,23 @@ Functions
Parameters
- + +
forceIf true all outstanding items should be freed.
heartbeatA monotonically increasing count.
heartbeatA monotonically increasing count.
argArgument that was passed at registration to hold extra state.
-
See also
mi_register_deferred_free
+
See also
mi_register_deferred_free
- -

◆ mi_output_fun

+ +

◆ mi_output_fun

- +
typedef void() mi_output_fun(const char *msg)typedef void() mi_output_fun(const char *msg, void *arg)
@@ -235,11 +242,12 @@ Functions

Type of output functions.

Parameters
- + +
msgMessage to output.
msgMessage to output.
argArgument that was passed at registration to hold extra state.
-
See also
mi_register_output()
+
See also
mi_register_output()
@@ -375,8 +383,8 @@ Functions
- -

◆ mi_register_deferred_free()

+ +

◆ mi_register_deferred_free()

@@ -384,74 +392,14 @@ Functions void mi_register_deferred_free ( - mi_deferred_free_fun *  - deferred_free) - - - -
- -

Register a deferred free function.

-
Parameters
- - -
deferred_freeAddress of a deferred free-ing function or NULL to unregister.
-
-
-

Some runtime systems use deferred free-ing, for example when using reference counting to limit the worst case free time. Such systems can register (re-entrant) deferred free function to free more memory on demand. When the force parameter is true all possible memory should be freed. The per-thread heartbeat parameter is monotonically increasing and guaranteed to be deterministic if the program allocates deterministically. The deferred_free function is guaranteed to be called deterministically after some number of allocations (regardless of freeing or available free memory). At most one deferred_free function can be active.

- -
-
- -

◆ mi_register_output()

- -
-
- - - - - - - - -
void mi_register_output (mi_output_funout)
-
- -

Register an output function.

-
Parameters
- - -
outThe output function, use NULL to output to stdout.
-
-
-

The out function is called to output any information from mimalloc, like verbose or warning messages.

- -
-
- -

◆ mi_reserve_huge_os_pages()

- -
-
- - - - - - + + - - - - - - - - + + @@ -461,17 +409,145 @@ Functions
int mi_reserve_huge_os_pages (size_t pages, mi_deferred_free_fundeferred_free,
double max_secs,
size_t * pages_reserved void * arg 
-

Reserve pages of huge OS pages (1GiB) but stops after at most max_secs seconds.

+

Register a deferred free function.

+
Parameters
+ + + +
deferred_freeAddress of a deferred free-ing function or NULL to unregister.
argArgument that will be passed on to the deferred free function.
+
+
+

Some runtime systems use deferred free-ing, for example when using reference counting to limit the worst case free time. Such systems can register (re-entrant) deferred free function to free more memory on demand. When the force parameter is true all possible memory should be freed. The per-thread heartbeat parameter is monotonically increasing and guaranteed to be deterministic if the program allocates deterministically. The deferred_free function is guaranteed to be called deterministically after some number of allocations (regardless of freeing or available free memory). At most one deferred_free function can be active.
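A minimal sketch of such a hook with the extended `(force, heartbeat, arg)` signature; `my_gc_state_t` and the freeing logic stand in for hypothetical application code:

```
#include <mimalloc.h>
#include <cstddef>

typedef struct my_gc_state_s { int pending; } my_gc_state_t;   // hypothetical application state

static void my_deferred_free(bool force, unsigned long long heartbeat, void* arg) {
  my_gc_state_t* gc = (my_gc_state_t*)arg;   // `arg` is the pointer given at registration
  (void)heartbeat;
  // free some outstanding objects here; free everything when `force` is true
  if (force) gc->pending = 0;
}

int main() {
  static my_gc_state_t gc = { 0 };
  mi_register_deferred_free(&my_deferred_free, &gc);
  // ... program runs; mimalloc invokes the hook on its heartbeat ...
  mi_register_deferred_free(NULL, NULL);     // unregister again
  return 0;
}
```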

+ +
+
+ +

◆ mi_register_output()

+ +
+
+ + + + + + + + + + + + + + + + + + +
void mi_register_output (mi_output_funout,
void * arg 
)
+
+ +

Register an output function.

+
Parameters
+ + + +
outThe output function, use NULL to output to stderr.
argArgument that will be passed on to the output function.
+
+
+

The out function is called to output any information from mimalloc, like verbose or warning messages.
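A minimal sketch of a custom output sink with the extended `(msg, arg)` signature; `log_state_t` is a hypothetical application type:

```
#include <mimalloc.h>
#include <cstdio>

typedef struct log_state_s { FILE* file; } log_state_t;   // hypothetical log sink

static void my_output(const char* msg, void* arg) {
  log_state_t* log = (log_state_t*)arg;   // `arg` is the pointer given at registration
  fputs(msg, log->file);                  // forward verbose/warning messages and statistics
}

int main() {
  static log_state_t log = { stderr };
  mi_register_output(&my_output, &log);
  mi_stats_print(NULL);                   // now routed through the registered output function
  return 0;
}
```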

+ +
+
+ +

◆ mi_reserve_huge_os_pages_at()

+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + +
int mi_reserve_huge_os_pages_at (size_t pages,
int numa_node,
size_t timeout_msecs 
)
+
+ +

Reserve pages of huge OS pages (1GiB) at a specific numa_node, but stops after at most timeout_msecs seconds.

Parameters
- - + +
pagesThe number of 1GiB pages to reserve.
max_secsMaximum number of seconds to try reserving.
pages_reservedIf not NULL, it is set to the actual number of pages that were reserved.
numa_nodeThe NUMA node where the memory is reserved (start at 0).
timeout_msecsMaximum number of milli-seconds to try reserving, or 0 for no timeout.
Returns
0 if successfull, ENOMEM if running out of memory, or ETIMEDOUT if timed out.
-

The reserved memory is used by mimalloc to satisfy allocations. May quit before max_secs are expired if it estimates it will take more than 1.5 times max_secs. The time limit is needed because on some operating systems it can take a long time to reserve contiguous memory if the physical memory is fragmented.

+

The reserved memory is used by mimalloc to satisfy allocations. May quit before timeout_msecs are expired if it estimates it will take more than 1.5 times timeout_msecs. The time limit is needed because on some operating systems it can take a long time to reserve contiguous memory if the physical memory is fragmented.

+ +
+
+ +

◆ mi_reserve_huge_os_pages_interleave()

+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + +
int mi_reserve_huge_os_pages_interleave (size_t pages,
size_t numa_nodes,
size_t timeout_msecs 
)
+
+ +

Reserve pages of huge OS pages (1GiB) evenly divided over numa_nodes nodes, but stops after at most timeout_msecs seconds.

+
Parameters
+ + + + +
pagesThe number of 1GiB pages to reserve.
numa_nodesThe number of nodes do evenly divide the pages over, or 0 for using the actual number of NUMA nodes.
timeout_msecsMaximum number of milli-seconds to try reserving, or 0 for no timeout.
+
+
+
Returns
0 if successfull, ENOMEM if running out of memory, or ETIMEDOUT if timed out.
+

The reserved memory is used by mimalloc to satisfy allocations. May quit before timeout_msecs are expired if it estimates it will take more than 1.5 times timeout_msecs. The time limit is needed because on some operating systems it can take a long time to reserve contiguous memory if the physical memory is fragmented.
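For example, a long-running server that wants the reservation done once at startup could call (sketch only; the page count and timeout are illustrative):

```
#include <mimalloc.h>
#include <cstdio>
#include <cerrno>

int main() {
  // reserve 4 x 1GiB huge OS pages, spread over the available NUMA nodes,
  // giving up after at most 5000 milliseconds
  int err = mi_reserve_huge_os_pages_interleave(4, 0, 5000);
  if (err == ENOMEM)         fprintf(stderr, "could not reserve huge OS pages: out of memory\n");
  else if (err == ETIMEDOUT) fprintf(stderr, "reserving huge OS pages timed out\n");
  // ... subsequent allocations are satisfied from the reserved pages where possible ...
  return 0;
}
```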

@@ -495,8 +571,8 @@ Functions
- -

◆ mi_stats_print()

+ +

◆ mi_stats_print() [1/2]

@@ -504,7 +580,7 @@ Functions void mi_stats_print ( - mi_output_fun *  + void *  out) @@ -514,7 +590,45 @@ Functions

Print the main statistics.

Parameters
- + +
outOutput function. Use NULL for outputting to stderr.
outIgnored, outputs to the registered output function or stderr by default.
+
+
+

Most detailed when using a debug build.

+ +
+
+ +

◆ mi_stats_print() [2/2]

+ +
+
+ + + + + + + + + + + + + + + + + + +
void mi_stats_print (mi_output_funout,
void * arg 
)
+
+ +

Print the main statistics.

+
Parameters
+ + +
outAn output function or NULL for the default.
argOptional argument passed to out (if not NULL)
@@ -584,18 +698,28 @@ Functions
- -

◆ mi_thread_stats_print()

+ +

◆ mi_thread_stats_print_out()

- + - - + + + + + + + + + + + +
void mi_thread_stats_print void mi_thread_stats_print_out (mi_output_funout)mi_output_funout,
void * arg 
)
@@ -603,7 +727,8 @@ Functions

Print out heap statistics for this thread.

Parameters
- + +
outOutput function. Use NULL for outputting to stderr.
outAn output function or NULL for the default.
argOptional argument passed to out (if not NULL)
diff --git a/docs/group__extended.js b/docs/group__extended.js index 00c73614..7152b518 100644 --- a/docs/group__extended.js +++ b/docs/group__extended.js @@ -1,22 +1,24 @@ var group__extended = [ [ "MI_SMALL_SIZE_MAX", "group__extended.html#ga1ea64283508718d9d645c38efc2f4305", null ], - [ "mi_deferred_free_fun", "group__extended.html#ga22213691c3ce5ab4d91b24aff1023529", null ], - [ "mi_output_fun", "group__extended.html#ga2bed6d40b74591a67f81daea4b4a246f", null ], + [ "mi_deferred_free_fun", "group__extended.html#ga299dae78d25ce112e384a98b7309c5be", null ], + [ "mi_output_fun", "group__extended.html#gad823d23444a4b77a40f66bf075a98a0c", null ], [ "mi_collect", "group__extended.html#ga421430e2226d7d468529cec457396756", null ], [ "mi_good_size", "group__extended.html#gac057927cd06c854b45fe7847e921bd47", null ], [ "mi_is_in_heap_region", "group__extended.html#ga5f071b10d4df1c3658e04e7fd67a94e6", null ], [ "mi_is_redirected", "group__extended.html#gaad25050b19f30cd79397b227e0157a3f", null ], [ "mi_malloc_small", "group__extended.html#ga7136c2e55cb22c98ecf95d08d6debb99", null ], - [ "mi_register_deferred_free", "group__extended.html#ga24dc9cc6fca8daa2aa30aa8025467ce2", null ], - [ "mi_register_output", "group__extended.html#ga84a0c8b401e42eb5b1bce156852f44c5", null ], - [ "mi_reserve_huge_os_pages", "group__extended.html#ga2664f36a2dd557741c429cb799f04641", null ], + [ "mi_register_deferred_free", "group__extended.html#ga3460a6ca91af97be4058f523d3cb8ece", null ], + [ "mi_register_output", "group__extended.html#gae5b17ff027cd2150b43a33040250cf3f", null ], + [ "mi_reserve_huge_os_pages_at", "group__extended.html#ga7795a13d20087447281858d2c771cca1", null ], + [ "mi_reserve_huge_os_pages_interleave", "group__extended.html#ga3132f521fb756fc0e8ec0b74fb58df50", null ], [ "mi_stats_merge", "group__extended.html#ga854b1de8cb067c7316286c28b2fcd3d1", null ], - [ "mi_stats_print", "group__extended.html#ga8ca07ccff283956d71f48272f4fd5c01", null ], + [ "mi_stats_print", "group__extended.html#ga2d126e5c62d3badc35445e5d84166df2", null ], + [ "mi_stats_print", "group__extended.html#ga256cc6f13a142deabbadd954a217e228", null ], [ "mi_stats_reset", "group__extended.html#ga3bb8468b8cfcc6e2a61d98aee85c5f99", null ], [ "mi_thread_done", "group__extended.html#ga0ae4581e85453456a0d658b2b98bf7bf", null ], [ "mi_thread_init", "group__extended.html#gaf8e73efc2cbca9ebfdfb166983a04c17", null ], - [ "mi_thread_stats_print", "group__extended.html#ga489670a15d1a257ab4639e645ee4612a", null ], + [ "mi_thread_stats_print_out", "group__extended.html#gab1dac8476c46cb9eecab767eb40c1525", null ], [ "mi_usable_size", "group__extended.html#ga089c859d9eddc5f9b4bd946cd53cebee", null ], [ "mi_zalloc_small", "group__extended.html#ga220f29f40a44404b0061c15bc1c31152", null ] ]; \ No newline at end of file diff --git a/docs/group__heap.html b/docs/group__heap.html index 753aaba3..0973279a 100644 --- a/docs/group__heap.html +++ b/docs/group__heap.html @@ -37,7 +37,7 @@ Logo
mi-malloc -  1.0 +  1.4
diff --git a/docs/group__malloc.html b/docs/group__malloc.html index 6bd71d06..bee7b4eb 100644 --- a/docs/group__malloc.html +++ b/docs/group__malloc.html @@ -37,7 +37,7 @@ Logo
mi-malloc -  1.0 +  1.4
diff --git a/docs/group__options.html b/docs/group__options.html index a34a9307..71c7ba24 100644 --- a/docs/group__options.html +++ b/docs/group__options.html @@ -37,7 +37,7 @@ Logo
mi-malloc -  1.0 +  1.4
@@ -123,11 +123,12 @@ Enumerations mi_option_segment_cache,
  mi_option_page_reset, -mi_option_cache_reset, -mi_option_reset_decommits, -mi_option_eager_commit_delay, +mi_option_segment_reset, +mi_option_reset_delay, +mi_option_use_numa_nodes,
-  mi_option_segment_reset, +  mi_option_reset_decommits, +mi_option_eager_commit_delay, mi_option_os_tag, _mi_option_last
@@ -183,16 +184,18 @@ Functions mi_option_segment_cache 

The number of segments per thread to keep cached.

-mi_option_page_reset 

Reset page memory when it becomes free.

+mi_option_page_reset 

Reset page memory after mi_option_reset_delay milliseconds when it becomes free.

-mi_option_cache_reset 

Reset segment memory when a segment is cached.

+mi_option_segment_reset 

Experimental.

+ +mi_option_reset_delay 

Delay in milli-seconds before resetting a page (100ms by default)

+ +mi_option_use_numa_nodes 

Pretend there are at most N NUMA nodes.

mi_option_reset_decommits 

Experimental.

mi_option_eager_commit_delay 

Experimental.

-mi_option_segment_reset 

Experimental.

- mi_option_os_tag 

OS tag to assign to mimalloc'd memory.

_mi_option_last  diff --git a/docs/group__options.js b/docs/group__options.js index 4bf52d54..1d84ea8b 100644 --- a/docs/group__options.js +++ b/docs/group__options.js @@ -10,10 +10,11 @@ var group__options = [ "mi_option_reserve_huge_os_pages", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caca7ed041be3b0b9d0b82432c7bf41af2", null ], [ "mi_option_segment_cache", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca2ecbe7ef32f5c84de3739aa4f0b805a1", null ], [ "mi_option_page_reset", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cada854dd272c66342f18a93ee254a2968", null ], - [ "mi_option_cache_reset", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cac2157a0cb79cd996c1db7d9f6a090c07", null ], + [ "mi_option_segment_reset", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafb121d30d87591850d5410ccc3a95c6d", null ], + [ "mi_option_reset_delay", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca154fe170131d5212cff57e22b99523c5", null ], + [ "mi_option_use_numa_nodes", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0ac33a18f6b659fcfaf44efb0bab1b74", null ], [ "mi_option_reset_decommits", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cac81ee965b130fa81238913a3c239d536", null ], [ "mi_option_eager_commit_delay", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca17a190c25be381142d87e0468c4c068c", null ], - [ "mi_option_segment_reset", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafb121d30d87591850d5410ccc3a95c6d", null ], [ "mi_option_os_tag", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca4b74ae2a69e445de6c2361b73c1d14bf", null ], [ "_mi_option_last", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca5b4357b74be0d87568036c32eb1a2e4a", null ] ] ], diff --git a/docs/group__posix.html b/docs/group__posix.html index b9cf0b52..65e8ff7e 100644 --- a/docs/group__posix.html +++ b/docs/group__posix.html @@ -37,7 +37,7 @@ Logo
mi-malloc -  1.0 +  1.4
diff --git a/docs/group__typed.html b/docs/group__typed.html index 8ea0f095..cf5ac5d1 100644 --- a/docs/group__typed.html +++ b/docs/group__typed.html @@ -37,7 +37,7 @@ Logo
mi-malloc -  1.0 +  1.4
diff --git a/docs/group__zeroinit.html b/docs/group__zeroinit.html new file mode 100644 index 00000000..28983138 --- /dev/null +++ b/docs/group__zeroinit.html @@ -0,0 +1,597 @@ + + + + + + + +mi-malloc: Zero initialized re-allocation + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+
mi-malloc +  1.4 +
+
+ + + + + + +
+
+
+ + + +
+
+ +
+
+
+ +
+ +
+
+ + +
+ +
+ +
+ +
+
Zero initialized re-allocation
+
+
+ +

The zero-initialized re-allocations are only valid on memory that was originally allocated with zero initialization too. +More...

+ + + + + + + + + + + + + + + + + + + + + + + + +

+Functions

void * mi_rezalloc (void *p, size_t newsize)
 
void * mi_rezalloc_aligned (void *p, size_t newsize, size_t alignment)
 
void * mi_rezalloc_aligned_at (void *p, size_t newsize, size_t alignment, size_t offset)
 
void * mi_recalloc_aligned (void *p, size_t newcount, size_t size, size_t alignment)
 
void * mi_recalloc_aligned_at (void *p, size_t newcount, size_t size, size_t alignment, size_t offset)
 
void * mi_heap_rezalloc (mi_heap_t *heap, void *p, size_t newsize)
 
void * mi_heap_recalloc (mi_heap_t *heap, void *p, size_t newcount, size_t size)
 
void * mi_heap_rezalloc_aligned (mi_heap_t *heap, void *p, size_t newsize, size_t alignment)
 
void * mi_heap_rezalloc_aligned_at (mi_heap_t *heap, void *p, size_t newsize, size_t alignment, size_t offset)
 
void * mi_heap_recalloc_aligned (mi_heap_t *heap, void *p, size_t newcount, size_t size, size_t alignment)
 
void * mi_heap_recalloc_aligned_at (mi_heap_t *heap, void *p, size_t newcount, size_t size, size_t alignment, size_t offset)
 
+

Detailed Description

+

The zero-initialized re-allocations are only valid on memory that was originally allocated with zero initialization too.

+

e.g. mi_calloc, mi_zalloc, mi_zalloc_aligned etc. see https://github.com/microsoft/mimalloc/issues/63#issuecomment-508272992

+

Function Documentation

+ +

◆ mi_heap_recalloc()

+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
void* mi_heap_recalloc (mi_heap_theap,
void * p,
size_t newcount,
size_t size 
)
+
+ +
+
+ +

◆ mi_heap_recalloc_aligned()

+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
void* mi_heap_recalloc_aligned (mi_heap_theap,
void * p,
size_t newcount,
size_t size,
size_t alignment 
)
+
+ +
+
+ +

◆ mi_heap_recalloc_aligned_at()

+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
void* mi_heap_recalloc_aligned_at (mi_heap_theap,
void * p,
size_t newcount,
size_t size,
size_t alignment,
size_t offset 
)
+
+ +
+
+ +

◆ mi_heap_rezalloc()

+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + +
void* mi_heap_rezalloc (mi_heap_theap,
void * p,
size_t newsize 
)
+
+ +
+
+ +

◆ mi_heap_rezalloc_aligned()

+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
void* mi_heap_rezalloc_aligned (mi_heap_theap,
void * p,
size_t newsize,
size_t alignment 
)
+
+ +
+
+ +

◆ mi_heap_rezalloc_aligned_at()

+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
void* mi_heap_rezalloc_aligned_at (mi_heap_theap,
void * p,
size_t newsize,
size_t alignment,
size_t offset 
)
+
+ +
+
+ +

◆ mi_recalloc_aligned()

+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
void* mi_recalloc_aligned (void * p,
size_t newcount,
size_t size,
size_t alignment 
)
+
+ +
+
+ +

◆ mi_recalloc_aligned_at()

+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
void* mi_recalloc_aligned_at (void * p,
size_t newcount,
size_t size,
size_t alignment,
size_t offset 
)
+
+ +
+
+ +

◆ mi_rezalloc()

+ +
+
+ + + + + + + + + + + + + + + + + + +
void* mi_rezalloc (void * p,
size_t newsize 
)
+
+ +
+
+ +

◆ mi_rezalloc_aligned()

+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + +
void* mi_rezalloc_aligned (void * p,
size_t newsize,
size_t alignment 
)
+
+ +
+
+ +

◆ mi_rezalloc_aligned_at()

+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
void* mi_rezalloc_aligned_at (void * p,
size_t newsize,
size_t alignment,
size_t offset 
)
+
+ +
+
+
+
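A small sketch of the zero-initialized re-allocation pattern described above: the initial allocation is zero-initialized, so the re-allocation keeps that guarantee for the newly added bytes.

```
#include <mimalloc.h>
#include <cstddef>
#include <cassert>

int main() {
  char* p = (char*)mi_zalloc(100);   // zero-initialized allocation
  p = (char*)mi_rezalloc(p, 200);    // grow; bytes 100..199 are zero-initialized as well
  assert(p != NULL && p[150] == 0);
  mi_free(p);
  return 0;
}
```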
+ + + + diff --git a/docs/group__zeroinit.js b/docs/group__zeroinit.js new file mode 100644 index 00000000..b9297d21 --- /dev/null +++ b/docs/group__zeroinit.js @@ -0,0 +1,14 @@ +var group__zeroinit = +[ + [ "mi_heap_recalloc", "group__zeroinit.html#ga8648c5fbb22a80f0262859099f06dfbd", null ], + [ "mi_heap_recalloc_aligned", "group__zeroinit.html#ga9f3f999396c8f77ca5e80e7b40ac29e3", null ], + [ "mi_heap_recalloc_aligned_at", "group__zeroinit.html#ga496452c96f1de8c500be9fddf52edaf7", null ], + [ "mi_heap_rezalloc", "group__zeroinit.html#gacfad83f14eb5d6a42a497a898e19fc76", null ], + [ "mi_heap_rezalloc_aligned", "group__zeroinit.html#ga375fa8a611c51905e592d5d467c49664", null ], + [ "mi_heap_rezalloc_aligned_at", "group__zeroinit.html#gac90da54fa7e5d10bdc97ce0b51dce2eb", null ], + [ "mi_recalloc_aligned", "group__zeroinit.html#ga3e7e5c291acf1c7fd7ffd9914a9f945f", null ], + [ "mi_recalloc_aligned_at", "group__zeroinit.html#ga4ff5e92ad73585418a072c9d059e5cf9", null ], + [ "mi_rezalloc", "group__zeroinit.html#ga8c292e142110229a2980b37ab036dbc6", null ], + [ "mi_rezalloc_aligned", "group__zeroinit.html#gacd71a7bce96aab38ae6de17af2eb2cf0", null ], + [ "mi_rezalloc_aligned_at", "group__zeroinit.html#gae8b358c417e61d5307da002702b0a8e1", null ] +]; \ No newline at end of file diff --git a/docs/index.html b/docs/index.html index bf758c3c..0efc9c09 100644 --- a/docs/index.html +++ b/docs/index.html @@ -37,7 +37,7 @@ Logo
mi-malloc -  1.0 +  1.4
@@ -105,7 +105,7 @@ $(document).ready(function(){initNavTree('index.html','');});

This is the API documentation of the mimalloc allocator (pronounced "me-malloc") – a general purpose allocator with excellent performance characteristics. Initially developed by Daan Leijen for the run-time systems of the Koka and Lean languages.

It is a drop-in replacement for malloc and can be used in other programs without code changes, for example, on Unix you can use it as:

> LD_PRELOAD=/usr/bin/libmimalloc.so myprogram

Notable aspects of the design include:

    -
  • small and consistent: the library is less than 3500 LOC using simple and consistent data structures. This makes it very suitable to integrate and adapt in other projects. For runtime systems it provides hooks for a monotonic heartbeat and deferred freeing (for bounded worst-case times with reference counting).
  • +
  • small and consistent: the library is less than 6k LOC using simple and consistent data structures. This makes it very suitable to integrate and adapt in other projects. For runtime systems it provides hooks for a monotonic heartbeat and deferred freeing (for bounded worst-case times with reference counting).
  • free list sharding: the big idea: instead of one big free list (per size class) we have many smaller lists per memory "page" which both reduces fragmentation and increases locality – things that are allocated close in time get allocated close in memory. (A memory "page" in mimalloc contains blocks of one size class and is usually 64KiB on a 64-bit system).
  • eager page reset: when a "page" becomes empty (with increased chance due to free list sharding) the memory is marked to the OS as unused ("reset" or "purged") reducing (real) memory pressure and fragmentation, especially in long running programs.
  • secure: mimalloc can be built in secure mode, adding guard pages, randomized allocation, encrypted free lists, etc. to protect against various heap vulnerabilities. The performance penalty is only around 3% on average over our benchmarks.
  • diff --git a/docs/mimalloc-doc_8h_source.html b/docs/mimalloc-doc_8h_source.html index 3a235533..c240f151 100644 --- a/docs/mimalloc-doc_8h_source.html +++ b/docs/mimalloc-doc_8h_source.html @@ -37,7 +37,7 @@ Logo
    mi-malloc -  1.0 +  1.4
    @@ -102,35 +102,30 @@ $(document).ready(function(){initNavTree('mimalloc-doc_8h_source.html','');});
    mimalloc-doc.h
-
1 /* ----------------------------------------------------------------------------
2 Copyright (c) 2018, Microsoft Research, Daan Leijen
3 This is free software; you can redistribute it and/or modify it under the
4 terms of the MIT license. A copy of the license can be found in the file
5 "LICENSE" at the root of this distribution.
6 -----------------------------------------------------------------------------*/
7 
8 #error "documentation file only!"
9 
10 
81 
85 
89 void mi_free(void* p);
90 
95 void* mi_malloc(size_t size);
96 
101 void* mi_zalloc(size_t size);
102 
112 void* mi_calloc(size_t count, size_t size);
113 
126 void* mi_realloc(void* p, size_t newsize);
127 
138 void* mi_recalloc(void* p, size_t count, size_t size);
139 
153 void* mi_expand(void* p, size_t newsize);
154 
164 void* mi_mallocn(size_t count, size_t size);
165 
175 void* mi_reallocn(void* p, size_t count, size_t size);
176 
193 void* mi_reallocf(void* p, size_t newsize);
194 
195 
204 char* mi_strdup(const char* s);
205 
215 char* mi_strndup(const char* s, size_t n);
216 
229 char* mi_realpath(const char* fname, char* resolved_name);
230 
232 
233 // ------------------------------------------------------
234 // Extended functionality
235 // ------------------------------------------------------
236 
240 
243 #define MI_SMALL_SIZE_MAX (128*sizeof(void*))
244 
252 void* mi_malloc_small(size_t size);
253 
261 void* mi_zalloc_small(size_t size);
262 
277 size_t mi_usable_size(void* p);
278 
288 size_t mi_good_size(size_t size);
289 
297 void mi_collect(bool force);
298 
303 void mi_stats_print(mi_output_fun* out);
304 
306 void mi_stats_reset(void);
307 
309 void mi_stats_merge(void);
310 
314 void mi_thread_init(void);
315 
320 void mi_thread_done(void);
321 
327 
333 typedef void (mi_deferred_free_fun)(bool force, unsigned long long heartbeat);
334 
350 
355 typedef void (mi_output_fun)(const char* msg);
356 
362 void mi_register_output(mi_output_fun* out) mi_attr_noexcept;
363 
368 bool mi_is_in_heap_region(const void* p);
369 
381 int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved);
382 
387 bool mi_is_redirected();
388 
389 
391 
392 // ------------------------------------------------------
393 // Aligned allocation
394 // ------------------------------------------------------
395 
401 
414 void* mi_malloc_aligned(size_t size, size_t alignment);
415 void* mi_zalloc_aligned(size_t size, size_t alignment);
416 void* mi_calloc_aligned(size_t count, size_t size, size_t alignment);
417 void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment);
418 
429 void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset);
430 void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset);
431 void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset);
432 void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset);
433 
435 
441 
446 struct mi_heap_s;
447 
452 typedef struct mi_heap_s mi_heap_t;
453 
456 
464 void mi_heap_delete(mi_heap_t* heap);
465 
473 void mi_heap_destroy(mi_heap_t* heap);
474 
479 
483 
490 
492 void mi_heap_collect(mi_heap_t* heap, bool force);
493 
496 void* mi_heap_malloc(mi_heap_t* heap, size_t size);
497 
501 void* mi_heap_malloc_small(mi_heap_t* heap, size_t size);
502 
505 void* mi_heap_zalloc(mi_heap_t* heap, size_t size);
506 
509 void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size);
510 
513 void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size);
514 
517 char* mi_heap_strdup(mi_heap_t* heap, const char* s);
518 
521 char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n);
522 
525 char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name);
526 
527 void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize);
528 void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size);
529 void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize);
530 
531 void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment);
532 void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset);
533 void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment);
534 void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset);
535 void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment);
536 void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset);
537 void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment);
538 void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset);
539 
541 
542 
551 
552 void* mi_rezalloc(void* p, size_t newsize);
553 void* mi_recalloc(void* p, size_t newcount, size_t size) ;
554 
555 void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment);
556 void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset);
557 void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment);
558 void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset);
559 
560 void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize);
561 void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t newcount, size_t size);
562 
563 void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment);
564 void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset);
565 void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment);
566 void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset);
567 
569 
575 
587 #define mi_malloc_tp(tp) ((tp*)mi_malloc(sizeof(tp)))
588 
590 #define mi_zalloc_tp(tp) ((tp*)mi_zalloc(sizeof(tp)))
591 
593 #define mi_calloc_tp(tp,count) ((tp*)mi_calloc(count,sizeof(tp)))
594 
596 #define mi_mallocn_tp(tp,count) ((tp*)mi_mallocn(count,sizeof(tp)))
597 
599 #define mi_reallocn_tp(p,tp,count) ((tp*)mi_reallocn(p,count,sizeof(tp)))
600 
602 #define mi_heap_malloc_tp(hp,tp) ((tp*)mi_heap_malloc(hp,sizeof(tp)))
603 
605 #define mi_heap_zalloc_tp(hp,tp) ((tp*)mi_heap_zalloc(hp,sizeof(tp)))
606 
608 #define mi_heap_calloc_tp(hp,tp,count) ((tp*)mi_heap_calloc(hp,count,sizeof(tp)))
609 
611 #define mi_heap_mallocn_tp(hp,tp,count) ((tp*)mi_heap_mallocn(hp,count,sizeof(tp)))
612 
614 #define mi_heap_reallocn_tp(hp,p,tp,count) ((tp*)mi_heap_reallocn(p,count,sizeof(tp)))
615 
617 #define mi_heap_recalloc_tp(hp,p,tp,count) ((tp*)mi_heap_recalloc(p,count,sizeof(tp)))
618 
620 
626 
633 bool mi_heap_contains_block(mi_heap_t* heap, const void* p);
634 
643 bool mi_heap_check_owned(mi_heap_t* heap, const void* p);
644 
652 bool mi_check_owned(const void* p);
653 
656 typedef struct mi_heap_area_s {
657  void* blocks;
658  size_t reserved;
659  size_t committed;
660  size_t used;
661  size_t block_size;
663 
671 typedef bool (mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg);
672 
684 bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg);
685 
687 
693 
695 typedef enum mi_option_e {
696  // stable options
700  // the following options are experimental
713 } mi_option_t;
714 
715 
716 bool mi_option_enabled(mi_option_t option);
717 void mi_option_enable(mi_option_t option, bool enable);
718 void mi_option_enable_default(mi_option_t option, bool enable);
719 
720 long mi_option_get(mi_option_t option);
721 void mi_option_set(mi_option_t option, long value);
722 void mi_option_set_default(mi_option_t option, long value);
723 
724 
726 
733 
734 void* mi_recalloc(void* p, size_t count, size_t size);
735 size_t mi_malloc_size(const void* p);
736 size_t mi_malloc_usable_size(const void *p);
737 
739 void mi_cfree(void* p);
740 
741 int mi_posix_memalign(void** p, size_t alignment, size_t size);
742 int mi__posix_memalign(void** p, size_t alignment, size_t size);
743 void* mi_memalign(size_t alignment, size_t size);
744 void* mi_valloc(size_t size);
745 
746 void* mi_pvalloc(size_t size);
747 void* mi_aligned_alloc(size_t alignment, size_t size);
748 void* mi_reallocarray(void* p, size_t count, size_t size);
749 
750 void mi_free_size(void* p, size_t size);
751 void mi_free_size_aligned(void* p, size_t size, size_t alignment);
752 void mi_free_aligned(void* p, size_t alignment);
753 
755 void* mi_new(std::size_t n) noexcept(false);
756 
758 void* mi_new_aligned(std::size_t n, std::align_val_t alignment) noexcept(false);
759 
761 void* mi_new_nothrow(size_t n);
762 
764 void* mi_new_aligned_nothrow(size_t n, size_t alignment);
765 
767 
void mi_option_enable_default(mi_option_t option, bool enable)
+
1 /* ----------------------------------------------------------------------------
2 Copyright (c) 2018, Microsoft Research, Daan Leijen
3 This is free software; you can redistribute it and/or modify it under the
4 terms of the MIT license. A copy of the license can be found in the file
5 "LICENSE" at the root of this distribution.
6 -----------------------------------------------------------------------------*/
7 
8 #error "documentation file only!"
9 
10 
81 
85 
89 void mi_free(void* p);
90 
95 void* mi_malloc(size_t size);
96 
101 void* mi_zalloc(size_t size);
102 
112 void* mi_calloc(size_t count, size_t size);
113 
126 void* mi_realloc(void* p, size_t newsize);
127 
138 void* mi_recalloc(void* p, size_t count, size_t size);
139 
153 void* mi_expand(void* p, size_t newsize);
154 
164 void* mi_mallocn(size_t count, size_t size);
165 
175 void* mi_reallocn(void* p, size_t count, size_t size);
176 
193 void* mi_reallocf(void* p, size_t newsize);
194 
195 
204 char* mi_strdup(const char* s);
205 
215 char* mi_strndup(const char* s, size_t n);
216 
229 char* mi_realpath(const char* fname, char* resolved_name);
230 
232 
233 // ------------------------------------------------------
234 // Extended functionality
235 // ------------------------------------------------------
236 
240 
243 #define MI_SMALL_SIZE_MAX (128*sizeof(void*))
244 
252 void* mi_malloc_small(size_t size);
253 
261 void* mi_zalloc_small(size_t size);
262 
277 size_t mi_usable_size(void* p);
278 
288 size_t mi_good_size(size_t size);
289 
297 void mi_collect(bool force);
298 
303 void mi_stats_print(void* out);
304 
310 void mi_stats_print(mi_output_fun* out, void* arg);
311 
313 void mi_stats_reset(void);
314 
316 void mi_stats_merge(void);
317 
321 void mi_thread_init(void);
322 
327 void mi_thread_done(void);
328 
334 void mi_thread_stats_print_out(mi_output_fun* out, void* arg);
335 
342 typedef void (mi_deferred_free_fun)(bool force, unsigned long long heartbeat, void* arg);
343 
359 void mi_register_deferred_free(mi_deferred_free_fun* deferred_free, void* arg);
360 
366 typedef void (mi_output_fun)(const char* msg, void* arg);
367 
374 void mi_register_output(mi_output_fun* out, void* arg);
375 
380 bool mi_is_in_heap_region(const void* p);
381 
382 
395 int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs);
396 
409 int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs);
410 
411 
416 bool mi_is_redirected();
417 
418 
420 
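// A usage sketch for the registration functions above (function and callback names are
// illustrative; the arg pointer is passed back to the callbacks as declared):
#include <stdio.h>
#include <stdbool.h>
#include <mimalloc.h>

static void my_output(const char* msg, void* arg) {
  fputs(msg, (FILE*)arg);                    // route mimalloc messages to the given stream
}

static void my_deferred_free(bool force, unsigned long long heartbeat, void* arg) {
  (void)force; (void)heartbeat; (void)arg;   // application-specific deferred frees go here
}

int main(void) {
  mi_register_output(&my_output, stderr);
  mi_register_deferred_free(&my_deferred_free, NULL);
  mi_stats_print(NULL);                      // statistics are now written through my_output
  return 0;
}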
421 // ------------------------------------------------------
422 // Aligned allocation
423 // ------------------------------------------------------
424 
430 
443 void* mi_malloc_aligned(size_t size, size_t alignment);
444 void* mi_zalloc_aligned(size_t size, size_t alignment);
445 void* mi_calloc_aligned(size_t count, size_t size, size_t alignment);
446 void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment);
447 
458 void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset);
459 void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset);
460 void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset);
461 void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset);
462 
464 
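// A small sketch of the aligned allocation functions above (illustrative; alignment is
// assumed to be a power of two and error checks are omitted):
#include <mimalloc.h>

void aligned_example(void) {
  double* a = (double*)mi_malloc_aligned(100 * sizeof(double), 64);  // 64-byte aligned block
  void*   b = mi_malloc_aligned_at(256, 64, 16);                     // (pointer + 16) is 64-byte aligned
  mi_free(a);
  mi_free(b);
}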
470 
475 struct mi_heap_s;
476 
481 typedef struct mi_heap_s mi_heap_t;
482 
485 
493 void mi_heap_delete(mi_heap_t* heap);
494 
502 void mi_heap_destroy(mi_heap_t* heap);
503 
508 
512 
519 
521 void mi_heap_collect(mi_heap_t* heap, bool force);
522 
525 void* mi_heap_malloc(mi_heap_t* heap, size_t size);
526 
530 void* mi_heap_malloc_small(mi_heap_t* heap, size_t size);
531 
534 void* mi_heap_zalloc(mi_heap_t* heap, size_t size);
535 
538 void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size);
539 
542 void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size);
543 
546 char* mi_heap_strdup(mi_heap_t* heap, const char* s);
547 
550 char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n);
551 
554 char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name);
555 
556 void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize);
557 void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size);
558 void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize);
559 
560 void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment);
561 void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset);
562 void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment);
563 void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset);
564 void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment);
565 void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset);
566 void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment);
567 void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset);
568 
570 
571 
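// A usage sketch for the first-class heap functions above (illustrative names):
#include <mimalloc.h>

void heap_example(void) {
  mi_heap_t* h = mi_heap_new();                  // create a first-class heap
  char* s = mi_heap_strdup(h, "per-heap data");  // allocate inside this heap
  int*  v = (int*)mi_heap_mallocn(h, 100, sizeof(int));
  (void)s; (void)v;                              // ... use the allocations ...
  mi_heap_destroy(h);                            // frees all blocks still allocated in the heap
}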
580 
581 void* mi_rezalloc(void* p, size_t newsize);
582 void* mi_recalloc(void* p, size_t newcount, size_t size) ;
583 
584 void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment);
585 void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset);
586 void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment);
587 void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset);
588 
589 void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize);
590 void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t newcount, size_t size);
591 
592 void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment);
593 void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset);
594 void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment);
595 void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset);
596 
598 
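// A sketch of the zero-initialized re-allocation functions above (illustrative; the
// newly added memory is zero initialized, error checks omitted):
#include <mimalloc.h>

void zeroinit_example(void) {
  int* a = (int*)mi_calloc(10, sizeof(int));   // 10 zero-initialized ints
  a = (int*)mi_recalloc(a, 20, sizeof(int));   // grow to 20 ints; the extra 10 are zeroed
  void* b = mi_zalloc(64);
  b = mi_rezalloc(b, 128);                     // grow to 128 bytes; the extra bytes are zeroed
  mi_free(a);
  mi_free(b);
}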
604 
616 #define mi_malloc_tp(tp) ((tp*)mi_malloc(sizeof(tp)))
617 
619 #define mi_zalloc_tp(tp) ((tp*)mi_zalloc(sizeof(tp)))
620 
622 #define mi_calloc_tp(tp,count) ((tp*)mi_calloc(count,sizeof(tp)))
623 
625 #define mi_mallocn_tp(tp,count) ((tp*)mi_mallocn(count,sizeof(tp)))
626 
628 #define mi_reallocn_tp(p,tp,count) ((tp*)mi_reallocn(p,count,sizeof(tp)))
629 
631 #define mi_heap_malloc_tp(hp,tp) ((tp*)mi_heap_malloc(hp,sizeof(tp)))
632 
634 #define mi_heap_zalloc_tp(hp,tp) ((tp*)mi_heap_zalloc(hp,sizeof(tp)))
635 
637 #define mi_heap_calloc_tp(hp,tp,count) ((tp*)mi_heap_calloc(hp,count,sizeof(tp)))
638 
640 #define mi_heap_mallocn_tp(hp,tp,count) ((tp*)mi_heap_mallocn(hp,count,sizeof(tp)))
641 
643 #define mi_heap_reallocn_tp(hp,p,tp,count) ((tp*)mi_heap_reallocn(p,count,sizeof(tp)))
644 
646 #define mi_heap_recalloc_tp(hp,p,tp,count) ((tp*)mi_heap_recalloc(p,count,sizeof(tp)))
647 
649 
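// A sketch of the typed allocation macros above, which avoid repeating the sizeof
// expression and the cast (point_t is an illustrative type):
#include <mimalloc.h>

typedef struct point_s { double x, y; } point_t;

void typed_example(void) {
  point_t* p  = mi_malloc_tp(point_t);         // a single point_t, uninitialized
  point_t* ps = mi_mallocn_tp(point_t, 16);    // an array of 16 point_t
  p->x = 1.0; p->y = 2.0;
  mi_free(ps);
  mi_free(p);
}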
655 
662 bool mi_heap_contains_block(mi_heap_t* heap, const void* p);
663 
672 bool mi_heap_check_owned(mi_heap_t* heap, const void* p);
673 
681 bool mi_check_owned(const void* p);
682 
685 typedef struct mi_heap_area_s {
686  void* blocks;
687  size_t reserved;
688  size_t committed;
689  size_t used;
690  size_t block_size;
692 
700 typedef bool (mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg);
701 
713 bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg);
714 
716 
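// A sketch of visiting all blocks in a heap with the analysis functions above
// (illustrative; returning true from the visitor is assumed to continue the visit):
#include <stdio.h>
#include <stdbool.h>
#include <mimalloc.h>

static bool count_blocks(const mi_heap_t* heap, const mi_heap_area_t* area,
                         void* block, size_t block_size, void* arg) {
  (void)heap; (void)area; (void)block_size;
  if (block != NULL) (*(size_t*)arg)++;    // count each allocated block
  return true;                             // keep visiting
}

void analysis_example(void) {
  size_t count = 0;
  mi_heap_visit_blocks(mi_heap_get_default(), true /* visit all blocks */, &count_blocks, &count);
  printf("blocks in the default heap: %zu\n", count);
}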
722 
724 typedef enum mi_option_e {
725  // stable options
729  // the following options are experimental
743 } mi_option_t;
744 
745 
746 bool mi_option_enabled(mi_option_t option);
747 void mi_option_enable(mi_option_t option, bool enable);
748 void mi_option_enable_default(mi_option_t option, bool enable);
749 
750 long mi_option_get(mi_option_t option);
751 void mi_option_set(mi_option_t option, long value);
752 void mi_option_set_default(mi_option_t option, long value);
753 
754 
756 
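// A sketch of adjusting runtime options with the functions as declared above
// (the option values shown are illustrative; options can also be set through
// environment variables such as MIMALLOC_VERBOSE):
#include <stdbool.h>
#include <mimalloc.h>

void options_example(void) {
  mi_option_enable(mi_option_show_stats, true);  // print statistics when the program is done
  mi_option_set(mi_option_reset_delay, 250);     // delay in milliseconds before resetting a free page
  if (mi_option_enabled(mi_option_verbose)) {
    // verbose output was requested (for example via MIMALLOC_VERBOSE=1)
  }
}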
763 
764 void* mi_recalloc(void* p, size_t count, size_t size);
765 size_t mi_malloc_size(const void* p);
766 size_t mi_malloc_usable_size(const void *p);
767 
769 void mi_cfree(void* p);
770 
771 int mi_posix_memalign(void** p, size_t alignment, size_t size);
772 int mi__posix_memalign(void** p, size_t alignment, size_t size);
773 void* mi_memalign(size_t alignment, size_t size);
774 void* mi_valloc(size_t size);
775 
776 void* mi_pvalloc(size_t size);
777 void* mi_aligned_alloc(size_t alignment, size_t size);
778 void* mi_reallocarray(void* p, size_t count, size_t size);
779 
780 void mi_free_size(void* p, size_t size);
781 void mi_free_size_aligned(void* p, size_t size, size_t alignment);
782 void mi_free_aligned(void* p, size_t alignment);
783 
785 void* mi_new(std::size_t n) noexcept(false);
786 
788 void* mi_new_aligned(std::size_t n, std::align_val_t alignment) noexcept(false);
789 
791 void* mi_new_nothrow(size_t n);
792 
794 void* mi_new_aligned_nothrow(size_t n, size_t alignment);
795 
797 
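// A sketch of the posix compatibility functions above, which mirror the standard
// calls so that overriding is straightforward (illustrative, minimal error checks):
#include <stdio.h>
#include <mimalloc.h>

void posix_example(void) {
  void* p = NULL;
  if (mi_posix_memalign(&p, 64, 1000) == 0) {   // like posix_memalign, 64-byte aligned
    printf("usable size: %zu\n", mi_malloc_usable_size(p));
    mi_free(p);
  }
  void* q = mi_aligned_alloc(32, 256);          // like C11 aligned_alloc
  mi_free(q);
}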
void mi_option_enable_default(mi_option_t option, bool enable)
size_t mi_usable_size(void *p)
Return the available bytes in a memory block.
void * mi_reallocn(void *p, size_t count, size_t size)
Re-allocate memory to count elements of size bytes.
void * mi_malloc_aligned(size_t size, size_t alignment)
Allocate size bytes aligned by alignment.
void * mi_recalloc_aligned_at(void *p, size_t newcount, size_t size, size_t alignment, size_t offset)
-
void mi_stats_print(mi_output_fun *out)
Print the main statistics.
void mi_stats_reset(void)
Reset statistics.
void * mi_heap_realloc_aligned(mi_heap_t *heap, void *p, size_t newsize, size_t alignment)
void * mi_recalloc(void *p, size_t count, size_t size)
Re-allocate memory to count elements of size bytes, with extra memory initialized to zero.
void * mi_mallocn(size_t count, size_t size)
Allocate count elements of size bytes.
size_t mi_malloc_size(const void *p)
-
Reset segment memory when a segment is cached.
Definition: mimalloc-doc.h:707
int mi_posix_memalign(void **p, size_t alignment, size_t size)
void mi_stats_merge(void)
Merge thread local statistics with the main statistics and reset.
-
void() mi_output_fun(const char *msg)
Type of output functions.
Definition: mimalloc-doc.h:355
-
void mi_register_output(mi_output_fun *out) mi_attr_noexcept
Register an output function.
void mi_option_set_default(mi_option_t option, long value)
void * mi_new_aligned(std::size_t n, std::align_val_t alignment) noexcept(false)
raise std::bad_alloc exception on failure.
void * mi_rezalloc(void *p, size_t newsize)
-
Eagerly commit segments (4MiB) (enabled by default).
Definition: mimalloc-doc.h:701
+
Eagerly commit segments (4MiB) (enabled by default).
Definition: mimalloc-doc.h:730
void * mi_heap_zalloc(mi_heap_t *heap, size_t size)
Allocate zero-initialized in a specific heap.
void mi_option_set(mi_option_t option, long value)
-
void mi_register_deferred_free(mi_deferred_free_fun *deferred_free)
Register a deferred free function.
-
Eagerly commit large (256MiB) memory regions (enabled by default, except on Windows)
Definition: mimalloc-doc.h:702
+
Eagerly commit large (256MiB) memory regions (enabled by default, except on Windows)
Definition: mimalloc-doc.h:731
void mi_cfree(void *p)
Just as free but also checks if the pointer p belongs to our heap.
void * mi_recalloc_aligned(void *p, size_t newcount, size_t size, size_t alignment)
-
Definition: mimalloc-doc.h:712
+
Definition: mimalloc-doc.h:742
void * mi_realloc_aligned_at(void *p, size_t newsize, size_t alignment, size_t offset)
-
void * blocks
start of the area containing heap blocks
Definition: mimalloc-doc.h:657
+
void * blocks
start of the area containing heap blocks
Definition: mimalloc-doc.h:686
void * mi_realloc_aligned(void *p, size_t newsize, size_t alignment)
int mi__posix_memalign(void **p, size_t alignment, size_t size)
void mi_free(void *p)
Free previously allocated memory.
@@ -146,35 +141,36 @@ $(document).ready(function(){initNavTree('mimalloc-doc_8h_source.html','');});
void * mi_heap_rezalloc_aligned_at(mi_heap_t *heap, void *p, size_t newsize, size_t alignment, size_t offset)
void * mi_zalloc(size_t size)
Allocate zero-initialized size bytes.
void * mi_heap_rezalloc(mi_heap_t *heap, void *p, size_t newsize)
-
The number of segments per thread to keep cached.
Definition: mimalloc-doc.h:705
+
The number of segments per thread to keep cached.
Definition: mimalloc-doc.h:734
void * mi_heap_calloc(mi_heap_t *heap, size_t count, size_t size)
Allocate count zero-initialized elements in a specific heap.
void * mi_new(std::size_t n) noexcept(false)
raise std::bad_alloc exception on failure.
void * mi_heap_calloc_aligned(mi_heap_t *heap, size_t count, size_t size, size_t alignment)
bool mi_is_redirected()
Is the C runtime malloc API redirected?
-
size_t block_size
size in bytes of one block
Definition: mimalloc-doc.h:661
+
size_t block_size
size in bytes of one block
Definition: mimalloc-doc.h:690
void * mi_reallocarray(void *p, size_t count, size_t size)
+
int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs)
Reserve pages of huge OS pages (1GiB) evenly divided over numa_nodes nodes, but stops after at most t...
+
void() mi_deferred_free_fun(bool force, unsigned long long heartbeat, void *arg)
Type of deferred free functions.
Definition: mimalloc-doc.h:342
bool mi_is_in_heap_region(const void *p)
Is a pointer part of our heap?
void mi_option_enable(mi_option_t option, bool enable)
void * mi_realloc(void *p, size_t newsize)
Re-allocate memory to newsize bytes.
-
The number of huge OS pages (1GiB in size) to reserve at the start of the program.
Definition: mimalloc-doc.h:704
-
int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t *pages_reserved)
Reserve pages of huge OS pages (1GiB) but stops after at most max_secs seconds.
+
The number of huge OS pages (1GiB in size) to reserve at the start of the program.
Definition: mimalloc-doc.h:733
void * mi_heap_reallocf(mi_heap_t *heap, void *p, size_t newsize)
void mi_free_size_aligned(void *p, size_t size, size_t alignment)
void * mi_rezalloc_aligned_at(void *p, size_t newsize, size_t alignment, size_t offset)
-
Reset page memory when it becomes free.
Definition: mimalloc-doc.h:706
+
Reset page memory after mi_option_reset_delay milliseconds when it becomes free.
Definition: mimalloc-doc.h:735
void mi_thread_done(void)
Uninitialize mimalloc on a thread.
bool mi_heap_visit_blocks(const mi_heap_t *heap, bool visit_all_blocks, mi_block_visit_fun *visitor, void *arg)
Visit all areas and blocks in a heap.
-
void mi_thread_stats_print(mi_output_fun *out)
Print out heap statistics for this thread.
+
Pretend there are at most N NUMA nodes.
Definition: mimalloc-doc.h:738
void * mi_malloc(size_t size)
Allocate size bytes.
bool mi_option_enabled(mi_option_t option)
-
Experimental.
Definition: mimalloc-doc.h:708
+
Experimental.
Definition: mimalloc-doc.h:739
char * mi_heap_strndup(mi_heap_t *heap, const char *s, size_t n)
Duplicate a string of at most length n in a specific heap.
-
bool() mi_block_visit_fun(const mi_heap_t *heap, const mi_heap_area_t *area, void *block, size_t block_size, void *arg)
Visitor function passed to mi_heap_visit_blocks()
Definition: mimalloc-doc.h:671
+
bool() mi_block_visit_fun(const mi_heap_t *heap, const mi_heap_area_t *area, void *block, size_t block_size, void *arg)
Visitor function passed to mi_heap_visit_blocks()
Definition: mimalloc-doc.h:700
void * mi_heap_recalloc(mi_heap_t *heap, void *p, size_t newcount, size_t size)
void * mi_heap_malloc_aligned_at(mi_heap_t *heap, size_t size, size_t alignment, size_t offset)
char * mi_realpath(const char *fname, char *resolved_name)
Resolve a file path name.
-
Print error messages to stderr.
Definition: mimalloc-doc.h:698
-
Experimental.
Definition: mimalloc-doc.h:710
+
Print error messages to stderr.
Definition: mimalloc-doc.h:727
+
Experimental.
Definition: mimalloc-doc.h:736
void * mi_heap_rezalloc_aligned(mi_heap_t *heap, void *p, size_t newsize, size_t alignment)
void * mi_memalign(size_t alignment, size_t size)
void * mi_new_aligned_nothrow(size_t n, size_t alignment)
return NULL on failure.
@@ -183,35 +179,40 @@ $(document).ready(function(){initNavTree('mimalloc-doc_8h_source.html','');});
bool mi_heap_contains_block(mi_heap_t *heap, const void *p)
Does a heap contain a pointer to a previously allocated block?
void mi_heap_collect(mi_heap_t *heap, bool force)
Release outstanding resources in a specific heap.
void * mi_heap_recalloc_aligned_at(mi_heap_t *heap, void *p, size_t newcount, size_t size, size_t alignment, size_t offset)
-
Print verbose messages to stderr.
Definition: mimalloc-doc.h:699
+
Print verbose messages to stderr.
Definition: mimalloc-doc.h:728
void * mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset)
void * mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset)
Allocate size bytes aligned by alignment at a specified offset.
void mi_heap_delete(mi_heap_t *heap)
Delete a previously allocated heap.
-
OS tag to assign to mimalloc'd memory.
Definition: mimalloc-doc.h:711
+
OS tag to assign to mimalloc'd memory.
Definition: mimalloc-doc.h:741
mi_heap_t * mi_heap_get_default()
Get the default heap that is used for mi_malloc() et al.
+
int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs)
Reserve pages of huge OS pages (1GiB) at a specific numa_node, but stops after at most timeout_msecs ...
void * mi_aligned_alloc(size_t alignment, size_t size)
void * mi_valloc(size_t size)
void mi_thread_init(void)
Initialize mimalloc on a thread.
size_t mi_good_size(size_t size)
Return the used allocation size.
-
Experimental.
Definition: mimalloc-doc.h:709
+
void mi_stats_print(void *out)
Print the main statistics.
+
Experimental.
Definition: mimalloc-doc.h:740
void * mi_heap_recalloc_aligned(mi_heap_t *heap, void *p, size_t newcount, size_t size, size_t alignment)
void * mi_heap_mallocn(mi_heap_t *heap, size_t count, size_t size)
Allocate count elements in a specific heap.
-
An area of heap space contains blocks of a single size.
Definition: mimalloc-doc.h:656
-
Print statistics to stderr when the program is done.
Definition: mimalloc-doc.h:697
+
An area of heap space contains blocks of a single size.
Definition: mimalloc-doc.h:685
+
void mi_thread_stats_print_out(mi_output_fun *out, void *arg)
Print out heap statistics for this thread.
+
Print statistics to stderr when the program is done.
Definition: mimalloc-doc.h:726
void * mi_zalloc_aligned(size_t size, size_t alignment)
-
size_t reserved
bytes reserved for this area
Definition: mimalloc-doc.h:658
-
struct mi_heap_s mi_heap_t
Type of first-class heaps.
Definition: mimalloc-doc.h:452
-
size_t used
bytes in use by allocated blocks
Definition: mimalloc-doc.h:660
-
void() mi_deferred_free_fun(bool force, unsigned long long heartbeat)
Type of deferred free functions.
Definition: mimalloc-doc.h:333
+
size_t reserved
bytes reserved for this area
Definition: mimalloc-doc.h:687
+
struct mi_heap_s mi_heap_t
Type of first-class heaps.
Definition: mimalloc-doc.h:481
+
size_t used
bytes in use by allocated blocks
Definition: mimalloc-doc.h:689
+
void mi_register_deferred_free(mi_deferred_free_fun *deferred_free, void *arg)
Register a deferred free function.
void mi_free_size(void *p, size_t size)
void mi_collect(bool force)
Eagerly free memory.
void mi_heap_destroy(mi_heap_t *heap)
Destroy a heap, freeing all its still allocated blocks.
void * mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset)
-
Use large OS pages (2MiB in size) if possible.
Definition: mimalloc-doc.h:703
+
Use large OS pages (2MiB in size) if possible.
Definition: mimalloc-doc.h:732
void * mi_heap_reallocn(mi_heap_t *heap, void *p, size_t count, size_t size)
+
void mi_register_output(mi_output_fun *out, void *arg)
Register an output function.
void * mi_heap_malloc_small(mi_heap_t *heap, size_t size)
Allocate a small object in a specific heap.
void * mi_heap_realloc(mi_heap_t *heap, void *p, size_t newsize)
size_t mi_malloc_usable_size(const void *p)
+
void() mi_output_fun(const char *msg, void *arg)
Type of output functions.
Definition: mimalloc-doc.h:366
char * mi_strdup(const char *s)
Allocate and duplicate a string.
void * mi_heap_realloc_aligned_at(mi_heap_t *heap, void *p, size_t newsize, size_t alignment, size_t offset)
void * mi_reallocf(void *p, size_t newsize)
Re-allocate memory to newsize bytes.
@@ -223,10 +224,11 @@ $(document).ready(function(){initNavTree('mimalloc-doc_8h_source.html','');});
long mi_option_get(mi_option_t option)
mi_heap_t * mi_heap_get_backing()
Get the backing heap.
void mi_free_aligned(void *p, size_t alignment)
+
Delay in milli-seconds before resetting a page (100ms by default)
Definition: mimalloc-doc.h:737
mi_heap_t * mi_heap_new()
Create a new heap that can be used for allocation.
void * mi_heap_malloc(mi_heap_t *heap, size_t size)
Allocate in a specific heap.
-
size_t committed
current committed bytes of this area
Definition: mimalloc-doc.h:659
-
mi_option_t
Runtime options.
Definition: mimalloc-doc.h:695
+
size_t committed
current committed bytes of this area
Definition: mimalloc-doc.h:688
+
mi_option_t
Runtime options.
Definition: mimalloc-doc.h:724
bool mi_heap_check_owned(mi_heap_t *heap, const void *p)
Check safely if any pointer is part of a heap.
mi_heap_t * mi_heap_set_default(mi_heap_t *heap)
Set the default heap to use for mi_malloc() et al.
diff --git a/docs/modules.html b/docs/modules.html index ca18e1eb..0bc6036d 100644 --- a/docs/modules.html +++ b/docs/modules.html @@ -37,7 +37,7 @@ Logo
mi-malloc -  1.0 +  1.4
diff --git a/docs/navtreeindex0.js b/docs/navtreeindex0.js index 90be7d78..d1b0e072 100644 --- a/docs/navtreeindex0.js +++ b/docs/navtreeindex0.js @@ -29,25 +29,27 @@ var NAVTREEINDEX0 = "group__analysis.html#gadfa01e2900f0e5d515ad5506b26f6d65":[5,6,1], "group__analysis.html#structmi__heap__area__t":[5,6,0], "group__extended.html":[5,1], -"group__extended.html#ga089c859d9eddc5f9b4bd946cd53cebee":[5,1,17], -"group__extended.html#ga0ae4581e85453456a0d658b2b98bf7bf":[5,1,14], +"group__extended.html#ga089c859d9eddc5f9b4bd946cd53cebee":[5,1,19], +"group__extended.html#ga0ae4581e85453456a0d658b2b98bf7bf":[5,1,16], "group__extended.html#ga1ea64283508718d9d645c38efc2f4305":[5,1,0], -"group__extended.html#ga220f29f40a44404b0061c15bc1c31152":[5,1,18], -"group__extended.html#ga22213691c3ce5ab4d91b24aff1023529":[5,1,1], -"group__extended.html#ga24dc9cc6fca8daa2aa30aa8025467ce2":[5,1,8], -"group__extended.html#ga2664f36a2dd557741c429cb799f04641":[5,1,10], -"group__extended.html#ga2bed6d40b74591a67f81daea4b4a246f":[5,1,2], -"group__extended.html#ga3bb8468b8cfcc6e2a61d98aee85c5f99":[5,1,13], +"group__extended.html#ga220f29f40a44404b0061c15bc1c31152":[5,1,20], +"group__extended.html#ga256cc6f13a142deabbadd954a217e228":[5,1,14], +"group__extended.html#ga299dae78d25ce112e384a98b7309c5be":[5,1,1], +"group__extended.html#ga2d126e5c62d3badc35445e5d84166df2":[5,1,13], +"group__extended.html#ga3132f521fb756fc0e8ec0b74fb58df50":[5,1,11], +"group__extended.html#ga3460a6ca91af97be4058f523d3cb8ece":[5,1,8], +"group__extended.html#ga3bb8468b8cfcc6e2a61d98aee85c5f99":[5,1,15], "group__extended.html#ga421430e2226d7d468529cec457396756":[5,1,3], -"group__extended.html#ga489670a15d1a257ab4639e645ee4612a":[5,1,16], "group__extended.html#ga5f071b10d4df1c3658e04e7fd67a94e6":[5,1,5], "group__extended.html#ga7136c2e55cb22c98ecf95d08d6debb99":[5,1,7], -"group__extended.html#ga84a0c8b401e42eb5b1bce156852f44c5":[5,1,9], -"group__extended.html#ga854b1de8cb067c7316286c28b2fcd3d1":[5,1,11], -"group__extended.html#ga8ca07ccff283956d71f48272f4fd5c01":[5,1,12], +"group__extended.html#ga7795a13d20087447281858d2c771cca1":[5,1,10], +"group__extended.html#ga854b1de8cb067c7316286c28b2fcd3d1":[5,1,12], "group__extended.html#gaad25050b19f30cd79397b227e0157a3f":[5,1,6], +"group__extended.html#gab1dac8476c46cb9eecab767eb40c1525":[5,1,18], "group__extended.html#gac057927cd06c854b45fe7847e921bd47":[5,1,4], -"group__extended.html#gaf8e73efc2cbca9ebfdfb166983a04c17":[5,1,15], +"group__extended.html#gad823d23444a4b77a40f66bf075a98a0c":[5,1,2], +"group__extended.html#gae5b17ff027cd2150b43a33040250cf3f":[5,1,9], +"group__extended.html#gaf8e73efc2cbca9ebfdfb166983a04c17":[5,1,17], "group__heap.html":[5,3], "group__heap.html#ga00e95ba1e01acac3cfd95bb7a357a6f0":[5,3,20], "group__heap.html#ga08ca6419a5c057a4d965868998eef487":[5,3,3], @@ -99,19 +101,20 @@ var NAVTREEINDEX0 = "group__options.html#gaf84921c32375e25754dc2ee6a911fa60":[5,7,5], "group__options.html#gafebf7ed116adb38ae5218bc3ce06884c":[5,7,0], "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0957ef73b2550764b4840edf48422fda":[5,7,0,0], -"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca17a190c25be381142d87e0468c4c068c":[5,7,0,11], +"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0ac33a18f6b659fcfaf44efb0bab1b74":[5,7,0,11], +"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca154fe170131d5212cff57e22b99523c5":[5,7,0,10], +"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca17a190c25be381142d87e0468c4c068c":[5,7,0,13], 
"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca1e8de72c93da7ff22d91e1e27b52ac2b":[5,7,0,3], "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca2ecbe7ef32f5c84de3739aa4f0b805a1":[5,7,0,7], "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca32ce97ece29f69e82579679cf8a307ad":[5,7,0,4], "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca4192d491200d0055df0554d4cf65054e":[5,7,0,5], -"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca4b74ae2a69e445de6c2361b73c1d14bf":[5,7,0,13], -"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca5b4357b74be0d87568036c32eb1a2e4a":[5,7,0,14], +"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca4b74ae2a69e445de6c2361b73c1d14bf":[5,7,0,14], +"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca5b4357b74be0d87568036c32eb1a2e4a":[5,7,0,15], "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca7c8b7bf5281c581bad64f5daa6442777":[5,7,0,2], -"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cac2157a0cb79cd996c1db7d9f6a090c07":[5,7,0,9], -"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cac81ee965b130fa81238913a3c239d536":[5,7,0,10], +"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cac81ee965b130fa81238913a3c239d536":[5,7,0,12], "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caca7ed041be3b0b9d0b82432c7bf41af2":[5,7,0,6], "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cada854dd272c66342f18a93ee254a2968":[5,7,0,8], -"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafb121d30d87591850d5410ccc3a95c6d":[5,7,0,12], +"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafb121d30d87591850d5410ccc3a95c6d":[5,7,0,9], "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafbf4822e5c00732c5984b32a032837f0":[5,7,0,1], "group__posix.html":[5,8], "group__posix.html#ga06d07cf357bbac5c73ba5d0c0c421e17":[5,8,7], diff --git a/docs/overrides.html b/docs/overrides.html index 74ef9dbd..3b5d9bd3 100644 --- a/docs/overrides.html +++ b/docs/overrides.html @@ -37,7 +37,7 @@ Logo
mi-malloc -  1.0 +  1.4
@@ -118,10 +118,10 @@ $(document).ready(function(){initNavTree('overrides.html','');});

Note that certain security restrictions may apply when doing this from the shell.

Note: unfortunately, at this time, dynamic overriding on macOS appears to be broken, but it is actively being worked on (see issue #50).

Windows

-

On Windows you need to link your program explicitly with the mimalloc DLL and use the C-runtime library as a DLL (using the /MD or /MDd switch). Moreover, you need to ensure the mimalloc-redirect.dll (or mimalloc-redirect32.dll) is available in the same folder as the mimalloc DLL at runtime (as it as referred to by the mimalloc DLL). The redirection DLL's ensure all calls to the C runtime malloc API get redirected to mimalloc.

-

To ensure the mimalloc DLL is loaded at run-time it is easiest to insert some call to the mimalloc API in the main function, like mi_version() (or use the /INCLUDE:mi_version switch on the linker). See the mimalloc-override-test project for an example on how to use this.

-

The environment variable MIMALLOC_DISABLE_REDIRECT=1 can be used to disable dynamic overriding at run-time. Use MIMALLOC_VERBOSE=1 to check if mimalloc successfully redirected.

-

(Note: in principle, it should be possible to patch existing executables that are linked with the dynamic C runtime (ucrtbase.dll) by just putting the mimalloc DLL into the import table (and putting mimalloc-redirect.dll in the same folder) Such patching can be done for example with CFF Explorer).

+

Overriding on Windows is robust but requires that you link your program explicitly with the mimalloc DLL and use the C-runtime library as a DLL (using the /MD or /MDd switch). Moreover, you need to ensure the mimalloc-redirect.dll (or mimalloc-redirect32.dll) is available in the same folder as the main mimalloc-override.dll at runtime (as it is a dependency). The redirection DLL ensures that all calls to the C runtime malloc API get redirected to mimalloc (in mimalloc-override.dll).

+

To ensure the mimalloc DLL is loaded at run-time it is easiest to insert some call to the mimalloc API in the main function, like mi_version() (or use the /INCLUDE:mi_version switch on the linker). See the mimalloc-override-test project for an example of how to use this. For best performance on Windows with C++, it is highly recommended to also override the new/delete operations (by including mimalloc-new-delete.h in a single(!) source file in your project).
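
As a sketch of the call described above (mi_version() is assumed to be the small API function mentioned; any call into the mimalloc API achieves the same effect):

#include <mimalloc.h>

int main(void) {
  mi_version();   // touch the mimalloc API so the DLL is loaded and redirection takes effect
  // ... the rest of the program can keep using malloc/free; calls are redirected to mimalloc
  return 0;
}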

+

The environment variable MIMALLOC_DISABLE_REDIRECT=1 can be used to disable dynamic overriding at run-time. Use MIMALLOC_VERBOSE=1 to check if mimalloc was successfully redirected.

+

(Note: in principle, it is possible to patch existing executables that are linked with the dynamic C runtime (ucrtbase.dll) by just putting the mimalloc-override.dll into the import table (and putting mimalloc-redirect.dll in the same folder). Such patching can be done, for example, with CFF Explorer.)

Static override

On Unix systems, you can also statically link with mimalloc to override the standard malloc interface. The recommended way is to link the final program with the mimalloc single object file (mimalloc-override.o). We use an object file instead of a library file as linkers give preference to that over archives to resolve symbols. To ensure that the standard malloc interface resolves to the mimalloc library, link it as the first object file. For example:

gcc -o myprogram mimalloc-override.o myfile1.c ...

List of Overrides:

diff --git a/docs/pages.html b/docs/pages.html index d0ee9f7a..ad5549bf 100644 --- a/docs/pages.html +++ b/docs/pages.html @@ -37,7 +37,7 @@ Logo
mi-malloc -  1.0 +  1.4
diff --git a/docs/search/all_6.js b/docs/search/all_6.js index 2edb9986..cc7a26ec 100644 --- a/docs/search/all_6.js +++ b/docs/search/all_6.js @@ -10,7 +10,7 @@ var searchData= ['mi_5fcfree',['mi_cfree',['../group__posix.html#ga705dc7a64bffacfeeb0141501a5c35d7',1,'mimalloc-doc.h']]], ['mi_5fcheck_5fowned',['mi_check_owned',['../group__analysis.html#ga628c237489c2679af84a4d0d143b3dd5',1,'mimalloc-doc.h']]], ['mi_5fcollect',['mi_collect',['../group__extended.html#ga421430e2226d7d468529cec457396756',1,'mimalloc-doc.h']]], - ['mi_5fdeferred_5ffree_5ffun',['mi_deferred_free_fun',['../group__extended.html#ga22213691c3ce5ab4d91b24aff1023529',1,'mimalloc-doc.h']]], + ['mi_5fdeferred_5ffree_5ffun',['mi_deferred_free_fun',['../group__extended.html#ga299dae78d25ce112e384a98b7309c5be',1,'mimalloc-doc.h']]], ['mi_5fexpand',['mi_expand',['../group__malloc.html#gaaee66a1d483c3e28f585525fb96707e4',1,'mimalloc-doc.h']]], ['mi_5ffree',['mi_free',['../group__malloc.html#gaf2c7b89c327d1f60f59e68b9ea644d95',1,'mimalloc-doc.h']]], ['mi_5ffree_5faligned',['mi_free_aligned',['../group__posix.html#ga0d28d5cf61e6bfbb18c63092939fe5c9',1,'mimalloc-doc.h']]], @@ -76,7 +76,6 @@ var searchData= ['mi_5fnew_5faligned',['mi_new_aligned',['../group__posix.html#gaef2c2bdb4f70857902d3c8903ac095f3',1,'mimalloc-doc.h']]], ['mi_5fnew_5faligned_5fnothrow',['mi_new_aligned_nothrow',['../group__posix.html#gab5e29558926d934c3f1cae8c815f942c',1,'mimalloc-doc.h']]], ['mi_5fnew_5fnothrow',['mi_new_nothrow',['../group__posix.html#gaeaded64eda71ed6b1d569d3e723abc4a',1,'mimalloc-doc.h']]], - ['mi_5foption_5fcache_5freset',['mi_option_cache_reset',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cac2157a0cb79cd996c1db7d9f6a090c07',1,'mimalloc-doc.h']]], ['mi_5foption_5feager_5fcommit',['mi_option_eager_commit',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca1e8de72c93da7ff22d91e1e27b52ac2b',1,'mimalloc-doc.h']]], ['mi_5foption_5feager_5fcommit_5fdelay',['mi_option_eager_commit_delay',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca17a190c25be381142d87e0468c4c068c',1,'mimalloc-doc.h']]], ['mi_5foption_5feager_5fregion_5fcommit',['mi_option_eager_region_commit',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca32ce97ece29f69e82579679cf8a307ad',1,'mimalloc-doc.h']]], @@ -89,6 +88,7 @@ var searchData= ['mi_5foption_5fpage_5freset',['mi_option_page_reset',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cada854dd272c66342f18a93ee254a2968',1,'mimalloc-doc.h']]], ['mi_5foption_5freserve_5fhuge_5fos_5fpages',['mi_option_reserve_huge_os_pages',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caca7ed041be3b0b9d0b82432c7bf41af2',1,'mimalloc-doc.h']]], ['mi_5foption_5freset_5fdecommits',['mi_option_reset_decommits',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cac81ee965b130fa81238913a3c239d536',1,'mimalloc-doc.h']]], + ['mi_5foption_5freset_5fdelay',['mi_option_reset_delay',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca154fe170131d5212cff57e22b99523c5',1,'mimalloc-doc.h']]], ['mi_5foption_5fsegment_5fcache',['mi_option_segment_cache',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca2ecbe7ef32f5c84de3739aa4f0b805a1',1,'mimalloc-doc.h']]], ['mi_5foption_5fsegment_5freset',['mi_option_segment_reset',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafb121d30d87591850d5410ccc3a95c6d',1,'mimalloc-doc.h']]], ['mi_5foption_5fset',['mi_option_set',['../group__options.html#gaf84921c32375e25754dc2ee6a911fa60',1,'mimalloc-doc.h']]], @@ -96,8 
+96,9 @@ var searchData= ['mi_5foption_5fshow_5ferrors',['mi_option_show_errors',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafbf4822e5c00732c5984b32a032837f0',1,'mimalloc-doc.h']]], ['mi_5foption_5fshow_5fstats',['mi_option_show_stats',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0957ef73b2550764b4840edf48422fda',1,'mimalloc-doc.h']]], ['mi_5foption_5ft',['mi_option_t',['../group__options.html#gafebf7ed116adb38ae5218bc3ce06884c',1,'mimalloc-doc.h']]], + ['mi_5foption_5fuse_5fnuma_5fnodes',['mi_option_use_numa_nodes',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0ac33a18f6b659fcfaf44efb0bab1b74',1,'mimalloc-doc.h']]], ['mi_5foption_5fverbose',['mi_option_verbose',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca7c8b7bf5281c581bad64f5daa6442777',1,'mimalloc-doc.h']]], - ['mi_5foutput_5ffun',['mi_output_fun',['../group__extended.html#ga2bed6d40b74591a67f81daea4b4a246f',1,'mimalloc-doc.h']]], + ['mi_5foutput_5ffun',['mi_output_fun',['../group__extended.html#gad823d23444a4b77a40f66bf075a98a0c',1,'mimalloc-doc.h']]], ['mi_5fposix_5fmemalign',['mi_posix_memalign',['../group__posix.html#gacff84f226ba9feb2031b8992e5579447',1,'mimalloc-doc.h']]], ['mi_5fpvalloc',['mi_pvalloc',['../group__posix.html#gaeb325c39b887d3b90d85d1eb1712fb1e',1,'mimalloc-doc.h']]], ['mi_5frealloc',['mi_realloc',['../group__malloc.html#gaf11eb497da57bdfb2de65eb191c69db6',1,'mimalloc-doc.h']]], @@ -111,21 +112,22 @@ var searchData= ['mi_5frecalloc',['mi_recalloc',['../group__malloc.html#ga23a0fbb452b5dce8e31fab1a1958cacc',1,'mimalloc-doc.h']]], ['mi_5frecalloc_5faligned',['mi_recalloc_aligned',['../group__zeroinit.html#ga3e7e5c291acf1c7fd7ffd9914a9f945f',1,'mimalloc-doc.h']]], ['mi_5frecalloc_5faligned_5fat',['mi_recalloc_aligned_at',['../group__zeroinit.html#ga4ff5e92ad73585418a072c9d059e5cf9',1,'mimalloc-doc.h']]], - ['mi_5fregister_5fdeferred_5ffree',['mi_register_deferred_free',['../group__extended.html#ga24dc9cc6fca8daa2aa30aa8025467ce2',1,'mimalloc-doc.h']]], - ['mi_5fregister_5foutput',['mi_register_output',['../group__extended.html#ga84a0c8b401e42eb5b1bce156852f44c5',1,'mimalloc-doc.h']]], - ['mi_5freserve_5fhuge_5fos_5fpages',['mi_reserve_huge_os_pages',['../group__extended.html#ga2664f36a2dd557741c429cb799f04641',1,'mimalloc-doc.h']]], + ['mi_5fregister_5fdeferred_5ffree',['mi_register_deferred_free',['../group__extended.html#ga3460a6ca91af97be4058f523d3cb8ece',1,'mimalloc-doc.h']]], + ['mi_5fregister_5foutput',['mi_register_output',['../group__extended.html#gae5b17ff027cd2150b43a33040250cf3f',1,'mimalloc-doc.h']]], + ['mi_5freserve_5fhuge_5fos_5fpages_5fat',['mi_reserve_huge_os_pages_at',['../group__extended.html#ga7795a13d20087447281858d2c771cca1',1,'mimalloc-doc.h']]], + ['mi_5freserve_5fhuge_5fos_5fpages_5finterleave',['mi_reserve_huge_os_pages_interleave',['../group__extended.html#ga3132f521fb756fc0e8ec0b74fb58df50',1,'mimalloc-doc.h']]], ['mi_5frezalloc',['mi_rezalloc',['../group__zeroinit.html#ga8c292e142110229a2980b37ab036dbc6',1,'mimalloc-doc.h']]], ['mi_5frezalloc_5faligned',['mi_rezalloc_aligned',['../group__zeroinit.html#gacd71a7bce96aab38ae6de17af2eb2cf0',1,'mimalloc-doc.h']]], ['mi_5frezalloc_5faligned_5fat',['mi_rezalloc_aligned_at',['../group__zeroinit.html#gae8b358c417e61d5307da002702b0a8e1',1,'mimalloc-doc.h']]], ['mi_5fsmall_5fsize_5fmax',['MI_SMALL_SIZE_MAX',['../group__extended.html#ga1ea64283508718d9d645c38efc2f4305',1,'mimalloc-doc.h']]], 
['mi_5fstats_5fmerge',['mi_stats_merge',['../group__extended.html#ga854b1de8cb067c7316286c28b2fcd3d1',1,'mimalloc-doc.h']]], - ['mi_5fstats_5fprint',['mi_stats_print',['../group__extended.html#ga8ca07ccff283956d71f48272f4fd5c01',1,'mimalloc-doc.h']]], + ['mi_5fstats_5fprint',['mi_stats_print',['../group__extended.html#ga2d126e5c62d3badc35445e5d84166df2',1,'mi_stats_print(void *out): mimalloc-doc.h'],['../group__extended.html#ga256cc6f13a142deabbadd954a217e228',1,'mi_stats_print(mi_output_fun *out, void *arg): mimalloc-doc.h']]], ['mi_5fstats_5freset',['mi_stats_reset',['../group__extended.html#ga3bb8468b8cfcc6e2a61d98aee85c5f99',1,'mimalloc-doc.h']]], ['mi_5fstrdup',['mi_strdup',['../group__malloc.html#gac7cffe13f1f458ed16789488bf92b9b2',1,'mimalloc-doc.h']]], ['mi_5fstrndup',['mi_strndup',['../group__malloc.html#gaaabf971c2571891433477e2d21a35266',1,'mimalloc-doc.h']]], ['mi_5fthread_5fdone',['mi_thread_done',['../group__extended.html#ga0ae4581e85453456a0d658b2b98bf7bf',1,'mimalloc-doc.h']]], ['mi_5fthread_5finit',['mi_thread_init',['../group__extended.html#gaf8e73efc2cbca9ebfdfb166983a04c17',1,'mimalloc-doc.h']]], - ['mi_5fthread_5fstats_5fprint',['mi_thread_stats_print',['../group__extended.html#ga489670a15d1a257ab4639e645ee4612a',1,'mimalloc-doc.h']]], + ['mi_5fthread_5fstats_5fprint_5fout',['mi_thread_stats_print_out',['../group__extended.html#gab1dac8476c46cb9eecab767eb40c1525',1,'mimalloc-doc.h']]], ['mi_5fusable_5fsize',['mi_usable_size',['../group__extended.html#ga089c859d9eddc5f9b4bd946cd53cebee',1,'mimalloc-doc.h']]], ['mi_5fvalloc',['mi_valloc',['../group__posix.html#ga73baaf5951f5165ba0763d0c06b6a93b',1,'mimalloc-doc.h']]], ['mi_5fzalloc',['mi_zalloc',['../group__malloc.html#gafdd9d8bb2986e668ba9884f28af38000',1,'mimalloc-doc.h']]], diff --git a/docs/search/all_c.html b/docs/search/all_c.html new file mode 100644 index 00000000..3de15867 --- /dev/null +++ b/docs/search/all_c.html @@ -0,0 +1,30 @@ + + + + + + + + + +
+
Loading...
+
+ +
Searching...
+
No Matches
+ +
+ + diff --git a/docs/search/all_c.js b/docs/search/all_c.js new file mode 100644 index 00000000..2b9b4cea --- /dev/null +++ b/docs/search/all_c.js @@ -0,0 +1,4 @@ +var searchData= +[ + ['zero_20initialized_20re_2dallocation',['Zero initialized re-allocation',['../group__zeroinit.html',1,'']]] +]; diff --git a/docs/search/all_d.html b/docs/search/all_d.html new file mode 100644 index 00000000..a2d5bd7e --- /dev/null +++ b/docs/search/all_d.html @@ -0,0 +1,30 @@ + + + + + + + + + +
+
Loading...
+
+ +
Searching...
+
No Matches
+ +
+ + diff --git a/docs/search/all_d.js b/docs/search/all_d.js new file mode 100644 index 00000000..2b9b4cea --- /dev/null +++ b/docs/search/all_d.js @@ -0,0 +1,4 @@ +var searchData= +[ + ['zero_20initialized_20re_2dallocation',['Zero initialized re-allocation',['../group__zeroinit.html',1,'']]] +]; diff --git a/docs/search/enumvalues_1.js b/docs/search/enumvalues_1.js index 3ed91631..3b712708 100644 --- a/docs/search/enumvalues_1.js +++ b/docs/search/enumvalues_1.js @@ -1,6 +1,5 @@ var searchData= [ - ['mi_5foption_5fcache_5freset',['mi_option_cache_reset',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cac2157a0cb79cd996c1db7d9f6a090c07',1,'mimalloc-doc.h']]], ['mi_5foption_5feager_5fcommit',['mi_option_eager_commit',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca1e8de72c93da7ff22d91e1e27b52ac2b',1,'mimalloc-doc.h']]], ['mi_5foption_5feager_5fcommit_5fdelay',['mi_option_eager_commit_delay',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca17a190c25be381142d87e0468c4c068c',1,'mimalloc-doc.h']]], ['mi_5foption_5feager_5fregion_5fcommit',['mi_option_eager_region_commit',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca32ce97ece29f69e82579679cf8a307ad',1,'mimalloc-doc.h']]], @@ -9,9 +8,11 @@ var searchData= ['mi_5foption_5fpage_5freset',['mi_option_page_reset',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cada854dd272c66342f18a93ee254a2968',1,'mimalloc-doc.h']]], ['mi_5foption_5freserve_5fhuge_5fos_5fpages',['mi_option_reserve_huge_os_pages',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caca7ed041be3b0b9d0b82432c7bf41af2',1,'mimalloc-doc.h']]], ['mi_5foption_5freset_5fdecommits',['mi_option_reset_decommits',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cac81ee965b130fa81238913a3c239d536',1,'mimalloc-doc.h']]], + ['mi_5foption_5freset_5fdelay',['mi_option_reset_delay',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca154fe170131d5212cff57e22b99523c5',1,'mimalloc-doc.h']]], ['mi_5foption_5fsegment_5fcache',['mi_option_segment_cache',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca2ecbe7ef32f5c84de3739aa4f0b805a1',1,'mimalloc-doc.h']]], ['mi_5foption_5fsegment_5freset',['mi_option_segment_reset',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafb121d30d87591850d5410ccc3a95c6d',1,'mimalloc-doc.h']]], ['mi_5foption_5fshow_5ferrors',['mi_option_show_errors',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafbf4822e5c00732c5984b32a032837f0',1,'mimalloc-doc.h']]], ['mi_5foption_5fshow_5fstats',['mi_option_show_stats',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0957ef73b2550764b4840edf48422fda',1,'mimalloc-doc.h']]], + ['mi_5foption_5fuse_5fnuma_5fnodes',['mi_option_use_numa_nodes',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0ac33a18f6b659fcfaf44efb0bab1b74',1,'mimalloc-doc.h']]], ['mi_5foption_5fverbose',['mi_option_verbose',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca7c8b7bf5281c581bad64f5daa6442777',1,'mimalloc-doc.h']]] ]; diff --git a/docs/search/functions_0.js b/docs/search/functions_0.js index c5eeb540..d1d209a1 100644 --- a/docs/search/functions_0.js +++ b/docs/search/functions_0.js @@ -81,20 +81,21 @@ var searchData= ['mi_5frecalloc',['mi_recalloc',['../group__malloc.html#ga23a0fbb452b5dce8e31fab1a1958cacc',1,'mimalloc-doc.h']]], ['mi_5frecalloc_5faligned',['mi_recalloc_aligned',['../group__zeroinit.html#ga3e7e5c291acf1c7fd7ffd9914a9f945f',1,'mimalloc-doc.h']]], 
['mi_5frecalloc_5faligned_5fat',['mi_recalloc_aligned_at',['../group__zeroinit.html#ga4ff5e92ad73585418a072c9d059e5cf9',1,'mimalloc-doc.h']]], - ['mi_5fregister_5fdeferred_5ffree',['mi_register_deferred_free',['../group__extended.html#ga24dc9cc6fca8daa2aa30aa8025467ce2',1,'mimalloc-doc.h']]], - ['mi_5fregister_5foutput',['mi_register_output',['../group__extended.html#ga84a0c8b401e42eb5b1bce156852f44c5',1,'mimalloc-doc.h']]], - ['mi_5freserve_5fhuge_5fos_5fpages',['mi_reserve_huge_os_pages',['../group__extended.html#ga2664f36a2dd557741c429cb799f04641',1,'mimalloc-doc.h']]], + ['mi_5fregister_5fdeferred_5ffree',['mi_register_deferred_free',['../group__extended.html#ga3460a6ca91af97be4058f523d3cb8ece',1,'mimalloc-doc.h']]], + ['mi_5fregister_5foutput',['mi_register_output',['../group__extended.html#gae5b17ff027cd2150b43a33040250cf3f',1,'mimalloc-doc.h']]], + ['mi_5freserve_5fhuge_5fos_5fpages_5fat',['mi_reserve_huge_os_pages_at',['../group__extended.html#ga7795a13d20087447281858d2c771cca1',1,'mimalloc-doc.h']]], + ['mi_5freserve_5fhuge_5fos_5fpages_5finterleave',['mi_reserve_huge_os_pages_interleave',['../group__extended.html#ga3132f521fb756fc0e8ec0b74fb58df50',1,'mimalloc-doc.h']]], ['mi_5frezalloc',['mi_rezalloc',['../group__zeroinit.html#ga8c292e142110229a2980b37ab036dbc6',1,'mimalloc-doc.h']]], ['mi_5frezalloc_5faligned',['mi_rezalloc_aligned',['../group__zeroinit.html#gacd71a7bce96aab38ae6de17af2eb2cf0',1,'mimalloc-doc.h']]], ['mi_5frezalloc_5faligned_5fat',['mi_rezalloc_aligned_at',['../group__zeroinit.html#gae8b358c417e61d5307da002702b0a8e1',1,'mimalloc-doc.h']]], ['mi_5fstats_5fmerge',['mi_stats_merge',['../group__extended.html#ga854b1de8cb067c7316286c28b2fcd3d1',1,'mimalloc-doc.h']]], - ['mi_5fstats_5fprint',['mi_stats_print',['../group__extended.html#ga8ca07ccff283956d71f48272f4fd5c01',1,'mimalloc-doc.h']]], + ['mi_5fstats_5fprint',['mi_stats_print',['../group__extended.html#ga2d126e5c62d3badc35445e5d84166df2',1,'mi_stats_print(void *out): mimalloc-doc.h'],['../group__extended.html#ga256cc6f13a142deabbadd954a217e228',1,'mi_stats_print(mi_output_fun *out, void *arg): mimalloc-doc.h']]], ['mi_5fstats_5freset',['mi_stats_reset',['../group__extended.html#ga3bb8468b8cfcc6e2a61d98aee85c5f99',1,'mimalloc-doc.h']]], ['mi_5fstrdup',['mi_strdup',['../group__malloc.html#gac7cffe13f1f458ed16789488bf92b9b2',1,'mimalloc-doc.h']]], ['mi_5fstrndup',['mi_strndup',['../group__malloc.html#gaaabf971c2571891433477e2d21a35266',1,'mimalloc-doc.h']]], ['mi_5fthread_5fdone',['mi_thread_done',['../group__extended.html#ga0ae4581e85453456a0d658b2b98bf7bf',1,'mimalloc-doc.h']]], ['mi_5fthread_5finit',['mi_thread_init',['../group__extended.html#gaf8e73efc2cbca9ebfdfb166983a04c17',1,'mimalloc-doc.h']]], - ['mi_5fthread_5fstats_5fprint',['mi_thread_stats_print',['../group__extended.html#ga489670a15d1a257ab4639e645ee4612a',1,'mimalloc-doc.h']]], + ['mi_5fthread_5fstats_5fprint_5fout',['mi_thread_stats_print_out',['../group__extended.html#gab1dac8476c46cb9eecab767eb40c1525',1,'mimalloc-doc.h']]], ['mi_5fusable_5fsize',['mi_usable_size',['../group__extended.html#ga089c859d9eddc5f9b4bd946cd53cebee',1,'mimalloc-doc.h']]], ['mi_5fvalloc',['mi_valloc',['../group__posix.html#ga73baaf5951f5165ba0763d0c06b6a93b',1,'mimalloc-doc.h']]], ['mi_5fzalloc',['mi_zalloc',['../group__malloc.html#gafdd9d8bb2986e668ba9884f28af38000',1,'mimalloc-doc.h']]], diff --git a/docs/search/functions_1.html b/docs/search/functions_1.html new file mode 100644 index 00000000..bfcf880b --- /dev/null +++ b/docs/search/functions_1.html @@ -0,0 +1,30 
@@ + + + + + + + + + +
+
Loading...
+
+ +
Searching...
+
No Matches
+ +
+ + diff --git a/docs/search/functions_1.js b/docs/search/functions_1.js new file mode 100644 index 00000000..06dbb19b --- /dev/null +++ b/docs/search/functions_1.js @@ -0,0 +1,4 @@ +var searchData= +[ + ['void',['void',['../group__extended.html#gadc49452cc1634aa03ac83ffe9b97a19c',1,'mimalloc-doc.h']]] +]; diff --git a/docs/search/groups_7.html b/docs/search/groups_7.html new file mode 100644 index 00000000..6a24e7cf --- /dev/null +++ b/docs/search/groups_7.html @@ -0,0 +1,30 @@ + + + + + + + + + +
+
Loading...
+
+ +
Searching...
+
No Matches
+ +
+ + diff --git a/docs/search/groups_7.js b/docs/search/groups_7.js new file mode 100644 index 00000000..2b9b4cea --- /dev/null +++ b/docs/search/groups_7.js @@ -0,0 +1,4 @@ +var searchData= +[ + ['zero_20initialized_20re_2dallocation',['Zero initialized re-allocation',['../group__zeroinit.html',1,'']]] +]; diff --git a/docs/search/pages_4.html b/docs/search/pages_4.html new file mode 100644 index 00000000..021d277a --- /dev/null +++ b/docs/search/pages_4.html @@ -0,0 +1,30 @@ + + + + + + + + + +
+
+ +
+ + diff --git a/docs/search/pages_4.js b/docs/search/pages_4.js new file mode 100644 index 00000000..b47682a4 --- /dev/null +++ b/docs/search/pages_4.js @@ -0,0 +1,4 @@ +var searchData= +[ + ['using_20the_20library',['Using the library',['../using.html',1,'']]] +]; diff --git a/docs/search/typedefs_0.js b/docs/search/typedefs_0.js index c6f0f7ec..17816828 100644 --- a/docs/search/typedefs_0.js +++ b/docs/search/typedefs_0.js @@ -1,7 +1,7 @@ var searchData= [ ['mi_5fblock_5fvisit_5ffun',['mi_block_visit_fun',['../group__analysis.html#gadfa01e2900f0e5d515ad5506b26f6d65',1,'mimalloc-doc.h']]], - ['mi_5fdeferred_5ffree_5ffun',['mi_deferred_free_fun',['../group__extended.html#ga22213691c3ce5ab4d91b24aff1023529',1,'mimalloc-doc.h']]], + ['mi_5fdeferred_5ffree_5ffun',['mi_deferred_free_fun',['../group__extended.html#ga299dae78d25ce112e384a98b7309c5be',1,'mimalloc-doc.h']]], ['mi_5fheap_5ft',['mi_heap_t',['../group__heap.html#ga34a47cde5a5b38c29f1aa3c5e76943c2',1,'mimalloc-doc.h']]], - ['mi_5foutput_5ffun',['mi_output_fun',['../group__extended.html#ga2bed6d40b74591a67f81daea4b4a246f',1,'mimalloc-doc.h']]] + ['mi_5foutput_5ffun',['mi_output_fun',['../group__extended.html#gad823d23444a4b77a40f66bf075a98a0c',1,'mimalloc-doc.h']]] ]; diff --git a/docs/search/typedefs_1.html b/docs/search/typedefs_1.html new file mode 100644 index 00000000..c8a02685 --- /dev/null +++ b/docs/search/typedefs_1.html @@ -0,0 +1,30 @@ + + + + + + + + + +
+
+ +
+ + diff --git a/docs/search/typedefs_1.js b/docs/search/typedefs_1.js new file mode 100644 index 00000000..ecccb16a --- /dev/null +++ b/docs/search/typedefs_1.js @@ -0,0 +1,4 @@ +var searchData= +[ + ['heartbeat',['heartbeat',['../group__extended.html#ga411f6e94394a2400aa460c796beff8d8',1,'mimalloc-doc.h']]] +]; diff --git a/docs/search/typedefs_2.html b/docs/search/typedefs_2.html new file mode 100644 index 00000000..86a91955 --- /dev/null +++ b/docs/search/typedefs_2.html @@ -0,0 +1,30 @@ + + + + + + + + + +
+
+ +
+ + diff --git a/docs/search/typedefs_2.js b/docs/search/typedefs_2.js new file mode 100644 index 00000000..2af06079 --- /dev/null +++ b/docs/search/typedefs_2.js @@ -0,0 +1,5 @@ +var searchData= +[ + ['mi_5fblock_5fvisit_5ffun',['mi_block_visit_fun',['../group__analysis.html#gadfa01e2900f0e5d515ad5506b26f6d65',1,'mimalloc-doc.h']]], + ['mi_5fheap_5ft',['mi_heap_t',['../group__heap.html#ga34a47cde5a5b38c29f1aa3c5e76943c2',1,'mimalloc-doc.h']]] +]; diff --git a/docs/using.html b/docs/using.html index 9b7305b0..eae37a5e 100644 --- a/docs/using.html +++ b/docs/using.html @@ -37,7 +37,7 @@ Logo
mi-malloc -  1.0 +  1.4
@@ -102,7 +102,11 @@ $(document).ready(function(){initNavTree('using.html','');});
Using the library
-

The preferred usage is including <mimalloc.h>, linking with the shared- or static library, and using the mi_malloc API exclusively for allocation. For example,

gcc -o myprogram -lmimalloc myfile.c

mimalloc uses only safe OS calls (mmap and VirtualAlloc) and can co-exist with other allocators linked to the same program. If you use cmake, you can simply use:

find_package(mimalloc 1.0 REQUIRED)

in your CMakeLists.txt to find a locally installed mimalloc. Then use either:

target_link_libraries(myapp PUBLIC mimalloc)

to link with the shared (dynamic) library, or:

target_link_libraries(myapp PUBLIC mimalloc-static)

to link with the static library. See test\CMakeLists.txt for an example.

+

Build

+

The preferred usage is including <mimalloc.h>, linking with the shared- or static library, and using the mi_malloc API exclusively for allocation. For example,

gcc -o myprogram -lmimalloc myfile.c

mimalloc uses only safe OS calls (mmap and VirtualAlloc) and can co-exist with other allocators linked to the same program. If you use cmake, you can simply use:

find_package(mimalloc 1.0 REQUIRED)

in your CMakeLists.txt to find a locally installed mimalloc. Then use either:

target_link_libraries(myapp PUBLIC mimalloc)

to link with the shared (dynamic) library, or:

target_link_libraries(myapp PUBLIC mimalloc-static)

to link with the static library. See test\CMakeLists.txt for an example.

+

C++

+

For best performance in C++ programs, it is also recommended to override the global new and delete operators. For convenience, mimalloc provides mimalloc-new-delete.h which does this for you; just include it in a single(!) source file in your project.
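For illustration only (a minimal sketch; the file name main.cpp and the loop are just examples, not part of this page), enabling the override looks like this in the one chosen source file:

// main.cpp -- include the override header in exactly one source file so that
// the global new and delete operators are routed to mimalloc.
#include <mimalloc-new-delete.h>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> v;            // element storage now comes from mimalloc
  for (int i = 0; i < 1000; i++) v.push_back("hello mimalloc");
  return 0;
}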

+

In C++, mimalloc also provides the mi_stl_allocator struct which implements the std::allocator interface. For example:

std::vector<some_struct, mi_stl_allocator<some_struct>> vec;
vec.push_back(some_struct());
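As a further sketch (not part of this page), the allocator plugs into any standard container in the same way; in this patch series it obtains memory through mi_new_n (so std::bad_alloc is raised on failure or overflow) and releases it with mi_free:

// stl_allocator_example.cpp -- illustrative sketch only.
#include <mimalloc.h>
#include <map>

int main() {
  // node storage for the map is obtained via mi_stl_allocator
  std::map<int, int, std::less<int>, mi_stl_allocator<std::pair<const int, int>>> m;
  for (int i = 0; i < 100; i++) m[i] = i * i;
  return 0;
}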

Statistics

You can pass environment variables to print verbose messages (MIMALLOC_VERBOSE=1) and statistics (MIMALLOC_SHOW_STATS=1) (in the debug version):

> env MIMALLOC_SHOW_STATS=1 ./cfrac 175451865205073170563711388363
175451865205073170563711388363 = 374456281610909315237213 * 468551
heap stats:   peak       total      freed      unit
normal   2:   16.4 kb    17.5 mb    17.5 mb    16 b    ok
normal   3:   16.3 kb    15.2 mb    15.2 mb    24 b    ok
normal   4:   64 b       4.6 kb     4.6 kb     32 b    ok
normal   5:   80 b       118.4 kb   118.4 kb   40 b    ok
normal   6:   48 b       48 b       48 b       48 b    ok
normal  17:   960 b      960 b      960 b      320 b   ok
heap stats:   peak       total      freed      unit
    normal:   33.9 kb    32.8 mb    32.8 mb    1 b     ok
      huge:   0 b        0 b        0 b        1 b     ok
     total:   33.9 kb    32.8 mb    32.8 mb    1 b     ok
malloc requested:        32.8 mb
 committed:   58.2 kb    58.2 kb    58.2 kb    1 b     ok
  reserved:   2.0 mb     2.0 mb     2.0 mb     1 b     ok
     reset:   0 b        0 b        0 b        1 b     ok
  segments:   1          1          1
-abandoned:   0
     pages:   6          6          6
-abandoned:   0
     mmaps:   3
 mmap fast:   0
 mmap slow:   1
   threads:   0
   elapsed:   2.022s
   process: user: 1.781s, system: 0.016s, faults: 756, reclaims: 0, rss: 2.7 mb
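For reference (an illustrative sketch, not part of this page), the same statistics can also be printed programmatically with mi_stats_print:

// stats_example.cpp -- illustrative sketch only.
#include <mimalloc.h>

int main() {
  for (int i = 0; i < 1000; i++) { void* p = mi_malloc(64); mi_free(p); }
  mi_stats_print(NULL);   // print the main statistics to the default output
  return 0;
}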

The above model of using the mi_ prefixed API is not always possible, though, in existing programs that already use the standard malloc interface; another option is to override the standard malloc interface completely and redirect all calls to the mimalloc library instead.

See Overriding Malloc for more info.

From 9453d8b4683ccc347458666bf3a10de7fa8c2638 Mon Sep 17 00:00:00 2001 From: daan Date: Fri, 17 Jan 2020 15:39:06 -0800 Subject: [PATCH 32/46] update documentation --- doc/doxyfile | 4 ++-- doc/mimalloc-doc.h | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/doc/doxyfile b/doc/doxyfile index 11d71667..91adbeb8 100644 --- a/doc/doxyfile +++ b/doc/doxyfile @@ -38,7 +38,7 @@ PROJECT_NAME = mi-malloc # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = 1.0 +PROJECT_NUMBER = 1.4 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a @@ -1235,7 +1235,7 @@ HTML_EXTRA_STYLESHEET = mimalloc-doxygen.css # files will be copied as-is; there are no commands or markers available. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_EXTRA_FILES = +HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen # will adjust the colors in the style sheet and background images according to diff --git a/doc/mimalloc-doc.h b/doc/mimalloc-doc.h index 71cc1589..ea526b12 100644 --- a/doc/mimalloc-doc.h +++ b/doc/mimalloc-doc.h @@ -784,6 +784,9 @@ void mi_free_aligned(void* p, size_t alignment); /// raise `std::bad_alloc` exception on failure. void* mi_new(std::size_t n) noexcept(false); +/// raise `std::bad_alloc` exception on failure or overflow. +void* mi_new_n(size_t count, size_t size) noexcept(false); + /// raise `std::bad_alloc` exception on failure. void* mi_new_aligned(std::size_t n, std::align_val_t alignment) noexcept(false); From f4ee1760b8f58bdb4967e47f34c2495387c2cff2 Mon Sep 17 00:00:00 2001 From: daan Date: Fri, 17 Jan 2020 15:39:41 -0800 Subject: [PATCH 33/46] Suppress C source compiled as C++ warning on clang --- CMakeLists.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index a894de9b..8f05c883 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -96,7 +96,10 @@ endif() if(MI_USE_CXX MATCHES "ON") message(STATUS "Use the C++ compiler to compile (MI_USE_CXX=ON)") set_source_files_properties(${mi_sources} PROPERTIES LANGUAGE CXX ) - set_source_files_properties(src/static.c test/test-api.c PROPERTIES LANGUAGE CXX ) + set_source_files_properties(src/static.c test/test-api.c test/test-stress.c PROPERTIES LANGUAGE CXX ) + if(CMAKE_CXX_COMPILER_ID MATCHES "AppleClang|Clang") + list(APPEND mi_cflags -Wno-deprecated) + endif() endif() # Compiler flags From 6dd636d82db6516f325c1c3b2695e20a024230ce Mon Sep 17 00:00:00 2001 From: daan Date: Fri, 17 Jan 2020 15:41:52 -0800 Subject: [PATCH 34/46] improve STL allocator using mi_new_n and removing unused parameter names; follow up from pr #193 and #188 --- include/mimalloc.h | 31 +++++++++++++++++------------- src/alloc.c | 47 ++++++++++++++++++++++++++++++---------------- 2 files changed, 49 insertions(+), 29 deletions(-) diff --git a/include/mimalloc.h b/include/mimalloc.h index 59f394a7..485978e6 100644 --- a/include/mimalloc.h +++ b/include/mimalloc.h @@ -326,10 +326,11 @@ mi_decl_export void mi_free_size(void* p, size_t size) mi_attr_noexcept; mi_decl_export void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept; mi_decl_export void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept; -mi_decl_export void* mi_new(size_t n) mi_attr_malloc mi_attr_alloc_size(1); -mi_decl_export void* mi_new_aligned(size_t n, size_t alignment) 
mi_attr_malloc mi_attr_alloc_size(1); -mi_decl_export void* mi_new_nothrow(size_t n) mi_attr_malloc mi_attr_alloc_size(1); -mi_decl_export void* mi_new_aligned_nothrow(size_t n, size_t alignment) mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_export void* mi_new(size_t size) mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_export void* mi_new_n(size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(1,2); +mi_decl_export void* mi_new_aligned(size_t size, size_t alignment) mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_export void* mi_new_nothrow(size_t size) mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_export void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_malloc mi_attr_alloc_size(1); #ifdef __cplusplus } @@ -347,21 +348,25 @@ mi_decl_export void* mi_new_aligned_nothrow(size_t n, size_t alignment) mi_attr_ template struct mi_stl_allocator { typedef T value_type; -#if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11 + #if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11 using propagate_on_container_copy_assignment = std::true_type; using propagate_on_container_move_assignment = std::true_type; using propagate_on_container_swap = std::true_type; using is_always_equal = std::true_type; -#endif - mi_stl_allocator() mi_attr_noexcept {} - mi_stl_allocator(const mi_stl_allocator& other) mi_attr_noexcept { (void)other; } - template mi_stl_allocator(const mi_stl_allocator& other) mi_attr_noexcept { (void)other; } - T* allocate(size_t n, const void* hint = 0) { (void)hint; return (T*)mi_mallocn(n, sizeof(T)); } - void deallocate(T* p, size_t n) { mi_free_size(p,n); } + #endif + mi_stl_allocator() mi_attr_noexcept { } + mi_stl_allocator(const mi_stl_allocator& ) mi_attr_noexcept { } + template mi_stl_allocator(const mi_stl_allocator& ) mi_attr_noexcept { } + void deallocate(T* p, size_t size) { mi_free_size(p, size); } + #if (__cplusplus >= 201703L) // C++17 + T* allocate(size_t count) { return (T*)mi_new_n(count, sizeof(T)); } + #else + T* allocate(size_t count, const void* hint = 0) { (void)hint; return (T*)mi_new_n(count, sizeof(T)); } + #endif }; -template bool operator==(const mi_stl_allocator& lhs, const mi_stl_allocator& rhs) mi_attr_noexcept { (void)lhs; (void)rhs; return true; } -template bool operator!=(const mi_stl_allocator& lhs, const mi_stl_allocator& rhs) mi_attr_noexcept { (void)lhs; (void)rhs; return false; } +template bool operator==(const mi_stl_allocator& , const mi_stl_allocator& ) mi_attr_noexcept { return true; } +template bool operator!=(const mi_stl_allocator& , const mi_stl_allocator& ) mi_attr_noexcept { return false; } #endif // __cplusplus #endif diff --git a/src/alloc.c b/src/alloc.c index d66c629b..37d43d9f 100644 --- a/src/alloc.c +++ b/src/alloc.c @@ -678,36 +678,51 @@ static bool mi_try_new_handler(bool nothrow) { } #endif -static mi_decl_noinline void* mi_try_new(size_t n, bool nothrow ) { +static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow ) { void* p = NULL; while(p == NULL && mi_try_new_handler(nothrow)) { - p = mi_malloc(n); + p = mi_malloc(size); } return p; } -void* mi_new(size_t n) { - void* p = mi_malloc(n); - if (mi_unlikely(p == NULL)) return mi_try_new(n,false); +void* mi_new(size_t size) { + void* p = mi_malloc(size); + if (mi_unlikely(p == NULL)) return mi_try_new(size,false); return p; } -void* mi_new_aligned(size_t n, size_t alignment) { +void* mi_new_nothrow(size_t size) { + void* p = mi_malloc(size); + if (mi_unlikely(p == NULL)) return mi_try_new(size, true); + return p; +} + +void* 
mi_new_aligned(size_t size, size_t alignment) { void* p; - do { p = mi_malloc_aligned(n, alignment); } + do { + p = mi_malloc_aligned(size, alignment); + } while(p == NULL && mi_try_new_handler(false)); return p; } -void* mi_new_nothrow(size_t n) { - void* p = mi_malloc(n); - if (mi_unlikely(p == NULL)) return mi_try_new(n,true); +void* mi_new_aligned_nothrow(size_t size, size_t alignment) { + void* p; + do { + p = mi_malloc_aligned(size, alignment); + } + while(p == NULL && mi_try_new_handler(true)); return p; } -void* mi_new_aligned_nothrow(size_t n, size_t alignment) { - void* p; - do { p = mi_malloc_aligned(n, alignment); } - while (p == NULL && mi_try_new_handler(true)); - return p; -} +void* mi_new_n(size_t count, size_t size) { + size_t total; + if (mi_unlikely(mi_mul_overflow(count, size, &total))) { + mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc + return NULL; + } + else { + return mi_new(total); + } +} \ No newline at end of file From dbe721de393cbfce7a699cc6f1b5cf5955a85f7a Mon Sep 17 00:00:00 2001 From: daan Date: Fri, 17 Jan 2020 15:45:12 -0800 Subject: [PATCH 35/46] dont compile test-stress.c as C++ code (or we get atomic compilation errors) --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 8f05c883..27729584 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -96,7 +96,7 @@ endif() if(MI_USE_CXX MATCHES "ON") message(STATUS "Use the C++ compiler to compile (MI_USE_CXX=ON)") set_source_files_properties(${mi_sources} PROPERTIES LANGUAGE CXX ) - set_source_files_properties(src/static.c test/test-api.c test/test-stress.c PROPERTIES LANGUAGE CXX ) + set_source_files_properties(src/static.c test/test-api.c PROPERTIES LANGUAGE CXX ) if(CMAKE_CXX_COMPILER_ID MATCHES "AppleClang|Clang") list(APPEND mi_cflags -Wno-deprecated) endif() From 3e982a3813f191b32fee60b06fba758a98f3a633 Mon Sep 17 00:00:00 2001 From: daan Date: Fri, 17 Jan 2020 15:58:43 -0800 Subject: [PATCH 36/46] fix STL deallocate passing count (instead of size) to mi_free_size --- include/mimalloc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/mimalloc.h b/include/mimalloc.h index 485978e6..67ff1a35 100644 --- a/include/mimalloc.h +++ b/include/mimalloc.h @@ -357,7 +357,7 @@ template struct mi_stl_allocator { mi_stl_allocator() mi_attr_noexcept { } mi_stl_allocator(const mi_stl_allocator& ) mi_attr_noexcept { } template mi_stl_allocator(const mi_stl_allocator& ) mi_attr_noexcept { } - void deallocate(T* p, size_t size) { mi_free_size(p, size); } + void deallocate(T* p, size_t /* count */) { mi_free(p); } #if (__cplusplus >= 201703L) // C++17 T* allocate(size_t count) { return (T*)mi_new_n(count, sizeof(T)); } #else From dc5838896837cc4bd6bc1583f390fa51e389ae48 Mon Sep 17 00:00:00 2001 From: daan Date: Fri, 17 Jan 2020 19:59:55 -0800 Subject: [PATCH 37/46] Add ability to register custom error function called on various error conditions; including ENOMEM --- doc/mimalloc-doc.h | 24 +++++++++ docs/group__extended.html | 74 ++++++++++++++++++++++++++ docs/group__extended.js | 2 + docs/group__posix.html | 41 +++++++++++++++ docs/group__posix.js | 1 + docs/mimalloc-doc_8h_source.html | 55 ++++++++++---------- docs/navtreeindex0.js | 51 +++++++++--------- docs/search/all_6.js | 3 ++ docs/search/functions_0.js | 2 + docs/search/typedefs_0.js | 1 + include/mimalloc-internal.h | 89 +++++++++++++++++++++----------- include/mimalloc.h | 15 ++++-- src/alloc-aligned.c 
| 6 +-- src/alloc.c | 27 ++++------ src/arena.c | 6 +-- src/init.c | 2 +- src/options.c | 44 ++++++++++++---- src/os.c | 10 ++-- src/page.c | 10 ++-- test/test-api.c | 6 ++- 20 files changed, 342 insertions(+), 127 deletions(-) diff --git a/doc/mimalloc-doc.h b/doc/mimalloc-doc.h index ea526b12..ca744e4c 100644 --- a/doc/mimalloc-doc.h +++ b/doc/mimalloc-doc.h @@ -373,6 +373,30 @@ typedef void (mi_output_fun)(const char* msg, void* arg); /// like verbose or warning messages. void mi_register_output(mi_output_fun* out, void* arg); +/// Type of error callback functions. +/// @param err Error code (see mi_register_error() for a complete list). +/// @param arg Argument that was passed at registration to hold extra state. +/// +/// @see mi_register_error() +typedef void (mi_error_fun)(int err, void* arg); + +/// Register an error callback function. +/// @param errfun The error function that is called on an error (use \a NULL for default) +/// @param arg Extra argument that will be passed on to the error function. +/// +/// The \a errfun function is called on an error in mimalloc after emitting +/// an error message (through the output function). It as always legal to just +/// return from the \a errfun function in which case allocation functions generally +/// return \a NULL or ignore the condition. The default function only calls abort() +/// when compiled in secure mode with an \a EFAULT error. The possible error +/// codes are: +/// * \a EAGAIN: Double free was detected (only in debug and secure mode). +/// * \a EFAULT: Corrupted free list or meta-data was detected (only in debug and secure mode). +/// * \a ENOMEM: Not enough memory available to satisfy the request. +/// * \a EOVERFLOW: Too large a request, for example in mi_calloc(), the \a count and \a size parameters are too large. +/// * \a EINVAL: Trying to free or re-allocate an invalid pointer. +void mi_register_error(mi_error_fun* errfun, void* arg); + /// Is a pointer part of our heap? /// @param p The pointer to check. /// @returns \a true if this is a pointer into our heap. diff --git a/docs/group__extended.html b/docs/group__extended.html index 85ea3624..9e2a2efc 100644 --- a/docs/group__extended.html +++ b/docs/group__extended.html @@ -124,6 +124,9 @@ Typedefs typedef void() mi_output_fun(const char *msg, void *arg)  Type of output functions. More...
  +typedef void() mi_error_fun(int err, void *arg) + Type of error callback functions. More...
+  @@ -169,6 +172,9 @@ Functions + + + @@ -225,6 +231,30 @@ Functions
See also
mi_register_deferred_free
+ + + +

◆ mi_error_fun

+ +
+
+

Functions

void mi_register_output (mi_output_fun *out, void *arg)
 Register an output function. More...
 
void mi_register_error (mi_error_fun *errfun, void *arg)
 Register an error callback function. More...
 
bool mi_is_in_heap_region (const void *p)
 Is a pointer part of our heap? More...
 
+ + + +
typedef void() mi_error_fun(int err, void *arg)
+
+ +

Type of error callback functions.

+
Parameters
+ + + +
err   Error code (see mi_register_error() for a complete list).
arg   Argument that was passed at registration to hold extra state.
+
+
+
See also
mi_register_error()
+
@@ -419,6 +449,50 @@ Functions

Some runtime systems use deferred freeing, for example when using reference counting to limit the worst-case free time. Such systems can register a (re-entrant) deferred free function to free more memory on demand. When the force parameter is true, all possible memory should be freed. The per-thread heartbeat parameter is monotonically increasing and guaranteed to be deterministic if the program allocates deterministically. The deferred_free function is guaranteed to be called deterministically after some number of allocations (regardless of freeing or available free memory). At most one deferred_free function can be active.
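For illustration (a sketch only; the callback name and the allocation loop are hypothetical, not part of this patch), registering a deferred free function could look like:

// deferred_free_example.cpp -- illustrative sketch only.
#include <mimalloc.h>
#include <cstdio>

static unsigned long long g_last_heartbeat = 0;

// Invoked by mimalloc after some number of allocations; `force` asks the
// program to free everything it can, `heartbeat` increases deterministically.
static void my_deferred_free(bool force, unsigned long long heartbeat, void* arg) {
  (void)arg; (void)force;
  g_last_heartbeat = heartbeat;
  // a real runtime would release cached or garbage blocks back to mimalloc here
}

int main() {
  mi_register_deferred_free(&my_deferred_free, /*arg=*/NULL);
  for (int i = 0; i < 100000; i++) { void* p = mi_malloc(32); mi_free(p); }
  std::printf("last heartbeat seen: %llu\n", g_last_heartbeat);
  return 0;
}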

+
+
+ +

◆ mi_register_error()

+ +
+
+ + + + + + + + + + + + + + + + + + +
void mi_register_error (mi_error_fun *errfun,
void * arg 
)
+
+ +

Register an error callback function.

+
Parameters
+ + + +
errfun   The error function that is called on an error (use NULL for default)
arg   Extra argument that will be passed on to the error function.
+
+
+

The errfun function is called on an error in mimalloc after emitting an error message (through the output function). It is always legal to just return from the errfun function, in which case allocation functions generally return NULL or ignore the condition. The default function only calls abort() when compiled in secure mode with an EFAULT error. The possible error codes are:

    +
  • EAGAIN: Double free was detected (only in debug and secure mode).
  • +
  • EFAULT: Corrupted free list or meta-data was detected (only in debug and secure mode).
  • +
  • ENOMEM: Not enough memory available to satisfy the request.
  • +
  • EOVERFLOW: Too large a request, for example in mi_calloc(), the count and size parameters are too large.
  • +
  • EINVAL: Trying to free or re-allocate an invalid pointer.
  • +
+
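For illustration (a sketch only; the handler name and the oversized request are hypothetical, not part of this patch), a program could register an error callback like this:

// error_callback_example.cpp -- illustrative sketch only.
#include <mimalloc.h>
#include <cerrno>
#include <cstdio>

// Called after mimalloc has emitted its error message; simply returning lets
// the failing allocation return NULL as described above.
static void my_error_handler(int err, void* arg) {
  (void)arg;
  std::fprintf(stderr, "mimalloc reported error %d%s\n", err,
               (err == ENOMEM) ? " (not enough memory)" : "");
}

int main() {
  mi_register_error(&my_error_handler, /*arg=*/NULL);
  void* p = mi_malloc((size_t)-1);   // an impossible request; expected to fail and return NULL
  if (p == NULL) std::fprintf(stderr, "allocation failed as expected\n");
  mi_free(p);
  return 0;
}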
diff --git a/docs/group__extended.js b/docs/group__extended.js index 7152b518..ff8891b2 100644 --- a/docs/group__extended.js +++ b/docs/group__extended.js @@ -2,6 +2,7 @@ var group__extended = [ [ "MI_SMALL_SIZE_MAX", "group__extended.html#ga1ea64283508718d9d645c38efc2f4305", null ], [ "mi_deferred_free_fun", "group__extended.html#ga299dae78d25ce112e384a98b7309c5be", null ], + [ "mi_error_fun", "group__extended.html#ga251d369cda3f1c2a955c555486ed90e5", null ], [ "mi_output_fun", "group__extended.html#gad823d23444a4b77a40f66bf075a98a0c", null ], [ "mi_collect", "group__extended.html#ga421430e2226d7d468529cec457396756", null ], [ "mi_good_size", "group__extended.html#gac057927cd06c854b45fe7847e921bd47", null ], @@ -9,6 +10,7 @@ var group__extended = [ "mi_is_redirected", "group__extended.html#gaad25050b19f30cd79397b227e0157a3f", null ], [ "mi_malloc_small", "group__extended.html#ga7136c2e55cb22c98ecf95d08d6debb99", null ], [ "mi_register_deferred_free", "group__extended.html#ga3460a6ca91af97be4058f523d3cb8ece", null ], + [ "mi_register_error", "group__extended.html#gaa1d55e0e894be240827e5d87ec3a1f45", null ], [ "mi_register_output", "group__extended.html#gae5b17ff027cd2150b43a33040250cf3f", null ], [ "mi_reserve_huge_os_pages_at", "group__extended.html#ga7795a13d20087447281858d2c771cca1", null ], [ "mi_reserve_huge_os_pages_interleave", "group__extended.html#ga3132f521fb756fc0e8ec0b74fb58df50", null ], diff --git a/docs/group__posix.html b/docs/group__posix.html index 65e8ff7e..eaa4a10f 100644 --- a/docs/group__posix.html +++ b/docs/group__posix.html @@ -140,6 +140,9 @@ Functions void * mi_new (std::size_t n) noexcept(false)  raise std::bad_alloc exception on failure. More...
  +void * mi_new_n (size_t count, size_t size) noexcept(false) + raise std::bad_alloc exception on failure or overflow. More...
+  void * mi_new_aligned (std::size_t n, std::align_val_t alignment) noexcept(false)  raise std::bad_alloc exception on failure. More...
  @@ -484,6 +487,44 @@ Functions

return NULL on failure.

+
+
+ +

◆ mi_new_n()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
void* mi_new_n (size_t count,
size_t size 
)
+
+noexcept
+
+ +

raise std::bad_alloc exception on failure or overflow.
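For illustration (a sketch only, not part of this page), mi_new_n can replace a manual count*size computation so that the multiplication is checked for overflow:

// new_n_example.cpp -- illustrative sketch only.
#include <mimalloc.h>

int main() {
  const size_t count = 1000;
  // like mi_new(count * sizeof(double)), but the multiplication is checked and
  // std::bad_alloc is raised on failure or overflow
  double* xs = (double*)mi_new_n(count, sizeof(double));
  for (size_t i = 0; i < count; i++) xs[i] = (double)i;
  mi_free(xs);
  return 0;
}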

+
diff --git a/docs/group__posix.js b/docs/group__posix.js index 5584092b..0f2b895d 100644 --- a/docs/group__posix.js +++ b/docs/group__posix.js @@ -12,6 +12,7 @@ var group__posix = [ "mi_new", "group__posix.html#gaad048a9fce3d02c5909cd05c6ec24545", null ], [ "mi_new_aligned", "group__posix.html#gaef2c2bdb4f70857902d3c8903ac095f3", null ], [ "mi_new_aligned_nothrow", "group__posix.html#gab5e29558926d934c3f1cae8c815f942c", null ], + [ "mi_new_n", "group__posix.html#gae7bc4f56cd57ed3359060ff4f38bda81", null ], [ "mi_new_nothrow", "group__posix.html#gaeaded64eda71ed6b1d569d3e723abc4a", null ], [ "mi_posix_memalign", "group__posix.html#gacff84f226ba9feb2031b8992e5579447", null ], [ "mi_pvalloc", "group__posix.html#gaeb325c39b887d3b90d85d1eb1712fb1e", null ], diff --git a/docs/mimalloc-doc_8h_source.html b/docs/mimalloc-doc_8h_source.html index c240f151..12d0f799 100644 --- a/docs/mimalloc-doc_8h_source.html +++ b/docs/mimalloc-doc_8h_source.html @@ -102,7 +102,7 @@ $(document).ready(function(){initNavTree('mimalloc-doc_8h_source.html','');});
mimalloc-doc.h
-
1 /* ----------------------------------------------------------------------------
2 Copyright (c) 2018, Microsoft Research, Daan Leijen
3 This is free software; you can redistribute it and/or modify it under the
4 terms of the MIT license. A copy of the license can be found in the file
5 "LICENSE" at the root of this distribution.
6 -----------------------------------------------------------------------------*/
7 
8 #error "documentation file only!"
9 
10 
81 
85 
89 void mi_free(void* p);
90 
95 void* mi_malloc(size_t size);
96 
101 void* mi_zalloc(size_t size);
102 
112 void* mi_calloc(size_t count, size_t size);
113 
126 void* mi_realloc(void* p, size_t newsize);
127 
138 void* mi_recalloc(void* p, size_t count, size_t size);
139 
153 void* mi_expand(void* p, size_t newsize);
154 
164 void* mi_mallocn(size_t count, size_t size);
165 
175 void* mi_reallocn(void* p, size_t count, size_t size);
176 
193 void* mi_reallocf(void* p, size_t newsize);
194 
195 
204 char* mi_strdup(const char* s);
205 
215 char* mi_strndup(const char* s, size_t n);
216 
229 char* mi_realpath(const char* fname, char* resolved_name);
230 
232 
233 // ------------------------------------------------------
234 // Extended functionality
235 // ------------------------------------------------------
236 
240 
243 #define MI_SMALL_SIZE_MAX (128*sizeof(void*))
244 
252 void* mi_malloc_small(size_t size);
253 
261 void* mi_zalloc_small(size_t size);
262 
277 size_t mi_usable_size(void* p);
278 
288 size_t mi_good_size(size_t size);
289 
297 void mi_collect(bool force);
298 
303 void mi_stats_print(void* out);
304 
310 void mi_stats_print(mi_output_fun* out, void* arg);
311 
313 void mi_stats_reset(void);
314 
316 void mi_stats_merge(void);
317 
321 void mi_thread_init(void);
322 
327 void mi_thread_done(void);
328 
334 void mi_thread_stats_print_out(mi_output_fun* out, void* arg);
335 
342 typedef void (mi_deferred_free_fun)(bool force, unsigned long long heartbeat, void* arg);
343 
359 void mi_register_deferred_free(mi_deferred_free_fun* deferred_free, void* arg);
360 
366 typedef void (mi_output_fun)(const char* msg, void* arg);
367 
374 void mi_register_output(mi_output_fun* out, void* arg);
375 
380 bool mi_is_in_heap_region(const void* p);
381 
382 
395 int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs);
396 
409 int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs);
410 
411 
416 bool mi_is_redirected();
417 
418 
420 
421 // ------------------------------------------------------
422 // Aligned allocation
423 // ------------------------------------------------------
424 
430 
443 void* mi_malloc_aligned(size_t size, size_t alignment);
444 void* mi_zalloc_aligned(size_t size, size_t alignment);
445 void* mi_calloc_aligned(size_t count, size_t size, size_t alignment);
446 void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment);
447 
458 void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset);
459 void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset);
460 void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset);
461 void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset);
462 
464 
470 
475 struct mi_heap_s;
476 
481 typedef struct mi_heap_s mi_heap_t;
482 
485 
493 void mi_heap_delete(mi_heap_t* heap);
494 
502 void mi_heap_destroy(mi_heap_t* heap);
503 
508 
512 
519 
521 void mi_heap_collect(mi_heap_t* heap, bool force);
522 
525 void* mi_heap_malloc(mi_heap_t* heap, size_t size);
526 
530 void* mi_heap_malloc_small(mi_heap_t* heap, size_t size);
531 
534 void* mi_heap_zalloc(mi_heap_t* heap, size_t size);
535 
538 void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size);
539 
542 void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size);
543 
546 char* mi_heap_strdup(mi_heap_t* heap, const char* s);
547 
550 char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n);
551 
554 char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name);
555 
556 void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize);
557 void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size);
558 void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize);
559 
560 void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment);
561 void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset);
562 void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment);
563 void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset);
564 void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment);
565 void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset);
566 void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment);
567 void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset);
568 
570 
571 
580 
581 void* mi_rezalloc(void* p, size_t newsize);
582 void* mi_recalloc(void* p, size_t newcount, size_t size) ;
583 
584 void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment);
585 void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset);
586 void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment);
587 void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset);
588 
589 void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize);
590 void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t newcount, size_t size);
591 
592 void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment);
593 void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset);
594 void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment);
595 void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset);
596 
598 
604 
616 #define mi_malloc_tp(tp) ((tp*)mi_malloc(sizeof(tp)))
617 
619 #define mi_zalloc_tp(tp) ((tp*)mi_zalloc(sizeof(tp)))
620 
622 #define mi_calloc_tp(tp,count) ((tp*)mi_calloc(count,sizeof(tp)))
623 
625 #define mi_mallocn_tp(tp,count) ((tp*)mi_mallocn(count,sizeof(tp)))
626 
628 #define mi_reallocn_tp(p,tp,count) ((tp*)mi_reallocn(p,count,sizeof(tp)))
629 
631 #define mi_heap_malloc_tp(hp,tp) ((tp*)mi_heap_malloc(hp,sizeof(tp)))
632 
634 #define mi_heap_zalloc_tp(hp,tp) ((tp*)mi_heap_zalloc(hp,sizeof(tp)))
635 
637 #define mi_heap_calloc_tp(hp,tp,count) ((tp*)mi_heap_calloc(hp,count,sizeof(tp)))
638 
640 #define mi_heap_mallocn_tp(hp,tp,count) ((tp*)mi_heap_mallocn(hp,count,sizeof(tp)))
641 
643 #define mi_heap_reallocn_tp(hp,p,tp,count) ((tp*)mi_heap_reallocn(p,count,sizeof(tp)))
644 
646 #define mi_heap_recalloc_tp(hp,p,tp,count) ((tp*)mi_heap_recalloc(p,count,sizeof(tp)))
647 
649 
655 
662 bool mi_heap_contains_block(mi_heap_t* heap, const void* p);
663 
672 bool mi_heap_check_owned(mi_heap_t* heap, const void* p);
673 
681 bool mi_check_owned(const void* p);
682 
685 typedef struct mi_heap_area_s {
686  void* blocks;
687  size_t reserved;
688  size_t committed;
689  size_t used;
690  size_t block_size;
692 
700 typedef bool (mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg);
701 
713 bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg);
714 
716 
722 
724 typedef enum mi_option_e {
725  // stable options
729  // the following options are experimental
743 } mi_option_t;
744 
745 
746 bool mi_option_enabled(mi_option_t option);
747 void mi_option_enable(mi_option_t option, bool enable);
748 void mi_option_enable_default(mi_option_t option, bool enable);
749 
750 long mi_option_get(mi_option_t option);
751 void mi_option_set(mi_option_t option, long value);
752 void mi_option_set_default(mi_option_t option, long value);
753 
754 
756 
763 
764 void* mi_recalloc(void* p, size_t count, size_t size);
765 size_t mi_malloc_size(const void* p);
766 size_t mi_malloc_usable_size(const void *p);
767 
769 void mi_cfree(void* p);
770 
771 int mi_posix_memalign(void** p, size_t alignment, size_t size);
772 int mi__posix_memalign(void** p, size_t alignment, size_t size);
773 void* mi_memalign(size_t alignment, size_t size);
774 void* mi_valloc(size_t size);
775 
776 void* mi_pvalloc(size_t size);
777 void* mi_aligned_alloc(size_t alignment, size_t size);
778 void* mi_reallocarray(void* p, size_t count, size_t size);
779 
780 void mi_free_size(void* p, size_t size);
781 void mi_free_size_aligned(void* p, size_t size, size_t alignment);
782 void mi_free_aligned(void* p, size_t alignment);
783 
785 void* mi_new(std::size_t n) noexcept(false);
786 
788 void* mi_new_aligned(std::size_t n, std::align_val_t alignment) noexcept(false);
789 
791 void* mi_new_nothrow(size_t n);
792 ``
794 void* mi_new_aligned_nothrow(size_t n, size_t alignment);
795 
797 
void mi_option_enable_default(mi_option_t option, bool enable)
+
1 /* ----------------------------------------------------------------------------
2 Copyright (c) 2018, Microsoft Research, Daan Leijen
3 This is free software; you can redistribute it and/or modify it under the
4 terms of the MIT license. A copy of the license can be found in the file
5 "LICENSE" at the root of this distribution.
6 -----------------------------------------------------------------------------*/
7 
8 #error "documentation file only!"
9 
10 
81 
85 
89 void mi_free(void* p);
90 
95 void* mi_malloc(size_t size);
96 
101 void* mi_zalloc(size_t size);
102 
112 void* mi_calloc(size_t count, size_t size);
113 
126 void* mi_realloc(void* p, size_t newsize);
127 
138 void* mi_recalloc(void* p, size_t count, size_t size);
139 
153 void* mi_expand(void* p, size_t newsize);
154 
164 void* mi_mallocn(size_t count, size_t size);
165 
175 void* mi_reallocn(void* p, size_t count, size_t size);
176 
193 void* mi_reallocf(void* p, size_t newsize);
194 
195 
204 char* mi_strdup(const char* s);
205 
215 char* mi_strndup(const char* s, size_t n);
216 
229 char* mi_realpath(const char* fname, char* resolved_name);
230 
232 
233 // ------------------------------------------------------
234 // Extended functionality
235 // ------------------------------------------------------
236 
240 
243 #define MI_SMALL_SIZE_MAX (128*sizeof(void*))
244 
252 void* mi_malloc_small(size_t size);
253 
261 void* mi_zalloc_small(size_t size);
262 
277 size_t mi_usable_size(void* p);
278 
288 size_t mi_good_size(size_t size);
289 
297 void mi_collect(bool force);
298 
303 void mi_stats_print(void* out);
304 
310 void mi_stats_print(mi_output_fun* out, void* arg);
311 
313 void mi_stats_reset(void);
314 
316 void mi_stats_merge(void);
317 
321 void mi_thread_init(void);
322 
327 void mi_thread_done(void);
328 
334 void mi_thread_stats_print_out(mi_output_fun* out, void* arg);
335 
342 typedef void (mi_deferred_free_fun)(bool force, unsigned long long heartbeat, void* arg);
343 
359 void mi_register_deferred_free(mi_deferred_free_fun* deferred_free, void* arg);
360 
366 typedef void (mi_output_fun)(const char* msg, void* arg);
367 
374 void mi_register_output(mi_output_fun* out, void* arg);
375 
381 typedef void (mi_error_fun)(int err, void* arg);
382 
398 void mi_register_error(mi_error_fun* errfun, void* arg);
399 
404 bool mi_is_in_heap_region(const void* p);
405 
406 
419 int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs);
420 
433 int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs);
434 
435 
440 bool mi_is_redirected();
441 
442 
444 
445 // ------------------------------------------------------
446 // Aligned allocation
447 // ------------------------------------------------------
448 
454 
467 void* mi_malloc_aligned(size_t size, size_t alignment);
468 void* mi_zalloc_aligned(size_t size, size_t alignment);
469 void* mi_calloc_aligned(size_t count, size_t size, size_t alignment);
470 void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment);
471 
482 void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset);
483 void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset);
484 void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset);
485 void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset);
486 
488 
494 
499 struct mi_heap_s;
500 
505 typedef struct mi_heap_s mi_heap_t;
506 
509 
517 void mi_heap_delete(mi_heap_t* heap);
518 
526 void mi_heap_destroy(mi_heap_t* heap);
527 
532 
536 
543 
545 void mi_heap_collect(mi_heap_t* heap, bool force);
546 
549 void* mi_heap_malloc(mi_heap_t* heap, size_t size);
550 
554 void* mi_heap_malloc_small(mi_heap_t* heap, size_t size);
555 
558 void* mi_heap_zalloc(mi_heap_t* heap, size_t size);
559 
562 void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size);
563 
566 void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size);
567 
570 char* mi_heap_strdup(mi_heap_t* heap, const char* s);
571 
574 char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n);
575 
578 char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name);
579 
580 void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize);
581 void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size);
582 void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize);
583 
584 void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment);
585 void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset);
586 void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment);
587 void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset);
588 void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment);
589 void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset);
590 void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment);
591 void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset);
592 
594 
595 
604 
605 void* mi_rezalloc(void* p, size_t newsize);
606 void* mi_recalloc(void* p, size_t newcount, size_t size) ;
607 
608 void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment);
609 void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset);
610 void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment);
611 void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset);
612 
613 void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize);
614 void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t newcount, size_t size);
615 
616 void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment);
617 void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset);
618 void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment);
619 void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset);
620 
622 
628 
640 #define mi_malloc_tp(tp) ((tp*)mi_malloc(sizeof(tp)))
641 
643 #define mi_zalloc_tp(tp) ((tp*)mi_zalloc(sizeof(tp)))
644 
646 #define mi_calloc_tp(tp,count) ((tp*)mi_calloc(count,sizeof(tp)))
647 
649 #define mi_mallocn_tp(tp,count) ((tp*)mi_mallocn(count,sizeof(tp)))
650 
652 #define mi_reallocn_tp(p,tp,count) ((tp*)mi_reallocn(p,count,sizeof(tp)))
653 
655 #define mi_heap_malloc_tp(hp,tp) ((tp*)mi_heap_malloc(hp,sizeof(tp)))
656 
658 #define mi_heap_zalloc_tp(hp,tp) ((tp*)mi_heap_zalloc(hp,sizeof(tp)))
659 
661 #define mi_heap_calloc_tp(hp,tp,count) ((tp*)mi_heap_calloc(hp,count,sizeof(tp)))
662 
664 #define mi_heap_mallocn_tp(hp,tp,count) ((tp*)mi_heap_mallocn(hp,count,sizeof(tp)))
665 
667 #define mi_heap_reallocn_tp(hp,p,tp,count) ((tp*)mi_heap_reallocn(p,count,sizeof(tp)))
668 
670 #define mi_heap_recalloc_tp(hp,p,tp,count) ((tp*)mi_heap_recalloc(p,count,sizeof(tp)))
671 
673 
679 
686 bool mi_heap_contains_block(mi_heap_t* heap, const void* p);
687 
696 bool mi_heap_check_owned(mi_heap_t* heap, const void* p);
697 
705 bool mi_check_owned(const void* p);
706 
709 typedef struct mi_heap_area_s {
710  void* blocks;
711  size_t reserved;
712  size_t committed;
713  size_t used;
714  size_t block_size;
716 
724 typedef bool (mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg);
725 
737 bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg);
738 
740 
746 
748 typedef enum mi_option_e {
749  // stable options
753  // the following options are experimental
767 } mi_option_t;
768 
769 
770 bool mi_option_enabled(mi_option_t option);
771 void mi_option_enable(mi_option_t option, bool enable);
772 void mi_option_enable_default(mi_option_t option, bool enable);
773 
774 long mi_option_get(mi_option_t option);
775 void mi_option_set(mi_option_t option, long value);
776 void mi_option_set_default(mi_option_t option, long value);
777 
778 
780 
787 
788 void* mi_recalloc(void* p, size_t count, size_t size);
789 size_t mi_malloc_size(const void* p);
790 size_t mi_malloc_usable_size(const void *p);
791 
793 void mi_cfree(void* p);
794 
795 int mi_posix_memalign(void** p, size_t alignment, size_t size);
796 int mi__posix_memalign(void** p, size_t alignment, size_t size);
797 void* mi_memalign(size_t alignment, size_t size);
798 void* mi_valloc(size_t size);
799 
800 void* mi_pvalloc(size_t size);
801 void* mi_aligned_alloc(size_t alignment, size_t size);
802 void* mi_reallocarray(void* p, size_t count, size_t size);
803 
804 void mi_free_size(void* p, size_t size);
805 void mi_free_size_aligned(void* p, size_t size, size_t alignment);
806 void mi_free_aligned(void* p, size_t alignment);
807 
809 void* mi_new(std::size_t n) noexcept(false);
810 
812 void* mi_new_n(size_t count, size_t size) noexcept(false);
813 
815 void* mi_new_aligned(std::size_t n, std::align_val_t alignment) noexcept(false);
816 
818 void* mi_new_nothrow(size_t n);
819 ``
821 void* mi_new_aligned_nothrow(size_t n, size_t alignment);
822 
824 
void mi_option_enable_default(mi_option_t option, bool enable)
size_t mi_usable_size(void *p)
Return the available bytes in a memory block.
void * mi_reallocn(void *p, size_t count, size_t size)
Re-allocate memory to count elements of size bytes.
void * mi_malloc_aligned(size_t size, size_t alignment)
Allocate size bytes aligned by alignment.
@@ -116,16 +116,18 @@ $(document).ready(function(){initNavTree('mimalloc-doc_8h_source.html','');});
void mi_stats_merge(void)
Merge thread local statistics with the main statistics and reset.
void mi_option_set_default(mi_option_t option, long value)
void * mi_new_aligned(std::size_t n, std::align_val_t alignment) noexcept(false)
raise std::bad_alloc exception on failure.
+
void() mi_error_fun(int err, void *arg)
Type of error callback functions.
Definition: mimalloc-doc.h:381
void * mi_rezalloc(void *p, size_t newsize)
-
Eagerly commit segments (4MiB) (enabled by default).
Definition: mimalloc-doc.h:730
+
Eagerly commit segments (4MiB) (enabled by default).
Definition: mimalloc-doc.h:754
void * mi_heap_zalloc(mi_heap_t *heap, size_t size)
Allocate zero-initialized in a specific heap.
void mi_option_set(mi_option_t option, long value)
-
Eagerly commit large (256MiB) memory regions (enabled by default, except on Windows)
Definition: mimalloc-doc.h:731
+
Eagerly commit large (256MiB) memory regions (enabled by default, except on Windows)
Definition: mimalloc-doc.h:755
void mi_cfree(void *p)
Just as free but also checks if the pointer p belongs to our heap.
void * mi_recalloc_aligned(void *p, size_t newcount, size_t size, size_t alignment)
-
Definition: mimalloc-doc.h:742
+
Definition: mimalloc-doc.h:766
void * mi_realloc_aligned_at(void *p, size_t newsize, size_t alignment, size_t offset)
-
void * blocks
start of the area containing heap blocks
Definition: mimalloc-doc.h:686
+
void * blocks
start of the area containing heap blocks
Definition: mimalloc-doc.h:710
+
void * mi_new_n(size_t count, size_t size) noexcept(false)
raise std::bad_alloc exception on failure or overflow.
void * mi_realloc_aligned(void *p, size_t newsize, size_t alignment)
int mi__posix_memalign(void **p, size_t alignment, size_t size)
void mi_free(void *p)
Free previously allocated memory.
@@ -141,36 +143,37 @@ $(document).ready(function(){initNavTree('mimalloc-doc_8h_source.html','');});
void * mi_heap_rezalloc_aligned_at(mi_heap_t *heap, void *p, size_t newsize, size_t alignment, size_t offset)
void * mi_zalloc(size_t size)
Allocate zero-initialized size bytes.
void * mi_heap_rezalloc(mi_heap_t *heap, void *p, size_t newsize)
-
The number of segments per thread to keep cached.
Definition: mimalloc-doc.h:734
+
The number of segments per thread to keep cached.
Definition: mimalloc-doc.h:758
void * mi_heap_calloc(mi_heap_t *heap, size_t count, size_t size)
Allocate count zero-initialized elements in a specific heap.
void * mi_new(std::size_t n) noexcept(false)
raise std::bad_alloc exception on failure.
void * mi_heap_calloc_aligned(mi_heap_t *heap, size_t count, size_t size, size_t alignment)
bool mi_is_redirected()
Is the C runtime malloc API redirected?
-
size_t block_size
size in bytes of one block
Definition: mimalloc-doc.h:690
+
size_t block_size
size in bytes of one block
Definition: mimalloc-doc.h:714
void * mi_reallocarray(void *p, size_t count, size_t size)
int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs)
Reserve pages of huge OS pages (1GiB) evenly divided over numa_nodes nodes, but stops after at most t...
void() mi_deferred_free_fun(bool force, unsigned long long heartbeat, void *arg)
Type of deferred free functions.
Definition: mimalloc-doc.h:342
bool mi_is_in_heap_region(const void *p)
Is a pointer part of our heap?
void mi_option_enable(mi_option_t option, bool enable)
void * mi_realloc(void *p, size_t newsize)
Re-allocate memory to newsize bytes.
-
The number of huge OS pages (1GiB in size) to reserve at the start of the program.
Definition: mimalloc-doc.h:733
+
The number of huge OS pages (1GiB in size) to reserve at the start of the program.
Definition: mimalloc-doc.h:757
void * mi_heap_reallocf(mi_heap_t *heap, void *p, size_t newsize)
void mi_free_size_aligned(void *p, size_t size, size_t alignment)
void * mi_rezalloc_aligned_at(void *p, size_t newsize, size_t alignment, size_t offset)
-
Reset page memory after mi_option_reset_delay milliseconds when it becomes free.
Definition: mimalloc-doc.h:735
+
Reset page memory after mi_option_reset_delay milliseconds when it becomes free.
Definition: mimalloc-doc.h:759
void mi_thread_done(void)
Uninitialize mimalloc on a thread.
bool mi_heap_visit_blocks(const mi_heap_t *heap, bool visit_all_blocks, mi_block_visit_fun *visitor, void *arg)
Visit all areas and blocks in a heap.
-
Pretend there are at most N NUMA nodes.
Definition: mimalloc-doc.h:738
+
Pretend there are at most N NUMA nodes.
Definition: mimalloc-doc.h:762
void * mi_malloc(size_t size)
Allocate size bytes.
bool mi_option_enabled(mi_option_t option)
-
Experimental.
Definition: mimalloc-doc.h:739
+
void mi_register_error(mi_error_fun *errfun, void *arg)
Register an error callback function.
+
Experimental.
Definition: mimalloc-doc.h:763
char * mi_heap_strndup(mi_heap_t *heap, const char *s, size_t n)
Duplicate a string of at most length n in a specific heap.
-
bool() mi_block_visit_fun(const mi_heap_t *heap, const mi_heap_area_t *area, void *block, size_t block_size, void *arg)
Visitor function passed to mi_heap_visit_blocks()
Definition: mimalloc-doc.h:700
+
bool() mi_block_visit_fun(const mi_heap_t *heap, const mi_heap_area_t *area, void *block, size_t block_size, void *arg)
Visitor function passed to mi_heap_visit_blocks()
Definition: mimalloc-doc.h:724
void * mi_heap_recalloc(mi_heap_t *heap, void *p, size_t newcount, size_t size)
void * mi_heap_malloc_aligned_at(mi_heap_t *heap, size_t size, size_t alignment, size_t offset)
char * mi_realpath(const char *fname, char *resolved_name)
Resolve a file path name.
-
Print error messages to stderr.
Definition: mimalloc-doc.h:727
-
Experimental.
Definition: mimalloc-doc.h:736
+
Print error messages to stderr.
Definition: mimalloc-doc.h:751
+
Experimental.
Definition: mimalloc-doc.h:760
void * mi_heap_rezalloc_aligned(mi_heap_t *heap, void *p, size_t newsize, size_t alignment)
void * mi_memalign(size_t alignment, size_t size)
void * mi_new_aligned_nothrow(size_t n, size_t alignment)
return NULL on failure.
@@ -179,11 +182,11 @@ $(document).ready(function(){initNavTree('mimalloc-doc_8h_source.html','');});
bool mi_heap_contains_block(mi_heap_t *heap, const void *p)
Does a heap contain a pointer to a previously allocated block?
void mi_heap_collect(mi_heap_t *heap, bool force)
Release outstanding resources in a specific heap.
void * mi_heap_recalloc_aligned_at(mi_heap_t *heap, void *p, size_t newcount, size_t size, size_t alignment, size_t offset)
-
Print verbose messages to stderr.
Definition: mimalloc-doc.h:728
+
Print verbose messages to stderr.
Definition: mimalloc-doc.h:752
void * mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset)
void * mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset)
Allocate size bytes aligned by alignment at a specified offset.
void mi_heap_delete(mi_heap_t *heap)
Delete a previously allocated heap.
-
OS tag to assign to mimalloc'd memory.
Definition: mimalloc-doc.h:741
+
OS tag to assign to mimalloc'd memory.
Definition: mimalloc-doc.h:765
mi_heap_t * mi_heap_get_default()
Get the default heap that is used for mi_malloc() et al.
int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs)
Reserve pages of huge OS pages (1GiB) at a specific numa_node, but stops after at most timeout_msecs ...
void * mi_aligned_alloc(size_t alignment, size_t size)
@@ -191,22 +194,22 @@ $(document).ready(function(){initNavTree('mimalloc-doc_8h_source.html','');});
void mi_thread_init(void)
Initialize mimalloc on a thread.
size_t mi_good_size(size_t size)
Return the used allocation size.
void mi_stats_print(void *out)
Print the main statistics.
-
Experimental.
Definition: mimalloc-doc.h:740
+
Experimental.
Definition: mimalloc-doc.h:764
void * mi_heap_recalloc_aligned(mi_heap_t *heap, void *p, size_t newcount, size_t size, size_t alignment)
void * mi_heap_mallocn(mi_heap_t *heap, size_t count, size_t size)
Allocate count elements in a specific heap.
-
An area of heap space contains blocks of a single size.
Definition: mimalloc-doc.h:685
+
An area of heap space contains blocks of a single size.
Definition: mimalloc-doc.h:709
void mi_thread_stats_print_out(mi_output_fun *out, void *arg)
Print out heap statistics for this thread.
-
Print statistics to stderr when the program is done.
Definition: mimalloc-doc.h:726
+
Print statistics to stderr when the program is done.
Definition: mimalloc-doc.h:750
void * mi_zalloc_aligned(size_t size, size_t alignment)
-
size_t reserved
bytes reserved for this area
Definition: mimalloc-doc.h:687
-
struct mi_heap_s mi_heap_t
Type of first-class heaps.
Definition: mimalloc-doc.h:481
-
size_t used
bytes in use by allocated blocks
Definition: mimalloc-doc.h:689
+
size_t reserved
bytes reserved for this area
Definition: mimalloc-doc.h:711
+
struct mi_heap_s mi_heap_t
Type of first-class heaps.
Definition: mimalloc-doc.h:505
+
size_t used
bytes in use by allocated blocks
Definition: mimalloc-doc.h:713
void mi_register_deferred_free(mi_deferred_free_fun *deferred_free, void *arg)
Register a deferred free function.
void mi_free_size(void *p, size_t size)
void mi_collect(bool force)
Eagerly free memory.
void mi_heap_destroy(mi_heap_t *heap)
Destroy a heap, freeing all its still allocated blocks.
void * mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset)
-
Use large OS pages (2MiB in size) if possible.
Definition: mimalloc-doc.h:732
+
Use large OS pages (2MiB in size) if possible.
Definition: mimalloc-doc.h:756
void * mi_heap_reallocn(mi_heap_t *heap, void *p, size_t count, size_t size)
void mi_register_output(mi_output_fun *out, void *arg)
Register an output function.
void * mi_heap_malloc_small(mi_heap_t *heap, size_t size)
Allocate a small object in a specific heap.
@@ -224,11 +227,11 @@ $(document).ready(function(){initNavTree('mimalloc-doc_8h_source.html','');});
long mi_option_get(mi_option_t option)
mi_heap_t * mi_heap_get_backing()
Get the backing heap.
void mi_free_aligned(void *p, size_t alignment)
-
Delay in milli-seconds before resetting a page (100ms by default)
Definition: mimalloc-doc.h:737
+
Delay in milli-seconds before resetting a page (100ms by default)
Definition: mimalloc-doc.h:761
mi_heap_t * mi_heap_new()
Create a new heap that can be used for allocation.
void * mi_heap_malloc(mi_heap_t *heap, size_t size)
Allocate in a specific heap.
-
size_t committed
current committed bytes of this area
Definition: mimalloc-doc.h:688
-
mi_option_t
Runtime options.
Definition: mimalloc-doc.h:724
+
size_t committed
current committed bytes of this area
Definition: mimalloc-doc.h:712
+
mi_option_t
Runtime options.
Definition: mimalloc-doc.h:748
bool mi_heap_check_owned(mi_heap_t *heap, const void *p)
Check safely if any pointer is part of a heap.
mi_heap_t * mi_heap_set_default(mi_heap_t *heap)
Set the default heap to use for mi_malloc() et al.
diff --git a/docs/navtreeindex0.js b/docs/navtreeindex0.js index d1b0e072..e2667728 100644 --- a/docs/navtreeindex0.js +++ b/docs/navtreeindex0.js @@ -29,27 +29,29 @@ var NAVTREEINDEX0 = "group__analysis.html#gadfa01e2900f0e5d515ad5506b26f6d65":[5,6,1], "group__analysis.html#structmi__heap__area__t":[5,6,0], "group__extended.html":[5,1], -"group__extended.html#ga089c859d9eddc5f9b4bd946cd53cebee":[5,1,19], -"group__extended.html#ga0ae4581e85453456a0d658b2b98bf7bf":[5,1,16], +"group__extended.html#ga089c859d9eddc5f9b4bd946cd53cebee":[5,1,21], +"group__extended.html#ga0ae4581e85453456a0d658b2b98bf7bf":[5,1,18], "group__extended.html#ga1ea64283508718d9d645c38efc2f4305":[5,1,0], -"group__extended.html#ga220f29f40a44404b0061c15bc1c31152":[5,1,20], -"group__extended.html#ga256cc6f13a142deabbadd954a217e228":[5,1,14], +"group__extended.html#ga220f29f40a44404b0061c15bc1c31152":[5,1,22], +"group__extended.html#ga251d369cda3f1c2a955c555486ed90e5":[5,1,2], +"group__extended.html#ga256cc6f13a142deabbadd954a217e228":[5,1,16], "group__extended.html#ga299dae78d25ce112e384a98b7309c5be":[5,1,1], -"group__extended.html#ga2d126e5c62d3badc35445e5d84166df2":[5,1,13], -"group__extended.html#ga3132f521fb756fc0e8ec0b74fb58df50":[5,1,11], -"group__extended.html#ga3460a6ca91af97be4058f523d3cb8ece":[5,1,8], -"group__extended.html#ga3bb8468b8cfcc6e2a61d98aee85c5f99":[5,1,15], -"group__extended.html#ga421430e2226d7d468529cec457396756":[5,1,3], -"group__extended.html#ga5f071b10d4df1c3658e04e7fd67a94e6":[5,1,5], -"group__extended.html#ga7136c2e55cb22c98ecf95d08d6debb99":[5,1,7], -"group__extended.html#ga7795a13d20087447281858d2c771cca1":[5,1,10], -"group__extended.html#ga854b1de8cb067c7316286c28b2fcd3d1":[5,1,12], -"group__extended.html#gaad25050b19f30cd79397b227e0157a3f":[5,1,6], -"group__extended.html#gab1dac8476c46cb9eecab767eb40c1525":[5,1,18], -"group__extended.html#gac057927cd06c854b45fe7847e921bd47":[5,1,4], -"group__extended.html#gad823d23444a4b77a40f66bf075a98a0c":[5,1,2], -"group__extended.html#gae5b17ff027cd2150b43a33040250cf3f":[5,1,9], -"group__extended.html#gaf8e73efc2cbca9ebfdfb166983a04c17":[5,1,17], +"group__extended.html#ga2d126e5c62d3badc35445e5d84166df2":[5,1,15], +"group__extended.html#ga3132f521fb756fc0e8ec0b74fb58df50":[5,1,13], +"group__extended.html#ga3460a6ca91af97be4058f523d3cb8ece":[5,1,9], +"group__extended.html#ga3bb8468b8cfcc6e2a61d98aee85c5f99":[5,1,17], +"group__extended.html#ga421430e2226d7d468529cec457396756":[5,1,4], +"group__extended.html#ga5f071b10d4df1c3658e04e7fd67a94e6":[5,1,6], +"group__extended.html#ga7136c2e55cb22c98ecf95d08d6debb99":[5,1,8], +"group__extended.html#ga7795a13d20087447281858d2c771cca1":[5,1,12], +"group__extended.html#ga854b1de8cb067c7316286c28b2fcd3d1":[5,1,14], +"group__extended.html#gaa1d55e0e894be240827e5d87ec3a1f45":[5,1,10], +"group__extended.html#gaad25050b19f30cd79397b227e0157a3f":[5,1,7], +"group__extended.html#gab1dac8476c46cb9eecab767eb40c1525":[5,1,20], +"group__extended.html#gac057927cd06c854b45fe7847e921bd47":[5,1,5], +"group__extended.html#gad823d23444a4b77a40f66bf075a98a0c":[5,1,3], +"group__extended.html#gae5b17ff027cd2150b43a33040250cf3f":[5,1,11], +"group__extended.html#gaf8e73efc2cbca9ebfdfb166983a04c17":[5,1,19], "group__heap.html":[5,3], "group__heap.html#ga00e95ba1e01acac3cfd95bb7a357a6f0":[5,3,20], "group__heap.html#ga08ca6419a5c057a4d965868998eef487":[5,3,3], @@ -121,18 +123,19 @@ var NAVTREEINDEX0 = "group__posix.html#ga0d28d5cf61e6bfbb18c63092939fe5c9":[5,8,3], "group__posix.html#ga1326d2e4388630b5f81ca7206318b8e5":[5,8,1], 
"group__posix.html#ga4531c9e775bb3ae12db57c1ba8a5d7de":[5,8,6], -"group__posix.html#ga48fad8648a2f1dab9c87ea9448a52088":[5,8,15], +"group__posix.html#ga48fad8648a2f1dab9c87ea9448a52088":[5,8,16], "group__posix.html#ga705dc7a64bffacfeeb0141501a5c35d7":[5,8,2], "group__posix.html#ga72e9d7ffb5fe94d69bc722c8506e27bc":[5,8,5], -"group__posix.html#ga73baaf5951f5165ba0763d0c06b6a93b":[5,8,16], +"group__posix.html#ga73baaf5951f5165ba0763d0c06b6a93b":[5,8,17], "group__posix.html#gaab7fa71ea93b96873f5d9883db57d40e":[5,8,8], "group__posix.html#gaad048a9fce3d02c5909cd05c6ec24545":[5,8,9], "group__posix.html#gab5e29558926d934c3f1cae8c815f942c":[5,8,11], -"group__posix.html#gacff84f226ba9feb2031b8992e5579447":[5,8,13], +"group__posix.html#gacff84f226ba9feb2031b8992e5579447":[5,8,14], "group__posix.html#gad5a69c8fea96aa2b7a7c818c2130090a":[5,8,0], "group__posix.html#gae01389eedab8d67341ff52e2aad80ebb":[5,8,4], -"group__posix.html#gaeaded64eda71ed6b1d569d3e723abc4a":[5,8,12], -"group__posix.html#gaeb325c39b887d3b90d85d1eb1712fb1e":[5,8,14], +"group__posix.html#gae7bc4f56cd57ed3359060ff4f38bda81":[5,8,12], +"group__posix.html#gaeaded64eda71ed6b1d569d3e723abc4a":[5,8,13], +"group__posix.html#gaeb325c39b887d3b90d85d1eb1712fb1e":[5,8,15], "group__posix.html#gaef2c2bdb4f70857902d3c8903ac095f3":[5,8,10], "group__typed.html":[5,5], "group__typed.html#ga0619a62c5fd886f1016030abe91f0557":[5,5,7], diff --git a/docs/search/all_6.js b/docs/search/all_6.js index cc7a26ec..7af11c0f 100644 --- a/docs/search/all_6.js +++ b/docs/search/all_6.js @@ -11,6 +11,7 @@ var searchData= ['mi_5fcheck_5fowned',['mi_check_owned',['../group__analysis.html#ga628c237489c2679af84a4d0d143b3dd5',1,'mimalloc-doc.h']]], ['mi_5fcollect',['mi_collect',['../group__extended.html#ga421430e2226d7d468529cec457396756',1,'mimalloc-doc.h']]], ['mi_5fdeferred_5ffree_5ffun',['mi_deferred_free_fun',['../group__extended.html#ga299dae78d25ce112e384a98b7309c5be',1,'mimalloc-doc.h']]], + ['mi_5ferror_5ffun',['mi_error_fun',['../group__extended.html#ga251d369cda3f1c2a955c555486ed90e5',1,'mimalloc-doc.h']]], ['mi_5fexpand',['mi_expand',['../group__malloc.html#gaaee66a1d483c3e28f585525fb96707e4',1,'mimalloc-doc.h']]], ['mi_5ffree',['mi_free',['../group__malloc.html#gaf2c7b89c327d1f60f59e68b9ea644d95',1,'mimalloc-doc.h']]], ['mi_5ffree_5faligned',['mi_free_aligned',['../group__posix.html#ga0d28d5cf61e6bfbb18c63092939fe5c9',1,'mimalloc-doc.h']]], @@ -75,6 +76,7 @@ var searchData= ['mi_5fnew',['mi_new',['../group__posix.html#gaad048a9fce3d02c5909cd05c6ec24545',1,'mimalloc-doc.h']]], ['mi_5fnew_5faligned',['mi_new_aligned',['../group__posix.html#gaef2c2bdb4f70857902d3c8903ac095f3',1,'mimalloc-doc.h']]], ['mi_5fnew_5faligned_5fnothrow',['mi_new_aligned_nothrow',['../group__posix.html#gab5e29558926d934c3f1cae8c815f942c',1,'mimalloc-doc.h']]], + ['mi_5fnew_5fn',['mi_new_n',['../group__posix.html#gae7bc4f56cd57ed3359060ff4f38bda81',1,'mimalloc-doc.h']]], ['mi_5fnew_5fnothrow',['mi_new_nothrow',['../group__posix.html#gaeaded64eda71ed6b1d569d3e723abc4a',1,'mimalloc-doc.h']]], ['mi_5foption_5feager_5fcommit',['mi_option_eager_commit',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca1e8de72c93da7ff22d91e1e27b52ac2b',1,'mimalloc-doc.h']]], ['mi_5foption_5feager_5fcommit_5fdelay',['mi_option_eager_commit_delay',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca17a190c25be381142d87e0468c4c068c',1,'mimalloc-doc.h']]], @@ -113,6 +115,7 @@ var searchData= 
['mi_5frecalloc_5faligned',['mi_recalloc_aligned',['../group__zeroinit.html#ga3e7e5c291acf1c7fd7ffd9914a9f945f',1,'mimalloc-doc.h']]], ['mi_5frecalloc_5faligned_5fat',['mi_recalloc_aligned_at',['../group__zeroinit.html#ga4ff5e92ad73585418a072c9d059e5cf9',1,'mimalloc-doc.h']]], ['mi_5fregister_5fdeferred_5ffree',['mi_register_deferred_free',['../group__extended.html#ga3460a6ca91af97be4058f523d3cb8ece',1,'mimalloc-doc.h']]], + ['mi_5fregister_5ferror',['mi_register_error',['../group__extended.html#gaa1d55e0e894be240827e5d87ec3a1f45',1,'mimalloc-doc.h']]], ['mi_5fregister_5foutput',['mi_register_output',['../group__extended.html#gae5b17ff027cd2150b43a33040250cf3f',1,'mimalloc-doc.h']]], ['mi_5freserve_5fhuge_5fos_5fpages_5fat',['mi_reserve_huge_os_pages_at',['../group__extended.html#ga7795a13d20087447281858d2c771cca1',1,'mimalloc-doc.h']]], ['mi_5freserve_5fhuge_5fos_5fpages_5finterleave',['mi_reserve_huge_os_pages_interleave',['../group__extended.html#ga3132f521fb756fc0e8ec0b74fb58df50',1,'mimalloc-doc.h']]], diff --git a/docs/search/functions_0.js b/docs/search/functions_0.js index d1d209a1..098041bb 100644 --- a/docs/search/functions_0.js +++ b/docs/search/functions_0.js @@ -62,6 +62,7 @@ var searchData= ['mi_5fnew',['mi_new',['../group__posix.html#gaad048a9fce3d02c5909cd05c6ec24545',1,'mimalloc-doc.h']]], ['mi_5fnew_5faligned',['mi_new_aligned',['../group__posix.html#gaef2c2bdb4f70857902d3c8903ac095f3',1,'mimalloc-doc.h']]], ['mi_5fnew_5faligned_5fnothrow',['mi_new_aligned_nothrow',['../group__posix.html#gab5e29558926d934c3f1cae8c815f942c',1,'mimalloc-doc.h']]], + ['mi_5fnew_5fn',['mi_new_n',['../group__posix.html#gae7bc4f56cd57ed3359060ff4f38bda81',1,'mimalloc-doc.h']]], ['mi_5fnew_5fnothrow',['mi_new_nothrow',['../group__posix.html#gaeaded64eda71ed6b1d569d3e723abc4a',1,'mimalloc-doc.h']]], ['mi_5foption_5fenable',['mi_option_enable',['../group__options.html#ga6d45a20a3131f18bc351b69763b38ce4',1,'mimalloc-doc.h']]], ['mi_5foption_5fenable_5fdefault',['mi_option_enable_default',['../group__options.html#ga37988264b915a7db92530cc02d5494cb',1,'mimalloc-doc.h']]], @@ -82,6 +83,7 @@ var searchData= ['mi_5frecalloc_5faligned',['mi_recalloc_aligned',['../group__zeroinit.html#ga3e7e5c291acf1c7fd7ffd9914a9f945f',1,'mimalloc-doc.h']]], ['mi_5frecalloc_5faligned_5fat',['mi_recalloc_aligned_at',['../group__zeroinit.html#ga4ff5e92ad73585418a072c9d059e5cf9',1,'mimalloc-doc.h']]], ['mi_5fregister_5fdeferred_5ffree',['mi_register_deferred_free',['../group__extended.html#ga3460a6ca91af97be4058f523d3cb8ece',1,'mimalloc-doc.h']]], + ['mi_5fregister_5ferror',['mi_register_error',['../group__extended.html#gaa1d55e0e894be240827e5d87ec3a1f45',1,'mimalloc-doc.h']]], ['mi_5fregister_5foutput',['mi_register_output',['../group__extended.html#gae5b17ff027cd2150b43a33040250cf3f',1,'mimalloc-doc.h']]], ['mi_5freserve_5fhuge_5fos_5fpages_5fat',['mi_reserve_huge_os_pages_at',['../group__extended.html#ga7795a13d20087447281858d2c771cca1',1,'mimalloc-doc.h']]], ['mi_5freserve_5fhuge_5fos_5fpages_5finterleave',['mi_reserve_huge_os_pages_interleave',['../group__extended.html#ga3132f521fb756fc0e8ec0b74fb58df50',1,'mimalloc-doc.h']]], diff --git a/docs/search/typedefs_0.js b/docs/search/typedefs_0.js index 17816828..44a0a6c6 100644 --- a/docs/search/typedefs_0.js +++ b/docs/search/typedefs_0.js @@ -2,6 +2,7 @@ var searchData= [ ['mi_5fblock_5fvisit_5ffun',['mi_block_visit_fun',['../group__analysis.html#gadfa01e2900f0e5d515ad5506b26f6d65',1,'mimalloc-doc.h']]], 
['mi_5fdeferred_5ffree_5ffun',['mi_deferred_free_fun',['../group__extended.html#ga299dae78d25ce112e384a98b7309c5be',1,'mimalloc-doc.h']]], + ['mi_5ferror_5ffun',['mi_error_fun',['../group__extended.html#ga251d369cda3f1c2a955c555486ed90e5',1,'mimalloc-doc.h']]], ['mi_5fheap_5ft',['mi_heap_t',['../group__heap.html#ga34a47cde5a5b38c29f1aa3c5e76943c2',1,'mimalloc-doc.h']]], ['mi_5foutput_5ffun',['mi_output_fun',['../group__extended.html#gad823d23444a4b77a40f66bf075a98a0c',1,'mimalloc-doc.h']]] ]; diff --git a/include/mimalloc-internal.h b/include/mimalloc-internal.h index f039fc50..eaa327be 100644 --- a/include/mimalloc-internal.h +++ b/include/mimalloc-internal.h @@ -23,25 +23,21 @@ terms of the MIT license. A copy of the license can be found in the file #if defined(_MSC_VER) #pragma warning(disable:4127) // constant conditional due to MI_SECURE paths #define mi_decl_noinline __declspec(noinline) -#define mi_attr_noreturn #elif defined(__GNUC__) || defined(__clang__) #define mi_decl_noinline __attribute__((noinline)) -#define mi_attr_noreturn __attribute__((noreturn)) #else #define mi_decl_noinline -#define mi_attr_noreturn #endif // "options.c" void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message); void _mi_fprintf(mi_output_fun* out, void* arg, const char* fmt, ...); -void _mi_error_message(const char* fmt, ...); void _mi_warning_message(const char* fmt, ...); void _mi_verbose_message(const char* fmt, ...); void _mi_trace_message(const char* fmt, ...); void _mi_options_init(void); -void _mi_fatal_error(const char* fmt, ...) mi_attr_noreturn; +void _mi_error_message(int err, const char* fmt, ...); // random.c void _mi_random_init(mi_random_ctx_t* ctx); @@ -146,6 +142,29 @@ bool _mi_page_is_valid(mi_page_t* page); #endif +/* ----------------------------------------------------------- + Error codes passed to `_mi_fatal_error` + All are recoverable but EFAULT is a serious error and aborts by default in secure mode. 
+ For portability define undefined error codes using common Unix codes: + +----------------------------------------------------------- */ +#include +#ifndef EAGAIN // double free +#define EAGAIN (11) +#endif +#ifndef ENOMEM // out of memory +#define ENOMEM (12) +#endif +#ifndef EFAULT // corrupted free-list or meta-data +#define EFAULT (14) +#endif +#ifndef EINVAL // trying to free an invalid pointer +#define EINVAL (22) +#endif +#ifndef EOVERFLOW // count*size overflow +#define EOVERFLOW (75) +#endif + /* ----------------------------------------------------------- Inlined definitions @@ -166,30 +185,6 @@ bool _mi_page_is_valid(mi_page_t* page); #define MI_INIT256(x) MI_INIT128(x),MI_INIT128(x) -// Overflow detecting multiply -static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) { - // quick check for the case where count is one (common for C++ allocators) - if (count==1) { - *total = size; - return false; - } -#if __has_builtin(__builtin_umul_overflow) || __GNUC__ >= 5 -#include // UINT_MAX, ULONG_MAX -#if (SIZE_MAX == UINT_MAX) - return __builtin_umul_overflow(count, size, total); -#elif (SIZE_MAX == ULONG_MAX) - return __builtin_umull_overflow(count, size, total); -#else - return __builtin_umulll_overflow(count, size, total); -#endif -#else /* __builtin_umul_overflow is unavailable */ - #define MI_MUL_NO_OVERFLOW ((size_t)1 << (4*sizeof(size_t))) // sqrt(SIZE_MAX) - *total = count * size; - return ((size >= MI_MUL_NO_OVERFLOW || count >= MI_MUL_NO_OVERFLOW) - && size > 0 && (SIZE_MAX / size) < count); -#endif -} - // Is `x` a power of two? (0 is considered a power of two) static inline bool _mi_is_power_of_two(uintptr_t x) { return ((x & (x - 1)) == 0); @@ -229,6 +224,40 @@ static inline size_t _mi_wsize_from_size(size_t size) { } +// Overflow detecting multiply +static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) { +#if __has_builtin(__builtin_umul_overflow) || __GNUC__ >= 5 +#include // UINT_MAX, ULONG_MAX +#if (SIZE_MAX == UINT_MAX) + return __builtin_umul_overflow(count, size, total); +#elif (SIZE_MAX == ULONG_MAX) + return __builtin_umull_overflow(count, size, total); +#else + return __builtin_umulll_overflow(count, size, total); +#endif +#else /* __builtin_umul_overflow is unavailable */ + #define MI_MUL_NO_OVERFLOW ((size_t)1 << (4*sizeof(size_t))) // sqrt(SIZE_MAX) + *total = count * size; + return ((size >= MI_MUL_NO_OVERFLOW || count >= MI_MUL_NO_OVERFLOW) + && size > 0 && (SIZE_MAX / size) < count); +#endif +} + +// Safe multiply `count*size` into `total`; return `true` on overflow. +static inline bool mi_count_size_overflow(size_t count, size_t size, size_t* total) { + if (count==1) { // quick check for the case where count is one (common for C++ allocators) + *total = size; + return false; + } + else if (mi_unlikely(mi_mul_overflow(count, size, total))) { + _mi_error_message(EOVERFLOW, "allocation request too large (%zu * %zu bytes)\n", count, size); + *total = SIZE_MAX; + return true; + } + else return false; +} + + /* ----------------------------------------------------------- The thread local default heap ----------------------------------------------------------- */ @@ -506,7 +535,7 @@ static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t* // check for free list corruption: is `next` at least in the same page? // TODO: check if `next` is `page->block_size` aligned? 
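The `mi_count_size_overflow` helper introduced in this hunk centralizes the `count*size` check that every `calloc`-style entry point needs: when `__builtin_umul_overflow` is not available it avoids a division in the common case by first comparing both operands against 2^(bits/2). A standalone sketch of that fallback logic, with made-up names (`mul_overflows`, `no_overflow`) and independent of the mimalloc internals:

```
#include <cstddef>
#include <limits>

// Returns true if count*size overflows size_t; *total receives the (wrapped) product.
// If both operands are below 2^(bits/2) the product cannot overflow; only the rare
// large case pays for the exact division test.
static bool mul_overflows(std::size_t count, std::size_t size, std::size_t* total) {
  const std::size_t no_overflow = std::size_t(1) << (4 * sizeof(std::size_t)); // 2^(bits/2)
  *total = count * size;  // unsigned wrap-around is well defined
  return (count >= no_overflow || size >= no_overflow)
         && size > 0
         && (std::numeric_limits<std::size_t>::max() / size) < count;
}
```

Call sites such as `mi_heap_calloc` then simply report `EOVERFLOW` and return `NULL` when the check fires, as the accompanying `src/alloc.c` hunks show.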
if (mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next))) { - _mi_fatal_error("corrupted free list entry of size %zub at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next); + _mi_error_message(EFAULT, "corrupted free list entry of size %zub at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next); next = NULL; } return next; diff --git a/include/mimalloc.h b/include/mimalloc.h index 67ff1a35..1250314c 100644 --- a/include/mimalloc.h +++ b/include/mimalloc.h @@ -104,16 +104,23 @@ mi_decl_export mi_decl_allocator void* mi_mallocn(size_t count, size_t size) mi_decl_export mi_decl_allocator void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2,3); mi_decl_export mi_decl_allocator void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); - mi_decl_export size_t mi_usable_size(const void* p) mi_attr_noexcept; mi_decl_export size_t mi_good_size(size_t size) mi_attr_noexcept; + +// ------------------------------------------------------ +// Internals +// ------------------------------------------------------ + typedef void (mi_cdecl mi_deferred_free_fun)(bool force, unsigned long long heartbeat, void* arg); mi_decl_export void mi_register_deferred_free(mi_deferred_free_fun* deferred_free, void* arg) mi_attr_noexcept; typedef void (mi_cdecl mi_output_fun)(const char* msg, void* arg); mi_decl_export void mi_register_output(mi_output_fun* out, void* arg) mi_attr_noexcept; +typedef void (mi_cdecl mi_error_fun)(int err, void* arg); +mi_decl_export void mi_register_error(mi_error_fun* fun, void* arg); + mi_decl_export void mi_collect(bool force) mi_attr_noexcept; mi_decl_export int mi_version(void) mi_attr_noexcept; mi_decl_export void mi_stats_reset(void) mi_attr_noexcept; @@ -143,9 +150,9 @@ mi_decl_export mi_decl_allocator void* mi_realloc_aligned(void* p, size_t newsiz mi_decl_export mi_decl_allocator void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); -// ------------------------------------------------------ -// Heaps -// ------------------------------------------------------ +// ------------------------------------------------------------------------------------- +// Heaps: first-class, but can only allocate from the same thread that created it. 
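Together with the errno-style codes defined in `mimalloc-internal.h`, the new `mi_register_error` API declared above in `mimalloc.h` turns what used to be unconditional `_mi_fatal_error` aborts into reportable events: `EAGAIN` for a detected double free, `EFAULT` for corrupted free lists or meta-data, `EINVAL` for freeing an invalid pointer, and `ENOMEM`/`EOVERFLOW` for failed or oversized requests. A small application-side sketch (the handler name is made up; only `mi_register_error`, `mi_malloc` and `mi_free` are mimalloc API):

```
#include <mimalloc.h>
#include <cerrno>
#include <cstdio>
#include <cstdlib>

// Application-defined handler: log every reported code, abort only on corruption.
static void app_error_handler(int err, void* arg) {
  (void)arg;  // the second argument given to mi_register_error comes back here
  std::fprintf(stderr, "mimalloc reported error %d\n", err);
  if (err == EFAULT) std::abort();  // corrupted free list / meta-data: do not continue
}

int main() {
  mi_register_error(app_error_handler, nullptr);
  void* p = mi_malloc(64);
  mi_free(p);
  // mi_free(p);  // in debug or secure builds this double free would report EAGAIN
  return 0;
}
```

If no handler is registered, `mi_error_default` keeps the previous behaviour: it aborts on `EFAULT` in secure mode and otherwise returns after the message is printed.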
+// ------------------------------------------------------------------------------------- struct mi_heap_s; typedef struct mi_heap_s mi_heap_t; diff --git a/src/alloc-aligned.c b/src/alloc-aligned.c index 5a59a63a..55b0e041 100644 --- a/src/alloc-aligned.c +++ b/src/alloc-aligned.c @@ -79,7 +79,7 @@ mi_decl_allocator void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, siz mi_decl_allocator void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept { size_t total; - if (mi_mul_overflow(count, size, &total)) return NULL; + if (mi_count_size_overflow(count, size, &total)) return NULL; return mi_heap_zalloc_aligned_at(heap, total, alignment, offset); } @@ -168,13 +168,13 @@ mi_decl_allocator void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_ mi_decl_allocator void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept { size_t total; - if (mi_mul_overflow(newcount, size, &total)) return NULL; + if (mi_count_size_overflow(newcount, size, &total)) return NULL; return mi_heap_rezalloc_aligned_at(heap, p, total, alignment, offset); } mi_decl_allocator void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept { size_t total; - if (mi_mul_overflow(newcount, size, &total)) return NULL; + if (mi_count_size_overflow(newcount, size, &total)) return NULL; return mi_heap_rezalloc_aligned(heap, p, total, alignment); } diff --git a/src/alloc.c b/src/alloc.c index 37d43d9f..e605c017 100644 --- a/src/alloc.c +++ b/src/alloc.c @@ -146,7 +146,7 @@ static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, con mi_list_contains(page, page->local_free, block) || mi_list_contains(page, mi_page_thread_free(page), block)) { - _mi_fatal_error("double free detected of block %p with size %zu\n", block, mi_page_block_size(page)); + _mi_error_message(EAGAIN, "double free detected of block %p with size %zu\n", block, mi_page_block_size(page)); return true; } return false; @@ -300,7 +300,7 @@ void mi_free(void* p) mi_attr_noexcept { #if (MI_DEBUG>0) if (mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0)) { - _mi_error_message("trying to free an invalid (unaligned) pointer: %p\n", p); + _mi_error_message(EINVAL, "trying to free an invalid (unaligned) pointer: %p\n", p); return; } #endif @@ -310,16 +310,16 @@ void mi_free(void* p) mi_attr_noexcept #if (MI_DEBUG!=0) if (mi_unlikely(!mi_is_in_heap_region(p))) { - _mi_warning_message("possibly trying to free a pointer that does not point to a valid heap region: 0x%p\n" + _mi_warning_message("possibly trying to free a pointer that does not point to a valid heap region: %p\n" "(this may still be a valid very large allocation (over 64MiB))\n", p); if (mi_likely(_mi_ptr_cookie(segment) == segment->cookie)) { - _mi_warning_message("(yes, the previous pointer 0x%p was valid after all)\n", p); + _mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p); } } #endif #if (MI_DEBUG!=0 || MI_SECURE>=4) if (mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie)) { - _mi_error_message("trying to free a pointer that does not point to a valid heap space: %p\n", p); + _mi_error_message(EINVAL, "trying to free a pointer that does not point to a valid heap space: %p\n", p); return; } #endif @@ -432,7 +432,7 @@ void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept { extern inline mi_decl_allocator void* 
mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept { size_t total; - if (mi_mul_overflow(count,size,&total)) return NULL; + if (mi_count_size_overflow(count,size,&total)) return NULL; return mi_heap_zalloc(heap,total); } @@ -443,7 +443,7 @@ mi_decl_allocator void* mi_calloc(size_t count, size_t size) mi_attr_noexcept { // Uninitialized `calloc` extern mi_decl_allocator void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept { size_t total; - if (mi_mul_overflow(count, size, &total)) return NULL; + if (mi_count_size_overflow(count, size, &total)) return NULL; return mi_heap_malloc(heap, total); } @@ -484,7 +484,7 @@ mi_decl_allocator void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize mi_decl_allocator void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept { size_t total; - if (mi_mul_overflow(count, size, &total)) return NULL; + if (mi_count_size_overflow(count, size, &total)) return NULL; return mi_heap_realloc(heap, p, total); } @@ -502,7 +502,7 @@ mi_decl_allocator void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsiz mi_decl_allocator void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept { size_t total; - if (mi_mul_overflow(count, size, &total)) return NULL; + if (mi_count_size_overflow(count, size, &total)) return NULL; return mi_heap_rezalloc(heap, p, total); } @@ -570,7 +570,6 @@ char* mi_strndup(const char* s, size_t n) mi_attr_noexcept { #define PATH_MAX MAX_PATH #endif #include -#include char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept { // todo: use GetFullPathNameW to allow longer file names char buf[PATH_MAX]; @@ -645,10 +644,6 @@ static bool mi_try_new_handler(bool nothrow) { } } #else -#include -#ifndef ENOMEM -#define ENOMEM 12 -#endif typedef void (*std_new_handler_t)(); #if (defined(__GNUC__) || defined(__clang__)) @@ -668,7 +663,7 @@ std_new_handler_t mi_get_new_handler() { static bool mi_try_new_handler(bool nothrow) { std_new_handler_t h = mi_get_new_handler(); if (h==NULL) { - if (!nothrow) exit(ENOMEM); + if (!nothrow) exit(ENOMEM); // cannot throw in plain C, use exit as we are out of memory anyway. 
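These `mi_try_new_handler` paths are what give the `mi_new*` wrappers their C++ semantics: on allocation failure they consult `std::get_new_handler` and retry, and only fall back to throwing `std::bad_alloc` (or, when mimalloc is built as plain C, to `exit(ENOMEM)` as the comment above notes) once no handler is installed. A hedged illustration of how an application-installed handler cooperates with `mi_new`; the reserve-pool idea and the `release_reserve` name are hypothetical, not part of mimalloc:

```
#include <mimalloc.h>
#include <new>
#include <cstdio>

static void* reserve = mi_malloc(1u << 20);  // 1 MiB emergency reserve

// Invoked (via std::get_new_handler) when mi_new fails: free the reserve so the
// retry can succeed, otherwise give up by throwing std::bad_alloc ourselves.
static void release_reserve() {
  if (reserve != nullptr) { mi_free(reserve); reserve = nullptr; }
  else throw std::bad_alloc();
}

int main() {
  std::set_new_handler(release_reserve);
  void* p = mi_new(256);   // on out-of-memory this would call release_reserve and retry
  mi_free(p);
  std::printf("mi_new succeeded\n");
  return 0;
}
```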
return false; } else { @@ -718,7 +713,7 @@ void* mi_new_aligned_nothrow(size_t size, size_t alignment) { void* mi_new_n(size_t count, size_t size) { size_t total; - if (mi_unlikely(mi_mul_overflow(count, size, &total))) { + if (mi_unlikely(mi_count_size_overflow(count, size, &total))) { mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc return NULL; } diff --git a/src/arena.c b/src/arena.c index 7f1a1caf..f20a03e9 100644 --- a/src/arena.c +++ b/src/arena.c @@ -229,18 +229,18 @@ void _mi_arena_free(void* p, size_t size, size_t memid, mi_stats_t* stats) { mi_arena_t* arena = (mi_arena_t*)mi_atomic_read_ptr_relaxed(mi_atomic_cast(void*, &mi_arenas[arena_idx])); mi_assert_internal(arena != NULL); if (arena == NULL) { - _mi_fatal_error("trying to free from non-existent arena: %p, size %zu, memid: 0x%zx\n", p, size, memid); + _mi_error_message(EINVAL, "trying to free from non-existent arena: %p, size %zu, memid: 0x%zx\n", p, size, memid); return; } mi_assert_internal(arena->field_count > mi_bitmap_index_field(bitmap_idx)); if (arena->field_count <= mi_bitmap_index_field(bitmap_idx)) { - _mi_fatal_error("trying to free from non-existent arena block: %p, size %zu, memid: 0x%zx\n", p, size, memid); + _mi_error_message(EINVAL, "trying to free from non-existent arena block: %p, size %zu, memid: 0x%zx\n", p, size, memid); return; } const size_t blocks = mi_block_count_of_size(size); bool ones = mi_bitmap_unclaim(arena->blocks_inuse, arena->field_count, blocks, bitmap_idx); if (!ones) { - _mi_fatal_error("trying to free an already freed block: %p, size %zu\n", p, size); + _mi_error_message(EAGAIN, "trying to free an already freed block: %p, size %zu\n", p, size); return; }; } diff --git a/src/init.c b/src/init.c index b8422c2f..18a18f60 100644 --- a/src/init.c +++ b/src/init.c @@ -157,7 +157,7 @@ static bool _mi_heap_init(void) { // use `_mi_os_alloc` to allocate directly from the OS mi_thread_data_t* td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t),&_mi_stats_main); // Todo: more efficient allocation? if (td == NULL) { - _mi_error_message("failed to allocate thread local heap memory\n"); + _mi_error_message(ENOMEM, "failed to allocate thread local heap memory\n"); return false; } mi_tld_t* tld = &td->tld; diff --git a/src/options.c b/src/options.c index c12c77e0..b06cbdb4 100644 --- a/src/options.c +++ b/src/options.c @@ -287,14 +287,10 @@ void _mi_verbose_message(const char* fmt, ...) { va_end(args); } -void _mi_error_message(const char* fmt, ...) { +static void mi_show_error_message(const char* fmt, va_list args) { if (!mi_option_is_enabled(mi_option_show_errors) && !mi_option_is_enabled(mi_option_verbose)) return; if (mi_atomic_increment(&error_count) > mi_max_error_count) return; - va_list args; - va_start(args,fmt); - mi_vfprintf(NULL, NULL, "mimalloc: error: ", fmt, args); - va_end(args); - mi_assert(false); + mi_vfprintf(NULL, NULL, "mimalloc: error: ", fmt, args); } void _mi_warning_message(const char* fmt, ...) { @@ -314,14 +310,40 @@ void _mi_assert_fail(const char* assertion, const char* fname, unsigned line, co } #endif -mi_attr_noreturn void _mi_fatal_error(const char* fmt, ...) 
{ +// -------------------------------------------------------- +// Errors +// -------------------------------------------------------- + +static mi_error_fun* volatile mi_error_handler; // = NULL +static volatile _Atomic(void*) mi_error_arg; // = NULL + +static void mi_error_default(int err) { + UNUSED(err); +#if (MI_SECURE>0) + if (err==EFAULT) { // abort on serious errors in secure mode (corrupted meta-data) + abort(); + } +#endif +} + +void mi_register_error(mi_error_fun* fun, void* arg) { + mi_error_handler = fun; // can be NULL + mi_atomic_write_ptr(&mi_error_arg, arg); +} + +void _mi_error_message(int err, const char* fmt, ...) { + // show detailed error message va_list args; va_start(args, fmt); - mi_vfprintf(NULL, NULL, "mimalloc: fatal: ", fmt, args); + mi_show_error_message(fmt, args); va_end(args); - #if (MI_SECURE>=0) - abort(); - #endif + // and call the error handler which may abort (or return normally) + if (mi_error_handler != NULL) { + mi_error_handler(err, mi_atomic_read_ptr(&mi_error_arg)); + } + else { + mi_error_default(err); + } } // -------------------------------------------------------- diff --git a/src/os.c b/src/os.c index b5bd0ad9..be507b69 100644 --- a/src/os.c +++ b/src/os.c @@ -13,7 +13,7 @@ terms of the MIT license. A copy of the license can be found in the file #include "mimalloc-atomic.h" #include // strerror -#include + #if defined(_WIN32) #include @@ -655,7 +655,7 @@ static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservativ if (err != 0) { err = errno; } #endif if (err != 0) { - _mi_warning_message("%s error: start: 0x%p, csize: 0x%x, err: %i\n", commit ? "commit" : "decommit", start, csize, err); + _mi_warning_message("%s error: start: %p, csize: 0x%x, err: %i\n", commit ? "commit" : "decommit", start, csize, err); mi_mprotect_hint(err); } mi_assert_internal(err == 0); @@ -719,7 +719,7 @@ static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats) int err = madvise(start, csize, MADV_DONTNEED); #endif if (err != 0) { - _mi_warning_message("madvise reset error: start: 0x%p, csize: 0x%x, errno: %i\n", start, csize, errno); + _mi_warning_message("madvise reset error: start: %p, csize: 0x%x, errno: %i\n", start, csize, errno); } //mi_assert(err == 0); if (err != 0) return false; @@ -774,7 +774,7 @@ static bool mi_os_protectx(void* addr, size_t size, bool protect) { if (err != 0) { err = errno; } #endif if (err != 0) { - _mi_warning_message("mprotect error: start: 0x%p, csize: 0x%x, err: %i\n", start, csize, err); + _mi_warning_message("mprotect error: start: %p, csize: 0x%x, err: %i\n", start, csize, err); mi_mprotect_hint(err); } return (err == 0); @@ -961,7 +961,7 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_mse if (p != addr) { // no success, issue a warning and break if (p != NULL) { - _mi_warning_message("could not allocate contiguous huge page %zu at 0x%p\n", page, addr); + _mi_warning_message("could not allocate contiguous huge page %zu at %p\n", page, addr); _mi_os_free(p, MI_HUGE_OS_PAGE_SIZE, &_mi_stats_main); } break; diff --git a/src/page.c b/src/page.c index 84baf306..d67a44de 100644 --- a/src/page.c +++ b/src/page.c @@ -175,7 +175,7 @@ static void _mi_page_thread_free_collect(mi_page_t* page) } // if `count > max_count` there was a memory corruption (possibly infinite list due to double multi-threaded free) if (count > max_count) { - _mi_fatal_error("corrupted thread-free list\n"); + _mi_error_message(EFAULT, "corrupted thread-free list\n"); return; // the 
thread-free items cannot be freed } @@ -796,7 +796,8 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_page_t* page; if (mi_unlikely(size > MI_LARGE_OBJ_SIZE_MAX)) { if (mi_unlikely(size > PTRDIFF_MAX)) { // we don't allocate more than PTRDIFF_MAX (see ) - page = NULL; + _mi_error_message(EOVERFLOW, "allocation request is too large (%zu b requested)\n", size); + return NULL; } else { page = mi_huge_page_alloc(heap,size); @@ -806,7 +807,10 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept // otherwise find a page with free blocks in our size segregated queues page = mi_find_free_page(heap,size); } - if (page == NULL) return NULL; // out of memory + if (mi_unlikely(page == NULL)) { // out of memory + _mi_error_message(ENOMEM, "cannot allocate memory (%zu bytes requested)\n", size); + return NULL; + } mi_assert_internal(mi_page_immediate_available(page)); mi_assert_internal(mi_page_block_size(page) >= size); diff --git a/test/test-api.c b/test/test-api.c index 060efc44..68df314e 100644 --- a/test/test-api.c +++ b/test/test-api.c @@ -9,7 +9,7 @@ terms of the MIT license. A copy of the license can be found in the file Testing allocators is difficult as bugs may only surface after particular allocation patterns. The main approach to testing _mimalloc_ is therefore to have extensive internal invariant checking (see `page_is_valid` in `page.c` -for example), which is enabled in debug mode with `-DMI_CHECK_FULL=ON`. +for example), which is enabled in debug mode with `-DMI_DEBUG_FULL=ON`. The main testing is then to run `mimalloc-bench` [1] using full invariant checking to catch any potential problems over a wide range of intensive allocation bench marks. @@ -88,6 +88,10 @@ int main() { CHECK_BODY("malloc-null",{ mi_free(NULL); }); + CHECK_BODY("calloc-overflow",{ + // use (size_t)&mi_calloc to get some number without triggering compiler warnings + result = (mi_calloc((size_t)&mi_calloc,SIZE_MAX/1000) == NULL); + }); // --------------------------------------------------- // Extended From 41e717c2e0bdb4e6c5cbd1fd8a0bae3b0afb46d2 Mon Sep 17 00:00:00 2001 From: daan Date: Sat, 18 Jan 2020 20:30:12 -0800 Subject: [PATCH 38/46] fix assertion in mi_block_zero_init (issue #194) --- src/alloc.c | 4 ++-- test/test-api.c | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/alloc.c b/src/alloc.c index e605c017..8f98b647 100644 --- a/src/alloc.c +++ b/src/alloc.c @@ -92,9 +92,9 @@ extern inline mi_decl_allocator void* mi_malloc(size_t size) mi_attr_noexcept { void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size) { // note: we need to initialize the whole block to zero, not just size // or the recalloc/rezalloc functions cannot safely expand in place (see issue #63) - UNUSED(size); + UNUSED_RELEASE(size); mi_assert_internal(p != NULL); - mi_assert_internal(size > 0 && mi_page_block_size(page) >= size); + mi_assert_internal(size >= 0 && mi_page_block_size(page) >= size); mi_assert_internal(_mi_ptr_page(p)==page); if (page->is_zero) { // already zero initialized memory? 
diff --git a/test/test-api.c b/test/test-api.c index 68df314e..d7a7be59 100644 --- a/test/test-api.c +++ b/test/test-api.c @@ -92,6 +92,9 @@ int main() { // use (size_t)&mi_calloc to get some number without triggering compiler warnings result = (mi_calloc((size_t)&mi_calloc,SIZE_MAX/1000) == NULL); }); + CHECK_BODY("calloc0",{ + result = (mi_usable_size(mi_calloc(0,1000)) >= 0); + }); // --------------------------------------------------- // Extended From e8d7c80c74e7bcc789e2d2dd74e0306f5a0d9b8e Mon Sep 17 00:00:00 2001 From: daan Date: Sun, 19 Jan 2020 17:33:36 -0800 Subject: [PATCH 39/46] fix build warnings on linux --- src/alloc.c | 18 +++++++++--------- test/test-api.c | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/alloc.c b/src/alloc.c index 8f98b647..7fc9023c 100644 --- a/src/alloc.c +++ b/src/alloc.c @@ -94,7 +94,7 @@ void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size) { // or the recalloc/rezalloc functions cannot safely expand in place (see issue #63) UNUSED_RELEASE(size); mi_assert_internal(p != NULL); - mi_assert_internal(size >= 0 && mi_page_block_size(page) >= size); + mi_assert_internal(mi_page_block_size(page) >= size); // size can be zero mi_assert_internal(_mi_ptr_page(p)==page); if (page->is_zero) { // already zero initialized memory? @@ -141,7 +141,7 @@ static bool mi_list_contains(const mi_page_t* page, const mi_block_t* list, cons static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, const mi_block_t* block) { // The decoded value is in the same page (or NULL). - // Walk the free lists to verify positively if it is already freed + // Walk the free lists to verify positively if it is already freed if (mi_list_contains(page, page->free, block) || mi_list_contains(page, page->local_free, block) || mi_list_contains(page, mi_page_thread_free(page), block)) @@ -343,8 +343,8 @@ void mi_free(void* p) mi_attr_noexcept mi_block_set_next(page, block, page->local_free); page->local_free = block; page->used--; - if (mi_unlikely(mi_page_all_free(page))) { - _mi_page_retire(page); + if (mi_unlikely(mi_page_all_free(page))) { + _mi_page_retire(page); } } else { @@ -695,8 +695,8 @@ void* mi_new_nothrow(size_t size) { void* mi_new_aligned(size_t size, size_t alignment) { void* p; - do { - p = mi_malloc_aligned(size, alignment); + do { + p = mi_malloc_aligned(size, alignment); } while(p == NULL && mi_try_new_handler(false)); return p; @@ -704,8 +704,8 @@ void* mi_new_aligned(size_t size, size_t alignment) { void* mi_new_aligned_nothrow(size_t size, size_t alignment) { void* p; - do { - p = mi_malloc_aligned(size, alignment); + do { + p = mi_malloc_aligned(size, alignment); } while(p == NULL && mi_try_new_handler(true)); return p; @@ -720,4 +720,4 @@ void* mi_new_n(size_t count, size_t size) { else { return mi_new(total); } -} \ No newline at end of file +} diff --git a/test/test-api.c b/test/test-api.c index d7a7be59..a837946f 100644 --- a/test/test-api.c +++ b/test/test-api.c @@ -93,7 +93,7 @@ int main() { result = (mi_calloc((size_t)&mi_calloc,SIZE_MAX/1000) == NULL); }); CHECK_BODY("calloc0",{ - result = (mi_usable_size(mi_calloc(0,1000)) >= 0); + result = (mi_usable_size(mi_calloc(0,1000)) <= 16); }); // --------------------------------------------------- From 9d7ac76d93c8995631bb7ed264406a12aa2564d2 Mon Sep 17 00:00:00 2001 From: daan Date: Sun, 19 Jan 2020 18:35:45 -0800 Subject: [PATCH 40/46] fix compilation under Intel C compiler (icc) --- CMakeLists.txt | 23 ++++++++++++++++------- include/mimalloc-atomic.h | 6 
+++--- test/test-stress.c | 9 ++++++++- 3 files changed, 27 insertions(+), 11 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 27729584..366ffc44 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -54,7 +54,7 @@ endif() # Process options # ----------------------------------------------------------------------------- -if(CMAKE_C_COMPILER_ID MATCHES "MSVC") +if(CMAKE_C_COMPILER_ID MATCHES "MSVC|Intel") set(MI_USE_CXX "ON") endif() @@ -96,25 +96,34 @@ endif() if(MI_USE_CXX MATCHES "ON") message(STATUS "Use the C++ compiler to compile (MI_USE_CXX=ON)") set_source_files_properties(${mi_sources} PROPERTIES LANGUAGE CXX ) - set_source_files_properties(src/static.c test/test-api.c PROPERTIES LANGUAGE CXX ) + set_source_files_properties(src/static.c test/test-api.c test/test-stress PROPERTIES LANGUAGE CXX ) if(CMAKE_CXX_COMPILER_ID MATCHES "AppleClang|Clang") list(APPEND mi_cflags -Wno-deprecated) endif() + if(CMAKE_CXX_COMPILER_ID MATCHES "Intel") + list(APPEND mi_cflags -Kc++) + endif() endif() # Compiler flags if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang|GNU") list(APPEND mi_cflags -Wall -Wextra -Wno-unknown-pragmas) + if(CMAKE_C_COMPILER_ID MATCHES "GNU") + list(APPEND mi_cflags -Wno-invalid-memory-model) + list(APPEND mi_cflags -fvisibility=hidden) + endif() +endif() + +if(CMAKE_C_COMPILER_ID MATCHES "Intel") + list(APPEND mi_cflags -Wall -fvisibility=hidden) +endif() + +if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang|GNU|Intel") if(MI_LOCAL_DYNAMIC_TLS MATCHES "ON") list(APPEND mi_cflags -ftls-model=local-dynamic) else() list(APPEND mi_cflags -ftls-model=initial-exec) endif() - if(CMAKE_C_COMPILER_ID MATCHES "GNU") - list(APPEND mi_cflags -Wno-invalid-memory-model) - list(APPEND mi_cflags -fvisibility=hidden) - list(APPEND mi_cflags -fbranch-target-load-optimize) - endif() endif() # extra needed libraries diff --git a/include/mimalloc-atomic.h b/include/mimalloc-atomic.h index ecdfba0d..5d140f0c 100644 --- a/include/mimalloc-atomic.h +++ b/include/mimalloc-atomic.h @@ -9,7 +9,7 @@ terms of the MIT license. A copy of the license can be found in the file #define MIMALLOC_ATOMIC_H // ------------------------------------------------------ -// Atomics +// Atomics // We need to be portable between C, C++, and MSVC. // ------------------------------------------------------ @@ -29,7 +29,7 @@ terms of the MIT license. A copy of the license can be found in the file // Atomic operations specialized for mimalloc // ------------------------------------------------------ -// Atomically add a 64-bit value; returns the previous value. +// Atomically add a 64-bit value; returns the previous value. // Note: not using _Atomic(int64_t) as it is only used for statistics. static inline void mi_atomic_add64(volatile int64_t* p, int64_t add); @@ -43,7 +43,7 @@ static inline uintptr_t mi_atomic_and(volatile _Atomic(uintptr_t)* p, uintptr_t static inline uintptr_t mi_atomic_or(volatile _Atomic(uintptr_t)* p, uintptr_t x); -// Atomically compare and exchange a value; returns `true` if successful. +// Atomically compare and exchange a value; returns `true` if successful. // May fail spuriously. Memory ordering as release on success, and relaxed on failure. 
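The whitespace-only edits above leave the description of `mi_atomic_cas_weak` intact: it may fail spuriously and uses release ordering on success, relaxed on failure, which is safe precisely because every caller sits in a retry loop. A generic illustration of that pattern with `std::atomic` (deliberately not using mimalloc's internal atomics; `node` and `push` are made-up names):

```
#include <atomic>
#include <cstdio>

struct node { int value; node* next; };

// Treiber-style lock-free push: a spurious CAS failure only costs one more
// loop iteration, and the release order publishes n->next on success.
static void push(std::atomic<node*>& head, node* n) {
  node* old = head.load(std::memory_order_relaxed);
  do {
    n->next = old;  // (re)link to the currently observed head
  } while (!head.compare_exchange_weak(old, n,
                                       std::memory_order_release,    // success
                                       std::memory_order_relaxed));  // failure: retry
}

int main() {
  std::atomic<node*> head{nullptr};
  node a{1, nullptr}, b{2, nullptr};
  push(head, &a);
  push(head, &b);
  std::printf("top holds %d\n", head.load()->value);  // prints 2
  return 0;
}
```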
// (Note: expected and desired are in opposite order from atomic_compare_exchange) static inline bool mi_atomic_cas_weak(volatile _Atomic(uintptr_t)* p, uintptr_t desired, uintptr_t expected); diff --git a/test/test-stress.c b/test/test-stress.c index 42628d7c..83f9b87b 100644 --- a/test/test-stress.c +++ b/test/test-stress.c @@ -255,7 +255,6 @@ static void* atomic_exchange_ptr(volatile void** p, void* newval) { #else #include -#include static void* thread_entry(void* param) { stress((uintptr_t)param); @@ -275,8 +274,16 @@ static void run_os_threads(size_t nthreads) { custom_free(threads); } +#ifdef __cplusplus +#include +static void* atomic_exchange_ptr(volatile void** p, void* newval) { + return std::atomic_exchange_explicit((volatile std::atomic*)p, newval, std::memory_order_acquire); +} +#else +#include static void* atomic_exchange_ptr(volatile void** p, void* newval) { return atomic_exchange_explicit((volatile _Atomic(void*)*)p, newval, memory_order_acquire); } +#endif #endif From 514b3152839d6a5524d64c7a00f88875c9d5ec3f Mon Sep 17 00:00:00 2001 From: daan Date: Sun, 19 Jan 2020 21:27:46 -0800 Subject: [PATCH 41/46] add max_size member to STL allocator --- include/mimalloc.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/mimalloc.h b/include/mimalloc.h index 1250314c..add1c550 100644 --- a/include/mimalloc.h +++ b/include/mimalloc.h @@ -351,6 +351,7 @@ mi_decl_export void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_at #if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11 #include // true_type +#include // PTRDIFF_MAX #endif template struct mi_stl_allocator { @@ -360,6 +361,7 @@ template struct mi_stl_allocator { using propagate_on_container_move_assignment = std::true_type; using propagate_on_container_swap = std::true_type; using is_always_equal = std::true_type; + size_t max_size() const noexcept { return (PTRDIFF_MAX / sizeof(value_type)); } #endif mi_stl_allocator() mi_attr_noexcept { } mi_stl_allocator(const mi_stl_allocator& ) mi_attr_noexcept { } From a33ebb8625fde438f61a5bddd0f71fa9adb7acb2 Mon Sep 17 00:00:00 2001 From: daan Date: Sun, 19 Jan 2020 22:14:35 -0800 Subject: [PATCH 42/46] add alloc_align attribute to aligned allocation functions --- include/mimalloc.h | 48 +++++++++++++++++++++++++++------------------- 1 file changed, 28 insertions(+), 20 deletions(-) diff --git a/include/mimalloc.h b/include/mimalloc.h index add1c550..153e11c7 100644 --- a/include/mimalloc.h +++ b/include/mimalloc.h @@ -37,32 +37,40 @@ terms of the MIT license. A copy of the license can be found in the file #else #define mi_decl_allocator __declspec(restrict) #endif + #define mi_cdecl __cdecl #define mi_decl_thread __declspec(thread) #define mi_attr_malloc #define mi_attr_alloc_size(s) #define mi_attr_alloc_size2(s1,s2) - #define mi_cdecl __cdecl + #define mi_attr_alloc_align(p) #elif defined(__GNUC__) || defined(__clang__) + #define mi_cdecl // leads to warnings... 
__attribute__((cdecl)) #define mi_decl_thread __thread #define mi_decl_export __attribute__((visibility("default"))) #define mi_decl_allocator #define mi_attr_malloc __attribute__((malloc)) - #if defined(__clang_major__) && (__clang_major__ < 4) + #if (defined(__clang_major__) && (__clang_major__ < 4)) || (__GNUC__ < 5) #define mi_attr_alloc_size(s) #define mi_attr_alloc_size2(s1,s2) + #define mi_attr_alloc_align(p) + #elif defined(__INTEL_COMPILER) + #define mi_attr_alloc_size(s) __attribute__((alloc_size(s))) + #define mi_attr_alloc_size2(s1,s2) __attribute__((alloc_size(s1,s2))) + #define mi_attr_alloc_align(p) #else #define mi_attr_alloc_size(s) __attribute__((alloc_size(s))) #define mi_attr_alloc_size2(s1,s2) __attribute__((alloc_size(s1,s2))) + #define mi_attr_alloc_align(p) __attribute__((alloc_align(p))) #endif - #define mi_cdecl // leads to warnings... __attribute__((cdecl)) #else + #define mi_cdecl #define mi_decl_thread __thread #define mi_decl_export #define mi_decl_allocator #define mi_attr_malloc #define mi_attr_alloc_size(s) #define mi_attr_alloc_size2(s1,s2) - #define mi_cdecl + #define mi_attr_alloc_align(p) #endif // ------------------------------------------------------ @@ -140,13 +148,13 @@ mi_decl_export void mi_thread_stats_print_out(mi_output_fun* out, void* arg) mi_ // allocation, but unfortunately this differs from `posix_memalign` and `aligned_alloc`. // ------------------------------------------------------------------------------------- -mi_decl_export mi_decl_allocator void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_export mi_decl_allocator void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); mi_decl_export mi_decl_allocator void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); -mi_decl_export mi_decl_allocator void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_export mi_decl_allocator void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); mi_decl_export mi_decl_allocator void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); -mi_decl_export mi_decl_allocator void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2); +mi_decl_export mi_decl_allocator void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2) mi_attr_alloc_align(3); mi_decl_export mi_decl_allocator void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2); -mi_decl_export mi_decl_allocator void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_export mi_decl_allocator void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3); mi_decl_export mi_decl_allocator void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); @@ -178,13 +186,13 @@ mi_decl_export char* mi_heap_strdup(mi_heap_t* heap, const char* 
s) mi_attr_noex mi_decl_export char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept; mi_decl_export char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept; -mi_decl_export mi_decl_allocator void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_export mi_decl_allocator void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3); mi_decl_export mi_decl_allocator void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); -mi_decl_export mi_decl_allocator void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_export mi_decl_allocator void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3); mi_decl_export mi_decl_allocator void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); -mi_decl_export mi_decl_allocator void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3); +mi_decl_export mi_decl_allocator void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3) mi_attr_alloc_align(4); mi_decl_export mi_decl_allocator void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3); -mi_decl_export mi_decl_allocator void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(3); +mi_decl_export mi_decl_allocator void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(3) mi_attr_alloc_align(4); mi_decl_export mi_decl_allocator void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(3); @@ -198,17 +206,17 @@ mi_decl_export mi_decl_allocator void* mi_heap_realloc_aligned_at(mi_heap_t* hea mi_decl_export mi_decl_allocator void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); mi_decl_export mi_decl_allocator void* mi_recalloc(void* p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2,3); -mi_decl_export mi_decl_allocator void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_export mi_decl_allocator void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3); mi_decl_export mi_decl_allocator void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); -mi_decl_export mi_decl_allocator void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2,3); 
+mi_decl_export mi_decl_allocator void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2,3) mi_attr_alloc_align(4); mi_decl_export mi_decl_allocator void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2,3); mi_decl_export mi_decl_allocator void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(3); mi_decl_export mi_decl_allocator void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(3,4); -mi_decl_export mi_decl_allocator void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(3); +mi_decl_export mi_decl_allocator void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(3) mi_attr_alloc_align(4); mi_decl_export mi_decl_allocator void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(3); -mi_decl_export mi_decl_allocator void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(3,4); +mi_decl_export mi_decl_allocator void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(3,4) mi_attr_alloc_align(5); mi_decl_export mi_decl_allocator void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(3,4); @@ -314,11 +322,11 @@ mi_decl_export void mi_cfree(void* p) mi_attr_noexcept; mi_decl_export void* mi__expand(void* p, size_t newsize) mi_attr_noexcept; mi_decl_export int mi_posix_memalign(void** p, size_t alignment, size_t size) mi_attr_noexcept; -mi_decl_export void* mi_memalign(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_export void* mi_memalign(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1); mi_decl_export void* mi_valloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); mi_decl_export void* mi_pvalloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); -mi_decl_export void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_export void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1); mi_decl_export void* mi_reallocarray(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2,3); mi_decl_export void* mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept; @@ -335,9 +343,9 @@ mi_decl_export void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept; mi_decl_export void* mi_new(size_t size) mi_attr_malloc mi_attr_alloc_size(1); mi_decl_export void* mi_new_n(size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(1,2); -mi_decl_export void* mi_new_aligned(size_t size, size_t alignment) mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_export 
void* mi_new_aligned(size_t size, size_t alignment) mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); mi_decl_export void* mi_new_nothrow(size_t size) mi_attr_malloc mi_attr_alloc_size(1); -mi_decl_export void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_export void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); #ifdef __cplusplus } From b77be05e4001debdcdcdc27d82bebb6b04faac11 Mon Sep 17 00:00:00 2001 From: daan Date: Mon, 20 Jan 2020 12:14:34 -0800 Subject: [PATCH 43/46] only collect retired at fresh page allocation --- src/page.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/page.c b/src/page.c index d67a44de..7840a590 100644 --- a/src/page.c +++ b/src/page.c @@ -234,7 +234,7 @@ void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) { mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE); mi_assert_internal(!page->is_reset); mi_page_queue_t* pq = mi_page_queue(heap, mi_page_block_size(page)); - mi_page_queue_push(heap, pq, page); + mi_page_queue_push(heap, pq, page); mi_assert_expensive(_mi_page_is_valid(page)); } @@ -408,7 +408,7 @@ void _mi_page_retire(mi_page_t* page) { if (mi_likely(page->xblock_size <= MI_SMALL_SIZE_MAX && !mi_page_is_in_full(page))) { if (pq->last==page && pq->first==page) { // the only page in the queue? mi_stat_counter_increase(_mi_stats_main.page_no_retire,1); - page->retire_expire = 4; + page->retire_expire = 16; mi_assert_internal(mi_page_all_free(page)); return; // dont't free after all } @@ -514,7 +514,7 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co mi_assert_internal(page->capacity + extend <= page->reserved); mi_assert_internal(bsize == mi_page_block_size(page)); void* const page_area = _mi_page_start(_mi_page_segment(page), page, NULL ); - + mi_block_t* const start = mi_page_block_at(page, page_area, bsize, page->capacity); // initialize a sequential free list @@ -678,6 +678,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p mi_stat_counter_increase(heap->tld->stats.searches, count); if (page == NULL) { + _mi_heap_collect_retired(heap, false); // perhaps make a page available page = mi_page_fresh(heap, pq); } else { @@ -686,8 +687,6 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p } mi_assert_internal(page == NULL || mi_page_immediate_available(page)); - // finally collect retired pages - _mi_heap_collect_retired(heap, false); return page; } From 146899af8aad7ee3b447f1d0ffe4939f8e3bcd88 Mon Sep 17 00:00:00 2001 From: daan Date: Mon, 20 Jan 2020 15:27:05 -0800 Subject: [PATCH 44/46] add missing members to stl allocator (#193) --- include/mimalloc.h | 51 +++++++++++++++++++++++++++++------------- test/main-override.cpp | 16 +++++++++++++ test/test-api.c | 8 +++---- 3 files changed, 56 insertions(+), 19 deletions(-) diff --git a/include/mimalloc.h b/include/mimalloc.h index 153e11c7..97c26e18 100644 --- a/include/mimalloc.h +++ b/include/mimalloc.h @@ -357,29 +357,50 @@ mi_decl_export void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_at // --------------------------------------------------------------------------------------------- #ifdef __cplusplus +#include // std::numeric_limits #if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11 -#include // true_type -#include // PTRDIFF_MAX +#include // std::true_type +#include // std::forward #endif 
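With these includes in place, the hunk that follows expands `mi_stl_allocator` into a complete allocator (rebind, construct/destroy, max_size, address and the C++11 propagation traits), so it can be dropped into any standard container. Usage mirrors the `test_stl_allocator1` test added in `test/main-override.cpp`:

```
#include <mimalloc.h>
#include <vector>
#include <cassert>

int main() {
  // Route the container's storage through mimalloc by naming the allocator type.
  std::vector<int, mi_stl_allocator<int>> vec;
  vec.push_back(1);
  vec.push_back(2);
  assert(vec.size() == 2);
  vec.pop_back();
  return 0;
}
```

Allocation goes through `mi_new_n`, so out-of-memory surfaces as `std::bad_alloc` just as with `std::allocator`, and deallocation goes through `mi_free`.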
template struct mi_stl_allocator { - typedef T value_type; - #if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11 + typedef T value_type; + typedef std::size_t size_type; + typedef std::ptrdiff_t difference_type; + typedef value_type& reference; + typedef value_type const& const_reference; + typedef value_type* pointer; + typedef value_type const* const_pointer; + template struct rebind { typedef mi_stl_allocator other; }; + + mi_stl_allocator() mi_attr_noexcept { } + mi_stl_allocator(const mi_stl_allocator&) mi_attr_noexcept { } + template mi_stl_allocator(const mi_stl_allocator&) mi_attr_noexcept { } + mi_stl_allocator select_on_container_copy_construction() const { return *this; } + void deallocate(T* p, size_type) { mi_free(p); } + + #if (__cplusplus >= 201703L) // C++17 + T* allocate(size_type count) { return static_cast(mi_new_n(count, sizeof(T))); } + T* allocate(size_type count, const void*) { return allocate(count); } + #else + pointer allocate(size_type count, const void* = 0) { return static_cast(mi_new_n(count, sizeof(value_type))); } + #endif + + #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11 using propagate_on_container_copy_assignment = std::true_type; using propagate_on_container_move_assignment = std::true_type; - using propagate_on_container_swap = std::true_type; - using is_always_equal = std::true_type; - size_t max_size() const noexcept { return (PTRDIFF_MAX / sizeof(value_type)); } - #endif - mi_stl_allocator() mi_attr_noexcept { } - mi_stl_allocator(const mi_stl_allocator& ) mi_attr_noexcept { } - template mi_stl_allocator(const mi_stl_allocator& ) mi_attr_noexcept { } - void deallocate(T* p, size_t /* count */) { mi_free(p); } - #if (__cplusplus >= 201703L) // C++17 - T* allocate(size_t count) { return (T*)mi_new_n(count, sizeof(T)); } + using propagate_on_container_swap = std::true_type; + using is_always_equal = std::true_type; + template void construct(U* p, Args&& ...args) { ::new(p) U(std::forward(args)...); } + template void destroy(U* p) mi_attr_noexcept { p->~U(); } #else - T* allocate(size_t count, const void* hint = 0) { (void)hint; return (T*)mi_new_n(count, sizeof(T)); } + void construct(pointer p, value_type const& val) { ::new(p) value_type(val); } + void destroy(pointer p) { p->~value_type(); } #endif + + size_type max_size() const mi_attr_noexcept { return (std::numeric_limits::max() / sizeof(value_type)); } + pointer address(reference x) const { return &x; } + const_pointer address(const_reference x) const { return &x; } }; template bool operator==(const mi_stl_allocator& , const mi_stl_allocator& ) mi_attr_noexcept { return true; } diff --git a/test/main-override.cpp b/test/main-override.cpp index f7a7f1bd..d082ade3 100644 --- a/test/main-override.cpp +++ b/test/main-override.cpp @@ -6,6 +6,7 @@ #include #include +#include static void* p = malloc(8); @@ -69,3 +70,18 @@ public: static Static s = Static(); +bool test_stl_allocator1() { + std::vector> vec; + vec.push_back(1); + vec.pop_back(); + return vec.size() == 0; +} + +bool test_stl_allocator2() { + struct some_struct { int i; int j; double z; }; + + std::vector> vec; + vec.push_back(some_struct()); + vec.pop_back(); + return vec.size() == 0; +} \ No newline at end of file diff --git a/test/test-api.c b/test/test-api.c index a837946f..95891754 100644 --- a/test/test-api.c +++ b/test/test-api.c @@ -202,7 +202,7 @@ bool test_heap2() { bool test_stl_allocator1() { #ifdef __cplusplus - std::vector> vec; + std::vector > vec; vec.push_back(1); vec.pop_back(); return vec.size() == 
0; @@ -211,11 +211,11 @@ bool test_stl_allocator1() { #endif } +struct some_struct { int i; int j; double z; }; + bool test_stl_allocator2() { #ifdef __cplusplus - struct some_struct { int i; int j; double z; }; - - std::vector> vec; + std::vector > vec; vec.push_back(some_struct()); vec.pop_back(); return vec.size() == 0; From 3957b2fd28e95e9dcc787ccda320abee412ac82e Mon Sep 17 00:00:00 2001 From: daan Date: Mon, 20 Jan 2020 15:41:56 -0800 Subject: [PATCH 45/46] add mi_new_realloc(n) to support C++ style reallocation that raises std::bad_alloc on out-of-memory --- include/mimalloc.h | 6 +++++- src/alloc.c | 19 +++++++++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/include/mimalloc.h b/include/mimalloc.h index 97c26e18..3861ad4f 100644 --- a/include/mimalloc.h +++ b/include/mimalloc.h @@ -341,11 +341,15 @@ mi_decl_export void mi_free_size(void* p, size_t size) mi_attr_noexcept; mi_decl_export void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept; mi_decl_export void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept; +// The `mi_new` wrappers implement C++ semantics on out-of-memory instead of directly returning `NULL`. +// (and call `std::get_new_handler` and potentially raise a `std::bad_alloc` exception). mi_decl_export void* mi_new(size_t size) mi_attr_malloc mi_attr_alloc_size(1); -mi_decl_export void* mi_new_n(size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(1,2); mi_decl_export void* mi_new_aligned(size_t size, size_t alignment) mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); mi_decl_export void* mi_new_nothrow(size_t size) mi_attr_malloc mi_attr_alloc_size(1); mi_decl_export void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); +mi_decl_export void* mi_new_n(size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(1, 2); +mi_decl_export void* mi_new_realloc(void* p, size_t newsize) mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_export void* mi_new_reallocn(void* p, size_t newcount, size_t size) mi_attr_malloc mi_attr_alloc_size2(2, 3); #ifdef __cplusplus } diff --git a/src/alloc.c b/src/alloc.c index 7fc9023c..20339204 100644 --- a/src/alloc.c +++ b/src/alloc.c @@ -721,3 +721,22 @@ void* mi_new_n(size_t count, size_t size) { return mi_new(total); } } + +void* mi_new_realloc(void* p, size_t newsize) { + void* q; + do { + q = mi_realloc(p, newsize); + } while (q == NULL && mi_try_new_handler(false)); + return q; +} + +void* mi_new_reallocn(void* p, size_t newcount, size_t size) { + size_t total; + if (mi_unlikely(mi_count_size_overflow(newcount, size, &total))) { + mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc + return NULL; + } + else { + return mi_new_realloc(p, total); + } +} From 5bc1c52ae6e83bc65c506d682bf732507b3e0f61 Mon Sep 17 00:00:00 2001 From: daan Date: Mon, 20 Jan 2020 17:34:29 -0800 Subject: [PATCH 46/46] update documentation --- doc/mimalloc-doc.h | 49 +++- docs/annotated.html | 1 + docs/annotated_dup.js | 3 +- docs/classes.html | 8 +- docs/group__cpp.html | 396 +++++++++++++++++++++++++++++++ docs/group__cpp.js | 11 + docs/group__posix.html | 169 ------------- docs/group__posix.js | 5 - docs/mimalloc-doc_8h_source.html | 15 +- docs/modules.html | 1 + docs/modules.js | 3 +- docs/navtreeindex0.js | 22 +- docs/search/all_3.js | 3 +- docs/search/all_6.js | 13 +- docs/search/classes_0.js | 3 +- docs/search/functions_0.js | 12 +- 
docs/search/groups_2.js | 2 +- docs/search/groups_3.js | 3 +- docs/search/groups_4.js | 3 +- docs/search/groups_5.js | 2 +- docs/search/groups_6.js | 2 +- docs/search/groups_7.js | 2 +- docs/search/groups_8.html | 30 +++ docs/search/groups_8.js | 4 + docs/search/searchdata.js | 2 +- docs/using.html | 2 +- test/main-override.cpp | 8 +- 27 files changed, 547 insertions(+), 227 deletions(-) create mode 100644 docs/group__cpp.html create mode 100644 docs/group__cpp.js create mode 100644 docs/search/groups_8.html create mode 100644 docs/search/groups_8.js diff --git a/doc/mimalloc-doc.h b/doc/mimalloc-doc.h index ca744e4c..3f24a623 100644 --- a/doc/mimalloc-doc.h +++ b/doc/mimalloc-doc.h @@ -74,6 +74,8 @@ Further information: - \ref typed - \ref analysis - \ref options +- \ref posix +- \ref cpp */ @@ -622,7 +624,10 @@ void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, siz /// \defgroup typed Typed Macros /// -/// Typed allocation macros +/// Typed allocation macros. For example: +/// ``` +/// int* p = mi_malloc_tp(int) +/// ``` /// /// \{ @@ -805,21 +810,51 @@ void mi_free_size(void* p, size_t size); void mi_free_size_aligned(void* p, size_t size, size_t alignment); void mi_free_aligned(void* p, size_t alignment); -/// raise `std::bad_alloc` exception on failure. +/// \} + +/// \defgroup cpp C++ wrappers +/// +/// `mi_` prefixed implementations of various allocation functions +/// that use C++ semantics on out-of-memory, generally calling +/// `std::get_new_handler` and raising a `std::bad_alloc` exception on failure. +/// +/// Note: use the `mimalloc-new-delete.h` header to override the \a new +/// and \a delete operators globally. The wrappers here are mostly +/// for convience for library writers that need to interface with +/// mimalloc from C++. +/// +/// \{ + +/// like mi_malloc(), but when out of memory, use `std::get_new_handler` and raise `std::bad_alloc` exception on failure. void* mi_new(std::size_t n) noexcept(false); -/// raise `std::bad_alloc` exception on failure or overflow. +/// like mi_mallocn(), but when out of memory, use `std::get_new_handler` and raise `std::bad_alloc` exception on failure. void* mi_new_n(size_t count, size_t size) noexcept(false); -/// raise `std::bad_alloc` exception on failure. +/// like mi_malloc_aligned(), but when out of memory, use `std::get_new_handler` and raise `std::bad_alloc` exception on failure. void* mi_new_aligned(std::size_t n, std::align_val_t alignment) noexcept(false); -/// return `NULL` on failure. +/// like `mi_malloc`, but when out of memory, use `std::get_new_handler` but return \a NULL on failure. void* mi_new_nothrow(size_t n); -`` -/// return `NULL` on failure. + +/// like `mi_malloc_aligned`, but when out of memory, use `std::get_new_handler` but return \a NULL on failure. void* mi_new_aligned_nothrow(size_t n, size_t alignment); +/// like mi_realloc(), but when out of memory, use `std::get_new_handler` and raise `std::bad_alloc` exception on failure. +void* mi_new_realloc(void* p, size_t newsize); + +/// like mi_reallocn(), but when out of memory, use `std::get_new_handler` and raise `std::bad_alloc` exception on failure. +void* mi_new_reallocn(void* p, size_t newcount, size_t size); + +/// \a std::allocator implementation for mimalloc for use in STL containers. +/// For example: +/// ``` +/// std::vector > vec; +/// vec.push_back(1); +/// vec.pop_back(); +/// ``` +template struct mi_stl_allocator { } + /// \} /*! 
\page build Building diff --git a/docs/annotated.html b/docs/annotated.html index 4d2a8bcc..5120b803 100644 --- a/docs/annotated.html +++ b/docs/annotated.html @@ -105,6 +105,7 @@ $(document).ready(function(){initNavTree('annotated.html','');});
Here are the data structures with brief descriptions:
+
 Cmi_heap_area_tAn area of heap space contains blocks of a single size
 Cmi_stl_allocatorstd::allocator implementation for mimalloc for use in STL containers
diff --git a/docs/annotated_dup.js b/docs/annotated_dup.js index 6ed68bc3..67229123 100644 --- a/docs/annotated_dup.js +++ b/docs/annotated_dup.js @@ -1,4 +1,5 @@ var annotated_dup = [ - [ "mi_heap_area_t", "group__analysis.html#structmi__heap__area__t", "group__analysis_structmi__heap__area__t" ] + [ "mi_heap_area_t", "group__analysis.html#structmi__heap__area__t", "group__analysis_structmi__heap__area__t" ], + [ "mi_stl_allocator", "group__cpp.html#structmi__stl__allocator", null ] ]; \ No newline at end of file diff --git a/docs/classes.html b/docs/classes.html index e5ea3ea8..de960fb6 100644 --- a/docs/classes.html +++ b/docs/classes.html @@ -105,10 +105,10 @@ $(document).ready(function(){initNavTree('classes.html','');}); - - - + + + +
  m  
-
mi_heap_area_t   
mi_stl_allocator   
mi_heap_area_t   
diff --git a/docs/group__cpp.html b/docs/group__cpp.html new file mode 100644 index 00000000..caf758a8 --- /dev/null +++ b/docs/group__cpp.html @@ -0,0 +1,396 @@ + + + + + + + +mi-malloc: C++ wrappers + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+
mi-malloc +  1.4 +
+
+ + + + + + +
+
+
+ + + +
+
+ +
+
+
+ +
+ +
+
+ + +
+ +
+ +
+ +
+
C++ wrappers
+
+
+ +

mi_ prefixed implementations of various allocation functions that use C++ semantics on out-of-memory, generally calling std::get_new_handler and raising a std::bad_alloc exception on failure. +More...

+ + + + + +

+Data Structures

struct  mi_stl_allocator< T >
 std::allocator implementation for mimalloc for use in STL containers. More...
 
+ + + + + + + + + + + + + + + + + + + + + + +

+Functions

void * mi_new (std::size_t n) noexcept(false)
 like mi_malloc(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure. More...
 
void * mi_new_n (size_t count, size_t size) noexcept(false)
 like mi_mallocn(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure. More...
 
void * mi_new_aligned (std::size_t n, std::align_val_t alignment) noexcept(false)
 like mi_malloc_aligned(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure. More...
 
void * mi_new_nothrow (size_t n)
 like mi_malloc, but when out of memory, use std::get_new_handler but return NULL on failure. More...
 
void * mi_new_aligned_nothrow (size_t n, size_t alignment)
 like mi_malloc_aligned, but when out of memory, use std::get_new_handler but return NULL on failure. More...
 
void * mi_new_realloc (void *p, size_t newsize)
 like mi_realloc(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure. More...
 
void * mi_new_reallocn (void *p, size_t newcount, size_t size)
 like mi_reallocn(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure. More...
 
+

Detailed Description

+

mi_ prefixed implementations of various allocation functions that use C++ semantics on out-of-memory, generally calling std::get_new_handler and raising a std::bad_alloc exception on failure.

+

Note: use the mimalloc-new-delete.h header to override the new and delete operators globally. The wrappers here are mostly for convenience for library writers that need to interface with mimalloc from C++.
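To make the out-of-memory behavior concrete, here is a minimal sketch (not part of this patch or the generated docs) contrasting the throwing and nothrow wrappers; it assumes only the `mi_new`, `mi_new_nothrow`, and `mi_free` declarations documented in this group:
```
#include <mimalloc.h>
#include <cstdio>
#include <new>       // std::bad_alloc

int main() {
  // mi_new_nothrow behaves like `new (std::nothrow)`: it consults
  // std::get_new_handler but returns NULL instead of throwing.
  void* p = mi_new_nothrow(64);
  if (p == NULL) std::printf("allocation failed\n");
  mi_free(p);   // freeing NULL is a no-op, as with free()

  // mi_new behaves like plain `new`: failure raises std::bad_alloc.
  try {
    void* q = mi_new(64);
    mi_free(q);
  }
  catch (const std::bad_alloc&) {
    std::printf("allocation failed\n");
  }
  return 0;
}
```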

+

Data Structure Documentation

+ +

◆ mi_stl_allocator

+ +
+
+ + + + +
struct mi_stl_allocator
+
+

template<class T>
+struct mi_stl_allocator< T >

+ +

std::allocator implementation for mimalloc for use in STL containers.

+

For example:

std::vector<int, mi_stl_allocator<int> > vec;
vec.push_back(1);
vec.pop_back();
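Because the allocator is stateless, it drops into other standard containers in the same way. A small sketch (not part of the generated docs), assuming only the `mi_stl_allocator` template shown here:
```
#include <mimalloc.h>
#include <functional>   // std::less
#include <map>
#include <string>

int main() {
  typedef std::pair<const int, std::string> kv_t;
  typedef mi_stl_allocator<kv_t> alloc_t;

  // Every node of the map is allocated through mimalloc.
  std::map<int, std::string, std::less<int>, alloc_t> m;
  m[1] = "one";
  m[2] = "two";
  return (m.size() == 2) ? 0 : 1;
}
```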
+
+
+

Function Documentation

+ +

◆ mi_new()

+ +
+
+ + + + + +
+ + + + + + + + +
void* mi_new (std::size_t n)
+
+noexcept
+
+ +

like mi_malloc(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure.

+ +
+
+ +

◆ mi_new_aligned()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
void* mi_new_aligned (std::size_t n,
std::align_val_t alignment 
)
+
+noexcept
+
+ +

like mi_malloc_aligned(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure.

+ +
+
+ +

◆ mi_new_aligned_nothrow()

+ +
+
+ + + + + + + + + + + + + + + + + + +
void* mi_new_aligned_nothrow (size_t n,
size_t alignment 
)
+
+ +

like mi_malloc_aligned, but when out of memory, use std::get_new_handler but return NULL on failure.

+ +
+
+ +

◆ mi_new_n()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + +
void* mi_new_n (size_t count,
size_t size 
)
+
+noexcept
+
+ +

like mi_mallocn(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure.
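The count*size computation is overflow-checked as well, so array-style allocations need no manual size check. A brief sketch under that assumption (not part of the docs):
```
#include <mimalloc.h>

int main() {
  // Room for 1000 doubles; an overflow or out-of-memory condition
  // raises std::bad_alloc rather than returning NULL.
  double* xs = static_cast<double*>(mi_new_n(1000, sizeof(double)));
  xs[0] = 3.14;
  xs[999] = 2.71;
  mi_free(xs);
  return 0;
}
```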

+ +
+
+ +

◆ mi_new_nothrow()

+ +
+
+ + + + + + + + +
void* mi_new_nothrow (size_t n)
+
+ +

like mi_malloc, but when out of memory, use std::get_new_handler but return NULL on failure.

+ +
+
+ +

◆ mi_new_realloc()

+ +
+
+ + + + + + + + + + + + + + + + + + +
void* mi_new_realloc (void * p,
size_t newsize 
)
+
+ +

like mi_realloc(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure.
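This lets C++ code grow a raw buffer without a NULL check after each call, since failure surfaces as an exception instead. A minimal sketch (not from the docs), assuming only `mi_new_n`, `mi_new_realloc`, and `mi_free`:
```
#include <mimalloc.h>
#include <cstring>

int main() {
  size_t capacity = 16;
  char* buf = static_cast<char*>(mi_new_n(capacity, sizeof(char)));
  std::strcpy(buf, "hello");

  // Grow the buffer: on out-of-memory this throws std::bad_alloc
  // (after consulting std::get_new_handler) instead of returning NULL.
  capacity *= 2;
  buf = static_cast<char*>(mi_new_realloc(buf, capacity));

  mi_free(buf);
  return 0;
}
```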

+ +
+
+ +

◆ mi_new_reallocn()

+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + +
void* mi_new_reallocn (void * p,
size_t newcount,
size_t size 
)
+
+ +

like mi_reallocn(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception on failure.

+ +
+
+
+
+ + + + diff --git a/docs/group__cpp.js b/docs/group__cpp.js new file mode 100644 index 00000000..20706646 --- /dev/null +++ b/docs/group__cpp.js @@ -0,0 +1,11 @@ +var group__cpp = +[ + [ "mi_stl_allocator", "group__cpp.html#structmi__stl__allocator", null ], + [ "mi_new", "group__cpp.html#gaad048a9fce3d02c5909cd05c6ec24545", null ], + [ "mi_new_aligned", "group__cpp.html#gaef2c2bdb4f70857902d3c8903ac095f3", null ], + [ "mi_new_aligned_nothrow", "group__cpp.html#gab5e29558926d934c3f1cae8c815f942c", null ], + [ "mi_new_n", "group__cpp.html#gae7bc4f56cd57ed3359060ff4f38bda81", null ], + [ "mi_new_nothrow", "group__cpp.html#gaeaded64eda71ed6b1d569d3e723abc4a", null ], + [ "mi_new_realloc", "group__cpp.html#gaab78a32f55149e9fbf432d5288e38e1e", null ], + [ "mi_new_reallocn", "group__cpp.html#ga756f4b2bc6a7ecd0a90baea8e90c7907", null ] +]; \ No newline at end of file diff --git a/docs/group__posix.html b/docs/group__posix.html index eaa4a10f..1aea8dc8 100644 --- a/docs/group__posix.html +++ b/docs/group__posix.html @@ -137,21 +137,6 @@ Functions   void mi_free_aligned (void *p, size_t alignment)   -void * mi_new (std::size_t n) noexcept(false) - raise std::bad_alloc exception on failure. More...
-  -void * mi_new_n (size_t count, size_t size) noexcept(false) - raise std::bad_alloc exception on failure or overflow. More...
-  -void * mi_new_aligned (std::size_t n, std::align_val_t alignment) noexcept(false) - raise std::bad_alloc exception on failure. More...
-  -void * mi_new_nothrow (size_t n) - return NULL on failure. More...
-  -void * mi_new_aligned_nothrow (size_t n, size_t alignment) - return NULL on failure. More...

Detailed Description

mi_ prefixed implementations of various Posix, Unix, and C++ allocation functions.

@@ -391,160 +376,6 @@ Functions
-
-
- -

◆ mi_new()

- -
-
- - - - - -
- - - - - - - - -
void* mi_new (std::size_t n)
-
-noexcept
-
- -

raise std::bad_alloc exception on failure.

- -
-
- -

◆ mi_new_aligned()

- -
-
- - - - - -
- - - - - - - - - - - - - - - - - - -
void* mi_new_aligned (std::size_t n,
std::align_val_t alignment 
)
-
-noexcept
-
- -

raise std::bad_alloc exception on failure.

- -
-
- -

◆ mi_new_aligned_nothrow()

- -
-
- - - - - - - - - - - - - - - - - - -
void* mi_new_aligned_nothrow (size_t n,
size_t alignment 
)
-
- -

return NULL on failure.

- -
-
- -

◆ mi_new_n()

- -
-
- - - - - -
- - - - - - - - - - - - - - - - - - -
void* mi_new_n (size_t count,
size_t size 
)
-
-noexcept
-
- -

raise std::bad_alloc exception on failure or overflow.

- -
-
- -

◆ mi_new_nothrow()

- -
-
- - - - - - - - -
void* mi_new_nothrow (size_t n)
-
- -

return NULL on failure.

-
diff --git a/docs/group__posix.js b/docs/group__posix.js index 0f2b895d..e43453d9 100644 --- a/docs/group__posix.js +++ b/docs/group__posix.js @@ -9,11 +9,6 @@ var group__posix = [ "mi_malloc_size", "group__posix.html#ga4531c9e775bb3ae12db57c1ba8a5d7de", null ], [ "mi_malloc_usable_size", "group__posix.html#ga06d07cf357bbac5c73ba5d0c0c421e17", null ], [ "mi_memalign", "group__posix.html#gaab7fa71ea93b96873f5d9883db57d40e", null ], - [ "mi_new", "group__posix.html#gaad048a9fce3d02c5909cd05c6ec24545", null ], - [ "mi_new_aligned", "group__posix.html#gaef2c2bdb4f70857902d3c8903ac095f3", null ], - [ "mi_new_aligned_nothrow", "group__posix.html#gab5e29558926d934c3f1cae8c815f942c", null ], - [ "mi_new_n", "group__posix.html#gae7bc4f56cd57ed3359060ff4f38bda81", null ], - [ "mi_new_nothrow", "group__posix.html#gaeaded64eda71ed6b1d569d3e723abc4a", null ], [ "mi_posix_memalign", "group__posix.html#gacff84f226ba9feb2031b8992e5579447", null ], [ "mi_pvalloc", "group__posix.html#gaeb325c39b887d3b90d85d1eb1712fb1e", null ], [ "mi_reallocarray", "group__posix.html#ga48fad8648a2f1dab9c87ea9448a52088", null ], diff --git a/docs/mimalloc-doc_8h_source.html b/docs/mimalloc-doc_8h_source.html index 12d0f799..f70ae81f 100644 --- a/docs/mimalloc-doc_8h_source.html +++ b/docs/mimalloc-doc_8h_source.html @@ -102,20 +102,22 @@ $(document).ready(function(){initNavTree('mimalloc-doc_8h_source.html','');});
mimalloc-doc.h
-
1 /* ----------------------------------------------------------------------------
2 Copyright (c) 2018, Microsoft Research, Daan Leijen
3 This is free software; you can redistribute it and/or modify it under the
4 terms of the MIT license. A copy of the license can be found in the file
5 "LICENSE" at the root of this distribution.
6 -----------------------------------------------------------------------------*/
7 
8 #error "documentation file only!"
9 
10 
81 
85 
89 void mi_free(void* p);
90 
95 void* mi_malloc(size_t size);
96 
101 void* mi_zalloc(size_t size);
102 
112 void* mi_calloc(size_t count, size_t size);
113 
126 void* mi_realloc(void* p, size_t newsize);
127 
138 void* mi_recalloc(void* p, size_t count, size_t size);
139 
153 void* mi_expand(void* p, size_t newsize);
154 
164 void* mi_mallocn(size_t count, size_t size);
165 
175 void* mi_reallocn(void* p, size_t count, size_t size);
176 
193 void* mi_reallocf(void* p, size_t newsize);
194 
195 
204 char* mi_strdup(const char* s);
205 
215 char* mi_strndup(const char* s, size_t n);
216 
229 char* mi_realpath(const char* fname, char* resolved_name);
230 
232 
233 // ------------------------------------------------------
234 // Extended functionality
235 // ------------------------------------------------------
236 
240 
243 #define MI_SMALL_SIZE_MAX (128*sizeof(void*))
244 
252 void* mi_malloc_small(size_t size);
253 
261 void* mi_zalloc_small(size_t size);
262 
277 size_t mi_usable_size(void* p);
278 
288 size_t mi_good_size(size_t size);
289 
297 void mi_collect(bool force);
298 
303 void mi_stats_print(void* out);
304 
310 void mi_stats_print(mi_output_fun* out, void* arg);
311 
313 void mi_stats_reset(void);
314 
316 void mi_stats_merge(void);
317 
321 void mi_thread_init(void);
322 
327 void mi_thread_done(void);
328 
334 void mi_thread_stats_print_out(mi_output_fun* out, void* arg);
335 
342 typedef void (mi_deferred_free_fun)(bool force, unsigned long long heartbeat, void* arg);
343 
359 void mi_register_deferred_free(mi_deferred_free_fun* deferred_free, void* arg);
360 
366 typedef void (mi_output_fun)(const char* msg, void* arg);
367 
374 void mi_register_output(mi_output_fun* out, void* arg);
375 
381 typedef void (mi_error_fun)(int err, void* arg);
382 
398 void mi_register_error(mi_error_fun* errfun, void* arg);
399 
404 bool mi_is_in_heap_region(const void* p);
405 
406 
419 int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs);
420 
433 int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs);
434 
435 
440 bool mi_is_redirected();
441 
442 
444 
445 // ------------------------------------------------------
446 // Aligned allocation
447 // ------------------------------------------------------
448 
454 
467 void* mi_malloc_aligned(size_t size, size_t alignment);
468 void* mi_zalloc_aligned(size_t size, size_t alignment);
469 void* mi_calloc_aligned(size_t count, size_t size, size_t alignment);
470 void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment);
471 
482 void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset);
483 void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset);
484 void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset);
485 void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset);
486 
488 
494 
499 struct mi_heap_s;
500 
505 typedef struct mi_heap_s mi_heap_t;
506 
509 
517 void mi_heap_delete(mi_heap_t* heap);
518 
526 void mi_heap_destroy(mi_heap_t* heap);
527 
532 
536 
543 
545 void mi_heap_collect(mi_heap_t* heap, bool force);
546 
549 void* mi_heap_malloc(mi_heap_t* heap, size_t size);
550 
554 void* mi_heap_malloc_small(mi_heap_t* heap, size_t size);
555 
558 void* mi_heap_zalloc(mi_heap_t* heap, size_t size);
559 
562 void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size);
563 
566 void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size);
567 
570 char* mi_heap_strdup(mi_heap_t* heap, const char* s);
571 
574 char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n);
575 
578 char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name);
579 
580 void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize);
581 void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size);
582 void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize);
583 
584 void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment);
585 void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset);
586 void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment);
587 void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset);
588 void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment);
589 void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset);
590 void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment);
591 void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset);
592 
594 
595 
604 
605 void* mi_rezalloc(void* p, size_t newsize);
606 void* mi_recalloc(void* p, size_t newcount, size_t size) ;
607 
608 void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment);
609 void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset);
610 void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment);
611 void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset);
612 
613 void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize);
614 void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t newcount, size_t size);
615 
616 void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment);
617 void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset);
618 void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment);
619 void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset);
620 
622 
628 
640 #define mi_malloc_tp(tp) ((tp*)mi_malloc(sizeof(tp)))
641 
643 #define mi_zalloc_tp(tp) ((tp*)mi_zalloc(sizeof(tp)))
644 
646 #define mi_calloc_tp(tp,count) ((tp*)mi_calloc(count,sizeof(tp)))
647 
649 #define mi_mallocn_tp(tp,count) ((tp*)mi_mallocn(count,sizeof(tp)))
650 
652 #define mi_reallocn_tp(p,tp,count) ((tp*)mi_reallocn(p,count,sizeof(tp)))
653 
655 #define mi_heap_malloc_tp(hp,tp) ((tp*)mi_heap_malloc(hp,sizeof(tp)))
656 
658 #define mi_heap_zalloc_tp(hp,tp) ((tp*)mi_heap_zalloc(hp,sizeof(tp)))
659 
661 #define mi_heap_calloc_tp(hp,tp,count) ((tp*)mi_heap_calloc(hp,count,sizeof(tp)))
662 
664 #define mi_heap_mallocn_tp(hp,tp,count) ((tp*)mi_heap_mallocn(hp,count,sizeof(tp)))
665 
667 #define mi_heap_reallocn_tp(hp,p,tp,count) ((tp*)mi_heap_reallocn(p,count,sizeof(tp)))
668 
670 #define mi_heap_recalloc_tp(hp,p,tp,count) ((tp*)mi_heap_recalloc(p,count,sizeof(tp)))
671 
673 
679 
686 bool mi_heap_contains_block(mi_heap_t* heap, const void* p);
687 
696 bool mi_heap_check_owned(mi_heap_t* heap, const void* p);
697 
705 bool mi_check_owned(const void* p);
706 
709 typedef struct mi_heap_area_s {
710  void* blocks;
711  size_t reserved;
712  size_t committed;
713  size_t used;
714  size_t block_size;
716 
724 typedef bool (mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg);
725 
737 bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg);
738 
740 
746 
748 typedef enum mi_option_e {
749  // stable options
753  // the following options are experimental
767 } mi_option_t;
768 
769 
770 bool mi_option_enabled(mi_option_t option);
771 void mi_option_enable(mi_option_t option, bool enable);
772 void mi_option_enable_default(mi_option_t option, bool enable);
773 
774 long mi_option_get(mi_option_t option);
775 void mi_option_set(mi_option_t option, long value);
776 void mi_option_set_default(mi_option_t option, long value);
777 
778 
780 
787 
788 void* mi_recalloc(void* p, size_t count, size_t size);
789 size_t mi_malloc_size(const void* p);
790 size_t mi_malloc_usable_size(const void *p);
791 
793 void mi_cfree(void* p);
794 
795 int mi_posix_memalign(void** p, size_t alignment, size_t size);
796 int mi__posix_memalign(void** p, size_t alignment, size_t size);
797 void* mi_memalign(size_t alignment, size_t size);
798 void* mi_valloc(size_t size);
799 
800 void* mi_pvalloc(size_t size);
801 void* mi_aligned_alloc(size_t alignment, size_t size);
802 void* mi_reallocarray(void* p, size_t count, size_t size);
803 
804 void mi_free_size(void* p, size_t size);
805 void mi_free_size_aligned(void* p, size_t size, size_t alignment);
806 void mi_free_aligned(void* p, size_t alignment);
807 
809 void* mi_new(std::size_t n) noexcept(false);
810 
812 void* mi_new_n(size_t count, size_t size) noexcept(false);
813 
815 void* mi_new_aligned(std::size_t n, std::align_val_t alignment) noexcept(false);
816 
818 void* mi_new_nothrow(size_t n);
819 ``
821 void* mi_new_aligned_nothrow(size_t n, size_t alignment);
822 
824 
void mi_option_enable_default(mi_option_t option, bool enable)
+
1 /* ----------------------------------------------------------------------------
2 Copyright (c) 2018, Microsoft Research, Daan Leijen
3 This is free software; you can redistribute it and/or modify it under the
4 terms of the MIT license. A copy of the license can be found in the file
5 "LICENSE" at the root of this distribution.
6 -----------------------------------------------------------------------------*/
7 
8 #error "documentation file only!"
9 
10 
81 
85 
89 void mi_free(void* p);
90 
95 void* mi_malloc(size_t size);
96 
101 void* mi_zalloc(size_t size);
102 
112 void* mi_calloc(size_t count, size_t size);
113 
126 void* mi_realloc(void* p, size_t newsize);
127 
138 void* mi_recalloc(void* p, size_t count, size_t size);
139 
153 void* mi_expand(void* p, size_t newsize);
154 
164 void* mi_mallocn(size_t count, size_t size);
165 
175 void* mi_reallocn(void* p, size_t count, size_t size);
176 
193 void* mi_reallocf(void* p, size_t newsize);
194 
195 
204 char* mi_strdup(const char* s);
205 
215 char* mi_strndup(const char* s, size_t n);
216 
229 char* mi_realpath(const char* fname, char* resolved_name);
230 
232 
233 // ------------------------------------------------------
234 // Extended functionality
235 // ------------------------------------------------------
236 
240 
243 #define MI_SMALL_SIZE_MAX (128*sizeof(void*))
244 
252 void* mi_malloc_small(size_t size);
253 
261 void* mi_zalloc_small(size_t size);
262 
277 size_t mi_usable_size(void* p);
278 
288 size_t mi_good_size(size_t size);
289 
297 void mi_collect(bool force);
298 
303 void mi_stats_print(void* out);
304 
310 void mi_stats_print(mi_output_fun* out, void* arg);
311 
313 void mi_stats_reset(void);
314 
316 void mi_stats_merge(void);
317 
321 void mi_thread_init(void);
322 
327 void mi_thread_done(void);
328 
334 void mi_thread_stats_print_out(mi_output_fun* out, void* arg);
335 
342 typedef void (mi_deferred_free_fun)(bool force, unsigned long long heartbeat, void* arg);
343 
359 void mi_register_deferred_free(mi_deferred_free_fun* deferred_free, void* arg);
360 
366 typedef void (mi_output_fun)(const char* msg, void* arg);
367 
374 void mi_register_output(mi_output_fun* out, void* arg);
375 
381 typedef void (mi_error_fun)(int err, void* arg);
382 
398 void mi_register_error(mi_error_fun* errfun, void* arg);
399 
404 bool mi_is_in_heap_region(const void* p);
405 
406 
419 int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs);
420 
433 int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs);
434 
435 
440 bool mi_is_redirected();
441 
442 
444 
445 // ------------------------------------------------------
446 // Aligned allocation
447 // ------------------------------------------------------
448 
454 
467 void* mi_malloc_aligned(size_t size, size_t alignment);
468 void* mi_zalloc_aligned(size_t size, size_t alignment);
469 void* mi_calloc_aligned(size_t count, size_t size, size_t alignment);
470 void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment);
471 
482 void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset);
483 void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset);
484 void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset);
485 void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset);
486 
488 
494 
499 struct mi_heap_s;
500 
505 typedef struct mi_heap_s mi_heap_t;
506 
509 
517 void mi_heap_delete(mi_heap_t* heap);
518 
526 void mi_heap_destroy(mi_heap_t* heap);
527 
532 
536 
543 
545 void mi_heap_collect(mi_heap_t* heap, bool force);
546 
549 void* mi_heap_malloc(mi_heap_t* heap, size_t size);
550 
554 void* mi_heap_malloc_small(mi_heap_t* heap, size_t size);
555 
558 void* mi_heap_zalloc(mi_heap_t* heap, size_t size);
559 
562 void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size);
563 
566 void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size);
567 
570 char* mi_heap_strdup(mi_heap_t* heap, const char* s);
571 
574 char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n);
575 
578 char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name);
579 
580 void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize);
581 void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size);
582 void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize);
583 
584 void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment);
585 void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset);
586 void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment);
587 void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset);
588 void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment);
589 void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset);
590 void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment);
591 void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset);
592 
594 
595 
604 
605 void* mi_rezalloc(void* p, size_t newsize);
606 void* mi_recalloc(void* p, size_t newcount, size_t size) ;
607 
608 void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment);
609 void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset);
610 void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment);
611 void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset);
612 
613 void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize);
614 void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t newcount, size_t size);
615 
616 void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment);
617 void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset);
618 void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment);
619 void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset);
620 
622 
628 
640 #define mi_malloc_tp(tp) ((tp*)mi_malloc(sizeof(tp)))
641 
643 #define mi_zalloc_tp(tp) ((tp*)mi_zalloc(sizeof(tp)))
644 
646 #define mi_calloc_tp(tp,count) ((tp*)mi_calloc(count,sizeof(tp)))
647 
649 #define mi_mallocn_tp(tp,count) ((tp*)mi_mallocn(count,sizeof(tp)))
650 
652 #define mi_reallocn_tp(p,tp,count) ((tp*)mi_reallocn(p,count,sizeof(tp)))
653 
655 #define mi_heap_malloc_tp(hp,tp) ((tp*)mi_heap_malloc(hp,sizeof(tp)))
656 
658 #define mi_heap_zalloc_tp(hp,tp) ((tp*)mi_heap_zalloc(hp,sizeof(tp)))
659 
661 #define mi_heap_calloc_tp(hp,tp,count) ((tp*)mi_heap_calloc(hp,count,sizeof(tp)))
662 
664 #define mi_heap_mallocn_tp(hp,tp,count) ((tp*)mi_heap_mallocn(hp,count,sizeof(tp)))
665 
667 #define mi_heap_reallocn_tp(hp,p,tp,count) ((tp*)mi_heap_reallocn(p,count,sizeof(tp)))
668 
670 #define mi_heap_recalloc_tp(hp,p,tp,count) ((tp*)mi_heap_recalloc(p,count,sizeof(tp)))
671 
673 
679 
686 bool mi_heap_contains_block(mi_heap_t* heap, const void* p);
687 
696 bool mi_heap_check_owned(mi_heap_t* heap, const void* p);
697 
705 bool mi_check_owned(const void* p);
706 
709 typedef struct mi_heap_area_s {
710  void* blocks;
711  size_t reserved;
712  size_t committed;
713  size_t used;
714  size_t block_size;
716 
724 typedef bool (mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg);
725 
737 bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg);
738 
740 
746 
748 typedef enum mi_option_e {
749  // stable options
753  // the following options are experimental
767 } mi_option_t;
768 
769 
770 bool mi_option_enabled(mi_option_t option);
771 void mi_option_enable(mi_option_t option, bool enable);
772 void mi_option_enable_default(mi_option_t option, bool enable);
773 
774 long mi_option_get(mi_option_t option);
775 void mi_option_set(mi_option_t option, long value);
776 void mi_option_set_default(mi_option_t option, long value);
777 
778 
780 
787 
788 void* mi_recalloc(void* p, size_t count, size_t size);
789 size_t mi_malloc_size(const void* p);
790 size_t mi_malloc_usable_size(const void *p);
791 
793 void mi_cfree(void* p);
794 
795 int mi_posix_memalign(void** p, size_t alignment, size_t size);
796 int mi__posix_memalign(void** p, size_t alignment, size_t size);
797 void* mi_memalign(size_t alignment, size_t size);
798 void* mi_valloc(size_t size);
799 
800 void* mi_pvalloc(size_t size);
801 void* mi_aligned_alloc(size_t alignment, size_t size);
802 void* mi_reallocarray(void* p, size_t count, size_t size);
803 
804 void mi_free_size(void* p, size_t size);
805 void mi_free_size_aligned(void* p, size_t size, size_t alignment);
806 void mi_free_aligned(void* p, size_t alignment);
807 
809 
822 
824 void* mi_new(std::size_t n) noexcept(false);
825 
827 void* mi_new_n(size_t count, size_t size) noexcept(false);
828 
830 void* mi_new_aligned(std::size_t n, std::align_val_t alignment) noexcept(false);
831 
833 void* mi_new_nothrow(size_t n);
834 
836 void* mi_new_aligned_nothrow(size_t n, size_t alignment);
837 
839 void* mi_new_realloc(void* p, size_t newsize);
840 
842 void* mi_new_reallocn(void* p, size_t newcount, size_t size);
843 
851 template<class T> struct mi_stl_allocator { }
852 
854 
void mi_option_enable_default(mi_option_t option, bool enable)
size_t mi_usable_size(void *p)
Return the available bytes in a memory block.
+
void * mi_new_nothrow(size_t n)
like mi_malloc, but when out of memory, use std::get_new_handler but return NULL on failure.
void * mi_reallocn(void *p, size_t count, size_t size)
Re-allocate memory to count elements of size bytes.
void * mi_malloc_aligned(size_t size, size_t alignment)
Allocate size bytes aligned by alignment.
void * mi_recalloc_aligned_at(void *p, size_t newcount, size_t size, size_t alignment, size_t offset)
void mi_stats_reset(void)
Reset statistics.
void * mi_heap_realloc_aligned(mi_heap_t *heap, void *p, size_t newsize, size_t alignment)
+
void * mi_new_realloc(void *p, size_t newsize)
like mi_realloc(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exceptio...
void * mi_recalloc(void *p, size_t count, size_t size)
Re-allocate memory to count elements of size bytes, with extra memory initialized to zero.
void * mi_mallocn(size_t count, size_t size)
Allocate count elements of size bytes.
size_t mi_malloc_size(const void *p)
int mi_posix_memalign(void **p, size_t alignment, size_t size)
void mi_stats_merge(void)
Merge thread local statistics with the main statistics and reset.
+
void * mi_new_n(size_t count, size_t size) noexcept(false)
like mi_mallocn(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exceptio...
void mi_option_set_default(mi_option_t option, long value)
-
void * mi_new_aligned(std::size_t n, std::align_val_t alignment) noexcept(false)
raise std::bad_alloc exception on failure.
void() mi_error_fun(int err, void *arg)
Type of error callback functions.
Definition: mimalloc-doc.h:381
void * mi_rezalloc(void *p, size_t newsize)
Eagerly commit segments (4MiB) (enabled by default).
Definition: mimalloc-doc.h:754
@@ -127,7 +129,6 @@ $(document).ready(function(){initNavTree('mimalloc-doc_8h_source.html','');});
Definition: mimalloc-doc.h:766
void * mi_realloc_aligned_at(void *p, size_t newsize, size_t alignment, size_t offset)
void * blocks
start of the area containing heap blocks
Definition: mimalloc-doc.h:710
-
void * mi_new_n(size_t count, size_t size) noexcept(false)
raise std::bad_alloc exception on failure or overflow.
void * mi_realloc_aligned(void *p, size_t newsize, size_t alignment)
int mi__posix_memalign(void **p, size_t alignment, size_t size)
void mi_free(void *p)
Free previously allocated memory.
@@ -145,7 +146,6 @@ $(document).ready(function(){initNavTree('mimalloc-doc_8h_source.html','');});
void * mi_heap_rezalloc(mi_heap_t *heap, void *p, size_t newsize)
The number of segments per thread to keep cached.
Definition: mimalloc-doc.h:758
void * mi_heap_calloc(mi_heap_t *heap, size_t count, size_t size)
Allocate count zero-initialized elements in a specific heap.
-
void * mi_new(std::size_t n) noexcept(false)
raise std::bad_alloc exception on failure.
void * mi_heap_calloc_aligned(mi_heap_t *heap, size_t count, size_t size, size_t alignment)
bool mi_is_redirected()
Is the C runtime malloc API redirected?
size_t block_size
size in bytes of one block
Definition: mimalloc-doc.h:714
@@ -154,6 +154,7 @@ $(document).ready(function(){initNavTree('mimalloc-doc_8h_source.html','');});
void() mi_deferred_free_fun(bool force, unsigned long long heartbeat, void *arg)
Type of deferred free functions.
Definition: mimalloc-doc.h:342
bool mi_is_in_heap_region(const void *p)
Is a pointer part of our heap?
void mi_option_enable(mi_option_t option, bool enable)
+
void * mi_new_aligned(std::size_t n, std::align_val_t alignment) noexcept(false)
like mi_malloc_aligned(), but when out of memory, use std::get_new_handler and raise std::bad_alloc e...
void * mi_realloc(void *p, size_t newsize)
Re-allocate memory to newsize bytes.
The number of huge OS pages (1GiB in size) to reserve at the start of the program.
Definition: mimalloc-doc.h:757
void * mi_heap_reallocf(mi_heap_t *heap, void *p, size_t newsize)
@@ -175,9 +176,8 @@ $(document).ready(function(){initNavTree('mimalloc-doc_8h_source.html','');});
Print error messages to stderr.
Definition: mimalloc-doc.h:751
Experimental.
Definition: mimalloc-doc.h:760
void * mi_heap_rezalloc_aligned(mi_heap_t *heap, void *p, size_t newsize, size_t alignment)
+
void * mi_new_aligned_nothrow(size_t n, size_t alignment)
like mi_malloc_aligned, but when out of memory, use std::get_new_handler but return NULL on failure.
void * mi_memalign(size_t alignment, size_t size)
-
void * mi_new_aligned_nothrow(size_t n, size_t alignment)
return NULL on failure.
-
void * mi_new_nothrow(size_t n)
return NULL on failure.
void * mi_rezalloc_aligned(void *p, size_t newsize, size_t alignment)
bool mi_heap_contains_block(mi_heap_t *heap, const void *p)
Does a heap contain a pointer to a previously allocated block?
void mi_heap_collect(mi_heap_t *heap, bool force)
Release outstanding resources in a specific heap.
@@ -207,11 +207,13 @@ $(document).ready(function(){initNavTree('mimalloc-doc_8h_source.html','');});
void mi_register_deferred_free(mi_deferred_free_fun *deferred_free, void *arg)
Register a deferred free function.
void mi_free_size(void *p, size_t size)
void mi_collect(bool force)
Eagerly free memory.
+
void * mi_new_reallocn(void *p, size_t newcount, size_t size)
like mi_reallocn(), but when out of memory, use std::get_new_handler and raise std::bad_alloc excepti...
void mi_heap_destroy(mi_heap_t *heap)
Destroy a heap, freeing all its still allocated blocks.
void * mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset)
Use large OS pages (2MiB in size) if possible.
Definition: mimalloc-doc.h:756
void * mi_heap_reallocn(mi_heap_t *heap, void *p, size_t count, size_t size)
void mi_register_output(mi_output_fun *out, void *arg)
Register an output function.
+
std::allocator implementation for mimalloc for use in STL containers.
Definition: mimalloc-doc.h:851
void * mi_heap_malloc_small(mi_heap_t *heap, size_t size)
Allocate a small object in a specific heap.
void * mi_heap_realloc(mi_heap_t *heap, void *p, size_t newsize)
size_t mi_malloc_usable_size(const void *p)
@@ -227,6 +229,7 @@ $(document).ready(function(){initNavTree('mimalloc-doc_8h_source.html','');});
long mi_option_get(mi_option_t option)
mi_heap_t * mi_heap_get_backing()
Get the backing heap.
void mi_free_aligned(void *p, size_t alignment)
+
void * mi_new(std::size_t n) noexcept(false)
like mi_malloc(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception...
Delay in milli-seconds before resetting a page (100ms by default)
Definition: mimalloc-doc.h:761
mi_heap_t * mi_heap_new()
Create a new heap that can be used for allocation.
void * mi_heap_malloc(mi_heap_t *heap, size_t size)
Allocate in a specific heap.
diff --git a/docs/modules.html b/docs/modules.html index 0bc6036d..91bf17e8 100644 --- a/docs/modules.html +++ b/docs/modules.html @@ -113,6 +113,7 @@ $(document).ready(function(){initNavTree('modules.html','');});  Heap IntrospectionInspect the heap at runtime  Runtime OptionsSet runtime behavior  Posixmi_ prefixed implementations of various Posix, Unix, and C++ allocation functions + C++ wrappersmi_ prefixed implementations of various allocation functions that use C++ semantics on out-of-memory, generally calling std::get_new_handler and raising a std::bad_alloc exception on failure
diff --git a/docs/modules.js b/docs/modules.js index 47e99b42..b2c2a224 100644 --- a/docs/modules.js +++ b/docs/modules.js @@ -8,5 +8,6 @@ var modules = [ "Typed Macros", "group__typed.html", "group__typed" ], [ "Heap Introspection", "group__analysis.html", "group__analysis" ], [ "Runtime Options", "group__options.html", "group__options" ], - [ "Posix", "group__posix.html", "group__posix" ] + [ "Posix", "group__posix.html", "group__posix" ], + [ "C++ wrappers", "group__cpp.html", "group__cpp" ] ]; \ No newline at end of file diff --git a/docs/navtreeindex0.js b/docs/navtreeindex0.js index e2667728..047d6dbc 100644 --- a/docs/navtreeindex0.js +++ b/docs/navtreeindex0.js @@ -28,6 +28,15 @@ var NAVTREEINDEX0 = "group__analysis.html#gaa862aa8ed8d57d84cae41fc1022d71af":[5,6,4], "group__analysis.html#gadfa01e2900f0e5d515ad5506b26f6d65":[5,6,1], "group__analysis.html#structmi__heap__area__t":[5,6,0], +"group__cpp.html":[5,9], +"group__cpp.html#ga756f4b2bc6a7ecd0a90baea8e90c7907":[5,9,7], +"group__cpp.html#gaab78a32f55149e9fbf432d5288e38e1e":[5,9,6], +"group__cpp.html#gaad048a9fce3d02c5909cd05c6ec24545":[5,9,1], +"group__cpp.html#gab5e29558926d934c3f1cae8c815f942c":[5,9,3], +"group__cpp.html#gae7bc4f56cd57ed3359060ff4f38bda81":[5,9,4], +"group__cpp.html#gaeaded64eda71ed6b1d569d3e723abc4a":[5,9,5], +"group__cpp.html#gaef2c2bdb4f70857902d3c8903ac095f3":[5,9,2], +"group__cpp.html#structmi__stl__allocator":[5,9,0], "group__extended.html":[5,1], "group__extended.html#ga089c859d9eddc5f9b4bd946cd53cebee":[5,1,21], "group__extended.html#ga0ae4581e85453456a0d658b2b98bf7bf":[5,1,18], @@ -123,20 +132,15 @@ var NAVTREEINDEX0 = "group__posix.html#ga0d28d5cf61e6bfbb18c63092939fe5c9":[5,8,3], "group__posix.html#ga1326d2e4388630b5f81ca7206318b8e5":[5,8,1], "group__posix.html#ga4531c9e775bb3ae12db57c1ba8a5d7de":[5,8,6], -"group__posix.html#ga48fad8648a2f1dab9c87ea9448a52088":[5,8,16], +"group__posix.html#ga48fad8648a2f1dab9c87ea9448a52088":[5,8,11], "group__posix.html#ga705dc7a64bffacfeeb0141501a5c35d7":[5,8,2], "group__posix.html#ga72e9d7ffb5fe94d69bc722c8506e27bc":[5,8,5], -"group__posix.html#ga73baaf5951f5165ba0763d0c06b6a93b":[5,8,17], +"group__posix.html#ga73baaf5951f5165ba0763d0c06b6a93b":[5,8,12], "group__posix.html#gaab7fa71ea93b96873f5d9883db57d40e":[5,8,8], -"group__posix.html#gaad048a9fce3d02c5909cd05c6ec24545":[5,8,9], -"group__posix.html#gab5e29558926d934c3f1cae8c815f942c":[5,8,11], -"group__posix.html#gacff84f226ba9feb2031b8992e5579447":[5,8,14], +"group__posix.html#gacff84f226ba9feb2031b8992e5579447":[5,8,9], "group__posix.html#gad5a69c8fea96aa2b7a7c818c2130090a":[5,8,0], "group__posix.html#gae01389eedab8d67341ff52e2aad80ebb":[5,8,4], -"group__posix.html#gae7bc4f56cd57ed3359060ff4f38bda81":[5,8,12], -"group__posix.html#gaeaded64eda71ed6b1d569d3e723abc4a":[5,8,13], -"group__posix.html#gaeb325c39b887d3b90d85d1eb1712fb1e":[5,8,15], -"group__posix.html#gaef2c2bdb4f70857902d3c8903ac095f3":[5,8,10], +"group__posix.html#gaeb325c39b887d3b90d85d1eb1712fb1e":[5,8,10], "group__typed.html":[5,5], "group__typed.html#ga0619a62c5fd886f1016030abe91f0557":[5,5,7], "group__typed.html#ga1158b49a55dfa81f58a4426a7578f523":[5,5,9], diff --git a/docs/search/all_3.js b/docs/search/all_3.js index af76e9c8..2e08411f 100644 --- a/docs/search/all_3.js +++ b/docs/search/all_3.js @@ -1,4 +1,5 @@ var searchData= [ - ['committed',['committed',['../group__analysis.html#ab47526df656d8837ec3e97f11b83f835',1,'mi_heap_area_t']]] + 
['committed',['committed',['../group__analysis.html#ab47526df656d8837ec3e97f11b83f835',1,'mi_heap_area_t']]], + ['c_2b_2b_20wrappers',['C++ wrappers',['../group__cpp.html',1,'']]] ]; diff --git a/docs/search/all_6.js b/docs/search/all_6.js index 7af11c0f..c757cbbf 100644 --- a/docs/search/all_6.js +++ b/docs/search/all_6.js @@ -73,11 +73,13 @@ var searchData= ['mi_5fmallocn',['mi_mallocn',['../group__malloc.html#ga0b05e2bf0f73e7401ae08597ff782ac6',1,'mimalloc-doc.h']]], ['mi_5fmallocn_5ftp',['mi_mallocn_tp',['../group__typed.html#gae5cb6e0fafc9f23169c5622e077afe8b',1,'mimalloc-doc.h']]], ['mi_5fmemalign',['mi_memalign',['../group__posix.html#gaab7fa71ea93b96873f5d9883db57d40e',1,'mimalloc-doc.h']]], - ['mi_5fnew',['mi_new',['../group__posix.html#gaad048a9fce3d02c5909cd05c6ec24545',1,'mimalloc-doc.h']]], - ['mi_5fnew_5faligned',['mi_new_aligned',['../group__posix.html#gaef2c2bdb4f70857902d3c8903ac095f3',1,'mimalloc-doc.h']]], - ['mi_5fnew_5faligned_5fnothrow',['mi_new_aligned_nothrow',['../group__posix.html#gab5e29558926d934c3f1cae8c815f942c',1,'mimalloc-doc.h']]], - ['mi_5fnew_5fn',['mi_new_n',['../group__posix.html#gae7bc4f56cd57ed3359060ff4f38bda81',1,'mimalloc-doc.h']]], - ['mi_5fnew_5fnothrow',['mi_new_nothrow',['../group__posix.html#gaeaded64eda71ed6b1d569d3e723abc4a',1,'mimalloc-doc.h']]], + ['mi_5fnew',['mi_new',['../group__cpp.html#gaad048a9fce3d02c5909cd05c6ec24545',1,'mimalloc-doc.h']]], + ['mi_5fnew_5faligned',['mi_new_aligned',['../group__cpp.html#gaef2c2bdb4f70857902d3c8903ac095f3',1,'mimalloc-doc.h']]], + ['mi_5fnew_5faligned_5fnothrow',['mi_new_aligned_nothrow',['../group__cpp.html#gab5e29558926d934c3f1cae8c815f942c',1,'mimalloc-doc.h']]], + ['mi_5fnew_5fn',['mi_new_n',['../group__cpp.html#gae7bc4f56cd57ed3359060ff4f38bda81',1,'mimalloc-doc.h']]], + ['mi_5fnew_5fnothrow',['mi_new_nothrow',['../group__cpp.html#gaeaded64eda71ed6b1d569d3e723abc4a',1,'mimalloc-doc.h']]], + ['mi_5fnew_5frealloc',['mi_new_realloc',['../group__cpp.html#gaab78a32f55149e9fbf432d5288e38e1e',1,'mimalloc-doc.h']]], + ['mi_5fnew_5freallocn',['mi_new_reallocn',['../group__cpp.html#ga756f4b2bc6a7ecd0a90baea8e90c7907',1,'mimalloc-doc.h']]], ['mi_5foption_5feager_5fcommit',['mi_option_eager_commit',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca1e8de72c93da7ff22d91e1e27b52ac2b',1,'mimalloc-doc.h']]], ['mi_5foption_5feager_5fcommit_5fdelay',['mi_option_eager_commit_delay',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca17a190c25be381142d87e0468c4c068c',1,'mimalloc-doc.h']]], ['mi_5foption_5feager_5fregion_5fcommit',['mi_option_eager_region_commit',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca32ce97ece29f69e82579679cf8a307ad',1,'mimalloc-doc.h']]], @@ -126,6 +128,7 @@ var searchData= ['mi_5fstats_5fmerge',['mi_stats_merge',['../group__extended.html#ga854b1de8cb067c7316286c28b2fcd3d1',1,'mimalloc-doc.h']]], ['mi_5fstats_5fprint',['mi_stats_print',['../group__extended.html#ga2d126e5c62d3badc35445e5d84166df2',1,'mi_stats_print(void *out): mimalloc-doc.h'],['../group__extended.html#ga256cc6f13a142deabbadd954a217e228',1,'mi_stats_print(mi_output_fun *out, void *arg): mimalloc-doc.h']]], ['mi_5fstats_5freset',['mi_stats_reset',['../group__extended.html#ga3bb8468b8cfcc6e2a61d98aee85c5f99',1,'mimalloc-doc.h']]], + ['mi_5fstl_5fallocator',['mi_stl_allocator',['../group__cpp.html#structmi__stl__allocator',1,'']]], ['mi_5fstrdup',['mi_strdup',['../group__malloc.html#gac7cffe13f1f458ed16789488bf92b9b2',1,'mimalloc-doc.h']]], 
['mi_5fstrndup',['mi_strndup',['../group__malloc.html#gaaabf971c2571891433477e2d21a35266',1,'mimalloc-doc.h']]], ['mi_5fthread_5fdone',['mi_thread_done',['../group__extended.html#ga0ae4581e85453456a0d658b2b98bf7bf',1,'mimalloc-doc.h']]], diff --git a/docs/search/classes_0.js b/docs/search/classes_0.js index 4c5482b9..0010dd97 100644 --- a/docs/search/classes_0.js +++ b/docs/search/classes_0.js @@ -1,4 +1,5 @@ var searchData= [ - ['mi_5fheap_5farea_5ft',['mi_heap_area_t',['../group__analysis.html#structmi__heap__area__t',1,'']]] + ['mi_5fheap_5farea_5ft',['mi_heap_area_t',['../group__analysis.html#structmi__heap__area__t',1,'']]], + ['mi_5fstl_5fallocator',['mi_stl_allocator',['../group__cpp.html#structmi__stl__allocator',1,'']]] ]; diff --git a/docs/search/functions_0.js b/docs/search/functions_0.js index 098041bb..6271797a 100644 --- a/docs/search/functions_0.js +++ b/docs/search/functions_0.js @@ -59,11 +59,13 @@ var searchData= ['mi_5fmalloc_5fusable_5fsize',['mi_malloc_usable_size',['../group__posix.html#ga06d07cf357bbac5c73ba5d0c0c421e17',1,'mimalloc-doc.h']]], ['mi_5fmallocn',['mi_mallocn',['../group__malloc.html#ga0b05e2bf0f73e7401ae08597ff782ac6',1,'mimalloc-doc.h']]], ['mi_5fmemalign',['mi_memalign',['../group__posix.html#gaab7fa71ea93b96873f5d9883db57d40e',1,'mimalloc-doc.h']]], - ['mi_5fnew',['mi_new',['../group__posix.html#gaad048a9fce3d02c5909cd05c6ec24545',1,'mimalloc-doc.h']]], - ['mi_5fnew_5faligned',['mi_new_aligned',['../group__posix.html#gaef2c2bdb4f70857902d3c8903ac095f3',1,'mimalloc-doc.h']]], - ['mi_5fnew_5faligned_5fnothrow',['mi_new_aligned_nothrow',['../group__posix.html#gab5e29558926d934c3f1cae8c815f942c',1,'mimalloc-doc.h']]], - ['mi_5fnew_5fn',['mi_new_n',['../group__posix.html#gae7bc4f56cd57ed3359060ff4f38bda81',1,'mimalloc-doc.h']]], - ['mi_5fnew_5fnothrow',['mi_new_nothrow',['../group__posix.html#gaeaded64eda71ed6b1d569d3e723abc4a',1,'mimalloc-doc.h']]], + ['mi_5fnew',['mi_new',['../group__cpp.html#gaad048a9fce3d02c5909cd05c6ec24545',1,'mimalloc-doc.h']]], + ['mi_5fnew_5faligned',['mi_new_aligned',['../group__cpp.html#gaef2c2bdb4f70857902d3c8903ac095f3',1,'mimalloc-doc.h']]], + ['mi_5fnew_5faligned_5fnothrow',['mi_new_aligned_nothrow',['../group__cpp.html#gab5e29558926d934c3f1cae8c815f942c',1,'mimalloc-doc.h']]], + ['mi_5fnew_5fn',['mi_new_n',['../group__cpp.html#gae7bc4f56cd57ed3359060ff4f38bda81',1,'mimalloc-doc.h']]], + ['mi_5fnew_5fnothrow',['mi_new_nothrow',['../group__cpp.html#gaeaded64eda71ed6b1d569d3e723abc4a',1,'mimalloc-doc.h']]], + ['mi_5fnew_5frealloc',['mi_new_realloc',['../group__cpp.html#gaab78a32f55149e9fbf432d5288e38e1e',1,'mimalloc-doc.h']]], + ['mi_5fnew_5freallocn',['mi_new_reallocn',['../group__cpp.html#ga756f4b2bc6a7ecd0a90baea8e90c7907',1,'mimalloc-doc.h']]], ['mi_5foption_5fenable',['mi_option_enable',['../group__options.html#ga6d45a20a3131f18bc351b69763b38ce4',1,'mimalloc-doc.h']]], ['mi_5foption_5fenable_5fdefault',['mi_option_enable_default',['../group__options.html#ga37988264b915a7db92530cc02d5494cb',1,'mimalloc-doc.h']]], ['mi_5foption_5fenabled',['mi_option_enabled',['../group__options.html#gacebe3f6d91b4a50b54eb84e2a1da1b30',1,'mimalloc-doc.h']]], diff --git a/docs/search/groups_2.js b/docs/search/groups_2.js index 68c73dbe..29185761 100644 --- a/docs/search/groups_2.js +++ b/docs/search/groups_2.js @@ -1,4 +1,4 @@ var searchData= [ - ['extended_20functions',['Extended Functions',['../group__extended.html',1,'']]] + ['c_2b_2b_20wrappers',['C++ wrappers',['../group__cpp.html',1,'']]] ]; diff --git a/docs/search/groups_3.js 
b/docs/search/groups_3.js index e7e40934..68c73dbe 100644 --- a/docs/search/groups_3.js +++ b/docs/search/groups_3.js @@ -1,5 +1,4 @@ var searchData= [ - ['heap_20introspection',['Heap Introspection',['../group__analysis.html',1,'']]], - ['heap_20allocation',['Heap Allocation',['../group__heap.html',1,'']]] + ['extended_20functions',['Extended Functions',['../group__extended.html',1,'']]] ]; diff --git a/docs/search/groups_4.js b/docs/search/groups_4.js index 4f005682..e7e40934 100644 --- a/docs/search/groups_4.js +++ b/docs/search/groups_4.js @@ -1,4 +1,5 @@ var searchData= [ - ['posix',['Posix',['../group__posix.html',1,'']]] + ['heap_20introspection',['Heap Introspection',['../group__analysis.html',1,'']]], + ['heap_20allocation',['Heap Allocation',['../group__heap.html',1,'']]] ]; diff --git a/docs/search/groups_5.js b/docs/search/groups_5.js index 2533cb94..4f005682 100644 --- a/docs/search/groups_5.js +++ b/docs/search/groups_5.js @@ -1,4 +1,4 @@ var searchData= [ - ['runtime_20options',['Runtime Options',['../group__options.html',1,'']]] + ['posix',['Posix',['../group__posix.html',1,'']]] ]; diff --git a/docs/search/groups_6.js b/docs/search/groups_6.js index 647887f5..2533cb94 100644 --- a/docs/search/groups_6.js +++ b/docs/search/groups_6.js @@ -1,4 +1,4 @@ var searchData= [ - ['typed_20macros',['Typed Macros',['../group__typed.html',1,'']]] + ['runtime_20options',['Runtime Options',['../group__options.html',1,'']]] ]; diff --git a/docs/search/groups_7.js b/docs/search/groups_7.js index 2b9b4cea..647887f5 100644 --- a/docs/search/groups_7.js +++ b/docs/search/groups_7.js @@ -1,4 +1,4 @@ var searchData= [ - ['zero_20initialized_20re_2dallocation',['Zero initialized re-allocation',['../group__zeroinit.html',1,'']]] + ['typed_20macros',['Typed Macros',['../group__typed.html',1,'']]] ]; diff --git a/docs/search/groups_8.html b/docs/search/groups_8.html new file mode 100644 index 00000000..81ac9508 --- /dev/null +++ b/docs/search/groups_8.html @@ -0,0 +1,30 @@ + + + + + + + + + +
+
Loading...
+
+ +
Searching...
+
No Matches
+ +
+ + diff --git a/docs/search/groups_8.js b/docs/search/groups_8.js new file mode 100644 index 00000000..2b9b4cea --- /dev/null +++ b/docs/search/groups_8.js @@ -0,0 +1,4 @@ +var searchData= +[ + ['zero_20initialized_20re_2dallocation',['Zero initialized re-allocation',['../group__zeroinit.html',1,'']]] +]; diff --git a/docs/search/searchdata.js b/docs/search/searchdata.js index 919719e9..dd31068e 100644 --- a/docs/search/searchdata.js +++ b/docs/search/searchdata.js @@ -7,7 +7,7 @@ var indexSectionsWithContent = 4: "m", 5: "m", 6: "_m", - 7: "abehprtz", + 7: "abcehprtz", 8: "beopu" }; diff --git a/docs/using.html b/docs/using.html index eae37a5e..c5dc12e7 100644 --- a/docs/using.html +++ b/docs/using.html @@ -106,7 +106,7 @@ $(document).ready(function(){initNavTree('using.html','');});

The preferred usage is including <mimalloc.h>, linking with the shared- or static library, and using the mi_malloc API exclusively for allocation. For example,

gcc -o myprogram -lmimalloc myfile.c

mimalloc uses only safe OS calls (mmap and VirtualAlloc) and can co-exist with other allocators linked to the same program. If you use cmake, you can simply use:

find_package(mimalloc 1.0 REQUIRED)

in your CMakeLists.txt to find a locally installed mimalloc. Then use either:

target_link_libraries(myapp PUBLIC mimalloc)

to link with the shared (dynamic) library, or:

target_link_libraries(myapp PUBLIC mimalloc-static)

to link with the static library. See test\CMakeLists.txt for an example.

C++

For best performance in C++ programs, it is also recommended to override the global new and delete operators. For convenience, mimalloc provides mimalloc-new-delete.h which does this for you – just include it in a single(!) source file in your project.
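For example, one (and only one) source file in the project would contain nothing more than the include; the file name below is just a placeholder:
```
// new-delete-override.cpp  -- hypothetical file name; include the header in
// exactly one translation unit so the global operator new/delete are defined once.
#include <mimalloc-new-delete.h>
```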

-

In C++, mimalloc also provides the mi_stl_allocator struct which implements the std::allocator interface. For example:

std::vector<some_struct, mi_stl_allocator<some_struct>> vec;
vec.push_back(some_struct());

Statistics

+

In C++, mimalloc also provides the mi_stl_allocator struct which implements the std::allocator interface. For example:

std::vector<some_struct, mi_stl_allocator<some_struct>> vec;
vec.push_back(some_struct());

Statistics

You can pass environment variables to print verbose messages (MIMALLOC_VERBOSE=1) and statistics (MIMALLOC_SHOW_STATS=1) (in the debug version):

> env MIMALLOC_SHOW_STATS=1 ./cfrac 175451865205073170563711388363
175451865205073170563711388363 = 374456281610909315237213 * 468551
heap stats: peak total freed unit
normal 2: 16.4 kb 17.5 mb 17.5 mb 16 b ok
normal 3: 16.3 kb 15.2 mb 15.2 mb 24 b ok
normal 4: 64 b 4.6 kb 4.6 kb 32 b ok
normal 5: 80 b 118.4 kb 118.4 kb 40 b ok
normal 6: 48 b 48 b 48 b 48 b ok
normal 17: 960 b 960 b 960 b 320 b ok
heap stats: peak total freed unit
normal: 33.9 kb 32.8 mb 32.8 mb 1 b ok
huge: 0 b 0 b 0 b 1 b ok
total: 33.9 kb 32.8 mb 32.8 mb 1 b ok
malloc requested: 32.8 mb
committed: 58.2 kb 58.2 kb 58.2 kb 1 b ok
reserved: 2.0 mb 2.0 mb 2.0 mb 1 b ok
reset: 0 b 0 b 0 b 1 b ok
segments: 1 1 1
-abandoned: 0
pages: 6 6 6
-abandoned: 0
mmaps: 3
mmap fast: 0
mmap slow: 1
threads: 0
elapsed: 2.022s
process: user: 1.781s, system: 0.016s, faults: 756, reclaims: 0, rss: 2.7 mb

The above model of using the mi_ prefixed API is not always possible though in existing programs that already use the standard malloc interface, and another option is to override the standard malloc interface completely and redirect all calls to the mimalloc library instead.

See Overriding Malloc for more info.

diff --git a/test/main-override.cpp b/test/main-override.cpp index d082ade3..fcf3970f 100644 --- a/test/main-override.cpp +++ b/test/main-override.cpp @@ -71,16 +71,16 @@ static Static s = Static(); bool test_stl_allocator1() { - std::vector> vec; + std::vector > vec; vec.push_back(1); vec.pop_back(); return vec.size() == 0; } -bool test_stl_allocator2() { - struct some_struct { int i; int j; double z; }; +struct some_struct { int i; int j; double z; }; - std::vector> vec; +bool test_stl_allocator2() { + std::vector > vec; vec.push_back(some_struct()); vec.pop_back(); return vec.size() == 0;