merge from dev
commit 7e492f4420
@@ -580,9 +580,6 @@ typedef struct mi_segments_tld_s {
   size_t         peak_count;   // peak number of segments
   size_t         current_size; // current size of all segments
   size_t         peak_size;    // peak size of all segments
-  size_t         cache_count;  // number of segments in the cache
-  size_t         cache_size;   // total size of all segments in the cache
-  mi_segment_t*  cache;        // (small) cache of segments
   mi_stats_t*    stats;        // points to tld stats
   mi_os_tld_t*   os;           // points to os stats
 } mi_segments_tld_t;
@@ -316,7 +316,7 @@ typedef enum mi_option_e {
   mi_option_reserve_huge_os_pages,    // reserve N huge OS pages (1GiB) at startup
   mi_option_reserve_huge_os_pages_at, // reserve huge OS pages at a specific NUMA node
   mi_option_reserve_os_memory,        // reserve specified amount of OS memory at startup
-  mi_option_segment_cache,
+  mi_option_deprecated_segment_cache,
   mi_option_page_reset,
   mi_option_abandoned_page_decommit,
   mi_option_deprecated_segment_reset,
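Note: the option is renamed in place rather than removed, so the ordinal value of every later enumerator stays unchanged for callers that pass options as raw integers. A hypothetical sketch of the pattern (names invented here, not mimalloc's):

    typedef enum my_option_e {
      my_option_reserve_os_memory,        // = 0
      my_option_deprecated_segment_cache, // = 1 (was my_option_segment_cache; slot kept, value preserved)
      my_option_page_reset,               // = 2, unchanged because the slot above remains
    } my_option_t;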
@@ -32,17 +32,17 @@ terms of the MIT license. A copy of the license can be found in the file
 #endif
 
 
-size_t mi_malloc_size(const void* p) mi_attr_noexcept {
+mi_decl_nodiscard size_t mi_malloc_size(const void* p) mi_attr_noexcept {
   //if (!mi_is_in_heap_region(p)) return 0;
   return mi_usable_size(p);
 }
 
-size_t mi_malloc_usable_size(const void *p) mi_attr_noexcept {
+mi_decl_nodiscard size_t mi_malloc_usable_size(const void *p) mi_attr_noexcept {
   //if (!mi_is_in_heap_region(p)) return 0;
   return mi_usable_size(p);
 }
 
-size_t mi_malloc_good_size(size_t size) mi_attr_noexcept {
+mi_decl_nodiscard size_t mi_malloc_good_size(size_t size) mi_attr_noexcept {
   return mi_good_size(size);
 }
 
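Note: mi_decl_nodiscard is mimalloc's portability wrapper for a warn-if-result-ignored attribute. A minimal sketch of how such a macro is commonly defined (an assumption for illustration, not mimalloc's exact definition):

    #include <stddef.h>

    // GCC/Clang spell the attribute warn_unused_result; C++17 and C23 have
    // [[nodiscard]]; other compilers get an empty expansion.
    #if defined(__GNUC__) || defined(__clang__)
      #define my_decl_nodiscard __attribute__((warn_unused_result))
    #else
      #define my_decl_nodiscard
    #endif

    my_decl_nodiscard size_t my_malloc_size(const void* p);

With this in place, a caller that invokes my_malloc_size(p) and discards the result gets a compiler warning, which is the point of adding the attribute to these query and allocation functions throughout this file.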
@@ -65,24 +65,24 @@ int mi_posix_memalign(void** p, size_t alignment, size_t size) mi_attr_noexcept
   return 0;
 }
 
-mi_decl_restrict void* mi_memalign(size_t alignment, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_memalign(size_t alignment, size_t size) mi_attr_noexcept {
   void* p = mi_malloc_aligned(size, alignment);
   mi_assert_internal(((uintptr_t)p % alignment) == 0);
   return p;
 }
 
-mi_decl_restrict void* mi_valloc(size_t size) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_valloc(size_t size) mi_attr_noexcept {
   return mi_memalign( _mi_os_page_size(), size );
 }
 
-mi_decl_restrict void* mi_pvalloc(size_t size) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_pvalloc(size_t size) mi_attr_noexcept {
   size_t psize = _mi_os_page_size();
   if (size >= SIZE_MAX - psize) return NULL; // overflow
   size_t asize = _mi_align_up(size, psize);
   return mi_malloc_aligned(asize, psize);
 }
 
-mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept {
   if (mi_unlikely((size&(alignment-1)) != 0)) { // C11 requires alignment>0 && integral multiple, see <https://en.cppreference.com/w/c/memory/aligned_alloc>
     #if MI_DEBUG > 0
     _mi_error_message(EOVERFLOW, "(mi_)aligned_alloc requires the size to be an integral multiple of the alignment (size %zu, alignment %zu)\n", size, alignment);
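Note: the mi_unlikely guard above enforces the C11 rule that aligned_alloc's size must be an integral multiple of the alignment. A small usage sketch against the standard function (C11 makes the bad call undefined; C17 relaxed this so implementations may simply fail):

    #include <stdlib.h>

    int main(void) {
      void* ok  = aligned_alloc(64, 128); // 128 is a multiple of 64: well-defined
      void* bad = aligned_alloc(64, 100); // 100 is not: undefined in C11, may return NULL in C17
      free(ok);
      free(bad);
      return 0;
    }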
@@ -95,13 +95,13 @@ mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_n
   return p;
 }
 
-void* mi_reallocarray( void* p, size_t count, size_t size ) mi_attr_noexcept { // BSD
+mi_decl_nodiscard void* mi_reallocarray( void* p, size_t count, size_t size ) mi_attr_noexcept { // BSD
   void* newp = mi_reallocn(p,count,size);
   if (newp==NULL) { errno = ENOMEM; }
   return newp;
 }
 
-int mi_reallocarr( void* p, size_t count, size_t size ) mi_attr_noexcept { // NetBSD
+mi_decl_nodiscard int mi_reallocarr( void* p, size_t count, size_t size ) mi_attr_noexcept { // NetBSD
   mi_assert(p != NULL);
   if (p == NULL) {
     errno = EINVAL;
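Note: mi_reallocarray follows the BSD reallocarray contract: the count*size product is overflow-checked and errno is set to ENOMEM on failure. A self-contained sketch of that contract (my_reallocarray is an illustrative name, not a mimalloc function):

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>

    // Reject products that would overflow size_t, then defer to realloc;
    // errno reports the failure BSD-style.
    static void* my_reallocarray(void* p, size_t count, size_t size) {
      if (size != 0 && count > SIZE_MAX / size) {
        errno = ENOMEM;
        return NULL;
      }
      return realloc(p, count * size);
    }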
@@ -120,7 +120,7 @@ void* mi__expand(void* p, size_t newsize) mi_attr_noexcept { // Microsoft
   return res;
 }
 
-mi_decl_restrict unsigned short* mi_wcsdup(const unsigned short* s) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict unsigned short* mi_wcsdup(const unsigned short* s) mi_attr_noexcept {
   if (s==NULL) return NULL;
   size_t len;
   for(len = 0; s[len] != 0; len++) { }
@@ -132,7 +132,7 @@ mi_decl_restrict unsigned short* mi_wcsdup(const unsigned short* s) mi_attr_noex
   return p;
 }
 
-mi_decl_restrict unsigned char* mi_mbsdup(const unsigned char* s) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict unsigned char* mi_mbsdup(const unsigned char* s) mi_attr_noexcept {
   return (unsigned char*)mi_strdup((const char*)s);
 }
 
@@ -172,10 +172,10 @@ int mi_wdupenv_s(unsigned short** buf, size_t* size, const unsigned short* name)
   #endif
 }
 
-void* mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept { // Microsoft
+mi_decl_nodiscard void* mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept { // Microsoft
   return mi_recalloc_aligned_at(p, newcount, size, alignment, offset);
 }
 
-void* mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept { // Microsoft
+mi_decl_nodiscard void* mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept { // Microsoft
   return mi_recalloc_aligned(p, newcount, size, alignment);
 }
@@ -124,7 +124,7 @@ mi_decl_cache_align static const mi_tld_t tld_empty = {
   0,
   false,
   NULL, NULL,
-  { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, 0, NULL, tld_empty_stats, tld_empty_os }, // segments
+  { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, tld_empty_stats, tld_empty_os }, // segments
   { 0, tld_empty_stats }, // os
   { MI_STATS_NULL }       // stats
 };
@@ -137,7 +137,7 @@ extern mi_heap_t _mi_heap_main;
 static mi_tld_t tld_main = {
   0, false,
   &_mi_heap_main, & _mi_heap_main,
-  { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, 0, NULL, &tld_main.stats, &tld_main.os }, // segments
+  { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, &tld_main.stats, &tld_main.os }, // segments
   { 0, &tld_main.stats }, // os
   { MI_STATS_NULL }       // stats
 };
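Note: both initializer changes mirror the struct change in the first hunk: the cache_count and cache_size zeros and the NULL cache pointer drop out of the segments sub-initializer. Read field by field against the trimmed mi_segments_tld_s (assuming a leading count member ahead of peak_count, which the hunk's context does not show), the new form is:

    // { MI_SEGMENT_SPAN_QUEUES_EMPTY,    // spans
    //   0, 0,                            // count, peak_count
    //   0, 0,                            // current_size, peak_size
    //   tld_empty_stats, tld_empty_os }  // stats, os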
@@ -485,7 +485,7 @@ bool _mi_preloading(void) {
   return os_preloading;
 }
 
-bool mi_is_redirected(void) mi_attr_noexcept {
+mi_decl_nodiscard bool mi_is_redirected(void) mi_attr_noexcept {
   return mi_redirected;
 }
 
@@ -74,7 +74,7 @@ static mi_option_desc_t options[_mi_option_last] =
   { 0, UNINIT, MI_OPTION(reserve_huge_os_pages) },    // per 1GiB huge pages
   { -1, UNINIT, MI_OPTION(reserve_huge_os_pages_at) },// reserve huge pages at node N
   { 0, UNINIT, MI_OPTION(reserve_os_memory) },
-  { 0, UNINIT, MI_OPTION(segment_cache) },            // cache N segments per thread
+  { 0, UNINIT, MI_OPTION(deprecated_segment_cache) }, // cache N segments per thread
   { 0, UNINIT, MI_OPTION(page_reset) },               // reset page memory on free
   { 0, UNINIT, MI_OPTION_LEGACY(abandoned_page_decommit, abandoned_page_reset) },// decommit free page memory when a thread terminates
   { 0, UNINIT, MI_OPTION(deprecated_segment_reset) },
@@ -116,6 +116,7 @@ void _mi_options_init(void) {
 
 mi_decl_nodiscard long mi_option_get(mi_option_t option) {
   mi_assert(option >= 0 && option < _mi_option_last);
+  if (option < 0 || option >= _mi_option_last) return 0;
   mi_option_desc_t* desc = &options[option];
   mi_assert(desc->option == option); // index should match the option
   if (mi_unlikely(desc->init == UNINIT)) {
@@ -126,6 +127,7 @@ mi_decl_nodiscard long mi_option_get(mi_option_t option) {
 
 void mi_option_set(mi_option_t option, long value) {
   mi_assert(option >= 0 && option < _mi_option_last);
+  if (option < 0 || option >= _mi_option_last) return;
   mi_option_desc_t* desc = &options[option];
   mi_assert(desc->option == option); // index should match the option
   desc->value = value;
@@ -134,6 +136,7 @@ void mi_option_set(mi_option_t option, long value) {
 
 void mi_option_set_default(mi_option_t option, long value) {
   mi_assert(option >= 0 && option < _mi_option_last);
+  if (option < 0 || option >= _mi_option_last) return;
   mi_option_desc_t* desc = &options[option];
   if (desc->init != INITIALIZED) {
     desc->value = value;
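Note: all three option accessors gain the same explicit range check beside the existing mi_assert. The reason both are needed: mi_assert compiles away in release builds, so an out-of-range option value would otherwise index past the options table. A hedged sketch of the pattern:

    #include <assert.h>

    // The assert documents the contract in debug builds; the explicit check
    // keeps release builds memory-safe when the contract is violated.
    long table_get(const long* table, int count, int option) {
      assert(option >= 0 && option < count);       // debug-only diagnostic
      if (option < 0 || option >= count) return 0; // release-mode guard
      return table[option];
    }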
@@ -394,69 +394,13 @@ static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
   }
 }
 
 
-// The thread local segment cache is limited to be at most 1/8 of the peak size of segments in use,
-#define MI_SEGMENT_CACHE_FRACTION (8)
-
-// note: returned segment may be partially reset
-static mi_segment_t* mi_segment_cache_pop(size_t segment_slices, mi_segments_tld_t* tld) {
-  if (segment_slices != 0 && segment_slices != MI_SLICES_PER_SEGMENT) return NULL;
-  mi_segment_t* segment = tld->cache;
-  if (segment == NULL) return NULL;
-  tld->cache_count--;
-  tld->cache = segment->next;
-  segment->next = NULL;
-  mi_assert_internal(segment->segment_slices == MI_SLICES_PER_SEGMENT);
-  _mi_stat_decrease(&tld->stats->segments_cache, 1);
-  return segment;
-}
-
-static bool mi_segment_cache_full(mi_segments_tld_t* tld)
-{
-  // if (tld->count == 1 && tld->cache_count==0) return false; // always cache at least the final segment of a thread
-  size_t max_cache = mi_option_get(mi_option_segment_cache);
-  if (tld->cache_count < max_cache
-       && tld->cache_count < (1 + (tld->peak_count / MI_SEGMENT_CACHE_FRACTION)) // at least allow a 1 element cache
-     ) {
-    return false;
-  }
-  // take the opportunity to reduce the segment cache if it is too large (now)
-  // TODO: this never happens as we check against peak usage, should we use current usage instead?
-  while (tld->cache_count > max_cache) { //(1 + (tld->peak_count / MI_SEGMENT_CACHE_FRACTION))) {
-    mi_segment_t* segment = mi_segment_cache_pop(0,tld);
-    mi_assert_internal(segment != NULL);
-    if (segment != NULL) mi_segment_os_free(segment, tld);
-  }
-  return true;
-}
-
-static bool mi_segment_cache_push(mi_segment_t* segment, mi_segments_tld_t* tld) {
-  mi_assert_internal(segment->next == NULL);
-  if (segment->segment_slices != MI_SLICES_PER_SEGMENT || mi_segment_cache_full(tld)) {
-    return false;
-  }
-  // mi_segment_delayed_decommit(segment, true, tld->stats);
-  mi_assert_internal(segment->segment_slices == MI_SLICES_PER_SEGMENT);
-  mi_assert_internal(segment->next == NULL);
-  segment->next = tld->cache;
-  tld->cache = segment;
-  tld->cache_count++;
-  _mi_stat_increase(&tld->stats->segments_cache,1);
-  return true;
-}
-
-// called by threads that are terminating to free cached segments
+// called by threads that are terminating
 void _mi_segment_thread_collect(mi_segments_tld_t* tld) {
-  mi_segment_t* segment;
-  while ((segment = mi_segment_cache_pop(0,tld)) != NULL) {
-    mi_segment_os_free(segment, tld);
-  }
-  mi_assert_internal(tld->cache_count == 0);
-  mi_assert_internal(tld->cache == NULL);
+  MI_UNUSED(tld);
+  // nothing to do
 }
 
 
 /* -----------------------------------------------------------
    Span management
 ----------------------------------------------------------- */
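Note: the deleted cache was an intrusive LIFO list of full-size segments kept per thread, with admission bounded both by mi_option_segment_cache and by a fraction of the peak segment count. The bound reduces to this predicate (distilled from the removed mi_segment_cache_full, with MI_SEGMENT_CACHE_FRACTION = 8):

    #include <stdbool.h>
    #include <stddef.h>

    // Room in the removed per-thread cache: under the configured maximum AND
    // under 1 + peak/8, so a one-element cache was always allowed.
    static bool cache_has_room(size_t cache_count, size_t peak_count, size_t max_cache) {
      return cache_count < max_cache
          && cache_count < 1 + peak_count / 8;
    }

With the cache gone, freed segments go straight back to the OS (see the mi_segment_free hunk below), and _mi_segment_thread_collect has nothing left to do at thread exit.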
@@ -927,7 +871,7 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_
   size_t guard_slices = 0;
   if (MI_SECURE>0) {
     // in secure mode, we set up a protected page in between the segment info
-    // and the page data
+    // and the page data, and at the end of the segment.
     size_t os_pagesize = _mi_os_page_size();
     mi_assert_internal(mi_segment_info_size(segment) - os_pagesize >= pre_size);
     _mi_os_protect((uint8_t*)segment + mi_segment_info_size(segment) - os_pagesize, os_pagesize);
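Note: the updated comment records that secure mode guards both boundaries. A layout sketch (illustrative, derived from the comment and the _mi_os_protect call above):

    // +--------------+------------+------------------+------------+
    // | segment info | guard page |   page data ...  | guard page |
    // +--------------+------------+------------------+------------+
    //                  ^ protected                     ^ protected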
@@ -969,6 +913,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_segments_tld_t* tld, m
 
 
 static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) {
+  MI_UNUSED(force);
   mi_assert_internal(segment != NULL);
   mi_assert_internal(segment->next == NULL);
   mi_assert_internal(segment->used == 0);
@@ -992,13 +937,8 @@ static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t
   // stats
   _mi_stat_decrease(&tld->stats->page_committed, mi_segment_info_size(segment));
 
-  if (!force && mi_segment_cache_push(segment, tld)) {
-    // it is put in our cache
-  }
-  else {
-    // otherwise return it to the OS
-    mi_segment_os_free(segment, tld);
-  }
+  // return it to the OS
+  mi_segment_os_free(segment, tld);
 }
 
 
@@ -1488,15 +1428,10 @@ static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_
 {
   mi_assert_internal(block_size < MI_HUGE_BLOCK_SIZE);
   mi_assert_internal(block_size <= MI_LARGE_OBJ_SIZE_MAX);
-  // 1. try to get a segment from our cache
-  mi_segment_t* segment = mi_segment_cache_pop(MI_SEGMENT_SIZE, tld);
-  if (segment != NULL) {
-    mi_segment_init(segment, 0, tld, os_tld, NULL);
-    return segment;
-  }
-  // 2. try to reclaim an abandoned segment
+
+  // 1. try to reclaim an abandoned segment
   bool reclaimed;
-  segment = mi_segment_try_reclaim(heap, needed_slices, block_size, &reclaimed, tld);
+  mi_segment_t* segment = mi_segment_try_reclaim(heap, needed_slices, block_size, &reclaimed, tld);
   if (reclaimed) {
     // reclaimed the right page right into the heap
     mi_assert_internal(segment != NULL);
@@ -1506,7 +1441,7 @@ static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_
     // reclaimed a segment with a large enough empty span in it
     return segment;
   }
-  // 3. otherwise allocate a fresh segment
+  // 2. otherwise allocate a fresh segment
   return mi_segment_alloc(0, tld, os_tld, NULL);
 }
 