save decommit_mask for segments in the segment cache

daan 2021-11-10 16:30:21 -08:00
parent 8cc7d0c019
commit 49d64dbc95
5 changed files with 79 additions and 26 deletions
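
In short: a segment's decommit_mask (the committed parts that are still scheduled for delayed decommit) is now saved in the cache slot when a segment is pushed, and restored when it is popped, instead of being discarded. On reuse, mi_segment_init re-arms the decommit timer rather than starting from scratch, and the reset_delay / segment_decommit_delay defaults are tightened from 500/1000 ms to 100/500 ms.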

include/mimalloc-internal.h

@@ -85,8 +85,8 @@ void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinn
 void  _mi_arena_free(void* p, size_t size, size_t memid, bool is_committed, mi_os_tld_t* tld);

 // "segment-cache.c"
-void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
-bool  _mi_segment_cache_push(void* start, size_t size, size_t memid, mi_commit_mask_t commit_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld);
+void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
+bool  _mi_segment_cache_push(void* start, size_t size, size_t memid, mi_commit_mask_t commit_mask, mi_commit_mask_t decommit_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld);
 void  _mi_segment_map_allocated_at(const mi_segment_t* segment);
 void  _mi_segment_map_freed_at(const mi_segment_t* segment);
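
Both segment-cache entry points now carry the decommit mask explicitly: _mi_segment_cache_pop returns it through a new out-parameter and _mi_segment_cache_push takes it by value. A minimal sketch of a cache lookup with the new signature (the tld pointer is assumed to be a valid mi_os_tld_t* from the caller's thread-local data; on a miss the function returns NULL):

  mi_commit_mask_t commit_mask   = mi_commit_mask_empty();
  mi_commit_mask_t decommit_mask = mi_commit_mask_empty();
  bool   large = false, is_pinned = false, is_zero = false;
  size_t memid = 0;
  // on a hit, commit_mask/decommit_mask describe the returned MI_SEGMENT_SIZE block
  void* p = _mi_segment_cache_pop(MI_SEGMENT_SIZE, &commit_mask, &decommit_mask,
                                  &large, &is_pinned, &is_zero, &memid, tld);
  if (p != NULL) {
    // reuse p; bits in decommit_mask are committed but still scheduled for decommit
  }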

src/options.c

@@ -89,8 +89,8 @@ static mi_option_desc_t options[_mi_option_last] =
   { 1, UNINIT, MI_OPTION(eager_commit_delay) },        // the first N segments per thread are not eagerly committed (but per page in the segment on demand)
 #endif
   { 1, UNINIT, MI_OPTION(allow_decommit) },            // decommit slices when no longer used (after reset_delay milli-seconds)
-  { 500,  UNINIT, MI_OPTION(reset_delay) },            // page reset delay in milli-seconds (= decommit)
-  { 1000, UNINIT, MI_OPTION(segment_decommit_delay) }, // decommit delay in milli-seconds for freed segments
+  { 100, UNINIT, MI_OPTION(reset_delay) },             // page reset delay in milli-seconds (= decommit)
+  { 500, UNINIT, MI_OPTION(segment_decommit_delay) },  // decommit delay in milli-seconds for freed segments
   { 0, UNINIT, MI_OPTION(use_numa_nodes) },            // 0 = use available numa nodes, otherwise use at most N nodes.
   { 0, UNINIT, MI_OPTION(limit_os_alloc) },            // 1 = do not use OS memory for allocation (but only reserved arenas)
   { 100, UNINIT, MI_OPTION(os_tag) },                  // only apple specific for now but might serve more or less related purpose
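
Both delays are halved: reset_delay 500 -> 100 ms and segment_decommit_delay 1000 -> 500 ms, presumably affordable now that a reused segment keeps its pending decommit_mask instead of losing it. Condensed from the push path below, the segment delay is consumed as:

  long delay = mi_option_get(mi_option_segment_decommit_delay);
  if (delay == 0) {
    // decommit immediately and clear the pending mask
  }
  else {
    // the purge pass decommits this slot once the expiry passes
    mi_atomic_storei64_release(&slot->expire, _mi_clock_now() + delay);
  }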

src/segment-cache.c

@@ -22,13 +22,14 @@ terms of the MIT license. A copy of the license can be found in the file
 #define MI_CACHE_MAX      (MI_BITMAP_FIELD_BITS*MI_CACHE_FIELDS)  // 1024 on 64-bit
 #define BITS_SET()        ATOMIC_VAR_INIT(UINTPTR_MAX)
-#define MI_CACHE_BITS_SET MI_INIT16(BITS_SET)
+#define MI_CACHE_BITS_SET MI_INIT16(BITS_SET)                     // note: update if MI_CACHE_FIELDS changes

 typedef struct mi_cache_slot_s {
   void*               p;
   size_t              memid;
   bool                is_pinned;
   mi_commit_mask_t    commit_mask;
+  mi_commit_mask_t    decommit_mask;
   _Atomic(mi_msecs_t) expire;
 } mi_cache_slot_t;
@@ -39,8 +40,10 @@ static mi_decl_cache_align mi_bitmap_field_t cache_available_large[MI_CACHE_FIEL
 static mi_decl_cache_align mi_bitmap_field_t cache_inuse[MI_CACHE_FIELDS];  // zero bit = free

-mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
+mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
 {
+  if (_mi_preloading()) return NULL;
 #ifdef MI_CACHE_DISABLE
   return NULL;
 #else
@@ -76,11 +79,11 @@ mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* comm
   *memid = slot->memid;
   *is_pinned = slot->is_pinned;
   *is_zero = false;
-  mi_commit_mask_t cmask = slot->commit_mask; // copy
+  *commit_mask = slot->commit_mask;
+  *decommit_mask = slot->decommit_mask;
   slot->p = NULL;
   mi_atomic_storei64_release(&slot->expire,(mi_msecs_t)0);
-  *commit_mask = cmask;

   // mark the slot as free again
   mi_assert_internal(_mi_bitmap_is_claimed(cache_inuse, MI_CACHE_FIELDS, 1, bitidx));
   _mi_bitmap_unclaim(cache_inuse, MI_CACHE_FIELDS, 1, bitidx);
@@ -140,6 +143,7 @@ static mi_decl_noinline void mi_segment_cache_purge(mi_os_tld_t* tld)
         // decommit committed parts
         // TODO: instead of decommit, we could also free to the OS?
         mi_commit_mask_decommit(&slot->commit_mask, slot->p, MI_SEGMENT_SIZE, tld->stats);
+        slot->decommit_mask = mi_commit_mask_empty();
       }
       _mi_bitmap_unclaim(cache_available, MI_CACHE_FIELDS, 1, bitidx);  // make it available again for a pop
     }
@@ -148,7 +152,7 @@ static mi_decl_noinline void mi_segment_cache_purge(mi_os_tld_t* tld)
   }
 }

-mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t memid, mi_commit_mask_t commit_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld)
+mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t memid, mi_commit_mask_t commit_mask, mi_commit_mask_t decommit_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld)
 {
 #ifdef MI_CACHE_DISABLE
   return false;
@@ -188,11 +192,13 @@ mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t me
   slot->is_pinned = is_pinned;
   mi_atomic_storei64_relaxed(&slot->expire,(mi_msecs_t)0);
   slot->commit_mask = commit_mask;
+  slot->decommit_mask = decommit_mask;
   if (!mi_commit_mask_is_empty(commit_mask) && !is_large && !is_pinned && mi_option_is_enabled(mi_option_allow_decommit)) {
     long delay = mi_option_get(mi_option_segment_decommit_delay);
     if (delay == 0) {
       _mi_abandoned_await_readers(); // wait until safe to decommit
       mi_commit_mask_decommit(&slot->commit_mask, start, MI_SEGMENT_SIZE, tld->stats);
+      slot->decommit_mask = mi_commit_mask_empty();
     }
     else {
       mi_atomic_storei64_release(&slot->expire, _mi_clock_now() + delay);
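
A cache slot now tracks two masks per MI_SEGMENT_SIZE block: commit_mask (which parts are committed) and decommit_mask (committed parts still scheduled for decommit). The code keeps decommit_mask a subset of commit_mask (asserted in segment.c below) and clears it whenever a decommit actually happens. A one-word sketch of that bookkeeping, with a hypothetical sketch_mask_t standing in for mi_commit_mask_t:

  #include <stdbool.h>
  #include <stdint.h>

  typedef uint64_t sketch_mask_t;  // hypothetical stand-in for mi_commit_mask_t

  // invariant maintained across push/pop: every pending bit is also committed
  static bool sketch_is_subset(sketch_mask_t sub, sketch_mask_t super) {
    return (sub & ~super) == 0;
  }

  // what a purge (or a zero decommit delay) does to a slot: decommit whatever
  // is still committed, so nothing remains pending afterwards
  static void sketch_purge(sketch_mask_t* commit_mask, sketch_mask_t* decommit_mask) {
    *commit_mask   = 0;  // cf. mi_commit_mask_decommit(&slot->commit_mask, ...)
    *decommit_mask = 0;  // cf. slot->decommit_mask = mi_commit_mask_empty()
  }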

src/segment.c

@@ -256,7 +256,7 @@ static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
   // _mi_os_free(segment, mi_segment_size(segment), /*segment->memid,*/ tld->stats);
   const size_t size = mi_segment_size(segment);
-  if (size != MI_SEGMENT_SIZE || !_mi_segment_cache_push(segment, size, segment->memid, segment->commit_mask, segment->mem_is_large, segment->mem_is_pinned, tld->os)) {
+  if (size != MI_SEGMENT_SIZE || !_mi_segment_cache_push(segment, size, segment->memid, segment->commit_mask, segment->decommit_mask, segment->mem_is_large, segment->mem_is_pinned, tld->os)) {
     const size_t csize = mi_commit_mask_committed_size(segment->commit_mask, size);
     if (csize > 0 && !segment->mem_is_pinned) _mi_stat_decrease(&_mi_stats_main.committed, csize);
     _mi_abandoned_await_readers(); // wait until safe to free
@@ -650,12 +650,13 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_
   bool is_zero = false;
   const bool commit_info_still_good = (segment != NULL);
   mi_commit_mask_t commit_mask = (segment != NULL ? segment->commit_mask : mi_commit_mask_empty());
+  mi_commit_mask_t decommit_mask = (segment != NULL ? segment->decommit_mask : mi_commit_mask_empty());
   if (segment==NULL) {
     // Allocate the segment from the OS
     bool mem_large = (!eager_delay && (MI_SECURE==0)); // only allow large OS pages once we are no longer lazy
     bool is_pinned = false;
     size_t memid = 0;
-    segment = (mi_segment_t*)_mi_segment_cache_pop(segment_size, &commit_mask, &mem_large, &is_pinned, &is_zero, &memid, os_tld);
+    segment = (mi_segment_t*)_mi_segment_cache_pop(segment_size, &commit_mask, &decommit_mask, &mem_large, &is_pinned, &is_zero, &memid, os_tld);
     if (segment==NULL) {
       segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, MI_SEGMENT_SIZE, &commit, &mem_large, &is_pinned, &is_zero, &memid, os_tld);
       if (segment == NULL) return NULL;  // failed to allocate
@@ -691,9 +692,22 @@ static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_
   if (!commit_info_still_good) {
     segment->commit_mask = commit_mask; // on lazy commit, the initial part is always committed
     segment->allow_decommit = (mi_option_is_enabled(mi_option_allow_decommit) && !segment->mem_is_pinned && !segment->mem_is_large);
-    segment->decommit_expire = 0;
-    segment->decommit_mask = mi_commit_mask_empty();
+    if (segment->allow_decommit) {
+      segment->decommit_expire = _mi_clock_now() + mi_option_get(mi_option_reset_delay);
+      segment->decommit_mask = decommit_mask;
+      mi_assert_internal(mi_commit_mask_all_set(segment->commit_mask, segment->decommit_mask));
+      #if MI_DEBUG>2
+      const size_t commit_needed = _mi_divide_up(info_slices*MI_SEGMENT_SLICE_SIZE, MI_COMMIT_SIZE);
+      mi_assert_internal(!mi_commit_mask_any_set(segment->decommit_mask, mi_commit_mask_create(0, commit_needed)));
+      #endif
+    }
+    else {
+      mi_assert_internal(mi_commit_mask_is_empty(decommit_mask));
+      segment->decommit_expire = 0;
+      segment->decommit_mask = mi_commit_mask_empty();
+    }
   }

   // initialize segment info
   segment->segment_slices = segment_slices;
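
The behavioral core is in mi_segment_init: a segment that comes back from the cache no longer resets to an empty decommit_mask with decommit_expire = 0; the saved mask is reinstated and the expiry re-armed to _mi_clock_now() + reset_delay, so pending decommits survive the cache round-trip without being forced on reuse. The MI_DEBUG>2 assertion additionally checks that the segment's metadata area (the first commit_needed commit chunks) is never left pending decommit.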

test/main-override.cpp

@@ -35,22 +35,24 @@ static void test_mt_shutdown();
 static void large_alloc(void);   // issue #363
 static void fail_aslr();         // issue #372
 static void tsan_numa_test();    // issue #414
 static void strdup_test();       // issue #445
+static void bench_alloc_large(void); // issue #xxx

 int main() {
   mi_stats_reset();  // ignore earlier allocations
   heap_thread_free_large();
   heap_no_delete();
   heap_late_free();
   padding_shrink();
   various_tests();
   large_alloc();
   tsan_numa_test();
   strdup_test();
   //test_mt_shutdown();
   //fail_aslr();
+  //bench_alloc_large();
   mi_stats_print(NULL);
   return 0;
 }
@@ -246,11 +248,42 @@ static void fail_aslr() {
 // issues #414
 static void dummy_worker() {
   void* p = mi_malloc(0);
   mi_free(p);
 }

 static void tsan_numa_test() {
   auto t1 = std::thread(dummy_worker);
   dummy_worker();
   t1.join();
 }

+// issue #?
+#include <chrono>
+#include <random>
+#include <iostream>
+static void bench_alloc_large(void) {
+  static constexpr int kNumBuffers = 20;
+  static constexpr size_t kMinBufferSize = 5 * 1024 * 1024;
+  static constexpr size_t kMaxBufferSize = 25 * 1024 * 1024;
+  std::unique_ptr<char[]> buffers[kNumBuffers];
+
+  std::random_device rd;
+  std::mt19937 gen(42); //rd());
+  std::uniform_int_distribution<> size_distribution(kMinBufferSize, kMaxBufferSize);
+  std::uniform_int_distribution<> buf_number_distribution(0, kNumBuffers - 1);
+
+  static constexpr int kNumIterations = 2000;
+  const auto start = std::chrono::steady_clock::now();
+  for (int i = 0; i < kNumIterations; ++i) {
+    int buffer_idx = buf_number_distribution(gen);
+    size_t new_size = size_distribution(gen);
+    buffers[buffer_idx] = std::make_unique<char[]>(new_size);
+  }
+  const auto end = std::chrono::steady_clock::now();
+  const auto num_ms = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
+  const auto us_per_allocation = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() / kNumIterations;
+  std::cout << kNumIterations << " allocations Done in " << num_ms << "ms." << std::endl;
+  std::cout << "Avg " << us_per_allocation << " us per allocation" << std::endl;
+}
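
To exercise the benchmark, uncomment //bench_alloc_large(); in main above. It performs 2000 allocations of 5-25 MiB buffers cycling over 20 slots, so most iterations free one large buffer and immediately allocate another; with the decommit mask preserved across the segment cache, such reuse should avoid a full decommit/re-commit cycle per iteration. The fixed seed (42) keeps runs comparable.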