fix decommit for huge objects

Daan Leijen 2022-11-23 09:50:29 -08:00
parent 20880807ce
commit 9e56567d23
4 changed files with 39 additions and 27 deletions


@@ -395,9 +395,10 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block)
   #endif
   }
   #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED   // note: when tracking, cannot use mi_usable_size with multi-threading
-  memset(block, MI_DEBUG_FREED, mi_usable_size(block));
+  if (segment->kind != MI_SEGMENT_HUGE) {  // not for huge segments as we just reset the content
+    memset(block, MI_DEBUG_FREED, mi_usable_size(block));
+  }
   #endif
   // Try to put the block on either the page-local thread free list, or the heap delayed free list.
@@ -449,7 +450,9 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block)
   if mi_unlikely(mi_check_is_double_free(page, block)) return;
   mi_check_padding(page, block);
   #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED
-  memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
+  if (!mi_page_is_huge(page)) {  // huge page content may be already decommitted
+    memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
+  }
   #endif
   mi_block_set_next(page, block, page->local_free);
   page->local_free = block;
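
For context, the multi-threaded path above is reached when a huge block is freed on a different thread than the one that allocated it. A minimal sketch of that pattern (not part of this commit; assumes only the public mimalloc API):

    #include <mimalloc.h>
    #include <thread>

    int main() {
      // ~1 GiB: well past the large-object limit, so it gets its own
      // MI_SEGMENT_HUGE segment
      void* huge = mi_malloc(1024ull * 1024 * 1024);
      std::thread t([huge] { mi_free(huge); });  // free on another thread
      t.join();
      return 0;
    }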


@@ -1522,25 +1522,23 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment,
   #if MI_HUGE_PAGE_ABANDON
   segment->thread_id = 0; // huge segments are immediately abandoned
   #endif
-  if (page_alignment > 0) {
-    size_t psize;
-    uint8_t* p = _mi_segment_page_start(segment, page, &psize);
-    uint8_t* aligned_p = (uint8_t*)_mi_align_up((uintptr_t)p, page_alignment);
-    mi_assert_internal(_mi_is_aligned(aligned_p, page_alignment));
-    mi_assert_internal(psize - (aligned_p - p) >= size);
-    if (!segment->allow_decommit) {
-      // decommit the part of the page that is unused; this can be quite large (close to MI_SEGMENT_SIZE)
-      uint8_t* decommit_start = p + sizeof(mi_block_t); // for the free list
-      ptrdiff_t decommit_size = aligned_p - decommit_start;
-      mi_segment_decommit(segment, decommit_start, decommit_size, &_mi_stats_main);
-    }
-  }
   // for huge pages we initialize the xblock_size as we may
   // overallocate to accommodate large alignments.
   size_t psize;
-  _mi_segment_page_start(segment, page, &psize);
+  uint8_t* start = _mi_segment_page_start(segment, page, &psize);
   page->xblock_size = (psize > MI_HUGE_BLOCK_SIZE ? MI_HUGE_BLOCK_SIZE : (uint32_t)psize);
+  // decommit the part of the prefix of a page that will not be used; this can be quite large (close to MI_SEGMENT_SIZE)
+  if (page_alignment > 0 && segment->allow_decommit) {
+    uint8_t* aligned_p = (uint8_t*)_mi_align_up((uintptr_t)start, page_alignment);
+    mi_assert_internal(_mi_is_aligned(aligned_p, page_alignment));
+    mi_assert_internal(psize - (aligned_p - start) >= size);
+    uint8_t* decommit_start = start + sizeof(mi_block_t); // for the free list
+    ptrdiff_t decommit_size = aligned_p - decommit_start;
+    _mi_os_decommit(decommit_start, decommit_size, &_mi_stats_main); // note: cannot use segment_decommit on huge segments
+  }
   return page;
 }
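
The decommit above keeps the first sizeof(mi_block_t) bytes committed, since the block is linked into a free list through its first word, and releases everything up to the aligned address. A standalone sketch of that arithmetic, with a local align_up standing in for the internal _mi_align_up and an assumed 16-byte block header:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // power-of-two rounding; a stand-in for mimalloc's internal _mi_align_up
    static uintptr_t align_up(uintptr_t x, uintptr_t alignment) {
      return (x + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const uintptr_t start          = 0x40000040;            // hypothetical page start
      const uintptr_t page_alignment = 64ull * 1024 * 1024;   // 64 MiB requested alignment
      const uintptr_t aligned_p      = align_up(start, page_alignment);
      const uintptr_t decommit_start = start + 16;            // assumed sizeof(mi_block_t): keep the free-list link committed
      const ptrdiff_t decommit_size  = (ptrdiff_t)(aligned_p - decommit_start);
      // nearly the whole prefix is released; only the offset and header survive
      printf("decommit %td of %td prefix bytes\n",
             decommit_size, (ptrdiff_t)(aligned_p - start));
      return 0;
    }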
@@ -1579,13 +1577,10 @@ void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block)
   mi_assert_internal(segment == _mi_page_segment(page));
   mi_assert_internal(page->used == 1); // this is called just before the free
   mi_assert_internal(page->free == NULL);
-  const size_t csize = mi_page_block_size(page) - sizeof(mi_block_t);
-  uint8_t* p = ( uint8_t*)block + sizeof(mi_block_t);
-  if (segment->allow_decommit) {
-    mi_segment_decommit(segment, p, csize, &_mi_stats_main);
-  }
-  else {
-    _mi_os_reset(p, csize, &_mi_stats_main);
-  }
+  const size_t csize = mi_usable_size(block) - sizeof(mi_block_t);
+  uint8_t* p = (uint8_t*)block + sizeof(mi_block_t);
+  _mi_os_decommit(p, csize, &_mi_stats_main); // note: cannot use segment_decommit on huge segments
 }
 #endif
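
The "cannot use segment_decommit" note applies because a huge segment extends past the range a regular segment's commit mask covers, so the code goes straight to the OS. On POSIX systems a decommit of this kind is roughly the following sketch (mimalloc's own _mi_os_decommit is the portable wrapper; on Windows it would come down to VirtualFree with MEM_DECOMMIT):

    #include <sys/mman.h>
    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t size = 64 * 1024 * 1024;
      void* mem = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(mem != MAP_FAILED);
      char* p = (char*)mem;
      p[0] = 42;                                  // touch a page to commit it
      int err = madvise(p, size, MADV_DONTNEED);  // "decommit": drop the physical pages
      assert(err == 0);
      assert(p[0] == 0);                          // reads now see fresh zero pages
      munmap(p, size);
      return 0;
    }

This is also why the MI_DEBUG_FREED fills in the first file are skipped for huge pages: writing the debug pattern would recommit memory that was just released.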


@@ -37,12 +37,14 @@ static void fail_aslr();  // issue #372
 static void tsan_numa_test();   // issue #414
 static void strdup_test();      // issue #445
 static void bench_alloc_large(void); // issue #xxx
+static void heap_thread_free_huge();
 static void test_stl_allocators();
 
 int main() {
   mi_stats_reset();  // ignore earlier allocations
+  heap_thread_free_huge();
+  /*
   heap_thread_free_large();
   heap_no_delete();
   heap_late_free();
@@ -51,7 +53,7 @@ int main() {
   large_alloc();
   tsan_numa_test();
   strdup_test();
+  */
   test_stl_allocators();
   test_mt_shutdown();
@@ -240,6 +242,18 @@ static void heap_thread_free_large() {
   }
 }
 
+static void heap_thread_free_huge_worker() {
+  mi_free(shared_p);
+}
+
+static void heap_thread_free_huge() {
+  for (int i = 0; i < 100; i++) {
+    shared_p = mi_malloc(1024 * 1024 * 1024);
+    auto t1 = std::thread(heap_thread_free_huge_worker);
+    t1.join();
+  }
+}
+
 static void test_mt_shutdown()
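
The new test exercises the plain (unaligned) huge path. The aligned path fixed in mi_segment_huge_page_alloc needs an over-aligned request; a hypothetical companion test (not in this commit), assuming 64 MiB exceeds MI_ALIGNMENT_MAX so the page_alignment code runs:

    #include <mimalloc.h>
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    static void heap_alloc_huge_aligned() {
      const size_t alignment = 64ull * 1024 * 1024;  // assumed > MI_ALIGNMENT_MAX
      // over-allocates a huge segment; the unused prefix before the aligned
      // address is now decommitted rather than left committed
      void* p = mi_malloc_aligned(1024ull * 1024 * 1024, alignment);
      assert(((uintptr_t)p % alignment) == 0);
      mi_free(p);
    }

    int main() {
      heap_alloc_huge_aligned();
      return 0;
    }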


@@ -91,7 +91,7 @@ static bool chance(size_t perc, random_t r) {
 static void* alloc_items(size_t items, random_t r) {
   if (chance(1, r)) {
-    if (chance(1, r) && allow_large_objects) items *= 10000; // 0.01% giant
+    if (chance(1, r) && allow_large_objects) items *= 50000; // 0.01% giant
     else if (chance(10, r) && allow_large_objects) items *= 1000; // 0.1% huge
     else items *= 100; // 1% large objects;
   }
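
With the branch probabilities from the comments, the bump from 10000x to 50000x roughly doubles the expected per-allocation size multiplier in the stress test. A quick check of that arithmetic (a sketch, not part of the test):

    #include <cstdio>

    int main() {
      const double p_giant = 0.01 * 0.01;         // chance(1), then chance(1): 0.01%
      const double p_huge  = 0.01 * 0.99 * 0.10;  // then chance(10): ~0.1%
      const double p_large = 0.01 * 0.99 * 0.90;  // remaining large branch: ~0.9%
      const double rest    = 0.99;                // 99% of allocations keep their size
      const double e_old = p_giant * 10000 + p_huge * 1000 + p_large * 100 + rest;
      const double e_new = p_giant * 50000 + p_huge * 1000 + p_large * 100 + rest;
      printf("expected items multiplier: %.2f -> %.2f\n", e_old, e_new);  // ~3.87 -> ~7.87
      return 0;
    }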