merge from dev

daanx 2023-04-18 17:54:07 -07:00
commit eff7940f30
5 changed files with 14 additions and 12 deletions

include/mimalloc/types.h

@@ -299,7 +299,7 @@ typedef struct mi_page_s {
   uint16_t capacity;          // number of blocks committed, must be the first field, see `segment.c:page_clear`
   uint16_t reserved;          // number of blocks reserved in memory
   mi_page_flags_t flags;      // `in_full` and `has_aligned` flags (8 bits)
-  uint8_t is_zero : 1;        // `true` if the blocks in the free list are zero initialized
+  uint8_t free_is_zero : 1;   // `true` if the blocks in the free list are zero initialized
   uint8_t retire_expire : 7;  // expiration count for retired blocks
   mi_block_t* free;           // list of available free blocks (`malloc` allocates from this list)

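The rename above is the heart of the commit: the bit tracks whether the blocks on the page's free list are zero, not whether the page as a whole is. A minimal standalone sketch (hypothetical struct, not mimalloc's real header) showing that the 1-bit flag and the 7-bit counter still pack into a single byte, so only the name changes:

#include <stdint.h>
#include <stdio.h>

typedef struct page_bits_s {
  uint8_t free_is_zero : 1;   // `true` if blocks on the free list are zero initialized
  uint8_t retire_expire : 7;  // expiration count for retired blocks
} page_bits_t;

int main(void) {
  page_bits_t b = { .free_is_zero = 1, .retire_expire = 3 };
  // on common ABIs both bit-fields share one byte
  printf("size=%zu flag=%u expire=%u\n", sizeof(b), (unsigned)b.free_is_zero, (unsigned)b.retire_expire);
  return 0;
}
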
src/alloc-aligned.c

@@ -227,7 +227,7 @@ static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t ne
   if (newp != NULL) {
     if (zero && newsize > size) {
       const mi_page_t* page = _mi_ptr_page(newp);
-      if (page->is_zero) {
+      if (page->free_is_zero) {
         // already zero initialized
         mi_assert_expensive(mi_mem_is_zero(newp,newsize));
       }

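Context for the hunk above: on a growing realloc that must return zeroed memory, the newly exposed tail only needs an explicit clear when the block did not come from a zero free list. A hedged sketch of that decision; `zero_realloc_tail` and its parameters are illustrative stand-ins, not mimalloc's API:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

static void zero_realloc_tail(void* newp, size_t oldsize, size_t newsize,
                              bool free_is_zero) {
  if (newsize <= oldsize) return;  // shrinking exposes no new bytes
  if (free_is_zero) return;        // tail is already zero-initialized
  memset((char*)newp + oldsize, 0, newsize - oldsize);  // clear only the new tail
}
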
src/alloc.c

@@ -46,11 +46,12 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
   // zero the block? note: we need to zero the full block size (issue #63)
   if mi_unlikely(zero) {
     mi_assert_internal(page->xblock_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic)
-    if (page->is_zero) {
+    mi_assert_internal(page->xblock_size >= MI_PADDING_SIZE);
+    if (page->free_is_zero) {
       block->next = 0;
+      mi_track_mem_defined(block, page->xblock_size - MI_PADDING_SIZE);
     }
     else {
-      mi_assert_internal(page->xblock_size >= MI_PADDING_SIZE);
       _mi_memzero_aligned(block, page->xblock_size - MI_PADDING_SIZE);
     }
   }

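The hunk above is the fast path that motivates the flag: when `free_is_zero` holds, the only dirty word in a block taken from the free list is its intrusive `next` link, so zeroing degenerates to clearing one pointer instead of a full memset. A simplified sketch with stand-in types (the real code uses mi_block_t and page->xblock_size):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

typedef struct block_s { struct block_s* next; } block_t;

static void zero_fresh_block(block_t* block, size_t usable_size, bool free_is_zero) {
  if (free_is_zero) {
    block->next = NULL;             // only the free-list link is non-zero
  }
  else {
    memset(block, 0, usable_size);  // conservative full clear
  }
}
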
src/page.c

@@ -84,7 +84,7 @@ static bool mi_page_is_valid_init(mi_page_t* page) {
   mi_assert_internal(mi_page_list_is_valid(page,page->local_free));
   #if MI_DEBUG>3 // generally too expensive to check this
-  if (page->is_zero) {
+  if (page->free_is_zero) {
     const size_t ubsize = mi_page_usable_block_size(page);
     for(mi_block_t* block = page->free; block != NULL; block = mi_block_next(page,block)) {
       mi_assert_expensive(mi_mem_is_zero(block + 1, ubsize - sizeof(mi_block_t)));
@@ -221,7 +221,7 @@ void _mi_page_free_collect(mi_page_t* page, bool force) {
     // usual case
     page->free = page->local_free;
     page->local_free = NULL;
-    page->is_zero = false;
+    page->free_is_zero = false;
   }
   else if (force) {
     // append -- only on shutdown (force) as this is a linear operation
@@ -233,7 +233,7 @@ void _mi_page_free_collect(mi_page_t* page, bool force) {
     mi_block_set_next(page, tail, page->free);
     page->free = page->local_free;
     page->local_free = NULL;
-    page->is_zero = false;
+    page->free_is_zero = false;
   }
 }
@@ -666,13 +666,14 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   page->keys[0] = _mi_heap_random_next(heap);
   page->keys[1] = _mi_heap_random_next(heap);
   #endif
-  page->is_zero = page->is_zero_init;
-  #if MI_DEBUG>1
+  page->free_is_zero = page->is_zero_init;
+  #if MI_DEBUG>2
   if (page->is_zero_init) {
-    mi_assert(mi_mem_is_zero(page_start, page_size));
+    mi_track_mem_defined(page_start, page_size);
+    mi_assert_expensive(mi_mem_is_zero(page_start, page_size));
   }
   #endif
   mi_assert_internal(page->is_committed);
   mi_assert_internal(page->capacity == 0);
   mi_assert_internal(page->free == NULL);

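The page.c hunks above maintain one invariant from both ends: `mi_page_init` may set `free_is_zero` because freshly mapped memory can arrive zeroed (`is_zero_init`), while `_mi_page_free_collect` must clear it as soon as user-freed blocks from `local_free` are adopted into `free`. A compact stand-in model (simplified types, not the real mi_page_t):

#include <stddef.h>

typedef struct block_s { struct block_s* next; } block_t;

typedef struct page_s {
  block_t* free;              // list that `malloc` allocates from
  block_t* local_free;        // blocks freed by the owning thread
  unsigned  free_is_zero : 1; // only valid for never-used memory
} page_t;

// stand-in for the "usual case" above: adopting the local free list
// drops the zero guarantee, since freed blocks hold stale user data
static void free_collect_usual(page_t* page) {
  page->free = page->local_free;
  page->local_free = NULL;
  page->free_is_zero = 0;
}
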
test/test-stress.c

@@ -39,7 +39,7 @@ static int ITER = 50; // N full iterations destructing and re-creating a
 #define STRESS   // undefine for leak test
-static bool allow_large_objects = false; // allow very large objects? (set to `true` if SCALE>100)
+static bool allow_large_objects = true;  // allow very large objects? (set to `true` if SCALE>100)
 static size_t use_one_size = 0;          // use single object size of `N * sizeof(uintptr_t)`?