pass page to block_zero_init and extend to block_size
commit 7a91368108
parent 739d11313c
include/mimalloc-internal.h

@@ -20,7 +20,6 @@ terms of the MIT license. A copy of the license can be found in the file
 #define mi_trace_message(...)
 #endif
-

 // "options.c"
 void _mi_fputs(mi_output_fun* out, const char* prefix, const char* message);
 void _mi_fprintf(mi_output_fun* out, const char* fmt, ...);
@@ -101,7 +100,7 @@ void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero);
 void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero);
 mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p);
 bool _mi_free_delayed_block(mi_block_t* block);
-void _mi_block_zero_init(void* p, size_t size);
+void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size);

 #if MI_DEBUG>1
 bool _mi_page_is_valid(mi_page_t* page);
@@ -182,6 +181,14 @@ static inline uintptr_t _mi_align_up(uintptr_t sz, size_t alignment) {
   }
 }

+// Is memory zero initialized?
+static inline bool mi_mem_is_zero(void* p, size_t size) {
+  for (size_t i = 0; i < size; i++) {
+    if (((uint8_t*)p)[i] != 0) return false;
+  }
+  return true;
+}
+
 // Align a byte size to a size in _machine words_,
 // i.e. byte size == `wsize*sizeof(void*)`.
 static inline size_t _mi_wsize_from_size(size_t size) {
@@ -189,6 +196,11 @@ static inline size_t _mi_wsize_from_size(size_t size) {
   return (size + sizeof(uintptr_t) - 1) / sizeof(uintptr_t);
 }


 /* -----------------------------------------------------------
   The thread local default heap
 ----------------------------------------------------------- */

 extern const mi_heap_t _mi_heap_empty;  // read-only empty heap, initial value of the thread local default heap
 extern mi_heap_t _mi_heap_main;         // statically allocated main backing heap
 extern bool _mi_process_is_initialized;
@@ -220,6 +232,10 @@ static inline bool mi_heap_is_initialized(mi_heap_t* heap) {
   return (heap != &_mi_heap_empty);
 }

 /* -----------------------------------------------------------
   Pages
 ----------------------------------------------------------- */

 static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t size) {
   mi_assert_internal(size <= MI_SMALL_SIZE_MAX);
   return heap->pages_free_direct[_mi_wsize_from_size(size)];
@@ -230,7 +246,6 @@ static inline mi_page_t* _mi_get_free_small_page(size_t size) {
   return _mi_heap_get_free_small_page(mi_get_default_heap(), size);
 }
-

 // Segment that contains the pointer
 static inline mi_segment_t* _mi_ptr_segment(const void* p) {
   // mi_assert_internal(p != NULL);
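The header changes add `mi_mem_is_zero`, a plain byte scan used by the new `mi_assert_expensive` checks, and extend `_mi_block_zero_init` to take the owning page. The standalone snippet below is only a sketch of how the byte scan behaves outside the allocator; `mi_mem_is_zero` itself is copied from the diff, while the test buffers and `main` driver are hypothetical illustration.

// Sketch: mi_mem_is_zero copied from the diff; the buffers and main are
// hypothetical illustration, not part of mimalloc.
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static inline bool mi_mem_is_zero(void* p, size_t size) {
  for (size_t i = 0; i < size; i++) {
    if (((uint8_t*)p)[i] != 0) return false;
  }
  return true;
}

int main(void) {
  void* zeroed = calloc(1, 64);              // calloc returns zero-initialized memory
  if (zeroed == NULL) return 1;
  uint8_t dirty[64];
  memset(dirty, 0xAA, sizeof(dirty));        // deliberately non-zero
  printf("zeroed block: %d\n", mi_mem_is_zero(zeroed, 64));            // prints 1
  printf("dirty block:  %d\n", mi_mem_is_zero(dirty, sizeof(dirty)));  // prints 0
  free(zeroed);
  return 0;
}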
src/alloc-aligned.c

@@ -33,7 +33,7 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* heap, size_t size, size_t
     void* p = _mi_page_malloc(heap,page,size);
     mi_assert_internal(p != NULL);
     mi_assert_internal(((uintptr_t)p + offset) % alignment == 0);
-    if (zero) _mi_block_zero_init(p,size);
+    if (zero) _mi_block_zero_init(page,p,size);
     return p;
   }
 }
@@ -117,9 +117,16 @@ static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t ne
   void* newp = mi_heap_malloc_aligned_at(heap,newsize,alignment,offset);
   if (newp != NULL) {
     if (zero && newsize > size) {
-      // also set last word in the previous allocation to zero to ensure any padding is zero-initialized
-      size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
-      memset((uint8_t*)newp + start, 0, newsize - start);
+      const mi_page_t* page = _mi_ptr_page(newp);
+      if (page->flags.is_zero) {
+        // already zero initialized
+        mi_assert_expensive(mi_mem_is_zero(newp,newsize));
+      }
+      else {
+        // also set last word in the previous allocation to zero to ensure any padding is zero-initialized
+        size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
+        memset((uint8_t*)newp + start, 0, newsize - start);
+      }
     }
     memcpy(newp, p, (newsize > size ? size : newsize));
     mi_free(p);  // only free if successful
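In `mi_heap_realloc_zero_aligned_at` the tail of a growing, zero-requested reallocation is now only memset when the backing page is not already known to be zero; on a zero page an expensive assertion double-checks the claim instead. The sketch below shows just the tail-zeroing arithmetic (zero from the last machine word of the old size up to the new size, so padding after the old contents is cleared too); `zero_extend` and the driver around it are hypothetical, not mimalloc API.

// Sketch of the tail-zeroing rule from the non-zero-page branch above.
// zero_extend is a hypothetical helper, not part of mimalloc.
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static void zero_extend(void* newp, size_t size, size_t newsize) {
  // start at the last machine word of the old allocation (or at 0 for tiny sizes)
  // so any padding bytes after the old contents are zeroed as well
  size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
  memset((uint8_t*)newp + start, 0, newsize - start);
}

int main(void) {
  size_t size = 24, newsize = 64;
  uint8_t* newp = malloc(newsize);
  if (newp == NULL) return 1;
  memset(newp, 0xFF, newsize);   // stand-in for freshly allocated, possibly dirty memory
  zero_extend(newp, size, newsize);
  // bytes [size - sizeof(intptr_t), newsize) are now zero
  free(newp);
  return 0;
}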
src/alloc.c
@@ -89,31 +89,28 @@ extern inline void* mi_malloc(size_t size) mi_attr_noexcept {
   return mi_heap_malloc(mi_get_default_heap(), size);
 }

-void _mi_block_zero_init(void* p, size_t size) {
+void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size) {
   // note: we need to initialize the whole block to zero, not just size
   // or the recalloc/rezalloc functions cannot safely expand in place (see issue #63)
+  UNUSED(size);
   mi_assert_internal(p != NULL);
-  // already zero initialized memory?
-  if (size > 4*sizeof(void*)) { // don't bother for small sizes
-    mi_page_t* page = _mi_ptr_page(p);
-    if (page->flags.is_zero) {
-      ((mi_block_t*)p)->next = 0;
-      #if MI_DEBUG>0
-      for (size_t i = 0; i < (page->block_size/sizeof(uintptr_t)); i++) {
-        if (((uintptr_t*)p)[i] != 0) {
-          _mi_assert_fail("page not zero", __FILE__, __LINE__, "_mi_block_zero_init");
-        }
-      }
-      #endif
-      return; // and done
-    }
-  }
+  mi_assert_internal(size > 0 && page->block_size >= size);
+  mi_assert_internal(_mi_ptr_page(p)==page);
+  if (page->flags.is_zero) {
+    // already zero initialized memory?
+    ((mi_block_t*)p)->next = 0;  // clear the free list pointer
+    mi_assert_expensive(mi_mem_is_zero(p,page->block_size));
+  }
+  else {
+    // otherwise memset
+    memset(p, 0, page->block_size);
+  }
-  // otherwise memset
-  memset(p, 0, size);
 }

 void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) {
   void* p = mi_heap_malloc(heap,size);
   if (zero && p != NULL) {
-    _mi_block_zero_init(p,size);
+    _mi_block_zero_init(_mi_ptr_page(p),p,size);  // todo: can we avoid getting the page again?
   }
   return p;
 }
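The rewritten `_mi_block_zero_init` always covers the full `page->block_size` rather than just the requested `size`, so `recalloc`/`rezalloc` can later expand in place (issue #63), and it uses the page's `is_zero` flag to skip the memset entirely, clearing only the first word where the free-list pointer lived. The toy sketch below mirrors that branch; `toy_page_t`, `toy_block_t` and `block_zero_init` are hypothetical stand-ins, not mimalloc types.

// Toy model of the fast path in the new _mi_block_zero_init: on a page whose
// memory is known to be zero, only the embedded free-list pointer in the first
// word can be non-zero, so clearing it is enough; otherwise memset the whole
// block_size. All names here are hypothetical stand-ins.
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

typedef struct toy_block_s { struct toy_block_s* next; } toy_block_t;  // first word = free-list link
typedef struct toy_page_s  { size_t block_size; bool is_zero; } toy_page_t;

static void block_zero_init(const toy_page_t* page, void* p) {
  if (page->is_zero) {
    ((toy_block_t*)p)->next = NULL;     // rest of the block is already zero
  }
  else {
    memset(p, 0, page->block_size);     // zero the whole block, not just the requested size
  }
}

int main(void) {
  char block[64] = {0};                 // pretend this came from a zero-initialized page
  toy_page_t page = { sizeof(block), true };
  block_zero_init(&page, block);
  return 0;
}

Call sites now pass the page they already have, as in `mi_heap_malloc_zero_aligned_at`, or look it up once with `_mi_ptr_page` as `_mi_heap_malloc_zero` does above.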
src/page.c

@@ -81,14 +81,10 @@ static bool mi_page_is_valid_init(mi_page_t* page) {
   mi_assert_internal(mi_page_list_is_valid(page,page->free));
   mi_assert_internal(mi_page_list_is_valid(page,page->local_free));

-  #if 0
+  #if MI_DEBUG>3 // generally too expensive to check this
   if (page->flags.is_zero) {
     for(mi_block_t* block = page->free; block != NULL; mi_block_next(page,block)) {
-      for (size_t i = 1; i < (page->block_size/sizeof(uintptr_t)); i++) {
-        if (((uintptr_t*)block)[i] != 0) {
-          return false;
-        }
-      }
+      mi_assert_expensive(mi_mem_is_zero(block + 1, page->block_size - sizeof(mi_block_t)));
     }
   }
   #endif
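The debug-only walk over the free list in `mi_page_is_valid_init`, previously disabled with `#if 0`, is re-enabled under `MI_DEBUG>3` and collapses the word-by-word loop into a single `mi_mem_is_zero` call that skips the first word of each block, since that word holds the free-list link. A minimal sketch of that per-block check, with hypothetical names:

// Sketch of the per-block check behind the new assertion: skip the free-list
// link stored in the block's first word, then require the rest to be zero.
// sketch_block_t and block_tail_is_zero are hypothetical names.
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef struct sketch_block_s { struct sketch_block_s* next; } sketch_block_t;

static bool mem_is_zero(const void* p, size_t size) {
  for (size_t i = 0; i < size; i++) {
    if (((const uint8_t*)p)[i] != 0) return false;
  }
  return true;
}

static bool block_tail_is_zero(sketch_block_t* block, size_t block_size) {
  // block + 1 points just past the embedded free-list pointer
  return mem_is_zero(block + 1, block_size - sizeof(sketch_block_t));
}

int main(void) {
  sketch_block_t storage[8] = {0};   // 8 pointer-sized words, all zero
  storage[0].next = &storage[1];     // a live free-list link in the first word
  return block_tail_is_zero(storage, sizeof(storage)) ? 0 : 1;
}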