PR#66 by kickunderscore to ensure consistent small block alignment

parent d6901558cd
commit d35fc6cdc4

@@ -48,7 +48,7 @@ void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t*
 void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld);
 bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segments_tld_t* tld);
 void _mi_segment_thread_collect(mi_segments_tld_t* tld);
-uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size); // page start for any page
+uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t block_size, size_t* page_size); // page start for any page

 // "page.c"
 void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc;

@@ -209,7 +209,7 @@ static inline mi_page_t* _mi_segment_page_of(const mi_segment_t* segment, const

 // Quick page start for initialized pages
 static inline uint8_t* _mi_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size) {
-  return _mi_segment_page_start(segment, page, page_size);
+  return _mi_segment_page_start(segment, page, page->block_size, page_size);
 }

 // Get the page containing the pointer

src/os.c

@@ -216,6 +216,7 @@ static void* mi_mmap_aligned(size_t size, size_t alignment, mi_stats_t* stats) {
   UNUSED(size);
   UNUSED(alignment);
 #endif
+  UNUSED(stats);
   mi_assert(p == NULL || (uintptr_t)p % alignment == 0);
   if (p != NULL) mi_stat_increase(stats->mmap_calls, 1);
   return p;
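
The added UNUSED(stats) matters for builds where the statistics call compiles to a no-op, leaving the stats parameter otherwise unreferenced. A minimal standalone sketch of the pattern, assuming a conventional cast-to-void definition of UNUSED; the function and values are illustrative, not mimalloc's actual code:

#include <stddef.h>

// Assumed definition: casting to void marks a value as deliberately unused.
#define UNUSED(x) ((void)(x))

// If a configuration compiles the stats update away, 'stats' would otherwise
// trigger an unused-parameter warning; UNUSED keeps the build warning-clean.
static void* mmap_aligned_sketch(size_t size, size_t alignment, void* stats) {
  UNUSED(size);
  UNUSED(alignment);
  UNUSED(stats);
  return NULL;  // placeholder: the real function maps aligned memory here
}

int main(void) {
  return (mmap_aligned_sketch(4096, 4096, NULL) == NULL) ? 0 : 1;
}

Casting to void silences the warning for one specific parameter without turning the warning off globally.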

@@ -76,7 +76,7 @@ static bool mi_page_is_valid_init(mi_page_t* page) {

   mi_segment_t* segment = _mi_page_segment(page);
   uint8_t* start = _mi_page_start(segment,page,NULL);
-  mi_assert_internal(start == _mi_segment_page_start(segment,page,NULL));
+  mi_assert_internal(start == _mi_segment_page_start(segment,page,page->block_size,NULL));
   //mi_assert_internal(start + page->capacity*page->block_size == page->top);

   mi_assert_internal(mi_page_list_is_valid(page,page->free));

@@ -514,11 +514,11 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   mi_assert(page != NULL);
   mi_segment_t* segment = _mi_page_segment(page);
   mi_assert(segment != NULL);
+  mi_assert_internal(block_size > 0);
   // set fields
   size_t page_size;
-  _mi_segment_page_start(segment, page, &page_size);
+  _mi_segment_page_start(segment, page, block_size, &page_size);
   page->block_size = block_size;
-  mi_assert_internal(block_size>0);
   mi_assert_internal(page_size / block_size < (1L<<16));
   page->reserved = (uint16_t)(page_size / block_size);
   page->cookie = _mi_heap_random(heap) | 1;
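
Right after the changed call, mi_page_init derives page->reserved from page_size / block_size and asserts that the quotient fits in 16 bits, since reserved is a uint16_t. A standalone sketch of that arithmetic with assumed example sizes (a 64 KiB small page of 16-byte blocks; the constants are illustrative, not taken from the commit):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  size_t page_size  = 64 * 1024;   // assumed usable page size
  size_t block_size = 16;          // assumed small block size
  assert(page_size / block_size < (1UL << 16));             // must fit in a uint16_t
  uint16_t reserved = (uint16_t)(page_size / block_size);   // blocks the page can hold
  printf("reserved = %u blocks\n", (unsigned)reserved);     // prints 4096
  return 0;
}

With these assumed sizes the page holds 4096 blocks, comfortably below the 2^16 bound the assert enforces.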

@@ -121,16 +121,25 @@ static void mi_segment_queue_insert_before(mi_segment_queue_t* queue, mi_segment
 }


-// Start of the page available memory
-uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size)
+// Start of the page available memory; can be used on uninitialized pages (only `segment_idx` must be set)
+uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t block_size, size_t* page_size)
 {
   size_t psize = (segment->page_kind == MI_PAGE_HUGE ? segment->segment_size : (size_t)1 << segment->page_shift);
   uint8_t* p = (uint8_t*)segment + page->segment_idx*psize;

   if (page->segment_idx == 0) {
     // the first page starts after the segment info (and possible guard page)
     p += segment->segment_info_size;
     psize -= segment->segment_info_size;
+    // for small objects, ensure the page start is aligned with the block size (PR#66 by kickunderscore)
+    if (block_size > 0 && segment->page_kind == MI_PAGE_SMALL) {
+      size_t adjust = block_size - ((uintptr_t)p % block_size);
+      if (adjust < block_size) {
+        p += adjust;
+        psize -= adjust;
+      }
+      mi_assert_internal((uintptr_t)p % block_size == 0);
+    }
   }
   long secure = mi_option_get(mi_option_secure);
   if (secure > 1 || (secure == 1 && page->segment_idx == segment->capacity - 1)) {
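
The added block above is the core of PR#66: the first page of a segment begins directly after the segment info, so for small pages the start is rounded up to the next multiple of block_size and psize shrinks by the same amount. A standalone sketch of that adjustment, using assumed values for the segment base, info size, and block size (illustrative only, not mimalloc constants):

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  uintptr_t segment_start     = 0x400000;   // assumed segment base address
  size_t    segment_info_size = 0x168;      // assumed info size, not a multiple of block_size
  size_t    block_size        = 64;
  size_t    psize             = 64 * 1024;  // assumed page size

  uintptr_t p = segment_start + segment_info_size;  // first page starts after the segment info
  psize -= segment_info_size;

  // Round p up to the next multiple of block_size, mirroring the added code.
  size_t adjust = block_size - (p % block_size);
  if (adjust < block_size) {   // adjust == block_size means p was already aligned
    p     += adjust;
    psize -= adjust;
  }
  assert(p % block_size == 0);

  printf("page start 0x%" PRIxPTR ", usable size %zu\n", p, psize);
  return 0;
}

Note the adjust < block_size guard: when the start is already aligned the modulo is zero, adjust equals block_size, and no shift is applied.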

@@ -138,7 +147,7 @@ uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* pa
     // secure > 1: every page has an os guard page
     psize -= _mi_os_page_size();
   }

   if (page_size != NULL) *page_size = psize;
   mi_assert_internal(_mi_ptr_page(p) == page);
   mi_assert_internal(_mi_ptr_segment(p) == segment);

@@ -381,7 +390,7 @@ static mi_segment_t* mi_segment_alloc( size_t required, mi_page_kind_t page_kind
 // Available memory in a page
 static size_t mi_page_size(const mi_page_t* page) {
   size_t psize;
-  _mi_segment_page_start(_mi_page_segment(page), page, &psize);
+  _mi_page_start(_mi_page_segment(page), page, &psize);
   return psize;
 }
 #endif

@@ -467,7 +476,7 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, mi_sta
   // reset the page memory to reduce memory pressure?
   if (!page->is_reset && mi_option_is_enabled(mi_option_page_reset)) {
     size_t psize;
-    uint8_t* start = _mi_segment_page_start(segment, page, &psize);
+    uint8_t* start = _mi_page_start(segment, page, &psize);
     mi_stat_increase( stats->reset, psize); // for stats we assume resetting the full page
     page->is_reset = true;
     if (inuse > 0) {
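
The mi_page_size and mi_segment_page_clear hunks above switch call sites that only ever see initialized pages over to the _mi_page_start wrapper, which forwards page->block_size. The payoff named in the commit title: once a small page starts on a block_size boundary, every block handed out at start + i*block_size is block_size-aligned as well. A brief standalone check under that simplified carving model (addresses and sizes are assumed, not mimalloc's):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int main(void) {
  size_t    block_size = 48;        // assumed small block size (not a power of two)
  uintptr_t raw        = 0x400168;  // assumed unaligned page start, as before the fix

  // Align the start up, as _mi_segment_page_start now does for small pages.
  uintptr_t start  = raw;
  size_t    adjust = block_size - (start % block_size);
  if (adjust < block_size) start += adjust;

  // Every block carved out at start + i*block_size is now block_size-aligned.
  for (size_t i = 0; i < 1000; i++) {
    assert((start + i * block_size) % block_size == 0);
  }
  return 0;
}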