refine mi_os_good_alloc_size and use it for huge pages (to ensure realloc is bounded as in #153)

Daan Leijen 2019-09-10 13:26:51 -07:00
parent d278c26c0e
commit 1909cfb346
4 changed files with 18 additions and 12 deletions


@@ -43,6 +43,7 @@ size_t _mi_os_page_size(void);
 void _mi_os_init(void);                                    // called from process init
 void* _mi_os_alloc(size_t size, mi_stats_t* stats);        // to allocate thread local data
 void _mi_os_free(void* p, size_t size, mi_stats_t* stats); // to free thread local data
+size_t _mi_os_good_alloc_size(size_t size);
 
 // memory.c
 void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_zero, size_t* id, mi_os_tld_t* tld);


@@ -68,6 +68,9 @@ terms of the MIT license. A copy of the license can be found in the file
 #define MI_INTPTR_SIZE (1<<MI_INTPTR_SHIFT)
 
+#define KiB ((size_t)1024)
+#define MiB (KiB*KiB)
+#define GiB (MiB*KiB)
 
 // ------------------------------------------------------
 // Main internal data-structures

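For reference, the chained definitions expand to the usual power-of-two units: KiB = 2^10, MiB = KiB*KiB = 2^20 and GiB = MiB*KiB = 2^30 bytes. A minimal standalone check (my own sketch, not part of the commit; static_assert requires C11):

#include <assert.h>
#include <stddef.h>

// Same definitions as added to the header above.
#define KiB ((size_t)1024)
#define MiB (KiB*KiB)
#define GiB (MiB*KiB)

static_assert(KiB == ((size_t)1 << 10), "1 KiB is 2^10 bytes");
static_assert(MiB == ((size_t)1 << 20), "1 MiB is 2^20 bytes");
static_assert(GiB == ((size_t)1 << 30), "1 GiB is 2^30 bytes");

int main(void) { return 0; }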

@@ -76,11 +76,16 @@ static bool use_large_os_page(size_t size, size_t alignment) {
   return ((size % large_os_page_size) == 0 && (alignment % large_os_page_size) == 0);
 }
 
-// round to a good allocation size
-static size_t mi_os_good_alloc_size(size_t size, size_t alignment) {
-  UNUSED(alignment);
-  if (size >= (SIZE_MAX - os_alloc_granularity)) return size; // possible overflow?
-  return _mi_align_up(size, os_alloc_granularity);
+// round to a good OS allocation size (bounded by max 12.5% waste)
+size_t _mi_os_good_alloc_size(size_t size) {
+  size_t align_size;
+  if (size < 512*KiB) align_size = _mi_os_page_size();
+  else if (size < 2*MiB) align_size = 64*KiB;
+  else if (size < 8*MiB) align_size = 256*KiB;
+  else if (size < 32*MiB) align_size = 1*MiB;
+  else align_size = 4*MiB;
+  if (size >= (SIZE_MAX - align_size)) return size; // possible overflow?
+  return _mi_align_up(size, align_size);
 }
 
 #if defined(_WIN32)
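The 12.5% bound in the new comment follows because, for any request of at least eight OS pages, the chosen align_size is at most one eighth of the request (64 KiB from 512 KiB up, 256 KiB from 2 MiB up, 1 MiB from 8 MiB up, 4 MiB from 32 MiB up), so rounding up adds less than size/8. A standalone sketch of the same policy (my own illustration; it assumes a 4 KiB OS page instead of calling _mi_os_page_size and inlines the power-of-two align-up) prints the worst cases just past each threshold:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define KiB ((size_t)1024)
#define MiB (KiB*KiB)

// Simplified copy of the rounding policy above (4 KiB page size assumed).
static size_t good_alloc_size(size_t size) {
  size_t align_size;
  if (size < 512*KiB)      align_size = 4*KiB;    // stand-in for _mi_os_page_size()
  else if (size < 2*MiB)   align_size = 64*KiB;
  else if (size < 8*MiB)   align_size = 256*KiB;
  else if (size < 32*MiB)  align_size = 1*MiB;
  else                     align_size = 4*MiB;
  if (size >= (SIZE_MAX - align_size)) return size;    // possible overflow
  return (size + align_size - 1) & ~(align_size - 1);  // align up (power of two)
}

int main(void) {
  const size_t requests[] = { 512*KiB + 1, 2*MiB + 1, 8*MiB + 1, 32*MiB + 1 };
  for (size_t i = 0; i < sizeof(requests)/sizeof(requests[0]); i++) {
    size_t s = requests[i];
    size_t g = good_alloc_size(s);
    printf("request %9zu -> alloc %9zu (waste %.2f%%)\n",
           s, g, 100.0 * (double)(g - s) / (double)s);
  }
  return 0;
}

Each line prints a waste just under 12.5%, the worst case for its size class.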
@@ -547,14 +552,14 @@ static void* mi_os_mem_alloc_aligned(size_t size, size_t alignment, bool commit,
 
 void* _mi_os_alloc(size_t size, mi_stats_t* stats) {
   if (size == 0) return NULL;
-  size = mi_os_good_alloc_size(size, 0);
+  size = _mi_os_good_alloc_size(size);
   bool is_large = false;
   return mi_os_mem_alloc(size, 0, true, false, &is_large, stats);
 }
 
 void _mi_os_free_ex(void* p, size_t size, bool was_committed, mi_stats_t* stats) {
   if (size == 0 || p == NULL) return;
-  size = mi_os_good_alloc_size(size, 0);
+  size = _mi_os_good_alloc_size(size);
   mi_os_mem_free(p, size, was_committed, stats);
 }
@@ -565,7 +570,7 @@ void _mi_os_free(void* p, size_t size, mi_stats_t* stats) {
 void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool* large, mi_os_tld_t* tld)
 {
   if (size == 0) return NULL;
-  size = mi_os_good_alloc_size(size, alignment);
+  size = _mi_os_good_alloc_size(size);
   alignment = _mi_align_up(alignment, _mi_os_page_size());
   bool allow_large = false;
   if (large != NULL) {


@@ -727,10 +727,7 @@ void mi_register_deferred_free(mi_deferred_free_fun* fn) mi_attr_noexcept {
 // just that page, we always treat them as abandoned and any thread
 // that frees the block can free the whole page and segment directly.
 static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size) {
-  size_t align_size = _mi_os_page_size();
-  if (align_size < (size / 8)) align_size = _mi_align_up(size / 8, align_size);
-  if (align_size > MI_SEGMENT_SIZE) align_size = MI_SEGMENT_SIZE;
-  size_t block_size = _mi_align_up(size, align_size);
+  size_t block_size = _mi_os_good_alloc_size(size);
   mi_assert_internal(_mi_bin(block_size) == MI_BIN_HUGE);
   mi_page_t* page = mi_page_fresh_alloc(heap,NULL,block_size);
   if (page != NULL) {
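With huge-page block sizes now computed by the same rounding instead of the previous inline alignment computation, the over-allocation for a huge block is bounded in the same way, which is what the commit message refers to with keeping realloc bounded (#153). A hedged usage sketch against mimalloc's public API (mi_malloc, mi_realloc, mi_usable_size and mi_free are the real entry points; the 80 MiB starting size and 1 MiB growth step are only illustrative):

#include <mimalloc.h>
#include <stdio.h>
#include <stddef.h>

int main(void) {
  const size_t MiB = (size_t)1024*1024;
  size_t size = 80*MiB;                  // well inside the MI_BIN_HUGE range
  void* p = mi_malloc(size);
  for (int i = 0; i < 8 && p != NULL; i++) {
    size += 1*MiB;                       // grow in small steps
    void* q = mi_realloc(p, size);
    if (q == NULL) break;                // keep p valid if the realloc fails
    p = q;
    printf("requested %zu bytes, usable %zu bytes\n", size, mi_usable_size(p));
  }
  mi_free(p);
  return 0;
}

In this sketch the reported usable size should track the request closely, never exceeding it by more than about 12.5%, the worst case of the rounding above.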