fix multi-threaded free to unprotect guarded blocks

daanx 2024-08-20 09:55:57 -07:00
parent 0c19eb60cf
commit 635cf7af6a
5 changed files with 44 additions and 20 deletions
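Context for the change: a MI_DEBUG_GUARDED build places a protected OS page directly behind qualifying allocations so that heap overflows fault immediately, and the free path must remove that protection before the block is recycled. The thread-local free path already did this; the multi-threaded path did not. A conceptual sketch of the mechanism only (illustrative POSIX code, not mimalloc's implementation):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>
    #include <unistd.h>

    // Conceptual sketch: place an object right in front of an
    // inaccessible OS page so an overflow past it traps immediately.
    // (Unprotecting/unmapping on free is omitted here.)
    static void* alloc_guarded(size_t size) {
      const size_t psize = (size_t)sysconf(_SC_PAGESIZE);
      const size_t total = ((size + psize - 1)/psize + 1) * psize;  // data pages + guard page
      uint8_t* base = (uint8_t*)mmap(NULL, total, PROT_READ|PROT_WRITE,
                                     MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
      if (base == MAP_FAILED) return NULL;
      mprotect(base + total - psize, psize, PROT_NONE);  // the trailing guard page
      return base + (total - psize - size);              // block ends at the guard page
    }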

View File

@@ -74,7 +74,7 @@ terms of the MIT license. A copy of the license can be found in the file
 // Use guard pages behind objects of a certain size
 #define MI_DEBUG_GUARDED 1
-#if defined(MI_DEBUG_GUARDED) || defined(MI_DEBUG_GUARDEDX)
+#if defined(MI_DEBUG_GUARDED)
 #define MI_PADDING 0
 #endif
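With MI_PADDING forced to 0 here, MI_PADDING_SIZE collapses to 0 as well, which is why the allocation path below compiles out the `block_size >= MI_PADDING_SIZE` assertion. A simplified sketch of how the header derives MI_PADDING_SIZE (reconstructed, not the verbatim header):

    #include <stdint.h>

    // Sketch of the surrounding definitions in mimalloc's types header
    // (simplified; the exact form may differ):
    #if MI_PADDING
    typedef struct mi_padding_s {
      uint32_t canary;  // encoded canary for overflow detection
      uint32_t delta;   // offset from the block start to the padding
    } mi_padding_t;
    #define MI_PADDING_SIZE  (sizeof(mi_padding_t))
    #else
    #define MI_PADDING_SIZE  0
    #endif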

View File

@@ -55,8 +55,10 @@ extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_
   // zero the block? note: we need to zero the full block size (issue #63)
   if mi_unlikely(zero) {
     mi_assert_internal(page->block_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic)
-    mi_assert_internal(page->block_size >= MI_PADDING_SIZE);
     mi_assert_internal(!mi_page_is_huge(page));
+    #if MI_PADDING_SIZE > 0
+    mi_assert_internal(page->block_size >= MI_PADDING_SIZE);
+    #endif
     if (page->free_is_zero) {
       block->next = 0;
       mi_track_mem_defined(block, page->block_size - MI_PADDING_SIZE);
@@ -131,7 +133,7 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap,
   if (size == 0) { size = sizeof(void*); }
   #endif
   #if MI_DEBUG_GUARDED
-  if (size >= _mi_option_get_fast(mi_option_debug_guarded_min) && size <= _mi_option_get_fast(mi_option_debug_guarded_max)) {
+  if (size <= (size_t)_mi_option_get_fast(mi_option_debug_guarded_max) && size >= (size_t)_mi_option_get_fast(mi_option_debug_guarded_min)) {
     return mi_heap_malloc_guarded(heap, size, zero, 0);
   }
   #endif
@@ -171,8 +173,9 @@ extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool z
   }
   #if MI_DEBUG_GUARDED
   else if ( huge_alignment == 0 &&       // guarded pages do not work with huge aligments at the moment
-            ((size >= _mi_option_get_fast(mi_option_debug_guarded_min) && size <= _mi_option_get_fast(mi_option_debug_guarded_max))
-            || ((size & (_mi_os_page_size()-1)) == 0)) )  // page-size multiple are always guarded so we can have a correct `mi_usable_size`.
+            _mi_option_get_fast(mi_option_debug_guarded_max) > 0 &&  // guarded must be enabled
+            ((size >= (size_t)_mi_option_get_fast(mi_option_debug_guarded_min) && size <= (size_t)_mi_option_get_fast(mi_option_debug_guarded_max))
+            || ((mi_good_size(size) & (_mi_os_page_size()-1)) == 0)) )  // page-size multiple are always guarded so we can have a correct `mi_usable_size`.
   {
     return mi_heap_malloc_guarded(heap, size, zero, 0);
   }
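Beyond the (size_t) casts that avoid a signed/unsigned comparison, the new `_mi_option_get_fast(mi_option_debug_guarded_max) > 0` term ensures guarded allocation is only attempted when the feature is actually enabled, and `mi_good_size(size)` makes the page-size-multiple test match the size class that will really be allocated. Assuming these options are set like any other mimalloc option, usage would look like:

    #include <mimalloc.h>

    int main(void) {
      // Hypothetical usage: guard allocations between 1KiB and 8KiB.
      mi_option_set(mi_option_debug_guarded_min, 1024);
      mi_option_set(mi_option_debug_guarded_max, 8192);
      void* p = mi_malloc(4096);  // placed in front of a guard page
      mi_free(p);                 // must unprotect that page again
      return 0;
    }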
@@ -615,11 +618,20 @@ static mi_decl_restrict void* mi_heap_malloc_guarded(mi_heap_t* heap, size_t siz
   const size_t fullsize = mi_page_block_size(page);  // must use `block_size` to match `mi_free_local`
   void* const gpage = (uint8_t*)base + (fullsize - psize);
   mi_assert_internal(_mi_is_aligned(gpage, psize));
-  void* const p = (uint8_t*)base + (fullsize - psize - bsize);
+  // place block in front of the guard page
+  size_t offset = fullsize - psize - bsize;
+  if (offset > MI_BLOCK_ALIGNMENT_MAX) {
+    // give up to place it right in front of the guard page if the offset is too large for unalignment
+    offset = MI_BLOCK_ALIGNMENT_MAX;
+  }
+  void* const p = (uint8_t*)base + offset;
   mi_assert_internal(p >= base);
   // set page flags
-  if (p > base) { mi_page_set_has_aligned(page, true); }
+  if (offset > 0) {
+    mi_page_set_has_aligned(page, true);
+  }
   // set guard page
   if (segment->allow_decommit) {
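The key change here: instead of always placing the block directly in front of the guard page, the offset from `base` is clamped to MI_BLOCK_ALIGNMENT_MAX so that `_mi_page_ptr_unalign` can still recover the block start when the pointer is later freed. Isolated as a standalone sketch (names follow the hunk above):

    #include <stddef.h>

    // Standalone sketch of the new placement computation; fullsize is the
    // page's block size, psize the OS page size, bsize the usable size.
    static size_t mi_guarded_offset(size_t fullsize, size_t psize, size_t bsize,
                                    size_t block_alignment_max) {
      size_t offset = fullsize - psize - bsize;  // right in front of the guard page
      if (offset > block_alignment_max) {
        offset = block_alignment_max;            // keep the offset recoverable on free
      }
      return offset;
    }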

View File

@@ -26,6 +26,25 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block);
 // forward declaration of multi-threaded free (`_mt`) (or free in huge block if compiled with MI_HUGE_PAGE_ABANDON)
 static mi_decl_noinline void mi_free_block_mt(mi_page_t* page, mi_segment_t* segment, mi_block_t* block);
+#if !MI_DEBUG_GUARDED
+static void mi_block_unguard(mi_page_t* page, mi_block_t* block) {
+  MI_UNUSED(page);
+  MI_UNUSED(block);
+}
+#else
+static void mi_block_unguard(mi_page_t* page, mi_block_t* block) {
+  if (mi_page_has_guarded(page)) {
+    const size_t bsize = mi_page_block_size(page);
+    const size_t psize = _mi_os_page_size();
+    mi_assert_internal(bsize > psize);
+    mi_assert_internal(_mi_page_segment(page)->allow_decommit);
+    void* gpage = (uint8_t*)block + (bsize - psize);
+    mi_assert_internal(_mi_is_aligned(gpage, psize));
+    _mi_os_unprotect(gpage, psize);
+  }
+}
+#endif
 // regular free of a (thread local) block pointer
 // fast path written carefully to prevent spilling on the stack
 static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool track_stats, bool check_full)
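The unprotect logic is now hoisted into `mi_block_unguard`, with a no-op stub when MI_DEBUG_GUARDED is off so that both call sites further below stay unconditional. A worked example of the address arithmetic, with assumed sizes:

    // Worked example (assumed sizes, not from the commit):
    //   bsize = 20*1024   (mi_page_block_size(page))
    //   psize =  4*1024   (_mi_os_page_size())
    //   gpage = (uint8_t*)block + (20*1024 - 4*1024)  // last page of the block
    // _mi_os_unprotect(gpage, psize) restores read/write access so the
    // memory can safely be reused or returned to the OS.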
@@ -34,16 +53,6 @@ static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool
   if mi_unlikely(mi_check_is_double_free(page, block)) return;
   mi_check_padding(page, block);
   if (track_stats) { mi_stat_free(page, block); }
-  #if MI_DEBUG_GUARDED
-  if (mi_page_has_guarded(page)) {
-    const size_t bsize = mi_page_block_size(page);
-    const size_t psize = _mi_os_page_size();
-    mi_assert_internal(bsize > psize);
-    mi_assert_internal(_mi_page_segment(page)->allow_decommit);
-    void* gpage = (uint8_t*)block + (bsize - psize);
-    _mi_os_unprotect(gpage, psize);
-  }
-  #endif
   #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN && !MI_DEBUG_GUARDED
   memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
   #endif
@@ -83,12 +92,14 @@ mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p) {
 static void mi_decl_noinline mi_free_generic_local(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept {
   MI_UNUSED(segment);
   mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(page, p) : (mi_block_t*)p);
+  mi_block_unguard(page,block);
   mi_free_block_local(page, block, true /* track stats */, true /* check for a full page */);
 }
 // free a pointer owned by another thread (page parameter comes first for better codegen)
 static void mi_decl_noinline mi_free_generic_mt(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept {
   mi_block_t* const block = _mi_page_ptr_unalign(page, p); // don't check `has_aligned` flag to avoid a race (issue #865)
+  mi_block_unguard(page, block);
   mi_free_block_mt(page, segment, block);
 }
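These two added calls are the fix named in the commit title: `mi_free_generic_mt` previously freed a guarded block without unprotecting its guard page, so a block freed from a thread other than its owner left a protected page behind, and later reuse of that memory could fault. A hypothetical repro sketch (assumes a build with MI_DEBUG_GUARDED and the guard options enabled, 4KiB OS pages):

    #include <mimalloc.h>
    #include <pthread.h>
    #include <stddef.h>

    // Free the block from a different thread than the one that allocated
    // it, forcing the multi-threaded free path (mi_free_generic_mt).
    static void* free_elsewhere(void* arg) {
      mi_free(arg);
      return NULL;
    }

    int main(void) {
      void* p = mi_malloc(4096);  // page-size multiple: guarded under MI_DEBUG_GUARDED
      pthread_t t;
      pthread_create(&t, NULL, free_elsewhere, p);
      pthread_join(&t, NULL);
      // Before this commit the guard page could still be protected here,
      // so reusing the freed block could fault; after it, this is safe.
      void* q = mi_malloc(4096);
      mi_free(q);
      return 0;
    }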

View File

@@ -745,6 +745,7 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, mi_seg
 void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld)
 {
   mi_assert(page != NULL);
+  mi_assert_internal(!mi_page_has_guarded(page));
   mi_segment_t* segment = _mi_page_segment(page);
   mi_assert_expensive(mi_segment_is_valid(segment,tld));
   mi_pages_try_purge(false /*force?*/, tld);

View File

@@ -271,7 +271,7 @@ int main(void) {
     mi_free(p);
   };
-  #if !(MI_TRACK_VALGRIND || MI_TRACK_ASAN)
+  #if !(MI_TRACK_VALGRIND || MI_TRACK_ASAN || MI_DEBUG_GUARDED)
   CHECK_BODY("fill-freed-small") {
     size_t malloc_size = MI_SMALL_SIZE_MAX / 2;
     uint8_t* p = (uint8_t*)mi_malloc(malloc_size);