Merge branch 'dev-slice'

Daan 2024-05-13 10:18:56 -07:00
commit a1b284de0a
4 changed files with 10 additions and 9 deletions

View File

@@ -1,6 +1,6 @@
 set(mi_version_major 2)
 set(mi_version_minor 1)
-set(mi_version_patch 5)
+set(mi_version_patch 6)
 set(mi_version ${mi_version_major}.${mi_version_minor})
 set(PACKAGE_VERSION ${mi_version})

View File

@@ -8,7 +8,7 @@ terms of the MIT license. A copy of the license can be found in the file
 #ifndef MIMALLOC_H
 #define MIMALLOC_H
-#define MI_MALLOC_VERSION 215 // major + 2 digits minor
+#define MI_MALLOC_VERSION 216 // major + 2 digits minor
 // ------------------------------------------------------
 // Compiler specific attributes
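As this diff shows, the macro encodes the release as one integer (v2.1.6 → 216), so client code can gate on it at preprocessing time; the public `mi_version()` call reports the linked library's value at run time. A minimal sketch (not part of this commit), assuming only the public `mimalloc.h` header:

```c
#include <stdio.h>
#include <mimalloc.h>

// Compile-time gate on the encoded version bumped in this commit (2.1.6 -> 216).
#if MI_MALLOC_VERSION >= 216
#define HAVE_MIMALLOC_2_1_6 1
#else
#define HAVE_MIMALLOC_2_1_6 0
#endif

int main(void) {
  // mi_version() returns the encoded version of the library actually linked,
  // which can differ from the header the program was compiled against.
  printf("compiled against %d, running against %d (v2.1.6+: %s)\n",
         MI_MALLOC_VERSION, mi_version(), HAVE_MIMALLOC_2_1_6 ? "yes" : "no");
  return 0;
}
```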

View File

@@ -12,8 +12,8 @@ is a general purpose allocator with excellent [performance](#performance) charac
 Initially developed by Daan Leijen for the runtime systems of the
 [Koka](https://koka-lang.github.io) and [Lean](https://github.com/leanprover/lean) languages.
-Latest release tag: `v2.1.5` (2024-05-13).
-Latest v1 tag: `v1.8.5` (2024-05-13).
+Latest release tag: `v2.1.6` (2024-05-13).
+Latest v1 tag: `v1.8.6` (2024-05-13).
 mimalloc is a drop-in replacement for `malloc` and can be used in other programs
 without code changes, for example, on dynamically linked ELF-based systems (Linux, BSD, etc.) you can use it as:
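For context, the override referred to here is typically done by preloading the shared library, along the lines of `LD_PRELOAD=/usr/lib/libmimalloc.so myprogram`; the exact library path depends on the installation.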
@@ -82,7 +82,7 @@ memory usage
 and fragmentation compared to mimalloc `v1.x` (especially for large workloads). Should otherwise have similar performance
 (see [below](#performance)); please report if you observe any significant performance regression.
-* 2024-05-13, `v1.8.5`, `v2.1.5`: Fix build errors on various (older) platforms. Refactored aligned allocation.
+* 2024-05-13, `v1.8.6`, `v2.1.6`: Fix build errors on various (older) platforms. Refactored aligned allocation.
 * 2024-04-22, `v1.8.4`, `v2.1.4`: Fixes various bugs and build issues. Add `MI_LIBC_MUSL` cmake flag for musl builds.
   Free-ing code is refactored into a separate module (`free.c`). Mimalloc page info is simplified with the block size
   directly available (and new `block_size_shift` to improve aligned block free-ing).

View File

@@ -95,6 +95,11 @@ static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t
   mi_assert_internal(mi_heap_page_is_valid(heap, pq, page, NULL, NULL));
   mi_collect_t collect = *((mi_collect_t*)arg_collect);
   _mi_page_free_collect(page, collect >= MI_FORCE);
+  if (collect == MI_FORCE) {
+    // note: call before a potential `_mi_page_free` as the segment may be freed if this was the last used page in that segment.
+    mi_segment_t* segment = _mi_page_segment(page);
+    _mi_segment_collect(segment, true /* force? */, &heap->tld->segments);
+  }
   if (mi_page_all_free(page)) {
     // no more used blocks, free the page.
     // note: this will free retired pages as well.
@@ -104,10 +109,6 @@ static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t
     // still used blocks but the thread is done; abandon the page
     _mi_page_abandon(page, pq);
   }
-  if (collect == MI_FORCE) {
-    mi_segment_t* segment = _mi_page_segment(page);
-    _mi_segment_collect(segment, true /* force? */, &heap->tld->segments);
-  }
   return true; // don't break
 }
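The note in the added block is the crux of this change: `_mi_segment_collect` must run while the page still keeps its segment alive, because the later `_mi_page_free` can release the segment when the freed page was the last used page in it, after which the old code would have touched freed memory. A minimal, self-contained sketch of that ordering hazard, using hypothetical types rather than mimalloc's internals:

```c
#include <stdlib.h>
#include <stdio.h>

// Hypothetical miniature of the ownership relation in the diff above:
// a "segment" owns its "pages", and freeing the segment's last live page
// also frees the segment itself. None of these types are mimalloc's own.
typedef struct segment_s { int used; } segment_t;
typedef struct page_s   { segment_t* segment; } page_t;

// Freeing a page may free its owning segment as a side effect.
static void page_free(page_t* page) {
  segment_t* s = page->segment;
  if (--s->used == 0) free(s);   // segment is gone after this point
  free(page);
}

static void segment_collect(segment_t* s) {
  printf("collecting segment with %d used page(s)\n", s->used);
}

int main(void) {
  segment_t* seg = malloc(sizeof(segment_t));
  page_t* page = malloc(sizeof(page_t));
  seg->used = 1;
  page->segment = seg;

  // Correct order (mirrors the moved block): collect the segment while the
  // page still pins it, then free the page, which may free the segment.
  segment_collect(page->segment);
  page_free(page);

  // The old order -- page_free(page) first, then reading page->segment --
  // would dereference freed memory whenever this was the segment's last page.
  return 0;
}
```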