mirror of https://github.com/microsoft/mimalloc
merge with dev-win
commit 3192ca4682
include/mimalloc-internal.h

@@ -44,11 +44,13 @@ void* _mi_os_alloc(size_t size, mi_stats_t* stats); // to allocat
 void  _mi_os_free(void* p, size_t size, mi_stats_t* stats); // to free thread local data

 // memory.c
-void* _mi_mem_alloc_aligned(size_t size, size_t alignment, size_t* id, mi_os_tld_t* tld);
-void* _mi_mem_alloc(size_t size, size_t* id, mi_os_tld_t* tld);
+void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool commit, size_t* id, mi_os_tld_t* tld);
+void* _mi_mem_alloc(size_t size, bool commit, size_t* id, mi_os_tld_t* tld);
 void  _mi_mem_free(void* p, size_t size, size_t id, mi_stats_t* stats);

 bool  _mi_mem_reset(void* p, size_t size, mi_stats_t* stats);
 bool  _mi_mem_unreset(void* p, size_t size, mi_stats_t* stats);
+bool  _mi_mem_commit(void* p, size_t size, mi_stats_t* stats);
 bool  _mi_mem_protect(void* addr, size_t size);
 bool  _mi_mem_unprotect(void* addr, size_t size);
include/mimalloc.h

@@ -221,7 +221,10 @@ typedef enum mi_option_e {
   mi_option_page_reset,
   mi_option_cache_reset,
   mi_option_eager_commit,
+  mi_option_eager_region_commit,
   mi_option_large_os_pages, // implies eager commit
+  mi_option_reset_decommits,
+  mi_option_reset_discards,
   mi_option_secure,
   mi_option_show_stats,
   mi_option_show_errors,

@@ -229,6 +232,7 @@ typedef enum mi_option_e {
   _mi_option_last
 } mi_option_t;

 mi_decl_export bool mi_option_is_enabled(mi_option_t option);
 mi_decl_export void mi_option_enable(mi_option_t option, bool enable);
+mi_decl_export void mi_option_enable_default(mi_option_t option, bool enable);
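The three new options (eager_region_commit, reset_decommits, reset_discards) are plain enum entries, so they are driven through the existing option API declared in this same header. A minimal sketch of toggling them from application code, assuming only the declarations shown in this diff:

#include <mimalloc.h>

int main(void) {
  // choose region-level eager commit and madvise/MEM_RESET-based reset
  mi_option_enable(mi_option_eager_region_commit, true);
  mi_option_enable(mi_option_reset_decommits, false);
  void* p = mi_malloc(42);
  mi_free(p);
  return 0;   // with show_stats enabled, statistics print at exit
}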
src/alloc.c

@@ -37,7 +37,10 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
   block->next = 0;
 #endif
 #if (MI_STAT>1)
-  if(size <= MI_LARGE_SIZE_MAX) mi_heap_stat_increase(heap,normal[_mi_bin(size)], 1);
+  if(size <= MI_LARGE_SIZE_MAX) {
+    size_t bin = _mi_bin(size);
+    mi_heap_stat_increase(heap,normal[bin], 1);
+  }
 #endif
   return block;
 }
src/memory.c (45 changes)
@@ -41,6 +41,7 @@ bool _mi_os_unprotect(void* addr, size_t size);
 bool  _mi_os_commit(void* p, size_t size, mi_stats_t* stats);
 bool  _mi_os_decommit(void* p, size_t size, mi_stats_t* stats);
 bool  _mi_os_reset(void* p, size_t size, mi_stats_t* stats);
+bool  _mi_os_unreset(void* p, size_t size, mi_stats_t* stats);
 void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, mi_os_tld_t* tld);

@@ -108,7 +109,7 @@ Commit from a region
 // Returns `false` on an error (OOM); `true` otherwise. `p` and `id` are only written
 // if the blocks were successfully claimed so ensure they are initialized to NULL/SIZE_MAX before the call.
 // (not being able to claim is not considered an error so check for `p != NULL` afterwards).
-static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bitidx, size_t blocks, size_t size, void** p, size_t* id, mi_os_tld_t* tld) {
+static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bitidx, size_t blocks, size_t size, bool commit, void** p, size_t* id, mi_os_tld_t* tld) {
   size_t mask = mi_region_block_mask(blocks,bitidx);
   mi_assert_internal(mask != 0);
   mi_assert_internal((mask & mi_atomic_read(&region->map)) == mask);
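mi_region_block_mask is used above but defined outside this hunk; a plausible definition consistent with the assertions here (an assumption, not shown in the diff) is `blocks` consecutive one-bits starting at `bitidx`:

// not part of this diff: a sketch of mi_region_block_mask consistent with
// its use above -- `blocks` consecutive one-bits starting at bit `bitidx`.
static uintptr_t mi_region_block_mask(size_t blocks, size_t bitidx) {
  mi_assert_internal(blocks + bitidx <= MI_REGION_MAP_BITS);
  return ((((uintptr_t)1 << blocks) - 1) << bitidx);
}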
@@ -116,7 +117,7 @@ static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bit
   // ensure the region is reserved
   void* start = mi_atomic_read_ptr(&region->start);
   if (start == NULL) {
-    start = _mi_os_alloc_aligned(MI_REGION_SIZE, MI_SEGMENT_ALIGN, mi_option_is_enabled(mi_option_eager_commit), tld);
+    start = _mi_os_alloc_aligned(MI_REGION_SIZE, MI_SEGMENT_ALIGN, mi_option_is_enabled(mi_option_eager_region_commit), tld);
     if (start == NULL) {
       // failure to allocate from the OS! unclaim the blocks and fail
       size_t map;

@@ -142,7 +143,7 @@ static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bit
   mi_assert_internal(start == mi_atomic_read_ptr(&region->start));
   mi_assert_internal(start != NULL);
   void* blocks_start = (uint8_t*)start + (bitidx * MI_SEGMENT_SIZE);
-  if (!mi_option_is_enabled(mi_option_eager_commit)) {
+  if (commit && !mi_option_is_enabled(mi_option_eager_region_commit)) {
     _mi_os_commit(blocks_start, mi_good_commit_size(size), tld->stats); // only commit needed size (unless using large OS pages)
   }

@@ -157,7 +158,7 @@ static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bit
 // Returns `false` on an error (OOM); `true` otherwise. `p` and `id` are only written
 // if the blocks were successfully claimed so ensure they are initialized to NULL/SIZE_MAX before the call.
 // (not being able to claim is not considered an error so check for `p != NULL` afterwards).
-static bool mi_region_alloc_blocks(mem_region_t* region, size_t idx, size_t blocks, size_t size, void** p, size_t* id, mi_os_tld_t* tld) {
+static bool mi_region_alloc_blocks(mem_region_t* region, size_t idx, size_t blocks, size_t size, bool commit, void** p, size_t* id, mi_os_tld_t* tld) {
   mi_assert_internal(p != NULL && id != NULL);
   mi_assert_internal(blocks < MI_REGION_MAP_BITS);

@@ -196,21 +197,21 @@ static bool mi_region_alloc_blocks(mem_region_t* region, size_t idx, size_t bloc

   // success, we claimed the blocks atomically
   // now commit the block memory -- this can still fail
-  return mi_region_commit_blocks(region, idx, bitidx, blocks, size, p, id, tld);
+  return mi_region_commit_blocks(region, idx, bitidx, blocks, size, commit, p, id, tld);
 }

 // Try to allocate `blocks` in a `region` at `idx` of a given `size`. Does a quick check before trying to claim.
 // Returns `false` on an error (OOM); `true` otherwise. `p` and `id` are only written
 // if the blocks were successfully claimed so ensure they are initialized to NULL/0 before the call.
 // (not being able to claim is not considered an error so check for `p != NULL` afterwards).
-static bool mi_region_try_alloc_blocks(size_t idx, size_t blocks, size_t size, void** p, size_t* id, mi_os_tld_t* tld)
+static bool mi_region_try_alloc_blocks(size_t idx, size_t blocks, size_t size, bool commit, void** p, size_t* id, mi_os_tld_t* tld)
 {
   // check if there are available blocks in the region..
   mi_assert_internal(idx < MI_REGION_MAX);
   mem_region_t* region = &regions[idx];
   uintptr_t m = mi_atomic_read(&region->map);
   if (m != MI_REGION_MAP_FULL) { // some bits are zero
-    return mi_region_alloc_blocks(region, idx, blocks, size, p, id, tld);
+    return mi_region_alloc_blocks(region, idx, blocks, size, commit, p, id, tld);
   }
   else {
     return true; // no error, but no success either

@@ -223,7 +224,7 @@ static bool mi_region_try_alloc_blocks(size_t idx, size_t blocks, size_t size, v

 // Allocate `size` memory aligned at `alignment`. Return non NULL on success, with a given memory `id`.
 // (`id` is abstract, but `id = idx*MI_REGION_MAP_BITS + bitidx`)
-void* _mi_mem_alloc_aligned(size_t size, size_t alignment, size_t* id, mi_os_tld_t* tld)
+void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool commit, size_t* id, mi_os_tld_t* tld)
 {
   mi_assert_internal(id != NULL && tld != NULL);
   mi_assert_internal(size > 0);
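Given the encoding noted above (`id = idx*MI_REGION_MAP_BITS + bitidx`), the free path can recover both indices by division and modulus. A small illustrative sketch (the helper name is assumed, not part of the diff):

// illustrative only: decode the abstract memory id used by _mi_mem_free
static void mi_region_decode_id(size_t id, size_t* idx, size_t* bitidx) {
  *idx    = id / MI_REGION_MAP_BITS;  // which region
  *bitidx = id % MI_REGION_MAP_BITS;  // which block within that region's bitmap
}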
@@ -247,21 +248,21 @@ void* _mi_mem_alloc_aligned(size_t size, size_t alignment, size_t* id, mi_os_tld
   size_t count = mi_atomic_read(&regions_count);
   size_t idx = mi_atomic_read(&region_next_idx);
   for (size_t visited = 0; visited < count; visited++, idx++) {
-    if (!mi_region_try_alloc_blocks(idx%count, blocks, size, &p, id, tld)) return NULL; // error
+    if (!mi_region_try_alloc_blocks(idx%count, blocks, size, commit, &p, id, tld)) return NULL; // error
     if (p != NULL) break;
   }

   if (p == NULL) {
     // no free range in existing regions -- try to extend beyond the count
     for (idx = count; idx < MI_REGION_MAX; idx++) {
-      if (!mi_region_try_alloc_blocks(idx, blocks, size, &p, id, tld)) return NULL; // error
+      if (!mi_region_try_alloc_blocks(idx, blocks, size, commit, &p, id, tld)) return NULL; // error
       if (p != NULL) break;
     }
   }

   if (p == NULL) {
     // we could not find a place to allocate, fall back to the os directly
-    p = _mi_os_alloc_aligned(size, alignment, true, tld);
+    p = _mi_os_alloc_aligned(size, alignment, commit, tld);
   }

   mi_assert_internal( p == NULL || (uintptr_t)p % alignment == 0);

@@ -270,8 +271,8 @@ void* _mi_mem_alloc_aligned(size_t size, size_t alignment, size_t* id, mi_os_tld

 // Allocate `size` memory. Return non NULL on success, with a given memory `id`.
-void* _mi_mem_alloc(size_t size, size_t* id, mi_os_tld_t* tld) {
-  return _mi_mem_alloc_aligned(size,0,id,tld);
+void* _mi_mem_alloc(size_t size, bool commit, size_t* id, mi_os_tld_t* tld) {
+  return _mi_mem_alloc_aligned(size,0,commit,id,tld);
 }

 /* ----------------------------------------------------------------------------

@@ -311,11 +312,11 @@ void _mi_mem_free(void* p, size_t size, size_t id, mi_stats_t* stats) {
     // TODO: implement delayed decommit/reset as these calls are too expensive
     // if the memory is reused soon.
     // reset: 10x slowdown on malloc-large, decommit: 17x slowdown on malloc-large
-    if (mi_option_is_enabled(mi_option_eager_commit)) {
-      // _mi_os_reset(p, size, stats); // 10x slowdown on malloc-large
+    if (mi_option_is_enabled(mi_option_eager_region_commit)) {
+      _mi_os_reset(p, size, stats); // 10x slowdown on malloc-large
     }
     else {
-      // _mi_os_decommit(p, size, stats); // 17x slowdown on malloc-large
+      _mi_os_decommit(p, size, stats); // 17x slowdown on malloc-large
     }

     // TODO: should we free empty regions?

@@ -336,10 +337,22 @@ void _mi_mem_free(void* p, size_t size, size_t id, mi_stats_t* stats) {
 Other
 -----------------------------------------------------------------------------*/

+bool _mi_mem_commit(void* p, size_t size, mi_stats_t* stats) {
+  return _mi_os_commit(p, size, stats);
+}
+
+bool _mi_mem_decommit(void* p, size_t size, mi_stats_t* stats) {
+  return _mi_os_decommit(p, size, stats);
+}
+
 bool _mi_mem_reset(void* p, size_t size, mi_stats_t* stats) {
   return _mi_os_reset(p, size, stats);
 }

 bool _mi_mem_unreset(void* p, size_t size, mi_stats_t* stats) {
   return _mi_os_unreset(p, size, stats);
 }

 bool _mi_mem_protect(void* p, size_t size) {
   return _mi_os_protect(p, size);
 }
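The free path above now keys off eager_region_commit: reset keeps the commit charge but lets the OS drop the physical pages, while decommit releases both. A sketch of that policy pulled out on its own (the wrapper name is illustrative; the calls and costs come from the diff):

// illustrative wrapper, not part of the diff: the reset-vs-decommit choice
// made in _mi_mem_free.
static void mi_mem_free_policy(void* p, size_t size, mi_stats_t* stats) {
  if (mi_option_is_enabled(mi_option_eager_region_commit)) {
    _mi_os_reset(p, size, stats);     // stays committed; ~10x slowdown on malloc-large
  }
  else {
    _mi_os_decommit(p, size, stats);  // releases the commit charge too; ~17x slowdown
  }
}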
src/options.c

@@ -34,14 +34,17 @@ typedef struct mi_option_desc_s {
 static mi_option_desc_t options[_mi_option_last] = {
   { 0, UNINIT, "page_reset" },
   { 0, UNINIT, "cache_reset" },
-  { 1, UNINIT, "eager_commit" }, // on by default as it seems to be faster in general
+  { 0, UNINIT, "eager_commit" },
+  { 0, UNINIT, "eager_region_commit" },
   { 0, UNINIT, "large_os_pages" }, // use large OS pages, use only with eager commit to prevent fragmentation of VMA's
+  { 0, UNINIT, "reset_decommits" },
+  { 0, UNINIT, "reset_discards" },
 #if MI_SECURE
   { MI_SECURE, INITIALIZED, "secure" }, // in secure build the environment setting is ignored
 #else
   { 0, UNINIT, "secure" },
 #endif
-  { 0, UNINIT, "show_stats" },
+  { 1, UNINIT, "show_stats" },
   { MI_DEBUG, UNINIT, "show_errors" },
   { 0, UNINIT, "verbose" }
 };
src/os.c (188 changes)
@@ -382,7 +382,7 @@ static void* mi_os_page_align_areax(bool conservative, void* addr, size_t size,
   ptrdiff_t diff = (uint8_t*)end - (uint8_t*)start;
   if (diff <= 0) return NULL;

-  mi_assert_internal((size_t)diff <= size);
+  mi_assert_internal((conservative && (size_t)diff <= size) || (!conservative && (size_t)diff >= size));
   if (newsize != NULL) *newsize = (size_t)diff;
   return start;
 }
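The strengthened assertion distinguishes the two alignment modes: conservative shrinks the range inward to page boundaries (result <= size), liberal grows it outward (result >= size). A self-contained sketch of that arithmetic, assuming a fixed 4 KiB page purely for illustration:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

// illustration of conservative vs. liberal page alignment; the real code
// queries _mi_os_page_size() instead of hard-coding 4096.
static void* page_align_area(bool conservative, void* addr, size_t size, size_t* newsize) {
  const uintptr_t psize = 4096;
  uintptr_t lo = (uintptr_t)addr;
  uintptr_t hi = lo + size;
  uintptr_t start = conservative ? ((lo + psize - 1) & ~(psize - 1))  // round up
                                 : (lo & ~(psize - 1));               // round down
  uintptr_t end   = conservative ? (hi & ~(psize - 1))                // round down
                                 : ((hi + psize - 1) & ~(psize - 1)); // round up
  if ((intptr_t)(end - start) <= 0) return NULL;
  if (newsize != NULL) *newsize = (size_t)(end - start);
  return (void*)start;
}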
@@ -391,85 +391,6 @@ static void* mi_os_page_align_area_conservative(void* addr, size_t size, size_t*
   return mi_os_page_align_areax(true, addr, size, newsize);
 }

-
-// Signal to the OS that the address range is no longer in use
-// but may be used later again. This will release physical memory
-// pages and reduce swapping while keeping the memory committed.
-// We page align to a conservative area inside the range to reset.
-bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
-  // page align conservatively within the range
-  size_t csize;
-  void* start = mi_os_page_align_area_conservative(addr, size, &csize);
-  if (csize == 0) return true;
-  _mi_stat_increase(&stats->reset, csize);
-
-#if defined(_WIN32)
-  // Testing shows that for us (on `malloc-large`) MEM_RESET is 2x faster than DiscardVirtualMemory
-  // (but this is for an access pattern that immediately reuses the memory)
-  /*
-  DWORD ok = DiscardVirtualMemory(start, csize);
-  return (ok != 0);
-  */
-  void* p = VirtualAlloc(start, csize, MEM_RESET, PAGE_READWRITE);
-  mi_assert(p == start);
-  if (p != start) return false;
-  /*
-  // VirtualUnlock removes the memory eagerly from the current working set (which MEM_RESET does lazily on demand)
-  // TODO: put this behind an option?
-  DWORD ok = VirtualUnlock(start, csize);
-  if (ok != 0) return false;
-  */
-  return true;
-#else
-#if defined(MADV_FREE)
-  static int advice = MADV_FREE;
-  int err = madvise(start, csize, advice);
-  if (err != 0 && errno == EINVAL && advice == MADV_FREE) {
-    // if MADV_FREE is not supported, fall back to MADV_DONTNEED from now on
-    advice = MADV_DONTNEED;
-    err = madvise(start, csize, advice);
-  }
-#else
-  int err = madvise(start, csize, MADV_DONTNEED);
-#endif
-  if (err != 0) {
-    _mi_warning_message("madvise reset error: start: 0x%8p, csize: 0x%8zux, errno: %i\n", start, csize, errno);
-  }
-  //mi_assert(err == 0);
-  return (err == 0);
-#endif
-}
-
-// Protect a region in memory to be not accessible.
-static bool mi_os_protectx(void* addr, size_t size, bool protect) {
-  // page align conservatively within the range
-  size_t csize = 0;
-  void* start = mi_os_page_align_area_conservative(addr, size, &csize);
-  if (csize == 0) return false;
-
-  int err = 0;
-#ifdef _WIN32
-  DWORD oldprotect = 0;
-  BOOL ok = VirtualProtect(start, csize, protect ? PAGE_NOACCESS : PAGE_READWRITE, &oldprotect);
-  err = (ok ? 0 : GetLastError());
-#else
-  err = mprotect(start, csize, protect ? PROT_NONE : (PROT_READ | PROT_WRITE));
-#endif
-  if (err != 0) {
-    _mi_warning_message("mprotect error: start: 0x%8p, csize: 0x%8zux, err: %i\n", start, csize, err);
-  }
-  return (err == 0);
-}
-
-bool _mi_os_protect(void* addr, size_t size) {
-  return mi_os_protectx(addr, size, true);
-}
-
-bool _mi_os_unprotect(void* addr, size_t size) {
-  return mi_os_protectx(addr, size, false);
-}
-
 // Commit/Decommit memory. Commit is aligned liberal, while decommit is aligned conservative.
 static bool mi_os_commitx(void* addr, size_t size, bool commit, mi_stats_t* stats) {
   // page align in the range, commit liberally, decommit conservative
@@ -512,6 +433,113 @@ bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats) {
   return mi_os_commitx(addr, size, false, stats);
 }

+// Signal to the OS that the address range is no longer in use
+// but may be used later again. This will release physical memory
+// pages and reduce swapping while keeping the memory committed.
+// We page align to a conservative area inside the range to reset.
+static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats) {
+  // page align conservatively within the range
+  size_t csize;
+  void* start = mi_os_page_align_area_conservative(addr, size, &csize);
+  if (csize == 0) return true;
+  if (reset) _mi_stat_increase(&stats->reset, csize);
+  else _mi_stat_decrease(&stats->reset, csize);
+  if (!reset) return true; // nothing to do on unreset!
+
+#if defined(_WIN32)
+  // Testing shows that for us (on `malloc-large`) MEM_RESET is 2x faster than DiscardVirtualMemory
+  // (but this is for an access pattern that immediately reuses the memory)
+  if (mi_option_is_enabled(mi_option_reset_discards)) {
+    DWORD ok = DiscardVirtualMemory(start, csize);
+    mi_assert_internal(ok == 0);
+    if (ok != 0) return false;
+  }
+  else {
+    void* p = VirtualAlloc(start, csize, MEM_RESET, PAGE_READWRITE);
+    mi_assert_internal(p == start);
+    if (p != start) return false;
+  }
+  /*
+  // VirtualUnlock removes the memory eagerly from the current working set (which MEM_RESET does lazily on demand)
+  // TODO: put this behind an option?
+  DWORD ok = VirtualUnlock(start, csize);
+  if (ok != 0) return false;
+  */
+  return true;
+#else
+#if defined(MADV_FREE)
+  static int advice = MADV_FREE;
+  int err = madvise(start, csize, advice);
+  if (err != 0 && errno == EINVAL && advice == MADV_FREE) {
+    // if MADV_FREE is not supported, fall back to MADV_DONTNEED from now on
+    advice = MADV_DONTNEED;
+    err = madvise(start, csize, advice);
+  }
+#else
+  int err = madvise(start, csize, MADV_DONTNEED);
+#endif
+  if (err != 0) {
+    _mi_warning_message("madvise reset error: start: 0x%8p, csize: 0x%8zux, errno: %i\n", start, csize, errno);
+  }
+  //mi_assert(err == 0);
+  return (err == 0);
+#endif
+}
+
+// Signal to the OS that the address range is no longer in use
+// but may be used later again. This will release physical memory
+// pages and reduce swapping while keeping the memory committed.
+// We page align to a conservative area inside the range to reset.
+bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
+  if (mi_option_is_enabled(mi_option_reset_decommits)) {
+    return _mi_os_decommit(addr,size,stats);
+  }
+  else {
+    return mi_os_resetx(addr, size, true, stats);
+  }
+}
+
+bool _mi_os_unreset(void* addr, size_t size, mi_stats_t* stats) {
+  if (mi_option_is_enabled(mi_option_reset_decommits)) {
+    return _mi_os_commit(addr, size, stats); // re-commit it
+  }
+  else {
+    return mi_os_resetx(addr, size, false, stats);
+  }
+}
+
+
+// Protect a region in memory to be not accessible.
+static bool mi_os_protectx(void* addr, size_t size, bool protect) {
+  // page align conservatively within the range
+  size_t csize = 0;
+  void* start = mi_os_page_align_area_conservative(addr, size, &csize);
+  if (csize == 0) return false;
+
+  int err = 0;
+#ifdef _WIN32
+  DWORD oldprotect = 0;
+  BOOL ok = VirtualProtect(start, csize, protect ? PAGE_NOACCESS : PAGE_READWRITE, &oldprotect);
+  err = (ok ? 0 : GetLastError());
+#else
+  err = mprotect(start, csize, protect ? PROT_NONE : (PROT_READ | PROT_WRITE));
+#endif
+  if (err != 0) {
+    _mi_warning_message("mprotect error: start: 0x%8p, csize: 0x%8zux, err: %i\n", start, csize, err);
+  }
+  return (err == 0);
+}
+
+bool _mi_os_protect(void* addr, size_t size) {
+  return mi_os_protectx(addr, size, true);
+}
+
+bool _mi_os_unprotect(void* addr, size_t size) {
+  return mi_os_protectx(addr, size, false);
+}
+
+
 bool _mi_os_shrink(void* p, size_t oldsize, size_t newsize, mi_stats_t* stats) {
   // page align conservatively within the range
   mi_assert_internal(oldsize > newsize && p != NULL);
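The madvise branch above embeds a run-time capability probe worth noting: MADV_FREE may be available at compile time but rejected by an older kernel with EINVAL, after which MADV_DONTNEED is used for the rest of the process. A standalone sketch of just that fallback pattern:

#include <errno.h>
#include <stddef.h>
#include <sys/mman.h>

// sketch of the fallback above: try MADV_FREE once; if the running kernel
// rejects it (EINVAL), switch to MADV_DONTNEED permanently. As in the diff,
// `advice` is a plain static, so concurrent first calls may race benignly.
static int os_reset_advise(void* start, size_t csize) {
#if defined(MADV_FREE)
  static int advice = MADV_FREE;
  int err = madvise(start, csize, advice);
  if (err != 0 && errno == EINVAL && advice == MADV_FREE) {
    advice = MADV_DONTNEED;
    err = madvise(start, csize, advice);
  }
  return err;
#else
  return madvise(start, csize, MADV_DONTNEED);
#endif
}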
src/page.c (11 changes)
@@ -456,7 +456,7 @@ static void mi_page_free_list_extend( mi_heap_t* heap, mi_page_t* page, size_t e
   }
   // enable the new free list
   page->capacity += (uint16_t)extend;
-  mi_stat_increase(stats->page_committed, extend * page->block_size);
+  _mi_stat_increase(&stats->page_committed, extend * page->block_size);
 }

 /* -----------------------------------------------------------

@@ -485,12 +485,7 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_stats_t* st

   size_t page_size;
   _mi_page_start(_mi_page_segment(page), page, &page_size);
-  if (page->is_reset) {
-    page->is_reset = false;
-    mi_stat_decrease( stats->reset, page_size);
-  }
-
-  mi_stat_increase( stats->pages_extended, 1);
+  _mi_stat_increase(&stats->pages_extended, 1);

   // calculate the extend count
   size_t extend = page->reserved - page->capacity;

@@ -595,7 +590,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
     page = next;
   } // for each page

-  mi_stat_counter_increase(heap->tld->stats.searches,count);
+  _mi_stat_counter_increase(&heap->tld->stats.searches,count);

   if (page == NULL) {
     page = rpage;
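These hunks (and the segment.c hunks below) systematically replace the mi_stat_* macro forms with direct _mi_stat_* function calls on a pointer. A plausible shape for the macro layer being bypassed, shown as an assumption for orientation rather than as code from this commit:

// assumed sketch of the macro layer: the macro takes the stat field itself
// and forwards its address, which is why every converted call gains `_` and `&`.
#if (MI_STAT)
#define mi_stat_increase(stat, amount)  _mi_stat_increase(&(stat), amount)
#define mi_stat_decrease(stat, amount)  _mi_stat_decrease(&(stat), amount)
#else
#define mi_stat_increase(stat, amount)  ((void)0)
#define mi_stat_decrease(stat, amount)  ((void)0)
#endif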
src/segment.c

@@ -179,8 +179,8 @@ reuse and avoid setting/clearing guard pages in secure mode.
 ------------------------------------------------------------------------------- */

 static void mi_segments_track_size(long segment_size, mi_segments_tld_t* tld) {
-  if (segment_size>=0) mi_stat_increase(tld->stats->segments,1);
-  else mi_stat_decrease(tld->stats->segments,1);
+  if (segment_size>=0) _mi_stat_increase(&tld->stats->segments,1);
+  else _mi_stat_decrease(&tld->stats->segments,1);
   tld->count += (segment_size >= 0 ? 1 : -1);
   if (tld->count > tld->peak_count) tld->peak_count = tld->count;
   tld->current_size += segment_size;

@@ -196,6 +196,7 @@ static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_se
   _mi_mem_free(segment, segment_size, segment->memid, tld->stats);
 }

+
 // The thread local segment cache is limited to be at most 1/8 of the peak size of segments in use,
 // and no more than 4.
 #define MI_SEGMENT_CACHE_MAX (4)

@@ -205,6 +206,12 @@ static mi_segment_t* mi_segment_cache_pop(size_t segment_size, mi_segments_tld_t
   if (segment_size != 0 && segment_size != MI_SEGMENT_SIZE) return NULL;
   mi_segment_t* segment = tld->cache;
   if (segment == NULL) return NULL;
+  if (mi_option_is_enabled(mi_option_eager_commit) &&
+      (mi_option_is_enabled(mi_option_cache_reset) || mi_option_is_enabled(mi_option_page_reset)))
+  {
+    // ensure the memory is available
+    _mi_mem_unreset((uint8_t*)segment + segment->segment_info_size, segment->segment_size - segment->segment_info_size, tld->stats);
+  }
   tld->cache_count--;
   tld->cache = segment->next;
   segment->next = NULL;

@@ -230,12 +237,9 @@ static bool mi_segment_cache_full(mi_segments_tld_t* tld) {
 static bool mi_segment_cache_push(mi_segment_t* segment, mi_segments_tld_t* tld) {
   mi_assert_internal(!mi_segment_is_in_free_queue(segment, tld));
   mi_assert_internal(segment->next == NULL);
-  if (segment->segment_size != MI_SEGMENT_SIZE || mi_segment_cache_full(tld)) {
-    return false;
-  }
-
+  if (segment->segment_size != MI_SEGMENT_SIZE || mi_segment_cache_full(tld)) return false;
   mi_assert_internal(segment->segment_size == MI_SEGMENT_SIZE);
-  if (mi_option_is_enabled(mi_option_cache_reset) && !mi_option_is_enabled(mi_option_page_reset)) {
+  if (mi_option_is_enabled(mi_option_cache_reset)) {
     _mi_mem_reset((uint8_t*)segment + segment->segment_info_size, segment->segment_size - segment->segment_info_size, tld->stats);
   }
   segment->next = tld->cache;
@@ -282,6 +286,7 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
   size_t page_size = (page_kind == MI_PAGE_HUGE ? segment_size : (size_t)1 << page_shift);

   // Try to get it from our thread local cache first
+  bool commit = mi_option_is_enabled(mi_option_eager_commit) || (page_kind != MI_PAGE_SMALL);
   bool protection_still_good = false;
   mi_segment_t* segment = mi_segment_cache_pop(segment_size, tld);
   if (segment != NULL) {

@@ -293,12 +298,19 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
         protection_still_good = true; // otherwise, the guard pages are still in place
       }
     }
+    if (page_kind != MI_PAGE_SMALL && !mi_option_is_enabled(mi_option_eager_commit) &&
+        (mi_option_is_enabled(mi_option_cache_reset) || mi_option_is_enabled(mi_option_page_reset))) {
+      _mi_mem_commit(segment, segment->segment_size, tld->stats);
+    }
   }
   else {
     // Allocate the segment from the OS
     size_t memid;
-    segment = (mi_segment_t*)_mi_mem_alloc_aligned(segment_size, MI_SEGMENT_SIZE, &memid, os_tld);
+    segment = (mi_segment_t*)_mi_mem_alloc_aligned(segment_size, MI_SEGMENT_SIZE, commit, &memid, os_tld);
     if (segment == NULL) return NULL; // failed to allocate
+    if (!commit) {
+      _mi_mem_commit(segment, info_size, tld->stats);
+    }
    segment->memid = memid;
    mi_segments_track_size((long)segment_size, tld);
   }

@@ -311,7 +323,8 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
   }

   if (mi_option_is_enabled(mi_option_secure) && !protection_still_good) {
-    // in secure mode, we set up a protected page in between the segment info and the page data
+    // in secure mode, we set up a protected page in between the segment info
+    // and the page data
     mi_assert_internal( info_size == pre_size - _mi_os_page_size() && info_size % _mi_os_page_size() == 0);
-    _mi_mem_protect( (uint8_t*)segment + info_size, (pre_size - info_size) );
+    size_t os_page_size = _mi_os_page_size();

@@ -336,8 +349,9 @@ static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind,
   segment->cookie = _mi_ptr_cookie(segment);
   for (uint8_t i = 0; i < segment->capacity; i++) {
     segment->pages[i].segment_idx = i;
+    segment->pages[i].is_reset = !commit;
   }
-  mi_stat_increase(tld->stats->page_committed, segment->segment_info_size);
+  _mi_stat_increase(&tld->stats->page_committed, segment->segment_info_size);
   //fprintf(stderr,"mimalloc: alloc segment at %p\n", (void*)segment);
   return segment;
 }
@@ -369,10 +383,11 @@ static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t
   mi_assert_expensive(!mi_segment_queue_contains(&tld->small_free, segment));
   mi_assert(segment->next == NULL);
   mi_assert(segment->prev == NULL);
-  mi_stat_decrease( tld->stats->page_committed, segment->segment_info_size);
+  _mi_stat_decrease(&tld->stats->page_committed, segment->segment_info_size);
   segment->thread_id = 0;

   // update reset memory statistics
+  /*
   for (uint8_t i = 0; i < segment->capacity; i++) {
     mi_page_t* page = &segment->pages[i];
     if (page->is_reset) {

@@ -380,6 +395,7 @@ static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t
       mi_stat_decrease( tld->stats->reset,mi_page_size(page));
     }
   }
+  */

   if (!force && mi_segment_cache_push(segment, tld)) {
     // it is put in our cache

@@ -399,12 +415,25 @@ static bool mi_segment_has_free(const mi_segment_t* segment) {
   return (segment->used < segment->capacity);
 }

-static mi_page_t* mi_segment_find_free(mi_segment_t* segment) {
+static mi_page_t* mi_segment_find_free(mi_segment_t* segment, mi_stats_t* stats) {
   mi_assert_internal(mi_segment_has_free(segment));
   mi_assert_expensive(mi_segment_is_valid(segment));
   for (size_t i = 0; i < segment->capacity; i++) {
     mi_page_t* page = &segment->pages[i];
     if (!page->segment_in_use) {
+      if (page->is_reset) {
+        size_t psize;
+        uint8_t* start = _mi_page_start(segment, page, &psize);
+        page->is_reset = false;
+        if (mi_option_is_enabled(mi_option_eager_commit)) {
+          _mi_mem_unreset(start, psize, stats);
+        }
+        else {
+          // note we could allow both lazy commit, and page level reset if we add a `is_commit` flag...
+          // for now we use commit for both
+          _mi_mem_commit(start, psize, stats);
+        }
+      }
       return page;
     }
   }
@@ -424,17 +453,15 @@ static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, mi_sta
   mi_assert_internal(page->segment_in_use);
   mi_assert_internal(mi_page_all_free(page));
   size_t inuse = page->capacity * page->block_size;
-  mi_stat_decrease( stats->page_committed, inuse);
-  mi_stat_decrease( stats->pages, 1);
+  _mi_stat_decrease(&stats->page_committed, inuse);
+  _mi_stat_decrease(&stats->pages, 1);

   // reset the page memory to reduce memory pressure?
   if (!page->is_reset && mi_option_is_enabled(mi_option_page_reset)) {
     size_t psize;
     uint8_t* start = _mi_page_start(segment, page, &psize);
     page->is_reset = true;
-    if (inuse > 0) {
-      _mi_mem_reset(start, psize, stats); // TODO: just `inuse`?
-    }
+    _mi_mem_reset(start, psize, stats);
   }

   // zero the page data

@@ -503,7 +530,8 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
     segment->abandoned_next = (mi_segment_t*)abandoned;
   } while (!mi_atomic_compare_exchange_ptr((volatile void**)&abandoned, segment, segment->abandoned_next));
   mi_atomic_increment(&abandoned_count);
-  mi_stat_increase( tld->stats->segments_abandoned,1);
+  _mi_stat_increase(&tld->stats->segments_abandoned,1);
   mi_segments_track_size((long)segment->segment_size, tld);
 }

 void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) {

@@ -511,7 +539,7 @@ void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) {
   mi_segment_t* segment = _mi_page_segment(page);
   mi_assert_expensive(mi_segment_is_valid(segment));
   segment->abandoned++;
-  mi_stat_increase( tld->stats->pages_abandoned, 1);
+  _mi_stat_increase(&tld->stats->pages_abandoned, 1);
   mi_assert_internal(segment->abandoned <= segment->used);
   if (segment->used == segment->abandoned) {
     // all pages are abandoned, abandon the entire segment

@@ -546,7 +574,7 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen
     mi_segments_track_size((long)segment->segment_size,tld);
     mi_assert_internal(segment->next == NULL && segment->prev == NULL);
     mi_assert_expensive(mi_segment_is_valid(segment));
-    mi_stat_decrease(tld->stats->segments_abandoned,1);
+    _mi_stat_decrease(&tld->stats->segments_abandoned,1);
     // add its free pages to the current thread
     if (segment->page_kind == MI_PAGE_SMALL && mi_segment_has_free(segment)) {
       mi_segment_enqueue(&tld->small_free, segment);

@@ -558,7 +586,7 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen
       if (page->segment_in_use) {
         segment->abandoned--;
         mi_assert(page->next == NULL);
-        mi_stat_decrease( tld->stats->pages_abandoned, 1);
+        _mi_stat_decrease(&tld->stats->pages_abandoned, 1);
         if (mi_page_all_free(page)) {
           // if everything free by now, free the page
           mi_segment_page_clear(segment,page,tld->stats);

@@ -589,7 +617,7 @@ bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segmen
 // Requires that the page has free pages
 static mi_page_t* mi_segment_small_page_alloc_in(mi_segment_t* segment, mi_segments_tld_t* tld) {
   mi_assert_internal(mi_segment_has_free(segment));
-  mi_page_t* page = mi_segment_find_free(segment);
+  mi_page_t* page = mi_segment_find_free(segment, tld->stats);
   page->segment_in_use = true;
   segment->used++;
   mi_assert_internal(segment->used <= segment->capacity);
src/stats.c (34 changes)
@@ -141,7 +141,7 @@ static void mi_printf_amount(int64_t n, int64_t unit, FILE* out, const char* fmt
   _mi_fprintf(out, (fmt==NULL ? "%11s" : fmt), buf);
 }

+#if MI_STAT>0
 static void mi_print_amount(int64_t n, int64_t unit, FILE* out) {
   mi_printf_amount(n,unit,out,NULL);
 }

@@ -175,7 +175,8 @@ static void mi_stat_counter_print(const mi_stat_counter_t* stat, const char* msg
   double avg = (stat->count == 0 ? 0.0 : (double)stat->total / (double)stat->count);
   _mi_fprintf(out,"%10s: %7.1f avg\n", msg, avg);
 }
+#endif

 static void mi_print_header( FILE* out ) {
   _mi_fprintf(out,"%10s: %10s %10s %10s %10s %10s\n", "heap stats", "peak ", "total ", "freed ", "unit ", "count ");
@@ -204,33 +205,27 @@ static void mi_stats_print_bins(mi_stat_count_t* all, const mi_stat_count_t* bin
 #endif

-static void mi_process_info(double* utime, double* stime, size_t* peak_rss, size_t* page_faults, size_t* page_reclaim);
+static void mi_process_info(double* utime, double* stime, size_t* peak_rss, size_t* page_faults, size_t* page_reclaim, size_t* peak_commit);

 static void _mi_stats_print(mi_stats_t* stats, double secs, FILE* out) mi_attr_noexcept {
   if (out == NULL) out = stderr;
   mi_print_header(out);
 #if !defined(MI_STAT) || (MI_STAT==0)
   UNUSED(stats);
   //_mi_fprintf(out,"(mimalloc built without statistics)\n");
 #else
 #if MI_STAT>1
   mi_stat_count_t normal = { 0,0,0,0 };
   mi_stats_print_bins(&normal, stats->normal, MI_BIN_HUGE, "normal",out);
   mi_stat_print(&normal, "normal", 1, out);
 #endif
   mi_stat_print(&stats->huge, "huge", 1, out);
 #if MI_STAT>1
   mi_stat_count_t total = { 0,0,0,0 };
   mi_stat_add(&total, &normal, 1);
   mi_stat_add(&total, &stats->huge, 1);
   mi_stat_print(&total, "total", 1, out);
 #endif
   _mi_fprintf(out, "malloc requested: ");
   mi_print_amount(stats->malloc.allocated, 1, out);
   _mi_fprintf(out, "\n\n");
 #endif
   mi_stat_print(&stats->reserved, "reserved", 1, out);
   mi_stat_print(&stats->committed, "committed", 1, out);
-  mi_stat_print(&stats->reset, "reset", -1, out);
+  mi_stat_print(&stats->reset, "reset", 1, out);
   mi_stat_print(&stats->page_committed, "touched", 1, out);
   mi_stat_print(&stats->segments, "segments", -1, out);
   mi_stat_print(&stats->segments_abandoned, "-abandoned", -1, out);
@@ -243,7 +238,6 @@ static void _mi_stats_print(mi_stats_t* stats, double secs, FILE* out) mi_attr_n
   mi_stat_print(&stats->commit_calls, "commits", 0, out);
   mi_stat_print(&stats->threads, "threads", 0, out);
   mi_stat_counter_print(&stats->searches, "searches", out);
-#endif

   if (secs >= 0.0) _mi_fprintf(out, "%10s: %9.3f s\n", "elapsed", secs);

@@ -252,9 +246,14 @@ static void _mi_stats_print(mi_stats_t* stats, double secs, FILE* out) mi_attr_n
   size_t peak_rss;
   size_t page_faults;
   size_t page_reclaim;
-  mi_process_info(&user_time, &sys_time, &peak_rss, &page_faults, &page_reclaim);
+  size_t peak_commit;
+  mi_process_info(&user_time, &sys_time, &peak_rss, &page_faults, &page_reclaim, &peak_commit);
   _mi_fprintf(out,"%10s: user: %.3f s, system: %.3f s, faults: %lu, reclaims: %lu, rss: ", "process", user_time, sys_time, (unsigned long)page_faults, (unsigned long)page_reclaim );
   mi_printf_amount((int64_t)peak_rss, 1, out, "%s");
+  if (peak_commit > 0) {
+    _mi_fprintf(out,", commit charge: ");
+    mi_printf_amount((int64_t)peak_commit, 1, out, "%s");
+  }
   _mi_fprintf(out,"\n");
 }
@@ -362,7 +361,7 @@ static double filetime_secs(const FILETIME* ftime) {
   double secs = (double)(i.QuadPart) * 1.0e-7; // FILETIME is in 100 nano seconds
   return secs;
 }
-static void mi_process_info(double* utime, double* stime, size_t* peak_rss, size_t* page_faults, size_t* page_reclaim) {
+static void mi_process_info(double* utime, double* stime, size_t* peak_rss, size_t* page_faults, size_t* page_reclaim, size_t* peak_commit) {
   FILETIME ct;
   FILETIME ut;
   FILETIME st;

@@ -375,6 +374,7 @@ static void mi_process_info(double* utime, double* stime, size_t* peak_rss, size
   GetProcessMemoryInfo(GetCurrentProcess(), &info, sizeof(info));
   *peak_rss = (size_t)info.PeakWorkingSetSize;
   *page_faults = (size_t)info.PageFaultCount;
+  *peak_commit = (size_t)info.PeakPagefileUsage;
   *page_reclaim = 0;
 }

@@ -391,7 +391,7 @@ static double timeval_secs(const struct timeval* tv) {
   return (double)tv->tv_sec + ((double)tv->tv_usec * 1.0e-6);
 }

-static void mi_process_info(double* utime, double* stime, size_t* peak_rss, size_t* page_faults, size_t* page_reclaim) {
+static void mi_process_info(double* utime, double* stime, size_t* peak_rss, size_t* page_faults, size_t* page_reclaim, size_t* peak_commit) {
   struct rusage rusage;
   getrusage(RUSAGE_SELF, &rusage);
 #if defined(__APPLE__) && defined(__MACH__)
|
|||
#endif
|
||||
*page_faults = rusage.ru_majflt;
|
||||
*page_reclaim = rusage.ru_minflt;
|
||||
*peak_commit = 0;
|
||||
*utime = timeval_secs(&rusage.ru_utime);
|
||||
*stime = timeval_secs(&rusage.ru_stime);
|
||||
}
|
||||
|
||||
#else
|
||||
#pragma message("define a way to get process info")
|
||||
static void mi_process_info(double* utime, double* stime, size_t* peak_rss, size_t* page_faults, size_t* page_reclaim) {
|
||||
static void mi_process_info(double* utime, double* stime, size_t* peak_rss, size_t* page_faults, size_t* page_reclaim, size_t* peak_commit) {
|
||||
*peak_rss = 0;
|
||||
*page_faults = 0;
|
||||
*page_reclaim = 0;
|
||||
*peak_commit = 0;
|
||||
*utime = 0.0;
|
||||
*stime = 0.0;
|
||||
}
|
||||
|
|
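On Windows the new peak_commit value comes from the process commit charge; the unix paths report 0, which the `peak_commit > 0` guard in the printer then skips, leaving unix output unchanged. A standalone sketch of the Windows query (illustrative, mirroring the call in the diff):

#include <windows.h>
#include <psapi.h>

// illustrative: peak commit charge of the current process, as used above.
static size_t peak_commit_bytes(void) {
  PROCESS_MEMORY_COUNTERS info;
  if (!GetProcessMemoryInfo(GetCurrentProcess(), &info, sizeof(info))) return 0;
  return (size_t)info.PeakPagefileUsage;
}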