/* ----------------------------------------------------------------------------
Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#pragma once
#ifndef MIMALLOC_TYPES_H
#define MIMALLOC_TYPES_H

#include <stddef.h>   // ptrdiff_t
#include <stdint.h>   // uintptr_t, uint16_t, etc
#include <limits.h>   // LONG_MAX (used below to choose the MI_ZU/MI_ZI literal suffixes)

#include "mimalloc-atomic.h"  // _Atomic

#ifdef _MSC_VER
#pragma warning(disable:4214)  // bitfield is not int
#endif

// Minimal alignment necessary. On most platforms 16 bytes are needed
// due to SSE registers for example. This must be at least `sizeof(void*)`
#ifndef MI_MAX_ALIGN_SIZE
#define MI_MAX_ALIGN_SIZE  16   // sizeof(max_align_t)
#endif


// ------------------------------------------------------
// Variants
// ------------------------------------------------------

// Define NDEBUG in the release version to disable assertions.
// #define NDEBUG

// Define MI_VALGRIND to enable valgrind support
// #define MI_VALGRIND 1

// Define MI_STAT as 1 to maintain statistics; set it to 2 to have detailed statistics (but costs some performance).
// #define MI_STAT 1

// Define MI_SECURE to enable security mitigations
// #define MI_SECURE 1  // guard page around metadata
// #define MI_SECURE 2  // guard page around each mimalloc page
// #define MI_SECURE 3  // encode free lists (detect corrupted free list (buffer overflow), and invalid pointer free)
// #define MI_SECURE 4  // checks for double free. (may be more expensive)

#if !defined(MI_SECURE)
#define MI_SECURE 0
#endif

// Define MI_DEBUG for debug mode
// #define MI_DEBUG 1  // basic assertion checks and statistics, check double free, corrupted free list, and invalid pointer free.
// #define MI_DEBUG 2  // + internal assertion checks
// #define MI_DEBUG 3  // + extensive internal invariant checking (cmake -DMI_DEBUG_FULL=ON)
#if !defined(MI_DEBUG)
#if !defined(NDEBUG) || defined(_DEBUG)
#define MI_DEBUG 2
#else
#define MI_DEBUG 0
#endif
#endif

// Reserve extra padding at the end of each block to be more resilient against heap block overflows.
// The padding can detect byte-precise buffer overflow on free.
#if !defined(MI_PADDING) && (MI_DEBUG>=1 || MI_VALGRIND)
#define MI_PADDING  1
#endif

// Encoded free lists allow detection of corrupted free lists
// and can detect buffer overflows, modify after free, and double `free`s.
#if (MI_SECURE>=3 || MI_DEBUG>=1)
#define MI_ENCODE_FREELIST  1
#endif


// We used to abandon huge pages but to eagerly deallocate if freed from another thread,
// but that makes it not possible to visit them during a heap walk or include them in a
// `mi_heap_destroy`. We therefore instead reset/decommit the huge blocks if freed from
// another thread so most memory is available until it gets properly freed by the owning thread.
// #define MI_HUGE_PAGE_ABANDON 1

// ------------------------------------------------------
// Platform specific values
// ------------------------------------------------------

// ------------------------------------------------------
// Size of a pointer.
// We assume that `sizeof(void*)==sizeof(intptr_t)`
// and it holds for all platforms we know of.
//
// However, the C standard only requires that:
//  p == (void*)((intptr_t)p))
// but we also need:
//  i == (intptr_t)((void*)i)
// or otherwise one might define an intptr_t type that is larger than a pointer...
// ------------------------------------------------------

#if INTPTR_MAX > INT64_MAX
# define MI_INTPTR_SHIFT (4)  // assume 128-bit  (as on arm CHERI for example)
#elif INTPTR_MAX == INT64_MAX
# define MI_INTPTR_SHIFT (3)
#elif INTPTR_MAX == INT32_MAX
# define MI_INTPTR_SHIFT (2)
#else
#error platform pointers must be 32, 64, or 128 bits
#endif

#if SIZE_MAX == UINT64_MAX
# define MI_SIZE_SHIFT (3)
typedef int64_t  mi_ssize_t;
#elif SIZE_MAX == UINT32_MAX
# define MI_SIZE_SHIFT (2)
typedef int32_t  mi_ssize_t;
#else
#error platform objects must be 32 or 64 bits
#endif

// Select the literal suffix so `MI_ZU`/`MI_ZI` constants have exactly size_t width.
#if (SIZE_MAX/2) > LONG_MAX
# define MI_ZU(x)  x##ULL
# define MI_ZI(x)  x##LL
#else
# define MI_ZU(x)  x##UL
# define MI_ZI(x)  x##L
#endif

#define MI_INTPTR_SIZE  (1<<MI_INTPTR_SHIFT)
#define MI_INTPTR_BITS  (MI_INTPTR_SIZE*8)

#define MI_SIZE_SIZE  (1<<MI_SIZE_SHIFT)
#define MI_SIZE_BITS  (MI_SIZE_SIZE*8)

#define MI_KiB  (MI_ZU(1024))
#define MI_MiB  (MI_KiB*MI_KiB)
#define MI_GiB  (MI_MiB*MI_KiB)


// ------------------------------------------------------
// Main internal data-structures
// ------------------------------------------------------

// Main tuning parameters for segment and page sizes
// Sizes for 64-bit (usually divide by two for 32-bit)
#define MI_SEGMENT_SLICE_SHIFT   (13 + MI_INTPTR_SHIFT)        // 64KiB  (32KiB on 32-bit)

#if MI_INTPTR_SIZE > 4
#define MI_SEGMENT_SHIFT         ( 9 + MI_SEGMENT_SLICE_SHIFT) // 32MiB
#else
#define MI_SEGMENT_SHIFT         ( 7 + MI_SEGMENT_SLICE_SHIFT) // 4MiB on 32-bit
#endif

#define MI_SMALL_PAGE_SHIFT      (MI_SEGMENT_SLICE_SHIFT)      // 64KiB
#define MI_MEDIUM_PAGE_SHIFT     ( 3 + MI_SMALL_PAGE_SHIFT)    // 512KiB


// Derived constants
#define MI_SEGMENT_SIZE          (MI_ZU(1)<<MI_SEGMENT_SHIFT)
#define MI_SEGMENT_ALIGN         MI_SEGMENT_SIZE
#define MI_SEGMENT_MASK          (MI_SEGMENT_ALIGN - 1)
#define MI_SEGMENT_SLICE_SIZE    (MI_ZU(1)<<MI_SEGMENT_SLICE_SHIFT)
#define MI_SLICES_PER_SEGMENT    (MI_SEGMENT_SIZE / MI_SEGMENT_SLICE_SIZE) // 1024

#define MI_SMALL_PAGE_SIZE       (MI_ZU(1)<<MI_SMALL_PAGE_SHIFT)
#define MI_MEDIUM_PAGE_SIZE      (MI_ZU(1)<<MI_MEDIUM_PAGE_SHIFT)

#define MI_SMALL_OBJ_SIZE_MAX    (MI_SMALL_PAGE_SIZE/4)   // 8KiB on 64-bit
#define MI_MEDIUM_OBJ_SIZE_MAX   (MI_MEDIUM_PAGE_SIZE/4)  // 128KiB on 64-bit
#define MI_MEDIUM_OBJ_WSIZE_MAX  (MI_MEDIUM_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
#define MI_LARGE_OBJ_SIZE_MAX    (MI_SEGMENT_SIZE/2)      // 32MiB on 64-bit
#define MI_LARGE_OBJ_WSIZE_MAX   (MI_LARGE_OBJ_SIZE_MAX/MI_INTPTR_SIZE)

// Maximum number of size classes. (spaced exponentially in 12.5% increments)
#define MI_BIN_HUGE  (73U)

#if (MI_MEDIUM_OBJ_WSIZE_MAX >= 655360)
#error "mimalloc internal: define more bins"
#endif

// Maximum slice offset (15)
#define MI_MAX_SLICE_OFFSET  ((MI_ALIGNMENT_MAX / MI_SEGMENT_SLICE_SIZE) - 1)

// Used as a special value to encode block sizes in 32 bits.
#define MI_HUGE_BLOCK_SIZE   ((uint32_t)(2*MI_GiB))

// blocks up to this size are always allocated aligned
#define MI_MAX_ALIGN_GUARANTEE  (8*MI_MAX_ALIGN_SIZE)

// Alignments over MI_ALIGNMENT_MAX are allocated in dedicated huge page segments
#define MI_ALIGNMENT_MAX  (MI_SEGMENT_SIZE >> 1)

// ------------------------------------------------------
// Mimalloc pages contain allocated blocks
// ------------------------------------------------------

// The free lists use encoded next fields
// (Only actually encodes when MI_ENCODED_FREELIST is defined.)
typedef uintptr_t mi_encoded_t;

// thread id's
typedef size_t mi_threadid_t;

// free lists contain blocks
typedef struct mi_block_s {
  mi_encoded_t next;   // (possibly encoded) pointer to the next free block
} mi_block_t;

// The delayed flags are used for efficient multi-threaded free-ing
typedef enum mi_delayed_e {
  MI_USE_DELAYED_FREE   = 0, // push on the owning heap thread delayed list
  MI_DELAYED_FREEING    = 1, // temporary: another thread is accessing the owning heap
  MI_NO_DELAYED_FREE    = 2, // optimize: push on page local thread free queue if another block is already in the heap thread delayed free list
  MI_NEVER_DELAYED_FREE = 3  // sticky, only resets on page reclaim
} mi_delayed_t;

// The `in_full` and `has_aligned` page flags are put in a union to efficiently
// test if both are false (`full_aligned == 0`) in the `mi_free` routine.
#if !MI_TSAN
typedef union mi_page_flags_s {
  uint8_t full_aligned;       // both flags as one byte for a fast zero test
  struct {
    uint8_t in_full : 1;      // page is in the "full" page queue
    uint8_t has_aligned : 1;  // page contains aligned blocks (adjusted block start)
  } x;
} mi_page_flags_t;
#else
// under thread sanitizer, use a byte for each flag to suppress warning, issue #130
typedef union mi_page_flags_s {
  uint16_t full_aligned;
  struct {
    uint8_t in_full;
    uint8_t has_aligned;
  } x;
} mi_page_flags_t;
#endif

// Thread free list.
// We use the bottom 2 bits of the pointer for mi_delayed_t flags
typedef uintptr_t mi_thread_free_t;

// A page contains blocks of one specific size (`block_size`).
// Each page has three list of free blocks:
// `free` for blocks that can be allocated,
// `local_free` for freed blocks that are not yet available to `mi_malloc`
// `thread_free` for freed blocks by other threads
// The `local_free` and `thread_free` lists are migrated to the `free` list
// when it is exhausted. The separate `local_free` list is necessary to
2019-06-22 18:09:11 +03:00
// implement a monotonic heartbeat. The `thread_free` list is needed for
2019-06-20 02:26:12 +03:00
// avoiding atomic operations in the common case.
//
//
2020-01-16 04:19:01 +03:00
// `used - |thread_free|` == actual blocks that are in use (alive)
// `used - |thread_free| + |free| + |local_free| == capacity`
//
// We don't count `freed` (as |free|) but use `used` to reduce
// the number of memory accesses in the `mi_page_all_free` function(s).
//
2022-12-03 02:23:43 +03:00
// Notes:
2020-01-16 04:19:01 +03:00
// - Access is optimized for `mi_free` and `mi_page_alloc` (in `alloc.c`)
// - Using `uint16_t` does not seem to slow things down
// - The size is 8 words on 64-bit which helps the page index calculations
2022-12-03 02:23:43 +03:00
// (and 10 words on 32-bit, and encoded free lists add 2 words. Sizes 10
2020-01-16 04:19:01 +03:00
// and 12 are still good for address calculation)
2022-12-03 02:23:43 +03:00
// - To limit the structure size, the `xblock_size` is 32-bits only; for
2020-01-16 04:19:01 +03:00
// blocks > MI_HUGE_BLOCK_SIZE the size is determined from the segment page size
// - `thread_free` uses the bottom bits as a delayed-free flags to optimize
// concurrent frees where only the first concurrent free adds to the owning
// heap `thread_delayed_free` list (see `alloc.c:mi_free_block_mt`).
// The invariant is that no-delayed-free is only set if there is
2022-12-03 02:23:43 +03:00
// at least one block that will be added, or as already been added, to
2020-01-16 04:19:01 +03:00
// the owning heap `thread_delayed_free` list. This guarantees that pages
// will be freed correctly even if only other threads free blocks.
2019-06-20 02:26:12 +03:00
typedef struct mi_page_s {
// "owned" by the segment
2019-08-16 09:19:52 +03:00
uint32_t slice_count ; // slices in this page (0 if not a page)
uint32_t slice_offset ; // distance from the actual page data slice (0 if a page)
2022-12-20 04:08:45 +03:00
uint8_t is_reset : 1 ; // `true` if the page memory was reset
uint8_t is_committed : 1 ; // `true` if the page virtual memory is committed
uint8_t is_zero_init : 1 ; // `true` if the page was zero initialized
2019-06-20 02:26:12 +03:00
2019-08-09 21:18:38 +03:00
// layout like this to optimize access in `mi_malloc` and `mi_free`
2019-08-27 08:45:26 +03:00
uint16_t capacity ; // number of blocks committed, must be the first field, see `segment.c:page_clear`
2019-07-10 03:38:58 +03:00
uint16_t reserved ; // number of blocks reserved in memory
2019-10-18 02:48:16 +03:00
mi_page_flags_t flags ; // `in_full` and `has_aligned` flags (8 bits)
2022-12-20 04:08:45 +03:00
uint8_t is_zero : 1 ; // `true` if the blocks in the free list are zero initialized
uint8_t retire_expire : 7 ; // expiration count for retired blocks
2019-08-15 10:46:45 +03:00
2019-06-20 02:26:12 +03:00
mi_block_t * free ; // list of available free blocks (`malloc` allocates from this list)
2022-11-18 21:14:37 +03:00
uint32_t used ; // number of blocks in use (including blocks in `local_free` and `thread_free`)
2022-12-03 02:23:43 +03:00
uint32_t xblock_size ; // size available in each block (always `>0`)
2022-11-18 21:14:37 +03:00
mi_block_t * local_free ; // list of deferred free blocks by this thread (migrates to `free`)
2019-10-29 01:54:33 +03:00
# ifdef MI_ENCODE_FREELIST
2020-02-01 23:15:12 +03:00
uintptr_t keys [ 2 ] ; // two random keys to encode the free lists (see `_mi_block_next`)
2019-07-23 06:51:12 +03:00
# endif
2019-08-21 03:31:46 +03:00
2020-07-26 08:52:27 +03:00
_Atomic ( mi_thread_free_t ) xthread_free ; // list of deferred free blocks freed by other threads
_Atomic ( uintptr_t ) xheap ;
2019-06-20 02:26:12 +03:00
struct mi_page_s * next ; // next page owned by this thread with the same `block_size`
struct mi_page_s * prev ; // previous page owned by this thread with the same `block_size`
2020-01-22 22:29:32 +03:00
// 64-bit 9 words, 32-bit 12 words, (+2 for secure)
# if MI_INTPTR_SIZE==8
uintptr_t padding [ 1 ] ;
# endif
2019-06-20 02:26:12 +03:00
} mi_page_t ;
// The kind of a page determines the range of block sizes it can hold.
typedef enum mi_page_kind_e {
  MI_PAGE_SMALL,   // small blocks go into 64KiB pages inside a segment
  MI_PAGE_MEDIUM,  // medium blocks go into medium pages inside a segment
  MI_PAGE_LARGE,   // larger blocks go into a page of just one block
  MI_PAGE_HUGE,    // huge blocks (> 16 MiB) are put into a single page in a single segment.
} mi_page_kind_t;

// Distinguishes normal segments from dedicated huge-block segments.
typedef enum mi_segment_kind_e {
  MI_SEGMENT_NORMAL, // MI_SEGMENT_SIZE size with pages inside.
  MI_SEGMENT_HUGE,   // > MI_LARGE_SIZE_MAX segment with just one huge page inside.
} mi_segment_kind_t;

// ------------------------------------------------------
// A segment holds a commit mask where a bit is set if
// the corresponding MI_COMMIT_SIZE area is committed.
// The MI_COMMIT_SIZE must be a multiple of the slice
2021-11-14 07:12:03 +03:00
// size. If it is equal we have the most fine grained
2022-02-05 21:57:15 +03:00
// decommit (but setting it higher can be more efficient).
// The MI_MINIMAL_COMMIT_SIZE is the minimal amount that will
// be committed in one go which can be set higher than
// MI_COMMIT_SIZE for efficiency (while the decommit mask
// is still tracked in fine-grained MI_COMMIT_SIZE chunks)
2021-11-14 01:03:16 +03:00
// ------------------------------------------------------
2022-11-22 02:03:15 +03:00
# define MI_MINIMAL_COMMIT_SIZE (16*MI_SEGMENT_SLICE_SIZE) // 1MiB
2022-02-05 22:21:47 +03:00
# define MI_COMMIT_SIZE (MI_SEGMENT_SLICE_SIZE) // 64KiB
2021-11-14 01:03:16 +03:00
# define MI_COMMIT_MASK_BITS (MI_SEGMENT_SIZE / MI_COMMIT_SIZE)
# define MI_COMMIT_MASK_FIELD_BITS MI_SIZE_BITS
# define MI_COMMIT_MASK_FIELD_COUNT (MI_COMMIT_MASK_BITS / MI_COMMIT_MASK_FIELD_BITS)
2019-08-24 22:20:32 +03:00
2021-11-14 01:03:16 +03:00
# if (MI_COMMIT_MASK_BITS != (MI_COMMIT_MASK_FIELD_COUNT * MI_COMMIT_MASK_FIELD_BITS))
2021-11-10 07:19:31 +03:00
# error "the segment size must be exactly divisible by the (commit size * size_t bits)"
2019-08-24 22:20:32 +03:00
# endif
2021-11-10 07:19:31 +03:00
typedef struct mi_commit_mask_s {
2021-11-14 01:03:16 +03:00
size_t mask [ MI_COMMIT_MASK_FIELD_COUNT ] ;
2021-11-10 07:19:31 +03:00
} mi_commit_mask_t ;
2019-08-15 10:46:45 +03:00
2021-11-10 07:19:31 +03:00
// A "slice" is the unit a segment is divided into; a page occupies one or more slices.
typedef mi_page_t  mi_slice_t;

// milli-seconds (signed so expiration arithmetic can go negative)
typedef int64_t    mi_msecs_t;

// Segments are large allocated memory blocks (8mb on 64 bit) from
2019-06-20 02:26:12 +03:00
// the OS. Inside segments we allocated fixed size _pages_ that
// contain blocks.
typedef struct mi_segment_s {
2019-10-31 10:40:41 +03:00
size_t memid ; // memory id for arena allocation
2020-09-09 03:54:58 +03:00
bool mem_is_pinned ; // `true` if we cannot decommit/reset/protect in this memory (i.e. when allocated using large OS pages)
bool mem_is_large ; // in large/huge os pages?
2019-10-18 04:24:35 +03:00
bool mem_is_committed ; // `true` if the whole segment is eagerly committed
2022-11-07 03:23:42 +03:00
size_t mem_alignment ; // page alignment for huge pages (only used for alignment > MI_ALIGNMENT_MAX)
size_t mem_align_offset ; // offset for huge page alignment (only used for alignment > MI_ALIGNMENT_MAX)
2019-10-18 04:24:35 +03:00
2020-09-09 03:54:58 +03:00
bool allow_decommit ;
2019-11-22 06:53:43 +03:00
mi_msecs_t decommit_expire ;
2020-09-06 05:39:10 +03:00
mi_commit_mask_t decommit_mask ;
mi_commit_mask_t commit_mask ;
2019-11-22 06:53:43 +03:00
2020-07-26 09:50:22 +03:00
_Atomic ( struct mi_segment_s * ) abandoned_next ;
2019-10-31 10:40:41 +03:00
2020-09-03 22:13:09 +03:00
// from here is zero initialized
struct mi_segment_s * next ; // the list of freed segments in the cache (must be first field, see `segment.c:mi_segment_init`)
2019-08-24 22:20:32 +03:00
size_t abandoned ; // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
2020-01-28 09:12:23 +03:00
size_t abandoned_visits ; // count how often this segment is visited in the abandoned list (to force reclaim it it is too long)
2019-08-24 22:20:32 +03:00
size_t used ; // count of pages in use
2019-10-18 04:24:35 +03:00
uintptr_t cookie ; // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`
2019-08-24 22:20:32 +03:00
2019-10-18 04:24:35 +03:00
size_t segment_slices ; // for huge segments this may be different from `MI_SLICES_PER_SEGMENT`
size_t segment_info_slices ; // initial slices we are using segment info and possible guard pages.
2019-08-24 22:20:32 +03:00
2019-06-20 02:26:12 +03:00
// layout like this to optimize access in `mi_free`
2019-08-21 03:31:46 +03:00
mi_segment_kind_t kind ;
2019-08-24 22:20:32 +03:00
size_t slice_entries ; // entries in the `slices` array, at most `MI_SLICES_PER_SEGMENT`
2022-11-21 21:22:50 +03:00
_Atomic ( mi_threadid_t ) thread_id ; // unique id of the thread owning this segment
2022-11-07 07:36:51 +03:00
mi_slice_t slices [ MI_SLICES_PER_SEGMENT + 1 ] ; // one more for huge blocks with large alignment
2019-06-20 02:26:12 +03:00
} mi_segment_t ;
// ------------------------------------------------------
// Heaps
// Provide first-class heaps to allocate from.
// A heap just owns a set of pages for allocation and
// can only be allocate/reallocate from the thread that created it.
// Freeing blocks can be done from any thread though.
// Per thread, the segments are shared among its heaps.
// Per thread, there is always a default heap that is
// used for allocation; it is initialized to statically
// point to an empty heap to avoid initialization checks
// in the fast path.
// ------------------------------------------------------

// Thread local data
typedef struct mi_tld_s mi_tld_t;

// Pages of a certain block size are held in a queue.
typedef struct mi_page_queue_s {
  mi_page_t* first;
  mi_page_t* last;
  size_t     block_size;
} mi_page_queue_t;

#define MI_BIN_FULL  (MI_BIN_HUGE+1)

// Random context (chacha20-based; see `random.c`)
typedef struct mi_random_cxt_s {
  uint32_t input[16];     // current cipher state
  uint32_t output[16];    // pre-generated random words
  int      output_available;  // number of random words still available in `output`
  bool     weak;          // `true` if only weakly seeded (fall back entropy source)
} mi_random_ctx_t;

// In debug mode there is a padding structure at the end of the blocks to check for buffer overflows
#if (MI_PADDING)
typedef struct mi_padding_s {
  uint32_t canary; // encoded block value to check validity of the padding (in case of overflow)
  uint32_t delta;  // padding bytes before the block. (mi_usable_size(p) - delta == exact allocated bytes)
} mi_padding_t;
#define MI_PADDING_SIZE   (sizeof(mi_padding_t))
#define MI_PADDING_WSIZE  ((MI_PADDING_SIZE + MI_INTPTR_SIZE - 1) / MI_INTPTR_SIZE)
#else
#define MI_PADDING_SIZE   0
#define MI_PADDING_WSIZE  0
#endif

#define MI_PAGES_DIRECT   (MI_SMALL_WSIZE_MAX + MI_PADDING_WSIZE + 1)

// A heap owns a set of pages.
struct mi_heap_s {
mi_tld_t * tld ;
2020-02-01 07:34:24 +03:00
mi_page_t * pages_free_direct [ MI_PAGES_DIRECT ] ; // optimize: array where every entry points a page with possibly free blocks in the corresponding queue for that size.
mi_page_queue_t pages [ MI_BIN_FULL + 1 ] ; // queue of pages for each size class (or "bin")
2020-07-26 08:52:27 +03:00
_Atomic ( mi_block_t * ) thread_delayed_free ;
2021-11-14 01:46:03 +03:00
mi_threadid_t thread_id ; // thread this heap belongs too
2022-11-02 02:22:51 +03:00
mi_arena_id_t arena_id ; // arena id if the heap belongs to a specific arena (or 0)
2020-02-01 07:34:24 +03:00
uintptr_t cookie ; // random cookie to verify pointers (see `_mi_ptr_cookie`)
2020-02-01 23:15:12 +03:00
uintptr_t keys [ 2 ] ; // two random keys used to encode the `thread_delayed_free` list
2020-02-01 07:34:24 +03:00
mi_random_ctx_t random ; // random number context used for secure allocation
size_t page_count ; // total number of pages in the `pages` queues.
2020-03-17 01:31:37 +03:00
size_t page_retired_min ; // smallest retired index (retired pages are fully free, but still in the page queues)
size_t page_retired_max ; // largest retired index into the `pages` array.
2020-02-13 23:15:23 +03:00
mi_heap_t * next ; // list of heaps per thread
2020-02-01 07:34:24 +03:00
bool no_reclaim ; // `true` if this heap should not reclaim abandoned pages
2019-06-20 02:26:12 +03:00
} ;
// ------------------------------------------------------
// Debug
// ------------------------------------------------------

// byte patterns used to mark uninitialized / freed / padding memory in debug mode
#if !defined(MI_DEBUG_UNINIT)
#define MI_DEBUG_UNINIT   (0xD0)
#endif
#if !defined(MI_DEBUG_FREED)
#define MI_DEBUG_FREED    (0xDF)
#endif
#if !defined(MI_DEBUG_PADDING)
#define MI_DEBUG_PADDING  (0xDE)
#endif

#if (MI_DEBUG)
// use our own assertion to print without memory allocation
void _mi_assert_fail(const char* assertion, const char* fname, unsigned int line, const char* func);
#define mi_assert(expr)  ((expr) ? (void)0 : _mi_assert_fail(#expr,__FILE__,__LINE__,__func__))
#else
#define mi_assert(x)
#endif

#if (MI_DEBUG>1)
#define mi_assert_internal   mi_assert
#else
#define mi_assert_internal(x)
#endif

#if (MI_DEBUG>2)
#define mi_assert_expensive  mi_assert
#else
#define mi_assert_expensive(x)
#endif

// ------------------------------------------------------
// Statistics
// ------------------------------------------------------

#ifndef MI_STAT
#if (MI_DEBUG>0)
#define MI_STAT 2
#else
#define MI_STAT 0
#endif
#endif

// count statistic: tracks allocations and frees with the derived current and peak levels
typedef struct mi_stat_count_s {
  int64_t allocated;
  int64_t freed;
  int64_t peak;
  int64_t current;
} mi_stat_count_t;

// counter statistic: a running total and the number of events
typedef struct mi_stat_counter_s {
  int64_t total;
  int64_t count;
} mi_stat_counter_t;

typedef struct mi_stats_s {
mi_stat_count_t segments ;
mi_stat_count_t pages ;
mi_stat_count_t reserved ;
mi_stat_count_t committed ;
mi_stat_count_t reset ;
2019-07-02 17:23:24 +03:00
mi_stat_count_t page_committed ;
2019-06-20 02:26:12 +03:00
mi_stat_count_t segments_abandoned ;
mi_stat_count_t pages_abandoned ;
mi_stat_count_t threads ;
2020-11-11 11:34:40 +03:00
mi_stat_count_t normal ;
2019-06-20 02:26:12 +03:00
mi_stat_count_t huge ;
2019-08-15 10:46:45 +03:00
mi_stat_count_t large ;
2019-06-20 02:26:12 +03:00
mi_stat_count_t malloc ;
2019-08-08 21:36:13 +03:00
mi_stat_count_t segments_cache ;
2019-10-28 23:43:42 +03:00
mi_stat_counter_t pages_extended ;
mi_stat_counter_t mmap_calls ;
mi_stat_counter_t commit_calls ;
2019-08-08 21:36:13 +03:00
mi_stat_counter_t page_no_retire ;
2019-06-20 02:26:12 +03:00
mi_stat_counter_t searches ;
2020-11-11 11:34:40 +03:00
mi_stat_counter_t normal_count ;
2019-08-11 06:51:37 +03:00
mi_stat_counter_t huge_count ;
2019-08-15 10:46:45 +03:00
mi_stat_counter_t large_count ;
2019-06-20 02:26:12 +03:00
# if MI_STAT>1
2020-11-11 10:57:48 +03:00
mi_stat_count_t normal_bins [ MI_BIN_HUGE + 1 ] ;
2019-06-20 02:26:12 +03:00
# endif
} mi_stats_t ;
void _mi_stat_increase(mi_stat_count_t* stat, size_t amount);
void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount);
void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount);

// when statistics are disabled the macros compile to no-ops
#if (MI_STAT)
#define mi_stat_increase(stat,amount)          _mi_stat_increase( &(stat), amount)
#define mi_stat_decrease(stat,amount)          _mi_stat_decrease( &(stat), amount)
#define mi_stat_counter_increase(stat,amount)  _mi_stat_counter_increase( &(stat), amount)
#else
#define mi_stat_increase(stat,amount)          (void)0
#define mi_stat_decrease(stat,amount)          (void)0
#define mi_stat_counter_increase(stat,amount)  (void)0
#endif

#define mi_heap_stat_counter_increase(heap,stat,amount)  mi_stat_counter_increase( (heap)->tld->stats.stat, amount)
#define mi_heap_stat_increase(heap,stat,amount)          mi_stat_increase( (heap)->tld->stats.stat, amount)
#define mi_heap_stat_decrease(heap,stat,amount)          mi_stat_decrease( (heap)->tld->stats.stat, amount)

// ------------------------------------------------------
// Thread Local data
// ------------------------------------------------------
2019-08-24 22:20:32 +03:00
// A "span" is is an available range of slices. The span queues keep
// track of slice spans of at most the given `slice_count` (but more than the previous size class).
typedef struct mi_span_queue_s {
mi_slice_t * first ;
mi_slice_t * last ;
size_t slice_count ;
} mi_span_queue_t ;
2019-06-20 02:26:12 +03:00
2019-08-15 21:49:56 +03:00
# define MI_SEGMENT_BIN_MAX (35) // 35 == mi_segment_bin(MI_SLICES_PER_SEGMENT)
2019-06-20 02:26:12 +03:00
2019-11-04 22:48:41 +03:00
// OS thread local data
typedef struct mi_os_tld_s {
2019-11-21 01:55:12 +03:00
size_t region_idx ; // start point for next allocation
mi_stats_t * stats ; // points to tld stats
2019-11-04 22:48:41 +03:00
} mi_os_tld_t ;
2019-06-20 02:26:12 +03:00
2019-11-22 04:03:30 +03:00
2019-06-20 02:26:12 +03:00
// Segments thread local data
typedef struct mi_segments_tld_s {
2019-08-24 22:20:32 +03:00
mi_span_queue_t spans [ MI_SEGMENT_BIN_MAX + 1 ] ; // free slice spans inside segments
2019-07-02 17:23:24 +03:00
size_t count ; // current number of segments;
size_t peak_count ; // peak number of segments
2019-06-24 09:15:42 +03:00
size_t current_size ; // current size of all segments
size_t peak_size ; // peak size of all segments
2019-06-20 02:26:12 +03:00
mi_stats_t * stats ; // points to tld stats
2019-11-04 22:48:41 +03:00
mi_os_tld_t * os ; // points to os stats
2019-06-20 02:26:12 +03:00
} mi_segments_tld_t ;
// Thread local data
struct mi_tld_s {
unsigned long long heartbeat ; // monotonic heartbeat count
2019-09-09 18:02:41 +03:00
bool recurse ; // true if deferred was called; used to prevent infinite recursion.
2019-06-20 02:26:12 +03:00
mi_heap_t * heap_backing ; // backing heap of this thread (cannot be deleted)
2020-02-13 23:15:23 +03:00
mi_heap_t * heaps ; // list of heaps in this thread (so we can abandon all when the thread terminates)
2019-06-24 07:37:43 +03:00
mi_segments_tld_t segments ; // segment tld
2019-06-20 02:26:12 +03:00
mi_os_tld_t os ; // os tld
mi_stats_t stats ; // statistics
} ;
# endif