merge from dev

daan 2020-03-16 16:41:21 -07:00
commit 1f396e64a0
34 changed files with 2041 additions and 554 deletions

View File

@@ -5,11 +5,12 @@ set(CMAKE_C_STANDARD 11)
set(CMAKE_CXX_STANDARD 17)
option(MI_OVERRIDE "Override the standard malloc interface" ON)
option(MI_INTERPOSE "Use interpose to override standard malloc on macOS" ON)
option(MI_DEBUG_FULL "Use full internal heap invariant checking in DEBUG mode" OFF)
option(MI_SECURE "Use full security mitigations (like guard pages, allocation randomization, double-free mitigation, and free-list corruption detection)" OFF)
option(MI_USE_CXX "Use the C++ compiler to compile the library" OFF)
option(MI_SEE_ASM "Generate assembly files" OFF)
option(MI_INTERPOSE "Use interpose to override standard malloc on macOS" ON)
option(MI_OSX_ZONE "Use malloc zone to override standard malloc on macOS" OFF) # enables interpose as well
option(MI_LOCAL_DYNAMIC_TLS "Use slightly slower, dlopen-compatible TLS mechanism (Unix)" OFF)
option(MI_BUILD_TESTS "Build test executables" ON)
option(MI_CHECK_FULL "Use full internal invariant checking in DEBUG mode (deprecated, use MI_DEBUG_FULL instead)" OFF)
@@ -60,14 +61,19 @@ endif()
if(MI_OVERRIDE MATCHES "ON")
message(STATUS "Override standard malloc (MI_OVERRIDE=ON)")
if(APPLE)
if(MI_OSX_ZONE MATCHES "ON")
# use zones on macOS
message(STATUS " Use malloc zone to override malloc (MI_OSX_ZONE=ON)")
list(APPEND mi_sources src/alloc-override-osx.c)
if(NOT MI_INTERPOSE MATCHES "ON")
message(STATUS " (enabling INTERPOSE as well since zone's require this)")
set(MI_INTERPOSE "ON")
endif()
endif()
if(MI_INTERPOSE MATCHES "ON")
# use interpose on macOS
message(STATUS " Use interpose to override malloc (MI_INTERPOSE=ON)")
list(APPEND mi_defines MI_INTERPOSE)
else()
# use zones on macOS
message(STATUS " Use zones to override malloc (MI_INTERPOSE=OFF)")
list(APPEND mi_sources src/alloc-override-osx.c)
endif()
endif()
endif()
@@ -124,6 +130,11 @@ if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang|GNU|Intel")
endif()
endif()
# Architecture flags
if(${CMAKE_HOST_SYSTEM_PROCESSOR} MATCHES "arm")
list(APPEND mi_cflags -march=native)
endif()
# extra needed libraries
if(WIN32)
list(APPEND mi_libraries psapi shell32 user32 bcrypt)
@@ -209,7 +220,7 @@ if(NOT WIN32)
# install a symlink in the /usr/local/lib to the versioned library
set(mi_symlink "${CMAKE_SHARED_MODULE_PREFIX}${mi_basename}${CMAKE_SHARED_LIBRARY_SUFFIX}")
set(mi_soname "mimalloc-${mi_version}/${mi_symlink}.${mi_version}")
install(CODE "execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink ${mi_soname} ${mi_symlink} WORKING_DIRECTORY ${CMAKE_INSTALL_PREFIX}/${mi_install_dir}/..)")
install(CODE "execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink ${mi_soname} ${mi_symlink} WORKING_DIRECTORY ${mi_install_dir}/..)")
install(CODE "MESSAGE(\"-- Symbolic link: ${CMAKE_INSTALL_PREFIX}/lib/${mi_symlink} -> ${mi_soname}\")")
endif()
@@ -246,7 +257,7 @@ if (MI_BUILD_TESTS MATCHES "ON")
target_compile_definitions(mimalloc-test-stress PRIVATE ${mi_defines})
target_compile_options(mimalloc-test-stress PRIVATE ${mi_cflags})
target_include_directories(mimalloc-test-stress PRIVATE include)
target_link_libraries(mimalloc-test-stress PRIVATE mimalloc-static ${mi_libraries})
target_link_libraries(mimalloc-test-stress PRIVATE mimalloc ${mi_libraries})
enable_testing()
add_test(test_api mimalloc-test-api)

View File

@@ -1,5 +1,5 @@
set(mi_version_major 1)
set(mi_version_minor 5)
set(mi_version_minor 6)
set(mi_version ${mi_version_major}.${mi_version_minor})
set(PACKAGE_VERSION ${mi_version})

View File

@@ -95,7 +95,11 @@
<SDLCheck>true</SDLCheck>
<ConformanceMode>true</ConformanceMode>
<AdditionalIncludeDirectories>../../include</AdditionalIncludeDirectories>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;MI_DEBUG=3;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);</PreprocessorDefinitions>
<RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
<SupportJustMyCode>false</SupportJustMyCode>
<CompileAs>Default</CompileAs>
@@ -123,7 +127,11 @@
<SDLCheck>true</SDLCheck>
<ConformanceMode>true</ConformanceMode>
<AdditionalIncludeDirectories>../../include</AdditionalIncludeDirectories>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;MI_DEBUG=3;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);</PreprocessorDefinitions>
<RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
<SupportJustMyCode>false</SupportJustMyCode>
<CompileAs>Default</CompileAs>
@@ -152,7 +160,7 @@
<IntrinsicFunctions>true</IntrinsicFunctions>
<ConformanceMode>true</ConformanceMode>
<AdditionalIncludeDirectories>../../include</AdditionalIncludeDirectories>
<PreprocessorDefinitions>MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG</PreprocessorDefinitions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG</PreprocessorDefinitions>
<AssemblerOutput>AssemblyAndSourceCode</AssemblerOutput>
<AssemblerListingLocation>$(IntDir)</AssemblerListingLocation>
<WholeProgramOptimization>false</WholeProgramOptimization>
@@ -184,7 +192,7 @@
<IntrinsicFunctions>true</IntrinsicFunctions>
<ConformanceMode>true</ConformanceMode>
<AdditionalIncludeDirectories>../../include</AdditionalIncludeDirectories>
<PreprocessorDefinitions>MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG</PreprocessorDefinitions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG</PreprocessorDefinitions>
<AssemblerOutput>AssemblyAndSourceCode</AssemblerOutput>
<AssemblerListingLocation>$(IntDir)</AssemblerListingLocation>
<WholeProgramOptimization>false</WholeProgramOptimization>

View File

@@ -149,8 +149,8 @@
</ClCompile>
</ItemGroup>
<ItemGroup>
<ProjectReference Include="mimalloc-override.vcxproj">
<Project>{abb5eae7-b3e6-432e-b636-333449892ea7}</Project>
<ProjectReference Include="mimalloc.vcxproj">
<Project>{abb5eae7-b3e6-432e-b636-333449892ea6}</Project>
</ProjectReference>
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />

View File

@@ -90,6 +90,18 @@
<TargetExt>.lib</TargetExt>
<TargetName>mimalloc-static</TargetName>
</PropertyGroup>
<PropertyGroup Label="LLVM" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<UseLlvmLib>false</UseLlvmLib>
</PropertyGroup>
<PropertyGroup Label="LLVM" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<UseLlvmLib>false</UseLlvmLib>
</PropertyGroup>
<PropertyGroup Label="LLVM" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<UseLlvmLib>false</UseLlvmLib>
</PropertyGroup>
<PropertyGroup Label="LLVM" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<UseLlvmLib>false</UseLlvmLib>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
@@ -97,7 +109,7 @@
<SDLCheck>true</SDLCheck>
<ConformanceMode>true</ConformanceMode>
<AdditionalIncludeDirectories>../../include</AdditionalIncludeDirectories>
<PreprocessorDefinitions>MI_DEBUG=3;%(PreprocessorDefinitions);</PreprocessorDefinitions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;MI_DEBUG=3;%(PreprocessorDefinitions);</PreprocessorDefinitions>
<CompileAs>CompileAsCpp</CompileAs>
<SupportJustMyCode>false</SupportJustMyCode>
<LanguageStandard>stdcpp17</LanguageStandard>
@@ -116,7 +128,7 @@
<SDLCheck>true</SDLCheck>
<ConformanceMode>true</ConformanceMode>
<AdditionalIncludeDirectories>../../include</AdditionalIncludeDirectories>
<PreprocessorDefinitions>MI_DEBUG=3;%(PreprocessorDefinitions);</PreprocessorDefinitions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;MI_DEBUG=3;%(PreprocessorDefinitions);</PreprocessorDefinitions>
<CompileAs>CompileAsCpp</CompileAs>
<SupportJustMyCode>false</SupportJustMyCode>
<LanguageStandard>stdcpp17</LanguageStandard>
@@ -143,7 +155,7 @@
<FunctionLevelLinking>true</FunctionLevelLinking>
<ConformanceMode>true</ConformanceMode>
<AdditionalIncludeDirectories>../../include</AdditionalIncludeDirectories>
<PreprocessorDefinitions>%(PreprocessorDefinitions);NDEBUG</PreprocessorDefinitions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions);NDEBUG</PreprocessorDefinitions>
<AssemblerOutput>AssemblyAndSourceCode</AssemblerOutput>
<AssemblerListingLocation>$(IntDir)</AssemblerListingLocation>
<WholeProgramOptimization>false</WholeProgramOptimization>
@@ -170,7 +182,7 @@
<FunctionLevelLinking>true</FunctionLevelLinking>
<ConformanceMode>true</ConformanceMode>
<AdditionalIncludeDirectories>../../include</AdditionalIncludeDirectories>
<PreprocessorDefinitions>%(PreprocessorDefinitions);NDEBUG</PreprocessorDefinitions>
<PreprocessorDefinitions>_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions);NDEBUG</PreprocessorDefinitions>
<AssemblerOutput>AssemblyAndSourceCode</AssemblerOutput>
<AssemblerListingLocation>$(IntDir)</AssemblerListingLocation>
<WholeProgramOptimization>false</WholeProgramOptimization>

View File

@@ -1,9 +1,6 @@
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<ClCompile Include="..\..\src\options.c">
<Filter>Header Files</Filter>
</ClCompile>
<ClCompile Include="..\..\src\alloc.c">
<Filter>Source Files</Filter>
</ClCompile>
@@ -46,6 +43,9 @@
<ClCompile Include="..\..\src\random.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\src\options.c">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="$(ProjectDir)..\..\include\mimalloc.h">
@@ -75,4 +75,4 @@
<UniqueIdentifier>{39cb7e38-69d0-43fb-8406-6a0f7cefc3b4}</UniqueIdentifier>
</Filter>
</ItemGroup>
</Project>
</Project>

View File

@@ -0,0 +1,155 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<VCProjectVersion>15.0</VCProjectVersion>
<ProjectGuid>{FFF7958F-750E-4C21-A04D-22707CC66878}</ProjectGuid>
<RootNamespace>mimalloc-test-api</RootNamespace>
<WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>
<ProjectName>mimalloc-test-api</ProjectName>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v142</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v142</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v142</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v142</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="Shared">
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<OutDir>$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\</IntDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<OutDir>$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\</IntDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<OutDir>$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\</IntDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<OutDir>$(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\</IntDir>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<SDLCheck>true</SDLCheck>
<ConformanceMode>true</ConformanceMode>
<AdditionalIncludeDirectories>..\..\include</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<SDLCheck>true</SDLCheck>
<ConformanceMode>true</ConformanceMode>
<AdditionalIncludeDirectories>..\..\include</AdditionalIncludeDirectories>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<SDLCheck>true</SDLCheck>
<ConformanceMode>true</ConformanceMode>
<AdditionalIncludeDirectories>..\..\include</AdditionalIncludeDirectories>
<PreprocessorDefinitions>%(PreprocessorDefinitions);NDEBUG</PreprocessorDefinitions>
</ClCompile>
<Link>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<SubSystem>Console</SubSystem>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<SDLCheck>true</SDLCheck>
<ConformanceMode>true</ConformanceMode>
<AdditionalIncludeDirectories>..\..\include</AdditionalIncludeDirectories>
<PreprocessorDefinitions>%(PreprocessorDefinitions);NDEBUG</PreprocessorDefinitions>
</ClCompile>
<Link>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<SubSystem>Console</SubSystem>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="..\..\test\test-api.c">
</ClCompile>
</ItemGroup>
<ItemGroup>
<ProjectReference Include="mimalloc.vcxproj">
<Project>{abb5eae7-b3e6-432e-b636-333449892ea6}</Project>
</ProjectReference>
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>

View File

@@ -1,7 +1,7 @@

Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio 15
VisualStudioVersion = 15.0.28010.2016
# Visual Studio Version 16
VisualStudioVersion = 16.0.29709.97
MinimumVisualStudioVersion = 10.0.40219.1
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc", "mimalloc.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA6}"
EndProject
@@ -13,6 +13,8 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override-test", "m
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-stress", "mimalloc-test-stress.vcxproj", "{FEF7958F-750E-4C21-A04D-22707CC66878}"
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-api", "mimalloc-test-api.vcxproj", "{FFF7958F-750E-4C21-A04D-22707CC66878}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|x64 = Debug|x64
@@ -61,6 +63,14 @@ Global
{FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64
{FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32
{FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32
{FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64
{FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64
{FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32
{FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32
{FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64
{FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64
{FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32
{FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE

View File

@@ -151,6 +151,7 @@
<InlineFunctionExpansion>Default</InlineFunctionExpansion>
<CompileAs>CompileAsCpp</CompileAs>
<IntrinsicFunctions>true</IntrinsicFunctions>
<LanguageStandard>Default</LanguageStandard>
</ClCompile>
<Link>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
@@ -178,6 +179,7 @@
<InlineFunctionExpansion>Default</InlineFunctionExpansion>
<CompileAs>CompileAsCpp</CompileAs>
<IntrinsicFunctions>true</IntrinsicFunctions>
<LanguageStandard>Default</LanguageStandard>
</ClCompile>
<Link>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
@@ -247,4 +249,4 @@
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>
</Project>

View File

@@ -0,0 +1,722 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2019, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#pragma once
#ifndef MIMALLOC_INTERNAL_TLD_H
#define MIMALLOC_INTERNAL_TLD_H
#include "mimalloc-types.h"
#include "mimalloc-internal.h"
#define MI_TLD_DECL 1 // thread local declaration
#define MI_TLD_PTHREAD 2 // pthread_get/setspecific
#define MI_TLD_DECL_GUARD 3 // thread local + recursion guard at initial load
#define MI_TLD_PTHREAD_GUARD 4 // pthread_get/setspecific + recursion guard at initial load
#define MI_TLD_SLOT 5 // steal slot from OS thread local predefined slots
#define MI_TLD_PTHREAD_SLOT 6 // steal slot from pthread structure (usually `retval`)
#if !defined(MI_TLD)
#if defined(_MSC_VER) || defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__)
// on Windows and Linux/FreeBSD/NetBSD (with initial-exec) a __thread declaration always works without recursing into malloc
#define MI_TLD MI_TLD_DECL
#elif !defined(MI_MALLOC_OVERRIDE)
// if not overriding, __thread declarations should be fine (use MI_TLD_PTHREAD if your OS does not have __thread)
#define MI_TLD MI_TLD_DECL
#else // defined(MI_MALLOC_OVERRIDE)
// if overriding, some BSD variants allocate when accessing a thread local the first time
#if defined(__APPLE__)
#define MI_TLD MI_TLD_SLOT
#define MI_TLD_SLOT_NUM 89 // seems unused? (__PTK_FRAMEWORK_OLDGC_KEY9) see <https://github.com/rweichler/substrate/blob/master/include/pthread_machdep.h>
// possibly unused ones are 9, 29, __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY4 (94), __PTK_FRAMEWORK_GC_KEY9 (112) and __PTK_FRAMEWORK_OLDGC_KEY9 (89)
// #define MI_TLD MI_TLD_PTHREAD_SLOT
// #define MI_TLD_PTHREAD_SLOT_OFS (2*sizeof(void*) + sizeof(long) + 2*sizeof(void*) /*TAILQ*/) // offset `tl_exit_value` <https://github.com/apple/darwin-libpthread/blob/master/src/internal.h#L184>
#elif defined(__OpenBSD__)
#define MI_TLD MI_TLD_PTHREAD_SLOT
#define MI_TLD_PTHREAD_SLOT_OFS (6*sizeof(int) + 1*sizeof(void*)) // offset `retval` <https://github.com/openbsd/src/blob/master/lib/libc/include/thread_private.h#L371>
#elif defined(__DragonFly__)
#define MI_TLD MI_TLD_PTHREAD_SLOT
#define MI_TLD_PTHREAD_SLOT_OFS (4 + 1*sizeof(void*)) // offset `uniqueid` (also used by gdb?) <https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/lib/libthread_xu/thread/thr_private.h#L458>
#endif
#endif
#endif
#if (MI_DEBUG>0)
#define mi_trace_message(...) _mi_trace_message(__VA_ARGS__)
#else
#define mi_trace_message(...)
#endif
#define MI_CACHE_LINE 64
#if defined(_MSC_VER)
#pragma warning(disable:4127) // suppress constant conditional warning (due to MI_SECURE paths)
#define mi_decl_noinline __declspec(noinline)
#define mi_decl_thread __declspec(thread)
#define mi_decl_cache_align __declspec(align(MI_CACHE_LINE))
#elif (defined(__GNUC__) && (__GNUC__>=3)) // includes clang and icc
#define mi_decl_noinline __attribute__((noinline))
#define mi_decl_thread __thread
#define mi_decl_cache_align __attribute__((aligned(MI_CACHE_LINE)))
#else
#define mi_decl_noinline
#define mi_decl_thread __thread // hope for the best :-)
#define mi_decl_cache_align
#endif
// "options.c"
void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message);
void _mi_fprintf(mi_output_fun* out, void* arg, const char* fmt, ...);
void _mi_warning_message(const char* fmt, ...);
void _mi_verbose_message(const char* fmt, ...);
void _mi_trace_message(const char* fmt, ...);
void _mi_options_init(void);
void _mi_error_message(int err, const char* fmt, ...);
// random.c
void _mi_random_init(mi_random_ctx_t* ctx);
void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* new_ctx);
uintptr_t _mi_random_next(mi_random_ctx_t* ctx);
uintptr_t _mi_heap_random_next(mi_heap_t* heap);
uintptr_t _os_random_weak(uintptr_t extra_seed);
static inline uintptr_t _mi_random_shuffle(uintptr_t x);
// init.c
extern mi_stats_t _mi_stats_main;
extern const mi_page_t _mi_page_empty;
bool _mi_is_main_thread(void);
bool _mi_preloading(void); // true while the C runtime is not ready
// os.c
size_t _mi_os_page_size(void);
void _mi_os_init(void); // called from process init
void* _mi_os_alloc(size_t size, mi_stats_t* stats); // to allocate thread local data
void _mi_os_free(void* p, size_t size, mi_stats_t* stats); // to free thread local data
size_t _mi_os_good_alloc_size(size_t size);
// memory.c
void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_zero, size_t* id, mi_os_tld_t* tld);
void _mi_mem_free(void* p, size_t size, size_t id, bool fully_committed, bool any_reset, mi_os_tld_t* tld);
bool _mi_mem_reset(void* p, size_t size, mi_os_tld_t* tld);
bool _mi_mem_unreset(void* p, size_t size, bool* is_zero, mi_os_tld_t* tld);
bool _mi_mem_commit(void* p, size_t size, bool* is_zero, mi_os_tld_t* tld);
bool _mi_mem_protect(void* addr, size_t size);
bool _mi_mem_unprotect(void* addr, size_t size);
void _mi_mem_collect(mi_os_tld_t* tld);
// "segment.c"
mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_wsize, mi_segments_tld_t* tld, mi_os_tld_t* os_tld);
void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld);
void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld);
uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t block_size, size_t* page_size, size_t* pre_size); // page start for any page
void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
void _mi_segment_thread_collect(mi_segments_tld_t* tld);
void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld);
void _mi_abandoned_await_readers(void);
// "page.c"
void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc;
void _mi_page_retire(mi_page_t* page); // free the page if there are no other pages with many free blocks
void _mi_page_unfull(mi_page_t* page);
void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force); // free the page
void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); // abandon the page, to be picked up by another thread...
void _mi_heap_delayed_free(mi_heap_t* heap);
void _mi_heap_collect_retired(mi_heap_t* heap, bool force);
void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append);
void _mi_deferred_free(mi_heap_t* heap, bool force);
void _mi_page_free_collect(mi_page_t* page,bool force);
void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page); // callback from segments
size_t _mi_bin_size(uint8_t bin); // for stats
uint8_t _mi_bin(size_t size); // for stats
uint8_t _mi_bsr(uintptr_t x); // bit-scan-right, used on BSD in "os.c"
// "heap.c"
void _mi_heap_destroy_pages(mi_heap_t* heap);
void _mi_heap_collect_abandon(mi_heap_t* heap);
void _mi_heap_set_default_direct(mi_heap_t* heap);
// "stats.c"
void _mi_stats_done(mi_stats_t* stats);
mi_msecs_t _mi_clock_now(void);
mi_msecs_t _mi_clock_end(mi_msecs_t start);
mi_msecs_t _mi_clock_start(void);
// "alloc.c"
void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept; // called from `_mi_malloc_generic`
void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero);
void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero);
mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p);
bool _mi_free_delayed_block(mi_block_t* block);
void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size);
#if MI_DEBUG>1
bool _mi_page_is_valid(mi_page_t* page);
#endif
// ------------------------------------------------------
// Branches
// ------------------------------------------------------
#if defined(__GNUC__) || defined(__clang__)
#define mi_unlikely(x) __builtin_expect((x),0)
#define mi_likely(x) __builtin_expect((x),1)
#else
#define mi_unlikely(x) (x)
#define mi_likely(x) (x)
#endif
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif
/* -----------------------------------------------------------
Error codes passed to `_mi_fatal_error`
All are recoverable but EFAULT is a serious error and aborts by default in secure mode.
For portability define undefined error codes using common Unix codes:
<https://www-numi.fnal.gov/offline_software/srt_public_context/WebDocs/Errors/unix_system_errors.html>
----------------------------------------------------------- */
#include <errno.h>
#ifndef EAGAIN // double free
#define EAGAIN (11)
#endif
#ifndef ENOMEM // out of memory
#define ENOMEM (12)
#endif
#ifndef EFAULT // corrupted free-list or meta-data
#define EFAULT (14)
#endif
#ifndef EINVAL // trying to free an invalid pointer
#define EINVAL (22)
#endif
#ifndef EOVERFLOW // count*size overflow
#define EOVERFLOW (75)
#endif
/* -----------------------------------------------------------
Inlined definitions
----------------------------------------------------------- */
#define UNUSED(x) (void)(x)
#if (MI_DEBUG>0)
#define UNUSED_RELEASE(x)
#else
#define UNUSED_RELEASE(x) UNUSED(x)
#endif
#define MI_INIT4(x) x(),x(),x(),x()
#define MI_INIT8(x) MI_INIT4(x),MI_INIT4(x)
#define MI_INIT16(x) MI_INIT8(x),MI_INIT8(x)
#define MI_INIT32(x) MI_INIT16(x),MI_INIT16(x)
#define MI_INIT64(x) MI_INIT32(x),MI_INIT32(x)
#define MI_INIT128(x) MI_INIT64(x),MI_INIT64(x)
#define MI_INIT256(x) MI_INIT128(x),MI_INIT128(x)
// Is `x` a power of two? (0 is considered a power of two)
static inline bool _mi_is_power_of_two(uintptr_t x) {
return ((x & (x - 1)) == 0);
}
// Align upwards
static inline uintptr_t _mi_align_up(uintptr_t sz, size_t alignment) {
mi_assert_internal(alignment != 0);
uintptr_t mask = alignment - 1;
if ((alignment & mask) == 0) { // power of two?
return ((sz + mask) & ~mask);
}
else {
return (((sz + mask)/alignment)*alignment);
}
}
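As a quick illustration (the helper name below is hypothetical, not part of this header), both branches of `_mi_align_up` agree with the usual rounding intuition; the power-of-two case reduces to a mask:
#include <assert.h>
// illustrative demo only
static inline void mi_align_up_demo(void) {
  assert(_mi_align_up(13, 8) == 16);   // power-of-two path: (13+7) & ~7
  assert(_mi_align_up(16, 8) == 16);   // already aligned stays put
  assert(_mi_align_up(13, 24) == 24);  // general path: ((13+23)/24)*24
}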
// Divide upwards: `s <= _mi_divide_up(s,d)*d < s+d`.
static inline uintptr_t _mi_divide_up(uintptr_t size, size_t divider) {
mi_assert_internal(divider != 0);
return (divider == 0 ? size : ((size + divider - 1) / divider));
}
// Is memory zero initialized?
static inline bool mi_mem_is_zero(void* p, size_t size) {
for (size_t i = 0; i < size; i++) {
if (((uint8_t*)p)[i] != 0) return false;
}
return true;
}
// Align a byte size to a size in _machine words_,
// i.e. byte size == `wsize*sizeof(void*)`.
static inline size_t _mi_wsize_from_size(size_t size) {
mi_assert_internal(size <= SIZE_MAX - sizeof(uintptr_t));
return (size + sizeof(uintptr_t) - 1) / sizeof(uintptr_t);
}
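For intuition, a hedged demo (hypothetical name, assuming a 64-bit platform where `sizeof(uintptr_t)` is 8):
#include <assert.h>
static inline void mi_wsize_demo(void) {
  assert(_mi_wsize_from_size(1) == 1);  // anything up to one word costs one word
  assert(_mi_wsize_from_size(8) == 1);  // exactly one word
  assert(_mi_wsize_from_size(9) == 2);  // spills into a second word
}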
// Overflow detecting multiply
static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) {
#if __has_builtin(__builtin_umul_overflow) || __GNUC__ >= 5
#include <limits.h> // UINT_MAX, ULONG_MAX
#if (SIZE_MAX == UINT_MAX)
return __builtin_umul_overflow(count, size, total);
#elif (SIZE_MAX == ULONG_MAX)
return __builtin_umull_overflow(count, size, total);
#else
return __builtin_umulll_overflow(count, size, total);
#endif
#else /* __builtin_umul_overflow is unavailable */
#define MI_MUL_NO_OVERFLOW ((size_t)1 << (4*sizeof(size_t))) // sqrt(SIZE_MAX)
*total = count * size;
return ((size >= MI_MUL_NO_OVERFLOW || count >= MI_MUL_NO_OVERFLOW)
&& size > 0 && (SIZE_MAX / size) < count);
#endif
}
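The portable fallback rests on a simple bound: `MI_MUL_NO_OVERFLOW` is `sqrt(SIZE_MAX)`, so when both factors lie below it the product cannot wrap, and the division check is only consulted for larger factors. A hedged sketch (hypothetical name, assuming a 64-bit `size_t` where the bound is 2^32):
#include <assert.h>
static inline void mi_mul_overflow_demo(void) {
  size_t total;
  assert(!mi_mul_overflow((size_t)1 << 16, (size_t)1 << 16, &total)); // 2^32 fits
  assert(mi_mul_overflow((size_t)1 << 33, (size_t)1 << 33, &total));  // 2^66 wraps
}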
// Safe multiply `count*size` into `total`; return `true` on overflow.
static inline bool mi_count_size_overflow(size_t count, size_t size, size_t* total) {
if (count==1) { // quick check for the case where count is one (common for C++ allocators)
*total = size;
return false;
}
else if (mi_unlikely(mi_mul_overflow(count, size, total))) {
_mi_error_message(EOVERFLOW, "allocation request too large (%zu * %zu bytes)\n", count, size);
*total = SIZE_MAX;
return true;
}
else return false;
}
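A minimal usage sketch (the wrapper name is hypothetical, not from this commit): this is the shape of check a `calloc`-style entry point performs before allocating:
// hypothetical calloc-style wrapper, illustration only
static inline void* my_calloc_checked(size_t count, size_t size) {
  size_t total;
  if (mi_count_size_overflow(count, size, &total)) return NULL; // EOVERFLOW was reported
  return _mi_heap_malloc_zero(mi_get_default_heap(), total, true);
}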
/* -----------------------------------------------------------
The thread local default heap
----------------------------------------------------------- */
extern const mi_heap_t _mi_heap_empty; // read-only empty heap, initial value of the thread local default heap
extern mi_heap_t _mi_heap_main; // statically allocated main backing heap
extern bool _mi_process_is_initialized;
#if defined(MI_TLS_OSX_FAST)
#define MI_TLS_OSX_OFFSET (MI_TLS_OSX_SLOT*sizeof(void*))
static inline void* mi_tls_osx_fast_get(void) {
void* ret;
__asm__("mov %%gs:%1, %0" : "=r" (ret) : "m" (*(void**)(MI_TLS_OSX_OFFSET)));
return ret;
}
static inline void mi_tls_osx_fast_set(void* value) {
__asm__("movq %1,%%gs:%0" : "=m" (*(void**)(MI_TLS_OSX_OFFSET)) : "rn" (value));
}
#elif defined(MI_TLS_PTHREADS)
extern pthread_key_t _mi_heap_default_key;
#else
extern mi_decl_thread mi_heap_t* _mi_heap_default; // default heap to allocate from
#endif
static inline mi_heap_t* mi_get_default_heap(void) {
#if defined(MI_TLS_OSX_FAST)
// Use a fixed slot in the TSD on MacOSX to avoid recursion (since the loader calls malloc).
// We use slot 94 (__PTK_FRAMEWORK_JAVASCRIPTCORE_KEY4) <https://github.com/apportable/Foundation/blob/master/System/System/src/pthread_machdep.h>
// which seems unused except for the more recent Webkit <https://github.com/WebKit/webkit/blob/master/Source/WTF/wtf/FastTLS.h>
// Use with care.
mi_heap_t* heap = (mi_heap_t*)mi_tls_osx_fast_get();
return (mi_unlikely(heap == NULL) ? (mi_heap_t*)&_mi_heap_empty : heap);
#elif defined(MI_TLS_PTHREADS)
// Use pthreads for TLS; this is used on macOSX with interpose as the loader calls `malloc`
// to allocate TLS storage leading to recursive calls if __thread declared variables are accessed.
// Using pthreads allows us to initialize without recursive calls. (performance seems still quite good).
mi_heap_t* heap = (mi_unlikely(_mi_heap_default_key == (pthread_key_t)(-1)) ? (mi_heap_t*)&_mi_heap_empty : (mi_heap_t*)pthread_getspecific(_mi_heap_default_key));
return (mi_unlikely(heap == NULL) ? (mi_heap_t*)&_mi_heap_empty : heap);
#else
#if defined(MI_TLS_RECURSE_GUARD)
// On some BSD platforms, like OpenBSD, the dynamic loader calls `malloc`
// to initialize thread local data (before our module is loaded).
// To avoid recursion, we need to avoid accessing the thread local `_mi_heap_default`
// until our module is loaded and use the statically allocated main heap until that time.
// TODO: patch ourselves dynamically to avoid this check every time?
// if (mi_unlikely(!_mi_process_is_initialized)) return &_mi_heap_main;
#endif
return _mi_heap_default;
#endif
}
static inline bool mi_heap_is_default(const mi_heap_t* heap) {
return (heap == mi_get_default_heap());
}
static inline bool mi_heap_is_backing(const mi_heap_t* heap) {
return (heap->tld->heap_backing == heap);
}
static inline bool mi_heap_is_initialized(mi_heap_t* heap) {
mi_assert_internal(heap != NULL);
return (heap != &_mi_heap_empty);
}
static inline uintptr_t _mi_ptr_cookie(const void* p) {
mi_assert_internal(_mi_heap_main.cookie != 0);
return ((uintptr_t)p ^ _mi_heap_main.cookie);
}
/* -----------------------------------------------------------
Pages
----------------------------------------------------------- */
static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t size) {
mi_assert_internal(size <= (MI_SMALL_SIZE_MAX + MI_PADDING_SIZE));
const size_t idx = _mi_wsize_from_size(size);
mi_assert_internal(idx < MI_PAGES_DIRECT);
return heap->pages_free_direct[idx];
}
// Get the page belonging to a certain size class
static inline mi_page_t* _mi_get_free_small_page(size_t size) {
return _mi_heap_get_free_small_page(mi_get_default_heap(), size);
}
// Segment that contains the pointer
static inline mi_segment_t* _mi_ptr_segment(const void* p) {
// mi_assert_internal(p != NULL);
return (mi_segment_t*)((uintptr_t)p & ~MI_SEGMENT_MASK);
}
// Segment belonging to a page
static inline mi_segment_t* _mi_page_segment(const mi_page_t* page) {
mi_segment_t* segment = _mi_ptr_segment(page);
mi_assert_internal(segment == NULL || page == &segment->pages[page->segment_idx]);
return segment;
}
// used internally
static inline uintptr_t _mi_segment_page_idx_of(const mi_segment_t* segment, const void* p) {
// if (segment->page_size > MI_SEGMENT_SIZE) return &segment->pages[0]; // huge pages
ptrdiff_t diff = (uint8_t*)p - (uint8_t*)segment;
mi_assert_internal(diff >= 0 && (size_t)diff < MI_SEGMENT_SIZE);
uintptr_t idx = (uintptr_t)diff >> segment->page_shift;
mi_assert_internal(idx < segment->capacity);
mi_assert_internal(segment->page_kind <= MI_PAGE_MEDIUM || idx == 0);
return idx;
}
// Get the page containing the pointer
static inline mi_page_t* _mi_segment_page_of(const mi_segment_t* segment, const void* p) {
uintptr_t idx = _mi_segment_page_idx_of(segment, p);
return &((mi_segment_t*)segment)->pages[idx];
}
// Quick page start for initialized pages
static inline uint8_t* _mi_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size) {
const size_t bsize = page->xblock_size;
mi_assert_internal(bsize > 0 && (bsize%sizeof(void*)) == 0);
return _mi_segment_page_start(segment, page, bsize, page_size, NULL);
}
// Get the page containing the pointer
static inline mi_page_t* _mi_ptr_page(void* p) {
return _mi_segment_page_of(_mi_ptr_segment(p), p);
}
// Get the block size of a page (special cased for huge objects)
static inline size_t mi_page_block_size(const mi_page_t* page) {
const size_t bsize = page->xblock_size;
mi_assert_internal(bsize > 0);
if (mi_likely(bsize < MI_HUGE_BLOCK_SIZE)) {
return bsize;
}
else {
size_t psize;
_mi_segment_page_start(_mi_page_segment(page), page, bsize, &psize, NULL);
return psize;
}
}
// Get the usable block size of a page without fixed padding.
// This may still include internal padding due to alignment and rounding up size classes.
static inline size_t mi_page_usable_block_size(const mi_page_t* page) {
return mi_page_block_size(page) - MI_PADDING_SIZE;
}
// Thread free access
static inline mi_block_t* mi_page_thread_free(const mi_page_t* page) {
return (mi_block_t*)(mi_atomic_read_relaxed(&page->xthread_free) & ~3);
}
static inline mi_delayed_t mi_page_thread_free_flag(const mi_page_t* page) {
return (mi_delayed_t)(mi_atomic_read_relaxed(&page->xthread_free) & 3);
}
// Heap access
static inline mi_heap_t* mi_page_heap(const mi_page_t* page) {
return (mi_heap_t*)(mi_atomic_read_relaxed(&page->xheap));
}
static inline void mi_page_set_heap(mi_page_t* page, mi_heap_t* heap) {
mi_assert_internal(mi_page_thread_free_flag(page) != MI_DELAYED_FREEING);
mi_atomic_write(&page->xheap,(uintptr_t)heap);
}
// Thread free flag helpers
static inline mi_block_t* mi_tf_block(mi_thread_free_t tf) {
return (mi_block_t*)(tf & ~0x03);
}
static inline mi_delayed_t mi_tf_delayed(mi_thread_free_t tf) {
return (mi_delayed_t)(tf & 0x03);
}
static inline mi_thread_free_t mi_tf_make(mi_block_t* block, mi_delayed_t delayed) {
return (mi_thread_free_t)((uintptr_t)block | (uintptr_t)delayed);
}
static inline mi_thread_free_t mi_tf_set_delayed(mi_thread_free_t tf, mi_delayed_t delayed) {
return mi_tf_make(mi_tf_block(tf),delayed);
}
static inline mi_thread_free_t mi_tf_set_block(mi_thread_free_t tf, mi_block_t* block) {
return mi_tf_make(block, mi_tf_delayed(tf));
}
// are all blocks in a page freed?
// note: needs up-to-date used count, (as the `xthread_free` list may not be empty). see `_mi_page_collect_free`.
static inline bool mi_page_all_free(const mi_page_t* page) {
mi_assert_internal(page != NULL);
return (page->used == 0);
}
// are there any available blocks?
static inline bool mi_page_has_any_available(const mi_page_t* page) {
mi_assert_internal(page != NULL && page->reserved > 0);
return (page->used < page->reserved || (mi_page_thread_free(page) != NULL));
}
// are there immediately available blocks, i.e. blocks available on the free list.
static inline bool mi_page_immediate_available(const mi_page_t* page) {
mi_assert_internal(page != NULL);
return (page->free != NULL);
}
// is more than 7/8th of a page in use?
static inline bool mi_page_mostly_used(const mi_page_t* page) {
if (page==NULL) return true;
uint16_t frac = page->reserved / 8U;
return (page->reserved - page->used <= frac);
}
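With concrete (illustrative) numbers the 7/8 boundary works out as follows:
#include <assert.h>
// hypothetical demo: reserved = 64 gives frac = 8, so a page counts as
// "mostly used" once 8 or fewer blocks remain free
static inline void mi_mostly_used_demo(void) {
  const uint16_t reserved = 64, frac = reserved / 8U;
  assert(reserved - 56 <= frac);    // used = 56: exactly 8 left, mostly used
  assert(!(reserved - 55 <= frac)); // used = 55: 9 left, not yet
}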
static inline mi_page_queue_t* mi_page_queue(const mi_heap_t* heap, size_t size) {
return &((mi_heap_t*)heap)->pages[_mi_bin(size)];
}
//-----------------------------------------------------------
// Page flags
//-----------------------------------------------------------
static inline bool mi_page_is_in_full(const mi_page_t* page) {
return page->flags.x.in_full;
}
static inline void mi_page_set_in_full(mi_page_t* page, bool in_full) {
page->flags.x.in_full = in_full;
}
static inline bool mi_page_has_aligned(const mi_page_t* page) {
return page->flags.x.has_aligned;
}
static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) {
page->flags.x.has_aligned = has_aligned;
}
/* -------------------------------------------------------------------
Encoding/Decoding the free list next pointers
This is to protect against buffer overflow exploits where the
free list is mutated. Many hardened allocators xor the next pointer `p`
with a secret key `k1`, as `p^k1`. This prevents overwriting with known
values but might be still too weak: if the attacker can guess
the pointer `p` this can reveal `k1` (since `p^k1^p == k1`).
Moreover, if multiple blocks can be read as well, the attacker can
xor both as `(p1^k1) ^ (p2^k1) == p1^p2` which may reveal a lot
about the pointers (and subsequently `k1`).
Instead mimalloc uses an extra key `k2` and encodes as `((p^k2)<<<k1)+k1`.
Since these operations are not associative, the above approaches do not
work so well any more even if the `p` can be guesstimated. For example,
for the read case we can subtract two entries to discard the `+k1` term,
but that leads to `((p1^k2)<<<k1) - ((p2^k2)<<<k1)` at best.
We include the left-rotation since xor and addition are otherwise linear
in the lowest bit. Finally, both keys are unique per page which reduces
the re-use of keys by a large factor.
We also pass a separate `null` value to be used as `NULL` or otherwise
`(k2<<<k1)+k1` would appear (too) often as a sentinel value.
------------------------------------------------------------------- */
static inline bool mi_is_in_same_segment(const void* p, const void* q) {
return (_mi_ptr_segment(p) == _mi_ptr_segment(q));
}
static inline bool mi_is_in_same_page(const void* p, const void* q) {
mi_segment_t* segmentp = _mi_ptr_segment(p);
mi_segment_t* segmentq = _mi_ptr_segment(q);
if (segmentp != segmentq) return false;
uintptr_t idxp = _mi_segment_page_idx_of(segmentp, p);
uintptr_t idxq = _mi_segment_page_idx_of(segmentq, q);
return (idxp == idxq);
}
static inline uintptr_t mi_rotl(uintptr_t x, uintptr_t shift) {
shift %= MI_INTPTR_BITS;
return ((x << shift) | (x >> (MI_INTPTR_BITS - shift)));
}
static inline uintptr_t mi_rotr(uintptr_t x, uintptr_t shift) {
shift %= MI_INTPTR_BITS;
return ((x >> shift) | (x << (MI_INTPTR_BITS - shift)));
}
static inline void* mi_ptr_decode(const void* null, const mi_encoded_t x, const uintptr_t* keys) {
void* p = (void*)(mi_rotr(x - keys[0], keys[0]) ^ keys[1]);
return (mi_unlikely(p==null) ? NULL : p);
}
static inline mi_encoded_t mi_ptr_encode(const void* null, const void* p, const uintptr_t* keys) {
uintptr_t x = (uintptr_t)(mi_unlikely(p==NULL) ? null : p);
return mi_rotl(x ^ keys[1], keys[0]) + keys[0];
}
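An illustrative property test (hypothetical name, not part of the header): decoding inverts encoding for any pointer, and `NULL` round-trips through the `null` sentinel:
#include <assert.h>
static inline void mi_ptr_encode_demo(void) {
  const uintptr_t keys[2] = { 0x123457, 0xdeadbeef }; // illustrative keys
  int x, y;
  void* null = &y;  // any sentinel distinct from the encoded pointers works
  void* p = &x;
  assert(mi_ptr_decode(null, mi_ptr_encode(null, p, keys), keys) == p);
  assert(mi_ptr_decode(null, mi_ptr_encode(null, NULL, keys), keys) == NULL);
}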
static inline mi_block_t* mi_block_nextx( const void* null, const mi_block_t* block, const uintptr_t* keys ) {
#ifdef MI_ENCODE_FREELIST
return (mi_block_t*)mi_ptr_decode(null, block->next, keys);
#else
UNUSED(keys); UNUSED(null);
return (mi_block_t*)block->next;
#endif
}
static inline void mi_block_set_nextx(const void* null, mi_block_t* block, const mi_block_t* next, const uintptr_t* keys) {
#ifdef MI_ENCODE_FREELIST
block->next = mi_ptr_encode(null, next, keys);
#else
UNUSED(keys); UNUSED(null);
block->next = (mi_encoded_t)next;
#endif
}
static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t* block) {
#ifdef MI_ENCODE_FREELIST
mi_block_t* next = mi_block_nextx(page,block,page->keys);
// check for free list corruption: is `next` at least in the same page?
// TODO: check if `next` is `page->block_size` aligned?
if (mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next))) {
_mi_error_message(EFAULT, "corrupted free list entry of size %zub at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next);
next = NULL;
}
return next;
#else
UNUSED(page);
return mi_block_nextx(page,block,NULL);
#endif
}
static inline void mi_block_set_next(const mi_page_t* page, mi_block_t* block, const mi_block_t* next) {
#ifdef MI_ENCODE_FREELIST
mi_block_set_nextx(page,block,next, page->keys);
#else
UNUSED(page);
mi_block_set_nextx(page,block,next,NULL);
#endif
}
// -------------------------------------------------------------------
// Fast "random" shuffle
// -------------------------------------------------------------------
static inline uintptr_t _mi_random_shuffle(uintptr_t x) {
if (x==0) { x = 17; } // ensure we don't get stuck in generating zeros
#if (MI_INTPTR_SIZE==8)
// by Sebastiano Vigna, see: <http://xoshiro.di.unimi.it/splitmix64.c>
x ^= x >> 30;
x *= 0xbf58476d1ce4e5b9UL;
x ^= x >> 27;
x *= 0x94d049bb133111ebUL;
x ^= x >> 31;
#elif (MI_INTPTR_SIZE==4)
// by Chris Wellons, see: <https://nullprogram.com/blog/2018/07/31/>
x ^= x >> 16;
x *= 0x7feb352dUL;
x ^= x >> 15;
x *= 0x846ca68bUL;
x ^= x >> 16;
#endif
return x;
}
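A hedged usage sketch (hypothetical name): seed from the weak OS entropy declared earlier in this header and chain a few shuffles to whiten it:
// illustration only
static inline uintptr_t mi_whitened_seed(void) {
  uintptr_t x = _os_random_weak(0);  // weak seed; no extra entropy for the demo
  x = _mi_random_shuffle(x);         // each round mixes all bits of the previous value
  x = _mi_random_shuffle(x);
  return _mi_random_shuffle(x);
}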
// -------------------------------------------------------------------
// Optimize numa node access for the common case (= one node)
// -------------------------------------------------------------------
int _mi_os_numa_node_get(mi_os_tld_t* tld);
size_t _mi_os_numa_node_count_get(void);
extern size_t _mi_numa_node_count;
static inline int _mi_os_numa_node(mi_os_tld_t* tld) {
if (mi_likely(_mi_numa_node_count == 1)) return 0;
else return _mi_os_numa_node_get(tld);
}
static inline size_t _mi_os_numa_node_count(void) {
if (mi_likely(_mi_numa_node_count>0)) return _mi_numa_node_count;
else return _mi_os_numa_node_count_get();
}
// -------------------------------------------------------------------
// Getting the thread id should be performant
// as it is called in the fast path of `_mi_free`,
// so we specialize for various platforms.
// -------------------------------------------------------------------
#if defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
static inline uintptr_t _mi_thread_id(void) mi_attr_noexcept {
// Windows: works on Intel and ARM in both 32- and 64-bit
return (uintptr_t)NtCurrentTeb();
}
#elif (defined(__GNUC__) || defined(__clang__)) && \
(defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))
// TLS register on x86 is in the FS or GS register
// see: https://akkadia.org/drepper/tls.pdf
static inline uintptr_t _mi_thread_id(void) mi_attr_noexcept {
uintptr_t tid;
#if defined(__i386__)
__asm__("movl %%gs:0, %0" : "=r" (tid) : : ); // 32-bit always uses GS
#elif defined(__MACH__)
__asm__("movq %%gs:0, %0" : "=r" (tid) : : ); // x86_64 macOS uses GS
#elif defined(__x86_64__)
__asm__("movq %%fs:0, %0" : "=r" (tid) : : ); // x86_64 Linux, BSD uses FS
#elif defined(__arm__)
asm volatile ("mrc p15, 0, %0, c13, c0, 3" : "=r" (tid));
#elif defined(__aarch64__)
asm volatile ("mrs %0, tpidr_el0" : "=r" (tid));
#endif
return tid;
}
#else
// otherwise use standard C
static inline uintptr_t _mi_thread_id(void) mi_attr_noexcept {
return (uintptr_t)&_mi_heap_default;
}
#endif
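A hypothetical sketch of the consumer (the `owner_tid` parameter is an assumption for illustration, not the allocator's actual structure): the free fast path can classify a free with a single comparison:
// illustration only: local frees take the fast path, others are deferred
static inline bool mi_is_local_free(uintptr_t owner_tid) {
  return (owner_tid == _mi_thread_id());
}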
#endif

View File

@@ -10,10 +10,6 @@ terms of the MIT license. A copy of the license can be found in the file
#include "mimalloc-types.h"
#if defined(MI_MALLOC_OVERRIDE) && (defined(__APPLE__) || defined(__OpenBSD__) || defined(__DragonFly__))
#define MI_TLS_RECURSE_GUARD
#endif
#if (MI_DEBUG>0)
#define mi_trace_message(...) _mi_trace_message(__VA_ARGS__)
#else
@@ -33,7 +29,7 @@ terms of the MIT license. A copy of the license can be found in the file
#else
#define mi_decl_noinline
#define mi_decl_thread __thread // hope for the best :-)
#define mi_decl_cache_align
#define mi_decl_cache_align
#endif
@@ -51,6 +47,7 @@ void _mi_random_init(mi_random_ctx_t* ctx);
void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* new_ctx);
uintptr_t _mi_random_next(mi_random_ctx_t* ctx);
uintptr_t _mi_heap_random_next(mi_heap_t* heap);
uintptr_t _os_random_weak(uintptr_t extra_seed);
static inline uintptr_t _mi_random_shuffle(uintptr_t x);
// init.c
@@ -71,6 +68,7 @@ bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* stat
bool _mi_os_decommit(void* p, size_t size, mi_stats_t* stats);
bool _mi_os_reset(void* p, size_t size, mi_stats_t* stats);
bool _mi_os_unreset(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
size_t _mi_os_good_alloc_size(size_t size);
// arena.c
void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
@@ -89,6 +87,8 @@ uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t*
void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld);
void _mi_abandoned_await_readers(void);
// "page.c"
void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc;
@@ -238,9 +238,13 @@ static inline size_t _mi_wsize_from_size(size_t size) {
return (size + sizeof(uintptr_t) - 1) / sizeof(uintptr_t);
}
// Does malloc satisfy the alignment constraints already?
static inline bool mi_malloc_satisfies_alignment(size_t alignment, size_t size) {
return (alignment == sizeof(void*) || (alignment == MI_MAX_ALIGN_SIZE && size > (MI_MAX_ALIGN_SIZE/2)));
}
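Illustratively (hypothetical demo name, assuming `MI_MAX_ALIGN_SIZE` is 16 as on common 64-bit ABIs): requests that plain `malloc` already aligns can skip the aligned-allocation path:
#include <assert.h>
static inline void mi_satisfies_alignment_demo(void) {
  assert(mi_malloc_satisfies_alignment(sizeof(void*), 100)); // malloc always guarantees this
  assert(mi_malloc_satisfies_alignment(16, 100));  // over half the max alignment in size
  assert(!mi_malloc_satisfies_alignment(16, 4));   // tiny blocks may be packed tighter
  assert(!mi_malloc_satisfies_alignment(32, 100)); // needs the aligned allocation path
}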
// Overflow detecting multiply
static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) {
static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) {
#if __has_builtin(__builtin_umul_overflow) || __GNUC__ >= 5
#include <limits.h> // UINT_MAX, ULONG_MAX
#if (SIZE_MAX == UINT_MAX)
@@ -273,26 +277,76 @@ static inline bool mi_count_size_overflow(size_t count, size_t size, size_t* tot
}
/* -----------------------------------------------------------
The thread local default heap
----------------------------------------------------------- */
/* ----------------------------------------------------------------------------------------
The thread local default heap: `_mi_get_default_heap` returns the thread local heap.
On most platforms (Windows, Linux, FreeBSD, NetBSD, etc), this just returns a
__thread local variable (`_mi_heap_default`). With the initial-exec TLS model this ensures
that the storage will always be available (allocated on the thread stacks).
On some platforms though we cannot use that when overriding `malloc` since the underlying
TLS implementation (or the loader) will call itself `malloc` on a first access and recurse.
We try to circumvent this in an efficient way:
- macOSX : we use an unused TLS slot from the OS allocated slots (MI_TLS_SLOT). On OSX, the
loader itself calls `malloc` even before the modules are initialized.
- OpenBSD: we use an unused slot from the pthread block (MI_TLS_PTHREAD_SLOT_OFS).
- DragonFly: not yet working.
------------------------------------------------------------------------------------------- */
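For illustration only, a sketch of what reading such an OS slot can look like on x86_64 macOS, mirroring the `mi_tls_osx_fast_get` pattern earlier in this commit; the fixed gs-relative layout of the TSD array is an ABI assumption, and this is not the actual `mi_tls_slot` definition:
// hedged sketch: direct gs-relative read of pthread TSD slot `slot`
static inline void* mi_tls_slot_sketch(size_t slot) {
  void* res;
  const size_t ofs = slot*sizeof(void*);
  __asm__("movq %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)));
  return res;
}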
extern const mi_heap_t _mi_heap_empty; // read-only empty heap, initial value of the thread local default heap
extern mi_heap_t _mi_heap_main; // statically allocated main backing heap
extern bool _mi_process_is_initialized;
mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap
#if defined(MI_MALLOC_OVERRIDE)
#if defined(__MACH__) // OSX
#define MI_TLS_SLOT 89 // seems unused?
// other possible unused ones are 9, 29, __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY4 (94), __PTK_FRAMEWORK_GC_KEY9 (112) and __PTK_FRAMEWORK_OLDGC_KEY9 (89)
// see <https://github.com/rweichler/substrate/blob/master/include/pthread_machdep.h>
#elif defined(__OpenBSD__)
// use end bytes of a name; goes wrong if anyone uses names > 23 characters (pthread specifies 16)
// see <https://github.com/openbsd/src/blob/master/lib/libc/include/thread_private.h#L371>
#define MI_TLS_PTHREAD_SLOT_OFS (6*sizeof(int) + 4*sizeof(void*) + 24)
#elif defined(__DragonFly__)
#warning "mimalloc is not working correctly on DragonFly yet."
#define MI_TLS_PTHREAD_SLOT_OFS (4 + 1*sizeof(void*)) // offset `uniqueid` (also used by gdb?) <https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/lib/libthread_xu/thread/thr_private.h#L458>
#endif
#endif
#if defined(MI_TLS_SLOT)
static inline void* mi_tls_slot(size_t slot) mi_attr_noexcept; // forward declaration
#elif defined(MI_TLS_PTHREAD_SLOT_OFS)
#include <pthread.h>
static inline mi_heap_t** mi_tls_pthread_heap_slot(void) {
pthread_t self = pthread_self();
#if defined(__DragonFly__)
if (self==NULL) {
static mi_heap_t* pheap_main = _mi_heap_main_get();
return &pheap_main;
}
#endif
return (mi_heap_t**)((uint8_t*)self + MI_TLS_PTHREAD_SLOT_OFS);
}
#elif defined(MI_TLS_PTHREAD)
#include <pthread.h>
extern pthread_key_t _mi_heap_default_key;
#else
extern mi_decl_thread mi_heap_t* _mi_heap_default; // default heap to allocate from
#endif
static inline mi_heap_t* mi_get_default_heap(void) {
#ifdef MI_TLS_RECURSE_GUARD
// on some BSD platforms, like macOS, the dynamic loader calls `malloc`
// to initialize thread local data. To avoid recursion, we need to avoid
// accessing the thread local `_mi_heap_default` until our module is loaded
// and use the statically allocated main heap until that time.
// TODO: patch ourselves dynamically to avoid this check every time?
if (!_mi_process_is_initialized) return &_mi_heap_main;
#endif
#if defined(MI_TLS_SLOT)
mi_heap_t* heap = (mi_heap_t*)mi_tls_slot(MI_TLS_SLOT);
return (mi_unlikely(heap == NULL) ? (mi_heap_t*)&_mi_heap_empty : heap);
#elif defined(MI_TLS_PTHREAD_SLOT_OFS)
mi_heap_t* heap = *mi_tls_pthread_heap_slot();
return (mi_unlikely(heap == NULL) ? (mi_heap_t*)&_mi_heap_empty : heap);
#elif defined(MI_TLS_PTHREAD)
mi_heap_t* heap = (mi_unlikely(_mi_heap_default_key == (pthread_key_t)(-1)) ? _mi_heap_main_get() : (mi_heap_t*)pthread_getspecific(_mi_heap_default_key));
return (mi_unlikely(heap == NULL) ? (mi_heap_t*)&_mi_heap_empty : heap);
#else
#if defined(MI_TLS_RECURSE_GUARD)
if (mi_unlikely(!_mi_process_is_initialized)) return _mi_heap_main_get();
#endif
return _mi_heap_default;
#endif
}
static inline bool mi_heap_is_default(const mi_heap_t* heap) {
@@ -309,6 +363,8 @@ static inline bool mi_heap_is_initialized(mi_heap_t* heap) {
}
static inline uintptr_t _mi_ptr_cookie(const void* p) {
extern mi_heap_t _mi_heap_main;
mi_assert_internal(_mi_heap_main.cookie != 0);
return ((uintptr_t)p ^ _mi_heap_main.cookie);
}
@@ -317,8 +373,10 @@ static inline uintptr_t _mi_ptr_cookie(const void* p) {
----------------------------------------------------------- */
static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t size) {
mi_assert_internal(size <= MI_SMALL_SIZE_MAX);
return heap->pages_free_direct[_mi_wsize_from_size(size)];
mi_assert_internal(size <= (MI_SMALL_SIZE_MAX + MI_PADDING_SIZE));
const size_t idx = _mi_wsize_from_size(size);
mi_assert_internal(idx < MI_PAGES_DIRECT);
return heap->pages_free_direct[idx];
}
// Get the page belonging to a certain size class
@@ -394,6 +452,13 @@ static inline size_t mi_page_block_size(const mi_page_t* page) {
}
}
// Get the usable block size of a page without fixed padding.
// This may still include internal padding due to alignment and rounding up size classes.
static inline size_t mi_page_usable_block_size(const mi_page_t* page) {
return mi_page_block_size(page) - MI_PADDING_SIZE;
}
// Thread free access
static inline mi_block_t* mi_page_thread_free(const mi_page_t* page) {
return (mi_block_t*)(mi_atomic_read_relaxed(&page->xthread_free) & ~3);
@@ -430,14 +495,14 @@ static inline mi_thread_free_t mi_tf_set_block(mi_thread_free_t tf, mi_block_t*
return mi_tf_make(block, mi_tf_delayed(tf));
}
// are all blocks in a page freed?
// are all blocks in a page freed?
// note: needs up-to-date used count, (as the `xthread_free` list may not be empty). see `_mi_page_collect_free`.
static inline bool mi_page_all_free(const mi_page_t* page) {
mi_assert_internal(page != NULL);
return (page->used == 0);
}
// are there any available blocks?
// are there any available blocks?
static inline bool mi_page_has_any_available(const mi_page_t* page) {
mi_assert_internal(page != NULL && page->reserved > 0);
return (page->used < page->reserved || (mi_page_thread_free(page) != NULL));
@@ -485,11 +550,11 @@ static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) {
/* -------------------------------------------------------------------
Encoding/Decoding the free list next pointers
This is to protect against buffer overflow exploits where the
free list is mutated. Many hardened allocators xor the next pointer `p`
This is to protect against buffer overflow exploits where the
free list is mutated. Many hardened allocators xor the next pointer `p`
with a secret key `k1`, as `p^k1`. This prevents overwriting with known
values but might be still too weak: if the attacker can guess
the pointer `p` this can reveal `k1` (since `p^k1^p == k1`).
values but might be still too weak: if the attacker can guess
the pointer `p` this can reveal `k1` (since `p^k1^p == k1`).
Moreover, if multiple blocks can be read as well, the attacker can
xor both as `(p1^k1) ^ (p2^k1) == p1^p2` which may reveal a lot
about the pointers (and subsequently `k1`).
@ -497,9 +562,9 @@ about the pointers (and subsequently `k1`).
Instead mimalloc uses an extra key `k2` and encodes as `((p^k2)<<<k1)+k1`.
Since these operations are not associative, the above approaches do not
work so well any more even if the `p` can be guesstimated. For example,
for the read case we can subtract two entries to discard the `+k1` term,
for the read case we can subtract two entries to discard the `+k1` term,
but that leads to `((p1^k2)<<<k1) - ((p2^k2)<<<k1)` at best.
We include the left-rotation since xor and addition are otherwise linear
We include the left-rotation since xor and addition are otherwise linear
in the lowest bit. Finally, both keys are unique per page which reduces
the re-use of keys by a large factor.
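
The scheme is small enough to demonstrate standalone. The following is a hedged sketch of the `((p^k2)<<<k1)+k1` encoding with its matching decode, using illustrative names and made-up key values rather than the library's internals; the round-trip works because rotating right undoes rotating left, subtraction undoes `+k1`, and xor undoes `^k2`:

#include <assert.h>
#include <stdint.h>

#define BITS (sizeof(uintptr_t) * 8)

// safe rotations (mask the shift and avoid undefined behavior when it is 0)
static uintptr_t rotl(uintptr_t x, uintptr_t s) {
  s &= (BITS - 1);
  return (s == 0) ? x : ((x << s) | (x >> (BITS - s)));
}
static uintptr_t rotr(uintptr_t x, uintptr_t s) {
  s &= (BITS - 1);
  return (s == 0) ? x : ((x >> s) | (x << (BITS - s)));
}

// encode as ((p ^ k2) <<< k1) + k1; decode inverts each step in reverse order
static uintptr_t encode(uintptr_t p, uintptr_t k1, uintptr_t k2) {
  return rotl(p ^ k2, k1) + k1;
}
static uintptr_t decode(uintptr_t x, uintptr_t k1, uintptr_t k2) {
  return rotr(x - k1, k1) ^ k2;
}

int main(void) {
  const uintptr_t k1 = 0x93a5c1UL, k2 = 0x7f4b2dUL; // stand-ins for the per-page keys
  const uintptr_t p  = 0x7ffe1230UL;                // a made-up block address
  assert(decode(encode(p, k1, k2), k1, k2) == p);   // round-trips exactly
  assert(encode(p, k1, k2) != p);                   // and is not the identity
  return 0;
}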
@ -526,30 +591,37 @@ static inline uintptr_t mi_rotr(uintptr_t x, uintptr_t shift) {
return ((x >> shift) | (x << (MI_INTPTR_BITS - shift)));
}
static inline mi_block_t* mi_block_nextx( const void* null, const mi_block_t* block, uintptr_t key1, uintptr_t key2 ) {
static inline void* mi_ptr_decode(const void* null, const mi_encoded_t x, const uintptr_t* keys) {
void* p = (void*)(mi_rotr(x - keys[0], keys[0]) ^ keys[1]);
return (mi_unlikely(p==null) ? NULL : p);
}
static inline mi_encoded_t mi_ptr_encode(const void* null, const void* p, const uintptr_t* keys) {
uintptr_t x = (uintptr_t)(mi_unlikely(p==NULL) ? null : p);
return mi_rotl(x ^ keys[1], keys[0]) + keys[0];
}
static inline mi_block_t* mi_block_nextx( const void* null, const mi_block_t* block, const uintptr_t* keys ) {
#ifdef MI_ENCODE_FREELIST
mi_block_t* b = (mi_block_t*)(mi_rotr(block->next - key1, key1) ^ key2);
if (mi_unlikely((void*)b==null)) { b = NULL; }
return b;
return (mi_block_t*)mi_ptr_decode(null, block->next, keys);
#else
UNUSED(key1); UNUSED(key2); UNUSED(null);
UNUSED(keys); UNUSED(null);
return (mi_block_t*)block->next;
#endif
}
static inline void mi_block_set_nextx(const void* null, mi_block_t* block, const mi_block_t* next, uintptr_t key1, uintptr_t key2) {
static inline void mi_block_set_nextx(const void* null, mi_block_t* block, const mi_block_t* next, const uintptr_t* keys) {
#ifdef MI_ENCODE_FREELIST
if (mi_unlikely(next==NULL)) { next = (mi_block_t*)null; }
block->next = mi_rotl((uintptr_t)next ^ key2, key1) + key1;
block->next = mi_ptr_encode(null, next, keys);
#else
UNUSED(key1); UNUSED(key2); UNUSED(null);
UNUSED(keys); UNUSED(null);
block->next = (mi_encoded_t)next;
#endif
}
static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t* block) {
#ifdef MI_ENCODE_FREELIST
mi_block_t* next = mi_block_nextx(page,block,page->key[0],page->key[1]);
mi_block_t* next = mi_block_nextx(page,block,page->keys);
// check for free list corruption: is `next` at least in the same page?
// TODO: check if `next` is `page->block_size` aligned?
if (mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next))) {
@ -559,16 +631,16 @@ static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t*
return next;
#else
UNUSED(page);
return mi_block_nextx(page,block,0,0);
return mi_block_nextx(page,block,NULL);
#endif
}
static inline void mi_block_set_next(const mi_page_t* page, mi_block_t* block, const mi_block_t* next) {
#ifdef MI_ENCODE_FREELIST
mi_block_set_nextx(page,block,next, page->key[0], page->key[1]);
mi_block_set_nextx(page,block,next, page->keys);
#else
UNUSED(page);
mi_block_set_nextx(page,block, next,0,0);
mi_block_set_nextx(page,block,next,NULL);
#endif
}
@ -615,9 +687,8 @@ static inline size_t _mi_os_numa_node_count(void) {
// -------------------------------------------------------------------
// Getting the thread id should be performant
// as it is called in the fast path of `_mi_free`,
// so we specialize for various platforms.
// Getting the thread id should be performant as it is called in the
// fast path of `_mi_free` and we specialize for various platforms.
// -------------------------------------------------------------------
#if defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
@ -626,24 +697,55 @@ static inline uintptr_t _mi_thread_id(void) mi_attr_noexcept {
// Windows: works on Intel and ARM in both 32- and 64-bit
return (uintptr_t)NtCurrentTeb();
}
#elif (defined(__GNUC__) || defined(__clang__)) && \
#elif defined(__GNUC__) && \
(defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))
// TLS register on x86 is in the FS or GS register
// see: https://akkadia.org/drepper/tls.pdf
// TLS register on x86 is in the FS or GS register, see: https://akkadia.org/drepper/tls.pdf
static inline void* mi_tls_slot(size_t slot) mi_attr_noexcept {
void* res;
const size_t ofs = (slot*sizeof(void*));
#if defined(__i386__)
__asm__("movl %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // 32-bit always uses GS
#elif defined(__MACH__) && defined(__x86_64__)
__asm__("movq %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 macOSX uses GS
#elif defined(__x86_64__)
__asm__("movq %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 Linux, BSD uses FS
#elif defined(__arm__)
void** tcb; UNUSED(ofs);
asm volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
res = tcb[slot];
#elif defined(__aarch64__)
void** tcb; UNUSED(ofs);
asm volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
res = tcb[slot];
#endif
return res;
}
// setting is only used on macOS for now
static inline void mi_tls_slot_set(size_t slot, void* value) mi_attr_noexcept {
const size_t ofs = (slot*sizeof(void*));
#if defined(__i386__)
__asm__("movl %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // 32-bit always uses GS
#elif defined(__MACH__) && defined(__x86_64__)
__asm__("movq %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 macOSX uses GS
#elif defined(__x86_64__)
__asm__("movq %1,%%fs:%1" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 Linux, BSD uses FS
#elif defined(__arm__)
void** tcb; UNUSED(ofs);
asm volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
tcb[slot] = value;
#elif defined(__aarch64__)
void** tcb; UNUSED(ofs);
asm volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
tcb[slot] = value;
#endif
}
static inline uintptr_t _mi_thread_id(void) mi_attr_noexcept {
uintptr_t tid;
#if defined(__i386__)
__asm__("movl %%gs:0, %0" : "=r" (tid) : : ); // 32-bit always uses GS
#elif defined(__MACH__)
__asm__("movq %%gs:0, %0" : "=r" (tid) : : ); // x86_64 macOS uses GS
#elif defined(__x86_64__)
__asm__("movq %%fs:0, %0" : "=r" (tid) : : ); // x86_64 Linux, BSD uses FS
#elif defined(__arm__)
asm volatile ("mrc p15, 0, %0, c13, c0, 3" : "=r" (tid));
#elif defined(__aarch64__)
asm volatile ("mrs %0, tpidr_el0" : "=r" (tid));
#endif
return tid;
// in all our targets, slot 0 is the pointer to the thread control block
return (uintptr_t)mi_tls_slot(0);
}
#else
// otherwise use standard C
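
The standard-C branch is elided in this view; as a reference point only, a portable fallback could rely on C11 thread-local storage, using the address of a thread-local variable as the thread id. This is a hedged sketch of the idea, not the elided code:

#include <stdint.h>
#include <stdio.h>

// hedged sketch: derive a per-thread id from the address of a C11
// thread-local variable (distinct for every live thread)
static _Thread_local uint8_t mi_tls_probe_dummy; // hypothetical name

static inline uintptr_t my_thread_id(void) {     // illustrative, not mimalloc's code
  return (uintptr_t)&mi_tls_probe_dummy;
}

int main(void) {
  printf("thread id: %p\n", (void*)my_thread_id());
  return 0;
}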

View File

@ -32,8 +32,8 @@ terms of the MIT license. A copy of the license can be found in the file
void* operator new[](std::size_t n, const std::nothrow_t& tag) noexcept { (void)(tag); return mi_new_nothrow(n); }
#if (__cplusplus >= 201402L || _MSC_VER >= 1916)
void operator delete (void* p, std::size_t n) { mi_free_size(p,n); };
void operator delete[](void* p, std::size_t n) { mi_free_size(p,n); };
void operator delete (void* p, std::size_t n) noexcept { mi_free_size(p,n); };
void operator delete[](void* p, std::size_t n) noexcept { mi_free_size(p,n); };
#endif
#if (__cplusplus > 201402L || defined(__cpp_aligned_new))

View File

@ -12,6 +12,10 @@ terms of the MIT license. A copy of the license can be found in the file
#include <stdint.h> // uintptr_t, uint16_t, etc
#include <mimalloc-atomic.h> // _Atomic
// Minimal alignment necessary. On most platforms 16 bytes are needed
// due to SSE registers for example. This must be at least `MI_INTPTR_SIZE`
#define MI_MAX_ALIGN_SIZE 16 // sizeof(max_align_t)
// ------------------------------------------------------
// Variants
// ------------------------------------------------------
@ -44,17 +48,24 @@ terms of the MIT license. A copy of the license can be found in the file
#endif
#endif
// Reserve extra padding at the end of each block to be more resilient against heap block overflows.
// The padding can detect byte-precise buffer overflow on free.
#if !defined(MI_PADDING) && (MI_DEBUG>=1)
#define MI_PADDING 1
#endif
// Encoded free lists allow detection of corrupted free lists
// and can detect buffer overflows and double `free`s.
#if (MI_SECURE>=3 || MI_DEBUG>=1)
// and can detect buffer overflows, writes after free, and double `free`s.
#if (MI_SECURE>=3 || MI_DEBUG>=1 || defined(MI_PADDING))
#define MI_ENCODE_FREELIST 1
#endif
// ------------------------------------------------------
// Platform specific values
// ------------------------------------------------------
// ------------------------------------------------------
// Size of a pointer.
// We assume that `sizeof(void*)==sizeof(intptr_t)`
@ -82,6 +93,7 @@ terms of the MIT license. A copy of the license can be found in the file
#define MiB (KiB*KiB)
#define GiB (MiB*KiB)
// ------------------------------------------------------
// Main internal data-structures
// ------------------------------------------------------
@ -114,10 +126,6 @@ terms of the MIT license. A copy of the license can be found in the file
#define MI_HUGE_OBJ_SIZE_MAX (2*MI_INTPTR_SIZE*MI_SEGMENT_SIZE) // (must match MI_REGION_MAX_ALLOC_SIZE in memory.c)
// Minimal alignment necessary. On most platforms 16 bytes are needed
// due to SSE registers for example. This must be at least `MI_INTPTR_SIZE`
#define MI_MAX_ALIGN_SIZE 16 // sizeof(max_align_t)
// Maximum number of size classes. (spaced exponentially in 12.5% increments)
#define MI_BIN_HUGE (73U)
@ -212,10 +220,10 @@ typedef struct mi_page_s {
uint8_t is_zero : 1; // `true` if the blocks in the free list are zero initialized
uint8_t retire_expire : 7; // expiration count for retired blocks
mi_block_t* free; // list of available free blocks (`malloc` allocates from this list)
#ifdef MI_ENCODE_FREELIST
uintptr_t key[2]; // two random keys to encode the free lists (see `_mi_block_next`)
#endif
mi_block_t* free; // list of available free blocks (`malloc` allocates from this list)
#ifdef MI_ENCODE_FREELIST
uintptr_t keys[2]; // two random keys to encode the free lists (see `_mi_block_next`)
#endif
uint32_t used; // number of blocks in use (including blocks in `local_free` and `thread_free`)
uint32_t xblock_size; // size available in each block (always `>0`)
@ -323,18 +331,37 @@ typedef struct mi_random_cxt_s {
} mi_random_ctx_t;
// In debug mode there is a padding structure at the end of the blocks to check for buffer overflows
#if defined(MI_PADDING)
typedef struct mi_padding_s {
uint32_t canary; // encoded block value to check validity of the padding (in case of overflow)
uint32_t delta; // padding bytes before the block. (mi_usable_size(p) - delta == exact allocated bytes)
} mi_padding_t;
#define MI_PADDING_SIZE (sizeof(mi_padding_t))
#define MI_PADDING_WSIZE ((MI_PADDING_SIZE + MI_INTPTR_SIZE - 1) / MI_INTPTR_SIZE)
#else
#define MI_PADDING_SIZE 0
#define MI_PADDING_WSIZE 0
#endif
#define MI_PAGES_DIRECT (MI_SMALL_WSIZE_MAX + MI_PADDING_WSIZE + 1)
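
To make the padding mechanism concrete: below is a hedged sketch (illustrative, not mimalloc's actual check, which additionally encodes the canary with the page keys) of how a trailing canary can detect a byte-precise overflow on free, while a `delta` field would let `mi_usable_size` report the exact requested size:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct padding_s {
  uint32_t canary; // expected sentinel; an overflow from the user area clobbers it
  uint32_t delta;  // usable block size minus the exact requested size
} padding_t;

// allocate `size` user bytes followed by a padding record
static void* pad_malloc(size_t size) {
  uint8_t* p = malloc(size + sizeof(padding_t));
  if (p == NULL) return NULL;
  padding_t pad = { 0xDEADBEEF, 0 };  // delta 0: usable size == requested size here
  memcpy(p + size, &pad, sizeof(pad));
  return p;
}

// on free, verify the canary right after the user area
static void pad_free(void* p, size_t size) {
  padding_t pad;
  memcpy(&pad, (uint8_t*)p + size, sizeof(pad));
  if (pad.canary != 0xDEADBEEF) {
    fprintf(stderr, "heap block overflow detected at %p\n", p);
  }
  free(p);
}

int main(void) {
  char* s = pad_malloc(4);
  memcpy(s, "abcde", 5);  // one byte too many: clobbers the canary
  pad_free(s, 4);         // reports the overflow
  return 0;
}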
// A heap owns a set of pages.
struct mi_heap_s {
mi_tld_t* tld;
mi_page_t* pages_free_direct[MI_SMALL_WSIZE_MAX + 2]; // optimize: array where every entry points to a page with possibly free blocks in the corresponding queue for that size.
mi_page_queue_t pages[MI_BIN_FULL + 1]; // queue of pages for each size class (or "bin")
mi_page_t* pages_free_direct[MI_PAGES_DIRECT]; // optimize: array where every entry points to a page with possibly free blocks in the corresponding queue for that size.
mi_page_queue_t pages[MI_BIN_FULL + 1]; // queue of pages for each size class (or "bin")
volatile _Atomic(mi_block_t*) thread_delayed_free;
uintptr_t thread_id; // thread this heap belongs to
uintptr_t cookie; // random cookie to verify pointers (see `_mi_ptr_cookie`)
uintptr_t key[2]; // twb random keys used to encode the `thread_delayed_free` list
mi_random_ctx_t random; // random number context used for secure allocation
size_t page_count; // total number of pages in the `pages` queues.
bool no_reclaim; // `true` if this heap should not reclaim abandoned pages
uintptr_t thread_id; // thread this heap belongs to
uintptr_t cookie; // random cookie to verify pointers (see `_mi_ptr_cookie`)
uintptr_t keys[2]; // two random keys used to encode the `thread_delayed_free` list
mi_random_ctx_t random; // random number context used for secure allocation
size_t page_count; // total number of pages in the `pages` queues.
size_t page_retired_min; // smallest retired index (retired pages are fully free, but still in the page queues)
size_t page_retired_max; // largest retired index into the `pages` array.
mi_heap_t* next; // list of heaps per thread
bool no_reclaim; // `true` if this heap should not reclaim abandoned pages
};
@ -345,7 +372,7 @@ struct mi_heap_s {
#define MI_DEBUG_UNINIT (0xD0)
#define MI_DEBUG_FREED (0xDF)
#define MI_DEBUG_PADDING (0xDE)
#if (MI_DEBUG)
// use our own assertion to print without memory allocation
@ -475,6 +502,7 @@ struct mi_tld_s {
unsigned long long heartbeat; // monotonic heartbeat count
bool recurse; // true if deferred was called; used to prevent infinite recursion.
mi_heap_t* heap_backing; // backing heap of this thread (cannot be deleted)
mi_heap_t* heaps; // list of heaps in this thread (so we can abandon all when the thread terminates)
mi_segments_tld_t segments; // segment tld
mi_os_tld_t os; // os tld
mi_stats_t stats; // statistics

View File

@ -8,7 +8,7 @@ terms of the MIT license. A copy of the license can be found in the file
#ifndef MIMALLOC_H
#define MIMALLOC_H
#define MI_MALLOC_VERSION 150 // major + 2 digits minor
#define MI_MALLOC_VERSION 161 // major + 2 digits minor
// ------------------------------------------------------
// Compiler specific attributes
@ -24,46 +24,56 @@ terms of the MIT license. A copy of the license can be found in the file
#define mi_attr_noexcept
#endif
#if (__cplusplus >= 201703)
#define mi_decl_nodiscard [[nodiscard]]
#elif (__GNUC__ >= 4) || defined(__clang__) // includes clang, icc, and clang-cl
#define mi_decl_nodiscard __attribute__((warn_unused_result))
#elif (_MSC_VER >= 1700)
#define mi_decl_nodiscard _Check_return_
#else
#define mi_decl_nodiscard
#endif
#ifdef _MSC_VER
#if !defined(MI_SHARED_LIB)
#define mi_decl_export
#elif defined(MI_SHARED_LIB_EXPORT)
#define mi_decl_export __declspec(dllexport)
#define mi_decl_export __declspec(dllexport)
#else
#define mi_decl_export __declspec(dllimport)
#define mi_decl_export __declspec(dllimport)
#endif
#if (_MSC_VER >= 1900) && !defined(__EDG__)
#define mi_decl_allocator __declspec(allocator) __declspec(restrict)
#define mi_decl_restrict __declspec(allocator) __declspec(restrict)
#else
#define mi_decl_allocator __declspec(restrict)
#define mi_decl_restrict __declspec(restrict)
#endif
#define mi_cdecl __cdecl
#define mi_cdecl __cdecl
#define mi_attr_malloc
#define mi_attr_alloc_size(s)
#define mi_attr_alloc_size2(s1,s2)
#define mi_attr_alloc_align(p)
#elif defined(__GNUC__) // includes clang and icc
#define mi_cdecl // leads to warnings... __attribute__((cdecl))
#define mi_decl_export __attribute__((visibility("default")))
#define mi_decl_allocator
#define mi_attr_malloc __attribute__((malloc))
#elif defined(__GNUC__) // includes clang and icc
#define mi_cdecl // leads to warnings... __attribute__((cdecl))
#define mi_decl_export __attribute__((visibility("default")))
#define mi_decl_restrict
#define mi_attr_malloc __attribute__((malloc))
#if (defined(__clang_major__) && (__clang_major__ < 4)) || (__GNUC__ < 5)
#define mi_attr_alloc_size(s)
#define mi_attr_alloc_size2(s1,s2)
#define mi_attr_alloc_align(p)
#define mi_attr_alloc_size(s)
#define mi_attr_alloc_size2(s1,s2)
#define mi_attr_alloc_align(p)
#elif defined(__INTEL_COMPILER)
#define mi_attr_alloc_size(s) __attribute__((alloc_size(s)))
#define mi_attr_alloc_size2(s1,s2) __attribute__((alloc_size(s1,s2)))
#define mi_attr_alloc_align(p)
#define mi_attr_alloc_size(s) __attribute__((alloc_size(s)))
#define mi_attr_alloc_size2(s1,s2) __attribute__((alloc_size(s1,s2)))
#define mi_attr_alloc_align(p)
#else
#define mi_attr_alloc_size(s) __attribute__((alloc_size(s)))
#define mi_attr_alloc_size2(s1,s2) __attribute__((alloc_size(s1,s2)))
#define mi_attr_alloc_align(p) __attribute__((alloc_align(p)))
#define mi_attr_alloc_size(s) __attribute__((alloc_size(s)))
#define mi_attr_alloc_size2(s1,s2) __attribute__((alloc_size(s1,s2)))
#define mi_attr_alloc_align(p) __attribute__((alloc_align(p)))
#endif
#else
#define mi_cdecl
#define mi_decl_export
#define mi_decl_allocator
#define mi_decl_restrict
#define mi_attr_malloc
#define mi_attr_alloc_size(s)
#define mi_attr_alloc_size2(s1,s2)
@ -85,15 +95,15 @@ extern "C" {
// Standard malloc interface
// ------------------------------------------------------
mi_decl_export mi_decl_allocator void* mi_malloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_export mi_decl_allocator void* mi_calloc(size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2);
mi_decl_export mi_decl_allocator void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_export mi_decl_allocator void* mi_expand(void* p, size_t newsize) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2);
mi_decl_nodiscard mi_decl_export void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2);
mi_decl_export void* mi_expand(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2);
mi_decl_export void mi_free(void* p) mi_attr_noexcept;
mi_decl_export char* mi_strdup(const char* s) mi_attr_noexcept;
mi_decl_export char* mi_strndup(const char* s, size_t n) mi_attr_noexcept;
mi_decl_export char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept;
mi_decl_export void mi_free(void* p) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept mi_attr_malloc;
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept mi_attr_malloc;
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept mi_attr_malloc;
// ------------------------------------------------------
// Extended functionality
@ -101,16 +111,16 @@ mi_decl_export char* mi_realpath(const char* fname, char* resolved_name) mi_attr
#define MI_SMALL_WSIZE_MAX (128)
#define MI_SMALL_SIZE_MAX (MI_SMALL_WSIZE_MAX*sizeof(void*))
mi_decl_export mi_decl_allocator void* mi_malloc_small(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_export mi_decl_allocator void* mi_zalloc_small(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_export mi_decl_allocator void* mi_zalloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_export mi_decl_allocator void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2);
mi_decl_export mi_decl_allocator void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2,3);
mi_decl_export mi_decl_allocator void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2);
mi_decl_nodiscard mi_decl_export void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3);
mi_decl_nodiscard mi_decl_export void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2);
mi_decl_export size_t mi_usable_size(const void* p) mi_attr_noexcept;
mi_decl_export size_t mi_good_size(size_t size) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export size_t mi_usable_size(const void* p) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export size_t mi_good_size(size_t size) mi_attr_noexcept;
// ------------------------------------------------------
@ -145,23 +155,24 @@ mi_decl_export void mi_thread_stats_print_out(mi_output_fun* out, void* arg) mi_
// allocation, but unfortunately this differs from `posix_memalign` and `aligned_alloc`.
// -------------------------------------------------------------------------------------
mi_decl_export mi_decl_allocator void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_export mi_decl_allocator void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_export mi_decl_allocator void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_export mi_decl_allocator void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_export mi_decl_allocator void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2) mi_attr_alloc_align(3);
mi_decl_export mi_decl_allocator void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2);
mi_decl_export mi_decl_allocator void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3);
mi_decl_export mi_decl_allocator void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2) mi_attr_alloc_align(3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2);
mi_decl_nodiscard mi_decl_export void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(2) mi_attr_alloc_align(3);
mi_decl_nodiscard mi_decl_export void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(2);
// -------------------------------------------------------------------------------------
// Heaps: first-class, but can only allocate from the same thread that created it.
// -------------------------------------------------------------------------------------
struct mi_heap_s;
typedef struct mi_heap_s mi_heap_t;
mi_decl_export mi_heap_t* mi_heap_new(void);
mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new(void);
mi_decl_export void mi_heap_delete(mi_heap_t* heap);
mi_decl_export void mi_heap_destroy(mi_heap_t* heap);
mi_decl_export mi_heap_t* mi_heap_set_default(mi_heap_t* heap);
@ -169,28 +180,28 @@ mi_decl_export mi_heap_t* mi_heap_get_default(void);
mi_decl_export mi_heap_t* mi_heap_get_backing(void);
mi_decl_export void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept;
mi_decl_export mi_decl_allocator void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_export mi_decl_allocator void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_export mi_decl_allocator void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3);
mi_decl_export mi_decl_allocator void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3);
mi_decl_export mi_decl_allocator void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_export mi_decl_allocator void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(3);
mi_decl_export mi_decl_allocator void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept;
mi_decl_export mi_decl_allocator void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(3);
mi_decl_nodiscard mi_decl_export void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3);
mi_decl_nodiscard mi_decl_export void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(3,4);
mi_decl_nodiscard mi_decl_export void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3);
mi_decl_export char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept;
mi_decl_export char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept;
mi_decl_export char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept mi_attr_malloc;
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept mi_attr_malloc;
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept mi_attr_malloc;
mi_decl_export mi_decl_allocator void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3);
mi_decl_export mi_decl_allocator void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_export mi_decl_allocator void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3);
mi_decl_export mi_decl_allocator void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_export mi_decl_allocator void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3) mi_attr_alloc_align(4);
mi_decl_export mi_decl_allocator void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3);
mi_decl_export mi_decl_allocator void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(3) mi_attr_alloc_align(4);
mi_decl_export mi_decl_allocator void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3) mi_attr_alloc_align(4);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3);
mi_decl_nodiscard mi_decl_export void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(3) mi_attr_alloc_align(4);
mi_decl_nodiscard mi_decl_export void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(3);
// --------------------------------------------------------------------------------
@ -200,21 +211,21 @@ mi_decl_export mi_decl_allocator void* mi_heap_realloc_aligned_at(mi_heap_t* hea
// see <https://github.com/microsoft/mimalloc/issues/63#issuecomment-508272992>
// --------------------------------------------------------------------------------
mi_decl_export mi_decl_allocator void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_export mi_decl_allocator void* mi_recalloc(void* p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2,3);
mi_decl_nodiscard mi_decl_export void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export void* mi_recalloc(void* p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3);
mi_decl_export mi_decl_allocator void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3);
mi_decl_export mi_decl_allocator void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_export mi_decl_allocator void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2,3) mi_attr_alloc_align(4);
mi_decl_export mi_decl_allocator void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2,3);
mi_decl_nodiscard mi_decl_export void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(2) mi_attr_alloc_align(3);
mi_decl_nodiscard mi_decl_export void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_alloc_size2(2,3) mi_attr_alloc_align(4);
mi_decl_nodiscard mi_decl_export void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size2(2,3);
mi_decl_export mi_decl_allocator void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(3);
mi_decl_export mi_decl_allocator void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(3,4);
mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3);
mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_alloc_size2(3,4);
mi_decl_export mi_decl_allocator void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(3) mi_attr_alloc_align(4);
mi_decl_export mi_decl_allocator void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(3);
mi_decl_export mi_decl_allocator void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(3,4) mi_attr_alloc_align(5);
mi_decl_export mi_decl_allocator void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(3,4);
mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(3) mi_attr_alloc_align(4);
mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(3);
mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_alloc_size2(3,4) mi_attr_alloc_align(5);
mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size2(3,4);
// ------------------------------------------------------
@ -222,7 +233,6 @@ mi_decl_export mi_decl_allocator void* mi_heap_recalloc_aligned_at(mi_heap_t* he
// ------------------------------------------------------
mi_decl_export bool mi_heap_contains_block(mi_heap_t* heap, const void* p);
mi_decl_export bool mi_heap_check_owned(mi_heap_t* heap, const void* p);
mi_decl_export bool mi_check_owned(const void* p);
@ -240,8 +250,8 @@ typedef bool (mi_cdecl mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_
mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg);
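
As a usage illustration, a visitor can for example count live blocks. The callback below assumes the full `mi_block_visit_fun` signature `(heap, area, block, block_size, arg)` with a `mi_heap_area_t` second parameter (truncated in the hunk header above); treat it as a sketch:

#include <mimalloc.h>
#include <stdio.h>

// count the live blocks of a heap (block != NULL marks an actual block visit)
static bool count_blocks(const mi_heap_t* heap, const mi_heap_area_t* area,
                         void* block, size_t block_size, void* arg) {
  (void)heap; (void)area; (void)block_size;
  if (block != NULL) (*(size_t*)arg)++;
  return true;  // keep visiting
}

int main(void) {
  mi_heap_t* heap = mi_heap_new();
  void* p = mi_heap_malloc(heap, 32);
  size_t count = 0;
  mi_heap_visit_blocks(heap, true /* visit individual blocks */, &count_blocks, &count);
  printf("live blocks: %zu (p=%p)\n", count, p);
  mi_heap_destroy(heap);
  return 0;
}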
// Experimental
mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept;
mi_decl_export bool mi_is_redirected() mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export bool mi_is_redirected() mi_attr_noexcept;
mi_decl_export int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept;
mi_decl_export int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept;
@ -249,16 +259,17 @@ mi_decl_export int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size
// deprecated
mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept;
// ------------------------------------------------------
// Convenience
// ------------------------------------------------------
#define mi_malloc_tp(tp) ((tp*)mi_malloc(sizeof(tp)))
#define mi_zalloc_tp(tp) ((tp*)mi_zalloc(sizeof(tp)))
#define mi_calloc_tp(tp,n) ((tp*)mi_calloc(n,sizeof(tp)))
#define mi_mallocn_tp(tp,n) ((tp*)mi_mallocn(n,sizeof(tp)))
#define mi_reallocn_tp(p,tp,n) ((tp*)mi_reallocn(p,n,sizeof(tp)))
#define mi_recalloc_tp(p,tp,n) ((tp*)mi_recalloc(p,n,sizeof(tp)))
#define mi_malloc_tp(tp) ((tp*)mi_malloc(sizeof(tp)))
#define mi_zalloc_tp(tp) ((tp*)mi_zalloc(sizeof(tp)))
#define mi_calloc_tp(tp,n) ((tp*)mi_calloc(n,sizeof(tp)))
#define mi_mallocn_tp(tp,n) ((tp*)mi_mallocn(n,sizeof(tp)))
#define mi_reallocn_tp(p,tp,n) ((tp*)mi_reallocn(p,n,sizeof(tp)))
#define mi_recalloc_tp(p,tp,n) ((tp*)mi_recalloc(p,n,sizeof(tp)))
#define mi_heap_malloc_tp(hp,tp) ((tp*)mi_heap_malloc(hp,sizeof(tp)))
#define mi_heap_zalloc_tp(hp,tp) ((tp*)mi_heap_zalloc(hp,sizeof(tp)))
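
For illustration, the typed wrappers read naturally at call sites; a small self-contained example using the macros above (`point_t` is made up):

#include <mimalloc.h>
#include <stdio.h>

typedef struct point_s { double x, y; } point_t;

int main(void) {
  point_t* p  = mi_malloc_tp(point_t);      // typed malloc: sizeof(point_t) bytes
  point_t* ps = mi_calloc_tp(point_t, 8);   // 8 zero-initialized points
  if (p == NULL || ps == NULL) return 1;
  p->x = 1.0; p->y = 2.0;
  printf("%f %f %f\n", p->x, p->y, ps[3].x); // ps[3].x is 0.0
  mi_free(ps);
  mi_free(p);
  return 0;
}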
@ -298,56 +309,56 @@ typedef enum mi_option_e {
} mi_option_t;
mi_decl_export bool mi_option_is_enabled(mi_option_t option);
mi_decl_export void mi_option_enable(mi_option_t option);
mi_decl_export void mi_option_disable(mi_option_t option);
mi_decl_export void mi_option_set_enabled(mi_option_t option, bool enable);
mi_decl_export void mi_option_set_enabled_default(mi_option_t option, bool enable);
mi_decl_nodiscard mi_decl_export bool mi_option_is_enabled(mi_option_t option);
mi_decl_export void mi_option_enable(mi_option_t option);
mi_decl_export void mi_option_disable(mi_option_t option);
mi_decl_export void mi_option_set_enabled(mi_option_t option, bool enable);
mi_decl_export void mi_option_set_enabled_default(mi_option_t option, bool enable);
mi_decl_export long mi_option_get(mi_option_t option);
mi_decl_export void mi_option_set(mi_option_t option, long value);
mi_decl_export void mi_option_set_default(mi_option_t option, long value);
mi_decl_nodiscard mi_decl_export long mi_option_get(mi_option_t option);
mi_decl_export void mi_option_set(mi_option_t option, long value);
mi_decl_export void mi_option_set_default(mi_option_t option, long value);
// -------------------------------------------------------------------------------------------------------
// "mi" prefixed implementations of various posix, Unix, Windows, and C++ allocation functions.
// (This can be convenient when providing overrides of these functions as done in `mimalloc-override.h`.)
// note: we use `mi_cfree` as "checked free" and it checks if the pointer is in our heap before freeing it.
// -------------------------------------------------------------------------------------------------------
mi_decl_export size_t mi_malloc_size(const void* p) mi_attr_noexcept;
mi_decl_export size_t mi_malloc_usable_size(const void *p) mi_attr_noexcept;
mi_decl_export void mi_cfree(void* p) mi_attr_noexcept;
mi_decl_export void* mi__expand(void* p, size_t newsize) mi_attr_noexcept;
mi_decl_export void mi_cfree(void* p) mi_attr_noexcept;
mi_decl_export void* mi__expand(void* p, size_t newsize) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export size_t mi_malloc_size(const void* p) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export size_t mi_malloc_usable_size(const void *p) mi_attr_noexcept;
mi_decl_export int mi_posix_memalign(void** p, size_t alignment, size_t size) mi_attr_noexcept;
mi_decl_export void* mi_memalign(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1);
mi_decl_export void* mi_valloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_export int mi_posix_memalign(void** p, size_t alignment, size_t size) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_memalign(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_valloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_pvalloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1);
mi_decl_export void* mi_pvalloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_export void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1);
mi_decl_export void* mi_reallocarray(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2,3);
mi_decl_nodiscard mi_decl_export void* mi_reallocarray(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3);
mi_decl_nodiscard mi_decl_export void* mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export void* mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept;
mi_decl_export void* mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept;
mi_decl_export void* mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export mi_decl_restrict unsigned short* mi_wcsdup(const unsigned short* s) mi_attr_noexcept mi_attr_malloc;
mi_decl_nodiscard mi_decl_export mi_decl_restrict unsigned char* mi_mbsdup(const unsigned char* s) mi_attr_noexcept mi_attr_malloc;
mi_decl_export int mi_dupenv_s(char** buf, size_t* size, const char* name) mi_attr_noexcept;
mi_decl_export int mi_wdupenv_s(unsigned short** buf, size_t* size, const unsigned short* name) mi_attr_noexcept;
mi_decl_export unsigned short* mi_wcsdup(const unsigned short* s) mi_attr_noexcept;
mi_decl_export unsigned char* mi_mbsdup(const unsigned char* s) mi_attr_noexcept;
mi_decl_export int mi_dupenv_s(char** buf, size_t* size, const char* name) mi_attr_noexcept;
mi_decl_export int mi_wdupenv_s(unsigned short** buf, size_t* size, const unsigned short* name) mi_attr_noexcept;
mi_decl_export void mi_free_size(void* p, size_t size) mi_attr_noexcept;
mi_decl_export void mi_free_size(void* p, size_t size) mi_attr_noexcept;
mi_decl_export void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept;
mi_decl_export void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept;
mi_decl_export void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept;
// The `mi_new` wrappers implement C++ semantics on out-of-memory instead of directly returning `NULL`.
// (and call `std::get_new_handler` and potentially raise a `std::bad_alloc` exception).
mi_decl_export void* mi_new(size_t size) mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_export void* mi_new_aligned(size_t size, size_t alignment) mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_export void* mi_new_nothrow(size_t size) mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_export void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_export void* mi_new_n(size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(1, 2);
mi_decl_export void* mi_new_realloc(void* p, size_t newsize) mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_export void* mi_new_reallocn(void* p, size_t newcount, size_t size) mi_attr_malloc mi_attr_alloc_size2(2, 3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new(size_t size) mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_n(size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(1, 2);
mi_decl_nodiscard mi_decl_export void* mi_new_realloc(void* p, size_t newsize) mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export void* mi_new_reallocn(void* p, size_t newcount, size_t size) mi_attr_alloc_size2(2, 3);
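
The out-of-memory semantics described above can be sketched in plain C: on allocation failure, invoke the registered handler and retry, aborting (where C++ would raise `std::bad_alloc`) when no handler is installed. The handler mechanism below is a hypothetical stand-in for `std::get_new_handler`, not mimalloc's implementation:

#include <stdio.h>
#include <stdlib.h>

// hypothetical stand-in for std::get_new_handler: a callback that may
// free up memory, or NULL if no handler is installed
typedef void (*new_handler_t)(void);
static new_handler_t current_handler = NULL;

static void* new_like_malloc(size_t size) {
  void* p;
  while ((p = malloc(size)) == NULL) {
    if (current_handler == NULL) {
      fprintf(stderr, "out of memory\n"); // where C++ would throw std::bad_alloc
      abort();
    }
    current_handler();  // let the handler release memory, then retry
  }
  return p;
}

int main(void) {
  char* p = new_like_malloc(16);
  free(p);
  return 0;
}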
#ifdef __cplusplus
}
@ -359,7 +370,7 @@ mi_decl_export void* mi_new_reallocn(void* p, size_t newcount, size_t size) mi_a
// ---------------------------------------------------------------------------------------------
#ifdef __cplusplus
#include <limits> // std::numeric_limits<ptrdiff_t>
#include <cstdint> // PTRDIFF_MAX
#if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11
#include <type_traits> // std::true_type
#include <utility> // std::forward
@ -375,17 +386,17 @@ template<class T> struct mi_stl_allocator {
typedef value_type const* const_pointer;
template <class U> struct rebind { typedef mi_stl_allocator<U> other; };
mi_stl_allocator() mi_attr_noexcept { }
mi_stl_allocator(const mi_stl_allocator&) mi_attr_noexcept { }
mi_stl_allocator() mi_attr_noexcept = default;
mi_stl_allocator(const mi_stl_allocator&) mi_attr_noexcept = default;
template<class U> mi_stl_allocator(const mi_stl_allocator<U>&) mi_attr_noexcept { }
mi_stl_allocator select_on_container_copy_construction() const { return *this; }
void deallocate(T* p, size_type) { mi_free(p); }
#if (__cplusplus >= 201703L) // C++17
T* allocate(size_type count) { return static_cast<T*>(mi_new_n(count, sizeof(T))); }
T* allocate(size_type count, const void*) { return allocate(count); }
mi_decl_nodiscard T* allocate(size_type count) { return static_cast<T*>(mi_new_n(count, sizeof(T))); }
mi_decl_nodiscard T* allocate(size_type count, const void*) { return allocate(count); }
#else
pointer allocate(size_type count, const void* = 0) { return static_cast<pointer>(mi_new_n(count, sizeof(value_type))); }
mi_decl_nodiscard pointer allocate(size_type count, const void* = 0) { return static_cast<pointer>(mi_new_n(count, sizeof(value_type))); }
#endif
#if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11
@ -400,7 +411,7 @@ template<class T> struct mi_stl_allocator {
void destroy(pointer p) { p->~value_type(); }
#endif
size_type max_size() const mi_attr_noexcept { return (std::numeric_limits<difference_type>::max() / sizeof(value_type)); }
size_type max_size() const mi_attr_noexcept { return (PTRDIFF_MAX/sizeof(value_type)); }
pointer address(reference x) const { return &x; }
const_pointer address(const_reference x) const { return &x; }
};

View File

@ -10,8 +10,8 @@
mimalloc (pronounced "me-malloc")
is a general purpose allocator with excellent [performance](#performance) characteristics.
Initially developed by Daan Leijen for the run-time systems of the
[Koka](https://github.com/koka-lang/koka) and [Lean](https://github.com/leanprover/lean) languages.
Latest release:`v1.4.0` (2020-01-22).
[Koka](https://github.com/koka-lang/koka) and [Lean](https://github.com/leanprover/lean) languages.
Latest release: `v1.6.1` (2020-02-17).
It is a drop-in replacement for `malloc` and can be used in other programs
without code changes, for example, on dynamically linked ELF-based systems (Linux, BSD, etc.) you can use it as:
@ -47,7 +47,7 @@ It also has an easy way to override the allocator in [Windows](#override_on_wind
- __fast__: In our benchmarks (see [below](#performance)),
_mimalloc_ outperforms other leading allocators (_jemalloc_, _tcmalloc_, _Hoard_, etc),
and usually uses less memory (at most 25% more in the worst case). A nice property
is that it does consistently well over a wide range of benchmarks. There is also good huge OS page
is that it does consistently well over a wide range of benchmarks. There is also good huge OS page
support for larger server programs.
The [documentation](https://microsoft.github.io/mimalloc) gives a full overview of the API.
@ -57,7 +57,15 @@ Enjoy!
### Releases
* 2020-01-22, `v1.4.0`: stable release 1.4: improved performance for delayed OS page reset,
* 2020-02-17, `v1.6.1`: stable release 1.6.1: minor updates (build with clang-cl, fix alignment issue for small objects).
* 2020-02-09, `v1.6.0`: stable release 1.6: fixed a potential memory leak, improved overriding
and thread-local support on FreeBSD, NetBSD, DragonFly, and macOS. New byte-precise
heap block overflow detection in debug mode (besides the existing double-free and free-list
corruption detection). Added the `nodiscard` attribute to most allocation functions.
Enabled `MIMALLOC_PAGE_RESET` by default. New reclamation strategy for abandoned heap pages
for a better memory footprint.
* 2020-02-09, `v1.5.0`: stable release 1.5: improved free performance, small bug fixes.
* 2020-01-22, `v1.4.0`: stable release 1.4: improved performance for delayed OS page reset,
more eager concurrent free, addition of STL allocator, fixed potential memory leak.
* 2020-01-15, `v1.3.0`: stable release 1.3: bug fixes, improved randomness and [stronger
free list encoding](https://github.com/microsoft/mimalloc/blob/783e3377f79ee82af43a0793910a9f2d01ac7863/include/mimalloc-internal.h#L396) in secure mode.
@ -212,13 +220,13 @@ or via environment variables.
<!--
- `MIMALLOC_EAGER_REGION_COMMIT=1`: on Windows, commit large (256MiB) regions eagerly. On Windows, these regions
show in the working set even though usually just a small part is committed to physical memory. This is why it is
turned off by default on Windows, as it does not look good in the task manager. However, turning it on has no
turned off by default on Windows, as it does not look good in the task manager. However, turning it on has no
real drawbacks and may improve performance slightly.
-->
- `MIMALLOC_RESERVE_HUGE_OS_PAGES=N`: where N is the number of 1GiB huge OS pages. This reserves the huge pages at
startup and can give a substantial (latency) performance improvement on long-running workloads. Usually it is better not to use
`MIMALLOC_LARGE_OS_PAGES` in combination with this setting. Just like large OS pages, use with care as reserving
contiguous physical memory can take a long time when memory is fragmented (but reserving the huge pages is done at
contiguous physical memory can take a long time when memory is fragmented (but reserving the huge pages is done at
startup only once).
Note that we usually need to explicitly enable huge OS pages (as on [Windows][windows-huge] and [Linux][linux-huge]). With huge OS pages, it may be beneficial to also set
`MIMALLOC_EAGER_COMMIT_DELAY=N` (usually with `N` as 1) to delay the initial `N` segments
@ -268,8 +276,7 @@ resolved to the _mimalloc_ library.
Note that certain security restrictions may apply when doing this from
the [shell](https://stackoverflow.com/questions/43941322/dyld-insert-libraries-ignored-when-calling-application-through-bash).
Note: unfortunately, at this time, dynamic overriding on macOS seems broken but it is
actively worked on to fix this (see issue [`#50`](https://github.com/microsoft/mimalloc/issues/50)).
(Note: macOS support for dynamic overriding is recent, please report any issues.)
### Override on Windows

View File

@ -18,20 +18,22 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
// note: we don't require `size > offset`, we just guarantee that
// the address at offset is aligned regardless of the allocated size.
mi_assert(alignment > 0 && alignment % sizeof(void*) == 0);
if (mi_unlikely(size > PTRDIFF_MAX)) return NULL; // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
if (mi_unlikely(alignment==0 || !_mi_is_power_of_two(alignment))) return NULL; // require power-of-two (see <https://en.cppreference.com/w/c/memory/aligned_alloc>)
const uintptr_t align_mask = alignment-1; // for any x, `(x & align_mask) == (x % alignment)`
// try if there is a small block available with just the right alignment
if (mi_likely(size <= MI_SMALL_SIZE_MAX)) {
mi_page_t* page = _mi_heap_get_free_small_page(heap,size);
const size_t padsize = size + MI_PADDING_SIZE;
if (mi_likely(padsize <= MI_SMALL_SIZE_MAX)) {
mi_page_t* page = _mi_heap_get_free_small_page(heap,padsize);
const bool is_aligned = (((uintptr_t)page->free+offset) & align_mask)==0;
if (mi_likely(page->free != NULL && is_aligned))
{
#if MI_STAT>1
mi_heap_stat_increase( heap, malloc, size);
#endif
void* p = _mi_page_malloc(heap,page,size); // TODO: inline _mi_page_malloc
void* p = _mi_page_malloc(heap,page,padsize); // TODO: inline _mi_page_malloc
mi_assert_internal(p != NULL);
mi_assert_internal(((uintptr_t)p + offset) % alignment == 0);
if (zero) _mi_block_zero_init(page,p,size);
@ -40,7 +42,7 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
}
// use regular allocation if it is guaranteed to fit the alignment constraints
if (offset==0 && alignment<=size && size<=MI_MEDIUM_OBJ_SIZE_MAX && (size&align_mask)==0) {
if (offset==0 && alignment<=padsize && padsize<=MI_MEDIUM_OBJ_SIZE_MAX && (padsize&align_mask)==0) {
void* p = _mi_heap_malloc_zero(heap, size, zero);
mi_assert_internal(p == NULL || ((uintptr_t)p % alignment) == 0);
return p;
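
The `align_mask` identity relied on here holds for any power-of-two alignment; a minimal standalone check, independent of mimalloc:

#include <assert.h>
#include <stdint.h>

int main(void) {
  for (uintptr_t alignment = 1; alignment <= 4096; alignment <<= 1) {
    const uintptr_t align_mask = alignment - 1;
    for (uintptr_t x = 0; x < 10000; x++) {
      // for powers of two, masking equals the modulo, and a zero result means aligned
      assert((x & align_mask) == (x % alignment));
    }
  }
  return 0;
}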
@ -61,53 +63,53 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
}
mi_decl_allocator void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, false);
}
mi_decl_allocator void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_malloc_aligned_at(heap, size, alignment, 0);
}
mi_decl_allocator void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, true);
}
mi_decl_allocator void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_zalloc_aligned_at(heap, size, alignment, 0);
}
mi_decl_allocator void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
mi_decl_restrict void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return mi_heap_zalloc_aligned_at(heap, total, alignment, offset);
}
mi_decl_allocator void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept {
mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_calloc_aligned_at(heap,count,size,alignment,0);
}
mi_decl_allocator void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_malloc_aligned_at(mi_get_default_heap(), size, alignment, offset);
}
mi_decl_allocator void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_malloc_aligned(mi_get_default_heap(), size, alignment);
}
mi_decl_allocator void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_zalloc_aligned_at(mi_get_default_heap(), size, alignment, offset);
}
mi_decl_allocator void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_zalloc_aligned(mi_get_default_heap(), size, alignment);
}
mi_decl_allocator void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_calloc_aligned_at(mi_get_default_heap(), count, size, alignment, offset);
}
mi_decl_allocator void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept {
mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_calloc_aligned(mi_get_default_heap(), count, size, alignment);
}
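
Typical use of these entry points, as a sketch: the `_at` variants align `p + offset` rather than `p` itself, which is useful when a header precedes the aligned payload.

#include <mimalloc.h>

void aligned_usage(void) {
  void* a = mi_malloc_aligned(100, 64);         // a is 64-byte aligned
  void* b = mi_malloc_aligned_at(100, 64, 16);  // (uintptr_t)b + 16 is 64-byte aligned
  mi_free(a);
  mi_free(b);
}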
@@ -150,55 +152,55 @@ static void* mi_heap_realloc_zero_aligned(mi_heap_t* heap, void* p, size_t newsi
return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,zero);
}
mi_decl_allocator void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,false);
}
mi_decl_allocator void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
return mi_heap_realloc_zero_aligned(heap,p,newsize,alignment,false);
}
mi_decl_allocator void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_realloc_zero_aligned_at(heap, p, newsize, alignment, offset, true);
}
mi_decl_allocator void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
return mi_heap_realloc_zero_aligned(heap, p, newsize, alignment, true);
}
mi_decl_allocator void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(newcount, size, &total)) return NULL;
return mi_heap_rezalloc_aligned_at(heap, p, total, alignment, offset);
}
mi_decl_allocator void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(newcount, size, &total)) return NULL;
return mi_heap_rezalloc_aligned(heap, p, total, alignment);
}
mi_decl_allocator void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_realloc_aligned_at(mi_get_default_heap(), p, newsize, alignment, offset);
}
mi_decl_allocator void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
return mi_heap_realloc_aligned(mi_get_default_heap(), p, newsize, alignment);
}
mi_decl_allocator void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_rezalloc_aligned_at(mi_get_default_heap(), p, newsize, alignment, offset);
}
mi_decl_allocator void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
return mi_heap_rezalloc_aligned(mi_get_default_heap(), p, newsize, alignment);
}
mi_decl_allocator void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_recalloc_aligned_at(mi_get_default_heap(), p, newcount, size, alignment, offset);
}
mi_decl_allocator void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_recalloc_aligned(mi_get_default_heap(), p, newcount, size, alignment);
}

View File

@@ -17,6 +17,12 @@ terms of the MIT license. A copy of the license can be found in the file
/* ------------------------------------------------------
Override system malloc on macOS
This is done through the malloc zone interface.
It seems we also need to interpose (see `alloc-override.c`);
otherwise we get zone errors, as there are usually
already allocations done by the time we take over the
zone. Unfortunately, that means we need to replace
`free` with a checked free (`cfree`), impacting
performance.
------------------------------------------------------ */
#include <AvailabilityMacros.h>
@@ -35,34 +41,42 @@ extern malloc_zone_t* malloc_default_purgeable_zone(void) __attribute__((weak_im
------------------------------------------------------ */
static size_t zone_size(malloc_zone_t* zone, const void* p) {
UNUSED(zone); UNUSED(p);
return 0; // as we cannot guarantee that `p` comes from us, just return 0
}
static void* zone_malloc(malloc_zone_t* zone, size_t size) {
UNUSED(zone);
return mi_malloc(size);
}
static void* zone_calloc(malloc_zone_t* zone, size_t count, size_t size) {
UNUSED(zone);
return mi_calloc(count, size);
}
static void* zone_valloc(malloc_zone_t* zone, size_t size) {
UNUSED(zone);
return mi_malloc_aligned(size, _mi_os_page_size());
}
static void zone_free(malloc_zone_t* zone, void* p) {
UNUSED(zone);
return mi_free(p);
}
static void* zone_realloc(malloc_zone_t* zone, void* p, size_t newsize) {
UNUSED(zone);
return mi_realloc(p, newsize);
}
static void* zone_memalign(malloc_zone_t* zone, size_t alignment, size_t size) {
UNUSED(zone);
return mi_malloc_aligned(size,alignment);
}
static void zone_destroy(malloc_zone_t* zone) {
UNUSED(zone);
// todo: ignore for now?
}
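
For context, taking over the default zone amounts to registering our own zone and then unregistering/re-registering the old default so ours is promoted. A hedged sketch using the public zone API (`my_zone` and its wiring are illustrative; mimalloc's actual setup fills in all the fields with the zone functions above):

#include <malloc/malloc.h>
#include <string.h>

static malloc_zone_t my_zone; // wire its function pointers to the zone_* functions above

static void install_zone(void) {
  memset(&my_zone, 0, sizeof(my_zone));
  my_zone.zone_name = "example";
  // ... assign my_zone.size, .malloc, .calloc, .free, .realloc, etc. ...
  malloc_zone_register(&my_zone);
  malloc_zone_t* d = malloc_default_zone();
  malloc_zone_unregister(d); // unregistering the default promotes the next zone (ours)
  malloc_zone_register(d);   // re-register so blocks from the old zone can still be freed
}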
@@ -83,11 +97,13 @@ static void zone_batch_free(malloc_zone_t* zone, void** ps, unsigned count) {
}
static size_t zone_pressure_relief(malloc_zone_t* zone, size_t size) {
UNUSED(zone); UNUSED(size);
mi_collect(false);
return 0;
}
static void zone_free_definite_size(malloc_zone_t* zone, void* p, size_t size) {
UNUSED(size);
zone_free(zone,p);
}
@@ -102,34 +118,43 @@ static kern_return_t intro_enumerator(task_t task, void* p,
vm_range_recorder_t recorder)
{
// todo: enumerate all memory
UNUSED(task); UNUSED(p); UNUSED(type_mask); UNUSED(zone_address);
UNUSED(reader); UNUSED(recorder);
return KERN_SUCCESS;
}
static size_t intro_good_size(malloc_zone_t* zone, size_t size) {
UNUSED(zone);
return mi_good_size(size);
}
static boolean_t intro_check(malloc_zone_t* zone) {
UNUSED(zone);
return true;
}
static void intro_print(malloc_zone_t* zone, boolean_t verbose) {
UNUSED(zone); UNUSED(verbose);
mi_stats_print(NULL);
}
static void intro_log(malloc_zone_t* zone, void* p) {
UNUSED(zone); UNUSED(p);
// todo?
}
static void intro_force_lock(malloc_zone_t* zone) {
UNUSED(zone);
// todo?
}
static void intro_force_unlock(malloc_zone_t* zone) {
UNUSED(zone);
// todo?
}
static void intro_statistics(malloc_zone_t* zone, malloc_statistics_t* stats) {
UNUSED(zone);
// todo...
stats->blocks_in_use = 0;
stats->size_in_use = 0;
@@ -138,6 +163,7 @@ static void intro_statistics(malloc_zone_t* zone, malloc_statistics_t* stats) {
}
static boolean_t intro_zone_locked(malloc_zone_t* zone) {
UNUSED(zone);
return false;
}
@@ -161,7 +187,6 @@ static malloc_zone_t* mi_get_default_zone()
}
}
static void __attribute__((constructor)) _mi_macos_override_malloc()
{
static malloc_introspection_t intro;
@@ -201,6 +226,7 @@ static void __attribute__((constructor)) _mi_macos_override_malloc()
zone.free_definite_size = &zone_free_definite_size;
zone.pressure_relief = &zone_pressure_relief;
intro.zone_locked = &intro_zone_locked;
intro.statistics = &intro_statistics;
// force the purgeable zone to exist to avoid strange bugs
if (malloc_default_purgeable_zone) {
@@ -225,6 +251,7 @@ static void __attribute__((constructor)) _mi_macos_override_malloc()
malloc_zone_unregister(purgeable_zone);
malloc_zone_register(purgeable_zone);
}
}
#endif // MI_MALLOC_OVERRIDE

View File

@@ -13,7 +13,7 @@ terms of the MIT license. A copy of the license can be found in the file
#error "It is only possible to override "malloc" on Windows when building as a DLL (and linking the C runtime as a DLL)"
#endif
#if defined(MI_MALLOC_OVERRIDE) && !defined(_WIN32)
#if defined(MI_MALLOC_OVERRIDE) && !(defined(_WIN32)) // || (defined(__MACH__) && !defined(MI_INTERPOSE)))
// ------------------------------------------------------
// Override system malloc
@@ -47,26 +47,31 @@ terms of the MIT license. A copy of the license can be found in the file
const void* replacement;
const void* target;
};
#define MI_INTERPOSEX(oldfun,newfun) { (const void*)&newfun, (const void*)&oldfun }
#define MI_INTERPOSE_MI(fun) MI_INTERPOSEX(fun,mi_##fun)
#define MI_INTERPOSE_FUN(oldfun,newfun) { (const void*)&newfun, (const void*)&oldfun }
#define MI_INTERPOSE_MI(fun) MI_INTERPOSE_FUN(fun,mi_##fun)
__attribute__((used)) static struct mi_interpose_s _mi_interposes[] __attribute__((section("__DATA, __interpose"))) =
{
MI_INTERPOSE_MI(malloc),
MI_INTERPOSE_MI(calloc),
MI_INTERPOSE_MI(realloc),
MI_INTERPOSE_MI(free),
MI_INTERPOSE_MI(strdup),
MI_INTERPOSE_MI(strndup)
MI_INTERPOSE_MI(strndup),
MI_INTERPOSE_MI(realpath),
MI_INTERPOSE_MI(posix_memalign),
MI_INTERPOSE_MI(reallocf),
MI_INTERPOSE_MI(valloc),
// some code allocates from a zone but deallocates using plain free :-( (like NxHashResizeToCapacity <https://github.com/nneonneo/osx-10.9-opensource/blob/master/objc4-551.1/runtime/hashtable2.mm>)
MI_INTERPOSE_FUN(free,mi_cfree), // use safe free that checks if pointers are from us
};
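
The `__interpose` section is a dyld mechanism: each tuple pairs a replacement with the function it replaces, and dyld rebinds every other image accordingly (the interposing image itself keeps calling the original, which is what allows wrapping). A minimal standalone interposer, compiled as a dylib and activated with `DYLD_INSERT_LIBRARIES` (illustrative only, not mimalloc code):

#include <stdio.h>
#include <stdlib.h>

static void* my_malloc(size_t size) {
  fprintf(stderr, "malloc(%zu)\n", size);
  return malloc(size); // resolves to the original inside the interposing image
}

__attribute__((used)) static struct { const void* replacement; const void* target; }
  my_interposes[] __attribute__((section("__DATA, __interpose"))) = {
  { (const void*)&my_malloc, (const void*)&malloc },
};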
#elif defined(_MSC_VER)
// cannot override malloc unless using a dll.
// we just override new/delete which does work in a static library.
#else
// On all other systems forward to our API
void* malloc(size_t size) mi_attr_noexcept MI_FORWARD1(mi_malloc, size);
void* calloc(size_t size, size_t n) mi_attr_noexcept MI_FORWARD2(mi_calloc, size, n);
void* realloc(void* p, size_t newsize) mi_attr_noexcept MI_FORWARD2(mi_realloc, p, newsize);
void free(void* p) mi_attr_noexcept MI_FORWARD0(mi_free, p);
void* malloc(size_t size) MI_FORWARD1(mi_malloc, size);
void* calloc(size_t size, size_t n) MI_FORWARD2(mi_calloc, size, n);
void* realloc(void* p, size_t newsize) MI_FORWARD2(mi_realloc, p, newsize);
void free(void* p) MI_FORWARD0(mi_free, p);
#endif
#if (defined(__GNUC__) || defined(__clang__)) && !defined(__MACH__)
@@ -94,8 +99,8 @@ terms of the MIT license. A copy of the license can be found in the file
void* operator new[](std::size_t n, const std::nothrow_t& tag) noexcept { UNUSED(tag); return mi_new_nothrow(n); }
#if (__cplusplus >= 201402L || _MSC_VER >= 1916)
void operator delete (void* p, std::size_t n) MI_FORWARD02(mi_free_size,p,n);
void operator delete[](void* p, std::size_t n) MI_FORWARD02(mi_free_size,p,n);
void operator delete (void* p, std::size_t n) noexcept MI_FORWARD02(mi_free_size,p,n);
void operator delete[](void* p, std::size_t n) noexcept MI_FORWARD02(mi_free_size,p,n);
#endif
#if (__cplusplus > 201402L || defined(__cpp_aligned_new)) && (!defined(__GNUC__) || (__GNUC__ > 5))
@@ -194,4 +199,3 @@ int posix_memalign(void** p, size_t alignment, size_t size) { return mi_posix_me
#endif
#endif // MI_MALLOC_OVERRIDE && !_WIN32

View File

@@ -9,7 +9,6 @@ terms of the MIT license. A copy of the license can be found in the file
// mi prefixed public definitions of various POSIX, Unix, and C++ functions
// for convenience and used when overriding these functions.
// ------------------------------------------------------------------------
#include "mimalloc.h"
#include "mimalloc-internal.h"
@@ -47,33 +46,38 @@ int mi_posix_memalign(void** p, size_t alignment, size_t size) mi_attr_noexcept
// Note: The spec dictates we should not modify `*p` on an error. (issue#27)
// <http://man7.org/linux/man-pages/man3/posix_memalign.3.html>
if (p == NULL) return EINVAL;
if (alignment % sizeof(void*) != 0) return EINVAL; // natural alignment
if (alignment % sizeof(void*) != 0) return EINVAL; // natural alignment
if (!_mi_is_power_of_two(alignment)) return EINVAL; // not a power of 2
void* q = mi_malloc_aligned(size, alignment);
void* q = (mi_malloc_satisfies_alignment(alignment, size) ? mi_malloc(size) : mi_malloc_aligned(size, alignment));
if (q==NULL && size != 0) return ENOMEM;
mi_assert_internal(((uintptr_t)q % alignment) == 0);
*p = q;
return 0;
}
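
A quick sketch of the contract this implements: `EINVAL` for a non-power-of-two or non-pointer-multiple alignment, and `*p` left untouched on error (issue #27).

#include <mimalloc.h>

void posix_memalign_example(void) {
  void* p = (void*)0x1;                   // sentinel
  int rc = mi_posix_memalign(&p, 3, 100); // EINVAL: 3 is not a power of two
  // p still holds the sentinel here
  rc = mi_posix_memalign(&p, 64, 100);    // 0: p is 64-byte aligned
  if (rc == 0) mi_free(p);
}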
void* mi_memalign(size_t alignment, size_t size) mi_attr_noexcept {
return mi_malloc_aligned(size, alignment);
mi_decl_restrict void* mi_memalign(size_t alignment, size_t size) mi_attr_noexcept {
void* p = (mi_malloc_satisfies_alignment(alignment,size) ? mi_malloc(size) : mi_malloc_aligned(size, alignment));
mi_assert_internal(((uintptr_t)p % alignment) == 0);
return p;
}
void* mi_valloc(size_t size) mi_attr_noexcept {
return mi_malloc_aligned(size, _mi_os_page_size());
mi_decl_restrict void* mi_valloc(size_t size) mi_attr_noexcept {
return mi_memalign( _mi_os_page_size(), size );
}
void* mi_pvalloc(size_t size) mi_attr_noexcept {
mi_decl_restrict void* mi_pvalloc(size_t size) mi_attr_noexcept {
size_t psize = _mi_os_page_size();
if (size >= SIZE_MAX - psize) return NULL; // overflow
size_t asize = ((size + psize - 1) / psize) * psize;
size_t asize = _mi_align_up(size, psize);
return mi_malloc_aligned(asize, psize);
}
void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept {
mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept {
if (alignment==0 || !_mi_is_power_of_two(alignment)) return NULL;
if ((size&(alignment-1)) != 0) return NULL; // C11 requires integral multiple, see <https://en.cppreference.com/w/c/memory/aligned_alloc>
return mi_malloc_aligned(size, alignment);
void* p = (mi_malloc_satisfies_alignment(alignment, size) ? mi_malloc(size) : mi_malloc_aligned(size, alignment));
mi_assert_internal(((uintptr_t)p % alignment) == 0);
return p;
}
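
C11 additionally requires the size to be an integral multiple of the alignment, hence the extra check; for example:

#include <mimalloc.h>

void aligned_alloc_example(void) {
  void* ok  = mi_aligned_alloc(64, 128); // fine: 128 is a multiple of 64
  void* bad = mi_aligned_alloc(64, 100); // NULL: size % alignment != 0
  mi_free(ok);
  mi_free(bad); // mi_free(NULL) is a no-op
}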
void* mi_reallocarray( void* p, size_t count, size_t size ) mi_attr_noexcept { // BSD
@@ -88,7 +92,7 @@ void* mi__expand(void* p, size_t newsize) mi_attr_noexcept { // Microsoft
return res;
}
unsigned short* mi_wcsdup(const unsigned short* s) mi_attr_noexcept {
mi_decl_restrict unsigned short* mi_wcsdup(const unsigned short* s) mi_attr_noexcept {
if (s==NULL) return NULL;
size_t len;
for(len = 0; s[len] != 0; len++) { }
@@ -100,7 +104,7 @@ unsigned short* mi_wcsdup(const unsigned short* s) mi_attr_noexcept {
return p;
}
unsigned char* mi_mbsdup(const unsigned char* s) mi_attr_noexcept {
mi_decl_restrict unsigned char* mi_mbsdup(const unsigned char* s) mi_attr_noexcept {
return (unsigned char*)mi_strdup((const char*)s);
}

View File

@@ -21,92 +21,120 @@ terms of the MIT license. A copy of the license can be found in the file
// Fast allocation in a page: just pop from the free list.
// Fall back to generic allocation only if the list is empty.
extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept {
extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept {
mi_assert_internal(page->xblock_size==0||mi_page_block_size(page) >= size);
mi_block_t* block = page->free;
if (mi_unlikely(block == NULL)) {
return _mi_malloc_generic(heap, size); // slow path
return _mi_malloc_generic(heap, size);
}
mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
// pop from the free list
page->free = mi_block_next(page,block);
page->free = mi_block_next(page, block);
page->used++;
mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
#if (MI_DEBUG!=0)
#if (MI_DEBUG>0)
if (!page->is_zero) { memset(block, MI_DEBUG_UNINIT, size); }
#elif (MI_SECURE!=0)
block->next = 0; // don't leak internal data
#endif
#if (MI_STAT>1)
if(size <= MI_LARGE_OBJ_SIZE_MAX) {
size_t bin = _mi_bin(size);
mi_heap_stat_increase(heap,normal[bin], 1);
const size_t bsize = mi_page_usable_block_size(page);
if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
const size_t bin = _mi_bin(bsize);
mi_heap_stat_increase(heap, normal[bin], 1);
}
#endif
#if defined(MI_PADDING) && defined(MI_ENCODE_FREELIST)
mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page));
ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - MI_PADDING_SIZE));
mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta));
padding->canary = (uint32_t)(mi_ptr_encode(page,block,page->keys));
padding->delta = (uint32_t)(delta);
uint8_t* fill = (uint8_t*)padding - delta;
const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // set at most N initial padding bytes
for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; }
#endif
return block;
}
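
Stripped of statistics and padding, the fast path above is an intrusive singly-linked free-list pop; in isolation it looks roughly like this (generic names, for illustration):

#include <stddef.h>

typedef struct block_s { struct block_s* next; } block_t;

static inline void* freelist_pop(block_t** free_list) {
  block_t* b = *free_list;
  if (b == NULL) return NULL; // caller falls back to the generic (slow) path
  *free_list = b->next;       // O(1) pop
  return b;
}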
// allocate a small block
extern inline mi_decl_allocator void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
mi_assert(size <= MI_SMALL_SIZE_MAX);
mi_page_t* page = _mi_heap_get_free_small_page(heap,size);
return _mi_page_malloc(heap, page, size);
}
extern inline mi_decl_allocator void* mi_malloc_small(size_t size) mi_attr_noexcept {
return mi_heap_malloc_small(mi_get_default_heap(), size);
}
// zero initialized small block
mi_decl_allocator void* mi_zalloc_small(size_t size) mi_attr_noexcept {
void* p = mi_malloc_small(size);
if (p != NULL) { memset(p, 0, size); }
return p;
}
// The main allocation function
extern inline mi_decl_allocator void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
mi_assert(heap!=NULL);
mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
void* p;
if (mi_likely(size <= MI_SMALL_SIZE_MAX)) {
p = mi_heap_malloc_small(heap, size);
}
else {
p = _mi_malloc_generic(heap, size);
mi_assert(size <= MI_SMALL_SIZE_MAX);
#if (MI_PADDING)
if (size == 0) {
size = sizeof(void*);
}
#endif
mi_page_t* page = _mi_heap_get_free_small_page(heap,size + MI_PADDING_SIZE);
void* p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE);
mi_assert_internal(p==NULL || mi_usable_size(p) >= size);
#if MI_STAT>1
if (p != NULL) {
if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); }
mi_heap_stat_increase( heap, malloc, mi_good_size(size) ); // overestimate for aligned sizes
mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
}
#endif
return p;
}
extern inline mi_decl_allocator void* mi_malloc(size_t size) mi_attr_noexcept {
extern inline mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept {
return mi_heap_malloc_small(mi_get_default_heap(), size);
}
// The main allocation function
extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
if (mi_likely(size <= MI_SMALL_SIZE_MAX)) {
return mi_heap_malloc_small(heap, size);
}
else {
mi_assert(heap!=NULL);
mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE); // note: size can overflow but it is detected in malloc_generic
mi_assert_internal(p == NULL || mi_usable_size(p) >= size);
#if MI_STAT>1
if (p != NULL) {
if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); }
mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
}
#endif
return p;
}
}
extern inline mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept {
return mi_heap_malloc(mi_get_default_heap(), size);
}
void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size) {
// note: we need to initialize the whole block to zero, not just size
// note: we need to initialize the whole usable block size to zero, not just the requested size,
// or the recalloc/rezalloc functions cannot safely expand in place (see issue #63)
UNUSED_RELEASE(size);
UNUSED(size);
mi_assert_internal(p != NULL);
mi_assert_internal(mi_page_block_size(page) >= size); // size can be zero
mi_assert_internal(mi_usable_size(p) >= size); // size can be zero
mi_assert_internal(_mi_ptr_page(p)==page);
if (page->is_zero) {
// already zero initialized memory?
if (page->is_zero && size > sizeof(mi_block_t)) {
// already zero initialized memory
((mi_block_t*)p)->next = 0; // clear the free list pointer
mi_assert_expensive(mi_mem_is_zero(p, mi_page_block_size(page)));
mi_assert_expensive(mi_mem_is_zero(p, mi_usable_size(p)));
}
else {
// otherwise memset
memset(p, 0, mi_page_block_size(page));
memset(p, 0, mi_usable_size(p));
}
}
// zero initialized small block
mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept {
void* p = mi_malloc_small(size);
if (p != NULL) {
_mi_block_zero_init(_mi_ptr_page(p), p, size); // todo: can we avoid getting the page again?
}
return p;
}
void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) {
void* p = mi_heap_malloc(heap,size);
if (zero && p != NULL) {
@@ -115,11 +143,11 @@ void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) {
return p;
}
extern inline mi_decl_allocator void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
return _mi_heap_malloc_zero(heap, size, true);
}
mi_decl_allocator void* mi_zalloc(size_t size) mi_attr_noexcept {
mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept {
return mi_heap_zalloc(mi_get_default_heap(),size);
}
@@ -153,7 +181,7 @@ static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, con
}
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
mi_block_t* n = mi_block_nextx(page, block, page->key[0], page->key[1]); // pretend it is freed, and get the decoded first field
mi_block_t* n = mi_block_nextx(page, block, page->keys); // pretend it is freed, and get the decoded first field
if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 && // quick check: aligned pointer?
(n==NULL || mi_is_in_same_page(block, n))) // quick check: in same page or NULL?
{
@@ -171,6 +199,88 @@ static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block
}
#endif
// ---------------------------------------------------------------------------
// Check for heap block overflow by setting up padding at the end of the block
// ---------------------------------------------------------------------------
#if defined(MI_PADDING) && defined(MI_ENCODE_FREELIST)
static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) {
*bsize = mi_page_usable_block_size(page);
const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize);
*delta = padding->delta;
return ((uint32_t)mi_ptr_encode(page,block,page->keys) == padding->canary && *delta <= *bsize);
}
// Return the exact usable size of a block.
static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
size_t bsize;
size_t delta;
bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
mi_assert_internal(ok); mi_assert_internal(delta <= bsize);
return (ok ? bsize - delta : 0);
}
static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, size_t* size, size_t* wrong) {
size_t bsize;
size_t delta;
bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
*size = *wrong = bsize;
if (!ok) return false;
mi_assert_internal(bsize >= delta);
*size = bsize - delta;
uint8_t* fill = (uint8_t*)block + bsize - delta;
const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // check at most the first N padding bytes
for (size_t i = 0; i < maxpad; i++) {
if (fill[i] != MI_DEBUG_PADDING) {
*wrong = bsize - delta + i;
return false;
}
}
return true;
}
static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
size_t size;
size_t wrong;
if (!mi_verify_padding(page,block,&size,&wrong)) {
_mi_error_message(EFAULT, "buffer overflow in heap block %p of size %zu: write after %zu bytes\n", block, size, wrong );
}
}
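
To make the layout concrete: the usable block ends with `delta` fill bytes followed by the `mi_padding_t` struct, so a write just past the requested size corrupts a fill byte (or the canary) and is reported on free. A hedged sketch, assuming a debug build with `MI_PADDING` and `MI_ENCODE_FREELIST` enabled:

#include <mimalloc.h>

void overflow_demo(void) {
  char* p = (char*)mi_malloc(10);
  p[10] = 'x'; // one byte past the end: lands in the fill bytes
  mi_free(p);  // a padded build reports "buffer overflow in heap block ... write after 10 bytes"
}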
// When a non-thread-local block is freed, it becomes part of the thread delayed free
// list that is freed later by the owning heap. If the exact usable size is too small to
// contain the pointer for the delayed list, then shrink the padding (by decreasing delta)
// so it will later not trigger an overflow error in `mi_free_block`.
static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
size_t bsize;
size_t delta;
bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
mi_assert_internal(ok);
if (!ok || (bsize - delta) >= min_size) return; // usually already enough space
mi_assert_internal(bsize >= min_size);
if (bsize < min_size) return; // should never happen
size_t new_delta = (bsize - min_size);
mi_assert_internal(new_delta < bsize);
mi_padding_t* padding = (mi_padding_t*)((uint8_t*)block + bsize);
padding->delta = (uint32_t)new_delta;
}
#else
static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
UNUSED(page);
UNUSED(block);
}
static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
UNUSED(block);
return mi_page_usable_block_size(page);
}
static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
UNUSED(page);
UNUSED(block);
UNUSED(min_size);
}
#endif
// ------------------------------------------------------
// Free
@@ -208,6 +318,14 @@ static mi_decl_noinline void mi_free_huge_block_mt(mi_segment_t* segment, mi_pag
// multi-threaded free
static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block)
{
// The padding check may access the non-thread-owned page for the key values.
// That is safe as these are constant and the page won't be freed (as the block is not freed yet).
mi_check_padding(page, block);
mi_padding_shrink(page, block, sizeof(mi_block_t)); // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
#if (MI_DEBUG!=0)
memset(block, MI_DEBUG_FREED, mi_usable_size(block));
#endif
// huge page segments are always abandoned and can be freed immediately
mi_segment_t* segment = _mi_page_segment(page);
if (segment->kind==MI_SEGMENT_HUGE) {
@@ -215,6 +333,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
return;
}
// Try to put the block on either the page-local thread free list, or the heap delayed free list.
mi_thread_free_t tfree;
mi_thread_free_t tfreex;
bool use_delayed;
@@ -234,14 +353,14 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
if (mi_unlikely(use_delayed)) {
// racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`)
mi_heap_t* heap = mi_page_heap(page);
mi_heap_t* const heap = mi_page_heap(page);
mi_assert_internal(heap != NULL);
if (heap != NULL) {
// add to the delayed free list of this heap. (do this atomically as the lock only protects heap memory validity)
mi_block_t* dfree;
do {
dfree = mi_atomic_read_ptr_relaxed(mi_block_t,&heap->thread_delayed_free);
mi_block_set_nextx(heap,block,dfree, heap->key[0], heap->key[1]);
mi_block_set_nextx(heap,block,dfree, heap->keys);
} while (!mi_atomic_cas_ptr_weak(mi_block_t,&heap->thread_delayed_free, block, dfree));
}
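
The loop above is a classic lock-free (Treiber) stack push onto `thread_delayed_free`; the same pattern in plain C11 atomics (generic names, sketch only):

#include <stdatomic.h>
#include <stddef.h>

typedef struct node_s { struct node_s* next; } node_t;

static void stack_push(_Atomic(node_t*)* top, node_t* n) {
  node_t* old = atomic_load_explicit(top, memory_order_relaxed);
  do {
    n->next = old; // link before publishing; `old` is refreshed on CAS failure
  } while (!atomic_compare_exchange_weak(top, &old, n));
}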
@@ -258,14 +377,14 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
// regular free
static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block)
{
#if (MI_DEBUG)
memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
#endif
// and push it on the free list
if (mi_likely(local)) {
// owning thread can free a block directly
if (mi_unlikely(mi_check_is_double_free(page, block))) return;
mi_check_padding(page, block);
#if (MI_DEBUG!=0)
memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
#endif
mi_block_set_next(page, block, page->local_free);
page->local_free = block;
page->used--;
@@ -285,15 +404,15 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block
// Adjust a block that was allocated aligned, to the actual start of the block in the page.
mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p) {
mi_assert_internal(page!=NULL && p!=NULL);
size_t diff = (uint8_t*)p - _mi_page_start(segment, page, NULL);
size_t adjust = (diff % mi_page_block_size(page));
const size_t diff = (uint8_t*)p - _mi_page_start(segment, page, NULL);
const size_t adjust = (diff % mi_page_block_size(page));
return (mi_block_t*)((uintptr_t)p - adjust);
}
static void mi_decl_noinline mi_free_generic(const mi_segment_t* segment, bool local, void* p) {
mi_page_t* page = _mi_segment_page_of(segment, p);
mi_block_t* block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p);
mi_page_t* const page = _mi_segment_page_of(segment, p);
mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p);
_mi_free_block(page, local, block);
}
@@ -316,7 +435,7 @@ void mi_free(void* p) mi_attr_noexcept
"(this may still be a valid very large allocation (over 64MiB))\n", p);
if (mi_likely(_mi_ptr_cookie(segment) == segment->cookie)) {
_mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
}
}
}
#endif
#if (MI_DEBUG!=0 || MI_SECURE>=4)
@@ -328,20 +447,24 @@ void mi_free(void* p) mi_attr_noexcept
const uintptr_t tid = _mi_thread_id();
mi_page_t* const page = _mi_segment_page_of(segment, p);
#if (MI_STAT>1)
mi_heap_t* heap = mi_heap_get_default();
mi_heap_stat_decrease(heap, malloc, mi_usable_size(p));
if (page->xblock_size <= MI_LARGE_OBJ_SIZE_MAX) {
mi_heap_stat_decrease(heap, normal[_mi_bin(page->xblock_size)], 1);
mi_heap_t* const heap = mi_heap_get_default();
const size_t bsize = mi_page_usable_block_size(page);
mi_heap_stat_decrease(heap, malloc, bsize);
if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { // huge page stats are accounted for in `_mi_page_retire`
mi_heap_stat_decrease(heap, normal[_mi_bin(bsize)], 1);
}
// huge page stat is accounted for in `_mi_page_retire`
#endif
if (mi_likely(tid == segment->thread_id && page->flags.full_aligned == 0)) { // the thread id matches and it is not a full page, nor has aligned blocks
// local, and not full or aligned
mi_block_t* const block = (mi_block_t*)p;
mi_block_t* block = (mi_block_t*)(p);
if (mi_unlikely(mi_check_is_double_free(page,block))) return;
mi_check_padding(page, block);
#if (MI_DEBUG!=0)
memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
#endif
mi_block_set_next(page, block, page->local_free);
page->local_free = block;
page->used--;
@@ -358,10 +481,10 @@ void mi_free(void* p) mi_attr_noexcept
bool _mi_free_delayed_block(mi_block_t* block) {
// get segment and page
const mi_segment_t* segment = _mi_ptr_segment(block);
const mi_segment_t* const segment = _mi_ptr_segment(block);
mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
mi_assert_internal(_mi_thread_id() == segment->thread_id);
mi_page_t* page = _mi_segment_page_of(segment, block);
mi_page_t* const page = _mi_segment_page_of(segment, block);
// Clear the no-delayed flag so delayed freeing is used again for this page.
// This must be done before collecting the free lists on this page -- otherwise
@@ -381,11 +504,12 @@ bool _mi_free_delayed_block(mi_block_t* block) {
// Bytes available in a block
size_t mi_usable_size(const void* p) mi_attr_noexcept {
if (p==NULL) return 0;
const mi_segment_t* segment = _mi_ptr_segment(p);
const mi_page_t* page = _mi_segment_page_of(segment,p);
size_t size = mi_page_block_size(page);
const mi_segment_t* const segment = _mi_ptr_segment(p);
const mi_page_t* const page = _mi_segment_page_of(segment, p);
const mi_block_t* const block = (const mi_block_t*)p;
const size_t size = mi_page_usable_size_of(page, block);
if (mi_unlikely(mi_page_has_aligned(page))) {
ptrdiff_t adjust = (uint8_t*)p - (uint8_t*)_mi_page_ptr_unalign(segment,page,p);
ptrdiff_t const adjust = (uint8_t*)p - (uint8_t*)_mi_page_ptr_unalign(segment,page,p);
mi_assert_internal(adjust >= 0 && (size_t)adjust <= size);
return (size - adjust);
}
@@ -433,29 +557,29 @@ void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept {
mi_free(p);
}
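
Note that `mi_usable_size` may report more than was requested: with padding enabled it returns the exact requested size (via `mi_page_usable_size_of`), while in a plain release build it returns the full bin size. For example:

#include <mimalloc.h>

void usable_size_example(void) {
  void* p = mi_malloc(100);
  size_t n = mi_usable_size(p); // n >= 100; all n bytes may legitimately be used
  mi_free(p);
}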
extern inline mi_decl_allocator void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
extern inline mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count,size,&total)) return NULL;
return mi_heap_zalloc(heap,total);
}
mi_decl_allocator void* mi_calloc(size_t count, size_t size) mi_attr_noexcept {
mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept {
return mi_heap_calloc(mi_get_default_heap(),count,size);
}
// Uninitialized `calloc`
extern mi_decl_allocator void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
extern mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return mi_heap_malloc(heap, total);
}
mi_decl_allocator void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept {
mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept {
return mi_heap_mallocn(mi_get_default_heap(),count,size);
}
// Expand in place or fail
mi_decl_allocator void* mi_expand(void* p, size_t newsize) mi_attr_noexcept {
void* mi_expand(void* p, size_t newsize) mi_attr_noexcept {
if (p == NULL) return NULL;
size_t size = mi_usable_size(p);
if (newsize > size) return NULL;
@@ -481,11 +605,11 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero)
return newp;
}
mi_decl_allocator void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
return _mi_heap_realloc_zero(heap, p, newsize, false);
}
mi_decl_allocator void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return mi_heap_realloc(heap, p, total);
@@ -493,41 +617,41 @@ mi_decl_allocator void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count,
// Reallocate but free `p` on errors
mi_decl_allocator void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
void* newp = mi_heap_realloc(heap, p, newsize);
if (newp==NULL && p!=NULL) mi_free(p);
return newp;
}
mi_decl_allocator void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
return _mi_heap_realloc_zero(heap, p, newsize, true);
}
mi_decl_allocator void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return mi_heap_rezalloc(heap, p, total);
}
mi_decl_allocator void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept {
void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept {
return mi_heap_realloc(mi_get_default_heap(),p,newsize);
}
mi_decl_allocator void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept {
void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept {
return mi_heap_reallocn(mi_get_default_heap(),p,count,size);
}
// Reallocate but free `p` on errors
mi_decl_allocator void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept {
void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept {
return mi_heap_reallocf(mi_get_default_heap(),p,newsize);
}
mi_decl_allocator void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept {
void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept {
return mi_heap_rezalloc(mi_get_default_heap(), p, newsize);
}
mi_decl_allocator void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept {
void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept {
return mi_heap_recalloc(mi_get_default_heap(), p, count, size);
}
@@ -538,7 +662,7 @@ mi_decl_allocator void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_
// ------------------------------------------------------
// `strdup` using mi_malloc
char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept {
mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept {
if (s == NULL) return NULL;
size_t n = strlen(s);
char* t = (char*)mi_heap_malloc(heap,n+1);
@@ -546,12 +670,12 @@ char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept {
return t;
}
char* mi_strdup(const char* s) mi_attr_noexcept {
mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept {
return mi_heap_strdup(mi_get_default_heap(), s);
}
// `strndup` using mi_malloc
char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept {
mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept {
if (s == NULL) return NULL;
size_t m = strlen(s);
if (n > m) n = m;
@@ -562,7 +686,7 @@ char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept
return t;
}
char* mi_strndup(const char* s, size_t n) mi_attr_noexcept {
mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept {
return mi_heap_strndup(mi_get_default_heap(),s,n);
}
@@ -573,7 +697,7 @@ char* mi_strndup(const char* s, size_t n) mi_attr_noexcept {
#define PATH_MAX MAX_PATH
#endif
#include <windows.h>
char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
// todo: use GetFullPathNameW to allow longer file names
char buf[PATH_MAX];
DWORD res = GetFullPathNameA(fname, PATH_MAX, (resolved_name == NULL ? buf : resolved_name), NULL);
@@ -619,7 +743,7 @@ char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name)
}
#endif
char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept {
mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept {
return mi_heap_realpath(mi_get_default_heap(),fname,resolved_name);
}
#endif
@@ -684,19 +808,19 @@ static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow ) {
return p;
}
void* mi_new(size_t size) {
mi_decl_restrict void* mi_new(size_t size) {
void* p = mi_malloc(size);
if (mi_unlikely(p == NULL)) return mi_try_new(size,false);
return p;
}
void* mi_new_nothrow(size_t size) {
mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept {
void* p = mi_malloc(size);
if (mi_unlikely(p == NULL)) return mi_try_new(size, true);
return p;
}
void* mi_new_aligned(size_t size, size_t alignment) {
mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) {
void* p;
do {
p = mi_malloc_aligned(size, alignment);
@@ -705,7 +829,7 @@ void* mi_new_aligned(size_t size, size_t alignment) {
return p;
}
void* mi_new_aligned_nothrow(size_t size, size_t alignment) {
mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept {
void* p;
do {
p = mi_malloc_aligned(size, alignment);
@@ -714,7 +838,7 @@ void* mi_new_aligned_nothrow(size_t size, size_t alignment) {
return p;
}
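
These `new` variants follow the ISO C++ semantics of `operator new`: on failure, invoke the installed `std::new_handler` and retry, giving up (nothrow) or throwing `std::bad_alloc` only when no handler is set. In outline, using the internal `mi_try_new_handler` helper from this file (a sketch, not the exact code):

#include <stdbool.h>
#include <mimalloc.h>

static void* retrying_alloc(size_t size, bool nothrow) {
  void* p;
  do {
    p = mi_malloc(size);
  } while (p == NULL && mi_try_new_handler(nothrow)); // invokes std::get_new_handler()
  return p; // NULL only possible in the nothrow case
}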
void* mi_new_n(size_t count, size_t size) {
mi_decl_restrict void* mi_new_n(size_t count, size_t size) {
size_t total;
if (mi_unlikely(mi_count_size_overflow(count, size, &total))) {
mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc

View File

@@ -448,7 +448,7 @@ int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msec
_mi_warning_message("failed to reserve %zu gb huge pages\n", pages);
return ENOMEM;
}
_mi_verbose_message("reserved %zu gb huge pages on numa node %i (of the %zu gb requested)\n", pages_reserved, numa_node, pages);
_mi_verbose_message("numa node %i: reserved %zu gb huge pages (of the %zu gb requested)\n", numa_node, pages_reserved, pages);
size_t bcount = mi_block_count_of_size(hsize);
size_t fields = _mi_divide_up(bcount, MI_BITMAP_FIELD_BITS);

View File

@@ -138,6 +138,9 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
// (if abandoning, after this there are no more thread-delayed references into the pages.)
_mi_heap_delayed_free(heap);
// collect retired pages
_mi_heap_collect_retired(heap, collect >= MI_FORCE);
// collect all pages owned by this thread
mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
mi_assert_internal( collect != MI_ABANDON || mi_atomic_read_ptr(mi_block_t,&heap->thread_delayed_free) == NULL );
@@ -188,16 +191,19 @@ mi_heap_t* mi_heap_get_backing(void) {
mi_heap_t* mi_heap_new(void) {
mi_heap_t* bheap = mi_heap_get_backing();
mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t);
mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t); // todo: OS allocate in secure mode?
if (heap==NULL) return NULL;
memcpy(heap, &_mi_heap_empty, sizeof(mi_heap_t));
heap->tld = bheap->tld;
heap->thread_id = _mi_thread_id();
_mi_random_split(&bheap->random, &heap->random);
heap->cookie = _mi_heap_random_next(heap) | 1;
heap->key[0] = _mi_heap_random_next(heap);
heap->key[1] = _mi_heap_random_next(heap);
heap->cookie = _mi_heap_random_next(heap) | 1;
heap->keys[0] = _mi_heap_random_next(heap);
heap->keys[1] = _mi_heap_random_next(heap);
heap->no_reclaim = true; // don't reclaim abandoned pages or otherwise destroy is unsafe
// push on the thread local heaps list
heap->next = heap->tld->heaps;
heap->tld->heaps = heap;
return heap;
}
@@ -220,6 +226,7 @@ static void mi_heap_reset_pages(mi_heap_t* heap) {
// called from `mi_heap_destroy` and `mi_heap_delete` to free the internal heap resources.
static void mi_heap_free(mi_heap_t* heap) {
mi_assert(heap != NULL);
mi_assert_internal(mi_heap_is_initialized(heap));
if (mi_heap_is_backing(heap)) return; // dont free the backing heap
@@ -227,6 +234,22 @@ static void mi_heap_free(mi_heap_t* heap) {
if (mi_heap_is_default(heap)) {
_mi_heap_set_default_direct(heap->tld->heap_backing);
}
// remove ourselves from the thread local heaps list
// linear search but we expect the number of heaps to be relatively small
mi_heap_t* prev = NULL;
mi_heap_t* curr = heap->tld->heaps;
while (curr != heap && curr != NULL) {
prev = curr;
curr = curr->next;
}
mi_assert_internal(curr == heap);
if (curr == heap) {
if (prev != NULL) { prev->next = heap->next; }
else { heap->tld->heaps = heap->next; }
}
mi_assert_internal(heap->tld->heaps != NULL);
// and free the used memory
mi_free(heap);
}
@@ -283,6 +306,7 @@ void _mi_heap_destroy_pages(mi_heap_t* heap) {
}
void mi_heap_destroy(mi_heap_t* heap) {
mi_assert(heap != NULL);
mi_assert(mi_heap_is_initialized(heap));
mi_assert(heap->no_reclaim);
mi_assert_expensive(mi_heap_is_valid(heap));
@@ -309,38 +333,37 @@ static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
mi_assert_internal(heap!=NULL);
if (from==NULL || from->page_count == 0) return;
// unfull all full pages in the `from` heap
mi_page_t* page = from->pages[MI_BIN_FULL].first;
while (page != NULL) {
mi_page_t* next = page->next;
_mi_page_unfull(page);
page = next;
}
mi_assert_internal(from->pages[MI_BIN_FULL].first == NULL);
// free outstanding thread delayed free blocks
// reduce the size of the delayed frees
_mi_heap_delayed_free(from);
// transfer all pages by appending the queues; this will set
// a new heap field which is ok as all pages are unfull'd and thus
// other threads won't access this field anymore (see `mi_free_block_mt`)
for (size_t i = 0; i < MI_BIN_FULL; i++) {
// transfer all pages by appending the queues; this will set a new heap field
// so threads may do delayed frees in either heap for a while.
// note: appending waits for each page to not be in the `MI_DELAYED_FREEING` state
// so after this only the new heap will get delayed frees
for (size_t i = 0; i <= MI_BIN_FULL; i++) {
mi_page_queue_t* pq = &heap->pages[i];
mi_page_queue_t* append = &from->pages[i];
size_t pcount = _mi_page_queue_append(heap, pq, append);
heap->page_count += pcount;
from->page_count -= pcount;
}
mi_assert_internal(from->thread_delayed_free == NULL);
mi_assert_internal(from->page_count == 0);
// and do outstanding delayed frees in the `from` heap
// note: be careful here as the `heap` field in all those pages no longer points to `from`;
// it turns out to be ok as `_mi_heap_delayed_free` only visits the list and calls
// the regular `_mi_free_delayed_block`, which is safe.
_mi_heap_delayed_free(from);
mi_assert_internal(from->thread_delayed_free == NULL);
// and reset the `from` heap
mi_heap_reset_pages(from);
mi_heap_reset_pages(from);
}
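
`mi_heap_absorb` is what makes `mi_heap_delete` safe: still-allocated blocks migrate to the thread's backing (default) heap instead of being freed. A usage sketch:

#include <mimalloc.h>

void heap_delete_example(void) {
  mi_heap_t* h = mi_heap_new();
  void* p = mi_heap_malloc(h, 100);
  mi_heap_delete(h); // h's pages are absorbed into the backing heap
  mi_free(p);        // p is still valid and can be freed normally
}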
// Safe delete a heap without freeing any still allocated blocks in that heap.
void mi_heap_delete(mi_heap_t* heap)
{
mi_assert(heap != NULL);
mi_assert(mi_heap_is_initialized(heap));
mi_assert_expensive(mi_heap_is_valid(heap));
if (!mi_heap_is_initialized(heap)) return;

View File

@@ -34,8 +34,14 @@ const mi_page_t _mi_page_empty = {
};
#define MI_PAGE_EMPTY() ((mi_page_t*)&_mi_page_empty)
#define MI_SMALL_PAGES_EMPTY \
{ MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() }
#if defined(MI_PADDING) && (MI_INTPTR_SIZE >= 8)
#define MI_SMALL_PAGES_EMPTY { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() }
#elif defined(MI_PADDING)
#define MI_SMALL_PAGES_EMPTY { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() }
#else
#define MI_SMALL_PAGES_EMPTY { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY() }
#endif
// Empty page queues for every bin
@@ -106,6 +112,8 @@ const mi_heap_t _mi_heap_empty = {
{ 0, 0 }, // keys
{ {0}, {0}, 0 },
0, // page count
MI_BIN_FULL, 0, // page retired min/max
NULL, // next
false
};
@@ -115,7 +123,7 @@ const mi_heap_t _mi_heap_empty = {
static const mi_tld_t tld_empty = {
0,
false,
NULL,
NULL, NULL,
{ MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, 0, NULL, tld_empty_stats, tld_empty_os }, // segments
{ 0, tld_empty_stats }, // os
{ MI_STATS_NULL } // stats
@@ -129,30 +137,28 @@ mi_decl_thread mi_heap_t* _mi_heap_default = (mi_heap_t*)&_mi_heap_empty;
#define tld_main_stats ((mi_stats_t*)((uint8_t*)&tld_main + offsetof(mi_tld_t,stats)))
#define tld_main_os ((mi_os_tld_t*)((uint8_t*)&tld_main + offsetof(mi_tld_t,os)))
extern mi_heap_t _mi_heap_main;
static mi_tld_t tld_main = {
0, false,
&_mi_heap_main,
&_mi_heap_main, & _mi_heap_main,
{ MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, 0, NULL, tld_main_stats, tld_main_os }, // segments
{ 0, tld_main_stats }, // os
{ MI_STATS_NULL } // stats
};
#if MI_INTPTR_SIZE==8
#define MI_INIT_COOKIE (0xCDCDCDCDCDCDCDCDUL)
#else
#define MI_INIT_COOKIE (0xCDCDCDCDUL)
#endif
mi_heap_t _mi_heap_main = {
&tld_main,
MI_SMALL_PAGES_EMPTY,
MI_PAGE_QUEUES_EMPTY,
ATOMIC_VAR_INIT(NULL),
0, // thread id
MI_INIT_COOKIE, // initial cookie
{ MI_INIT_COOKIE, MI_INIT_COOKIE }, // the key of the main heap can be fixed (unlike page keys that need to be secure!)
{ {0}, {0}, 0 }, // random
0, // initial cookie
{ 0, 0 }, // the key of the main heap can be fixed (unlike page keys that need to be secure!)
{ {0x846ca68b}, {0}, 0 }, // random
0, // page count
MI_BIN_FULL, 0, // page retired min/max
NULL, // next heap
false // can reclaim
};
@@ -161,6 +167,22 @@ bool _mi_process_is_initialized = false; // set to `true` in `mi_process_init`.
mi_stats_t _mi_stats_main = { MI_STATS_NULL };
static void mi_heap_main_init(void) {
if (_mi_heap_main.cookie == 0) {
_mi_heap_main.thread_id = _mi_thread_id();
_mi_heap_main.cookie = _os_random_weak((uintptr_t)&mi_heap_main_init);
_mi_random_init(&_mi_heap_main.random);
_mi_heap_main.keys[0] = _mi_heap_random_next(&_mi_heap_main);
_mi_heap_main.keys[1] = _mi_heap_random_next(&_mi_heap_main);
}
}
mi_heap_t* _mi_heap_main_get(void) {
mi_heap_main_init();
return &_mi_heap_main;
}
/* -----------------------------------------------------------
Initialization and freeing of the thread local heaps
----------------------------------------------------------- */
@@ -173,14 +195,16 @@ typedef struct mi_thread_data_s {
// Initialize the thread local default heap, called from `mi_thread_init`
static bool _mi_heap_init(void) {
if (mi_heap_is_initialized(_mi_heap_default)) return true;
if (mi_heap_is_initialized(mi_get_default_heap())) return true;
if (_mi_is_main_thread()) {
// mi_assert_internal(_mi_heap_main.thread_id != 0); // can happen on FreeBSD where alloc is called before any initialization
// the main heap is statically allocated
mi_heap_main_init();
_mi_heap_set_default_direct(&_mi_heap_main);
mi_assert_internal(_mi_heap_default->tld->heap_backing == mi_get_default_heap());
//mi_assert_internal(_mi_heap_default->tld->heap_backing == mi_get_default_heap());
}
else {
// use `_mi_os_alloc` to allocate directly from the OS
// use `_mi_os_alloc` to allocate directly from the OS
mi_thread_data_t* td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t),&_mi_stats_main); // Todo: more efficient allocation?
if (td == NULL) {
_mi_error_message(ENOMEM, "failed to allocate thread local heap memory\n");
@@ -193,11 +217,12 @@ static bool _mi_heap_init(void) {
memcpy(heap, &_mi_heap_empty, sizeof(*heap));
heap->thread_id = _mi_thread_id();
_mi_random_init(&heap->random);
heap->cookie = _mi_heap_random_next(heap) | 1;
heap->key[0] = _mi_heap_random_next(heap);
heap->key[1] = _mi_heap_random_next(heap);
heap->tld = tld;
heap->cookie = _mi_heap_random_next(heap) | 1;
heap->keys[0] = _mi_heap_random_next(heap);
heap->keys[1] = _mi_heap_random_next(heap);
heap->tld = tld;
tld->heap_backing = heap;
tld->heaps = heap;
tld->segments.stats = &tld->stats;
tld->segments.os = &tld->os;
tld->os.stats = &tld->stats;
@@ -213,12 +238,24 @@ static bool _mi_heap_done(mi_heap_t* heap) {
// reset default heap
_mi_heap_set_default_direct(_mi_is_main_thread() ? &_mi_heap_main : (mi_heap_t*)&_mi_heap_empty);
// todo: delete all non-backing heaps?
// switch to backing heap and free it
// switch to backing heap
heap = heap->tld->heap_backing;
if (!mi_heap_is_initialized(heap)) return false;
// delete all non-backing heaps in this thread
mi_heap_t* curr = heap->tld->heaps;
while (curr != NULL) {
mi_heap_t* next = curr->next; // save `next` as `curr` will be freed
if (curr != heap) {
mi_assert_internal(!mi_heap_is_backing(curr));
mi_heap_delete(curr);
}
curr = next;
}
mi_assert_internal(heap->tld->heaps == heap && heap->next == NULL);
mi_assert_internal(mi_heap_is_backing(heap));
// collect if not the main thread
if (heap != &_mi_heap_main) {
_mi_heap_collect_abandon(heap);
@@ -232,7 +269,9 @@ static bool _mi_heap_done(mi_heap_t* heap) {
mi_assert_internal(heap->tld->segments.count == 0);
_mi_os_free(heap, sizeof(mi_thread_data_t), &_mi_stats_main);
}
#if (MI_DEBUG > 0)
#if 0
// never free the main thread heap even in debug mode; if a dll is linked statically with mimalloc,
// there may still be delete/free calls after mi_fls_done is called. Issue #207
else {
_mi_heap_destroy_pages(heap);
mi_assert_internal(heap->tld->heap_backing == &_mi_heap_main);
@@ -273,14 +312,15 @@ static void _mi_thread_done(mi_heap_t* default_heap);
// use thread local storage keys to detect thread ending
#include <windows.h>
#include <fibersapi.h>
static DWORD mi_fls_key;
static DWORD mi_fls_key = (DWORD)(-1);
static void NTAPI mi_fls_done(PVOID value) {
if (value!=NULL) _mi_thread_done((mi_heap_t*)value);
}
#elif defined(MI_USE_PTHREADS)
// use pthread locol storage keys to detect thread ending
// use pthread local storage keys to detect thread ending
// (and used with MI_TLS_PTHREADS for the default heap)
#include <pthread.h>
static pthread_key_t mi_pthread_key;
pthread_key_t _mi_heap_default_key = (pthread_key_t)(-1);
static void mi_pthread_done(void* value) {
if (value!=NULL) _mi_thread_done((mi_heap_t*)value);
}
@@ -300,8 +340,10 @@ static void mi_process_setup_auto_thread_done(void) {
#elif defined(_WIN32) && !defined(MI_SHARED_LIB)
mi_fls_key = FlsAlloc(&mi_fls_done);
#elif defined(MI_USE_PTHREADS)
pthread_key_create(&mi_pthread_key, &mi_pthread_done);
mi_assert_internal(_mi_heap_default_key == (pthread_key_t)(-1));
pthread_key_create(&_mi_heap_default_key, &mi_pthread_done);
#endif
_mi_heap_set_default_direct(&_mi_heap_main);
}
@@ -343,21 +385,31 @@ static void _mi_thread_done(mi_heap_t* heap) {
void _mi_heap_set_default_direct(mi_heap_t* heap) {
mi_assert_internal(heap != NULL);
#if defined(MI_TLS_SLOT)
mi_tls_slot_set(MI_TLS_SLOT,heap);
#elif defined(MI_TLS_PTHREAD_SLOT_OFS)
*mi_tls_pthread_heap_slot() = heap;
#elif defined(MI_TLS_PTHREAD)
// we use _mi_heap_default_key
#else
_mi_heap_default = heap;
#endif
// ensure the default heap is passed to `_mi_thread_done`
// setting to a non-NULL value also ensures `mi_thread_done` is called.
#if defined(_WIN32) && defined(MI_SHARED_LIB)
// nothing to do as it is done in DllMain
#elif defined(_WIN32) && !defined(MI_SHARED_LIB)
mi_assert_internal(mi_fls_key != 0);
FlsSetValue(mi_fls_key, heap);
#elif defined(MI_USE_PTHREADS)
pthread_setspecific(mi_pthread_key, heap);
if (_mi_heap_default_key != (pthread_key_t)(-1)) { // can happen during recursive invocation on FreeBSD
pthread_setspecific(_mi_heap_default_key, heap);
}
#endif
}
// --------------------------------------------------------
// Run functions on process init/done, and thread init/done
// --------------------------------------------------------
@@ -409,11 +461,16 @@ static void mi_allocator_done() {
// Called once by the process loader
static void mi_process_load(void) {
mi_heap_main_init();
#if defined(MI_TLS_RECURSE_GUARD)
volatile mi_heap_t* dummy = _mi_heap_default; // access TLS to allocate it before setting tls_initialized to true;
UNUSED(dummy);
#endif
os_preloading = false;
atexit(&mi_process_done);
_mi_options_init();
mi_process_init();
//mi_stats_reset();
//mi_stats_reset();-
if (mi_redirected) _mi_verbose_message("malloc is redirected.\n");
// show message from the redirector (if present)
@@ -428,22 +485,12 @@ static void mi_process_load(void) {
void mi_process_init(void) mi_attr_noexcept {
// ensure we are called once
if (_mi_process_is_initialized) return;
// access _mi_heap_default before setting _mi_process_is_initialized to ensure
// that the TLS slot is allocated without getting into recursion on macOS
// when using dynamic linking with interpose.
mi_get_default_heap();
_mi_process_is_initialized = true;
_mi_heap_main.thread_id = _mi_thread_id();
_mi_verbose_message("process init: 0x%zx\n", _mi_heap_main.thread_id);
_mi_random_init(&_mi_heap_main.random);
#ifndef __APPLE__ // TODO: fix this? cannot update cookie if allocation already happened..
_mi_heap_main.cookie = _mi_heap_random_next(&_mi_heap_main);
_mi_heap_main.key[0] = _mi_heap_random_next(&_mi_heap_main);
_mi_heap_main.key[1] = _mi_heap_random_next(&_mi_heap_main);
#endif
mi_process_setup_auto_thread_done();
_mi_verbose_message("process init: 0x%zx\n", _mi_thread_id());
_mi_os_init();
mi_heap_main_init();
#if (MI_DEBUG)
_mi_verbose_message("debug level : %d\n", MI_DEBUG);
#endif
@@ -466,6 +513,10 @@ static void mi_process_done(void) {
if (process_done) return;
process_done = true;
#if defined(_WIN32) && !defined(MI_SHARED_LIB)
FlsSetValue(mi_fls_key, NULL); // don't call main-thread callback
FlsFree(mi_fls_key); // call thread-done on all threads to prevent dangling callback pointer if statically linked with a DLL; Issue #208
#endif
#ifndef NDEBUG
mi_collect(true);
#endif
@@ -473,7 +524,7 @@ static void mi_process_done(void) {
mi_option_is_enabled(mi_option_verbose)) {
mi_stats_print(NULL);
}
mi_allocator_done();
mi_allocator_done();
_mi_verbose_message("process done: 0x%zx\n", _mi_heap_main.thread_id);
os_preloading = true; // don't call the C runtime anymore
}

View File

@@ -70,7 +70,11 @@ static mi_option_desc_t options[_mi_option_last] =
{ 0, UNINIT, MI_OPTION(page_reset) }, // reset page memory on free
{ 0, UNINIT, MI_OPTION(abandoned_page_reset) },// reset free page memory when a thread terminates
{ 0, UNINIT, MI_OPTION(segment_reset) }, // reset segment memory on free (needs eager commit)
#if defined(__NetBSD__)
{ 0, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed
#else
{ 1, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed
#endif
{ 1, UNINIT, MI_OPTION(allow_decommit) }, // decommit pages when not eager committed
{ 100, UNINIT, MI_OPTION(reset_delay) }, // reset delay in milli-seconds
{ 1000, UNINIT, MI_OPTION(arena_reset_delay) }, // reset delay in milli-seconds
@@ -87,7 +91,7 @@ void _mi_options_init(void) {
mi_add_stderr_output(); // now it is safe to use stderr for output
for(int i = 0; i < _mi_option_last; i++ ) {
mi_option_t option = (mi_option_t)i;
mi_option_get(option); // initialize
long l = mi_option_get(option); UNUSED(l); // initialize
if (option != mi_option_verbose) {
mi_option_desc_t* desc = &options[option];
_mi_verbose_message("option '%s': %ld\n", desc->name, desc->value);
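These defaults can be changed without rebuilding: every option maps to an environment variable (`MIMALLOC_` plus the upper-cased option name) and to the public option API in `mimalloc.h`. A small sketch, set before the first allocation (the value 4 is just illustrative):

#include <mimalloc.h>

int main(void) {
  // same effect as running with MIMALLOC_EAGER_COMMIT_DELAY=4 in the environment
  mi_option_set(mi_option_eager_commit_delay, 4);
  mi_option_enable(mi_option_verbose);  // makes _mi_options_init log every option value
  void* p = mi_malloc(64);
  mi_free(p);
  return 0;
}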
@@ -241,16 +245,30 @@ static volatile _Atomic(uintptr_t) error_count; // = 0; // when MAX_ERROR_COUNT
// inside the C runtime causes another message.
static mi_decl_thread bool recurse = false;
static bool mi_recurse_enter(void) {
#ifdef MI_TLS_RECURSE_GUARD
if (_mi_preloading()) return true;
#endif
if (recurse) return false;
recurse = true;
return true;
}
static void mi_recurse_exit(void) {
#ifdef MI_TLS_RECURSE_GUARD
if (_mi_preloading()) return;
#endif
recurse = false;
}
void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message) {
if (recurse) return;
if (!mi_recurse_enter()) return;
if (out==NULL || (FILE*)out==stdout || (FILE*)out==stderr) { // TODO: use mi_out_stderr for stderr?
out = mi_out_get_default(&arg);
}
recurse = true;
if (prefix != NULL) out(prefix,arg);
out(message,arg);
recurse = false;
return;
mi_recurse_exit();
}
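Reduced to its essence, the guard above is a thread-local re-entrancy flag: if emitting a message somehow triggers another message (for instance because the output function allocates), the nested call drops its message instead of recursing forever. A standalone sketch using C11 `_Thread_local`:

#include <stdbool.h>
#include <stdio.h>

static _Thread_local bool in_log = false;

static void log_message(const char* msg) {
  if (in_log) return;   // re-entered: drop the message rather than loop
  in_log = true;
  fputs(msg, stderr);   // in an allocator this could call back into log_message
  in_log = false;
}

The `_mi_preloading()` escape under `MI_TLS_RECURSE_GUARD` bypasses the flag while the process is still preloading, when the thread-local itself may not yet be reliable.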
// Define our own limited `fprintf` that avoids memory allocation.
@@ -258,14 +276,12 @@ void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* me
static void mi_vfprintf( mi_output_fun* out, void* arg, const char* prefix, const char* fmt, va_list args ) {
char buf[512];
if (fmt==NULL) return;
if (recurse) return;
recurse = true;
if (!mi_recurse_enter()) return;
vsnprintf(buf,sizeof(buf)-1,fmt,args);
recurse = false;
mi_recurse_exit();
_mi_fputs(out,arg,prefix,buf);
}
void _mi_fprintf( mi_output_fun* out, void* arg, const char* fmt, ... ) {
va_list args;
va_start(args,fmt);
@@ -292,7 +308,7 @@ void _mi_verbose_message(const char* fmt, ...) {
static void mi_show_error_message(const char* fmt, va_list args) {
if (!mi_option_is_enabled(mi_option_show_errors) && !mi_option_is_enabled(mi_option_verbose)) return;
if (mi_atomic_increment(&error_count) > mi_max_error_count) return;
mi_vfprintf(NULL, NULL, "mimalloc: error: ", fmt, args);
mi_vfprintf(NULL, NULL, "mimalloc: error: ", fmt, args);
}
void _mi_warning_message(const char* fmt, ...) {
@@ -321,6 +337,14 @@ static volatile _Atomic(void*) mi_error_arg; // = NULL
static void mi_error_default(int err) {
UNUSED(err);
#if (MI_DEBUG>0)
if (err==EFAULT) {
#ifdef _MSC_VER
__debugbreak();
#endif
abort();
}
#endif
#if (MI_SECURE>0)
if (err==EFAULT) { // abort on serious errors in secure mode (corrupted meta-data)
abort();

View File

@@ -188,7 +188,7 @@ static bool mi_os_mem_free(void* addr, size_t size, bool was_committed, mi_stats
if (was_committed) _mi_stat_decrease(&stats->committed, size);
_mi_stat_decrease(&stats->reserved, size);
if (err) {
#pragma warning(suppress:4996)
#pragma warning(suppress:4996)
_mi_warning_message("munmap failed: %s, addr 0x%8li, size %lu\n", strerror(errno), (size_t)addr, size);
return false;
}
@@ -208,15 +208,20 @@ static void* mi_win_virtual_allocx(void* addr, size_t size, size_t try_alignment
// on 64-bit systems, try to use the virtual address area after 4TiB for 4MiB aligned allocations
void* hint;
if (addr == NULL && (hint = mi_os_get_aligned_hint(try_alignment,size)) != NULL) {
return VirtualAlloc(hint, size, flags, PAGE_READWRITE);
void* p = VirtualAlloc(hint, size, flags, PAGE_READWRITE);
if (p != NULL) return p;
DWORD err = GetLastError();
if (err != ERROR_INVALID_ADDRESS) { // if linked with multiple instances, we may have tried to allocate at an already allocated area
return NULL;
}
}
#endif
#if defined(MEM_EXTENDED_PARAMETER_TYPE_BITS)
// on modern Windows try to use VirtualAlloc2 for aligned allocation
if (try_alignment > 0 && (try_alignment % _mi_os_page_size()) == 0 && pVirtualAlloc2 != NULL) {
MEM_ADDRESS_REQUIREMENTS reqs = { 0 };
MEM_ADDRESS_REQUIREMENTS reqs = { 0, 0, 0 };
reqs.Alignment = try_alignment;
MEM_EXTENDED_PARAMETER param = { 0 };
MEM_EXTENDED_PARAMETER param = { {0, 0}, {0} };
param.Type = MemExtendedParameterAddressRequirements;
param.Pointer = &reqs;
return (*pVirtualAlloc2)(GetCurrentProcess(), addr, size, flags, PAGE_READWRITE, &param, 1);
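For reference, the aligned path above in isolation: `MEM_ADDRESS_REQUIREMENTS.Alignment` asks the kernel for an aligned reservation directly. A sketch that assumes a recent Windows 10 SDK where `VirtualAlloc2` can be linked directly (mimalloc itself resolves it at runtime through `pVirtualAlloc2`), using the same explicit initializers the diff introduces:

#include <windows.h>

static void* os_alloc_aligned(SIZE_T size, SIZE_T alignment) {
  // alignment must be a power of two and a multiple of the page size
  MEM_ADDRESS_REQUIREMENTS reqs = { 0, 0, 0 };
  reqs.Alignment = alignment;
  MEM_EXTENDED_PARAMETER param = { {0, 0}, {0} };
  param.Type = MemExtendedParameterAddressRequirements;
  param.Pointer = &reqs;
  return VirtualAlloc2(GetCurrentProcess(), NULL, size,
                       MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE, &param, 1);
}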
@@ -284,6 +289,7 @@ static void* mi_unix_mmapx(void* addr, size_t size, size_t try_alignment, int pr
}
#else
UNUSED(try_alignment);
UNUSED(mi_os_get_aligned_hint);
#endif
if (p==NULL) {
p = mmap(addr,size,protect_flags,flags,fd,0);
@@ -826,7 +832,7 @@ static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node)
mi_win_enable_large_os_pages();
#if defined(MEM_EXTENDED_PARAMETER_TYPE_BITS)
MEM_EXTENDED_PARAMETER params[3] = { {0,0},{0,0},{0,0} };
MEM_EXTENDED_PARAMETER params[3] = { {{0,0},{0}},{{0,0},{0}},{{0,0},{0}} };
// on modern Windows try to use NtAllocateVirtualMemoryEx for 1GiB huge pages
static bool mi_huge_pages_available = true;
if (pNtAllocateVirtualMemoryEx != NULL && mi_huge_pages_available) {
@@ -850,7 +856,7 @@ static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node)
else {
// fall back to regular large pages
mi_huge_pages_available = false; // don't try further huge pages
_mi_warning_message("unable to allocate using huge (1GiB) pages, trying large (2MiB) pages instead (status 0x%lx)\n", err);
_mi_warning_message("unable to allocate using huge (1gb) pages, trying large (2mb) pages instead (status 0x%lx)\n", err);
}
}
// on modern Windows try to use VirtualAlloc2 for numa aware large OS page allocation
@@ -891,7 +897,7 @@ static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node)
// see: <https://lkml.org/lkml/2017/2/9/875>
long err = mi_os_mbind(p, size, MPOL_PREFERRED, &numa_mask, 8*MI_INTPTR_SIZE, 0);
if (err != 0) {
_mi_warning_message("failed to bind huge (1GiB) pages to NUMA node %d: %s\n", numa_node, strerror(errno));
_mi_warning_message("failed to bind huge (1gb) pages to numa node %d: %s\n", numa_node, strerror(errno));
}
}
return p;
@@ -1075,4 +1081,3 @@ int _mi_os_numa_node_get(mi_os_tld_t* tld) {
if (numa_node >= numa_count) { numa_node = numa_node % numa_count; }
return (int)numa_node;
}

View File

@@ -332,6 +332,7 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro
mi_page_set_in_full(page, mi_page_queue_is_full(to));
}
// Only called from `mi_heap_absorb`.
size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append) {
mi_assert_internal(mi_heap_contains_queue(heap,pq));
mi_assert_internal(pq->block_size == append->block_size);
@@ -341,7 +342,13 @@ size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue
// set append pages to new heap and count
size_t count = 0;
for (mi_page_t* page = append->first; page != NULL; page = page->next) {
mi_page_set_heap(page,heap);
// inline `mi_page_set_heap` to avoid wrong assertion during absorption;
// in this case it is ok if blocks are still in delayed freeing since both the "to" and "from" heap are still alive.
mi_atomic_write(&page->xheap, (uintptr_t)heap);
// set the flag to delayed free (not overriding NEVER_DELAYED_FREE) which has as a
// side effect that it spins until any DELAYED_FREEING is finished. This ensures
// that after appending only the new heap will be used for delayed free operations.
_mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false);
count++;
}

View File

@@ -7,7 +7,7 @@ terms of the MIT license. A copy of the license can be found in the file
/* -----------------------------------------------------------
The core of the allocator. Every segment contains
pages of a certain block size. The main function
pages of a certain block size. The main function
exported is `mi_malloc_generic`.
----------------------------------------------------------- */
@@ -105,7 +105,7 @@ static bool mi_page_is_valid_init(mi_page_t* page) {
bool _mi_page_is_valid(mi_page_t* page) {
mi_assert_internal(mi_page_is_valid_init(page));
#if MI_SECURE
mi_assert_internal(page->key != 0);
mi_assert_internal(page->keys[0] != 0);
#endif
if (mi_page_heap(page)!=NULL) {
mi_segment_t* segment = _mi_page_segment(page);
@@ -281,7 +281,7 @@ void _mi_heap_delayed_free(mi_heap_t* heap) {
// and free them all
while(block != NULL) {
mi_block_t* next = mi_block_nextx(heap,block, heap->key[0], heap->key[1]);
mi_block_t* next = mi_block_nextx(heap,block, heap->keys);
// use internal free instead of regular one to keep stats etc correct
if (!_mi_free_delayed_block(block)) {
// we might already start delayed freeing while another thread has not yet
@@ -289,7 +289,7 @@ void _mi_heap_delayed_free(mi_heap_t* heap) {
mi_block_t* dfree;
do {
dfree = mi_atomic_read_ptr_relaxed(mi_block_t,&heap->thread_delayed_free);
mi_block_set_nextx(heap, block, dfree, heap->key[0], heap->key[1]);
mi_block_set_nextx(heap, block, dfree, heap->keys);
} while (!mi_atomic_cas_ptr_weak(mi_block_t,&heap->thread_delayed_free, block, dfree));
}
block = next;
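The do/while above is the classic lock-free (Treiber) stack push: link the new block to the current head, then compare-and-swap the head, retrying if another thread raced in between. The bare pattern in C11 atomics, without mimalloc's key-encoded links:

#include <stdatomic.h>

typedef struct node_s { struct node_s* next; } node_t;

static void push(_Atomic(node_t*)* head, node_t* n) {
  node_t* old = atomic_load_explicit(head, memory_order_relaxed);
  do {
    n->next = old;  // on CAS failure `old` is refreshed automatically
  } while (!atomic_compare_exchange_weak(head, &old, n));
}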
@@ -348,7 +348,7 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
#if MI_DEBUG>1
// check there are no references left..
for (mi_block_t* block = (mi_block_t*)pheap->thread_delayed_free; block != NULL; block = mi_block_nextx(pheap, block, pheap->key[0], pheap->key[1])) {
for (mi_block_t* block = (mi_block_t*)pheap->thread_delayed_free; block != NULL; block = mi_block_nextx(pheap, block, pheap->keys)) {
mi_assert_internal(_mi_ptr_page(block) != page);
}
#endif
@@ -393,7 +393,8 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
_mi_segment_page_free(page, force, segments_tld);
}
#define MI_MAX_RETIRE_SIZE (4*MI_SMALL_SIZE_MAX)
#define MI_MAX_RETIRE_SIZE (MI_MEDIUM_OBJ_SIZE_MAX)
#define MI_RETIRE_CYCLES (16)
// Retire a page with no more used blocks
// Important to not retire too quickly though as new
@@ -418,7 +419,13 @@ void _mi_page_retire(mi_page_t* page) {
if (mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_is_in_full(page))) {
if (pq->last==page && pq->first==page) { // the only page in the queue?
mi_stat_counter_increase(_mi_stats_main.page_no_retire,1);
page->retire_expire = 16;
page->retire_expire = MI_RETIRE_CYCLES;
mi_heap_t* heap = mi_page_heap(page);
mi_assert_internal(pq >= heap->pages);
const size_t index = pq - heap->pages;
mi_assert_internal(index < MI_BIN_FULL && index < MI_BIN_HUGE);
if (index < heap->page_retired_min) heap->page_retired_min = index;
if (index > heap->page_retired_max) heap->page_retired_max = index;
mi_assert_internal(mi_page_all_free(page));
return; // don't free after all
}
@@ -427,22 +434,32 @@ void _mi_page_retire(mi_page_t* page) {
}
// free retired pages: we don't need to look at the entire queues
// since we only retire pages that are the last one in a queue.
// since we only retire pages that are at the head position in a queue.
void _mi_heap_collect_retired(mi_heap_t* heap, bool force) {
for(mi_page_queue_t* pq = heap->pages; pq->block_size <= MI_MAX_RETIRE_SIZE; pq++) {
mi_page_t* page = pq->first;
size_t min = MI_BIN_FULL;
size_t max = 0;
for(size_t bin = heap->page_retired_min; bin <= heap->page_retired_max; bin++) {
mi_page_queue_t* pq = &heap->pages[bin];
mi_page_t* page = pq->first;
if (page != NULL && page->retire_expire != 0) {
if (mi_page_all_free(page)) {
page->retire_expire--;
if (force || page->retire_expire == 0) {
_mi_page_free(pq->first, pq, force);
}
else {
// keep retired, update min/max
if (bin < min) min = bin;
if (bin > max) max = bin;
}
}
else {
page->retire_expire = 0;
}
}
}
heap->page_retired_min = min;
heap->page_retired_max = max;
}
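The `page_retired_min`/`page_retired_max` fields act as an inclusive window over the bins that may still hold a retired page: `_mi_page_retire` widens the window, and the collect pass above scans only that window and re-tightens it. The bookkeeping in isolation (assumed names and bin count, for illustration only):

#include <stddef.h>

#define NBINS 74  // stand-in for the heap's bin count

typedef struct {
  int has_retired[NBINS];          // stand-in for "queue head is a retired page"
  size_t retired_min, retired_max; // scan window; min > max means empty
} heap_sketch_t;

static void note_retired(heap_sketch_t* h, size_t bin) {
  if (bin < h->retired_min) h->retired_min = bin;
  if (bin > h->retired_max) h->retired_max = bin;
}

static void collect_retired(heap_sketch_t* h) {
  size_t min = NBINS, max = 0;  // recompute the window while scanning
  for (size_t bin = h->retired_min; bin <= h->retired_max && bin < NBINS; bin++) {
    if (h->has_retired[bin]) {
      // try to free the page here; if it must stay retired, keep it in the window
      if (bin < min) min = bin;
      if (bin > max) max = bin;
    }
  }
  h->retired_min = min;
  h->retired_max = max;
}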
@@ -618,8 +635,8 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
mi_assert_internal(page_size / block_size < (1L<<16));
page->reserved = (uint16_t)(page_size / block_size);
#ifdef MI_ENCODE_FREELIST
page->key[0] = _mi_heap_random_next(heap);
page->key[1] = _mi_heap_random_next(heap);
page->keys[0] = _mi_heap_random_next(heap);
page->keys[1] = _mi_heap_random_next(heap);
#endif
page->is_zero = page->is_zero_init;
@@ -634,8 +651,8 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
mi_assert_internal(page->retire_expire == 0);
mi_assert_internal(!mi_page_has_aligned(page));
#if (MI_ENCODE_FREELIST)
mi_assert_internal(page->key[0] != 0);
mi_assert_internal(page->key[1] != 0);
mi_assert_internal(page->keys[0] != 0);
mi_assert_internal(page->keys[1] != 0);
#endif
mi_assert_expensive(mi_page_is_valid_init(page));
@@ -760,13 +777,13 @@ void mi_register_deferred_free(mi_deferred_free_fun* fn, void* arg) mi_attr_noex
// just that page, we always treat them as abandoned and any thread
// that frees the block can free the whole page and segment directly.
static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size) {
size_t block_size = _mi_wsize_from_size(size) * sizeof(uintptr_t);
size_t block_size = _mi_os_good_alloc_size(size);
mi_assert_internal(_mi_bin(block_size) == MI_BIN_HUGE);
bool is_huge = (block_size > MI_LARGE_OBJ_SIZE_MAX);
mi_page_queue_t* pq = (is_huge ? NULL : mi_page_queue(heap, block_size));
mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size);
if (page != NULL) {
const size_t bsize = mi_page_block_size(page);
const size_t bsize = mi_page_usable_block_size(page);
mi_assert_internal(mi_page_immediate_available(page));
mi_assert_internal(bsize >= size);
@@ -794,6 +811,7 @@ static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size) {
// Generic allocation routine if the fast path (`alloc.c:mi_page_malloc`) does not succeed.
// Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed.
void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept
{
mi_assert_internal(heap != NULL);
@@ -813,9 +831,10 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept
// huge allocation?
mi_page_t* page;
if (mi_unlikely(size > MI_MEDIUM_OBJ_SIZE_MAX)) {
if (mi_unlikely(size > PTRDIFF_MAX)) {
_mi_error_message(EOVERFLOW, "allocation request is too large (%zu b requested)\n", size);
const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size`
if (mi_unlikely(req_size > (MI_MEDIUM_OBJ_SIZE_MAX - MI_PADDING_SIZE))) {
if (mi_unlikely(req_size > PTRDIFF_MAX)) { // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
_mi_error_message(EOVERFLOW, "allocation request is too large (%zu b requested)\n", req_size);
return NULL;
}
else {
@@ -824,6 +843,7 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept
}
else {
// otherwise find a page with free blocks in our size segregated queues
mi_assert_internal(size >= MI_PADDING_SIZE);
page = mi_find_free_page(heap,size);
}
if (mi_unlikely(page == NULL)) { // out of memory
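The `req_size` correction above matters because the debug fast path adds `MI_PADDING_SIZE` to the caller's request, and for absurd sizes that addition can wrap around; subtracting the padding wraps back, so the `PTRDIFF_MAX` refusal still fires. The check in isolation (padding value assumed):

#include <stdint.h>
#include <stddef.h>

#define PADDING 16  // stand-in for MI_PADDING_SIZE

// `padded_size` is the caller's request plus PADDING, possibly wrapped around
static int request_too_large(size_t padded_size) {
  size_t req_size = padded_size - PADDING;  // unsigned wrap-around restores the request
  return req_size > PTRDIFF_MAX;            // refuse > PTRDIFF_MAX, as glibc does
}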

View File

@@ -11,7 +11,7 @@ terms of the MIT license. A copy of the license can be found in the file
/* ----------------------------------------------------------------------------
We use our own PRNG to keep predictable performance of random number generation
and to avoid implementations that use a lock. We only use the OS provided
and to avoid implementations that use a lock. We only use the OS provided
random source to initialize the initial seeds. Since we do not need ultimate
performance but we do rely on the security (for secret cookies in secure mode)
we use a cryptographically secure generator (chacha20).
@@ -21,11 +21,11 @@ we use a cryptographically secure generator (chacha20).
/* ----------------------------------------------------------------------------
Chacha20 implementation as the original algorithm with a 64-bit nonce
Chacha20 implementation as the original algorithm with a 64-bit nonce
and counter: https://en.wikipedia.org/wiki/Salsa20
The input matrix has sixteen 32-bit values:
Position 0 to 3: constant key
Position 4 to 11: the key
Position 4 to 11: the key
Position 12 to 13: the counter.
Position 14 to 15: the nonce.
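Spelled out as code, that layout looks as follows (a sketch; the sigma words are the standard chacha constant "expand 32-byte k", and mimalloc's own `chacha_init` additionally reads the key bytes little-endian for compatibility):

#include <stdint.h>
#include <string.h>

static void chacha_setup(uint32_t input[16], const uint32_t key[8],
                         uint64_t counter, uint64_t nonce) {
  static const uint32_t sigma[4] = { 0x61707865, 0x3320646e, 0x79622d32, 0x6b206574 };
  memcpy(&input[0], sigma, 4 * sizeof(uint32_t));  // positions 0..3  : constant
  memcpy(&input[4], key,   8 * sizeof(uint32_t));  // positions 4..11 : key
  input[12] = (uint32_t)counter;                   // positions 12..13: counter
  input[13] = (uint32_t)(counter >> 32);
  input[14] = (uint32_t)nonce;                     // positions 14..15: nonce
  input[15] = (uint32_t)(nonce >> 32);
}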
@@ -44,8 +44,8 @@ static inline void qround(uint32_t x[16], size_t a, size_t b, size_t c, size_t d
x[c] += x[d]; x[b] = rotl(x[b] ^ x[c], 7);
}
static void chacha_block(mi_random_ctx_t* ctx)
{
static void chacha_block(mi_random_ctx_t* ctx)
{
// scramble into `x`
uint32_t x[16];
for (size_t i = 0; i < 16; i++) {
@@ -72,8 +72,8 @@ static void chacha_block(mi_random_ctx_t* ctx)
ctx->input[12] += 1;
if (ctx->input[12] == 0) {
ctx->input[13] += 1;
if (ctx->input[13] == 0) { // and keep increasing into the nonce
ctx->input[14] += 1;
if (ctx->input[13] == 0) { // and keep increasing into the nonce
ctx->input[14] += 1;
}
}
}
@@ -83,7 +83,7 @@ static uint32_t chacha_next32(mi_random_ctx_t* ctx) {
chacha_block(ctx);
ctx->output_available = 16; // (assign again to suppress static analysis warning)
}
const uint32_t x = ctx->output[16 - ctx->output_available];
const uint32_t x = ctx->output[16 - ctx->output_available];
ctx->output[16 - ctx->output_available] = 0; // reset once the data is handed out
ctx->output_available--;
return x;
@@ -94,9 +94,9 @@ static inline uint32_t read32(const uint8_t* p, size_t idx32) {
return ((uint32_t)p[i+0] | (uint32_t)p[i+1] << 8 | (uint32_t)p[i+2] << 16 | (uint32_t)p[i+3] << 24);
}
static void chacha_init(mi_random_ctx_t* ctx, const uint8_t key[32], uint64_t nonce)
static void chacha_init(mi_random_ctx_t* ctx, const uint8_t key[32], uint64_t nonce)
{
// since we only use chacha for randomness (and not encryption) we
// since we only use chacha for randomness (and not encryption) we
// do not _need_ to read 32-bit values as little endian but we do anyways
// just for being compatible :-)
memset(ctx, 0, sizeof(*ctx));
@@ -110,7 +110,7 @@ static void chacha_init(mi_random_ctx_t* ctx, const uint8_t key[32], uint64_t no
ctx->input[12] = 0;
ctx->input[13] = 0;
ctx->input[14] = (uint32_t)nonce;
ctx->input[15] = (uint32_t)(nonce >> 32);
ctx->input[15] = (uint32_t)(nonce >> 32);
}
static void chacha_split(mi_random_ctx_t* ctx, uint64_t nonce, mi_random_ctx_t* ctx_new) {
@@ -184,7 +184,7 @@ static bool os_random_buf(void* buf, size_t buf_len) {
arc4random_buf(buf, buf_len);
return true;
}
#elif defined(__linux__)
#elif defined(__linux__)
#include <sys/syscall.h>
#include <unistd.h>
#include <sys/types.h>
@@ -241,8 +241,8 @@ static bool os_random_buf(void* buf, size_t buf_len) {
#include <time.h>
#endif
static uintptr_t os_random_weak(uintptr_t extra_seed) {
uintptr_t x = (uintptr_t)&os_random_weak ^ extra_seed; // ASLR makes the address random
uintptr_t _os_random_weak(uintptr_t extra_seed) {
uintptr_t x = (uintptr_t)&_os_random_weak ^ extra_seed; // ASLR makes the address random
#if defined(_WIN32)
LARGE_INTEGER pcount;
QueryPerformanceCounter(&pcount);
@@ -267,10 +267,10 @@ static uintptr_t os_random_weak(uintptr_t extra_seed) {
void _mi_random_init(mi_random_ctx_t* ctx) {
uint8_t key[32];
if (!os_random_buf(key, sizeof(key))) {
// if we fail to get random data from the OS, we fall back to a
// if we fail to get random data from the OS, we fall back to a
// weak random source based on the current time
_mi_warning_message("unable to use secure randomness\n");
uintptr_t x = os_random_weak(0);
uintptr_t x = _os_random_weak(0);
for (size_t i = 0; i < 8; i++) { // key is eight 32-bit words.
x = _mi_random_shuffle(x);
((uint32_t*)key)[i] = (uint32_t)x;
@@ -280,7 +280,7 @@ void _mi_random_init(mi_random_ctx_t* ctx) {
}
/* --------------------------------------------------------
test vectors from <https://tools.ietf.org/html/rfc8439>
test vectors from <https://tools.ietf.org/html/rfc8439>
----------------------------------------------------------- */
/*
static bool array_equals(uint32_t* x, uint32_t* y, size_t n) {

View File

@@ -861,12 +861,12 @@ Note: the current implementation is one possible design;
another way might be to keep track of abandoned segments
in the regions. This would have the advantage of keeping
all concurrent code in one place and not needing to deal
with ABA issues. The drawback is that it is unclear how to
scan abandoned segments efficiently in that case as they
with ABA issues. The drawback is that it is unclear how to
scan abandoned segments efficiently in that case as they
would be spread among all other segments in the regions.
----------------------------------------------------------- */
// Use the bottom 20-bits (on 64-bit) of the aligned segment pointers
// Use the bottom 20-bits (on 64-bit) of the aligned segment pointers
// to put in a tag that increments on update to avoid the A-B-A problem.
#define MI_TAGGED_MASK MI_SEGMENT_MASK
typedef uintptr_t mi_tagged_segment_t;
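The idea in isolation: segments are strongly aligned, so the low bits of a segment pointer are always zero and can carry a tag that is bumped on every update; a compare-and-swap against a stale tag then fails even when the pointer value itself has recurred. A sketch with an assumed 20-bit tag:

#include <stdint.h>

#define TAGGED_MASK (((uintptr_t)1 << 20) - 1)  // low 20 bits carry the tag

static void* tagged_ptr(uintptr_t ts) {  // strip the tag to recover the pointer
  return (void*)(ts & ~TAGGED_MASK);
}

static uintptr_t tagged_make(void* p, uintptr_t prev) {
  // `p` must be (TAGGED_MASK+1)-aligned so its low bits are free for the tag
  uintptr_t tag = ((prev & TAGGED_MASK) + 1) & TAGGED_MASK;  // bump and wrap
  return (uintptr_t)p | tag;
}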
@@ -882,7 +882,7 @@ static mi_tagged_segment_t mi_tagged_segment(mi_segment_t* segment, mi_tagged_se
}
// This is a list of visited abandoned pages that were full at the time.
// this list migrates to `abandoned` when that becomes NULL. The use of
// this list migrates to `abandoned` when that becomes NULL. The use of
// this list reduces contention and the rate at which segments are visited.
static mi_decl_cache_align volatile _Atomic(mi_segment_t*) abandoned_visited; // = NULL
@@ -908,7 +908,7 @@ static void mi_abandoned_visited_push(mi_segment_t* segment) {
}
// Move the visited list to the abandoned list.
static bool mi_abandoned_visited_revisit(void)
static bool mi_abandoned_visited_revisit(void)
{
// quick check if the visited list is empty
if (mi_atomic_read_ptr_relaxed(mi_segment_t,&abandoned_visited)==NULL) return false;
@@ -974,12 +974,12 @@ static mi_segment_t* mi_abandoned_pop(void) {
segment = mi_tagged_segment_ptr(ts);
if (mi_likely(segment == NULL)) {
if (mi_likely(!mi_abandoned_visited_revisit())) { // try to swap in the visited list on NULL
return NULL;
return NULL;
}
}
// Do a pop. We use a reader count to prevent
// a segment to be decommitted while a read is still pending,
// a segment to be decommitted while a read is still pending,
// and a tagged pointer to prevent A-B-A link corruption.
// (this is called from `memory.c:_mi_mem_free` for example)
mi_atomic_increment(&abandoned_readers); // ensure no segment gets decommitted
@@ -1192,7 +1192,7 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slice
// free the segment (by forced reclaim) to make it available to other threads.
// note1: we prefer to free a segment as that might lead to reclaiming another
// segment that is still partially used.
// note2: we could in principle optimize this by skipping reclaim and directly
// note2: we could in principle optimize this by skipping reclaim and directly
// freeing but that would violate some invariants temporarily
mi_segment_reclaim(segment, heap, 0, NULL, tld);
}
@@ -1216,7 +1216,7 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slice
/* -----------------------------------------------------------
Reclaim or allocate
Reclaim or allocate
----------------------------------------------------------- */
static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_slices, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
@@ -1293,6 +1293,34 @@ static mi_page_t* mi_segment_huge_page_alloc(size_t size, mi_segments_tld_t* tld
return page;
}
// free huge block from another thread
void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block) {
// huge page segments are always abandoned and can be freed immediately by any thread
mi_assert_internal(segment == _mi_page_segment(page));
mi_assert_internal(mi_atomic_read_relaxed(&segment->thread_id)==0);
// claim it and free
mi_heap_t* heap = mi_get_default_heap();
// paranoia: if this is the last reference, the cas should always succeed
if (mi_atomic_cas_strong(&segment->thread_id, heap->thread_id, 0)) {
mi_block_set_next(page, block, page->free);
page->free = block;
page->used--;
page->is_zero = false;
mi_assert(page->used == 0);
mi_segments_tld_t* tld = &heap->tld->segments;
const size_t bsize = mi_page_usable_block_size(page);
if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
_mi_stat_decrease(&tld->stats->large, bsize);
}
else {
_mi_stat_decrease(&tld->stats->huge, bsize);
}
// mi_segments_track_size((long)segment->segment_size, tld);
_mi_segment_page_free(page, true, tld);
}
}
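The claim step above is a general pattern worth naming: an abandoned segment carries `thread_id == 0`, and a thread takes ownership by installing its own id with a compare-and-swap, so exactly one thread wins the right to free it. The core of it in C11 atomics:

#include <stdatomic.h>
#include <stdint.h>

typedef struct { _Atomic(uintptr_t) owner; } resource_t;

// abandoned resources have owner == 0; returns non-zero if we won ownership
static int try_claim(resource_t* r, uintptr_t my_thread_id) {
  uintptr_t expected = 0;
  return atomic_compare_exchange_strong(&r->owner, &expected, my_thread_id);
}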
/* -----------------------------------------------------------
Page allocation and free
----------------------------------------------------------- */

View File

@@ -13,7 +13,7 @@ if (NOT CMAKE_BUILD_TYPE)
endif()
# Import mimalloc (if installed)
find_package(mimalloc 1.5 REQUIRED NO_SYSTEM_ENVIRONMENT_PATH)
find_package(mimalloc 1.6 REQUIRED NO_SYSTEM_ENVIRONMENT_PATH)
message(STATUS "Found mimalloc installed at: ${MIMALLOC_TARGET_DIR}")
# overriding with a dynamic library

View File

@@ -174,6 +174,7 @@ void mi_bins() {
static void double_free1();
static void double_free2();
static void corrupt_free();
static void block_overflow1();
int main() {
@@ -183,11 +184,12 @@ int main() {
// double_free1();
// double_free2();
// corrupt_free();
// block_overflow1();
void* p1 = malloc(78);
void* p2 = malloc(24);
free(p1);
p1 = malloc(8);
p1 = mi_malloc(8);
//char* s = strdup("hello\n");
free(p2);
p2 = malloc(16);
@@ -207,6 +209,11 @@ int main() {
return 0;
}
static void block_overflow1() {
uint8_t* p = (uint8_t*)mi_malloc(17);
p[18] = 0;
free(p);
}
// The double free samples come from ArcHeap [1] by Insu Yun (issue #161)
// [1]: https://arxiv.org/pdf/1903.00503.pdf

View File

@@ -8,6 +8,33 @@
#include <new>
#include <vector>
#include <thread>
#include <mimalloc.h>
#include <assert.h>
#ifdef _WIN32
#include <windows.h>
static void msleep(unsigned long msecs) { Sleep(msecs); }
#else
#include <unistd.h>
static void msleep(unsigned long msecs) { usleep(msecs * 1000UL); }
#endif
void heap_no_delete();
void heap_late_free();
void padding_shrink();
void various_tests();
int main() {
mi_stats_reset(); // ignore earlier allocations
// heap_no_delete(); // issue #202
// heap_late_free(); // issue #204
padding_shrink(); // issue #209
various_tests();
mi_stats_print(NULL);
return 0;
}
static void* p = malloc(8);
void free_p() {
@@ -24,21 +51,20 @@ public:
};
int main() {
mi_stats_reset(); // ignore earlier allocations
void various_tests() {
atexit(free_p);
void* p1 = malloc(78);
void* p2 = mi_malloc_aligned(16,24);
free(p1);
p1 = malloc(8);
char* s = mi_strdup("hello\n");
/*
char* s = _strdup("hello\n");
char* buf = NULL;
size_t len;
_dupenv_s(&buf,&len,"MIMALLOC_VERBOSE");
mi_free(buf);
*/
//char* s = _strdup("hello\n");
//char* buf = NULL;
//size_t len;
//_dupenv_s(&buf,&len,"MIMALLOC_VERBOSE");
//mi_free(buf);
mi_free(p2);
p2 = malloc(16);
p1 = realloc(p1, 32);
@@ -49,8 +75,6 @@ int main() {
delete t;
t = new (std::nothrow) Test(42);
delete t;
mi_stats_print(NULL);
return 0;
}
class Static {
@@ -84,4 +108,52 @@ bool test_stl_allocator2() {
vec.push_back(some_struct());
vec.pop_back();
return vec.size() == 0;
}
}
// Issue #202
void heap_no_delete_worker() {
mi_heap_t* heap = mi_heap_new();
void* q = mi_heap_malloc(heap,1024);
// mi_heap_delete(heap); // uncomment to prevent assertion
}
void heap_no_delete() {
auto t1 = std::thread(heap_no_delete_worker);
t1.join();
}
// Issue #204
volatile void* global_p;
void t1main() {
mi_heap_t* heap = mi_heap_new();
global_p = mi_heap_malloc(heap, 1024);
mi_heap_delete(heap);
}
void heap_late_free() {
auto t1 = std::thread(t1main);
msleep(2000);
assert(global_p);
mi_free((void*)global_p);
t1.join();
}
// issue #209
static void* shared_p;
static void alloc0(/* void* arg */)
{
shared_p = mi_malloc(8);
}
void padding_shrink(void)
{
auto t1 = std::thread(alloc0);
t1.join();
mi_free(shared_p);
}

View File

@@ -31,7 +31,7 @@ we therefore test the API over various inputs. Please add more tests :-)
#endif
#include "mimalloc.h"
#include "mimalloc-internal.h"
// #include "mimalloc-internal.h"
// ---------------------------------------------------------------------------
// Test macros: CHECK(name,predicate) and CHECK_BODY(name,body)
@@ -98,38 +98,34 @@ int main() {
// ---------------------------------------------------
// Extended
// ---------------------------------------------------
#if defined(MI_MALLOC_OVERRIDE) && !defined(_WIN32)
// ---------------------------------------------------
CHECK_BODY("posix_memalign1", {
void* p = &p;
int err = posix_memalign(&p, sizeof(void*), 32);
mi_assert((err==0 && (uintptr_t)p % sizeof(void*) == 0) || p==&p);
int err = mi_posix_memalign(&p, sizeof(void*), 32);
result = ((err==0 && (uintptr_t)p % sizeof(void*) == 0) || p==&p);
mi_free(p);
result = (err==0);
});
CHECK_BODY("posix_memalign_no_align", {
void* p = &p;
int err = posix_memalign(&p, 3, 32);
mi_assert(p==&p);
result = (err==EINVAL);
int err = mi_posix_memalign(&p, 3, 32);
result = (err==EINVAL && p==&p);
});
CHECK_BODY("posix_memalign_zero", {
void* p = &p;
int err = posix_memalign(&p, sizeof(void*), 0);
int err = mi_posix_memalign(&p, sizeof(void*), 0);
mi_free(p);
result = (err==0);
});
CHECK_BODY("posix_memalign_nopow2", {
void* p = &p;
int err = posix_memalign(&p, 3*sizeof(void*), 32);
int err = mi_posix_memalign(&p, 3*sizeof(void*), 32);
result = (err==EINVAL && p==&p);
});
CHECK_BODY("posix_memalign_nomem", {
void* p = &p;
int err = posix_memalign(&p, sizeof(void*), SIZE_MAX);
int err = mi_posix_memalign(&p, sizeof(void*), SIZE_MAX);
result = (err==ENOMEM && p==&p);
});
#endif
// ---------------------------------------------------
// Aligned API
@@ -140,12 +136,37 @@ int main() {
CHECK_BODY("malloc-aligned2", {
void* p = mi_malloc_aligned(48,32); result = (p != NULL && (uintptr_t)(p) % 32 == 0); mi_free(p);
});
CHECK_BODY("malloc-aligned3", {
void* p1 = mi_malloc_aligned(48,32); bool result1 = (p1 != NULL && (uintptr_t)(p1) % 32 == 0);
void* p2 = mi_malloc_aligned(48,32); bool result2 = (p2 != NULL && (uintptr_t)(p2) % 32 == 0);
mi_free(p2);
mi_free(p1);
result = (result1&&result2);
});
CHECK_BODY("malloc-aligned4", {
void* p;
bool ok = true;
for (int i = 0; i < 8 && ok; i++) {
p = mi_malloc_aligned(8, 16);
ok = (p != NULL && (uintptr_t)(p) % 16 == 0); mi_free(p);
}
result = ok;
});
CHECK_BODY("malloc-aligned-at1", {
void* p = mi_malloc_aligned_at(48,32,0); result = (p != NULL && ((uintptr_t)(p) + 0) % 32 == 0); mi_free(p);
});
CHECK_BODY("malloc-aligned-at2", {
void* p = mi_malloc_aligned_at(50,32,8); result = (p != NULL && ((uintptr_t)(p) + 8) % 32 == 0); mi_free(p);
});
});
CHECK_BODY("memalign1", {
void* p;
bool ok = true;
for (int i = 0; i < 8 && ok; i++) {
p = mi_memalign(16,8);
ok = (p != NULL && (uintptr_t)(p) % 16 == 0); mi_free(p);
}
result = ok;
});
// ---------------------------------------------------
// Heaps

View File

@@ -188,7 +188,7 @@ static void test_stress(void) {
free_items(p);
}
}
mi_collect(false);
// mi_collect(false);
#ifndef NDEBUG
if ((n + 1) % 10 == 0) { printf("- iterations left: %3d\n", ITER - (n + 1)); }
#endif
@@ -206,7 +206,7 @@ static void leak(intptr_t tid) {
}
}
static void test_leak(void) {
static void test_leak(void) {
for (int n = 0; n < ITER; n++) {
run_os_threads(THREADS, &leak);
mi_collect(false);
@@ -242,14 +242,14 @@ int main(int argc, char** argv) {
// Run ITER full iterations where half the objects in the transfer buffer survive to the next round.
srand(0x7feb352d);
mi_stats_reset();
// mi_stats_reset();
#ifdef STRESS
test_stress();
#else
test_leak();
#endif
#endif
mi_collect(true);
// mi_collect(true);
mi_stats_print(NULL);
//bench_end_program();
return 0;
@@ -262,7 +262,7 @@ static void (*thread_entry_fun)(intptr_t) = &stress;
#include <windows.h>
static DWORD WINAPI thread_entry(LPVOID param) {
static DWORD WINAPI thread_entry(LPVOID param) {
thread_entry_fun((intptr_t)param);
return 0;
}