MemoryManager:

* Added support for larger raw allocations (up to one large chunk, i.e.
  128 pages) in the slab areas. For an even larger allocation a dedicated
  area is created (I haven't seen that happen yet, though); see the first
  sketch after this list.
* Added kernel tracing (SLAB_MEMORY_MANAGER_TRACING).
* _FreeArea(): Fixed a copy-and-paste bug: the meta chunks of the area to
  be freed were added to the free lists instead of being removed from them.
  This corrupted the lists and also led to all kinds of misuse of meta
  chunks; see the second sketch after this list.
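
  A minimal caller-side sketch of the new raw allocation path (the routing
  itself lives in the MemoryManager internals, which do not appear in the
  diffs below, so allocate_raw_example()/free_raw_example() are purely
  illustrative wrappers around the real AllocateRaw()/FreeRawOrReturnCache()
  entry points):

    void*
    allocate_raw_example(size_t size, uint32 flags)
    {
        void* pages;
        // Sizes up to one large chunk (128 pages) are carved out of the
        // existing slab areas; anything larger gets a dedicated area
        // inside AllocateRaw().
        if (MemoryManager::AllocateRaw(size, flags, pages) != B_OK)
            return NULL;
        return pages;
    }

    void
    free_raw_example(void* pages, uint32 flags)
    {
        // Raw allocations are unmapped/freed directly (NULL is returned);
        // for small chunk allocations the owning cache is returned instead.
        ObjectCache* cache = MemoryManager::FreeRawOrReturnCache(pages,
            flags);
        if (cache != NULL)
            object_cache_free(cache, pages, flags);
    }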
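
  And a hedged sketch of the _FreeArea() fix (again, the MemoryManager
  internals are not part of the visible diffs, so the list, constant, and
  helper names here are assumptions rather than the actual identifiers):

    // The copy-and-paste bug effectively did sFreeMetaChunks.Add() here,
    // re-inserting the meta chunks of the dying area into the free lists
    // instead of taking them out, so later allocations could be served
    // from unmapped memory.
    static void
    remove_area_meta_chunks(Area* area)    // hypothetical helper
    {
        for (uint32 i = 0; i < kMetaChunksPerArea; i++) {
            MetaChunk* metaChunk = area->metaChunks + i;
            if (metaChunk->usedChunkCount == 0)
                sFreeMetaChunks.Remove(metaChunk);    // was: Add(metaChunk)
        }
    }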

object caches:
* Implemented CACHE_ALIGN_ON_SIZE. It is no longer set for all small object
  caches; instead the block allocator sets it on all caches with
  power-of-two object sizes (see the block cache setup sketch at the end of
  these notes).
* object_cache_reserve_internal(): Detect recursion and don't wait in such
  a case, since the function could deadlock against itself:
  HashedObjectCache::CreateSlab() allocates memory and can thus reenter it
  (see the first sketch after this list).
* object_cache_low_memory():
  - I missed some returns when reworking the function in r35254, so it
    could stop early and also leave the cache in maintenance mode, which
    would cause it to be ignored by the object cache resizer and the low
    memory handler from that point on.
  - Since ReturnSlab() potentially unlocks the cache, the conditions
    weren't quite correct and too many slabs could be freed.
  - Simplified things a bit.
* object_cache_alloc(): Since object_cache_reserve_internal() potentially
  unlocks the cache, the situation might have changed by the time it
  returns: there might not be an empty slab available, only a partial one.
  The function would crash in that case (see the second sketch after this
  list).
* Renamed the object cache tracing variable to SLAB_OBJECT_CACHE_TRACING.
* Renamed debugger command "cache_info" to "slab_cache" to avoid confusion with
  the VMCache commands.
* ObjectCache::usage was no longer maintained after I introduced the
  MemoryManager. object_cache_get_usage() would thus always return 0, and
  the block cache would not be counted as cached memory. This was only of
  informational relevance, though.
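
  First sketch: the recursion guard in object_cache_reserve_internal(),
  condensed from the slab.cpp hunk further down (simplified, not the full
  function):

    // Every ObjectCacheResizeEntry now records the thread that is adding
    // slabs. If that same thread reenters the function -- e.g. because
    // HashedObjectCache::CreateSlab() allocates memory -- it must not
    // wait on its own entry:
    thread_id thread = find_thread(NULL);
    if (resizeEntry != NULL && thread == resizeEntry->thread)
        return B_WOULD_BLOCK;

    // ... and a thread that adds slabs itself publishes its identity
    // before doing so:
    resizeEntry->thread = thread;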
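
  Second sketch: the reworked slab selection in object_cache_alloc(), also
  condensed from the slab.cpp hunk below. It re-examines the lists after
  every potential unlock instead of assuming that an empty slab must have
  appeared (tracing call omitted):

    slab* source = NULL;
    while (true) {
        source = cache->partial.Head();
        if (source != NULL)
            break;    // a partial slab will do just fine

        source = cache->empty.RemoveHead();
        if (source != NULL) {
            cache->empty_count--;
            cache->partial.Add(source);
            break;
        }

        // May unlock (and relock) the cache -- afterwards the lists are
        // checked again from the top.
        if (object_cache_reserve_internal(cache, 1, flags) != B_OK)
            return NULL;
        cache->pressure++;
    }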

slab allocator misc.:
* Disabled the object depots of block allocator caches for object sizes
  > 2 KB. Allocations of those sizes aren't common enough for the depots to
  yield any benefit (see the sketch after this list).
* The slab allocator is now fully self-sufficient: it allocates its
  bootstrap memory from the MemoryManager, and the hash tables of
  HashedObjectCaches now use the block allocator instead of the heap.
* Added an option to use the slab allocator for malloc() and friends
  (USE_SLAB_ALLOCATOR_FOR_MALLOC). Currently disabled. It works in
  principle and has virtually no lock contention, but handling for low
  memory situations is still missing.
* Improved the output of some debugger commands.
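
  The block cache setup implementing the alignment and depot points above,
  condensed from the block allocator diff below:

    for (int index = 0; kBlockSizes[index] != 0; index++) {
        size_t size = kBlockSizes[index];
        char name[32];
        snprintf(name, sizeof(name), "block cache: %lu", size);

        uint32 flags = CACHE_DURING_BOOT;
        // align objects of power-of-two size on their size
        if ((size & (size - 1)) == 0)
            flags |= CACHE_ALIGN_ON_SIZE;
        // for the larger sizes skip the depot -- such allocations are too
        // rare for cached per-CPU objects to pay off
        if (size > 2048)
            flags |= CACHE_NO_DEPOT;

        sBlockCaches[index] = create_object_cache_etc(name, size, 0, 0,
            flags, NULL, NULL, NULL, NULL);
        if (sBlockCaches[index] == NULL)
            panic("allocator: failed to init block cache");
    }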


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@35283 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold 2010-01-25 13:46:58 +00:00
parent a1db5b96bb
commit b4e5e49823
15 changed files with 1147 additions and 217 deletions


@ -94,6 +94,9 @@
// Enables swap support.
#define ENABLE_SWAP_SUPPORT 1
// Use the slab allocator as generic memory allocator (malloc()/free()).
#define USE_SLAB_ALLOCATOR_FOR_MALLOC 0
// When set limits the amount of available RAM (in MB).
//#define LIMIT_AVAILABLE_MEMORY 256


@ -30,7 +30,6 @@
#define KTRACE_PRINTF_STACK_TRACE 0 /* stack trace depth */
#define NET_BUFFER_TRACING 0
#define NET_BUFFER_TRACING_STACK_TRACE 0 /* stack trace depth */
#define OBJECT_CACHE_TRACING 0
#define PAGE_ALLOCATION_TRACING 0
#define PAGE_DAEMON_TRACING 0
#define PAGE_WRITER_TRACING 0
@ -41,6 +40,8 @@
#define SCHEDULER_TRACING 0
#define SCHEDULING_ANALYSIS_TRACING 0
#define SIGNAL_TRACING 0
#define SLAB_MEMORY_MANAGER_TRACING 0
#define SLAB_OBJECT_CACHE_TRACING 0
#define SWAP_TRACING 0
#define SYSCALL_TRACING 0
#define SYSCALL_TRACING_IGNORE_KTRACE_OUTPUT 1


@ -49,8 +49,7 @@ void vm_free_unused_boot_loader_range(addr_t start, addr_t end);
addr_t vm_allocate_early(struct kernel_args *args, size_t virtualSize,
size_t physicalSize, uint32 attributes, bool blockAlign);
void slab_init(struct kernel_args *args, addr_t initialBase,
size_t initialSize);
void slab_init(struct kernel_args *args);
void slab_init_post_area();
void slab_init_post_sem();
void slab_init_post_thread();


@ -1687,6 +1687,9 @@ heap_set_get_caller(heap_allocator* heap, addr_t (*getCaller)())
#endif
#if !USE_SLAB_ALLOCATOR_FOR_MALLOC
static status_t
heap_realloc(heap_allocator *heap, void *address, void **newAddress,
size_t newSize)
@ -1794,6 +1797,9 @@ heap_realloc(heap_allocator *heap, void *address, void **newAddress,
}
#endif // !USE_SLAB_ALLOCATOR_FOR_MALLOC
inline uint32
heap_index_for(size_t size, int32 cpu)
{
@ -2045,6 +2051,9 @@ heap_init_post_thread()
// #pragma mark - Public API
#if !USE_SLAB_ALLOCATOR_FOR_MALLOC
void *
memalign(size_t alignment, size_t size)
{
@ -2312,6 +2321,9 @@ realloc(void *address, size_t newSize)
}
#endif // !USE_SLAB_ALLOCATOR_FOR_MALLOC
void *
calloc(size_t numElements, size_t size)
{


@ -61,6 +61,16 @@ HashedObjectCache::Create(const char* name, size_t object_size,
HashedObjectCache* cache = new(buffer) HashedObjectCache();
// init the hash table
size_t hashSize = cache->hash_table.ResizeNeeded();
buffer = slab_internal_alloc(hashSize, flags);
if (buffer == NULL) {
cache->Delete();
return NULL;
}
cache->hash_table.Resize(buffer, hashSize, true);
if (cache->Init(name, object_size, alignment, maximum, flags, cookie,
constructor, destructor, reclaimer) != B_OK) {
cache->Delete();
@ -96,21 +106,20 @@ HashedObjectCache::CreateSlab(uint32 flags)
Unlock();
slab* slab = allocate_slab(flags);
if (slab != NULL) {
void* pages;
if (MemoryManager::Allocate(this, flags, pages) == B_OK) {
Lock();
if (InitSlab(slab, pages, slab_size, flags))
return slab;
Unlock();
MemoryManager::Free(pages, flags);
}
Lock();
if (slab == NULL)
return NULL;
void* pages;
if (MemoryManager::Allocate(this, flags, pages) == B_OK) {
if (InitSlab(slab, pages, slab_size, flags))
return slab;
MemoryManager::Free(pages, flags);
free_slab(slab, flags);
}
free_slab(slab, flags);
Lock();
return NULL;
}
@ -119,8 +128,10 @@ void
HashedObjectCache::ReturnSlab(slab* slab, uint32 flags)
{
UninitSlab(slab);
Unlock();
MemoryManager::Free(slab->pages, flags);
free_slab(slab, flags);
Lock();
}
@ -147,11 +158,9 @@ HashedObjectCache::PrepareObject(slab* source, void* object, uint32 flags)
link->buffer = object;
link->parent = source;
hash_table.Insert(link);
// TODO: This might resize the table! Currently it uses the heap, so
// we won't possibly reenter and deadlock on our own cache. We do ignore
// the flags, though!
// TODO: We don't pre-init the table, so Insert() can fail!
hash_table.InsertUnchecked(link);
_ResizeHashTableIfNeeded(flags);
return B_OK;
}
@ -170,11 +179,37 @@ HashedObjectCache::UnprepareObject(slab* source, void* object, uint32 flags)
return;
}
hash_table.Remove(link);
hash_table.RemoveUnchecked(link);
_ResizeHashTableIfNeeded(flags);
_FreeLink(link, flags);
}
void
HashedObjectCache::_ResizeHashTableIfNeeded(uint32 flags)
{
size_t hashSize = hash_table.ResizeNeeded();
if (hashSize != 0) {
Unlock();
void* buffer = slab_internal_alloc(hashSize, flags);
Lock();
if (buffer != NULL) {
if (hash_table.ResizeNeeded() == hashSize) {
void* oldHash;
hash_table.Resize(buffer, hashSize, true, &oldHash);
if (oldHash != NULL) {
Unlock();
slab_internal_free(oldHash, flags);
Lock();
}
}
}
}
}
/*static*/ inline HashedObjectCache::Link*
HashedObjectCache::_AllocateLink(uint32 flags)
{


@ -11,6 +11,7 @@
#include <util/OpenHashTable.h>
#include "ObjectCache.h"
#include "slab_private.h"
struct HashedObjectCache : ObjectCache {
@ -81,11 +82,26 @@ private:
HashedObjectCache* parent;
};
typedef BOpenHashTable<Definition> HashTable;
struct InternalAllocator {
void* Allocate(size_t size) const
{
return slab_internal_alloc(size, 0);
}
void Free(void* memory) const
{
slab_internal_free(memory, 0);
}
};
typedef BOpenHashTable<Definition, false, false,
InternalAllocator> HashTable;
friend class Definition;
private:
void _ResizeHashTableIfNeeded(uint32 flags);
static Link* _AllocateLink(uint32 flags);
static void _FreeLink(HashedObjectCache::Link* link,
uint32 flags);

File diff suppressed because it is too large.


@ -41,19 +41,28 @@ public:
void*& _pages);
static void Free(void* pages, uint32 flags);
static status_t AllocateRaw(size_t size, uint32 flags,
void*& _pages);
static ObjectCache* FreeRawOrReturnCache(void* pages,
uint32 flags);
static size_t AcceptableChunkSize(size_t size);
static ObjectCache* GetAllocationInfo(void* address,
size_t& _size);
static ObjectCache* CacheForAddress(void* address);
static bool MaintenanceNeeded();
static void PerformMaintenance();
private:
struct Tracing;
struct Area;
struct Chunk {
union {
Chunk* next;
ObjectCache* cache;
addr_t reference;
};
};
@ -63,6 +72,8 @@ private:
size_t totalSize;
uint16 chunkCount;
uint16 usedChunkCount;
uint16 firstFreeChunk; // *some* free range
uint16 lastFreeChunk; // inclusive
Chunk chunks[SLAB_SMALL_CHUNKS_PER_META_CHUNK];
Chunk* freeChunks;
@ -115,7 +126,11 @@ private:
};
private:
static status_t _AllocateChunk(size_t chunkSize, uint32 flags,
static status_t _AllocateChunks(size_t chunkSize,
uint32 chunkCount, uint32 flags,
MetaChunk*& _metaChunk, Chunk*& _chunk);
static bool _GetChunks(MetaChunkList* metaChunkList,
size_t chunkSize, uint32 chunkCount,
MetaChunk*& _metaChunk, Chunk*& _chunk);
static bool _GetChunk(MetaChunkList* metaChunkList,
size_t chunkSize, MetaChunk*& _metaChunk,
@ -135,10 +150,9 @@ private:
static status_t _MapChunk(VMArea* vmArea, addr_t address,
size_t size, size_t reserveAdditionalMemory,
uint32 flags);
static status_t _UnmapChunk(VMArea* vmArea,addr_t address,
static status_t _UnmapChunk(VMArea* vmArea, addr_t address,
size_t size, uint32 flags);
static void _UnmapChunkEarly(addr_t address, size_t size);
static void _UnmapFreeChunksEarly(Area* area);
static void _ConvertEarlyArea(Area* area);
@ -148,7 +162,10 @@ private:
const MetaChunk* metaChunk, addr_t address);
static addr_t _ChunkAddress(const MetaChunk* metaChunk,
const Chunk* chunk);
static bool _IsChunkFree(const MetaChunk* metaChunk,
const Chunk* chunk);
static int _DumpRawAllocations(int argc, char** argv);
static void _PrintMetaChunkTableHeader(bool printChunks);
static void _DumpMetaChunk(MetaChunk* metaChunk,
bool printChunks, bool printHeader);
@ -202,6 +219,15 @@ MemoryManager::_ChunkAddress(const MetaChunk* metaChunk, const Chunk* chunk)
}
/*static*/ inline bool
MemoryManager::_IsChunkFree(const MetaChunk* metaChunk, const Chunk* chunk)
{
return chunk->next == NULL
|| (chunk->next >= metaChunk->chunks
&& chunk->next < metaChunk->chunks + metaChunk->chunkCount);
}
inline MemoryManager::Area*
MemoryManager::MetaChunk::GetArea() const
{


@ -112,15 +112,18 @@ ObjectCache::InitSlab(slab* slab, void* pages, size_t byteCount, uint32 flags)
slab->pages = pages;
slab->count = slab->size = byteCount / object_size;
slab->free = NULL;
total_objects += slab->size;
size_t spareBytes = byteCount - (slab->size * object_size);
slab->offset = cache_color_cycle;
if (slab->offset > spareBytes)
cache_color_cycle = slab->offset = 0;
else
cache_color_cycle += kCacheColorPeriod;
if ((this->flags & CACHE_ALIGN_ON_SIZE) != 0) {
slab->offset = cache_color_cycle;
if (slab->offset > spareBytes)
cache_color_cycle = slab->offset = 0;
else
cache_color_cycle += kCacheColorPeriod;
} else
slab->offset = 0;
TRACE_CACHE(this, " %lu objects, %lu spare bytes, offset %lu",
slab->size, spareBytes, slab->offset);
@ -163,6 +166,9 @@ ObjectCache::InitSlab(slab* slab, void* pages, size_t byteCount, uint32 flags)
data += object_size;
}
usage += slab_size;
total_objects += slab->size;
return slab;
}
@ -175,6 +181,7 @@ ObjectCache::UninitSlab(slab* slab)
if (slab->count != slab->size)
panic("cache: destroying a slab which isn't empty.");
usage -= slab_size;
total_objects -= slab->size;
DELETE_PARANOIA_CHECK_SET(slab);


@ -34,6 +34,7 @@ typedef DoublyLinkedList<slab> SlabList;
struct ObjectCacheResizeEntry {
ConditionVariable condition;
thread_id thread;
};
struct ObjectCache : DoublyLinkedListLinkImpl<ObjectCache> {


@ -50,10 +50,10 @@ static MaintenanceQueue sMaintenanceQueue;
static ConditionVariable sMaintenanceCondition;
#if OBJECT_CACHE_TRACING
#if SLAB_OBJECT_CACHE_TRACING
namespace ObjectCacheTracing {
namespace SlabObjectCacheTracing {
class ObjectCacheTraceEntry : public AbstractTraceEntry {
public:
@ -186,13 +186,13 @@ class Reserve : public ObjectCacheTraceEntry {
};
} // namespace ObjectCacheTracing
} // namespace SlabObjectCacheTracing
# define T(x) new(std::nothrow) ObjectCacheTracing::x
# define T(x) new(std::nothrow) SlabObjectCacheTracing::x
#else
# define T(x)
#endif // OBJECT_CACHE_TRACING
#endif // SLAB_OBJECT_CACHE_TRACING
// #pragma mark -
@ -211,8 +211,7 @@ dump_slabs(int argc, char* argv[])
kprintf("%p %22s %8lu %8lu %6lu %8lu %8lu %8lx\n", cache, cache->name,
cache->object_size, cache->usage, cache->empty_count,
cache->used_count, cache->usage / cache->object_size,
cache->flags);
cache->used_count, cache->total_objects, cache->flags);
}
return 0;
@ -241,6 +240,8 @@ dump_cache_info(int argc, char* argv[])
kprintf("maximum: %lu\n", cache->maximum);
kprintf("flags: 0x%lx\n", cache->flags);
kprintf("cookie: %p\n", cache->cookie);
kprintf("resize entry don't wait: %p\n", cache->resize_entry_dont_wait);
kprintf("resize entry can wait: %p\n", cache->resize_entry_can_wait);
return 0;
}
@ -249,23 +250,6 @@ dump_cache_info(int argc, char* argv[])
// #pragma mark -
void*
slab_internal_alloc(size_t size, uint32 flags)
{
if (flags & CACHE_DURING_BOOT)
return block_alloc_early(size);
return block_alloc(size, flags);
}
void
slab_internal_free(void* buffer, uint32 flags)
{
block_free(buffer, flags);
}
void
request_memory_manager_maintenance()
{
@ -322,6 +306,7 @@ object_cache_reserve_internal(ObjectCache* cache, size_t objectCount,
{
// If someone else is already adding slabs, we wait for that to be finished
// first.
thread_id thread = find_thread(NULL);
while (true) {
if (objectCount <= cache->total_objects - cache->used_count)
return B_OK;
@ -329,9 +314,19 @@ object_cache_reserve_internal(ObjectCache* cache, size_t objectCount,
ObjectCacheResizeEntry* resizeEntry = NULL;
if (cache->resize_entry_dont_wait != NULL) {
resizeEntry = cache->resize_entry_dont_wait;
} else if (cache->resize_entry_can_wait != NULL
&& (flags & CACHE_DONT_WAIT_FOR_MEMORY) == 0) {
if (thread == resizeEntry->thread)
return B_WOULD_BLOCK;
// Note: We could still have reentered the function, i.e.
// resize_entry_can_wait would be ours. That doesn't matter much,
// though, since after the don't-wait thread has done its job
// everyone will be happy.
} else if (cache->resize_entry_can_wait != NULL) {
resizeEntry = cache->resize_entry_can_wait;
if (thread == resizeEntry->thread)
return B_WOULD_BLOCK;
if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0)
break;
} else
break;
@ -351,6 +346,7 @@ object_cache_reserve_internal(ObjectCache* cache, size_t objectCount,
ObjectCacheResizeEntry myResizeEntry;
resizeEntry = &myResizeEntry;
resizeEntry->condition.Init(cache, "wait for slabs");
resizeEntry->thread = thread;
// add new slabs until there are as many free ones as requested
while (objectCount > cache->total_objects - cache->used_count) {
@ -431,29 +427,13 @@ object_cache_low_memory(void* dummy, uint32 resources, int32 level)
break;
}
// If the object cache has minimum object reserve, make sure that we
// don't free too many slabs.
if (cache->min_object_reserve > 0 && cache->empty_count > 0) {
while (cache->empty_count > minimumAllowed) {
// make sure we respect the cache's minimum object reserve
size_t objectsPerSlab = cache->empty.Head()->size;
size_t freeObjects = cache->total_objects - cache->used_count;
if (freeObjects < cache->min_object_reserve + objectsPerSlab)
break;
if (cache->min_object_reserve + objectsPerSlab >= freeObjects)
return;
size_t slabsToFree = (freeObjects - cache->min_object_reserve)
/ objectsPerSlab;
if (cache->empty_count > minimumAllowed + slabsToFree)
minimumAllowed = cache->empty_count - slabsToFree;
}
if (cache->empty_count <= minimumAllowed)
return;
TRACE_CACHE(cache, "cache: memory pressure, will release down to %lu.",
minimumAllowed);
while (cache->empty_count > minimumAllowed) {
cache->ReturnSlab(cache->empty.RemoveHead(), 0);
cache->empty_count--;
}
@ -649,23 +629,26 @@ object_cache_alloc(object_cache* cache, uint32 flags)
}
MutexLocker _(cache->lock);
slab* source;
slab* source = NULL;
if (cache->partial.IsEmpty()) {
if (cache->empty.IsEmpty()) {
if (object_cache_reserve_internal(cache, 1, flags) < B_OK) {
T(Alloc(cache, flags, NULL));
return NULL;
}
cache->pressure++;
}
while (true) {
source = cache->partial.Head();
if (source != NULL)
break;
source = cache->empty.RemoveHead();
cache->empty_count--;
cache->partial.Add(source);
} else {
source = cache->partial.Head();
if (source != NULL) {
cache->empty_count--;
cache->partial.Add(source);
break;
}
if (object_cache_reserve_internal(cache, 1, flags) != B_OK) {
T(Alloc(cache, flags, NULL));
return NULL;
}
cache->pressure++;
}
ParanoiaChecker _2(source);
@ -734,19 +717,17 @@ object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory)
void
slab_init(kernel_args* args, addr_t initialBase, size_t initialSize)
slab_init(kernel_args* args)
{
dprintf("slab: init base %p + 0x%lx\n", (void*)initialBase, initialSize);
MemoryManager::Init(args);
new (&sObjectCaches) ObjectCacheList();
block_allocator_init_boot(initialBase, initialSize);
block_allocator_init_boot();
add_debugger_command("slabs", dump_slabs, "list all object caches");
add_debugger_command("cache_info", dump_cache_info,
"dump information about a specific cache");
add_debugger_command("slab_cache", dump_cache_info,
"dump information about a specific object cache");
}


@ -24,9 +24,8 @@ SmallObjectCache::Create(const char* name, size_t object_size,
SmallObjectCache* cache = new(buffer) SmallObjectCache();
if (cache->Init(name, object_size, alignment, maximum,
flags | CACHE_ALIGN_ON_SIZE, cookie, constructor, destructor,
reclaimer) != B_OK) {
if (cache->Init(name, object_size, alignment, maximum, flags, cookie,
constructor, destructor, reclaimer) != B_OK) {
cache->Delete();
return NULL;
}


@ -13,8 +13,12 @@
#include <stdio.h>
#include <string.h>
#include <algorithm>
#include <debug.h>
#include <heap.h>
#include <kernel.h> // for ROUNDUP
#include <slab/Slab.h>
#include <malloc.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
@ -38,9 +42,9 @@ static const size_t kNumBlockSizes = sizeof(kBlockSizes) / sizeof(size_t) - 1;
static object_cache* sBlockCaches[kNumBlockSizes];
static addr_t sBootStrapMemory;
static size_t sBootStrapMemorySize;
static size_t sUsedBootStrapMemory;
static addr_t sBootStrapMemory = 0;
static size_t sBootStrapMemorySize = 0;
static size_t sUsedBootStrapMemory = 0;
static int
@ -68,24 +72,33 @@ size_to_index(size_t size)
void*
block_alloc(size_t size, uint32 flags)
block_alloc(size_t size, size_t alignment, uint32 flags)
{
if (alignment > 8) {
// Make size >= alignment and a power of two. This is sufficient, since
// all of our object caches with power of two sizes are aligned. We may
// waste quite a bit of memory, but memalign() is very rarely used
// in the kernel and always with power of two size == alignment anyway.
ASSERT((alignment & (alignment - 1)) == 0);
while (alignment < size)
alignment <<= 1;
size = alignment;
// If we're not using an object cache, make sure that the memory
// manager knows it has to align the allocation.
if (size > kBlockSizes[kNumBlockSizes])
flags |= CACHE_ALIGN_ON_SIZE;
}
// allocate from the respective object cache, if any
int index = size_to_index(size);
if (index >= 0)
return object_cache_alloc(sBlockCaches[index], flags);
// the allocation is too large for our object caches -- create an area
if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0)
return NULL;
// the allocation is too large for our object caches -- ask the memory
// manager
void* block;
area_id area = create_area_etc(VMAddressSpace::KernelID(),
"alloc'ed block", &block, B_ANY_KERNEL_ADDRESS,
ROUNDUP(size, B_PAGE_SIZE), B_FULL_LOCK,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0,
(flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0 ? CREATE_AREA_DONT_WAIT : 0);
if (area < 0)
if (MemoryManager::AllocateRaw(size, flags, block) != B_OK)
return NULL;
return block;
@ -96,14 +109,31 @@ void*
block_alloc_early(size_t size)
{
int index = size_to_index(size);
if (index < 0)
return NULL;
if (sBlockCaches[index] != NULL)
if (index >= 0 && sBlockCaches[index] != NULL)
return object_cache_alloc(sBlockCaches[index], CACHE_DURING_BOOT);
// No object cache yet. Use the bootstrap memory. This allocation must
// never be freed!
if (size > SLAB_CHUNK_SIZE_SMALL) {
// This is a sufficiently large allocation -- just ask the memory
// manager directly.
void* block;
if (MemoryManager::AllocateRaw(size, 0, block) != B_OK)
return NULL;
return block;
}
// A small allocation, but no object cache yet. Use the bootstrap memory.
// This allocation must never be freed!
if (sBootStrapMemorySize - sUsedBootStrapMemory < size) {
// We need more memory.
void* block;
if (MemoryManager::AllocateRaw(SLAB_CHUNK_SIZE_SMALL, 0, block) != B_OK)
return NULL;
sBootStrapMemory = (addr_t)block;
sBootStrapMemorySize = SLAB_CHUNK_SIZE_SMALL;
sUsedBootStrapMemory = 0;
}
size_t neededSize = ROUNDUP(size, sizeof(double));
if (sUsedBootStrapMemory + neededSize > sBootStrapMemorySize)
return NULL;
@ -117,40 +147,41 @@ block_alloc_early(size_t size)
void
block_free(void* block, uint32 flags)
{
if (ObjectCache* cache = MemoryManager::CacheForAddress(block)) {
if (block == NULL)
return;
ObjectCache* cache = MemoryManager::FreeRawOrReturnCache(block, flags);
if (cache != NULL) {
// a regular small allocation
ASSERT(cache->object_size >= kBlockSizes[0]);
ASSERT(cache->object_size <= kBlockSizes[kNumBlockSizes - 1]);
ASSERT(cache == sBlockCaches[size_to_index(cache->object_size)]);
object_cache_free(cache, block, flags);
} else {
// a large allocation -- look up the area
VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
addressSpace->ReadLock();
VMArea* area = addressSpace->LookupArea((addr_t)block);
addressSpace->ReadUnlock();
if (area != NULL && (addr_t)block == area->Base())
delete_area(area->id);
else
panic("freeing unknown block %p from area %p", block, area);
}
}
void
block_allocator_init_boot(addr_t bootStrapBase, size_t bootStrapSize)
block_allocator_init_boot()
{
sBootStrapMemory = bootStrapBase;
sBootStrapMemorySize = bootStrapSize;
sUsedBootStrapMemory = 0;
for (int index = 0; kBlockSizes[index] != 0; index++) {
char name[32];
snprintf(name, sizeof(name), "block cache: %lu", kBlockSizes[index]);
sBlockCaches[index] = create_object_cache_etc(name, kBlockSizes[index],
0, 0, CACHE_DURING_BOOT, NULL, NULL, NULL, NULL);
uint32 flags = CACHE_DURING_BOOT;
size_t size = kBlockSizes[index];
// align the power of two objects to their size
if ((size & (size - 1)) == 0)
flags |= CACHE_ALIGN_ON_SIZE;
// For the larger allocation sizes disable the object depot, so we don't
// keep lot's of unused objects around.
if (size > 2048)
flags |= CACHE_NO_DEPOT;
sBlockCaches[index] = create_object_cache_etc(name, size, 0, 0, flags,
NULL, NULL, NULL, NULL);
if (sBlockCaches[index] == NULL)
panic("allocator: failed to init block cache");
}
@ -162,7 +193,87 @@ block_allocator_init_rest()
{
#ifdef TEST_ALL_CACHES_DURING_BOOT
for (int index = 0; kBlockSizes[index] != 0; index++) {
block_free(block_alloc(kBlockSizes[index] - sizeof(boundary_tag)));
block_free(block_alloc(kBlockSizes[index] - sizeof(boundary_tag)), 0,
0);
}
#endif
}
// #pragma mark - public API
#if USE_SLAB_ALLOCATOR_FOR_MALLOC
void*
memalign(size_t alignment, size_t size)
{
return block_alloc(size, alignment, 0);
}
void*
memalign_nogrow(size_t alignment, size_t size)
{
return block_alloc(size, alignment,
CACHE_DONT_WAIT_FOR_MEMORY | CACHE_DONT_LOCK_KERNEL_SPACE);
}
void*
malloc_nogrow(size_t size)
{
return block_alloc(size, 0,
CACHE_DONT_WAIT_FOR_MEMORY | CACHE_DONT_LOCK_KERNEL_SPACE);
}
void*
malloc(size_t size)
{
return block_alloc(size, 0, 0);
}
void
free(void* address)
{
block_free(address, 0);
}
void*
realloc(void* address, size_t newSize)
{
if (newSize == 0) {
block_free(address, 0);
return NULL;
}
if (address == NULL)
return block_alloc(newSize, 0, 0);
size_t oldSize;
ObjectCache* cache = MemoryManager::GetAllocationInfo(address, oldSize);
if (cache == NULL && oldSize == 0) {
panic("block_realloc(): allocation %p not known", address);
return NULL;
}
if (oldSize == newSize)
return address;
void* newBlock = block_alloc(newSize, 0, 0);
if (newBlock == NULL)
return NULL;
memcpy(newBlock, address, std::min(oldSize, newSize));
block_free(address, 0);
return newBlock;
}
#endif // USE_SLAB_ALLOCATOR_FOR_MALLOC


@ -11,6 +11,8 @@
#include <stddef.h>
#include <slab/Slab.h>
//#define TRACE_SLAB
#ifdef TRACE_SLAB
@ -26,16 +28,12 @@
struct ObjectCache;
void* slab_internal_alloc(size_t size, uint32 flags);
void slab_internal_free(void *_buffer, uint32 flags);
void request_memory_manager_maintenance();
void* block_alloc(size_t size, uint32 flags);
void* block_alloc(size_t size, size_t alignment, uint32 flags);
void* block_alloc_early(size_t size);
void block_free(void *block, uint32 flags);
void block_allocator_init_boot(addr_t bootStrapBase,
size_t bootStrapSize);
void block_free(void* block, uint32 flags);
void block_allocator_init_boot();
void block_allocator_init_rest();
@ -58,4 +56,21 @@ _push(Type*& head, Type* object)
}
static inline void*
slab_internal_alloc(size_t size, uint32 flags)
{
if (flags & CACHE_DURING_BOOT)
return block_alloc_early(size);
return block_alloc(size, 0, flags);
}
static inline void
slab_internal_free(void* buffer, uint32 flags)
{
block_free(buffer, flags);
}
#endif // SLAB_PRIVATE_H


@ -3223,17 +3223,14 @@ vm_init(kernel_args* args)
if (heapSize < 1024 * 1024)
panic("vm_init: go buy some RAM please.");
slab_init(args);
// map in the new heap and initialize it
addr_t heapBase = vm_allocate_early(args, heapSize, heapSize,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
TRACE(("heap at 0x%lx\n", heapBase));
heap_init(heapBase, heapSize);
size_t slabInitialSize = B_PAGE_SIZE;
addr_t slabInitialBase = vm_allocate_early(args, slabInitialSize,
slabInitialSize, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
slab_init(args, slabInitialBase, slabInitialSize);
// initialize the free page list and physical page mapper
vm_page_init(args);
@ -3262,11 +3259,6 @@ vm_init(kernel_args* args)
create_area("kernel heap", &address, B_EXACT_ADDRESS, heapSize,
B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
address = (void*)ROUNDDOWN(slabInitialBase, B_PAGE_SIZE);
create_area("initial slab space", &address, B_EXACT_ADDRESS,
slabInitialSize, B_ALREADY_WIRED, B_KERNEL_READ_AREA
| B_KERNEL_WRITE_AREA);
allocate_kernel_args(args);
create_preloaded_image_areas(&args->kernel_image);