slab allocator:

* Implemented a more elaborate raw memory allocation backend (MemoryManager).
  We allocate 8 MB areas whose pages we allocate and map only when needed. An
  area is divided into equally-sized chunks which form the basic units of
  allocation. We have areas with three possible chunk sizes (small, medium,
  large), which is basically what the ObjectCache implementations were using
  anyway (see the first sketch after this list).
* Added "uint32 flags" parameter to several of the slab allocator's object
  cache and object depot functions. E.g. object_depot_store() potentially wants
  to allocate memory for a magazine. But also in pure freeing functions it
  might eventually become useful to have those flags, since they could end up
  deleting an area, which might not be allowable in all situations. We should
  introduce specific flags to indicate that.
* Reworked the block allocator. Since the MemoryManager allocates block-aligned
  areas, maintains a hash table for lookup, and maps chunks to object caches,
  we can quickly find out which object cache a to-be-freed allocation belongs
  to and thus don't need the boundary tags anymore (see the third sketch after
  this list).
* Reworked the slab bootstrap process. We allocate from the initial area only
  when really necessary, i.e. when the object cache for the respective
  allocation size has not been created yet. A single page is thus sufficient
  (see the fourth sketch after this list).
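
  To make the MemoryManager point concrete, here is how a requested slab size
  is mapped onto the three chunk sizes. This condenses
  MemoryManager::AcceptableChunkSize() and the constants from the new
  MemoryManager.h; with 4 KB pages an area is 2048 * 4 KB = 8 MB:

      // Chunk/area sizes as defined in MemoryManager.h (4 KB pages assumed):
      //   SLAB_CHUNK_SIZE_SMALL  = B_PAGE_SIZE        (4 KB)
      //   SLAB_CHUNK_SIZE_MIDDLE = 16 * B_PAGE_SIZE   (64 KB)
      //   SLAB_CHUNK_SIZE_LARGE  = 128 * B_PAGE_SIZE  (512 KB)
      //   SLAB_AREA_SIZE         = 2048 * B_PAGE_SIZE (8 MB)
      size_t
      MemoryManager::AcceptableChunkSize(size_t size)
      {
          if (size <= SLAB_CHUNK_SIZE_SMALL)
              return SLAB_CHUNK_SIZE_SMALL;
          if (size <= SLAB_CHUNK_SIZE_MIDDLE)
              return SLAB_CHUNK_SIZE_MIDDLE;
          return SLAB_CHUNK_SIZE_LARGE;
      }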
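
  The flags threading is visible at the call sites changed below: a caller
  that must not block passes CACHE_DONT_SLEEP even when only freeing, while
  unconstrained callers simply pass 0. For example (taken from the x86
  translation map and block cache changes in this commit):

      // Freeing while holding locks in the unmap path:
      object_cache_free(gPageMappingsObjectCache, mapping, CACHE_DONT_SLEEP);

      // Freeing from a context that may block:
      object_cache_free(sBlockCache, block, 0);

      // Inside the slab code the flags are forwarded along the whole chain:
      // object_cache_free() passes them to object_depot_store() and, via the
      // depot's return_object callback, to ReturnObjectToSlab().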
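
  The lookup that replaces the boundary tags works roughly as follows (a
  condensed sketch of MemoryManager::CacheForAddress() and
  _ChunkIndexForAddress(); the area table read lock is omitted here):

      // The area is block-aligned, so rounding the address down yields its
      // base; the hash table maps the base to the Area, and the chunk index
      // within the area identifies the owning ObjectCache.
      addr_t areaBase = ROUNDDOWN((addr_t)address, SLAB_AREA_SIZE);
      Area* area = sAreaTable.Lookup(areaBase);
      if (area == NULL)
          return NULL;    // not a MemoryManager allocation
      uint32 chunkIndex
          = ((addr_t)address - area->firstUsableChunk) / area->chunkSize;
      return area->chunks[chunkIndex].cache;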
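
  The bootstrap fallback is only taken while the matching block cache does not
  exist yet; such allocations are never freed. Condensed from
  block_alloc_early() in this commit:

      int index = size_to_index(size);
      if (index < 0)
          return NULL;    // too large for the early allocator
      if (sBlockCaches[index] != NULL)
          return object_cache_alloc(sBlockCaches[index], CACHE_DURING_BOOT);

      // otherwise carve the allocation out of the single bootstrap page
      size_t neededSize = ROUNDUP(size, sizeof(double));
      if (sUsedBootStrapMemory + neededSize > sBootStrapMemorySize)
          return NULL;
      void* block = (void*)(sBootStrapMemory + sUsedBootStrapMemory);
      sUsedBootStrapMemory += neededSize;
      return block;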

other:
* vm_allocate_early(): Added a boolean "blockAlign" parameter. If true, the
  semantics are the same as for B_ANY_KERNEL_BLOCK_ADDRESS (see the first
  sketch after this list).
* Use an object cache for page mappings. This significantly reduces contention
  on the heap bin locks (see the second sketch after this list).
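
  The blockAlign handling in allocate_early_virtual() simply aligns the
  candidate base address to the allocation size, mirroring what
  B_ANY_KERNEL_BLOCK_ADDRESS does for areas (condensed from the vm.cpp changes
  below):

      addr_t base = blockAlign
          ? ROUNDUP(previousRangeEnd, size) : previousRangeEnd;
      if (base >= KERNEL_BASE && base < rangeStart
          && rangeStart - base >= size)
          return base;    // found a large enough, suitably aligned gap

      // The MemoryManager relies on this to get 8 MB areas aligned to 8 MB:
      Area* area = (Area*)vm_allocate_early(sKernelArgs, SLAB_AREA_SIZE,
          SLAB_AREA_SIZE, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, true);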
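
  The page mappings cache is created once in vm_init() with a reserve large
  enough to satisfy allocations in the page fault path, and then replaces the
  heap in map_page() (condensed from the vm.cpp changes below):

      // creation
      gPageMappingsObjectCache = create_object_cache_etc("page mappings",
          sizeof(vm_page_mapping), 0, 0, 0, NULL, NULL, NULL, NULL);
      object_cache_set_minimum_reserve(gPageMappingsObjectCache, 1024);

      // use in map_page(), replacing malloc_nogrow()/free()
      vm_page_mapping* mapping = (vm_page_mapping*)object_cache_alloc(
          gPageMappingsObjectCache, CACHE_DONT_SLEEP);
      // ... mapping is later released with:
      object_cache_free(gPageMappingsObjectCache, mapping, CACHE_DONT_SLEEP);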


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@35232 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold, 2010-01-21 23:10:52 +00:00
commit 86c794e5c1 (parent 5f679d1cd3)
28 changed files with 1111 additions and 420 deletions


@ -23,7 +23,7 @@ typedef struct object_depot {
void* cookie;
void (*return_object)(struct object_depot* depot, void* cookie,
void* object);
void* object, uint32 flags);
} object_depot;
@ -32,13 +32,14 @@ extern "C" {
#endif
status_t object_depot_init(object_depot* depot, uint32 flags, void* cookie,
void (*returnObject)(object_depot* depot, void* cookie, void* object));
void object_depot_destroy(object_depot* depot);
void (*returnObject)(object_depot* depot, void* cookie, void* object,
uint32 flags));
void object_depot_destroy(object_depot* depot, uint32 flags);
void* object_depot_obtain(object_depot* depot);
int object_depot_store(object_depot* depot, void* object);
int object_depot_store(object_depot* depot, void* object, uint32 flags);
void object_depot_make_empty(object_depot* depot);
void object_depot_make_empty(object_depot* depot, uint32 flags);
#ifdef __cplusplus
}


@ -52,7 +52,7 @@ status_t object_cache_set_minimum_reserve(object_cache* cache,
size_t objectCount);
void* object_cache_alloc(object_cache* cache, uint32 flags);
void object_cache_free(object_cache* cache, void* object);
void object_cache_free(object_cache* cache, void* object, uint32 flags);
status_t object_cache_reserve(object_cache* cache, size_t object_count,
uint32 flags);


@ -16,8 +16,9 @@
struct iovec;
struct kernel_args;
struct team;
struct ObjectCache;
struct system_memory_info;
struct team;
struct VMAddressSpace;
struct VMArea;
struct VMCache;
@ -31,6 +32,9 @@ struct vnode;
#define CREATE_AREA_DONT_CLEAR 0x04
extern struct ObjectCache* gPageMappingsObjectCache;
#ifdef __cplusplus
extern "C" {
#endif
@ -43,10 +47,11 @@ status_t vm_init_post_modules(struct kernel_args *args);
void vm_free_kernel_args(struct kernel_args *args);
void vm_free_unused_boot_loader_range(addr_t start, addr_t end);
addr_t vm_allocate_early(struct kernel_args *args, size_t virtualSize,
size_t physicalSize, uint32 attributes);
size_t physicalSize, uint32 attributes, bool blockAlign);
void slab_init(struct kernel_args *args, addr_t initialBase,
size_t initialSize);
void slab_init_post_area();
void slab_init_post_sem();
void slab_init_post_thread();


@ -732,7 +732,7 @@ free_data_header(data_header* header)
if (header != NULL)
atomic_add(&sAllocatedDataHeaderCount, -1);
#endif
object_cache_free(sDataNodeCache, header);
object_cache_free(sDataNodeCache, header, 0);
}
@ -743,7 +743,7 @@ free_net_buffer(net_buffer_private* buffer)
if (buffer != NULL)
atomic_add(&sAllocatedNetBufferCount, -1);
#endif
object_cache_free(sNetBufferCache, buffer);
object_cache_free(sNetBufferCache, buffer, 0);
}


@ -125,7 +125,7 @@ m_getcl(int how, short type, int flags)
return NULL;
if (construct_pkt_mbuf(how, memoryBuffer, type, flags) < 0) {
object_cache_free(sMBufCache, memoryBuffer);
object_cache_free(sMBufCache, memoryBuffer, 0);
return NULL;
}
@ -170,7 +170,7 @@ m_getjcl(int how, short type, int flags, int size)
return NULL;
construct_mbuf(memoryBuffer, type, flags);
if (construct_ext_sized_mbuf(memoryBuffer, how, size) < 0) {
object_cache_free(sMBufCache, memoryBuffer);
object_cache_free(sMBufCache, memoryBuffer, 0);
return NULL;
}
return memoryBuffer;
@ -225,9 +225,9 @@ mb_free_ext(struct mbuf *memoryBuffer)
else
panic("unknown type");
object_cache_free(cache, memoryBuffer->m_ext.ext_buf);
object_cache_free(cache, memoryBuffer->m_ext.ext_buf, 0);
memoryBuffer->m_ext.ext_buf = NULL;
object_cache_free(sMBufCache, memoryBuffer);
object_cache_free(sMBufCache, memoryBuffer, 0);
}
@ -239,7 +239,7 @@ m_free(struct mbuf *memoryBuffer)
if (memoryBuffer->m_flags & M_EXT)
mb_free_ext(memoryBuffer);
else
object_cache_free(sMBufCache, memoryBuffer);
object_cache_free(sMBufCache, memoryBuffer, 0);
return next;
}
@ -278,7 +278,7 @@ init_mbufs()
NULL, NULL, NULL);
if (sJumbo9ChunkCache == NULL)
goto clean;
sJumboPageSizeCache = create_object_cache("mbuf jumbo page size chunks",
sJumboPageSizeCache = create_object_cache("mbuf jumbo page size chunks",
MJUMPAGESIZE, 0, NULL, NULL, NULL);
if (sJumboPageSizeCache == NULL)
goto clean;


@ -251,7 +251,7 @@ generic_vm_physical_page_mapper_init(kernel_args *args,
// We reserve (ioSpaceChunkSize - B_PAGE_SIZE) bytes more, so that we
// can guarantee to align the base address to ioSpaceChunkSize.
sIOSpaceBase = vm_allocate_early(args,
sIOSpaceSize + ioSpaceChunkSize - B_PAGE_SIZE, 0, 0);
sIOSpaceSize + ioSpaceChunkSize - B_PAGE_SIZE, 0, 0, false);
if (sIOSpaceBase == 0) {
panic("generic_vm_physical_page_mapper_init(): Failed to reserve IO "
"space in virtual address space!");
@ -266,11 +266,11 @@ generic_vm_physical_page_mapper_init(kernel_args *args,
// allocate some space to hold physical page mapping info
paddr_desc = (paddr_chunk_desc *)vm_allocate_early(args,
sizeof(paddr_chunk_desc) * 1024, ~0L,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
num_virtual_chunks = sIOSpaceSize / sIOSpaceChunkSize;
virtual_pmappings = (paddr_chunk_desc **)vm_allocate_early(args,
sizeof(paddr_chunk_desc *) * num_virtual_chunks, ~0L,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
TRACE(("paddr_desc %p, virtual_pmappings %p"/*", iospace_pgtables %p"*/"\n",
paddr_desc, virtual_pmappings/*, iospace_pgtables*/));


@ -1268,7 +1268,7 @@ m68k_vm_translation_map_init(kernel_args *args)
iospace_pgtables = (page_table_entry *)vm_allocate_early(args,
B_PAGE_SIZE * (IOSPACE_SIZE / (B_PAGE_SIZE * NUM_PAGEENT_PER_TBL * NUM_PAGETBL_PER_PAGE)), ~0L,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
TRACE(("iospace_pgtables %p\n", iospace_pgtables));


@ -83,6 +83,7 @@
#include <boot/kernel_args.h>
#include <int.h>
#include <kernel.h>
#include <slab/Slab.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
@ -518,7 +519,7 @@ PPCVMTranslationMap::UnmapPage(VMArea* area, addr_t address)
locker.Unlock();
if (mapping != NULL)
free(mapping);
object_cache_free(gPageMappingsObjectCache, mapping, CACHE_DONT_SLEEP);
return B_OK;
}


@ -19,6 +19,7 @@
#include <heap.h>
#include <int.h>
#include <thread.h>
#include <slab/Slab.h>
#include <smp.h>
#include <util/AutoLock.h>
#include <util/queue.h>
@ -628,7 +629,7 @@ X86VMTranslationMap::UnmapPage(VMArea* area, addr_t address)
locker.Unlock();
if (mapping != NULL)
free(mapping);
object_cache_free(gPageMappingsObjectCache, mapping, CACHE_DONT_SLEEP);
return B_OK;
}
@ -738,7 +739,7 @@ X86VMTranslationMap::UnmapPages(VMArea* area, addr_t base, size_t size)
// free removed mappings
while (vm_page_mapping* mapping = queue.RemoveHead())
free(mapping);
object_cache_free(gPageMappingsObjectCache, mapping, CACHE_DONT_SLEEP);
}
@ -823,7 +824,7 @@ X86VMTranslationMap::UnmapArea(VMArea* area, bool deletingAddressSpace,
locker.Unlock();
while (vm_page_mapping* mapping = mappings.RemoveHead())
free(mapping);
object_cache_free(gPageMappingsObjectCache, mapping, CACHE_DONT_SLEEP);
}


@ -483,7 +483,7 @@ LargeMemoryPhysicalPageMapper::Init(kernel_args* args,
// We reserve more, so that we can guarantee to align the base address
// to page table ranges.
addr_t virtualBase = vm_allocate_early(args,
1024 * B_PAGE_SIZE + kPageTableAlignment - B_PAGE_SIZE, 0, 0);
1024 * B_PAGE_SIZE + kPageTableAlignment - B_PAGE_SIZE, 0, 0, false);
if (virtualBase == 0) {
panic("LargeMemoryPhysicalPageMapper::Init(): Failed to reserve "
"physical page pool space in virtual address space!");
@ -495,7 +495,7 @@ LargeMemoryPhysicalPageMapper::Init(kernel_args* args,
// allocate memory for the page table and data
size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
page_table_entry* pageTable = (page_table_entry*)vm_allocate_early(args,
areaSize, ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
areaSize, ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
// prepare the page table
x86_early_prepare_page_tables(pageTable, virtualBase,


@ -987,7 +987,7 @@ void
block_cache::Free(void* buffer)
{
if (buffer != NULL)
object_cache_free(buffer_cache, buffer);
object_cache_free(buffer_cache, buffer, 0);
}
@ -1025,7 +1025,7 @@ block_cache::FreeBlock(cached_block* block)
Free(block->compare);
#endif
object_cache_free(sBlockCache, block);
object_cache_free(sBlockCache, block, 0);
}
@ -1059,7 +1059,7 @@ block_cache::NewBlock(off_t blockNumber)
block->current_data = Allocate();
if (block->current_data == NULL) {
object_cache_free(sBlockCache, block);
object_cache_free(sBlockCache, block, 0);
return NULL;
}


@ -8,6 +8,7 @@
#include "HashedObjectCache.h"
#include "MemoryManager.h"
#include "slab_private.h"
@ -32,9 +33,9 @@ allocate_slab(uint32 flags)
static void
free_slab(slab* slab)
free_slab(slab* slab, uint32 flags)
{
slab_internal_free(slab);
slab_internal_free(slab, flags);
}
@ -67,15 +68,25 @@ HashedObjectCache::Create(const char* name, size_t object_size,
}
if ((flags & CACHE_LARGE_SLAB) != 0)
cache->slab_size = max_c(256 * B_PAGE_SIZE, 128 * object_size);
cache->slab_size = 128 * object_size;
else
cache->slab_size = max_c(16 * B_PAGE_SIZE, 8 * object_size);
cache->slab_size = 8 * object_size;
cache->slab_size = MemoryManager::AcceptableChunkSize(cache->slab_size);
cache->lower_boundary = __fls0(cache->object_size);
return cache;
}
void
HashedObjectCache::Delete()
{
this->~HashedObjectCache();
slab_internal_free(this, 0);
}
slab*
HashedObjectCache::CreateSlab(uint32 flags)
{
@ -92,24 +103,24 @@ HashedObjectCache::CreateSlab(uint32 flags)
return NULL;
void* pages;
if ((this->*allocate_pages)(&pages, flags) == B_OK) {
if (MemoryManager::Allocate(this, flags, pages) == B_OK) {
if (InitSlab(slab, pages, slab_size, flags))
return slab;
(this->*free_pages)(pages);
MemoryManager::Free(pages, flags);
}
free_slab(slab);
free_slab(slab, flags);
return NULL;
}
void
HashedObjectCache::ReturnSlab(slab* slab)
HashedObjectCache::ReturnSlab(slab* slab, uint32 flags)
{
UninitSlab(slab);
(this->*free_pages)(slab->pages);
free_slab(slab);
MemoryManager::Free(slab->pages, flags);
free_slab(slab, flags);
}
@ -137,12 +148,16 @@ HashedObjectCache::PrepareObject(slab* source, void* object, uint32 flags)
link->parent = source;
hash_table.Insert(link);
// TODO: This might resize the table! Currently it uses the heap, so
// we won't possibly reenter and deadlock on our own cache. We do ignore
// the flags, though!
// TODO: We don't pre-init the table, so Insert() can fail!
return B_OK;
}
void
HashedObjectCache::UnprepareObject(slab* source, void* object)
HashedObjectCache::UnprepareObject(slab* source, void* object, uint32 flags)
{
Link* link = hash_table.Lookup(object);
if (link == NULL) {
@ -156,7 +171,7 @@ HashedObjectCache::UnprepareObject(slab* source, void* object)
}
hash_table.Remove(link);
_FreeLink(link);
_FreeLink(link, flags);
}
@ -169,7 +184,7 @@ HashedObjectCache::_AllocateLink(uint32 flags)
/*static*/ inline void
HashedObjectCache::_FreeLink(HashedObjectCache::Link* link)
HashedObjectCache::_FreeLink(HashedObjectCache::Link* link, uint32 flags)
{
slab_internal_free(link);
slab_internal_free(link, flags);
}


@ -22,14 +22,16 @@ struct HashedObjectCache : ObjectCache {
object_cache_constructor constructor,
object_cache_destructor destructor,
object_cache_reclaimer reclaimer);
virtual void Delete();
virtual slab* CreateSlab(uint32 flags);
virtual void ReturnSlab(slab* slab);
virtual void ReturnSlab(slab* slab, uint32 flags);
virtual slab* ObjectSlab(void* object) const;
virtual status_t PrepareObject(slab* source, void* object,
uint32 flags);
virtual void UnprepareObject(slab* source, void* object);
virtual void UnprepareObject(slab* source, void* object,
uint32 flags);
private:
struct Link {
@ -85,7 +87,8 @@ private:
private:
static Link* _AllocateLink(uint32 flags);
static void _FreeLink(HashedObjectCache::Link* link);
static void _FreeLink(HashedObjectCache::Link* link,
uint32 flags);
private:
HashTable hash_table;


@ -4,6 +4,7 @@ SubDir HAIKU_TOP src system kernel slab ;
KernelMergeObject kernel_slab.o :
allocator.cpp
HashedObjectCache.cpp
MemoryManager.cpp
ObjectCache.cpp
ObjectDepot.cpp
Slab.cpp


@ -0,0 +1,668 @@
/*
* Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
* Distributed under the terms of the MIT License.
*/
#include "MemoryManager.h"
#include <algorithm>
#include <debug.h>
#include <kernel.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMArea.h>
#include <vm/VMCache.h>
#include <vm/VMTranslationMap.h>
#include "ObjectCache.h"
#include "slab_private.h"
//#define TRACE_MEMORY_MANAGER
#ifdef TRACE_MEMORY_MANAGER
# define TRACE(x...) dprintf(x)
#else
# define TRACE(x...) do {} while (false)
#endif
static const char* const kSlabAreaName = "slab area";
static void* sAreaTableBuffer[1024];
mutex MemoryManager::sLock;
rw_lock MemoryManager::sAreaTableLock;
kernel_args* MemoryManager::sKernelArgs;
MemoryManager::AreaPool MemoryManager::sSmallChunkAreas;
MemoryManager::AreaPool MemoryManager::sMiddleChunkAreas;
MemoryManager::AreaPool MemoryManager::sLargeChunkAreas;
MemoryManager::AreaTable MemoryManager::sAreaTable;
MemoryManager::Area* MemoryManager::sFreeAreas;
MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryCanWait;
MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryDontWait;
/*static*/ void
MemoryManager::Init(kernel_args* args)
{
mutex_init(&sLock, "slab memory manager");
rw_lock_init(&sAreaTableLock, "slab memory manager area table");
sKernelArgs = args;
new(&sSmallChunkAreas) AreaPool;
new(&sMiddleChunkAreas) AreaPool;
new(&sLargeChunkAreas) AreaPool;
sSmallChunkAreas.chunkSize = SLAB_CHUNK_SIZE_SMALL;
sMiddleChunkAreas.chunkSize = SLAB_CHUNK_SIZE_MIDDLE;
sLargeChunkAreas.chunkSize = SLAB_CHUNK_SIZE_LARGE;
new(&sAreaTable) AreaTable;
sAreaTable.Resize(sAreaTableBuffer, sizeof(sAreaTableBuffer), true);
// A bit hacky: The table now owns the memory. Since we never resize or
// free it, that's not a problem, though.
sFreeAreas = NULL;
}
/*static*/ void
MemoryManager::InitPostArea()
{
sKernelArgs = NULL;
// Convert all areas to actual areas. This loop might look a bit weird, but
// is necessary since creating the actual area involves memory allocations,
// which in turn can change the situation.
while (true) {
if (!_ConvertEarlyAreas(sSmallChunkAreas.partialAreas)
&& !_ConvertEarlyAreas(sSmallChunkAreas.fullAreas)
&& !_ConvertEarlyAreas(sMiddleChunkAreas.partialAreas)
&& !_ConvertEarlyAreas(sMiddleChunkAreas.fullAreas)
&& !_ConvertEarlyAreas(sLargeChunkAreas.partialAreas)
&& !_ConvertEarlyAreas(sLargeChunkAreas.fullAreas)) {
break;
}
}
// just "leak" the free areas -- the VM will automatically free all
// unclaimed memory
sFreeAreas = NULL;
add_debugger_command_etc("slab_area", &_DumpArea,
"Dump information on a given slab area",
"<area>\n"
"Dump information on a given slab area specified by its base "
"address.\n", 0);
add_debugger_command_etc("slab_areas", &_DumpAreas,
"List all slab areas",
"\n"
"Lists all slab areas.\n", 0);
}
/*static*/ status_t
MemoryManager::Allocate(ObjectCache* cache, uint32 flags, void*& _pages)
{
// TODO: Support CACHE_UNLOCKED_PAGES!
size_t chunkSize = cache->slab_size;
TRACE("MemoryManager::Allocate(%p, %#" B_PRIx32 "): chunkSize: %"
B_PRIuSIZE "\n", cache, flags, chunkSize);
// get the right area pool
AreaPool* areaPool = _AreaPoolFor(chunkSize);
if (areaPool == NULL) {
panic("Invalid slab size: %" B_PRIuSIZE, chunkSize);
return B_BAD_VALUE;
}
MutexLocker locker(sLock);
// get an area
Area* area;
status_t error = _GetPartialArea(areaPool, flags, area);
if (error != B_OK)
return error;
// allocate a chunk
bool chunkMapped = false;
Chunk* chunk;
if (area->mappedFreeChunks != NULL) {
chunk = _pop(area->mappedFreeChunks);
chunkMapped = true;
} else
chunk = _pop(area->unmappedFreeChunks);
if (++area->usedChunkCount == area->chunkCount) {
areaPool->partialAreas.Remove(area);
areaPool->fullAreas.Add(area);
}
// If the chunk is not mapped yet, do it now.
addr_t chunkAddress = _ChunkAddress(area, chunk);
if (!chunkMapped) {
locker.Unlock();
error = _MapChunk(area->vmArea, chunkAddress, chunkSize, 0, flags);
locker.Lock();
if (error != B_OK) {
// something failed -- free the chunk
_FreeChunk(areaPool, area, chunk, chunkAddress, true, flags);
return error;
}
}
chunk->cache = cache;
_pages = (void*)chunkAddress;
TRACE("MemoryManager::Allocate() done: %p (chunk %p)\n", _pages, chunk);
return B_OK;
}
/*static*/ void
MemoryManager::Free(void* pages, uint32 flags)
{
TRACE("MemoryManager::Free(%p, %#" B_PRIx32 ")\n", pages, flags);
// get the area and the pool
Area* area = (Area*)ROUNDDOWN((addr_t)pages, SLAB_AREA_SIZE);
AreaPool* areaPool = _AreaPoolFor(area->chunkSize);
if (areaPool == NULL) {
panic("Invalid area: %p", area);
return;
}
// get the chunk
uint16 chunkIndex = _ChunkIndexForAddress(area, (addr_t)pages);
ASSERT(chunkIndex < area->chunkCount);
ASSERT((addr_t)pages % area->chunkSize == 0);
Chunk* chunk = &area->chunks[chunkIndex];
// and free it
MutexLocker locker(sLock);
_FreeChunk(areaPool, area, chunk, (addr_t)pages, false, flags);
}
/*static*/ size_t
MemoryManager::AcceptableChunkSize(size_t size)
{
if (size <= SLAB_CHUNK_SIZE_SMALL)
return SLAB_CHUNK_SIZE_SMALL;
if (size <= SLAB_CHUNK_SIZE_MIDDLE)
return SLAB_CHUNK_SIZE_MIDDLE;
return SLAB_CHUNK_SIZE_LARGE;
}
/*static*/ ObjectCache*
MemoryManager::CacheForAddress(void* address)
{
// get the area
addr_t areaBase = ROUNDDOWN((addr_t)address, SLAB_AREA_SIZE);
ReadLocker readLocker(sAreaTableLock);
Area* area = sAreaTable.Lookup(areaBase);
readLocker.Unlock();
if (area == NULL)
return NULL;
// get the chunk
uint16 chunkIndex = _ChunkIndexForAddress(area, (addr_t)address);
ASSERT(chunkIndex < area->chunkCount);
return area->chunks[chunkIndex].cache;
}
/*static*/ MemoryManager::AreaPool*
MemoryManager::_AreaPoolFor(size_t chunkSize)
{
if (chunkSize == SLAB_CHUNK_SIZE_SMALL)
return &sSmallChunkAreas;
if (chunkSize == SLAB_CHUNK_SIZE_MIDDLE)
return &sMiddleChunkAreas;
if (chunkSize == SLAB_CHUNK_SIZE_LARGE)
return &sLargeChunkAreas;
return NULL;
}
/*static*/ status_t
MemoryManager::_GetPartialArea(AreaPool* areaPool, uint32 flags, Area*& _area)
{
while (true) {
Area* area = areaPool->partialAreas.Head();
if (area != NULL) {
_area = area;
return B_OK;
}
// We need to allocate a new area. Wait, if someone else is trying the
// same.
AllocationEntry* allocationEntry = NULL;
if (sAllocationEntryDontWait != NULL) {
allocationEntry = sAllocationEntryDontWait;
} else if (sAllocationEntryCanWait != NULL
&& (flags & CACHE_DONT_SLEEP) == 0) {
allocationEntry = sAllocationEntryCanWait;
} else
break;
ConditionVariableEntry entry;
allocationEntry->condition.Add(&entry);
mutex_unlock(&sLock);
entry.Wait();
mutex_lock(&sLock);
}
// prepare the allocation entry others can wait on
AllocationEntry*& allocationEntry = (flags & CACHE_DONT_SLEEP) != 0
? sAllocationEntryDontWait : sAllocationEntryCanWait;
AllocationEntry myResizeEntry;
allocationEntry = &myResizeEntry;
allocationEntry->condition.Init(areaPool, "wait for slab area");
allocationEntry->thread = find_thread(NULL);
Area* area;
status_t error = _AllocateArea(areaPool->chunkSize, flags, area);
allocationEntry->condition.NotifyAll();
allocationEntry = NULL;
if (error != B_OK)
return error;
areaPool->partialAreas.Add(area);
// TODO: If something was freed in the meantime, we might rather
// want to delete the area again. Alternatively we could keep
// one global free area.
_area = area;
return B_OK;
}
/*static*/ status_t
MemoryManager::_AllocateArea(size_t chunkSize, uint32 flags, Area*& _area)
{
// TODO: Support reusing free areas!
TRACE("MemoryManager::_AllocateArea(%" B_PRIuSIZE ", %#" B_PRIx32 ")\n",
chunkSize, flags);
if ((flags & CACHE_DONT_SLEEP) != 0)
return B_WOULD_BLOCK;
// TODO: Support CACHE_DONT_SLEEP for real! We already consider it in
// most cases below, but not for vm_create_null_area().
mutex_unlock(&sLock);
uint32 chunkCount = (SLAB_AREA_SIZE - sizeof(Area))
/ (chunkSize + sizeof(Chunk));
size_t adminMemory = sizeof(Area) + chunkCount * sizeof(Chunk);
adminMemory = ROUNDUP(adminMemory, B_PAGE_SIZE);
size_t pagesNeededToMap = 0;
Area* area;
VMArea* vmArea = NULL;
if (sKernelArgs == NULL) {
// create an area
area_id areaID = vm_create_null_area(B_SYSTEM_TEAM, kSlabAreaName,
(void**)&area, B_ANY_KERNEL_BLOCK_ADDRESS, SLAB_AREA_SIZE);
if (areaID < 0) {
mutex_lock(&sLock);
return areaID;
}
// map the memory for the administrative structure
VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
VMTranslationMap* translationMap = addressSpace->TranslationMap();
pagesNeededToMap = translationMap->MaxPagesNeededToMap((addr_t)area,
(addr_t)area + SLAB_AREA_SIZE - 1);
vmArea = VMAreaHash::Lookup(areaID);
status_t error = _MapChunk(vmArea, (addr_t)area, adminMemory,
pagesNeededToMap, flags);
if (error != B_OK) {
delete_area(areaID);
mutex_lock(&sLock);
return error;
}
TRACE("MemoryManager::_AllocateArea(): allocated area %p (%" B_PRId32
")\n", area, areaID);
} else {
// no areas yet -- allocate raw memory
area = (Area*)vm_allocate_early(sKernelArgs, SLAB_AREA_SIZE,
SLAB_AREA_SIZE, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, true);
if (area == NULL) {
mutex_lock(&sLock);
return B_NO_MEMORY;
}
TRACE("MemoryManager::_AllocateArea(): allocated early area %p\n",
area);
}
// init the area structure
area->vmArea = vmArea;
area->chunkSize = chunkSize;
area->reserved_memory_for_mapping = pagesNeededToMap * B_PAGE_SIZE;
area->firstUsableChunk = (addr_t)area + ROUNDUP(adminMemory, chunkSize);
area->chunkCount = chunkCount;
area->usedChunkCount = 0;
area->mappedFreeChunks = NULL;
area->unmappedFreeChunks = NULL;
for (uint32 i = 0; i < chunkCount; i++)
_push(area->unmappedFreeChunks, area->chunks + i);
// in the early boot process everything is mapped already
if (sKernelArgs != NULL)
std::swap(area->mappedFreeChunks, area->unmappedFreeChunks);
// add the area to the hash table
WriteLocker writeLocker(sAreaTableLock);
sAreaTable.InsertUnchecked(area);
writeLocker.Unlock();
mutex_lock(&sLock);
_area = area;
return B_OK;
}
/*static*/ void
MemoryManager::_FreeArea(Area* area, uint32 flags)
{
TRACE("MemoryManager::_FreeArea(%p, %#" B_PRIx32 ")\n", area, flags);
ASSERT(area->usedChunkCount == 0);
// remove the area from the hash table
WriteLocker writeLocker(sAreaTableLock);
sAreaTable.RemoveUnchecked(area);
writeLocker.Unlock();
if (area->vmArea == NULL) {
_push(sFreeAreas, area);
return;
}
// TODO: Do we need to handle CACHE_DONT_SLEEP here? Is delete_area()
// problematic?
mutex_unlock(&sLock);
size_t reservedMemory = area->reserved_memory_for_mapping;
// count mapped free chunks
for (Chunk* chunk = area->mappedFreeChunks; chunk != NULL;
chunk = chunk->next) {
reservedMemory += area->chunkSize;
}
delete_area(area->vmArea->id);
vm_unreserve_memory(reservedMemory);
mutex_lock(&sLock);
}
/*static*/ void
MemoryManager::_FreeChunk(AreaPool* areaPool, Area* area, Chunk* chunk,
addr_t chunkAddress, bool alreadyUnmapped, uint32 flags)
{
// unmap the chunk
if (!alreadyUnmapped) {
mutex_unlock(&sLock);
alreadyUnmapped = _UnmapChunk(area->vmArea, chunkAddress,
area->chunkSize, flags) == B_OK;
mutex_lock(&sLock);
}
if (alreadyUnmapped)
_push(area->unmappedFreeChunks, chunk);
else
_push(area->mappedFreeChunks, chunk);
// free the area, if it is unused now
ASSERT(area->usedChunkCount > 0);
if (--area->usedChunkCount == 0) {
areaPool->partialAreas.Remove(area);
_FreeArea(area, flags);
}
}
/*static*/ status_t
MemoryManager::_MapChunk(VMArea* vmArea, addr_t address, size_t size,
size_t reserveAdditionalMemory, uint32 flags)
{
TRACE("MemoryManager::_MapChunk(%p, %#" B_PRIxADDR ", %#" B_PRIxSIZE
")\n", vmArea, address, size);
VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
VMTranslationMap* translationMap = addressSpace->TranslationMap();
// reserve memory for the chunk
size_t reservedMemory = size + reserveAdditionalMemory;
status_t error = vm_try_reserve_memory(size,
(flags & CACHE_DONT_SLEEP) != 0 ? 0 : 1000000);
if (error != B_OK)
return error;
// reserve the pages we need now
size_t reservedPages = size / B_PAGE_SIZE
+ translationMap->MaxPagesNeededToMap(address, address + size - 1);
if ((flags & CACHE_DONT_SLEEP) != 0) {
if (!vm_page_try_reserve_pages(reservedPages)) {
vm_unreserve_memory(reservedMemory);
return B_WOULD_BLOCK;
}
} else
vm_page_reserve_pages(reservedPages);
VMCache* cache = vm_area_get_locked_cache(vmArea);
// map the pages
translationMap->Lock();
addr_t areaOffset = address - vmArea->Base();
addr_t endAreaOffset = areaOffset + size;
for (size_t offset = areaOffset; offset < endAreaOffset;
offset += B_PAGE_SIZE) {
vm_page* page = vm_page_allocate_page(PAGE_STATE_FREE);
cache->InsertPage(page, offset);
vm_page_set_state(page, PAGE_STATE_WIRED);
page->wired_count++;
atomic_add(&gMappedPagesCount, 1);
DEBUG_PAGE_ACCESS_END(page);
translationMap->Map(vmArea->Base() + offset,
page->physical_page_number * B_PAGE_SIZE,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
}
translationMap->Unlock();
cache->ReleaseRefAndUnlock();
vm_page_unreserve_pages(reservedPages);
return B_OK;
}
/*static*/ status_t
MemoryManager::_UnmapChunk(VMArea* vmArea, addr_t address, size_t size,
uint32 flags)
{
if (vmArea == NULL)
return B_ERROR;
TRACE("MemoryManager::_UnmapChunk(%p, %#" B_PRIxADDR ", %#" B_PRIxSIZE
")\n", vmArea, address, size);
VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
VMTranslationMap* translationMap = addressSpace->TranslationMap();
VMCache* cache = vm_area_get_locked_cache(vmArea);
// unmap the pages
translationMap->Lock();
translationMap->Unmap(address, address + size - 1);
atomic_add(&gMappedPagesCount, -(size / B_PAGE_SIZE));
translationMap->Unlock();
// free the pages
addr_t areaPageOffset = (address - vmArea->Base()) / B_PAGE_SIZE;
addr_t areaPageEndOffset = areaPageOffset + size / B_PAGE_SIZE;
VMCachePagesTree::Iterator it = cache->pages.GetIterator(
areaPageOffset, true, true);
while (vm_page* page = it.Next()) {
if (page->cache_offset >= areaPageEndOffset)
break;
DEBUG_PAGE_ACCESS_START(page);
page->wired_count--;
cache->RemovePage(page);
// the iterator is remove-safe
vm_page_free(cache, page);
}
cache->ReleaseRefAndUnlock();
vm_unreserve_memory(size);
return B_OK;
}
/*static*/ bool
MemoryManager::_ConvertEarlyAreas(AreaList& areas)
{
for (AreaList::Iterator it = areas.GetIterator();
Area* area = it.Next();) {
if (area->vmArea != NULL) {
// unmap mapped chunks
while (area->mappedFreeChunks != NULL) {
Chunk* chunk = _pop(area->mappedFreeChunks);
_UnmapChunk(area->vmArea, _ChunkAddress(area, chunk),
area->chunkSize, 0);
_push(area->unmappedFreeChunks, chunk);
}
continue;
}
void* address = area;
area_id areaID = create_area(kSlabAreaName, &address, B_EXACT_ADDRESS,
SLAB_AREA_SIZE, B_ALREADY_WIRED,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (areaID < 0)
panic("out of memory");
area->vmArea = VMAreaHash::Lookup(areaID);
return true;
}
return false;
}
/*static*/ int
MemoryManager::_DumpArea(int argc, char** argv)
{
if (argc != 2) {
print_debugger_command_usage(argv[0]);
return 0;
}
uint64 address;
if (!evaluate_debug_expression(argv[1], &address, false))
return 0;
address = ROUNDDOWN(address, SLAB_AREA_SIZE);
Area* area = (Area*)(addr_t)address;
kprintf("chunk base cache object size cache name\n");
// Get the last chunk in each of the free lists. This allows us to easily
// identify free chunks, since besides these two all other free chunks
// have a Chunk::next pointing to another chunk.
Chunk* lastMappedFree = area->mappedFreeChunks;
if (lastMappedFree != NULL) {
while (lastMappedFree->next != NULL)
lastMappedFree = lastMappedFree->next;
}
Chunk* lastUnmappedFree = area->unmappedFreeChunks;
if (lastUnmappedFree != NULL) {
while (lastUnmappedFree->next != NULL)
lastUnmappedFree = lastUnmappedFree->next;
}
for (uint32 i = 0; i < area->chunkCount; i++) {
Chunk* chunk = area->chunks + i;
if (chunk == lastMappedFree || chunk == lastUnmappedFree)
continue;
if (chunk->next >= area->chunks
&& chunk->next < area->chunks + area->chunkCount) {
continue;
}
ObjectCache* cache = chunk->cache;
kprintf("%5" B_PRIu32 " %p %p %11" B_PRIuSIZE " %s\n", i,
(void*)_ChunkAddress(area, chunk), cache,
cache != NULL ? cache->object_size : 0,
cache != NULL ? cache->name : "");
}
return 0;
}
/*static*/ int
MemoryManager::_DumpAreas(int argc, char** argv)
{
kprintf(" base area chunk size count used mapped free\n");
for (AreaTable::Iterator it = sAreaTable.GetIterator();
Area* area = it.Next();) {
// count the mapped free chunks
int mappedFreeChunks = 0;
for (Chunk* chunk = area->mappedFreeChunks; chunk != NULL;
chunk = chunk->next) {
mappedFreeChunks++;
}
kprintf("%p %p %10" B_PRIuSIZE " %5u %5u %11d\n",
area, area->vmArea, area->chunkSize, area->chunkCount,
area->usedChunkCount, mappedFreeChunks);
}
return 0;
}


@ -0,0 +1,159 @@
/*
* Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
* Distributed under the terms of the MIT License.
*/
#ifndef MEMORY_MANAGER_H
#define MEMORY_MANAGER_H
#include <KernelExport.h>
#include <condition_variable.h>
#include <lock.h>
#include <util/DoublyLinkedList.h>
#include <util/OpenHashTable.h>
struct kernel_args;
struct ObjectCache;
struct VMArea;
#define SLAB_CHUNK_SIZE_SMALL B_PAGE_SIZE
#define SLAB_CHUNK_SIZE_MIDDLE 16 * B_PAGE_SIZE
#define SLAB_CHUNK_SIZE_LARGE 128 * B_PAGE_SIZE
#define SLAB_AREA_SIZE 2048 * B_PAGE_SIZE
// TODO: These sizes have been chosen with 4 KB pages is mind.
class MemoryManager {
public:
static void Init(kernel_args* args);
static void InitPostArea();
static status_t Allocate(ObjectCache* cache, uint32 flags,
void*& _pages);
static void Free(void* pages, uint32 flags);
static size_t AcceptableChunkSize(size_t size);
static ObjectCache* CacheForAddress(void* address);
private:
struct Chunk {
union {
Chunk* next;
ObjectCache* cache;
};
};
struct Area : DoublyLinkedListLinkImpl<Area> {
Area* next;
VMArea* vmArea;
size_t chunkSize;
size_t reserved_memory_for_mapping;
addr_t firstUsableChunk;
uint16 chunkCount;
uint16 usedChunkCount;
Chunk* mappedFreeChunks;
Chunk* unmappedFreeChunks;
Chunk chunks[0];
};
typedef DoublyLinkedList<Area> AreaList;
struct AreaPool {
AreaList partialAreas;
AreaList fullAreas;
size_t chunkSize;
};
struct AreaHashDefinition {
typedef addr_t KeyType;
typedef Area ValueType;
size_t HashKey(addr_t key) const
{
return key / SLAB_AREA_SIZE;
}
size_t Hash(const Area* value) const
{
return HashKey((addr_t)value);
}
bool Compare(addr_t key, const Area* value) const
{
return key == (addr_t)value;
}
Area*& GetLink(Area* value) const
{
return value->next;
}
};
typedef BOpenHashTable<AreaHashDefinition> AreaTable;
struct AllocationEntry {
ConditionVariable condition;
thread_id thread;
};
private:
static AreaPool* _AreaPoolFor(size_t chunkSize);
static status_t _GetPartialArea(AreaPool* areaPool,
uint32 flags, Area*& _area);
static status_t _AllocateArea(size_t chunkSize, uint32 flags,
Area*& _area);
static void _FreeArea(Area* area, uint32 flags);
static void _FreeChunk(AreaPool* areaPool, Area* area,
Chunk* chunk, addr_t chunkAddress,
bool alreadyUnmapped, uint32 flags);
static status_t _MapChunk(VMArea* vmArea, addr_t address,
size_t size, size_t reserveAdditionalMemory,
uint32 flags);
static status_t _UnmapChunk(VMArea* vmArea,addr_t address,
size_t size, uint32 flags);
static bool _ConvertEarlyAreas(AreaList& areas);
static uint32 _ChunkIndexForAddress(const Area* area,
addr_t address);
static addr_t _ChunkAddress(const Area* area,
const Chunk* chunk);
static int _DumpArea(int argc, char** argv);
static int _DumpAreas(int argc, char** argv);
private:
static mutex sLock;
static rw_lock sAreaTableLock;
static kernel_args* sKernelArgs;
static AreaPool sSmallChunkAreas;
static AreaPool sMiddleChunkAreas;
static AreaPool sLargeChunkAreas;
static AreaTable sAreaTable;
static Area* sFreeAreas;
static AllocationEntry* sAllocationEntryCanWait;
static AllocationEntry* sAllocationEntryDontWait;
};
/*static*/ inline uint32
MemoryManager::_ChunkIndexForAddress(const Area* area, addr_t address)
{
return (address - area->firstUsableChunk) / area->chunkSize;
}
/*static*/ inline addr_t
MemoryManager::_ChunkAddress(const Area* area, const Chunk* chunk)
{
return area->firstUsableChunk + (chunk - area->chunks) * area->chunkSize;
}
#endif // MEMORY_MANAGER_H


@ -19,27 +19,15 @@
static const size_t kCacheColorPeriod = 8;
kernel_args* ObjectCache::sKernelArgs = NULL;
static void
object_cache_commit_slab(ObjectCache* cache, slab* slab)
{
void* pages = (void*)ROUNDDOWN((addr_t)slab->pages, B_PAGE_SIZE);
if (create_area(cache->name, &pages, B_EXACT_ADDRESS, cache->slab_size,
B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA) < 0)
panic("failed to create_area()");
}
static void
object_cache_return_object_wrapper(object_depot* depot, void* cookie,
void* object)
void* object, uint32 flags)
{
ObjectCache* cache = (ObjectCache*)cookie;
MutexLocker _(cache->lock);
cache->ReturnObjectToSlab(cache->ObjectSlab(object), object);
cache->ReturnObjectToSlab(cache->ObjectSlab(object), object, flags);
}
@ -106,49 +94,10 @@ ObjectCache::Init(const char* name, size_t objectSize,
this->destructor = destructor;
this->reclaimer = reclaimer;
if (this->flags & CACHE_DURING_BOOT) {
allocate_pages = &ObjectCache::EarlyAllocatePages;
free_pages = &ObjectCache::EarlyFreePages;
} else {
allocate_pages = &ObjectCache::AllocatePages;
free_pages = &ObjectCache::FreePages;
}
return B_OK;
}
void
ObjectCache::InitPostArea()
{
if (allocate_pages != &ObjectCache::EarlyAllocatePages)
return;
SlabList::Iterator it = full.GetIterator();
while (it.HasNext())
object_cache_commit_slab(this, it.Next());
it = partial.GetIterator();
while (it.HasNext())
object_cache_commit_slab(this, it.Next());
it = empty.GetIterator();
while (it.HasNext())
object_cache_commit_slab(this, it.Next());
allocate_pages = &ObjectCache::AllocatePages;
free_pages = &ObjectCache::FreePages;
}
void
ObjectCache::Delete()
{
this->~ObjectCache();
slab_internal_free(this);
}
slab*
ObjectCache::InitSlab(slab* slab, void* pages, size_t byteCount, uint32 flags)
{
@ -186,13 +135,13 @@ ObjectCache::InitSlab(slab* slab, void* pages, size_t byteCount, uint32 flags)
if (status < B_OK) {
if (!failedOnFirst)
UnprepareObject(slab, data);
UnprepareObject(slab, data, flags);
data = ((uint8*)pages) + slab->offset;
for (size_t j = 0; j < i; j++) {
if (destructor)
destructor(cookie, data);
UnprepareObject(slab, data);
UnprepareObject(slab, data, flags);
data += object_size;
}
@ -230,7 +179,7 @@ ObjectCache::UninitSlab(slab* slab)
for (size_t i = 0; i < slab->size; i++) {
if (destructor)
destructor(cookie, data);
UnprepareObject(slab, data);
UnprepareObject(slab, data, flags);
data += object_size;
}
}
@ -244,13 +193,13 @@ ObjectCache::PrepareObject(slab* source, void* object, uint32 flags)
void
ObjectCache::UnprepareObject(slab* source, void* object)
ObjectCache::UnprepareObject(slab* source, void* object, uint32 flags)
{
}
void
ObjectCache::ReturnObjectToSlab(slab* source, void* object)
ObjectCache::ReturnObjectToSlab(slab* source, void* object, uint32 flags)
{
if (source == NULL) {
panic("object_cache: free'd object has no slab");
@ -280,102 +229,10 @@ ObjectCache::ReturnObjectToSlab(slab* source, void* object)
empty_count++;
empty.Add(source);
} else {
ReturnSlab(source);
ReturnSlab(source, flags);
}
} else if (source->count == 1) {
full.Remove(source);
partial.Add(source);
}
}
/*static*/ void
ObjectCache::SetKernelArgs(kernel_args* args)
{
sKernelArgs = args;
}
status_t
ObjectCache::AllocatePages(void** pages, uint32 flags)
{
TRACE_CACHE(cache, "allocate pages (%lu, 0x0%lx)", slab_size, flags);
uint32 lock = B_FULL_LOCK;
if (this->flags & CACHE_UNLOCKED_PAGES)
lock = B_NO_LOCK;
uint32 addressSpec = B_ANY_KERNEL_ADDRESS;
if ((this->flags & CACHE_ALIGN_ON_SIZE) != 0
&& slab_size != B_PAGE_SIZE)
addressSpec = B_ANY_KERNEL_BLOCK_ADDRESS;
Unlock();
// if we are allocating, it is because we need the pages immediatly
// so we lock them. when moving the slab to the empty list we should
// unlock them, and lock them again when getting one from the empty list.
area_id areaId = create_area_etc(VMAddressSpace::KernelID(),
name, pages, addressSpec, slab_size, lock,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0,
(flags & CACHE_DONT_SLEEP) != 0 ? CREATE_AREA_DONT_WAIT : 0);
Lock();
if (areaId < 0)
return areaId;
usage += slab_size;
TRACE_CACHE(this, " ... = { %ld, %p }", areaId, *pages);
return B_OK;
}
void
ObjectCache::FreePages(void* pages)
{
area_id id = area_for(pages);
TRACE_CACHE(this, "delete pages %p (%ld)", pages, id);
if (id < 0) {
panic("object cache: freeing unknown area");
return;
}
delete_area(id);
usage -= slab_size;
}
status_t
ObjectCache::EarlyAllocatePages(void** pages, uint32 flags)
{
TRACE_CACHE(this, "early allocate pages (%lu, 0x0%lx)", slab_size,
flags);
Unlock();
addr_t base = vm_allocate_early(sKernelArgs, slab_size,
slab_size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
Lock();
*pages = (void*)base;
usage += slab_size;
TRACE_CACHE(this, " ... = { %p }", *pages);
return B_OK;
}
void
ObjectCache::EarlyFreePages(void* pages)
{
panic("memory pressure on bootup?");
}


@ -66,10 +66,6 @@ struct ObjectCache : DoublyLinkedListLinkImpl<ObjectCache> {
object_cache_destructor destructor;
object_cache_reclaimer reclaimer;
status_t (ObjectCache::*allocate_pages)(void** pages,
uint32 flags);
void (ObjectCache::*free_pages)(void* pages);
object_depot depot;
public:
@ -81,11 +77,10 @@ public:
object_cache_constructor constructor,
object_cache_destructor destructor,
object_cache_reclaimer reclaimer);
void InitPostArea();
void Delete();
virtual void Delete() = 0;
virtual slab* CreateSlab(uint32 flags) = 0;
virtual void ReturnSlab(slab* slab) = 0;
virtual void ReturnSlab(slab* slab, uint32 flags) = 0;
virtual slab* ObjectSlab(void* object) const = 0;
slab* InitSlab(slab* slab, void* pages,
@ -94,21 +89,19 @@ public:
virtual status_t PrepareObject(slab* source, void* object,
uint32 flags);
virtual void UnprepareObject(slab* source, void* object);
virtual void UnprepareObject(slab* source, void* object,
uint32 flags);
void ReturnObjectToSlab(slab* source, void* object);
void ReturnObjectToSlab(slab* source, void* object,
uint32 flags);
bool Lock() { return mutex_lock(&lock) == B_OK; }
void Unlock() { mutex_unlock(&lock); }
static void SetKernelArgs(kernel_args* args);
status_t AllocatePages(void** pages, uint32 flags);
void FreePages(void* pages);
status_t EarlyAllocatePages(void** pages, uint32 flags);
void EarlyFreePages(void* pages);
private:
static kernel_args* sKernelArgs;
};


@ -93,18 +93,18 @@ alloc_magazine()
static void
free_magazine(DepotMagazine* magazine)
free_magazine(DepotMagazine* magazine, uint32 flags)
{
slab_internal_free(magazine);
slab_internal_free(magazine, flags);
}
static void
empty_magazine(object_depot* depot, DepotMagazine* magazine)
empty_magazine(object_depot* depot, DepotMagazine* magazine, uint32 flags)
{
for (uint16 i = 0; i < magazine->current_round; i++)
depot->return_object(depot, depot->cookie, magazine->rounds[i]);
free_magazine(magazine);
depot->return_object(depot, depot->cookie, magazine->rounds[i], flags);
free_magazine(magazine, flags);
}
@ -166,7 +166,8 @@ object_depot_cpu(object_depot* depot)
status_t
object_depot_init(object_depot* depot, uint32 flags, void* cookie,
void (*return_object)(object_depot* depot, void* cookie, void* object))
void (*return_object)(object_depot* depot, void* cookie, void* object,
uint32 flags))
{
depot->full = NULL;
depot->empty = NULL;
@ -196,11 +197,11 @@ object_depot_init(object_depot* depot, uint32 flags, void* cookie,
void
object_depot_destroy(object_depot* depot)
object_depot_destroy(object_depot* depot, uint32 flags)
{
object_depot_make_empty(depot);
object_depot_make_empty(depot, flags);
slab_internal_free(depot->stores);
slab_internal_free(depot->stores, flags);
rw_lock_destroy(&depot->outer_lock);
}
@ -239,7 +240,7 @@ object_depot_obtain(object_depot* depot)
int
object_depot_store(object_depot* depot, void* object)
object_depot_store(object_depot* depot, void* object, uint32 flags)
{
ReadLocker readLocker(depot->outer_lock);
InterruptsLocker interruptsLocker;
@ -278,7 +279,7 @@ object_depot_store(object_depot* depot, void* object)
void
object_depot_make_empty(object_depot* depot)
object_depot_make_empty(object_depot* depot, uint32 flags)
{
WriteLocker writeLocker(depot->outer_lock);
@ -314,11 +315,11 @@ object_depot_make_empty(object_depot* depot)
// free all magazines
while (storeMagazines != NULL)
empty_magazine(depot, _pop(storeMagazines));
empty_magazine(depot, _pop(storeMagazines), flags);
while (fullMagazines != NULL)
empty_magazine(depot, _pop(fullMagazines));
empty_magazine(depot, _pop(fullMagazines), flags);
while (emptyMagazines)
free_magazine(_pop(emptyMagazines));
free_magazine(_pop(emptyMagazines), flags);
}


@ -29,6 +29,7 @@
#include <vm/VMAddressSpace.h>
#include "HashedObjectCache.h"
#include "MemoryManager.h"
#include "slab_private.h"
#include "SmallObjectCache.h"
@ -56,10 +57,6 @@ typedef DoublyLinkedList<ResizeRequest> ResizeRequestQueue;
static ObjectCacheList sObjectCaches;
static mutex sObjectCacheListLock = MUTEX_INITIALIZER("object cache list");
static uint8* sInitialBegin;
static uint8* sInitialLimit;
static uint8* sInitialPointer;
static mutex sResizeRequestsLock
= MUTEX_INITIALIZER("object cache resize requests");
static ResizeRequestQueue sResizeRequests;
@ -268,30 +265,17 @@ dump_cache_info(int argc, char* argv[])
void*
slab_internal_alloc(size_t size, uint32 flags)
{
if (flags & CACHE_DURING_BOOT) {
if ((sInitialPointer + size) > sInitialLimit) {
panic("slab_internal_alloc: ran out of initial space");
return NULL;
}
uint8* buffer = sInitialPointer;
sInitialPointer += size;
return buffer;
}
if (flags & CACHE_DURING_BOOT)
return block_alloc_early(size);
return block_alloc(size, flags);
}
void
slab_internal_free(void* _buffer)
slab_internal_free(void* buffer, uint32 flags)
{
uint8* buffer = (uint8*)_buffer;
if (buffer >= sInitialBegin && buffer < sInitialLimit)
return;
block_free(buffer);
block_free(buffer, flags);
}
@ -428,7 +412,7 @@ object_cache_low_memory(void* _self, uint32 resources, int32 level)
minimumAllowed);
while (cache->empty_count > minimumAllowed) {
cache->ReturnSlab(cache->empty.RemoveHead());
cache->ReturnSlab(cache->empty.RemoveHead(), 0);
cache->empty_count--;
}
}
@ -530,7 +514,7 @@ delete_object_cache(object_cache* cache)
}
if (!(cache->flags & CACHE_NO_DEPOT))
object_depot_destroy(&cache->depot);
object_depot_destroy(&cache->depot, 0);
mutex_lock(&cache->lock);
@ -543,7 +527,7 @@ delete_object_cache(object_cache* cache)
panic("cache destroy: still has partial slabs");
while (!cache->empty.IsEmpty())
cache->ReturnSlab(cache->empty.RemoveHead());
cache->ReturnSlab(cache->empty.RemoveHead(), 0);
mutex_destroy(&cache->lock);
cache->Delete();
@ -637,7 +621,7 @@ object_cache_alloc(object_cache* cache, uint32 flags)
void
object_cache_free(object_cache* cache, void* object)
object_cache_free(object_cache* cache, void* object, uint32 flags)
{
if (object == NULL)
return;
@ -645,12 +629,12 @@ object_cache_free(object_cache* cache, void* object)
T(Free(cache, object));
if (!(cache->flags & CACHE_NO_DEPOT)) {
if (object_depot_store(&cache->depot, object))
if (object_depot_store(&cache->depot, object, flags))
return;
}
MutexLocker _(cache->lock);
cache->ReturnObjectToSlab(cache->ObjectSlab(object), object);
cache->ReturnObjectToSlab(cache->ObjectSlab(object), object, flags);
}
@ -680,15 +664,11 @@ slab_init(kernel_args* args, addr_t initialBase, size_t initialSize)
{
dprintf("slab: init base %p + 0x%lx\n", (void*)initialBase, initialSize);
sInitialBegin = (uint8*)initialBase;
sInitialLimit = sInitialBegin + initialSize;
sInitialPointer = sInitialBegin;
ObjectCache::SetKernelArgs(args);
MemoryManager::Init(args);
new (&sObjectCaches) ObjectCacheList();
block_allocator_init_boot();
block_allocator_init_boot(initialBase, initialSize);
add_debugger_command("slabs", dump_slabs, "list all object caches");
add_debugger_command("cache_info", dump_cache_info,
@ -696,14 +676,19 @@ slab_init(kernel_args* args, addr_t initialBase, size_t initialSize)
}
void
slab_init_post_area()
{
MemoryManager::InitPostArea();
}
void
slab_init_post_sem()
{
ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
while (it.HasNext()) {
ObjectCache* cache = it.Next();
cache->InitPostArea();
register_low_resource_handler(object_cache_low_memory, cache,
B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY
| B_KERNEL_RESOURCE_ADDRESS_SPACE, 5);


@ -8,6 +8,7 @@
#include "SmallObjectCache.h"
#include "MemoryManager.h"
#include "slab_private.h"
@ -31,14 +32,24 @@ SmallObjectCache::Create(const char* name, size_t object_size,
}
if ((flags & CACHE_LARGE_SLAB) != 0)
cache->slab_size = max_c(16 * B_PAGE_SIZE, 1024 * object_size);
cache->slab_size = 1024 * object_size;
else
cache->slab_size = B_PAGE_SIZE;
cache->slab_size = SLAB_CHUNK_SIZE_SMALL;
cache->slab_size = MemoryManager::AcceptableChunkSize(cache->slab_size);
return cache;
}
void
SmallObjectCache::Delete()
{
this->~SmallObjectCache();
slab_internal_free(this, 0);
}
slab*
SmallObjectCache::CreateSlab(uint32 flags)
{
@ -46,7 +57,8 @@ SmallObjectCache::CreateSlab(uint32 flags)
return NULL;
void* pages;
if ((this->*allocate_pages)(&pages, flags) != B_OK)
if (MemoryManager::Allocate(this, flags, pages) != B_OK)
return NULL;
return InitSlab(slab_in_pages(pages, slab_size), pages,
@ -55,10 +67,10 @@ SmallObjectCache::CreateSlab(uint32 flags)
void
SmallObjectCache::ReturnSlab(slab* slab)
SmallObjectCache::ReturnSlab(slab* slab, uint32 flags)
{
UninitSlab(slab);
(this->*free_pages)(slab->pages);
MemoryManager::Free(slab->pages, flags);
}


@ -18,9 +18,10 @@ struct SmallObjectCache : ObjectCache {
object_cache_constructor constructor,
object_cache_destructor destructor,
object_cache_reclaimer reclaimer);
virtual void Delete();
virtual slab* CreateSlab(uint32 flags);
virtual void ReturnSlab(slab* slab);
virtual void ReturnSlab(slab* slab, uint32 flags);
virtual slab* ObjectSlab(void* object) const;
};


@ -1,4 +1,5 @@
/*
* Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
* Copyright 2007, Hugo Santos. All Rights Reserved.
* Distributed under the terms of the MIT License.
*
@ -17,6 +18,9 @@
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include "ObjectCache.h"
#include "MemoryManager.h"
#define DEBUG_ALLOCATOR
//#define TEST_ALL_CACHES_DURING_BOOT
@ -32,23 +36,11 @@ static const size_t kBlockSizes[] = {
static const size_t kNumBlockSizes = sizeof(kBlockSizes) / sizeof(size_t) - 1;
static object_cache *sBlockCaches[kNumBlockSizes];
static int32 sBlockCacheWaste[kNumBlockSizes];
static object_cache* sBlockCaches[kNumBlockSizes];
struct boundary_tag {
uint32 size;
#ifdef DEBUG_ALLOCATOR
uint32 magic;
#endif
};
struct area_boundary_tag {
area_id area;
boundary_tag tag;
};
static const uint32 kBoundaryMagic = 0x6da78d13;
static addr_t sBootStrapMemory;
static size_t sBootStrapMemorySize;
static size_t sUsedBootStrapMemory;
static int
@ -75,113 +67,90 @@ size_to_index(size_t size)
}
void *
void*
block_alloc(size_t size, uint32 flags)
{
int index = size_to_index(size + sizeof(boundary_tag));
// allocate from the respective object cache, if any
int index = size_to_index(size);
if (index >= 0)
return object_cache_alloc(sBlockCaches[index], flags);
void *block;
boundary_tag *tag;
// the allocation is too large for our object caches -- create an area
void* block;
area_id area = create_area_etc(VMAddressSpace::KernelID(),
"alloc'ed block", &block, B_ANY_KERNEL_ADDRESS,
ROUNDUP(size, B_PAGE_SIZE), B_FULL_LOCK,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0,
(flags & CACHE_DONT_SLEEP) != 0 ? CREATE_AREA_DONT_WAIT : 0);
if (area < 0)
return NULL;
if (index < 0) {
void *pages;
area_id area = create_area_etc(VMAddressSpace::KernelID(),
"alloc'ed block", &pages, B_ANY_KERNEL_ADDRESS,
ROUNDUP(size + sizeof(area_boundary_tag), B_PAGE_SIZE), B_FULL_LOCK,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0,
(flags & CACHE_DONT_SLEEP) != 0 ? CREATE_AREA_DONT_WAIT : 0);
if (area < 0)
return NULL;
return block;
}
area_boundary_tag *areaTag = (area_boundary_tag *)pages;
areaTag->area = area;
tag = &areaTag->tag;
block = areaTag + 1;
} else {
tag = (boundary_tag *)object_cache_alloc(sBlockCaches[index], flags);
if (tag == NULL)
return NULL;
atomic_add(&sBlockCacheWaste[index], kBlockSizes[index] - size
- sizeof(boundary_tag));
block = tag + 1;
}
tag->size = size;
void*
block_alloc_early(size_t size)
{
int index = size_to_index(size);
if (index < 0)
return NULL;
#ifdef DEBUG_ALLOCATOR
tag->magic = kBoundaryMagic;
#endif
if (sBlockCaches[index] != NULL)
return object_cache_alloc(sBlockCaches[index], CACHE_DURING_BOOT);
// No object cache yet. Use the bootstrap memory. This allocation must
// never be freed!
size_t neededSize = ROUNDUP(size, sizeof(double));
if (sUsedBootStrapMemory + neededSize > sBootStrapMemorySize)
return NULL;
void* block = (void*)(sBootStrapMemory + sUsedBootStrapMemory);
sUsedBootStrapMemory += neededSize;
return block;
}
void
block_free(void *block)
block_free(void* block, uint32 flags)
{
boundary_tag *tag = (boundary_tag *)(((uint8 *)block)
- sizeof(boundary_tag));
#ifdef DEBUG_ALLOCATOR
if (tag->magic != kBoundaryMagic)
panic("allocator: boundary tag magic doesn't match this universe");
#endif
int index = size_to_index(tag->size + sizeof(boundary_tag));
if (index < 0) {
area_boundary_tag *areaTag = (area_boundary_tag *)(((uint8 *)block)
- sizeof(area_boundary_tag));
delete_area(areaTag->area);
if (ObjectCache* cache = MemoryManager::CacheForAddress(block)) {
// a regular small allocation
ASSERT(cache->object_size >= kBlockSizes[0]);
ASSERT(cache->object_size <= kBlockSizes[kNumBlockSizes - 1]);
ASSERT(cache == sBlockCaches[size_to_index(cache->object_size)]);
object_cache_free(cache, block, flags);
} else {
atomic_add(&sBlockCacheWaste[index], -(kBlockSizes[index] - tag->size
- sizeof(boundary_tag)));
object_cache_free(sBlockCaches[index], tag);
// a large allocation -- look up the area
VMAddressSpace* addressSpace = VMAddressSpace::Kernel();
addressSpace->ReadLock();
VMArea* area = addressSpace->LookupArea((addr_t)block);
addressSpace->ReadUnlock();
if (area != NULL && (addr_t)block == area->Base())
delete_area(area->id);
else
panic("freeing unknown block %p from area %p", block, area);
}
}
static void
block_create_cache(size_t index, bool boot)
{
char name[32];
snprintf(name, sizeof(name), "block cache: %lu", kBlockSizes[index]);
sBlockCaches[index] = create_object_cache_etc(name, kBlockSizes[index],
0, 0, boot ? CACHE_DURING_BOOT : 0, NULL, NULL, NULL, NULL);
if (sBlockCaches[index] == NULL)
panic("allocator: failed to init block cache");
sBlockCacheWaste[index] = 0;
}
static int
show_waste(int argc, char *argv[])
{
size_t total = 0;
for (size_t index = 0; index < kNumBlockSizes; index++) {
if (sBlockCacheWaste[index] > 0) {
kprintf("%lu: %lu\n", kBlockSizes[index], sBlockCacheWaste[index]);
total += sBlockCacheWaste[index];
}
}
kprintf("total waste: %lu\n", total);
return 0;
}
void
block_allocator_init_boot()
block_allocator_init_boot(addr_t bootStrapBase, size_t bootStrapSize)
{
for (int index = 0; kBlockSizes[index] != 0; index++)
block_create_cache(index, true);
sBootStrapMemory = bootStrapBase;
sBootStrapMemorySize = bootStrapSize;
sUsedBootStrapMemory = 0;
add_debugger_command("show_waste", show_waste,
"show cache allocator's memory waste");
for (int index = 0; kBlockSizes[index] != 0; index++) {
char name[32];
snprintf(name, sizeof(name), "block cache: %lu", kBlockSizes[index]);
sBlockCaches[index] = create_object_cache_etc(name, kBlockSizes[index],
0, 0, CACHE_DURING_BOOT, NULL, NULL, NULL, NULL);
if (sBlockCaches[index] == NULL)
panic("allocator: failed to init block cache");
}
}
@ -194,4 +163,3 @@ block_allocator_init_rest()
}
#endif
}


@ -27,11 +27,13 @@
struct ObjectCache;
void* slab_internal_alloc(size_t size, uint32 flags);
void slab_internal_free(void *_buffer);
void slab_internal_free(void *_buffer, uint32 flags);
void* block_alloc(size_t size, uint32 flags);
void block_free(void *block);
void block_allocator_init_boot();
void* block_alloc_early(size_t size);
void block_free(void *block, uint32 flags);
void block_allocator_init_boot(addr_t bootStrapBase,
size_t bootStrapSize);
void block_allocator_init_rest();


@ -831,7 +831,7 @@ VMAnonymousCache::_SwapBlockFree(off_t startPageIndex, uint32 count)
swap->used -= j;
if (swap->used == 0) {
sSwapHashTable.RemoveUnchecked(swap);
object_cache_free(sSwapBlockCache, swap);
object_cache_free(sSwapBlockCache, swap, CACHE_DONT_SLEEP);
}
}
}
@ -1041,7 +1041,8 @@ VMAnonymousCache::_MergeSwapPages(VMAnonymousCache* source)
if (sourceSwapBlock->used == 0) {
// All swap pages have been freed -- we can discard the source swap
// block.
object_cache_free(sSwapBlockCache, sourceSwapBlock);
object_cache_free(sSwapBlockCache, sourceSwapBlock,
CACHE_DONT_SLEEP);
} else if (swapBlock == NULL) {
// We need to take over some of the source's swap pages and there's
// no swap block in the consumer cache. Just take over the source
@ -1059,7 +1060,8 @@ VMAnonymousCache::_MergeSwapPages(VMAnonymousCache* source)
swapBlock->swap_slots[i] = sourceSwapBlock->swap_slots[i];
}
object_cache_free(sSwapBlockCache, sourceSwapBlock);
object_cache_free(sSwapBlockCache, sourceSwapBlock,
CACHE_DONT_SLEEP);
}
}
}


@ -37,6 +37,7 @@
#include <int.h>
#include <lock.h>
#include <low_resource_manager.h>
#include <slab/Slab.h>
#include <smp.h>
#include <system_info.h>
#include <thread.h>
@ -221,6 +222,8 @@ private:
};
ObjectCache* gPageMappingsObjectCache;
static rw_lock sAreaCacheLock = RW_LOCK_INITIALIZER("area->cache");
static off_t sAvailableMemory;
@ -468,8 +471,8 @@ map_page(VMArea* area, vm_page* page, addr_t address, uint32 protection)
if (area->wiring == B_NO_LOCK) {
DEBUG_PAGE_ACCESS_CHECK(page);
vm_page_mapping* mapping
= (vm_page_mapping*)malloc_nogrow(sizeof(vm_page_mapping));
vm_page_mapping* mapping = (vm_page_mapping*)object_cache_alloc(
gPageMappingsObjectCache, CACHE_DONT_SLEEP);
if (mapping == NULL)
return B_NO_MEMORY;
@ -3067,50 +3070,55 @@ reserve_boot_loader_ranges(kernel_args* args)
static addr_t
allocate_early_virtual(kernel_args* args, size_t size)
allocate_early_virtual(kernel_args* args, size_t size, bool blockAlign)
{
addr_t spot = 0;
uint32 i;
int last_valloc_entry = 0;
size = PAGE_ALIGN(size);
// find a slot in the virtual allocation addr range
for (i = 1; i < args->num_virtual_allocated_ranges; i++) {
for (uint32 i = 1; i < args->num_virtual_allocated_ranges; i++) {
// check to see if the space between this one and the last is big enough
addr_t rangeStart = args->virtual_allocated_range[i].start;
addr_t previousRangeEnd = args->virtual_allocated_range[i - 1].start
+ args->virtual_allocated_range[i - 1].size;
last_valloc_entry = i;
// check to see if the space between this one and the last is big enough
if (previousRangeEnd >= KERNEL_BASE
&& args->virtual_allocated_range[i].start
- previousRangeEnd >= size) {
spot = previousRangeEnd;
args->virtual_allocated_range[i - 1].size += size;
goto out;
}
}
if (spot == 0) {
// we hadn't found one between allocation ranges. this is ok.
// see if there's a gap after the last one
addr_t lastRangeEnd
= args->virtual_allocated_range[last_valloc_entry].start
+ args->virtual_allocated_range[last_valloc_entry].size;
if (KERNEL_BASE + (KERNEL_SIZE - 1) - lastRangeEnd >= size) {
spot = lastRangeEnd;
args->virtual_allocated_range[last_valloc_entry].size += size;
goto out;
}
// see if there's a gap before the first one
if (args->virtual_allocated_range[0].start > KERNEL_BASE) {
if (args->virtual_allocated_range[0].start - KERNEL_BASE >= size) {
args->virtual_allocated_range[0].start -= size;
spot = args->virtual_allocated_range[0].start;
goto out;
}
addr_t base = blockAlign
? ROUNDUP(previousRangeEnd, size) : previousRangeEnd;
if (base >= KERNEL_BASE && base < rangeStart
&& rangeStart - base >= size) {
args->virtual_allocated_range[i - 1].size
+= base + size - previousRangeEnd;
return base;
}
}
out:
return spot;
// we hadn't found one between allocation ranges. this is ok.
// see if there's a gap after the last one
int lastEntryIndex = args->num_virtual_allocated_ranges - 1;
addr_t lastRangeEnd = args->virtual_allocated_range[lastEntryIndex].start
+ args->virtual_allocated_range[lastEntryIndex].size;
addr_t base = blockAlign ? ROUNDUP(lastRangeEnd, size) : lastRangeEnd;
if (KERNEL_BASE + (KERNEL_SIZE - 1) - base >= size) {
args->virtual_allocated_range[lastEntryIndex].size
+= base + size - lastRangeEnd;
return base;
}
// see if there's a gap before the first one
addr_t rangeStart = args->virtual_allocated_range[0].start;
if (rangeStart > KERNEL_BASE && rangeStart - KERNEL_BASE >= size) {
base = rangeStart - size;
if (blockAlign)
base = ROUNDDOWN(base, size);
if (base >= KERNEL_BASE) {
args->virtual_allocated_range[0].start = base;
args->virtual_allocated_range[0].size += rangeStart - base;
return base;
}
}
return 0;
}
@ -3162,13 +3170,13 @@ allocate_early_physical_page(kernel_args* args)
*/
addr_t
vm_allocate_early(kernel_args* args, size_t virtualSize, size_t physicalSize,
uint32 attributes)
uint32 attributes, bool blockAlign)
{
if (physicalSize > virtualSize)
physicalSize = virtualSize;
// find the vaddr to allocate at
addr_t virtualBase = allocate_early_virtual(args, virtualSize);
addr_t virtualBase = allocate_early_virtual(args, virtualSize, blockAlign);
//dprintf("vm_allocate_early: vaddr 0x%lx\n", virtualAddress);
// map the pages
@ -3214,13 +3222,13 @@ vm_init(kernel_args* args)
// map in the new heap and initialize it
addr_t heapBase = vm_allocate_early(args, heapSize, heapSize,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
TRACE(("heap at 0x%lx\n", heapBase));
heap_init(heapBase, heapSize);
size_t slabInitialSize = args->num_cpus * 3 * B_PAGE_SIZE;
size_t slabInitialSize = B_PAGE_SIZE;
addr_t slabInitialBase = vm_allocate_early(args, slabInitialSize,
slabInitialSize, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
slabInitialSize, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
slab_init(args, slabInitialBase, slabInitialSize);
// initialize the free page list and physical page mapper
@ -3243,6 +3251,7 @@ vm_init(kernel_args* args)
arch_vm_translation_map_init_post_area(args);
arch_vm_init_post_area(args);
vm_page_init_post_area(args);
slab_init_post_area();
// allocate areas to represent stuff that already exists
@ -3260,9 +3269,8 @@ vm_init(kernel_args* args)
create_preloaded_image_areas(&args->kernel_image);
// allocate areas for preloaded images
for (image = args->preloaded_images; image != NULL; image = image->next) {
for (image = args->preloaded_images; image != NULL; image = image->next)
create_preloaded_image_areas(image);
}
// allocate kernel stacks
for (i = 0; i < args->num_cpus; i++) {
@ -3277,6 +3285,14 @@ vm_init(kernel_args* args)
void* lastPage = (void*)ROUNDDOWN(~(addr_t)0, B_PAGE_SIZE);
vm_block_address_range("overflow protection", lastPage, B_PAGE_SIZE);
// create the object cache for the page mappings
gPageMappingsObjectCache = create_object_cache_etc("page mappings",
sizeof(vm_page_mapping), 0, 0, 0, NULL, NULL, NULL, NULL);
if (gPageMappingsObjectCache == NULL)
panic("failed to create page mappings object cache");
object_cache_set_minimum_reserve(gPageMappingsObjectCache, 1024);
#if DEBUG_CACHE_LIST
create_area("cache info table", (void**)&sCacheInfoTable,
B_ANY_KERNEL_ADDRESS,


@ -1878,7 +1878,7 @@ vm_page_init(kernel_args *args)
// map in the new free page table
sPages = (vm_page *)vm_allocate_early(args, sNumPages * sizeof(vm_page),
~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
TRACE(("vm_init: putting free_page_table @ %p, # ents %ld (size 0x%x)\n",
sPages, sNumPages, (unsigned int)(sNumPages * sizeof(vm_page))));


@ -80,7 +80,7 @@ object_cache_alloc(object_cache *cache, uint32 flags)
void
object_cache_free(object_cache *cache, void *object)
object_cache_free(object_cache *cache, void *object, uint32 flags)
{
if (object != NULL) {
if (cache != NULL && cache->objectDestructor != NULL)