* Split the slab allocator code into separate source files and C++-ified
  things a bit.
* Some style cleanup.
* The object depot now has a cookie that will be passed to the return hook.
* Fixed object_cache_return_object_wrapper() to use the new cookie.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@35174 a95241bf-73f2-0310-859d-f6bbb57e9c96
parent 6ff00ae7e5
commit 825566f82f
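The cookie is the heart of this change: the depot's return hook now receives the owner that registered it, so object_cache_return_object_wrapper() (in ObjectCache.cpp below) can recover its cache without global state. As a minimal sketch of the reworked API, a hypothetical consumer (none of the names below are part of this commit) registers itself as the cookie:

#include <slab/ObjectDepot.h>
#include <slab/Slab.h>

struct my_pool {
	object_depot	depot;
	object_cache*	cache;
};

static void
my_pool_return_object(object_depot* depot, void* cookie, void* object)
{
	// The cookie comes back verbatim, so the hook no longer needs the
	// depot to be embedded at a known offset inside its owner.
	my_pool* pool = (my_pool*)cookie;
	object_cache_free(pool->cache, object);
}

static status_t
my_pool_init(my_pool* pool)
{
	// The owner passes itself as the cookie when initializing the depot.
	return object_depot_init(&pool->depot, 0, pool, my_pool_return_object);
}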
@@ -10,13 +10,19 @@
 #include <KernelExport.h>
 
 
-typedef struct object_depot {
-	recursive_lock lock;
-	struct depot_magazine *full, *empty;
-	size_t full_count, empty_count;
-	struct depot_cpu_store *stores;
+struct DepotMagazine;
 
-	void (*return_object)(struct object_depot *depot, void *object);
+typedef struct object_depot {
+	recursive_lock lock;
+	DepotMagazine* full;
+	DepotMagazine* empty;
+	size_t full_count;
+	size_t empty_count;
+	struct depot_cpu_store* stores;
+
+	void* cookie;
+	void (*return_object)(struct object_depot* depot, void* cookie,
+		void* object);
 } object_depot;
 
@@ -24,14 +30,14 @@ typedef struct object_depot {
 extern "C" {
 #endif
 
-status_t object_depot_init(object_depot *depot, uint32 flags,
-	void (*returnObject)(object_depot *, void *));
-void object_depot_destroy(object_depot *depot);
+status_t object_depot_init(object_depot* depot, uint32 flags, void *cookie,
+	void (*returnObject)(object_depot* depot, void* cookie, void* object));
+void object_depot_destroy(object_depot* depot);
 
-void *object_depot_obtain(object_depot *depot);
-int object_depot_store(object_depot *depot, void *object);
+void* object_depot_obtain(object_depot* depot);
+int object_depot_store(object_depot* depot, void* object);
 
-void object_depot_make_empty(object_depot *depot);
+void object_depot_make_empty(object_depot* depot);
 
 #ifdef __cplusplus
 }
@@ -26,37 +26,38 @@ enum {
 	CACHE_DURING_BOOT = 1 << 31
 };
 
-typedef struct object_cache object_cache;
+struct ObjectCache;
+typedef struct ObjectCache object_cache;
 
-typedef status_t (*object_cache_constructor)(void *cookie, void *object);
-typedef void (*object_cache_destructor)(void *cookie, void *object);
-typedef void (*object_cache_reclaimer)(void *cookie, int32 level);
+typedef status_t (*object_cache_constructor)(void* cookie, void* object);
+typedef void (*object_cache_destructor)(void* cookie, void* object);
+typedef void (*object_cache_reclaimer)(void* cookie, int32 level);
 
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-object_cache *create_object_cache(const char *name, size_t object_size,
-	size_t alignment, void *cookie, object_cache_constructor constructor,
+object_cache* create_object_cache(const char* name, size_t object_size,
+	size_t alignment, void* cookie, object_cache_constructor constructor,
 	object_cache_destructor);
-object_cache *create_object_cache_etc(const char *name, size_t object_size,
-	size_t alignment, size_t max_byte_usage, uint32 flags, void *cookie,
+object_cache* create_object_cache_etc(const char* name, size_t object_size,
+	size_t alignment, size_t max_byte_usage, uint32 flags, void* cookie,
 	object_cache_constructor constructor, object_cache_destructor destructor,
 	object_cache_reclaimer reclaimer);
 
-void delete_object_cache(object_cache *cache);
+void delete_object_cache(object_cache* cache);
 
-status_t object_cache_set_minimum_reserve(object_cache *cache,
+status_t object_cache_set_minimum_reserve(object_cache* cache,
 	size_t objectCount);
 
-void *object_cache_alloc(object_cache *cache, uint32 flags);
-void object_cache_free(object_cache *cache, void *object);
+void* object_cache_alloc(object_cache* cache, uint32 flags);
+void object_cache_free(object_cache* cache, void* object);
 
-status_t object_cache_reserve(object_cache *cache, size_t object_count,
+status_t object_cache_reserve(object_cache* cache, size_t object_count,
 	uint32 flags);
 
-void object_cache_get_usage(object_cache *cache, size_t *_allocatedMemory);
+void object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory);
 
 #ifdef __cplusplus
 }
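For orientation, here is a minimal sketch of how a kernel component consumes the public API declared above. The component and all its names are hypothetical; passing 0 for alignment and NULL for cookie, constructor and destructor is assumed to be allowed (the implementation below only acts on them when set):

#include <slab/Slab.h>

static object_cache* sRequestCache;

status_t
init_request_cache()
{
	sRequestCache = create_object_cache("requests", 256, 0, NULL, NULL,
		NULL);
	return sRequestCache != NULL ? B_OK : B_NO_MEMORY;
}

void*
allocate_request()
{
	// flags = 0: a regular allocation that may sleep.
	return object_cache_alloc(sRequestCache, 0);
}

void
free_request(void* request)
{
	object_cache_free(sRequestCache, request);
}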
src/system/kernel/slab/HashedObjectCache.cpp (new file, 176 lines)
@@ -0,0 +1,176 @@
/*
 * Copyright 2008, Axel Dörfler. All Rights Reserved.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 *
 * Distributed under the terms of the MIT License.
 */


#include "HashedObjectCache.h"

#include "slab_private.h"


static inline int
__fls0(size_t value)
{
	if (value == 0)
		return -1;

	int bit;
	for (bit = 0; value != 1; bit++)
		value >>= 1;
	return bit;
}


static slab*
allocate_slab(uint32 flags)
{
	return (slab*)slab_internal_alloc(sizeof(slab), flags);
}


static void
free_slab(slab* slab)
{
	slab_internal_free(slab);
}


// #pragma mark -


HashedObjectCache::HashedObjectCache()
	:
	hash_table(this)
{
}


/*static*/ HashedObjectCache*
HashedObjectCache::Create(const char* name, size_t object_size,
	size_t alignment, size_t maximum, uint32 flags, void* cookie,
	object_cache_constructor constructor, object_cache_destructor destructor,
	object_cache_reclaimer reclaimer)
{
	void* buffer = slab_internal_alloc(sizeof(HashedObjectCache), flags);
	if (buffer == NULL)
		return NULL;

	HashedObjectCache* cache = new(buffer) HashedObjectCache();

	if (cache->Init(name, object_size, alignment, maximum, flags, cookie,
			constructor, destructor, reclaimer) != B_OK) {
		cache->Delete();
		return NULL;
	}

	if ((flags & CACHE_LARGE_SLAB) != 0)
		cache->slab_size = max_c(256 * B_PAGE_SIZE, 128 * object_size);
	else
		cache->slab_size = max_c(16 * B_PAGE_SIZE, 8 * object_size);
	cache->lower_boundary = __fls0(cache->object_size);

	return cache;
}


slab*
HashedObjectCache::CreateSlab(uint32 flags, bool unlockWhileAllocating)
{
	if (!check_cache_quota(this))
		return NULL;

	if (unlockWhileAllocating)
		Unlock();

	slab* slab = allocate_slab(flags);

	if (unlockWhileAllocating)
		Lock();

	if (slab == NULL)
		return NULL;

	void* pages;
	if ((this->*allocate_pages)(&pages, flags, unlockWhileAllocating) == B_OK) {
		if (InitSlab(slab, pages, slab_size))
			return slab;

		(this->*free_pages)(pages);
	}

	free_slab(slab);
	return NULL;
}


void
HashedObjectCache::ReturnSlab(slab* slab)
{
	UninitSlab(slab);
	(this->*free_pages)(slab->pages);
}


slab*
HashedObjectCache::ObjectSlab(void* object) const
{
	Link* link = hash_table.Lookup(object);
	if (link == NULL) {
		panic("object cache: requested object %p missing from hash table",
			object);
		return NULL;
	}
	return link->parent;
}


status_t
HashedObjectCache::PrepareObject(slab* source, void* object)
{
	Link* link = _AllocateLink(CACHE_DONT_SLEEP);
	if (link == NULL)
		return B_NO_MEMORY;

	link->buffer = object;
	link->parent = source;

	hash_table.Insert(link);
	return B_OK;
}


void
HashedObjectCache::UnprepareObject(slab* source, void* object)
{
	Link* link = hash_table.Lookup(object);
	if (link == NULL) {
		panic("object cache: requested object missing from hash table");
		return;
	}

	if (link->parent != source) {
		panic("object cache: slab mismatch");
		return;
	}

	hash_table.Remove(link);
	_FreeLink(link);
}


/*static*/ inline HashedObjectCache::Link*
HashedObjectCache::_AllocateLink(uint32 flags)
{
	return (HashedObjectCache::Link*)
		slab_internal_alloc(sizeof(HashedObjectCache::Link), flags);
}


/*static*/ inline void
HashedObjectCache::_FreeLink(HashedObjectCache::Link* link)
{
	slab_internal_free(link);
}
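__fls0() above returns the index of the highest set bit of its argument, i.e. floor(log2(value)). Create() stores __fls0(object_size) as lower_boundary, and the hash in HashedObjectCache.h (next file) shifts object addresses right by that amount, so addresses get bucketed at roughly object-size granularity. A standalone, host-side sketch of the behavior, for illustration only:

#include <cstddef>
#include <cstdio>

static inline int
__fls0(size_t value)
{
	if (value == 0)
		return -1;

	int bit;
	for (bit = 0; value != 1; bit++)
		value >>= 1;
	return bit;
}

int
main()
{
	// With object_size == 96, lower_boundary becomes 6, so HashKey()
	// buckets objects by address / 64.
	printf("%d %d %d\n", __fls0(1), __fls0(96), __fls0(4096));
		// prints: 0 6 12
	return 0;
}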
src/system/kernel/slab/HashedObjectCache.h (new file, 95 lines)
@@ -0,0 +1,95 @@
/*
 * Copyright 2008, Axel Dörfler. All Rights Reserved.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 *
 * Distributed under the terms of the MIT License.
 */
#ifndef HASHED_OBJECT_CACHE_H
#define HASHED_OBJECT_CACHE_H


#include <util/OpenHashTable.h>

#include "ObjectCache.h"


struct HashedObjectCache : ObjectCache {
								HashedObjectCache();

	static	HashedObjectCache*	Create(const char* name, size_t object_size,
									size_t alignment, size_t maximum,
									uint32 flags, void* cookie,
									object_cache_constructor constructor,
									object_cache_destructor destructor,
									object_cache_reclaimer reclaimer);

	virtual	slab*				CreateSlab(uint32 flags,
									bool unlockWhileAllocating);
	virtual	void				ReturnSlab(slab* slab);
	virtual	slab*				ObjectSlab(void* object) const;

	virtual	status_t			PrepareObject(slab* source, void* object);
	virtual	void				UnprepareObject(slab* source, void* object);

private:
	struct Link {
		const void*	buffer;
		slab*		parent;
		Link*		next;
	};

	struct Definition {
		typedef HashedObjectCache	ParentType;
		typedef const void*			KeyType;
		typedef Link				ValueType;

		Definition(HashedObjectCache* parent)
			:
			parent(parent)
		{
		}

		Definition(const Definition& definition)
			:
			parent(definition.parent)
		{
		}

		size_t HashKey(const void* key) const
		{
			return (((const uint8*)key) - ((const uint8*)0))
				>> parent->lower_boundary;
		}

		size_t Hash(Link* value) const
		{
			return HashKey(value->buffer);
		}

		bool Compare(const void* key, Link* value) const
		{
			return value->buffer == key;
		}

		Link*& GetLink(Link* value) const
		{
			return value->next;
		}

		HashedObjectCache* parent;
	};

	typedef BOpenHashTable<Definition> HashTable;

private:
	static	Link*				_AllocateLink(uint32 flags);
	static	void				_FreeLink(HashedObjectCache::Link* link);

private:
	HashTable		hash_table;
	size_t			lower_boundary;
};


#endif	// HASHED_OBJECT_CACHE_H
@@ -3,8 +3,11 @@ SubDir HAIKU_TOP src system kernel slab ;
 KernelMergeObject kernel_slab.o :
 	allocator.cpp
+	HashedObjectCache.cpp
+	ObjectCache.cpp
 	ObjectDepot.cpp
 	Slab.cpp
+	SmallObjectCache.cpp
 
 	: $(TARGET_KERNEL_PIC_CCFLAGS)
 ;
src/system/kernel/slab/ObjectCache.cpp (new file, 381 lines)
@@ -0,0 +1,381 @@
/*
 * Copyright 2008, Axel Dörfler. All Rights Reserved.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 *
 * Distributed under the terms of the MIT License.
 */


#include "ObjectCache.h"

#include <string.h>

#include "slab_private.h"
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>


static const size_t kCacheColorPeriod = 8;

kernel_args* ObjectCache::sKernelArgs = NULL;


static void
object_cache_commit_slab(ObjectCache* cache, slab* slab)
{
	void* pages = (void*)ROUNDDOWN((addr_t)slab->pages, B_PAGE_SIZE);
	if (create_area(cache->name, &pages, B_EXACT_ADDRESS, cache->slab_size,
			B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA) < 0)
		panic("failed to create_area()");
}


static void
object_cache_return_object_wrapper(object_depot* depot, void* cookie,
	void* object)
{
	object_cache_free((ObjectCache*)cookie, object);
}


// #pragma mark -


ObjectCache::~ObjectCache()
{
}


status_t
ObjectCache::Init(const char* name, size_t objectSize,
	size_t alignment, size_t maximum, uint32 flags, void* cookie,
	object_cache_constructor constructor, object_cache_destructor destructor,
	object_cache_reclaimer reclaimer)
{
	strlcpy(this->name, name, sizeof(this->name));

	mutex_init(&lock, this->name);

	if (objectSize < sizeof(object_link))
		objectSize = sizeof(object_link);

	if (alignment > 0 && (objectSize & (alignment - 1)))
		object_size = objectSize + alignment - (objectSize & (alignment - 1));
	else
		object_size = objectSize;

	TRACE_CACHE(this, "init %lu, %lu -> %lu", objectSize, alignment,
		object_size);

	cache_color_cycle = 0;
	total_objects = 0;
	used_count = 0;
	empty_count = 0;
	pressure = 0;
	min_object_reserve = 0;

	usage = 0;
	this->maximum = maximum;

	this->flags = flags;

	resize_request = NULL;

	// TODO: depot destruction is obviously broken
	// no gain in using the depot in single cpu setups
	//if (smp_get_num_cpus() == 1)
		this->flags |= CACHE_NO_DEPOT;

	if (!(this->flags & CACHE_NO_DEPOT)) {
		status_t status = object_depot_init(&depot, flags, this,
			object_cache_return_object_wrapper);
		if (status < B_OK) {
			mutex_destroy(&lock);
			return status;
		}
	}

	this->cookie = cookie;
	this->constructor = constructor;
	this->destructor = destructor;
	this->reclaimer = reclaimer;

	if (this->flags & CACHE_DURING_BOOT) {
		allocate_pages = &ObjectCache::EarlyAllocatePages;
		free_pages = &ObjectCache::EarlyFreePages;
	} else {
		allocate_pages = &ObjectCache::AllocatePages;
		free_pages = &ObjectCache::FreePages;
	}

	return B_OK;
}


void
ObjectCache::InitPostArea()
{
	if (allocate_pages != &ObjectCache::EarlyAllocatePages)
		return;

	SlabList::Iterator it = full.GetIterator();
	while (it.HasNext())
		object_cache_commit_slab(this, it.Next());

	it = partial.GetIterator();
	while (it.HasNext())
		object_cache_commit_slab(this, it.Next());

	it = empty.GetIterator();
	while (it.HasNext())
		object_cache_commit_slab(this, it.Next());

	allocate_pages = &ObjectCache::AllocatePages;
	free_pages = &ObjectCache::FreePages;
}


void
ObjectCache::Delete()
{
	this->~ObjectCache();
	slab_internal_free(this);
}


slab*
ObjectCache::InitSlab(slab* slab, void* pages, size_t byteCount)
{
	TRACE_CACHE(this, "construct (%p, %p .. %p, %lu)", slab, pages,
		((uint8*)pages) + byteCount, byteCount);

	slab->pages = pages;
	slab->count = slab->size = byteCount / object_size;
	slab->free = NULL;
	total_objects += slab->size;

	size_t spareBytes = byteCount - (slab->size * object_size);
	slab->offset = cache_color_cycle;

	if (slab->offset > spareBytes)
		cache_color_cycle = slab->offset = 0;
	else
		cache_color_cycle += kCacheColorPeriod;

	TRACE_CACHE(this, "  %lu objects, %lu spare bytes, offset %lu",
		slab->size, spareBytes, slab->offset);

	uint8* data = ((uint8*)pages) + slab->offset;

	CREATE_PARANOIA_CHECK_SET(slab, "slab");

	for (size_t i = 0; i < slab->size; i++) {
		bool failedOnFirst = false;

		status_t status = PrepareObject(slab, data);
		if (status < B_OK)
			failedOnFirst = true;
		else if (constructor)
			status = constructor(cookie, data);

		if (status < B_OK) {
			if (!failedOnFirst)
				UnprepareObject(slab, data);

			data = ((uint8*)pages) + slab->offset;
			for (size_t j = 0; j < i; j++) {
				if (destructor)
					destructor(cookie, data);
				UnprepareObject(slab, data);
				data += object_size;
			}

			DELETE_PARANOIA_CHECK_SET(slab);

			return NULL;
		}

		_push(slab->free, object_to_link(data, object_size));

		ADD_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, slab,
			&object_to_link(data, object_size)->next, sizeof(void*));

		data += object_size;
	}

	return slab;
}


void
ObjectCache::UninitSlab(slab* slab)
{
	TRACE_CACHE(this, "destruct %p", slab);

	if (slab->count != slab->size)
		panic("cache: destroying a slab which isn't empty.");

	total_objects -= slab->size;

	DELETE_PARANOIA_CHECK_SET(slab);

	uint8* data = ((uint8*)slab->pages) + slab->offset;

	for (size_t i = 0; i < slab->size; i++) {
		if (destructor)
			destructor(cookie, data);
		UnprepareObject(slab, data);
		data += object_size;
	}
}


status_t
ObjectCache::PrepareObject(slab* source, void* object)
{
	return B_OK;
}


void
ObjectCache::UnprepareObject(slab* source, void* object)
{
}


void
ObjectCache::ReturnObjectToSlab(slab* source, void* object)
{
	if (source == NULL) {
		panic("object_cache: free'd object has no slab");
		return;
	}

	ParanoiaChecker _(source);

	object_link* link = object_to_link(object, object_size);

	TRACE_CACHE(this, "returning %p (%p) to %p, %lu used (%lu empty slabs).",
		object, link, source, source->size - source->count,
		empty_count);

	_push(source->free, link);
	source->count++;
	used_count--;

	ADD_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next, sizeof(void*));

	if (source->count == source->size) {
		partial.Remove(source);

		if (empty_count < pressure
			&& total_objects - used_count - source->size
				>= min_object_reserve) {
			empty_count++;
			empty.Add(source);
		} else {
			ReturnSlab(source);
		}
	} else if (source->count == 1) {
		full.Remove(source);
		partial.Add(source);
	}
}


/*static*/ void
ObjectCache::SetKernelArgs(kernel_args* args)
{
	sKernelArgs = args;
}


status_t
ObjectCache::AllocatePages(void** pages, uint32 flags,
	bool unlockWhileAllocating)
{
	TRACE_CACHE(cache, "allocate pages (%lu, 0x0%lx)", slab_size, flags);

	uint32 lock = B_FULL_LOCK;
	if (this->flags & CACHE_UNLOCKED_PAGES)
		lock = B_NO_LOCK;

	uint32 addressSpec = B_ANY_KERNEL_ADDRESS;
	if ((this->flags & CACHE_ALIGN_ON_SIZE) != 0
		&& slab_size != B_PAGE_SIZE)
		addressSpec = B_ANY_KERNEL_BLOCK_ADDRESS;

	if (unlockWhileAllocating)
		Unlock();

	// if we are allocating, it is because we need the pages immediately
	// so we lock them. when moving the slab to the empty list we should
	// unlock them, and lock them again when getting one from the empty list.
	area_id areaId = create_area_etc(VMAddressSpace::KernelID(),
		name, pages, addressSpec, slab_size, lock,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0,
		(flags & CACHE_DONT_SLEEP) != 0 ? CREATE_AREA_DONT_WAIT : 0);

	if (unlockWhileAllocating)
		Lock();

	if (areaId < 0)
		return areaId;

	usage += slab_size;

	TRACE_CACHE(this, "  ... = { %ld, %p }", areaId, *pages);

	return B_OK;
}


void
ObjectCache::FreePages(void* pages)
{
	area_id id = area_for(pages);

	TRACE_CACHE(this, "delete pages %p (%ld)", pages, id);

	if (id < 0) {
		panic("object cache: freeing unknown area");
		return;
	}

	delete_area(id);

	usage -= slab_size;
}


status_t
ObjectCache::EarlyAllocatePages(void** pages, uint32 flags,
	bool unlockWhileAllocating)
{
	TRACE_CACHE(this, "early allocate pages (%lu, 0x0%lx)", slab_size,
		flags);

	if (unlockWhileAllocating)
		Unlock();

	addr_t base = vm_allocate_early(sKernelArgs, slab_size,
		slab_size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);

	if (unlockWhileAllocating)
		Lock();

	*pages = (void*)base;

	usage += slab_size;

	TRACE_CACHE(this, "  ... = { %p }", *pages);

	return B_OK;
}


void
ObjectCache::EarlyFreePages(void* pages)
{
	panic("memory pressure on bootup?");
}
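The cache-coloring logic in InitSlab() above staggers where each new slab's first object starts, advancing the per-cache cycle by kCacheColorPeriod bytes per slab and wrapping once the offset would exceed the slab's spare bytes, so that objects at equal indexes in different slabs do not all compete for the same cache lines. A standalone illustration with made-up sizes (4096-byte slabs, 250-byte objects):

#include <cstddef>
#include <cstdio>

int
main()
{
	const size_t kCacheColorPeriod = 8;
	const size_t byteCount = 4096;
	const size_t objectSize = 250;
	size_t cacheColorCycle = 0;

	for (int i = 0; i < 16; i++) {
		size_t size = byteCount / objectSize;			// 16 objects
		size_t spareBytes = byteCount - size * objectSize;	// 96 bytes
		size_t offset = cacheColorCycle;

		// Same wrap rule as InitSlab(): reset once past the spare
		// bytes, otherwise advance by the period.
		if (offset > spareBytes)
			cacheColorCycle = offset = 0;
		else
			cacheColorCycle += kCacheColorPeriod;

		printf("slab %2d starts at offset %3zu\n", i, offset);
	}
	return 0;
}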
src/system/kernel/slab/ObjectCache.h (new file, 149 lines)
@@ -0,0 +1,149 @@
/*
 * Copyright 2008, Axel Dörfler. All Rights Reserved.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 *
 * Distributed under the terms of the MIT License.
 */
#ifndef OBJECT_CACHE_H
#define OBJECT_CACHE_H


#include <lock.h>
#include <slab/ObjectDepot.h>
#include <slab/Slab.h>
#include <util/DoublyLinkedList.h>


struct ResizeRequest;


struct object_link {
	struct object_link* next;
};

struct slab : DoublyLinkedListLinkImpl<slab> {
	void*			pages;
	size_t			size;		// total number of objects
	size_t			count;		// free objects
	size_t			offset;
	object_link*	free;
};

typedef DoublyLinkedList<slab> SlabList;

struct ObjectCache : DoublyLinkedListLinkImpl<ObjectCache> {
	char			name[32];
	mutex			lock;
	size_t			object_size;
	size_t			cache_color_cycle;
	SlabList		empty;
	SlabList		partial;
	SlabList		full;
	size_t			total_objects;		// total number of objects
	size_t			used_count;			// used objects
	size_t			empty_count;		// empty slabs
	size_t			pressure;
	size_t			min_object_reserve;
		// minimum number of free objects

	size_t			slab_size;
	size_t			usage;
	size_t			maximum;
	uint32			flags;

	ResizeRequest*	resize_request;

	void*			cookie;
	object_cache_constructor	constructor;
	object_cache_destructor		destructor;
	object_cache_reclaimer		reclaimer;

	status_t		(ObjectCache::*allocate_pages)(void** pages,
						uint32 flags, bool unlockWhileAllocating);
	void			(ObjectCache::*free_pages)(void* pages);

	object_depot	depot;

public:
	virtual			~ObjectCache();

	status_t		Init(const char* name, size_t objectSize,
						size_t alignment, size_t maximum,
						uint32 flags, void* cookie,
						object_cache_constructor constructor,
						object_cache_destructor destructor,
						object_cache_reclaimer reclaimer);
	void			InitPostArea();
	void			Delete();

	virtual	slab*	CreateSlab(uint32 flags,
						bool unlockWhileAllocating) = 0;
	virtual	void	ReturnSlab(slab* slab) = 0;
	virtual	slab*	ObjectSlab(void* object) const = 0;

	slab*			InitSlab(slab* slab, void* pages,
						size_t byteCount);
	void			UninitSlab(slab* slab);

	virtual	status_t	PrepareObject(slab* source, void* object);
	virtual	void		UnprepareObject(slab* source, void* object);

	void			ReturnObjectToSlab(slab* source, void* object);

	bool			Lock()		{ return mutex_lock(&lock) == B_OK; }
	void			Unlock()	{ mutex_unlock(&lock); }

	static void		SetKernelArgs(kernel_args* args);
	status_t		AllocatePages(void** pages, uint32 flags,
						bool unlockWhileAllocating);
	void			FreePages(void* pages);
	status_t		EarlyAllocatePages(void** pages, uint32 flags,
						bool unlockWhileAllocating);
	void			EarlyFreePages(void* pages);

private:
	static kernel_args*	sKernelArgs;
};


static inline void*
link_to_object(object_link* link, size_t objectSize)
{
	return ((uint8*)link) - (objectSize - sizeof(object_link));
}


static inline object_link*
object_to_link(void* object, size_t objectSize)
{
	return (object_link*)(((uint8*)object)
		+ (objectSize - sizeof(object_link)));
}


static inline slab *
slab_in_pages(const void *pages, size_t slab_size)
{
	return (slab *)(((uint8 *)pages) + slab_size - sizeof(slab));
}


static inline const void *
lower_boundary(void *object, size_t byteCount)
{
	const uint8 *null = (uint8 *)NULL;
	return null + ((((uint8 *)object) - null) & ~(byteCount - 1));
}


static inline bool
check_cache_quota(ObjectCache *cache)
{
	if (cache->maximum == 0)
		return true;

	return (cache->usage + cache->slab_size) <= cache->maximum;
}


#endif	// OBJECT_CACHE_H
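The inline helpers at the bottom of the header above define the free-list layout: object_to_link() places the link in the last sizeof(object_link) bytes of an object, and link_to_object() inverts that, which is also why Init() (in ObjectCache.cpp) rounds objectSize up to at least sizeof(object_link). A standalone round-trip sketch with an arbitrary example size:

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct object_link {
	struct object_link* next;
};

static const size_t kObjectSize = 64;

int
main()
{
	uint8_t object[kObjectSize];

	// object_to_link(): the link occupies the object's last bytes.
	object_link* link = (object_link*)(object
		+ (kObjectSize - sizeof(object_link)));

	// link_to_object(): stepping back recovers the object's start.
	void* recovered = (uint8_t*)link - (kObjectSize - sizeof(object_link));

	printf("round trip ok: %d\n", recovered == object);	// prints 1
	return 0;
}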
@@ -19,55 +19,65 @@ static const int kMagazineCapacity = 32;
 	// TODO: Should be dynamically tuned per cache.
 
 
-struct depot_magazine {
-	struct depot_magazine *next;
-	uint16 current_round, round_count;
-	void *rounds[0];
+struct DepotMagazine {
+	DepotMagazine*	next;
+	uint16			current_round;
+	uint16			round_count;
+	void*			rounds[0];
+
+public:
+	inline	bool	IsEmpty() const;
+	inline	bool	IsFull() const;
+
+	inline	void*	Pop();
+	inline	bool	Push(void* object);
 };
 
 
 struct depot_cpu_store {
-	recursive_lock lock;
-	struct depot_magazine *loaded, *previous;
+	recursive_lock	lock;
+	DepotMagazine*	loaded;
+	DepotMagazine*	previous;
 };
 
 
-static inline bool
-is_magazine_empty(depot_magazine *magazine)
+bool
+DepotMagazine::IsEmpty() const
 {
-	return magazine->current_round == 0;
+	return current_round == 0;
 }
 
 
-static inline bool
-is_magazine_full(depot_magazine *magazine)
+bool
+DepotMagazine::IsFull() const
 {
-	return magazine->current_round == magazine->round_count;
+	return current_round == round_count;
 }
 
 
-static inline void *
-pop_magazine(depot_magazine *magazine)
+void*
+DepotMagazine::Pop()
 {
-	return magazine->rounds[--magazine->current_round];
+	return rounds[--current_round];
 }
 
 
-static inline bool
-push_magazine(depot_magazine *magazine, void *object)
+bool
+DepotMagazine::Push(void* object)
 {
-	if (is_magazine_full(magazine))
+	if (IsFull())
 		return false;
-	magazine->rounds[magazine->current_round++] = object;
+
+	rounds[current_round++] = object;
 	return true;
 }
 
 
-static depot_magazine *
+static DepotMagazine*
 alloc_magazine()
 {
-	depot_magazine *magazine = (depot_magazine *)internal_alloc(
-		sizeof(depot_magazine) + kMagazineCapacity * sizeof(void *), 0);
+	DepotMagazine* magazine = (DepotMagazine*)slab_internal_alloc(
+		sizeof(DepotMagazine) + kMagazineCapacity * sizeof(void*), 0);
 	if (magazine) {
 		magazine->next = NULL;
 		magazine->current_round = 0;
@@ -79,23 +89,23 @@ alloc_magazine()
 
 
 static void
-free_magazine(depot_magazine *magazine)
+free_magazine(DepotMagazine* magazine)
 {
-	internal_free(magazine);
+	slab_internal_free(magazine);
 }
 
 
 static void
-empty_magazine(object_depot *depot, depot_magazine *magazine)
+empty_magazine(object_depot* depot, DepotMagazine* magazine)
 {
 	for (uint16 i = 0; i < magazine->current_round; i++)
-		depot->return_object(depot, magazine->rounds[i]);
+		depot->return_object(depot, depot->cookie, magazine->rounds[i]);
 	free_magazine(magazine);
 }
 
 
 static bool
-exchange_with_full(object_depot *depot, depot_magazine* &magazine)
+exchange_with_full(object_depot* depot, DepotMagazine*& magazine)
 {
 	RecursiveLocker _(depot->lock);
 
@@ -112,7 +122,7 @@ exchange_with_full(object_depot *depot, depot_magazine* &magazine)
 
 
 static bool
-exchange_with_empty(object_depot *depot, depot_magazine* &magazine)
+exchange_with_empty(object_depot* depot, DepotMagazine*& magazine)
 {
 	RecursiveLocker _(depot->lock);
 
@@ -134,8 +144,8 @@ exchange_with_empty(object_depot *depot, depot_magazine* &magazine)
 }
 
 
-static inline depot_cpu_store *
-object_depot_cpu(object_depot *depot)
+static inline depot_cpu_store*
+object_depot_cpu(object_depot* depot)
 {
 	return &depot->stores[smp_get_current_cpu()];
 }
@@ -145,8 +155,8 @@ object_depot_cpu(object_depot *depot)
 
 
 status_t
-object_depot_init(object_depot *depot, uint32 flags,
-	void (*return_object)(object_depot *depot, void *object))
+object_depot_init(object_depot* depot, uint32 flags, void* cookie,
+	void (*return_object)(object_depot* depot, void* cookie, void* object))
 {
 	depot->full = NULL;
 	depot->empty = NULL;
@@ -154,8 +164,8 @@ object_depot_init(object_depot *depot, uint32 flags,
 
 	recursive_lock_init(&depot->lock, "depot");
 
-	depot->stores = (depot_cpu_store *)internal_alloc(sizeof(depot_cpu_store)
-		* smp_get_num_cpus(), flags);
+	depot->stores = (depot_cpu_store*)slab_internal_alloc(
+		sizeof(depot_cpu_store) * smp_get_num_cpus(), flags);
 	if (depot->stores == NULL) {
 		recursive_lock_destroy(&depot->lock);
 		return B_NO_MEMORY;
@@ -166,6 +176,7 @@ object_depot_init(object_depot *depot, uint32 flags,
 		depot->stores[i].loaded = depot->stores[i].previous = NULL;
 	}
 
+	depot->cookie = cookie;
 	depot->return_object = return_object;
 
 	return B_OK;
@@ -173,7 +184,7 @@ object_depot_init(object_depot *depot, uint32 flags,
 
 
 void
-object_depot_destroy(object_depot *depot)
+object_depot_destroy(object_depot* depot)
 {
 	object_depot_make_empty(depot);
 
@@ -181,14 +192,14 @@ object_depot_destroy(object_depot *depot)
 		recursive_lock_destroy(&depot->stores[i].lock);
 	}
 
-	internal_free(depot->stores);
+	slab_internal_free(depot->stores);
 
 	recursive_lock_destroy(&depot->lock);
 }
 
 
-static void *
-object_depot_obtain_from_store(object_depot *depot, depot_cpu_store *store)
+static void*
+object_depot_obtain_from_store(object_depot* depot, depot_cpu_store* store)
 {
 	RecursiveLocker _(store->lock);
 
@@ -203,21 +214,22 @@ object_depot_obtain_from_store(object_depot *depot, depot_cpu_store *store)
 		return NULL;
 
 	while (true) {
-		if (!is_magazine_empty(store->loaded))
-			return pop_magazine(store->loaded);
+		if (!store->loaded->IsEmpty())
+			return store->loaded->Pop();
 
-		if (store->previous && (is_magazine_full(store->previous)
-				|| exchange_with_full(depot, store->previous)))
+		if (store->previous
+			&& (store->previous->IsFull()
+				|| exchange_with_full(depot, store->previous))) {
 			std::swap(store->previous, store->loaded);
-		else
+		} else
 			return NULL;
 	}
 }
 
 
 static int
-object_depot_return_to_store(object_depot *depot, depot_cpu_store *store,
-	void *object)
+object_depot_return_to_store(object_depot* depot, depot_cpu_store* store,
+	void* object)
 {
 	RecursiveLocker _(store->lock);
 
@@ -227,10 +239,10 @@ object_depot_return_to_store(object_depot *depot, depot_cpu_store *store,
 	// we return the object directly to the slab.
 
 	while (true) {
-		if (store->loaded && push_magazine(store->loaded, object))
+		if (store->loaded && store->loaded->Push(object))
 			return 1;
 
-		if ((store->previous && is_magazine_empty(store->previous))
+		if ((store->previous && store->previous->IsEmpty())
 			|| exchange_with_empty(depot, store->previous))
 			std::swap(store->loaded, store->previous);
 		else
@@ -239,15 +251,15 @@ object_depot_return_to_store(object_depot *depot, depot_cpu_store *store,
 }
 
 
-void *
-object_depot_obtain(object_depot *depot)
+void*
+object_depot_obtain(object_depot* depot)
 {
 	return object_depot_obtain_from_store(depot, object_depot_cpu(depot));
 }
 
 
 int
-object_depot_store(object_depot *depot, void *object)
+object_depot_store(object_depot* depot, void* object)
 {
 	return object_depot_return_to_store(depot, object_depot_cpu(depot),
 		object);
@@ -255,10 +267,10 @@ object_depot_store(object_depot *depot, void *object)
 
 
 void
-object_depot_make_empty(object_depot *depot)
+object_depot_make_empty(object_depot* depot)
 {
 	for (int i = 0; i < smp_get_num_cpus(); i++) {
-		depot_cpu_store *store = &depot->stores[i];
+		depot_cpu_store* store = &depot->stores[i];
 
 		RecursiveLocker _(store->lock);
 
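Each CPU store above keeps two magazines: objects are popped from loaded, and when it runs dry the code tries to bring previous into play, refilling it from the depot's full list via exchange_with_full() before giving up and falling back to the slab. A simplified, host-side toy model of that swap (all types here are stand-ins, not the kernel's):

#include <cstdio>
#include <utility>

struct Magazine {
	void*	rounds[4];
	int		current_round = 0;

	bool IsEmpty() const { return current_round == 0; }
	void* Pop() { return rounds[--current_round]; }
};

int
main()
{
	Magazine full = { { (void*)1, (void*)2, (void*)3, (void*)4 }, 4 };
	Magazine drained;
	Magazine* loaded = &drained;	// the current magazine is exhausted
	Magazine* previous = &full;		// but the stand-by one has rounds left

	// Simplified version of the recovery in the kernel's obtain loop:
	// swap the magazines and retry the pop.
	if (loaded->IsEmpty() && previous != NULL && !previous->IsEmpty()) {
		std::swap(loaded, previous);
		printf("popped round %p after swap\n", loaded->Pop());
	}
	return 0;
}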
(one file's diff suppressed because it is too large)
src/system/kernel/slab/SmallObjectCache.cpp (new file, 69 lines)
@@ -0,0 +1,69 @@
/*
 * Copyright 2008, Axel Dörfler. All Rights Reserved.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 *
 * Distributed under the terms of the MIT License.
 */


#include "SmallObjectCache.h"

#include "slab_private.h"


/*static*/ SmallObjectCache*
SmallObjectCache::Create(const char* name, size_t object_size,
	size_t alignment, size_t maximum, uint32 flags, void* cookie,
	object_cache_constructor constructor, object_cache_destructor destructor,
	object_cache_reclaimer reclaimer)
{
	void* buffer = slab_internal_alloc(sizeof(SmallObjectCache), flags);
	if (buffer == NULL)
		return NULL;

	SmallObjectCache* cache = new(buffer) SmallObjectCache();

	if (cache->Init(name, object_size, alignment, maximum,
			flags | CACHE_ALIGN_ON_SIZE, cookie, constructor, destructor,
			reclaimer) != B_OK) {
		cache->Delete();
		return NULL;
	}

	if ((flags & CACHE_LARGE_SLAB) != 0)
		cache->slab_size = max_c(16 * B_PAGE_SIZE, 1024 * object_size);
	else
		cache->slab_size = B_PAGE_SIZE;

	return cache;
}


slab*
SmallObjectCache::CreateSlab(uint32 flags, bool unlockWhileAllocating)
{
	if (!check_cache_quota(this))
		return NULL;

	void* pages;
	if ((this->*allocate_pages)(&pages, flags, unlockWhileAllocating) != B_OK)
		return NULL;

	return InitSlab(slab_in_pages(pages, slab_size), pages,
		slab_size - sizeof(slab));
}


void
SmallObjectCache::ReturnSlab(slab* slab)
{
	UninitSlab(slab);
	(this->*free_pages)(slab->pages);
}


slab*
SmallObjectCache::ObjectSlab(void* object) const
{
	return slab_in_pages(lower_boundary(object, slab_size), slab_size);
}
src/system/kernel/slab/SmallObjectCache.h (new file, 29 lines)
@@ -0,0 +1,29 @@
/*
 * Copyright 2008, Axel Dörfler. All Rights Reserved.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 *
 * Distributed under the terms of the MIT License.
 */
#ifndef SMALL_OBJECT_CACHE_H
#define SMALL_OBJECT_CACHE_H


#include "ObjectCache.h"


struct SmallObjectCache : ObjectCache {
	static	SmallObjectCache*	Create(const char* name, size_t object_size,
									size_t alignment, size_t maximum,
									uint32 flags, void* cookie,
									object_cache_constructor constructor,
									object_cache_destructor destructor,
									object_cache_reclaimer reclaimer);

	virtual	slab*				CreateSlab(uint32 flags,
									bool unlockWhileAllocating);
	virtual	void				ReturnSlab(slab* slab);
	virtual	slab*				ObjectSlab(void* object) const;
};


#endif	// SMALL_OBJECT_CACHE_H
@@ -12,14 +12,27 @@
 #include <stddef.h>
 
 
-void* internal_alloc(size_t size, uint32 flags);
-void internal_free(void *_buffer);
+//#define TRACE_SLAB
+#ifdef TRACE_SLAB
+#define TRACE_CACHE(cache, format, args...) \
+	dprintf("Cache[%p, %s] " format "\n", cache, cache->name , ##args)
+#else
+#define TRACE_CACHE(cache, format, bananas...) do { } while (0)
+#endif
 
-void* block_alloc(size_t size);
-void block_free(void *block);
-void block_allocator_init_boot();
-void block_allocator_init_rest();
+#define COMPONENT_PARANOIA_LEVEL OBJECT_CACHE_PARANOIA
+#include <debug_paranoia.h>
+
+struct ObjectCache;
+
+void* slab_internal_alloc(size_t size, uint32 flags);
+void slab_internal_free(void *_buffer);
+
+void* block_alloc(size_t size);
+void block_free(void *block);
+void block_allocator_init_boot();
+void block_allocator_init_rest();
 
 
 template<typename Type>
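TRACE_CACHE() above is the slab debugging hook: with TRACE_SLAB defined it prefixes each message with the cache pointer and name, otherwise it compiles to nothing. For example (the cache variable is hypothetical):

// With TRACE_SLAB defined, a call such as
//
//	TRACE_CACHE(cache, "allocate pages (%lu, 0x0%lx)", slab_size, flags);
//
// expands to
//
//	dprintf("Cache[%p, %s] allocate pages (%lu, 0x0%lx)\n", cache,
//		cache->name, slab_size, flags);
//
// while with TRACE_SLAB undefined it expands to do { } while (0).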
@@ -3215,7 +3215,7 @@ vm_init(kernel_args* args)
 	TRACE(("heap at 0x%lx\n", heapBase));
 	heap_init(heapBase, heapSize);
 
-	size_t slabInitialSize = args->num_cpus * 2 * B_PAGE_SIZE;
+	size_t slabInitialSize = args->num_cpus * 3 * B_PAGE_SIZE;
 	addr_t slabInitialBase = vm_allocate_early(args, slabInitialSize,
 		slabInitialSize, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
 	slab_init(args, slabInitialBase, slabInitialSize);