diff --git a/headers/private/kernel/slab/ObjectDepot.h b/headers/private/kernel/slab/ObjectDepot.h index be3f7762e6..4eca569691 100644 --- a/headers/private/kernel/slab/ObjectDepot.h +++ b/headers/private/kernel/slab/ObjectDepot.h @@ -10,13 +10,19 @@ #include -typedef struct object_depot { - recursive_lock lock; - struct depot_magazine *full, *empty; - size_t full_count, empty_count; - struct depot_cpu_store *stores; +struct DepotMagazine; - void (*return_object)(struct object_depot *depot, void *object); +typedef struct object_depot { + recursive_lock lock; + DepotMagazine* full; + DepotMagazine* empty; + size_t full_count; + size_t empty_count; + struct depot_cpu_store* stores; + + void* cookie; + void (*return_object)(struct object_depot* depot, void* cookie, + void* object); } object_depot; @@ -24,14 +30,14 @@ typedef struct object_depot { extern "C" { #endif -status_t object_depot_init(object_depot *depot, uint32 flags, - void (*returnObject)(object_depot *, void *)); -void object_depot_destroy(object_depot *depot); +status_t object_depot_init(object_depot* depot, uint32 flags, void *cookie, + void (*returnObject)(object_depot* depot, void* cookie, void* object)); +void object_depot_destroy(object_depot* depot); -void *object_depot_obtain(object_depot *depot); -int object_depot_store(object_depot *depot, void *object); +void* object_depot_obtain(object_depot* depot); +int object_depot_store(object_depot* depot, void* object); -void object_depot_make_empty(object_depot *depot); +void object_depot_make_empty(object_depot* depot); #ifdef __cplusplus } diff --git a/headers/private/kernel/slab/Slab.h b/headers/private/kernel/slab/Slab.h index 8b32426281..a0b1febc93 100644 --- a/headers/private/kernel/slab/Slab.h +++ b/headers/private/kernel/slab/Slab.h @@ -26,37 +26,38 @@ enum { CACHE_DURING_BOOT = 1 << 31 }; -typedef struct object_cache object_cache; +struct ObjectCache; +typedef struct ObjectCache object_cache; -typedef status_t (*object_cache_constructor)(void *cookie, void *object); -typedef void (*object_cache_destructor)(void *cookie, void *object); -typedef void (*object_cache_reclaimer)(void *cookie, int32 level); +typedef status_t (*object_cache_constructor)(void* cookie, void* object); +typedef void (*object_cache_destructor)(void* cookie, void* object); +typedef void (*object_cache_reclaimer)(void* cookie, int32 level); #ifdef __cplusplus extern "C" { #endif -object_cache *create_object_cache(const char *name, size_t object_size, - size_t alignment, void *cookie, object_cache_constructor constructor, +object_cache* create_object_cache(const char* name, size_t object_size, + size_t alignment, void* cookie, object_cache_constructor constructor, object_cache_destructor); -object_cache *create_object_cache_etc(const char *name, size_t object_size, - size_t alignment, size_t max_byte_usage, uint32 flags, void *cookie, +object_cache* create_object_cache_etc(const char* name, size_t object_size, + size_t alignment, size_t max_byte_usage, uint32 flags, void* cookie, object_cache_constructor constructor, object_cache_destructor destructor, object_cache_reclaimer reclaimer); -void delete_object_cache(object_cache *cache); +void delete_object_cache(object_cache* cache); -status_t object_cache_set_minimum_reserve(object_cache *cache, +status_t object_cache_set_minimum_reserve(object_cache* cache, size_t objectCount); -void *object_cache_alloc(object_cache *cache, uint32 flags); -void object_cache_free(object_cache *cache, void *object); +void* object_cache_alloc(object_cache* cache, uint32 
flags); +void object_cache_free(object_cache* cache, void* object); -status_t object_cache_reserve(object_cache *cache, size_t object_count, +status_t object_cache_reserve(object_cache* cache, size_t object_count, uint32 flags); -void object_cache_get_usage(object_cache *cache, size_t *_allocatedMemory); +void object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory); #ifdef __cplusplus } diff --git a/src/system/kernel/slab/HashedObjectCache.cpp b/src/system/kernel/slab/HashedObjectCache.cpp new file mode 100644 index 0000000000..28e60aa78c --- /dev/null +++ b/src/system/kernel/slab/HashedObjectCache.cpp @@ -0,0 +1,176 @@ +/* + * Copyright 2008, Axel Dörfler. All Rights Reserved. + * Copyright 2007, Hugo Santos. All Rights Reserved. + * + * Distributed under the terms of the MIT License. + */ + + +#include "HashedObjectCache.h" + +#include "slab_private.h" + + +static inline int +__fls0(size_t value) +{ + if (value == 0) + return -1; + + int bit; + for (bit = 0; value != 1; bit++) + value >>= 1; + return bit; +} + + +static slab* +allocate_slab(uint32 flags) +{ + return (slab*)slab_internal_alloc(sizeof(slab), flags); +} + + +static void +free_slab(slab* slab) +{ + slab_internal_free(slab); +} + + +// #pragma mark - + + +HashedObjectCache::HashedObjectCache() + : + hash_table(this) +{ +} + + +/*static*/ HashedObjectCache* +HashedObjectCache::Create(const char* name, size_t object_size, + size_t alignment, size_t maximum, uint32 flags, void* cookie, + object_cache_constructor constructor, object_cache_destructor destructor, + object_cache_reclaimer reclaimer) +{ + void* buffer = slab_internal_alloc(sizeof(HashedObjectCache), flags); + if (buffer == NULL) + return NULL; + + HashedObjectCache* cache = new(buffer) HashedObjectCache(); + + if (cache->Init(name, object_size, alignment, maximum, flags, cookie, + constructor, destructor, reclaimer) != B_OK) { + cache->Delete(); + return NULL; + } + + if ((flags & CACHE_LARGE_SLAB) != 0) + cache->slab_size = max_c(256 * B_PAGE_SIZE, 128 * object_size); + else + cache->slab_size = max_c(16 * B_PAGE_SIZE, 8 * object_size); + cache->lower_boundary = __fls0(cache->object_size); + + return cache; +} + + +slab* +HashedObjectCache::CreateSlab(uint32 flags, bool unlockWhileAllocating) +{ + if (!check_cache_quota(this)) + return NULL; + + if (unlockWhileAllocating) + Unlock(); + + slab* slab = allocate_slab(flags); + + if (unlockWhileAllocating) + Lock(); + + if (slab == NULL) + return NULL; + + void* pages; + if ((this->*allocate_pages)(&pages, flags, unlockWhileAllocating) == B_OK) { + if (InitSlab(slab, pages, slab_size)) + return slab; + + (this->*free_pages)(pages); + } + + free_slab(slab); + return NULL; +} + + +void +HashedObjectCache::ReturnSlab(slab* slab) +{ + UninitSlab(slab); + (this->*free_pages)(slab->pages); +} + + +slab* +HashedObjectCache::ObjectSlab(void* object) const +{ + Link* link = hash_table.Lookup(object); + if (link == NULL) { + panic("object cache: requested object %p missing from hash table", + object); + return NULL; + } + return link->parent; +} + + +status_t +HashedObjectCache::PrepareObject(slab* source, void* object) +{ + Link* link = _AllocateLink(CACHE_DONT_SLEEP); + if (link == NULL) + return B_NO_MEMORY; + + link->buffer = object; + link->parent = source; + + hash_table.Insert(link); + return B_OK; +} + + +void +HashedObjectCache::UnprepareObject(slab* source, void* object) +{ + Link* link = hash_table.Lookup(object); + if (link == NULL) { + panic("object cache: requested object missing from hash table"); + 
return; + } + + if (link->parent != source) { + panic("object cache: slab mismatch"); + return; + } + + hash_table.Remove(link); + _FreeLink(link); +} + + +/*static*/ inline HashedObjectCache::Link* +HashedObjectCache::_AllocateLink(uint32 flags) +{ + return (HashedObjectCache::Link*) + slab_internal_alloc(sizeof(HashedObjectCache::Link), flags); +} + + +/*static*/ inline void +HashedObjectCache::_FreeLink(HashedObjectCache::Link* link) +{ + slab_internal_free(link); +} diff --git a/src/system/kernel/slab/HashedObjectCache.h b/src/system/kernel/slab/HashedObjectCache.h new file mode 100644 index 0000000000..b82d4f7789 --- /dev/null +++ b/src/system/kernel/slab/HashedObjectCache.h @@ -0,0 +1,95 @@ +/* + * Copyright 2008, Axel Dörfler. All Rights Reserved. + * Copyright 2007, Hugo Santos. All Rights Reserved. + * + * Distributed under the terms of the MIT License. + */ +#ifndef HASHED_OBJECT_CACHE_H +#define HASHED_OBJECT_CACHE_H + + +#include + +#include "ObjectCache.h" + + +struct HashedObjectCache : ObjectCache { + HashedObjectCache(); + + static HashedObjectCache* Create(const char* name, size_t object_size, + size_t alignment, size_t maximum, + uint32 flags, void* cookie, + object_cache_constructor constructor, + object_cache_destructor destructor, + object_cache_reclaimer reclaimer); + + virtual slab* CreateSlab(uint32 flags, + bool unlockWhileAllocating); + virtual void ReturnSlab(slab* slab); + virtual slab* ObjectSlab(void* object) const; + + virtual status_t PrepareObject(slab* source, void* object); + virtual void UnprepareObject(slab* source, void* object); + +private: + struct Link { + const void* buffer; + slab* parent; + Link* next; + }; + + struct Definition { + typedef HashedObjectCache ParentType; + typedef const void* KeyType; + typedef Link ValueType; + + Definition(HashedObjectCache* parent) + : + parent(parent) + { + } + + Definition(const Definition& definition) + : + parent(definition.parent) + { + } + + size_t HashKey(const void* key) const + { + return (((const uint8*)key) - ((const uint8*)0)) + >> parent->lower_boundary; + } + + size_t Hash(Link* value) const + { + return HashKey(value->buffer); + } + + bool Compare(const void* key, Link* value) const + { + return value->buffer == key; + } + + Link*& GetLink(Link* value) const + { + return value->next; + } + + HashedObjectCache* parent; + }; + + typedef BOpenHashTable HashTable; + +private: + static Link* _AllocateLink(uint32 flags); + static void _FreeLink(HashedObjectCache::Link* link); + +private: + HashTable hash_table; + size_t lower_boundary; +}; + + + +#endif // HASHED_OBJECT_CACHE_H diff --git a/src/system/kernel/slab/Jamfile b/src/system/kernel/slab/Jamfile index 0eec9c1035..fc18fa75e1 100644 --- a/src/system/kernel/slab/Jamfile +++ b/src/system/kernel/slab/Jamfile @@ -3,8 +3,11 @@ SubDir HAIKU_TOP src system kernel slab ; KernelMergeObject kernel_slab.o : allocator.cpp + HashedObjectCache.cpp + ObjectCache.cpp ObjectDepot.cpp Slab.cpp + SmallObjectCache.cpp : $(TARGET_KERNEL_PIC_CCFLAGS) ; diff --git a/src/system/kernel/slab/ObjectCache.cpp b/src/system/kernel/slab/ObjectCache.cpp new file mode 100644 index 0000000000..87cc221bcb --- /dev/null +++ b/src/system/kernel/slab/ObjectCache.cpp @@ -0,0 +1,381 @@ +/* + * Copyright 2008, Axel Dörfler. All Rights Reserved. + * Copyright 2007, Hugo Santos. All Rights Reserved. + * + * Distributed under the terms of the MIT License. 
+ */ + + +#include "ObjectCache.h" + +#include + +#include "slab_private.h" +#include +#include + + +static const size_t kCacheColorPeriod = 8; + +kernel_args* ObjectCache::sKernelArgs = NULL; + + +static void +object_cache_commit_slab(ObjectCache* cache, slab* slab) +{ + void* pages = (void*)ROUNDDOWN((addr_t)slab->pages, B_PAGE_SIZE); + if (create_area(cache->name, &pages, B_EXACT_ADDRESS, cache->slab_size, + B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA) < 0) + panic("failed to create_area()"); +} + + +static void +object_cache_return_object_wrapper(object_depot* depot, void* cookie, + void* object) +{ + object_cache_free((ObjectCache*)cookie, object); +} + + +// #pragma mark - + + +ObjectCache::~ObjectCache() +{ +} + + +status_t +ObjectCache::Init(const char* name, size_t objectSize, + size_t alignment, size_t maximum, uint32 flags, void* cookie, + object_cache_constructor constructor, object_cache_destructor destructor, + object_cache_reclaimer reclaimer) +{ + strlcpy(this->name, name, sizeof(this->name)); + + mutex_init(&lock, this->name); + + if (objectSize < sizeof(object_link)) + objectSize = sizeof(object_link); + + if (alignment > 0 && (objectSize & (alignment - 1))) + object_size = objectSize + alignment - (objectSize & (alignment - 1)); + else + object_size = objectSize; + + TRACE_CACHE(this, "init %lu, %lu -> %lu", objectSize, alignment, + object_size); + + cache_color_cycle = 0; + total_objects = 0; + used_count = 0; + empty_count = 0; + pressure = 0; + min_object_reserve = 0; + + usage = 0; + this->maximum = maximum; + + this->flags = flags; + + resize_request = NULL; + + // TODO: depot destruction is obviously broken + // no gain in using the depot in single cpu setups + //if (smp_get_num_cpus() == 1) + this->flags |= CACHE_NO_DEPOT; + + if (!(this->flags & CACHE_NO_DEPOT)) { + status_t status = object_depot_init(&depot, flags, this, + object_cache_return_object_wrapper); + if (status < B_OK) { + mutex_destroy(&lock); + return status; + } + } + + this->cookie = cookie; + this->constructor = constructor; + this->destructor = destructor; + this->reclaimer = reclaimer; + + if (this->flags & CACHE_DURING_BOOT) { + allocate_pages = &ObjectCache::EarlyAllocatePages; + free_pages = &ObjectCache::EarlyFreePages; + } else { + allocate_pages = &ObjectCache::AllocatePages; + free_pages = &ObjectCache::FreePages; + } + + return B_OK; +} + + +void +ObjectCache::InitPostArea() +{ + if (allocate_pages != &ObjectCache::EarlyAllocatePages) + return; + + SlabList::Iterator it = full.GetIterator(); + while (it.HasNext()) + object_cache_commit_slab(this, it.Next()); + + it = partial.GetIterator(); + while (it.HasNext()) + object_cache_commit_slab(this, it.Next()); + + it = empty.GetIterator(); + while (it.HasNext()) + object_cache_commit_slab(this, it.Next()); + + allocate_pages = &ObjectCache::AllocatePages; + free_pages = &ObjectCache::FreePages; +} + + +void +ObjectCache::Delete() +{ + this->~ObjectCache(); + slab_internal_free(this); +} + + +slab* +ObjectCache::InitSlab(slab* slab, void* pages, size_t byteCount) +{ + TRACE_CACHE(this, "construct (%p, %p .. 
%p, %lu)", slab, pages, + ((uint8*)pages) + byteCount, byteCount); + + slab->pages = pages; + slab->count = slab->size = byteCount / object_size; + slab->free = NULL; + total_objects += slab->size; + + size_t spareBytes = byteCount - (slab->size * object_size); + slab->offset = cache_color_cycle; + + if (slab->offset > spareBytes) + cache_color_cycle = slab->offset = 0; + else + cache_color_cycle += kCacheColorPeriod; + + TRACE_CACHE(this, " %lu objects, %lu spare bytes, offset %lu", + slab->size, spareBytes, slab->offset); + + uint8* data = ((uint8*)pages) + slab->offset; + + CREATE_PARANOIA_CHECK_SET(slab, "slab"); + + for (size_t i = 0; i < slab->size; i++) { + bool failedOnFirst = false; + + status_t status = PrepareObject(slab, data); + if (status < B_OK) + failedOnFirst = true; + else if (constructor) + status = constructor(cookie, data); + + if (status < B_OK) { + if (!failedOnFirst) + UnprepareObject(slab, data); + + data = ((uint8*)pages) + slab->offset; + for (size_t j = 0; j < i; j++) { + if (destructor) + destructor(cookie, data); + UnprepareObject(slab, data); + data += object_size; + } + + DELETE_PARANOIA_CHECK_SET(slab); + + return NULL; + } + + _push(slab->free, object_to_link(data, object_size)); + + ADD_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, slab, + &object_to_link(data, object_size)->next, sizeof(void*)); + + data += object_size; + } + + return slab; +} + + +void +ObjectCache::UninitSlab(slab* slab) +{ + TRACE_CACHE(this, "destruct %p", slab); + + if (slab->count != slab->size) + panic("cache: destroying a slab which isn't empty."); + + total_objects -= slab->size; + + DELETE_PARANOIA_CHECK_SET(slab); + + uint8* data = ((uint8*)slab->pages) + slab->offset; + + for (size_t i = 0; i < slab->size; i++) { + if (destructor) + destructor(cookie, data); + UnprepareObject(slab, data); + data += object_size; + } +} + + +status_t +ObjectCache::PrepareObject(slab* source, void* object) +{ + return B_OK; +} + + +void +ObjectCache::UnprepareObject(slab* source, void* object) +{ +} + + +void +ObjectCache::ReturnObjectToSlab(slab* source, void* object) +{ + if (source == NULL) { + panic("object_cache: free'd object has no slab"); + return; + } + + ParanoiaChecker _(source); + + object_link* link = object_to_link(object, object_size); + + TRACE_CACHE(this, "returning %p (%p) to %p, %lu used (%lu empty slabs).", + object, link, source, source->size - source->count, + empty_count); + + _push(source->free, link); + source->count++; + used_count--; + + ADD_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next, sizeof(void*)); + + if (source->count == source->size) { + partial.Remove(source); + + if (empty_count < pressure + && total_objects - used_count - source->size + >= min_object_reserve) { + empty_count++; + empty.Add(source); + } else { + ReturnSlab(source); + } + } else if (source->count == 1) { + full.Remove(source); + partial.Add(source); + } +} + + +/*static*/ void +ObjectCache::SetKernelArgs(kernel_args* args) +{ + sKernelArgs = args; +} + + +status_t +ObjectCache::AllocatePages(void** pages, uint32 flags, + bool unlockWhileAllocating) +{ + TRACE_CACHE(cache, "allocate pages (%lu, 0x0%lx)", slab_size, flags); + + uint32 lock = B_FULL_LOCK; + if (this->flags & CACHE_UNLOCKED_PAGES) + lock = B_NO_LOCK; + + uint32 addressSpec = B_ANY_KERNEL_ADDRESS; + if ((this->flags & CACHE_ALIGN_ON_SIZE) != 0 + && slab_size != B_PAGE_SIZE) + addressSpec = B_ANY_KERNEL_BLOCK_ADDRESS; + + if (unlockWhileAllocating) + Unlock(); + + // if we are allocating, it is because we need the pages immediatly + 
// so we lock them. when moving the slab to the empty list we should + // unlock them, and lock them again when getting one from the empty list. + area_id areaId = create_area_etc(VMAddressSpace::KernelID(), + name, pages, addressSpec, slab_size, lock, + B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, + (flags & CACHE_DONT_SLEEP) != 0 ? CREATE_AREA_DONT_WAIT : 0); + + if (unlockWhileAllocating) + Lock(); + + if (areaId < 0) + return areaId; + + usage += slab_size; + + TRACE_CACHE(this, " ... = { %ld, %p }", areaId, *pages); + + return B_OK; +} + + +void +ObjectCache::FreePages(void* pages) +{ + area_id id = area_for(pages); + + TRACE_CACHE(this, "delete pages %p (%ld)", pages, id); + + if (id < 0) { + panic("object cache: freeing unknown area"); + return; + } + + delete_area(id); + + usage -= slab_size; +} + + +status_t +ObjectCache::EarlyAllocatePages(void** pages, uint32 flags, + bool unlockWhileAllocating) +{ + TRACE_CACHE(this, "early allocate pages (%lu, 0x0%lx)", slab_size, + flags); + + if (unlockWhileAllocating) + Unlock(); + + addr_t base = vm_allocate_early(sKernelArgs, slab_size, + slab_size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA); + + if (unlockWhileAllocating) + Lock(); + + *pages = (void*)base; + + usage += slab_size; + + TRACE_CACHE(this, " ... = { %p }", *pages); + + return B_OK; +} + + +void +ObjectCache::EarlyFreePages(void* pages) +{ + panic("memory pressure on bootup?"); +} diff --git a/src/system/kernel/slab/ObjectCache.h b/src/system/kernel/slab/ObjectCache.h new file mode 100644 index 0000000000..31f8378b0d --- /dev/null +++ b/src/system/kernel/slab/ObjectCache.h @@ -0,0 +1,149 @@ +/* + * Copyright 2008, Axel Dörfler. All Rights Reserved. + * Copyright 2007, Hugo Santos. All Rights Reserved. + * + * Distributed under the terms of the MIT License. 
+ */ +#ifndef OBJECT_CACHE_H +#define OBJECT_CACHE_H + + +#include +#include +#include +#include + + +struct ResizeRequest; + + +struct object_link { + struct object_link* next; +}; + +struct slab : DoublyLinkedListLinkImpl { + void* pages; + size_t size; // total number of objects + size_t count; // free objects + size_t offset; + object_link* free; +}; + +typedef DoublyLinkedList SlabList; + +struct ObjectCache : DoublyLinkedListLinkImpl { + char name[32]; + mutex lock; + size_t object_size; + size_t cache_color_cycle; + SlabList empty; + SlabList partial; + SlabList full; + size_t total_objects; // total number of objects + size_t used_count; // used objects + size_t empty_count; // empty slabs + size_t pressure; + size_t min_object_reserve; + // minimum number of free objects + + size_t slab_size; + size_t usage; + size_t maximum; + uint32 flags; + + ResizeRequest* resize_request; + + void* cookie; + object_cache_constructor constructor; + object_cache_destructor destructor; + object_cache_reclaimer reclaimer; + + status_t (ObjectCache::*allocate_pages)(void** pages, + uint32 flags, bool unlockWhileAllocating); + void (ObjectCache::*free_pages)(void* pages); + + object_depot depot; + +public: + virtual ~ObjectCache(); + + status_t Init(const char* name, size_t objectSize, + size_t alignment, size_t maximum, + uint32 flags, void* cookie, + object_cache_constructor constructor, + object_cache_destructor destructor, + object_cache_reclaimer reclaimer); + void InitPostArea(); + void Delete(); + + virtual slab* CreateSlab(uint32 flags, + bool unlockWhileAllocating) = 0; + virtual void ReturnSlab(slab* slab) = 0; + virtual slab* ObjectSlab(void* object) const = 0; + + slab* InitSlab(slab* slab, void* pages, + size_t byteCount); + void UninitSlab(slab* slab); + + virtual status_t PrepareObject(slab* source, void* object); + virtual void UnprepareObject(slab* source, void* object); + + void ReturnObjectToSlab(slab* source, void* object); + + bool Lock() { return mutex_lock(&lock) == B_OK; } + void Unlock() { mutex_unlock(&lock); } + + static void SetKernelArgs(kernel_args* args); + status_t AllocatePages(void** pages, uint32 flags, + bool unlockWhileAllocating); + void FreePages(void* pages); + status_t EarlyAllocatePages(void** pages, uint32 flags, + bool unlockWhileAllocating); + void EarlyFreePages(void* pages); + +private: + static kernel_args* sKernelArgs; +}; + + +static inline void* +link_to_object(object_link* link, size_t objectSize) +{ + return ((uint8*)link) - (objectSize - sizeof(object_link)); +} + + +static inline object_link* +object_to_link(void* object, size_t objectSize) +{ + return (object_link*)(((uint8*)object) + + (objectSize - sizeof(object_link))); +} + + +static inline slab * +slab_in_pages(const void *pages, size_t slab_size) +{ + return (slab *)(((uint8 *)pages) + slab_size - sizeof(slab)); +} + + +static inline const void * +lower_boundary(void *object, size_t byteCount) +{ + const uint8 *null = (uint8 *)NULL; + return null + ((((uint8 *)object) - null) & ~(byteCount - 1)); +} + + +static inline bool +check_cache_quota(ObjectCache *cache) +{ + if (cache->maximum == 0) + return true; + + return (cache->usage + cache->slab_size) <= cache->maximum; +} + + +#endif // OBJECT_CACHE_H diff --git a/src/system/kernel/slab/ObjectDepot.cpp b/src/system/kernel/slab/ObjectDepot.cpp index 9b5e1a79a0..5204dfad5f 100644 --- a/src/system/kernel/slab/ObjectDepot.cpp +++ b/src/system/kernel/slab/ObjectDepot.cpp @@ -19,55 +19,65 @@ static const int kMagazineCapacity = 32; // TODO: 
Should be dynamically tuned per cache. -struct depot_magazine { - struct depot_magazine *next; - uint16 current_round, round_count; - void *rounds[0]; +struct DepotMagazine { + DepotMagazine* next; + uint16 current_round; + uint16 round_count; + void* rounds[0]; + +public: + inline bool IsEmpty() const; + inline bool IsFull() const; + + inline void* Pop(); + inline bool Push(void* object); }; struct depot_cpu_store { - recursive_lock lock; - struct depot_magazine *loaded, *previous; + recursive_lock lock; + DepotMagazine* loaded; + DepotMagazine* previous; }; -static inline bool -is_magazine_empty(depot_magazine *magazine) +bool +DepotMagazine::IsEmpty() const { - return magazine->current_round == 0; + return current_round == 0; } -static inline bool -is_magazine_full(depot_magazine *magazine) +bool +DepotMagazine::IsFull() const { - return magazine->current_round == magazine->round_count; + return current_round == round_count; } -static inline void * -pop_magazine(depot_magazine *magazine) +void* +DepotMagazine::Pop() { - return magazine->rounds[--magazine->current_round]; + return rounds[--current_round]; } -static inline bool -push_magazine(depot_magazine *magazine, void *object) +bool +DepotMagazine::Push(void* object) { - if (is_magazine_full(magazine)) + if (IsFull()) return false; - magazine->rounds[magazine->current_round++] = object; + + rounds[current_round++] = object; return true; } -static depot_magazine * +static DepotMagazine* alloc_magazine() { - depot_magazine *magazine = (depot_magazine *)internal_alloc( - sizeof(depot_magazine) + kMagazineCapacity * sizeof(void *), 0); + DepotMagazine* magazine = (DepotMagazine*)slab_internal_alloc( + sizeof(DepotMagazine) + kMagazineCapacity * sizeof(void*), 0); if (magazine) { magazine->next = NULL; magazine->current_round = 0; @@ -79,23 +89,23 @@ alloc_magazine() static void -free_magazine(depot_magazine *magazine) +free_magazine(DepotMagazine* magazine) { - internal_free(magazine); + slab_internal_free(magazine); } static void -empty_magazine(object_depot *depot, depot_magazine *magazine) +empty_magazine(object_depot* depot, DepotMagazine* magazine) { for (uint16 i = 0; i < magazine->current_round; i++) - depot->return_object(depot, magazine->rounds[i]); + depot->return_object(depot, depot->cookie, magazine->rounds[i]); free_magazine(magazine); } static bool -exchange_with_full(object_depot *depot, depot_magazine* &magazine) +exchange_with_full(object_depot* depot, DepotMagazine*& magazine) { RecursiveLocker _(depot->lock); @@ -112,7 +122,7 @@ exchange_with_full(object_depot *depot, depot_magazine* &magazine) static bool -exchange_with_empty(object_depot *depot, depot_magazine* &magazine) +exchange_with_empty(object_depot* depot, DepotMagazine*& magazine) { RecursiveLocker _(depot->lock); @@ -134,8 +144,8 @@ exchange_with_empty(object_depot *depot, depot_magazine* &magazine) } -static inline depot_cpu_store * -object_depot_cpu(object_depot *depot) +static inline depot_cpu_store* +object_depot_cpu(object_depot* depot) { return &depot->stores[smp_get_current_cpu()]; } @@ -145,8 +155,8 @@ object_depot_cpu(object_depot *depot) status_t -object_depot_init(object_depot *depot, uint32 flags, - void (*return_object)(object_depot *depot, void *object)) +object_depot_init(object_depot* depot, uint32 flags, void* cookie, + void (*return_object)(object_depot* depot, void* cookie, void* object)) { depot->full = NULL; depot->empty = NULL; @@ -154,8 +164,8 @@ object_depot_init(object_depot *depot, uint32 flags, recursive_lock_init(&depot->lock, 
"depot"); - depot->stores = (depot_cpu_store *)internal_alloc(sizeof(depot_cpu_store) - * smp_get_num_cpus(), flags); + depot->stores = (depot_cpu_store*)slab_internal_alloc( + sizeof(depot_cpu_store) * smp_get_num_cpus(), flags); if (depot->stores == NULL) { recursive_lock_destroy(&depot->lock); return B_NO_MEMORY; @@ -166,6 +176,7 @@ object_depot_init(object_depot *depot, uint32 flags, depot->stores[i].loaded = depot->stores[i].previous = NULL; } + depot->cookie = cookie; depot->return_object = return_object; return B_OK; @@ -173,7 +184,7 @@ object_depot_init(object_depot *depot, uint32 flags, void -object_depot_destroy(object_depot *depot) +object_depot_destroy(object_depot* depot) { object_depot_make_empty(depot); @@ -181,14 +192,14 @@ object_depot_destroy(object_depot *depot) recursive_lock_destroy(&depot->stores[i].lock); } - internal_free(depot->stores); + slab_internal_free(depot->stores); recursive_lock_destroy(&depot->lock); } -static void * -object_depot_obtain_from_store(object_depot *depot, depot_cpu_store *store) +static void* +object_depot_obtain_from_store(object_depot* depot, depot_cpu_store* store) { RecursiveLocker _(store->lock); @@ -203,21 +214,22 @@ object_depot_obtain_from_store(object_depot *depot, depot_cpu_store *store) return NULL; while (true) { - if (!is_magazine_empty(store->loaded)) - return pop_magazine(store->loaded); + if (!store->loaded->IsEmpty()) + return store->loaded->Pop(); - if (store->previous && (is_magazine_full(store->previous) - || exchange_with_full(depot, store->previous))) + if (store->previous + && (store->previous->IsFull() + || exchange_with_full(depot, store->previous))) { std::swap(store->previous, store->loaded); - else + } else return NULL; } } static int -object_depot_return_to_store(object_depot *depot, depot_cpu_store *store, - void *object) +object_depot_return_to_store(object_depot* depot, depot_cpu_store* store, + void* object) { RecursiveLocker _(store->lock); @@ -227,10 +239,10 @@ object_depot_return_to_store(object_depot *depot, depot_cpu_store *store, // we return the object directly to the slab. 
while (true) { - if (store->loaded && push_magazine(store->loaded, object)) + if (store->loaded && store->loaded->Push(object)) return 1; - if ((store->previous && is_magazine_empty(store->previous)) + if ((store->previous && store->previous->IsEmpty()) || exchange_with_empty(depot, store->previous)) std::swap(store->loaded, store->previous); else @@ -239,15 +251,15 @@ object_depot_return_to_store(object_depot *depot, depot_cpu_store *store, } -void * -object_depot_obtain(object_depot *depot) +void* +object_depot_obtain(object_depot* depot) { return object_depot_obtain_from_store(depot, object_depot_cpu(depot)); } int -object_depot_store(object_depot *depot, void *object) +object_depot_store(object_depot* depot, void* object) { return object_depot_return_to_store(depot, object_depot_cpu(depot), object); @@ -255,10 +267,10 @@ object_depot_store(object_depot *depot, void *object) void -object_depot_make_empty(object_depot *depot) +object_depot_make_empty(object_depot* depot) { for (int i = 0; i < smp_get_num_cpus(); i++) { - depot_cpu_store *store = &depot->stores[i]; + depot_cpu_store* store = &depot->stores[i]; RecursiveLocker _(store->lock); diff --git a/src/system/kernel/slab/Slab.cpp b/src/system/kernel/slab/Slab.cpp index d081ff65bd..aa6aeb6e09 100644 --- a/src/system/kernel/slab/Slab.cpp +++ b/src/system/kernel/slab/Slab.cpp @@ -24,145 +24,20 @@ #include #include #include -#include #include #include #include +#include "HashedObjectCache.h" #include "slab_private.h" +#include "SmallObjectCache.h" -//#define TRACE_SLAB - -#ifdef TRACE_SLAB -#define TRACE_CACHE(cache, format, args...) \ - dprintf("Cache[%p, %s] " format "\n", cache, cache->name , ##args) -#else -#define TRACE_CACHE(cache, format, bananas...) do { } while (0) -#endif - -#define COMPONENT_PARANOIA_LEVEL OBJECT_CACHE_PARANOIA -#include - - -static const size_t kCacheColorPeriod = 8; - -struct object_link { - struct object_link *next; -}; - -struct slab : DoublyLinkedListLinkImpl { - void *pages; - size_t size; // total number of objects - size_t count; // free objects - size_t offset; - object_link *free; -}; - -typedef DoublyLinkedList SlabList; -struct ResizeRequest; - -struct object_cache : DoublyLinkedListLinkImpl { - char name[32]; - mutex lock; - size_t object_size; - size_t cache_color_cycle; - SlabList empty, partial, full; - size_t total_objects; // total number of objects - size_t used_count; // used objects - size_t empty_count; // empty slabs - size_t pressure; - size_t min_object_reserve; // minimum number of free objects - - size_t slab_size; - size_t usage, maximum; - uint32 flags; - - ResizeRequest *resize_request; - - void *cookie; - object_cache_constructor constructor; - object_cache_destructor destructor; - object_cache_reclaimer reclaimer; - - status_t (*allocate_pages)(object_cache *cache, void **pages, - uint32 flags, bool unlockWhileAllocating); - void (*free_pages)(object_cache *cache, void *pages); - - object_depot depot; - - virtual slab *CreateSlab(uint32 flags, bool unlockWhileAllocating) = 0; - virtual void ReturnSlab(slab *slab) = 0; - virtual slab *ObjectSlab(void *object) const = 0; - - slab *InitSlab(slab *slab, void *pages, size_t byteCount); - void UninitSlab(slab *slab); - - virtual status_t PrepareObject(slab *source, void *object) { return B_OK; } - virtual void UnprepareObject(slab *source, void *object) {} - - virtual ~object_cache() {} - - bool Lock() { return mutex_lock(&lock) == B_OK; } - void Unlock() { mutex_unlock(&lock); } -}; - -typedef DoublyLinkedList ObjectCacheList; - 
-struct SmallObjectCache : object_cache { - slab *CreateSlab(uint32 flags, bool unlockWhileAllocating); - void ReturnSlab(slab *slab); - slab *ObjectSlab(void *object) const; -}; - - -struct HashedObjectCache : object_cache { - struct Link { - const void* buffer; - slab* parent; - Link* next; - }; - - struct Definition { - typedef HashedObjectCache ParentType; - typedef const void * KeyType; - typedef Link ValueType; - - Definition(HashedObjectCache *_parent) : parent(_parent) {} - Definition(const Definition& definition) : parent(definition.parent) {} - - size_t HashKey(const void *key) const - { - return (((const uint8 *)key) - ((const uint8 *)0)) - >> parent->lower_boundary; - } - - size_t Hash(Link *value) const { return HashKey(value->buffer); } - bool Compare(const void *key, Link *value) const - { return value->buffer == key; } - Link*& GetLink(Link *value) const { return value->next; } - - HashedObjectCache *parent; - }; - - typedef BOpenHashTable HashTable; - - HashedObjectCache() - : hash_table(this) {} - - slab *CreateSlab(uint32 flags, bool unlockWhileAllocating); - void ReturnSlab(slab *slab); - slab *ObjectSlab(void *object) const; - status_t PrepareObject(slab *source, void *object); - void UnprepareObject(slab *source, void *object); - - HashTable hash_table; - size_t lower_boundary; -}; +typedef DoublyLinkedList ObjectCacheList; struct ResizeRequest : DoublyLinkedListLinkImpl { - ResizeRequest(object_cache* cache) + ResizeRequest(ObjectCache* cache) : cache(cache), pending(false), @@ -170,7 +45,7 @@ struct ResizeRequest : DoublyLinkedListLinkImpl { { } - object_cache* cache; + ObjectCache* cache; bool pending; bool delete_when_done; }; @@ -181,11 +56,11 @@ typedef DoublyLinkedList ResizeRequestQueue; static ObjectCacheList sObjectCaches; static mutex sObjectCacheListLock = MUTEX_INITIALIZER("object cache list"); -static uint8 *sInitialBegin, *sInitialLimit, *sInitialPointer; -static kernel_args *sKernelArgs; +static uint8* sInitialBegin; +static uint8* sInitialLimit; +static uint8* sInitialPointer; - -static status_t object_cache_reserve_internal(object_cache *cache, +static status_t object_cache_reserve_internal(ObjectCache* cache, size_t object_count, uint32 flags, bool unlockWhileAllocating); static mutex sResizeRequestsLock @@ -201,22 +76,22 @@ namespace ObjectCacheTracing { class ObjectCacheTraceEntry : public AbstractTraceEntry { public: - ObjectCacheTraceEntry(object_cache* cache) + ObjectCacheTraceEntry(ObjectCache* cache) : fCache(cache) { } protected: - object_cache* fCache; + ObjectCache* fCache; }; class Create : public ObjectCacheTraceEntry { public: Create(const char* name, size_t objectSize, size_t alignment, - size_t maxByteUsage, uint32 flags, void *cookie, - object_cache *cache) + size_t maxByteUsage, uint32 flags, void* cookie, + ObjectCache* cache) : ObjectCacheTraceEntry(cache), fObjectSize(objectSize), @@ -249,7 +124,7 @@ class Create : public ObjectCacheTraceEntry { class Delete : public ObjectCacheTraceEntry { public: - Delete(object_cache *cache) + Delete(ObjectCache* cache) : ObjectCacheTraceEntry(cache) { @@ -265,7 +140,7 @@ class Delete : public ObjectCacheTraceEntry { class Alloc : public ObjectCacheTraceEntry { public: - Alloc(object_cache *cache, uint32 flags, void* object) + Alloc(ObjectCache* cache, uint32 flags, void* object) : ObjectCacheTraceEntry(cache), fFlags(flags), @@ -288,7 +163,7 @@ class Alloc : public ObjectCacheTraceEntry { class Free : public ObjectCacheTraceEntry { public: - Free(object_cache *cache, void* object) + 
Free(ObjectCache* cache, void* object) : ObjectCacheTraceEntry(cache), fObject(object) @@ -309,7 +184,7 @@ class Free : public ObjectCacheTraceEntry { class Reserve : public ObjectCacheTraceEntry { public: - Reserve(object_cache *cache, size_t count, uint32 flags) + Reserve(ObjectCache* cache, size_t count, uint32 flags) : ObjectCacheTraceEntry(cache), fCount(count), @@ -342,44 +217,69 @@ class Reserve : public ObjectCacheTraceEntry { // #pragma mark - -static inline void * -link_to_object(object_link *link, size_t objectSize) +static int +dump_slabs(int argc, char* argv[]) { - return ((uint8 *)link) - (objectSize - sizeof(object_link)); + kprintf("%10s %22s %8s %8s %6s %8s %8s %8s\n", "address", "name", + "objsize", "usage", "empty", "usedobj", "total", "flags"); + + ObjectCacheList::Iterator it = sObjectCaches.GetIterator(); + + while (it.HasNext()) { + ObjectCache* cache = it.Next(); + + kprintf("%p %22s %8lu %8lu %6lu %8lu %8lu %8lx\n", cache, cache->name, + cache->object_size, cache->usage, cache->empty_count, + cache->used_count, cache->usage / cache->object_size, + cache->flags); + } + + return 0; } -static inline object_link * -object_to_link(void *object, size_t objectSize) +static int +dump_cache_info(int argc, char* argv[]) { - return (object_link *)(((uint8 *)object) - + (objectSize - sizeof(object_link))); + if (argc < 2) { + kprintf("usage: cache_info [address]\n"); + return 0; + } + + ObjectCache* cache = (ObjectCache*)strtoul(argv[1], NULL, 16); + + kprintf("name: %s\n", cache->name); + kprintf("lock: %p\n", &cache->lock); + kprintf("object_size: %lu\n", cache->object_size); + kprintf("cache_color_cycle: %lu\n", cache->cache_color_cycle); + kprintf("used_count: %lu\n", cache->used_count); + kprintf("empty_count: %lu\n", cache->empty_count); + kprintf("pressure: %lu\n", cache->pressure); + kprintf("slab_size: %lu\n", cache->slab_size); + kprintf("usage: %lu\n", cache->usage); + kprintf("maximum: %lu\n", cache->maximum); + kprintf("flags: 0x%lx\n", cache->flags); + kprintf("cookie: %p\n", cache->cookie); + + return 0; } -static inline int -__fls0(size_t value) -{ - if (value == 0) - return -1; - - int bit; - for (bit = 0; value != 1; bit++) - value >>= 1; - return bit; -} +// #pragma mark - -void * -internal_alloc(size_t size, uint32 flags) +void* +slab_internal_alloc(size_t size, uint32 flags) { if (flags & CACHE_DURING_BOOT) { if ((sInitialPointer + size) > sInitialLimit) { - panic("internal_alloc: ran out of initial space"); +dprintf("sInitialPointer: %p, sInitialLimit: %p, size: %#lx\n", +sInitialPointer, sInitialLimit, size); + panic("slab_internal_alloc: ran out of initial space"); return NULL; } - uint8 *buffer = sInitialPointer; + uint8* buffer = sInitialPointer; sInitialPointer += size; return buffer; } @@ -390,9 +290,9 @@ internal_alloc(size_t size, uint32 flags) void -internal_free(void *_buffer) +slab_internal_free(void* _buffer) { - uint8 *buffer = (uint8 *)_buffer; + uint8* buffer = (uint8*)_buffer; if (buffer >= sInitialBegin && buffer < sInitialLimit) return; @@ -401,104 +301,13 @@ internal_free(void *_buffer) } -static status_t -area_allocate_pages(object_cache *cache, void **pages, uint32 flags, - bool unlockWhileAllocating) -{ - TRACE_CACHE(cache, "allocate pages (%lu, 0x0%lx)", cache->slab_size, flags); - - uint32 lock = B_FULL_LOCK; - if (cache->flags & CACHE_UNLOCKED_PAGES) - lock = B_NO_LOCK; - - uint32 addressSpec = B_ANY_KERNEL_ADDRESS; - if ((cache->flags & CACHE_ALIGN_ON_SIZE) != 0 - && cache->slab_size != B_PAGE_SIZE) - addressSpec = 
B_ANY_KERNEL_BLOCK_ADDRESS; - - if (unlockWhileAllocating) - cache->Unlock(); - - // if we are allocating, it is because we need the pages immediatly - // so we lock them. when moving the slab to the empty list we should - // unlock them, and lock them again when getting one from the empty list. - area_id areaId = create_area_etc(VMAddressSpace::KernelID(), - cache->name, pages, addressSpec, cache->slab_size, lock, - B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, - (flags & CACHE_DONT_SLEEP) != 0 ? CREATE_AREA_DONT_WAIT : 0); - - if (unlockWhileAllocating) - cache->Lock(); - - if (areaId < 0) - return areaId; - - cache->usage += cache->slab_size; - - TRACE_CACHE(cache, " ... = { %ld, %p }", areaId, *pages); - - return B_OK; -} - - static void -area_free_pages(object_cache *cache, void *pages) -{ - area_id id = area_for(pages); - - TRACE_CACHE(cache, "delete pages %p (%ld)", pages, id); - - if (id < 0) { - panic("object cache: freeing unknown area"); - return; - } - - delete_area(id); - - cache->usage -= cache->slab_size; -} - - -static status_t -early_allocate_pages(object_cache *cache, void **pages, uint32 flags, - bool unlockWhileAllocating) -{ - TRACE_CACHE(cache, "early allocate pages (%lu, 0x0%lx)", cache->slab_size, - flags); - - if (unlockWhileAllocating) - cache->Unlock(); - - addr_t base = vm_allocate_early(sKernelArgs, cache->slab_size, - cache->slab_size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA); - - if (unlockWhileAllocating) - cache->Lock(); - - *pages = (void *)base; - - cache->usage += cache->slab_size; - - TRACE_CACHE(cache, " ... = { %p }", *pages); - - return B_OK; -} - - -static void -early_free_pages(object_cache *cache, void *pages) -{ - panic("memory pressure on bootup?"); -} - - -static void -object_cache_low_memory(void *_self, uint32 resources, int32 level) +object_cache_low_memory(void* _self, uint32 resources, int32 level) { if (level == B_NO_LOW_RESOURCE) return; - object_cache *cache = (object_cache *)_self; + ObjectCache* cache = (ObjectCache*)_self; // we are calling the reclaimer without the object cache lock // to give the owner a chance to return objects to the slabs. @@ -557,124 +366,6 @@ object_cache_low_memory(void *_self, uint32 resources, int32 level) } -static void -object_cache_return_object_wrapper(object_depot *depot, void *object) -{ - // TODO: the offset calculation might be wrong because we hardcode a - // SmallObjectCache instead of a base object_cache. Also this must - // have an unacceptable overhead. 
- SmallObjectCache dummyCache; - object_cache *cache = (object_cache *)(((uint8 *)depot) - - offset_of_member(dummyCache, depot)); - - object_cache_free(cache, object); -} - - -static status_t -object_cache_init(object_cache *cache, const char *name, size_t objectSize, - size_t alignment, size_t maximum, uint32 flags, void *cookie, - object_cache_constructor constructor, object_cache_destructor destructor, - object_cache_reclaimer reclaimer) -{ - strlcpy(cache->name, name, sizeof(cache->name)); - - mutex_init(&cache->lock, cache->name); - - if (objectSize < sizeof(object_link)) - objectSize = sizeof(object_link); - - if (alignment > 0 && (objectSize & (alignment - 1))) - cache->object_size = objectSize + alignment - - (objectSize & (alignment - 1)); - else - cache->object_size = objectSize; - - TRACE_CACHE(cache, "init %lu, %lu -> %lu", objectSize, alignment, - cache->object_size); - - cache->cache_color_cycle = 0; - cache->total_objects = 0; - cache->used_count = 0; - cache->empty_count = 0; - cache->pressure = 0; - cache->min_object_reserve = 0; - - cache->usage = 0; - cache->maximum = maximum; - - cache->flags = flags; - - cache->resize_request = NULL; - - // TODO: depot destruction is obviously broken - // no gain in using the depot in single cpu setups - //if (smp_get_num_cpus() == 1) - cache->flags |= CACHE_NO_DEPOT; - - if (!(cache->flags & CACHE_NO_DEPOT)) { - status_t status = object_depot_init(&cache->depot, flags, - object_cache_return_object_wrapper); - if (status < B_OK) { - mutex_destroy(&cache->lock); - return status; - } - } - - cache->cookie = cookie; - cache->constructor = constructor; - cache->destructor = destructor; - cache->reclaimer = reclaimer; - - if (cache->flags & CACHE_DURING_BOOT) { - cache->allocate_pages = early_allocate_pages; - cache->free_pages = early_free_pages; - } else { - cache->allocate_pages = area_allocate_pages; - cache->free_pages = area_free_pages; - } - - register_low_resource_handler(object_cache_low_memory, cache, - B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY - | B_KERNEL_RESOURCE_ADDRESS_SPACE, 5); - - MutexLocker _(sObjectCacheListLock); - sObjectCaches.Add(cache); - - return B_OK; -} - - -static void -object_cache_commit_slab(object_cache *cache, slab *slab) -{ - void *pages = (void *)ROUNDDOWN((addr_t)slab->pages, B_PAGE_SIZE); - if (create_area(cache->name, &pages, B_EXACT_ADDRESS, cache->slab_size, - B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA) < 0) - panic("failed to create_area()"); -} - - -static void -object_cache_commit_pre_pages(object_cache *cache) -{ - SlabList::Iterator it = cache->full.GetIterator(); - while (it.HasNext()) - object_cache_commit_slab(cache, it.Next()); - - it = cache->partial.GetIterator(); - while (it.HasNext()) - object_cache_commit_slab(cache, it.Next()); - - it = cache->empty.GetIterator(); - while (it.HasNext()) - object_cache_commit_slab(cache, it.Next()); - - cache->allocate_pages = area_allocate_pages; - cache->free_pages = area_free_pages; -} - - static status_t object_cache_resizer(void*) { @@ -696,7 +387,7 @@ object_cache_resizer(void*) // resize the cache, if necessary - object_cache* cache = request->cache; + ObjectCache* cache = request->cache; MutexLocker cacheLocker(cache->lock); @@ -726,7 +417,7 @@ object_cache_resizer(void*) static void -increase_object_reserve(object_cache* cache) +increase_object_reserve(ObjectCache* cache) { if (cache->resize_request->pending) return; @@ -739,73 +430,9 @@ increase_object_reserve(object_cache* cache) } -static void 
-delete_cache(object_cache *cache) -{ - cache->~object_cache(); - internal_free(cache); -} - - -static SmallObjectCache * -create_small_object_cache(const char *name, size_t object_size, - size_t alignment, size_t maximum, uint32 flags, void *cookie, - object_cache_constructor constructor, object_cache_destructor destructor, - object_cache_reclaimer reclaimer) -{ - void *buffer = internal_alloc(sizeof(SmallObjectCache), flags); - if (buffer == NULL) - return NULL; - - SmallObjectCache *cache = new (buffer) SmallObjectCache(); - - if (object_cache_init(cache, name, object_size, alignment, maximum, - flags | CACHE_ALIGN_ON_SIZE, cookie, constructor, destructor, - reclaimer) < B_OK) { - delete_cache(cache); - return NULL; - } - - if ((flags & CACHE_LARGE_SLAB) != 0) - cache->slab_size = max_c(16 * B_PAGE_SIZE, 1024 * object_size); - else - cache->slab_size = B_PAGE_SIZE; - - return cache; -} - - -static HashedObjectCache * -create_hashed_object_cache(const char *name, size_t object_size, - size_t alignment, size_t maximum, uint32 flags, void *cookie, - object_cache_constructor constructor, object_cache_destructor destructor, - object_cache_reclaimer reclaimer) -{ - void *buffer = internal_alloc(sizeof(HashedObjectCache), flags); - if (buffer == NULL) - return NULL; - - HashedObjectCache *cache = new (buffer) HashedObjectCache(); - - if (object_cache_init(cache, name, object_size, alignment, maximum, flags, - cookie, constructor, destructor, reclaimer) < B_OK) { - delete_cache(cache); - return NULL; - } - - if ((flags & CACHE_LARGE_SLAB) != 0) - cache->slab_size = max_c(256 * B_PAGE_SIZE, 128 * object_size); - else - cache->slab_size = max_c(16 * B_PAGE_SIZE, 8 * object_size); - cache->lower_boundary = __fls0(cache->object_size); - - return cache; -} - - -object_cache * -create_object_cache(const char *name, size_t object_size, size_t alignment, - void *cookie, object_cache_constructor constructor, +object_cache* +create_object_cache(const char* name, size_t object_size, size_t alignment, + void* cookie, object_cache_constructor constructor, object_cache_destructor destructor) { return create_object_cache_etc(name, object_size, alignment, 0, 0, cookie, @@ -813,31 +440,36 @@ create_object_cache(const char *name, size_t object_size, size_t alignment, } -object_cache * -create_object_cache_etc(const char *name, size_t objectSize, size_t alignment, - size_t maximum, uint32 flags, void *cookie, +object_cache* +create_object_cache_etc(const char* name, size_t objectSize, size_t alignment, + size_t maximum, uint32 flags, void* cookie, object_cache_constructor constructor, object_cache_destructor destructor, object_cache_reclaimer reclaimer) { - object_cache *cache; + ObjectCache* cache; if (objectSize == 0) { cache = NULL; } else if (objectSize <= 256) { - cache = create_small_object_cache(name, objectSize, alignment, maximum, + cache = SmallObjectCache::Create(name, objectSize, alignment, maximum, flags, cookie, constructor, destructor, reclaimer); } else { - cache = create_hashed_object_cache(name, objectSize, alignment, + cache = HashedObjectCache::Create(name, objectSize, alignment, maximum, flags, cookie, constructor, destructor, reclaimer); } + if (cache != NULL) { + MutexLocker _(sObjectCacheListLock); + sObjectCaches.Add(cache); + } + T(Create(name, objectSize, alignment, maximum, flags, cookie, cache)); return cache; } void -delete_object_cache(object_cache *cache) +delete_object_cache(object_cache* cache) { T(Delete(cache)); @@ -863,12 +495,12 @@ delete_object_cache(object_cache *cache) 
cache->ReturnSlab(cache->empty.RemoveHead()); mutex_destroy(&cache->lock); - delete_cache(cache); + cache->Delete(); } status_t -object_cache_set_minimum_reserve(object_cache *cache, size_t objectCount) +object_cache_set_minimum_reserve(object_cache* cache, size_t objectCount) { MutexLocker _(cache->lock); @@ -896,11 +528,11 @@ object_cache_set_minimum_reserve(object_cache *cache, size_t objectCount) } -void * -object_cache_alloc(object_cache *cache, uint32 flags) +void* +object_cache_alloc(object_cache* cache, uint32 flags) { if (!(cache->flags & CACHE_NO_DEPOT)) { - void *object = object_depot_obtain(&cache->depot); + void* object = object_depot_obtain(&cache->depot); if (object) { T(Alloc(cache, flags, object)); return object; @@ -908,7 +540,7 @@ object_cache_alloc(object_cache *cache, uint32 flags) } MutexLocker _(cache->lock); - slab *source; + slab* source; if (cache->partial.IsEmpty()) { if (cache->empty.IsEmpty()) { @@ -929,7 +561,7 @@ object_cache_alloc(object_cache *cache, uint32 flags) ParanoiaChecker _2(source); - object_link *link = _pop(source->free); + object_link* link = _pop(source->free); source->count--; cache->used_count++; @@ -947,54 +579,14 @@ object_cache_alloc(object_cache *cache, uint32 flags) cache->full.Add(source); } - void *object = link_to_object(link, cache->object_size); + void* object = link_to_object(link, cache->object_size); T(Alloc(cache, flags, object)); return object; } -static void -object_cache_return_to_slab(object_cache *cache, slab *source, void *object) -{ - if (source == NULL) { - panic("object_cache: free'd object has no slab"); - return; - } - - ParanoiaChecker _(source); - - object_link *link = object_to_link(object, cache->object_size); - - TRACE_CACHE(cache, "returning %p (%p) to %p, %lu used (%lu empty slabs).", - object, link, source, source->size - source->count, - cache->empty_count); - - _push(source->free, link); - source->count++; - cache->used_count--; - - ADD_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next, sizeof(void*)); - - if (source->count == source->size) { - cache->partial.Remove(source); - - if (cache->empty_count < cache->pressure - && cache->total_objects - cache->used_count - source->size - >= cache->min_object_reserve) { - cache->empty_count++; - cache->empty.Add(source); - } else { - cache->ReturnSlab(source); - } - } else if (source->count == 1) { - cache->full.Remove(source); - cache->partial.Add(source); - } -} - - void -object_cache_free(object_cache *cache, void *object) +object_cache_free(object_cache* cache, void* object) { if (object == NULL) return; @@ -1007,21 +599,21 @@ object_cache_free(object_cache *cache, void *object) } MutexLocker _(cache->lock); - object_cache_return_to_slab(cache, cache->ObjectSlab(object), object); + cache->ReturnObjectToSlab(cache->ObjectSlab(object), object); } static status_t -object_cache_reserve_internal(object_cache *cache, size_t objectCount, +object_cache_reserve_internal(ObjectCache* cache, size_t objectCount, uint32 flags, bool unlockWhileAllocating) { - size_t numBytes = objectCount * cache->object_size; + size_t numBytes = objectCount* cache->object_size; size_t slabCount = ((numBytes - 1) / cache->slab_size) + 1; // TODO: This also counts the unusable space of each slab, which can // sum up. 
while (slabCount > 0) { - slab *newSlab = cache->CreateSlab(flags, unlockWhileAllocating); + slab* newSlab = cache->CreateSlab(flags, unlockWhileAllocating); if (newSlab == NULL) return B_NO_MEMORY; @@ -1035,7 +627,7 @@ object_cache_reserve_internal(object_cache *cache, size_t objectCount, status_t -object_cache_reserve(object_cache *cache, size_t objectCount, uint32 flags) +object_cache_reserve(object_cache* cache, size_t objectCount, uint32 flags) { if (objectCount == 0) return B_OK; @@ -1048,329 +640,23 @@ object_cache_reserve(object_cache *cache, size_t objectCount, uint32 flags) void -object_cache_get_usage(object_cache *cache, size_t *_allocatedMemory) +object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory) { MutexLocker _(cache->lock); *_allocatedMemory = cache->usage; } -slab * -object_cache::InitSlab(slab *slab, void *pages, size_t byteCount) -{ - TRACE_CACHE(this, "construct (%p, %p .. %p, %lu)", slab, pages, - ((uint8 *)pages) + byteCount, byteCount); - - slab->pages = pages; - slab->count = slab->size = byteCount / object_size; - slab->free = NULL; - total_objects += slab->size; - - size_t spareBytes = byteCount - (slab->size * object_size); - slab->offset = cache_color_cycle; - - if (slab->offset > spareBytes) - cache_color_cycle = slab->offset = 0; - else - cache_color_cycle += kCacheColorPeriod; - - TRACE_CACHE(this, " %lu objects, %lu spare bytes, offset %lu", - slab->size, spareBytes, slab->offset); - - uint8 *data = ((uint8 *)pages) + slab->offset; - - CREATE_PARANOIA_CHECK_SET(slab, "slab"); - - for (size_t i = 0; i < slab->size; i++) { - bool failedOnFirst = false; - - status_t status = PrepareObject(slab, data); - if (status < B_OK) - failedOnFirst = true; - else if (constructor) - status = constructor(cookie, data); - - if (status < B_OK) { - if (!failedOnFirst) - UnprepareObject(slab, data); - - data = ((uint8 *)pages) + slab->offset; - for (size_t j = 0; j < i; j++) { - if (destructor) - destructor(cookie, data); - UnprepareObject(slab, data); - data += object_size; - } - - DELETE_PARANOIA_CHECK_SET(slab); - - return NULL; - } - - _push(slab->free, object_to_link(data, object_size)); - - ADD_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, slab, - &object_to_link(data, object_size)->next, sizeof(void*)); - - data += object_size; - } - - return slab; -} - - void -object_cache::UninitSlab(slab *slab) +slab_init(kernel_args* args, addr_t initialBase, size_t initialSize) { - TRACE_CACHE(this, "destruct %p", slab); + dprintf("slab: init base %p + 0x%lx\n", (void*)initialBase, initialSize); - if (slab->count != slab->size) - panic("cache: destroying a slab which isn't empty."); - - total_objects -= slab->size; - - DELETE_PARANOIA_CHECK_SET(slab); - - uint8 *data = ((uint8 *)slab->pages) + slab->offset; - - for (size_t i = 0; i < slab->size; i++) { - if (destructor) - destructor(cookie, data); - UnprepareObject(slab, data); - data += object_size; - } -} - - -static inline slab * -slab_in_pages(const void *pages, size_t slab_size) -{ - return (slab *)(((uint8 *)pages) + slab_size - sizeof(slab)); -} - - -static inline const void * -lower_boundary(void *object, size_t byteCount) -{ - const uint8 *null = (uint8 *)NULL; - return null + ((((uint8 *)object) - null) & ~(byteCount - 1)); -} - - -static inline bool -check_cache_quota(object_cache *cache) -{ - if (cache->maximum == 0) - return true; - - return (cache->usage + cache->slab_size) <= cache->maximum; -} - - -slab * -SmallObjectCache::CreateSlab(uint32 flags, bool unlockWhileAllocating) -{ - if 
(!check_cache_quota(this)) - return NULL; - - void *pages; - - if (allocate_pages(this, &pages, flags, unlockWhileAllocating) < B_OK) - return NULL; - - return InitSlab(slab_in_pages(pages, slab_size), pages, - slab_size - sizeof(slab)); -} - - -void -SmallObjectCache::ReturnSlab(slab *slab) -{ - UninitSlab(slab); - free_pages(this, slab->pages); -} - - -slab * -SmallObjectCache::ObjectSlab(void *object) const -{ - return slab_in_pages(lower_boundary(object, slab_size), slab_size); -} - - -static slab * -allocate_slab(uint32 flags) -{ - return (slab *)internal_alloc(sizeof(slab), flags); -} - - -static void -free_slab(slab *slab) -{ - internal_free(slab); -} - - -static HashedObjectCache::Link * -allocate_link(uint32 flags) -{ - return (HashedObjectCache::Link *) - internal_alloc(sizeof(HashedObjectCache::Link), flags); -} - - -static void -free_link(HashedObjectCache::Link *link) -{ - internal_free(link); -} - - -slab * -HashedObjectCache::CreateSlab(uint32 flags, bool unlockWhileAllocating) -{ - if (!check_cache_quota(this)) - return NULL; - - if (unlockWhileAllocating) - Unlock(); - - slab *slab = allocate_slab(flags); - - if (unlockWhileAllocating) - Lock(); - - if (slab == NULL) - return NULL; - - void *pages; - - if (allocate_pages(this, &pages, flags, unlockWhileAllocating) == B_OK) { - if (InitSlab(slab, pages, slab_size)) - return slab; - - free_pages(this, pages); - } - - free_slab(slab); - return NULL; -} - - -void -HashedObjectCache::ReturnSlab(slab *slab) -{ - UninitSlab(slab); - free_pages(this, slab->pages); -} - - -slab * -HashedObjectCache::ObjectSlab(void *object) const -{ - Link *link = hash_table.Lookup(object); - if (link == NULL) { - panic("object cache: requested object %p missing from hash table", - object); - return NULL; - } - return link->parent; -} - - -status_t -HashedObjectCache::PrepareObject(slab *source, void *object) -{ - Link *link = allocate_link(CACHE_DONT_SLEEP); - if (link == NULL) - return B_NO_MEMORY; - - link->buffer = object; - link->parent = source; - - hash_table.Insert(link); - return B_OK; -} - - -void -HashedObjectCache::UnprepareObject(slab *source, void *object) -{ - Link *link = hash_table.Lookup(object); - if (link == NULL) { - panic("object cache: requested object missing from hash table"); - return; - } - - if (link->parent != source) { - panic("object cache: slab mismatch"); - return; - } - - hash_table.Remove(link); - free_link(link); -} - - -static int -dump_slabs(int argc, char *argv[]) -{ - kprintf("%10s %22s %8s %8s %6s %8s %8s %8s\n", "address", "name", - "objsize", "usage", "empty", "usedobj", "total", "flags"); - - ObjectCacheList::Iterator it = sObjectCaches.GetIterator(); - - while (it.HasNext()) { - object_cache *cache = it.Next(); - - kprintf("%p %22s %8lu %8lu %6lu %8lu %8lu %8lx\n", cache, cache->name, - cache->object_size, cache->usage, cache->empty_count, - cache->used_count, cache->usage / cache->object_size, - cache->flags); - } - - return 0; -} - - -static int -dump_cache_info(int argc, char *argv[]) -{ - if (argc < 2) { - kprintf("usage: cache_info [address]\n"); - return 0; - } - - object_cache *cache = (object_cache *)strtoul(argv[1], NULL, 16); - - kprintf("name: %s\n", cache->name); - kprintf("lock: %p\n", &cache->lock); - kprintf("object_size: %lu\n", cache->object_size); - kprintf("cache_color_cycle: %lu\n", cache->cache_color_cycle); - kprintf("used_count: %lu\n", cache->used_count); - kprintf("empty_count: %lu\n", cache->empty_count); - kprintf("pressure: %lu\n", cache->pressure); - kprintf("slab_size: 
%lu\n", cache->slab_size); - kprintf("usage: %lu\n", cache->usage); - kprintf("maximum: %lu\n", cache->maximum); - kprintf("flags: 0x%lx\n", cache->flags); - kprintf("cookie: %p\n", cache->cookie); - - return 0; -} - - -void -slab_init(kernel_args *args, addr_t initialBase, size_t initialSize) -{ - dprintf("slab: init base %p + 0x%lx\n", (void *)initialBase, initialSize); - - sInitialBegin = (uint8 *)initialBase; + sInitialBegin = (uint8*)initialBase; sInitialLimit = sInitialBegin + initialSize; sInitialPointer = sInitialBegin; - sKernelArgs = args; + ObjectCache::SetKernelArgs(args); new (&sObjectCaches) ObjectCacheList(); @@ -1388,9 +674,12 @@ slab_init_post_sem() ObjectCacheList::Iterator it = sObjectCaches.GetIterator(); while (it.HasNext()) { - object_cache *cache = it.Next(); - if (cache->allocate_pages == early_allocate_pages) - object_cache_commit_pre_pages(cache); + ObjectCache* cache = it.Next(); + cache->InitPostArea(); + register_low_resource_handler(object_cache_low_memory, cache, + B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY + | B_KERNEL_RESOURCE_ADDRESS_SPACE, 5); + } block_allocator_init_rest(); diff --git a/src/system/kernel/slab/SmallObjectCache.cpp b/src/system/kernel/slab/SmallObjectCache.cpp new file mode 100644 index 0000000000..d2c62d04c4 --- /dev/null +++ b/src/system/kernel/slab/SmallObjectCache.cpp @@ -0,0 +1,69 @@ +/* + * Copyright 2008, Axel Dörfler. All Rights Reserved. + * Copyright 2007, Hugo Santos. All Rights Reserved. + * + * Distributed under the terms of the MIT License. + */ + + +#include "SmallObjectCache.h" + +#include "slab_private.h" + + +/*static*/ SmallObjectCache* +SmallObjectCache::Create(const char* name, size_t object_size, + size_t alignment, size_t maximum, uint32 flags, void* cookie, + object_cache_constructor constructor, object_cache_destructor destructor, + object_cache_reclaimer reclaimer) +{ + void* buffer = slab_internal_alloc(sizeof(SmallObjectCache), flags); + if (buffer == NULL) + return NULL; + + SmallObjectCache* cache = new(buffer) SmallObjectCache(); + + if (cache->Init(name, object_size, alignment, maximum, + flags | CACHE_ALIGN_ON_SIZE, cookie, constructor, destructor, + reclaimer) != B_OK) { + cache->Delete(); + return NULL; + } + + if ((flags & CACHE_LARGE_SLAB) != 0) + cache->slab_size = max_c(16 * B_PAGE_SIZE, 1024 * object_size); + else + cache->slab_size = B_PAGE_SIZE; + + return cache; +} + + +slab* +SmallObjectCache::CreateSlab(uint32 flags, bool unlockWhileAllocating) +{ + if (!check_cache_quota(this)) + return NULL; + + void* pages; + if ((this->*allocate_pages)(&pages, flags, unlockWhileAllocating) != B_OK) + return NULL; + + return InitSlab(slab_in_pages(pages, slab_size), pages, + slab_size - sizeof(slab)); +} + + +void +SmallObjectCache::ReturnSlab(slab* slab) +{ + UninitSlab(slab); + (this->*free_pages)(slab->pages); +} + + +slab* +SmallObjectCache::ObjectSlab(void* object) const +{ + return slab_in_pages(lower_boundary(object, slab_size), slab_size); +} diff --git a/src/system/kernel/slab/SmallObjectCache.h b/src/system/kernel/slab/SmallObjectCache.h new file mode 100644 index 0000000000..849798d770 --- /dev/null +++ b/src/system/kernel/slab/SmallObjectCache.h @@ -0,0 +1,29 @@ +/* + * Copyright 2008, Axel Dörfler. All Rights Reserved. + * Copyright 2007, Hugo Santos. All Rights Reserved. + * + * Distributed under the terms of the MIT License. 
+ */ +#ifndef SMALL_OBJECT_CACHE_H +#define SMALL_OBJECT_CACHE_H + + +#include "ObjectCache.h" + + +struct SmallObjectCache : ObjectCache { + static SmallObjectCache* Create(const char* name, size_t object_size, + size_t alignment, size_t maximum, + uint32 flags, void* cookie, + object_cache_constructor constructor, + object_cache_destructor destructor, + object_cache_reclaimer reclaimer); + + virtual slab* CreateSlab(uint32 flags, + bool unlockWhileAllocating); + virtual void ReturnSlab(slab* slab); + virtual slab* ObjectSlab(void* object) const; +}; + + +#endif // SMALL_OBJECT_CACHE_H diff --git a/src/system/kernel/slab/slab_private.h b/src/system/kernel/slab/slab_private.h index 73c04c9588..42809c857c 100644 --- a/src/system/kernel/slab/slab_private.h +++ b/src/system/kernel/slab/slab_private.h @@ -12,14 +12,27 @@ #include -void* internal_alloc(size_t size, uint32 flags); -void internal_free(void *_buffer); +//#define TRACE_SLAB +#ifdef TRACE_SLAB +#define TRACE_CACHE(cache, format, args...) \ + dprintf("Cache[%p, %s] " format "\n", cache, cache->name , ##args) +#else +#define TRACE_CACHE(cache, format, bananas...) do { } while (0) +#endif -void* block_alloc(size_t size); -void block_free(void *block); -void block_allocator_init_boot(); -void block_allocator_init_rest(); +#define COMPONENT_PARANOIA_LEVEL OBJECT_CACHE_PARANOIA +#include + +struct ObjectCache; + +void* slab_internal_alloc(size_t size, uint32 flags); +void slab_internal_free(void *_buffer); + +void* block_alloc(size_t size); +void block_free(void *block); +void block_allocator_init_boot(); +void block_allocator_init_rest(); template diff --git a/src/system/kernel/vm/vm.cpp b/src/system/kernel/vm/vm.cpp index bc5938cf7a..5fcb2333c3 100644 --- a/src/system/kernel/vm/vm.cpp +++ b/src/system/kernel/vm/vm.cpp @@ -3215,7 +3215,7 @@ vm_init(kernel_args* args) TRACE(("heap at 0x%lx\n", heapBase)); heap_init(heapBase, heapSize); - size_t slabInitialSize = args->num_cpus * 2 * B_PAGE_SIZE; + size_t slabInitialSize = args->num_cpus * 3 * B_PAGE_SIZE; addr_t slabInitialBase = vm_allocate_early(args, slabInitialSize, slabInitialSize, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA); slab_init(args, slabInitialBase, slabInitialSize);
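
A note on the address arithmetic behind SmallObjectCache::ObjectSlab() above:
the slab header occupies the last sizeof(slab) bytes of each slab_size-aligned
page run (slab_in_pages()), so an object's slab can be recovered purely by
masking the object address down to the nearest slab boundary
(lower_boundary()). The stand-alone, user-space sketch below illustrates only
that calculation; kSlabSize, kObjectSize, the trimmed-down slab struct and the
static aligned buffer are illustrative stand-ins, not part of the patch.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

static const size_t kSlabSize = 4096;		// stand-in for the cache's slab_size
static const size_t kObjectSize = 64;		// stand-in for object_size

// Trimmed-down slab header, just enough for the example.
struct slab {
	void*	pages;
	size_t	size;
};

// Counterpart of lower_boundary(): mask an object address down to the start
// of its slab_size-aligned run.
static inline void*
lower_boundary(void* object, size_t byteCount)
{
	return (void*)((uintptr_t)object & ~(uintptr_t)(byteCount - 1));
}

// Counterpart of slab_in_pages(): the header sits in the last sizeof(slab)
// bytes of the run, after the objects.
static inline slab*
slab_in_pages(void* pages, size_t slabSize)
{
	return (slab*)((uint8_t*)pages + slabSize - sizeof(slab));
}

int
main()
{
	// A slab_size-aligned run of memory; in the kernel this comes from the
	// page allocator instead.
	alignas(kSlabSize) static uint8_t pages[kSlabSize];

	slab* header = slab_in_pages(pages, kSlabSize);
	header->pages = pages;
	header->size = (kSlabSize - sizeof(slab)) / kObjectSize;

	// Pick an object somewhere in the run and recover its slab the same way
	// SmallObjectCache::ObjectSlab() does.
	void* object = pages + 3 * kObjectSize;
	slab* found = slab_in_pages(lower_boundary(object, kSlabSize), kSlabSize);

	assert(found == header);
	printf("%zu objects per slab, header at %p\n", header->size, (void*)found);
	return 0;
}

Keeping the header inside the page run itself is what lets SmallObjectCache
skip the per-object hash table that HashedObjectCache maintains in its
PrepareObject()/ObjectSlab() path.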
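
SmallObjectCache::Create() above also shows the construction pattern used by
the refactored caches: obtain raw memory from the flags-aware internal
allocator, placement-new the cache object, run Init(), tear down through
Delete() on failure, and only then choose the slab size (at least 16 pages or
room for 1024 objects when CACHE_LARGE_SLAB is set, a single page otherwise).
A minimal user-space sketch of that shape, with ToyCache, kCacheLargeSlab,
kPageSize and malloc() standing in for the kernel-side pieces:

#include <cstdio>
#include <cstdlib>
#include <new>

static const size_t kPageSize = 4096;		// stand-in for B_PAGE_SIZE
static const unsigned kCacheLargeSlab = 1 << 0;	// stand-in for CACHE_LARGE_SLAB

struct ToyCache {
	size_t	object_size;
	size_t	slab_size;

	bool Init(size_t objectSize)
	{
		if (objectSize == 0)
			return false;
		object_size = objectSize;
		return true;
	}

	// Mirrors cache->Delete(): run the destructor, release the raw buffer.
	void Delete()
	{
		this->~ToyCache();
		free(this);
	}
};

// Mirrors the shape of SmallObjectCache::Create(): raw allocation,
// placement-new, Init(), cleanup via Delete() on failure, slab size policy.
static ToyCache*
create_toy_cache(size_t objectSize, unsigned flags)
{
	void* buffer = malloc(sizeof(ToyCache));	// slab_internal_alloc() stand-in
	if (buffer == NULL)
		return NULL;

	ToyCache* cache = new(buffer) ToyCache();
	if (!cache->Init(objectSize)) {
		cache->Delete();
		return NULL;
	}

	if ((flags & kCacheLargeSlab) != 0) {
		size_t large = 1024 * objectSize;
		cache->slab_size = large > 16 * kPageSize ? large : 16 * kPageSize;
	} else
		cache->slab_size = kPageSize;

	return cache;
}

int
main()
{
	ToyCache* cache = create_toy_cache(64, kCacheLargeSlab);
	if (cache == NULL)
		return 1;

	printf("slab_size = %zu\n", cache->slab_size);
	cache->Delete();
	return 0;
}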