Added a test Slab implementation to tests/ (including Depot management for SMP scalability). After a bit more testing this will be added to the kernel (still without VM integration).
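For reference, a minimal usage sketch of the C interface added here (it mirrors test5() in Slab.cpp below; the cache name and object size are arbitrary):

	object_cache_t cache = object_cache_create("my objects", 16, 0,
		NULL, NULL, NULL);
	void *object = object_cache_alloc(cache);
	// ... use the object ...
	object_cache_free(cache, object);
	object_cache_destroy(cache);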

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@20792 a95241bf-73f2-0310-859d-f6bbb57e9c96
Hugo Santos 2007-04-24 11:04:47 +00:00
parent 85dbe747c4
commit a30fb13f58
3 changed files with 1059 additions and 0 deletions

src/tests/system/kernel/slab/Jamfile
@@ -0,0 +1,8 @@
SubDir HAIKU_TOP src tests system kernel slab ;
UsePrivateHeaders kernel ;
UseHeaders $(TARGET_PRIVATE_KERNEL_HEADERS) : true ;
BinCommand test_slab
: Slab.cpp
;

src/tests/system/kernel/slab/Slab.cpp
@@ -0,0 +1,533 @@
/*
* Copyright 2007, Hugo Santos. All Rights Reserved.
* Distributed under the terms of the MIT License.
*
* Authors:
* Hugo Santos, hugosantos@gmail.com
*/
#include "Slab.h"
#include <stdio.h>
#include <malloc.h>
// TODO: this value should be dynamically tuned per cache.
static const int kMagazineCapacity = 32;
static const size_t kCacheColorPeriod = 8;
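// Minimal singly-linked list helpers; Type is expected to expose a
// ``next'' pointer, as BaseCache::ObjectLink and BaseDepot::Magazine do.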
template<typename Type> static Type *
SListPop(Type* &head)
{
Type *oldHead = head;
head = head->next;
return oldHead;
}
template<typename Type> static inline void
SListPush(Type* &head, Type *object)
{
object->next = head;
head = object;
}
status_t
MallocBackend::AllocatePages(BaseCache *cache, AllocationID *id, void **pages,
size_t byteCount, uint32_t flags)
{
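// memalign() requires a power-of-two alignment, so CACHE_ALIGN_TO_PAGE_TOTAL
// is only honored correctly when byteCount is itself a power of two.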
size_t alignment = 16;
if (flags & CACHE_ALIGN_TO_PAGE_TOTAL)
alignment = byteCount;
*pages = memalign(alignment, byteCount);
if (*pages == NULL)
return B_NO_MEMORY;
*id = *pages;
return B_OK;
}
void
MallocBackend::FreePages(BaseCache *cache, void *pages)
{
free(pages);
}
status_t
AreaBackend::AllocatePages(BaseCache *cache, area_id *id, void **pages,
size_t byteCount, uint32_t flags)
{
if (flags & CACHE_ALIGN_TO_PAGE_TOTAL)
; // unsupported here; the kernel version would panic()
area_id areaId = create_area(cache->Name(), pages, B_ANY_ADDRESS, //B_ANY_KERNEL_ADDRESS,
byteCount, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
if (areaId < 0)
return areaId;
printf("AreaBackend::AllocatePages() = { %ld, %p }\n", areaId, *pages);
*id = areaId;
return B_OK;
}
void
AreaBackend::FreePages(BaseCache *cache, area_id area)
{
printf("AreaBackend::DeletePages(%ld)\n", area);
delete_area(area);
}
BaseCache::BaseCache(const char *_name, size_t objectSize, size_t alignment,
Constructor _constructor, Destructor _destructor, void *_cookie)
: fConstructor(_constructor), fDestructor(_destructor), fCookie(_cookie)
{
strncpy(fName, _name, sizeof(fName));
fName[sizeof(fName) - 1] = 0;
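// round the object size up to the next multiple of the requested alignment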
if (alignment > 0 && (objectSize & (alignment - 1)))
fObjectSize = objectSize + alignment - (objectSize & (alignment - 1));
else
fObjectSize = objectSize;
fCacheColorCycle = 0;
}
BaseCache::ObjectLink *BaseCache::AllocateObject()
{
Slab *slab = fPartialSlabs.Head();
printf("BaseCache::AllocateObject() from %p, %lu remaining\n",
slab, slab->count);
ObjectLink *link = SListPop(slab->free);
slab->count--;
if (slab->count == 0) {
// move the partial slab to the full list
fPartialSlabs.Remove(slab);
fFullSlabs.Add(slab);
}
return link;
}
bool
BaseCache::ReturnObject(const ObjectInfo &object)
{
Slab *slab = object.first;
ObjectLink *link = object.second;
// We return true if the slab is completely unused.
SListPush(slab->free, link);
slab->count++;
if (slab->count == slab->size) {
fPartialSlabs.Remove(slab);
return true;
} else if (slab->count == 1) {
fFullSlabs.Remove(slab);
fPartialSlabs.Add(slab);
}
return false;
}
BaseCache::Slab *
BaseCache::ConstructSlab(Slab *slab, void *pages, size_t byteCount,
ObjectLink *(*getLink)(void *parent, void *object), void *parent)
{
printf("BaseCache::ConstructSlab(%p, %p, %lu, %p, %p)\n", slab, pages,
byteCount, getLink, parent);
slab->pages = pages;
slab->count = slab->size = byteCount / fObjectSize;
slab->free = NULL;
size_t spareBytes = byteCount - (slab->size * fObjectSize);
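// Cache coloring: start each slab's objects at a slightly different
// offset so that the same fields of objects in different slabs do not
// all compete for the same cache lines (cf. Bonwick's slab paper).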
size_t cycle = fCacheColorCycle;
if (cycle > spareBytes) {
// wrap the color around instead of sticking at offset 0 forever
cycle = fCacheColorCycle = 0;
}
fCacheColorCycle += kCacheColorPeriod;
printf(" %lu objects, %lu spare bytes, cycle %lu\n",
slab->size, spareBytes, cycle);
slab->offset = cycle;
uint8_t *data = ((uint8_t *)pages) + cycle;
for (size_t i = 0; i < slab->size; i++) {
if (fConstructor)
fConstructor(fCookie, data);
SListPush(slab->free, getLink(parent, data));
data += fObjectSize;
}
return slab;
}
void
BaseCache::DestructSlab(Slab *slab)
{
if (fDestructor == NULL)
return;
uint8_t *data = (uint8_t *)slab->pages;
for (size_t i = 0; i < slab->size; i++) {
fDestructor(fCookie, data);
data += fObjectSize;
}
}
static inline bool
_IsMagazineEmpty(BaseDepot::Magazine *magazine)
{
return magazine->current_round == 0;
}
static inline bool
_IsMagazineFull(BaseDepot::Magazine *magazine)
{
return magazine->current_round == magazine->round_count;
}
static inline void *
_PopMagazine(BaseDepot::Magazine *magazine)
{
return magazine->rounds[--magazine->current_round];
}
static inline bool
_PushMagazine(BaseDepot::Magazine *magazine, void *object)
{
if (_IsMagazineFull(magazine))
return false;
magazine->rounds[magazine->current_round++] = object;
return true;
}
BaseDepot::BaseDepot()
: fFull(NULL), fEmpty(NULL), fFullCount(0), fEmptyCount(0)
{
// benaphore_init(...)
fStores = new (std::nothrow) CPUStore[smp_get_num_cpus()];
if (fStores) {
for (int i = 0; i < smp_get_num_cpus(); i++) {
// benaphore_init(...)
fStores[i].loaded = fStores[i].previous = NULL;
}
}
}
BaseDepot::~BaseDepot()
{
// MakeEmpty may not be used here as ReturnObject is
// no longer available by then.
delete [] fStores;
// benaphore_destroy()
}
status_t
BaseDepot::InitCheck() const
{
return fStores ? B_OK : B_NO_MEMORY;
}
void *
BaseDepot::ObtainFromStore(CPUStore *store)
{
BenaphoreLocker _(store->lock);
// To better understand both the Alloc() and Free() logic, refer to
// Bonwick's ``Magazines and Vmem'' [2001 USENIX proceedings].
// In a nutshell, we try to get an object from the loaded magazine
// if it's not empty, or from the previous magazine if it's full,
// and finally from the slab if the magazine depot has no full magazines.
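// For example, with kMagazineCapacity == 32 a CPU can satisfy up to 64
// consecutive allocations (loaded + previous magazine) without taking
// the depot lock.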
if (store->loaded == NULL)
return NULL;
while (true) {
if (!_IsMagazineEmpty(store->loaded))
return _PopMagazine(store->loaded);
if (store->previous && (_IsMagazineFull(store->previous)
|| _ExchangeWithFull(store->previous)))
std::swap(store->previous, store->loaded);
else
return NULL;
}
}
bool
BaseDepot::ReturnToStore(CPUStore *store, void *object)
{
BenaphoreLocker _(store->lock);
// We try to add the object to the loaded magazine if we have one
// and it's not full, or to the previous one if it is empty. If
// the magazine depot doesn't provide us with a new empty magazine
// we return the object directly to the slab.
while (true) {
if (store->loaded && _PushMagazine(store->loaded, object))
return true;
if ((store->previous && _IsMagazineEmpty(store->previous))
|| _ExchangeWithEmpty(store->previous))
std::swap(store->loaded, store->previous);
else
return false;
}
}
void
BaseDepot::MakeEmpty()
{
for (int i = 0; i < smp_get_num_cpus(); i++) {
if (fStores[i].loaded)
_EmptyMagazine(fStores[i].loaded);
if (fStores[i].previous)
_EmptyMagazine(fStores[i].previous);
fStores[i].loaded = fStores[i].previous = NULL;
}
while (fFull)
_EmptyMagazine(SListPop(fFull));
while (fEmpty)
_EmptyMagazine(SListPop(fEmpty));
}
bool
BaseDepot::_ExchangeWithFull(Magazine* &magazine)
{
BenaphoreLocker _(fLock);
if (fFull == NULL)
return false;
fFullCount--;
fEmptyCount++;
SListPush(fEmpty, magazine);
magazine = SListPop(fFull);
return true;
}
bool
BaseDepot::_ExchangeWithEmpty(Magazine* &magazine)
{
BenaphoreLocker _(fLock);
if (fEmpty == NULL) {
fEmpty = _AllocMagazine();
if (fEmpty == NULL)
return false;
} else {
fEmptyCount--;
}
if (magazine) {
SListPush(fFull, magazine);
fFullCount++;
}
magazine = SListPop(fEmpty);
return true;
}
void
BaseDepot::_EmptyMagazine(Magazine *magazine)
{
for (uint16_t i = 0; i < magazine->current_round; i++)
ReturnObject(magazine->rounds[i]);
_FreeMagazine(magazine);
}
BaseDepot::Magazine *
BaseDepot::_AllocMagazine()
{
Magazine *magazine = (Magazine *)malloc(sizeof(Magazine)
+ kMagazineCapacity * sizeof(void *));
if (magazine) {
magazine->next = NULL;
magazine->current_round = 0;
magazine->round_count = kMagazineCapacity;
}
return magazine;
}
void
BaseDepot::_FreeMagazine(Magazine *magazine)
{
free(magazine);
}
typedef MergedLinkCacheStrategy<MallocBackend> MallocMergedCacheStrategy;
typedef Cache<MallocMergedCacheStrategy> MallocMergedCache;
typedef LocalCache<MallocMergedCache> MallocLocalCache;
typedef HashCacheStrategy<MallocBackend> MallocHashCacheStrategy;
typedef Cache<MallocHashCacheStrategy> MallocHashCache;
object_cache_t
object_cache_create(const char *name, size_t object_size, size_t alignment,
void (*_constructor)(void *, void *), void (*_destructor)(void *, void *),
void *cookie)
{
return new (std::nothrow) MallocLocalCache(name, object_size, alignment,
_constructor, _destructor, cookie);
}
void *
object_cache_alloc(object_cache_t cache)
{
return ((MallocLocalCache *)cache)->Alloc(0);
}
void *
object_cache_alloc_etc(object_cache_t cache, uint32_t flags)
{
return ((MallocLocalCache *)cache)->Alloc(flags);
}
void
object_cache_free(object_cache_t cache, void *object)
{
((MallocLocalCache *)cache)->Free(object);
}
void
object_cache_destroy(object_cache_t cache)
{
delete (MallocLocalCache *)cache;
}
void test1()
{
MallocLocalCache cache("foobar", sizeof(int), 0, NULL, NULL, NULL);
static const int N = 4096;
void *buf[N];
for (int i = 0; i < N; i++)
buf[i] = cache.Alloc(0);
for (int i = 0; i < N; i++)
cache.Free(buf[i]);
cache.Destroy();
}
void test2()
{
TypedCache<int, MallocBackend> cache("int cache", 0);
static const int N = 4096;
int *buf[N];
for (int i = 0; i < N; i++)
buf[i] = cache.Alloc(0);
for (int i = 0; i < N; i++)
cache.Free(buf[i]);
}
void test3()
{
Cache<HashCacheStrategy<AreaBackend> > cache("512byte hash cache", 512, 0, NULL,
NULL, NULL);
static const int N = 128;
void *buf[N];
for (int i = 0; i < N; i++)
buf[i] = cache.AllocateObject(0);
for (int i = 0; i < N; i++)
cache.ReturnObject(buf[i]);
}
void test4()
{
LocalCache<MallocHashCache> cache("foobar", 512, 0, NULL, NULL, NULL);
static const int N = 128;
void *buf[N];
for (int i = 0; i < N; i++)
buf[i] = cache.Alloc(0);
for (int i = 0; i < N; i++)
cache.Free(buf[i]);
cache.Destroy();
}
void test5()
{
object_cache_t cache = object_cache_create("foobar", 16, 0,
NULL, NULL, NULL);
static const int N = 1024;
void *buf[N];
for (int i = 0; i < N; i++)
buf[i] = object_cache_alloc(cache);
for (int i = 0; i < N; i++)
object_cache_free(cache, buf[i]);
object_cache_destroy(cache);
}
int main()
{
//test1();
//test2();
test3();
//test4();
//test5();
return 0;
}

src/tests/system/kernel/slab/Slab.h
@@ -0,0 +1,518 @@
/*
* Copyright 2007, Hugo Santos. All Rights Reserved.
* Distributed under the terms of the MIT License.
*
* Authors:
* Hugo Santos, hugosantos@gmail.com
*/
#ifndef _SLAB_H_
#define _SLAB_H_
#include <stdint.h>
#include <stdlib.h>
#include <algorithm> // for swap()
#include <new>
#include <utility> // for pair<>
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <util/OpenHashTable.h>
#include <OS.h>
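// Userland stubs so this test code builds outside the kernel.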
#define smp_get_current_cpu() 0
#define smp_get_num_cpus() 1
// C interface
extern "C" {
typedef void *object_cache_t;
object_cache_t
object_cache_create(const char *name, size_t object_size, size_t alignment,
void (*_constructor)(void *, void *), void (*_destructor)(void *, void *),
void *cookie);
void *object_cache_alloc(object_cache_t cache);
void *object_cache_alloc_etc(object_cache_t cache, uint32_t flags);
void object_cache_free(object_cache_t cache, void *object);
void object_cache_destroy(object_cache_t cache);
}
// TODO: this value should be dynamically tuned per cache.
static const int kMinimumSlabItems = 32;
// base Slab implementation, opaque to the backend used.
class BaseCache {
public:
typedef void (*Constructor)(void *cookie, void *object);
typedef void (*Destructor)(void *cookie, void *object);
BaseCache(const char *_name, size_t objectSize, size_t alignment,
Constructor _constructor, Destructor _destructor, void *_cookie);
struct ObjectLink {
struct ObjectLink *next;
};
struct Slab : DoublyLinkedListLinkImpl<Slab> {
void *pages;
size_t offset; // cache color offset of the first object
size_t count, size;
ObjectLink *free;
};
typedef std::pair<Slab *, ObjectLink *> ObjectInfo;
ObjectLink *AllocateObject();
bool ReturnObject(const ObjectInfo &object);
Slab *ConstructSlab(Slab *slab, void *pages, size_t byteCount,
ObjectLink *(*getLink)(void *parent, void *object), void *parent);
void DestructSlab(Slab *slab);
const char *Name() const { return fName; }
size_t ObjectSize() const { return fObjectSize; }
protected:
typedef DoublyLinkedList<Slab> SlabList;
char fName[32];
size_t fObjectSize, fCacheColorCycle;
SlabList fPartialSlabs, fFullSlabs;
Constructor fConstructor;
Destructor fDestructor;
void *fCookie;
};
enum {
CACHE_ALIGN_TO_PAGE_TOTAL = 1 << 0,
};
struct MallocBackend {
typedef void *AllocationID;
static int PageSize() { return 4096; }
static status_t AllocatePages(BaseCache *cache, AllocationID *id,
void **pages, size_t byteCount, uint32_t flags);
static void FreePages(BaseCache *cache, void *pages);
};
struct AreaBackend {
typedef area_id AllocationID;
static int PageSize() { return B_PAGE_SIZE; }
static status_t AllocatePages(BaseCache *cache, area_id *id, void **pages,
size_t byteCount, uint32_t flags);
static void FreePages(BaseCache *cache, area_id id);
};
// Slab implementation; glues together the frontend and backend, as well
// as the slab strategy used.
template<typename Strategy>
class Cache : protected BaseCache {
public:
Cache(const char *_name, size_t objectSize, size_t alignment,
Constructor _constructor, Destructor _destructor, void *_cookie)
: BaseCache(_name, Strategy::RequiredSpace(objectSize), alignment,
_constructor, _destructor, _cookie), fStrategy(this) {}
void *AllocateObject(uint32_t flags)
{
if (fPartialSlabs.IsEmpty()) {
Slab *newSlab = fStrategy.NewSlab(flags);
if (newSlab == NULL)
return NULL;
fPartialSlabs.Add(newSlab);
}
return fStrategy.Object(BaseCache::AllocateObject());
}
void ReturnObject(void *object)
{
ObjectInfo location = fStrategy.ObjectInformation(object);
if (BaseCache::ReturnObject(location))
fStrategy.ReturnSlab(location.first);
}
private:
Strategy fStrategy;
};
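// Rounds ``object'' down to the nearest byteCount boundary. byteCount
// must be a power of two for the mask below to be meaningful.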
static inline const void *
LowerBoundary(void *object, size_t byteCount)
{
const uint8_t *null = (uint8_t *)NULL;
return null + ((((uint8_t *)object) - null) & ~(byteCount - 1));
}
template<typename Backend>
class BaseCacheStrategy {
protected:
typedef BaseCache::ObjectLink ObjectLink;
BaseCacheStrategy(BaseCache *parent)
: fParent(parent) {}
size_t SlabSize(size_t tailSpace) const
{
size_t pageCount = (kMinimumSlabItems * fParent->ObjectSize()
+ tailSpace) / Backend::PageSize();
if (pageCount < 1)
pageCount = 1;
return pageCount * Backend::PageSize();
}
struct Slab : BaseCache::Slab {
typename Backend::AllocationID id;
};
BaseCache::Slab *_ConstructSlab(Slab *slab, void *pages, size_t tailSpace,
ObjectLink *(*getLink)(void *parent, void *object), void *parent)
{
return fParent->ConstructSlab(slab, pages, SlabSize(tailSpace)
- tailSpace, getLink, parent);
}
void _DestructSlab(BaseCache::Slab *slab)
{
fParent->DestructSlab(slab);
Backend::FreePages(fParent, ((Slab *)slab)->id);
}
BaseCache *fParent;
};
// This slab strategy includes the ObjectLink at the end of each object and
// the Slab at the end of the allocated pages. It uses aligned allocations
// to provide object-to-slab mapping with zero extra storage, so there is
// only one word of overhead per object. This is optimized for small
// objects.
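// For example, with a one-page (4096-byte) slab, an object at address p
// belongs to the slab whose pages start at (p & ~4095); the Slab header
// itself occupies the last sizeof(Slab) bytes of those pages (see
// _SlabInPages()).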
template<typename Backend>
class MergedLinkCacheStrategy : public BaseCacheStrategy<Backend> {
public:
typedef typename BaseCacheStrategy<Backend>::Slab Slab;
typedef BaseCache::ObjectLink Link;
typedef BaseCache::ObjectInfo ObjectInfo;
MergedLinkCacheStrategy(BaseCache *parent)
: BaseCacheStrategy<Backend>(parent) {}
static size_t RequiredSpace(size_t objectSize)
{
return objectSize + sizeof(Link);
}
void *Object(Link *link) const
{
return ((uint8_t *)link) - (fParent->ObjectSize() - sizeof(Link));
}
ObjectInfo ObjectInformation(void *object) const
{
Slab *slab = _SlabInPages(LowerBoundary(object, _SlabSize()));
return ObjectInfo(slab, _Linkage(object));
}
BaseCache::Slab *NewSlab(uint32_t flags)
{
typename Backend::AllocationID id;
void *pages;
// In order to save a pointer per object, or a hash table to map objects
// to slabs, we require this set of pages to be aligned on a
// (pageCount * PAGE_SIZE) boundary.
if (Backend::AllocatePages(fParent, &id, &pages, _SlabSize(),
CACHE_ALIGN_TO_PAGE_TOTAL | flags) < B_OK)
return NULL;
_SlabInPages(pages)->id = id;
return BaseCacheStrategy<Backend>::_ConstructSlab(_SlabInPages(pages),
pages, sizeof(Slab), _Linkage, this);
}
void ReturnSlab(BaseCache::Slab *slab)
{
BaseCacheStrategy<Backend>::_DestructSlab(slab);
}
private:
size_t _SlabSize() const
{
return BaseCacheStrategy<Backend>::SlabSize(sizeof(Slab));
}
Link *_Linkage(void *object) const
{
return (Link *)(((uint8_t *)object)
+ (fParent->ObjectSize() - sizeof(Link)));
}
Slab *_SlabInPages(const void *pages) const
{
return (Slab *)(((uint8_t *)pages) + _SlabSize() - sizeof(Slab));
}
static Link *_Linkage(void *_this, void *object)
{
return ((MergedLinkCacheStrategy *)_this)->_Linkage(object);
}
};
template<typename Type, typename Backend>
class TypedCache : public Cache<MergedLinkCacheStrategy<Backend> > {
public:
typedef MergedLinkCacheStrategy<Backend> Strategy;
typedef Cache<Strategy> BaseType;
TypedCache(const char *name, size_t alignment)
: BaseType(name, sizeof(Type), alignment, _ConstructObject,
_DestructObject, this) {}
virtual ~TypedCache() {}
Type *Alloc(uint32_t flags) { return (Type *)BaseType::AllocateObject(flags); }
void Free(Type *object) { BaseType::ReturnObject(object); }
private:
static void _ConstructObject(void *cookie, void *object)
{
((TypedCache *)cookie)->ConstructObject((Type *)object);
}
static void _DestructObject(void *cookie, void *object)
{
((TypedCache *)cookie)->DestructObject((Type *)object);
}
virtual void ConstructObject(Type *object) {}
virtual void DestructObject(Type *object) {}
};
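// Fls ("find last set"): returns the 1-based index of the highest set
// bit, i.e. floor(log2(value)) + 1, or -1 if value is zero.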
static inline int
Fls(size_t value)
{
for (int i = 31; i >= 0; i--) {
if ((value >> i) & 1)
return i + 1;
}
return -1;
}
template<typename Backend>
struct HashCacheStrategy : BaseCacheStrategy<Backend> {
typedef typename BaseCacheStrategy<Backend>::Slab Slab;
typedef HashCacheStrategy<Backend> Strategy;
typedef BaseCache::ObjectLink ObjectLink;
typedef BaseCache::ObjectInfo ObjectInfo;
struct Link : ObjectLink {
Slab *slab;
void *buffer;
};
struct HashTableDefinition {
typedef Strategy * ParentType;
typedef void * KeyType;
typedef Link ValueType;
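// Buffers are hashed by their address shifted right by roughly
// log2(object size) (fLowerBoundary), so consecutive buffers in a
// slab land in consecutive hash buckets.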
static size_t HashKey(Strategy *parent, void *key)
{
return (((uint8_t *)key) - ((uint8_t *)0)) >> parent->fLowerBoundary;
}
static size_t Hash(Strategy *parent, Link *value)
{
return HashKey(parent, value->buffer);
}
static bool Compare(Strategy *parent, void *key, Link *value)
{
return value->buffer == key;
}
};
// for g++ 2.95
friend class HashTableDefinition;
typedef OpenHashTable<HashTableDefinition> HashTable;
HashCacheStrategy(BaseCache *parent)
: BaseCacheStrategy<Backend>(parent), fHashTable(this),
fSlabCache("slab cache", 0), fLinkCache("link cache", 0),
fLowerBoundary(Fls(parent->ObjectSize()) - 1) {}
static size_t RequiredSpace(size_t objectSize)
{
return objectSize;
}
void *Object(ObjectLink *link) const
{
return ((Link *)link)->buffer;
}
ObjectInfo ObjectInformation(void *object) const
{
Link *link = _Linkage(object);
return ObjectInfo(link->slab, link);
}
BaseCache::Slab *NewSlab(uint32_t flags)
{
size_t byteCount = _SlabSize();
Slab *slab = fSlabCache.Alloc(flags);
if (slab == NULL)
return NULL;
void *pages;
if (Backend::AllocatePages(fParent, &slab->id, &pages, byteCount,
flags) < B_OK) {
fSlabCache.Free(slab);
return NULL;
}
uint8_t *data = (uint8_t *)pages;
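// The links are inserted for offsets starting at ``pages''; this assumes
// ConstructSlab() will apply a zero cache color offset here (true as long
// as the slab size is an exact multiple of the object size).
// TODO: handle fLinkCache.Alloc() failure below (we would have to roll
// back the links inserted so far and free the pages).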
for (uint8_t *it = data;
it < (data + byteCount); it += fParent->ObjectSize()) {
Link *link = fLinkCache.Alloc(flags);
link->slab = slab;
link->buffer = it;
fHashTable.Insert(link);
}
return BaseCacheStrategy<Backend>::_ConstructSlab(slab, pages, 0,
_Linkage, this);
}
void ReturnSlab(BaseCache::Slab *slab)
{
uint8_t *data = (uint8_t *)slab->pages;
size_t byteCount = _SlabSize();
for (uint8_t *it = data;
it < (data + byteCount); it += fParent->ObjectSize()) {
Link *link = fHashTable.Lookup(it);
fHashTable.Remove(link);
fLinkCache.Free(link);
}
BaseCacheStrategy<Backend>::_DestructSlab(slab);
fSlabCache.Free((Slab *)slab);
}
private:
size_t _SlabSize() const
{
return BaseCacheStrategy<Backend>::SlabSize(0);
}
Link *_Linkage(void *object) const
{
return fHashTable.Lookup(object);
}
static ObjectLink *_Linkage(void *_this, void *object)
{
return ((Strategy *)_this)->_Linkage(object);
}
HashTable fHashTable;
TypedCache<Slab, Backend> fSlabCache;
TypedCache<Link, Backend> fLinkCache;
const size_t fLowerBoundary;
};
class BaseDepot {
public:
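// A magazine is a fixed-size stack of cached objects (``rounds''); the
// flexible rounds[] array is allocated inline by _AllocMagazine().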
struct Magazine {
Magazine *next;
uint16_t current_round, round_count;
void *rounds[0];
};
struct CPUStore {
benaphore lock;
Magazine *loaded, *previous;
};
protected:
BaseDepot();
virtual ~BaseDepot();
status_t InitCheck() const;
CPUStore *CPU() const { return &fStores[smp_get_current_cpu()]; }
void *ObtainFromStore(CPUStore *store);
bool ReturnToStore(CPUStore *store, void *object);
void MakeEmpty();
virtual void ReturnObject(void *object) = 0;
bool _ExchangeWithFull(Magazine* &magazine);
bool _ExchangeWithEmpty(Magazine* &magazine);
void _EmptyMagazine(Magazine *magazine);
Magazine *_AllocMagazine();
void _FreeMagazine(Magazine *magazine);
benaphore fLock;
Magazine *fFull, *fEmpty;
size_t fFullCount, fEmptyCount;
CPUStore *fStores;
};
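// LocalCache combines a slab Cache with a per-CPU magazine depot:
// Alloc() first tries the current CPU's magazines and only falls back
// to the underlying cache; Free() likewise returns objects to the
// magazines when possible.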
template<typename CacheType>
class LocalCache : public CacheType, protected BaseDepot {
public:
typedef typename CacheType::Constructor Constructor;
typedef typename CacheType::Destructor Destructor;
LocalCache(const char *name, size_t objectSize, size_t alignment,
Constructor _constructor, Destructor _destructor, void *_cookie)
: CacheType(name, objectSize, alignment, _constructor, _destructor,
_cookie) {}
~LocalCache() { Destroy(); }
void *Alloc(uint32_t flags)
{
void *object = BaseDepot::ObtainFromStore(CPU());
if (object == NULL)
object = CacheType::AllocateObject(flags);
return object;
}
void Free(void *object)
{
if (!BaseDepot::ReturnToStore(CPU(), object))
CacheType::ReturnObject(object);
}
void Destroy() { BaseDepot::MakeEmpty(); }
private:
void ReturnObject(void *object)
{
CacheType::ReturnObject(object);
}
};
#endif