Added initial slab allocator code to the kernel. It is still unused, and there is still no VM interaction.
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@20832 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
parent
5f29df20b2
commit
81423c91c7
38
headers/private/kernel/slab/Backend.h
Normal file
38
headers/private/kernel/slab/Backend.h
Normal file
@ -0,0 +1,38 @@
|
||||
/*
|
||||
* Copyright 2007, Hugo Santos. All Rights Reserved.
|
||||
* Distributed under the terms of the MIT License.
|
||||
*
|
||||
* Authors:
|
||||
* Hugo Santos, hugosantos@gmail.com
|
||||
*/
|
||||
|
||||
#ifndef _SLAB_BACKEND_H_
#define _SLAB_BACKEND_H_

#include <slab/Base.h>

// C entry points implemented in Slab.cpp; they allocate/release whole
// kernel areas to serve as slab page storage.
extern "C" {
status_t slab_area_backend_allocate(base_cache *cache, area_id *id,
	void **pages, size_t byte_count, uint32_t flags);
void slab_area_backend_free(base_cache *cache, area_id id);
}

// Page backend that satisfies slab page requests with kernel areas.
// Satisfies the implicit "Backend" template contract used by
// BaseCacheStrategy<Backend>: AllocationID, kPageSize,
// kMaximumAlignedLength, AllocatePages() and FreePages().
struct AreaBackend {
	// Each allocation is identified by the created area's id.
	typedef area_id AllocationID;

	static const size_t kPageSize = B_PAGE_SIZE;
	// Aligned allocations larger than one page are not supported by the
	// area backend (see slab_area_backend_allocate()).
	static const size_t kMaximumAlignedLength = B_PAGE_SIZE;

	// Allocates byteCount bytes of page storage; on success stores the
	// area id in *id and the base address in *pages.
	static status_t AllocatePages(base_cache *cache, area_id *id, void **pages,
		size_t byteCount, uint32_t flags)
	{
		return slab_area_backend_allocate(cache, id, pages, byteCount, flags);
	}

	// Releases a previously allocated area.
	static void FreePages(base_cache *cache, area_id id)
	{
		return slab_area_backend_free(cache, id);
	}
};

#endif
|
111
headers/private/kernel/slab/Base.h
Normal file
111
headers/private/kernel/slab/Base.h
Normal file
@ -0,0 +1,111 @@
|
||||
/*
|
||||
* Copyright 2007, Hugo Santos. All Rights Reserved.
|
||||
* Distributed under the terms of the MIT License.
|
||||
*
|
||||
* Authors:
|
||||
* Hugo Santos, hugosantos@gmail.com
|
||||
*/
|
||||
|
||||
#ifndef _SLAB_BASE_SLAB_H_
#define _SLAB_BASE_SLAB_H_

#include <stdint.h>

#include <OS.h>
#include <util/list.h>

#ifdef __cplusplus
#include <utility> // pair<>

extern "C" {
#endif

// Minimum number of objects a single slab should be sized to hold
// (see BaseCacheStrategy::SlabSize()).
static const int kMinimumSlabItems = 32;

// Called once per object when a slab is created/destroyed; "cookie" is
// the value supplied to base_cache_init().
typedef void (*base_cache_constructor)(void *cookie, void *object);
typedef void (*base_cache_destructor)(void *cookie, void *object);

/* base Slab implementation, opaque to the backend used. */

typedef struct base_cache {
	char name[32];				// cache name, used for area/debug output
	size_t object_size;			// object size after alignment rounding
	size_t cache_color_cycle;	// rotating offset used for cache coloring
	// Slabs with free objects live on "partial"; fully allocated slabs
	// on "full". Completely free slabs are returned to the backend.
	struct list partial, full;
	base_cache_constructor constructor;
	base_cache_destructor destructor;
	void *cookie;
} base_cache;

// Intrusive singly-linked free-list node, one per free object.
typedef struct cache_object_link {
	struct cache_object_link *next;
} cache_object_link;

typedef struct cache_slab {
	void *pages;				// backing page storage
	size_t count, size;			// free objects / total objects
	cache_object_link *free;	// free-list head
	struct list_link link;		// linkage in partial/full lists
} cache_slab;

void base_cache_init(base_cache *cache, const char *name, size_t object_size,
	size_t alignment, base_cache_constructor constructor,
	base_cache_destructor destructor, void *cookie);

cache_object_link *base_cache_allocate_object(base_cache *cache);
// Returns non-zero when the slab became completely unused.
int base_cache_return_object(base_cache *cache, cache_slab *slab,
	cache_object_link *link);

cache_slab *base_cache_construct_slab(base_cache *cache, cache_slab *slab,
	void *pages, size_t byte_count, cache_object_link *(*get_link)(
		void *parent, void *object), void *parent);
void base_cache_destruct_slab(base_cache *cache, cache_slab *slab);

#ifdef __cplusplus
}

// (slab, free-list link) pair identifying an object's location.
typedef std::pair<cache_slab *, cache_object_link *> CacheObjectInfo;
|
||||
|
||||
// Slab implementation, glues together the frontend, backend as
|
||||
// well as the Slab strategy used.
|
||||
// Slab implementation, glues together the frontend, backend as
// well as the Slab strategy used.
template<typename Strategy>
class Cache : protected base_cache {
public:
	typedef base_cache_constructor Constructor;
	typedef base_cache_destructor Destructor;

	Cache(const char *name, size_t objectSize, size_t alignment,
		Constructor constructor, Destructor destructor, void *cookie)
		: fStrategy(this)
	{
		base_cache_init(this, name, objectSize, alignment, constructor,
			destructor, cookie);
	}

	// Returns a constructed object, or NULL if a new slab was needed and
	// could not be created. NOTE(review): no locking here — callers are
	// presumably expected to serialize access; confirm once used.
	void *AllocateObject(uint32_t flags)
	{
		// Make sure at least one slab with free objects exists before
		// calling base_cache_allocate_object(), which assumes one.
		if (list_is_empty(&partial)) {
			cache_slab *newSlab = fStrategy.NewSlab(flags);
			if (newSlab == NULL)
				return NULL;
			list_add_item(&partial, newSlab);
		}

		return fStrategy.Object(base_cache_allocate_object(this));
	}

	// Returns an object to its slab; frees the slab via the strategy
	// when it becomes completely unused.
	void ReturnObject(void *object)
	{
		CacheObjectInfo location = fStrategy.ObjectInformation(object);

		if (base_cache_return_object(this, location.first, location.second))
			fStrategy.ReturnSlab(location.first);
	}

private:
	Strategy fStrategy;
};
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
63
headers/private/kernel/slab/Depot.h
Normal file
63
headers/private/kernel/slab/Depot.h
Normal file
@ -0,0 +1,63 @@
|
||||
/*
|
||||
* Copyright 2007, Hugo Santos. All Rights Reserved.
|
||||
* Distributed under the terms of the MIT License.
|
||||
*
|
||||
* Authors:
|
||||
* Hugo Santos, hugosantos@gmail.com
|
||||
*/
|
||||
|
||||
#ifndef _SLAB_DEPOT_H_
#define _SLAB_DEPOT_H_

#include <stdint.h>

#include <lock.h>
#include <smp.h>

#ifdef __cplusplus
extern "C" {
#endif

// A magazine is a fixed-capacity stack of object pointers, exchanged
// wholesale between per-CPU stores and the central depot (after
// Bonwick's "Magazines and Vmem", USENIX 2001).
typedef struct depot_magazine {
	struct depot_magazine *next;
	// current_round: number of objects held; round_count: capacity.
	uint16_t current_round, round_count;
	void *rounds[0];	// trailing variable-length array of object slots
} depot_magazine;


// Per-CPU cache of up to two magazines (the classic loaded/previous pair).
typedef struct depot_cpu_store {
	benaphore lock;
	depot_magazine *loaded, *previous;
} depot_cpu_store;


// Central depot: lists of full and empty magazines shared by all CPUs.
// NOTE(review): the function pointer below names "base_depot" before the
// typedef completes — this relies on C++ struct-name injection and would
// not compile as plain C despite the extern "C" wrapper; verify intent.
typedef struct base_depot {
	benaphore lock;
	depot_magazine *full, *empty;
	size_t full_count, empty_count;
	depot_cpu_store *stores;	// array indexed by CPU number

	// Returns an object to the underlying slab cache.
	void (*return_object)(base_depot *depot, void *object);
} base_depot;


// Returns the calling CPU's store. Caller must not migrate CPUs while
// using the result (presumably ensured by the caller — confirm).
static inline depot_cpu_store *
base_depot_cpu(base_depot *depot)
{
	return &depot->stores[smp_get_current_cpu()];
}


status_t base_depot_init(base_depot *depot,
	void (*return_object)(base_depot *, void *));
void base_depot_destroy(base_depot *depot);
void *base_depot_obtain_from_store(base_depot *depot, depot_cpu_store *store);
// Returns non-zero if the object was absorbed by a magazine.
int base_depot_return_to_store(base_depot *depot, depot_cpu_store *store,
	void *object);
void base_depot_make_empty(base_depot *depot);

#ifdef __cplusplus
}
#endif

#endif
|
192
headers/private/kernel/slab/HashStrategy.h
Normal file
192
headers/private/kernel/slab/HashStrategy.h
Normal file
@ -0,0 +1,192 @@
|
||||
/*
|
||||
* Copyright 2007, Hugo Santos. All Rights Reserved.
|
||||
* Distributed under the terms of the MIT License.
|
||||
*
|
||||
* Authors:
|
||||
* Hugo Santos, hugosantos@gmail.com
|
||||
*/
|
||||
|
||||
#ifndef _SLAB_HASH_STRATEGY_H_
|
||||
#define _SLAB_HASH_STRATEGY_H_
|
||||
|
||||
#include <slab/Strategy.h>
|
||||
#include <slab/Utilities.h> // for TypedCache
|
||||
|
||||
#include <KernelExport.h>
|
||||
#include <util/OpenHashTable.h>
|
||||
|
||||
|
||||
// Strategy base that maps objects to their slabs through an external
// open hash table, instead of storing linkage inside/next to the object.
struct BaseHashCacheStrategy {
	// Per-object bookkeeping: free-list linkage + hash linkage + the
	// object's slab and buffer address.
	struct Link : cache_object_link, HashTableLink<Link> {
		cache_slab *slab;
		void *buffer;
	};

	// Definition plugged into OpenHashTable: keys are buffer addresses.
	struct HashTableDefinition {
		typedef BaseHashCacheStrategy ParentType;
		typedef void * KeyType;
		typedef Link ValueType;

		HashTableDefinition(BaseHashCacheStrategy *_parent) : parent(_parent) {}

		size_t HashKey(void *key) const
		{
			// Hash the address with the object-size granularity shifted
			// away, so consecutive objects land in distinct buckets.
			return (((uint8_t *)key) - ((uint8_t *)0)) >> parent->fLowerBoundary;
		}

		size_t Hash(Link *value) const { return HashKey(value->buffer); }

		bool Compare(void *key, Link *value) const
		{
			return value->buffer == key;
		}

		HashTableLink<Link> *GetLink(Link *value) const { return value; }

		BaseHashCacheStrategy *parent;
	};

	// for g++ 2.95
	friend class HashTableDefinition;

	typedef OpenHashTable<HashTableDefinition> HashTable;

	// "Find last set": returns the index of the highest set bit,
	// or -1 for zero (i.e. floor(log2(value))).
	static inline int
	__Fls0(size_t value)
	{
		if (value == 0)
			return -1;

		int bit;
		for (bit = 0; value != 1; bit++)
			value >>= 1;
		return bit;
	}

	BaseHashCacheStrategy(base_cache *parent)
		: fHashTable(this), fLowerBoundary(__Fls0(parent->object_size)) {}

	// Free-list link -> object address.
	void *Object(cache_object_link *link) const
	{
		return ((Link *)link)->buffer;
	}

	// Object address -> (slab, link), via hash lookup.
	CacheObjectInfo ObjectInformation(void *object) const
	{
		Link *link = _Linkage(object);
		return CacheObjectInfo(link->slab, link);
	}

protected:
	// Panics if the object was never inserted — every allocated buffer
	// must have a Link registered by _PrepareSlab().
	Link *_Linkage(void *object) const
	{
		Link *link = fHashTable.Lookup(object);
		if (link == NULL)
			panic("slab: missing buffer link from hash table.");
		return link;
	}

	// C-style trampoline passed to base_cache_construct_slab(); _this
	// must actually point at a BaseHashCacheStrategy (see NewSlab()).
	static cache_object_link *_Linkage(void *_this, void *object)
	{
		return ((BaseHashCacheStrategy *)_this)->_Linkage(object);
	}

	HashTable fHashTable;
	const size_t fLowerBoundary;	// log2(object_size), fixed at construction
};
|
||||
|
||||
|
||||
// Concrete hash-based strategy: slab headers and per-object links are
// allocated from two auxiliary TypedCaches rather than from the slab's
// own pages, so the whole slab area is available for objects.
template<typename Backend>
struct HashCacheStrategy : BaseCacheStrategy<Backend>, BaseHashCacheStrategy {
	typedef typename BaseCacheStrategy<Backend>::Slab Slab;
	typedef HashCacheStrategy<Backend> Strategy;

	HashCacheStrategy(base_cache *parent)
		: BaseCacheStrategy<Backend>(parent), BaseHashCacheStrategy(parent),
			fSlabCache("slab cache", 0), fLinkCache("link cache", 0) {}

	// No per-object overhead inside the slab pages.
	static size_t RequiredSpace(size_t objectSize)
	{
		return objectSize;
	}

	// Creates a new slab: header from fSlabCache, pages from the backend,
	// one hash-table Link per object. Rolls everything back on failure.
	BaseSlab *NewSlab(uint32_t flags)
	{
		size_t byteCount = _SlabSize();

		Slab *slab = fSlabCache.Alloc(flags);
		if (slab == NULL)
			return NULL;

		void *pages;
		if (Backend::AllocatePages(fParent, &slab->id, &pages, byteCount,
				flags) < B_OK) {
			fSlabCache.Free(slab);
			return NULL;
		}

		if (_PrepareSlab(slab, pages, byteCount, flags) < B_OK) {
			Backend::FreePages(fParent, slab->id);
			fSlabCache.Free(slab);
			return NULL;
		}

		// it's very important that we cast this to BaseHashCacheStrategy
		// so we get the proper instance offset through void *
		return BaseCacheStrategy<Backend>::_ConstructSlab(slab, pages, 0,
			_Linkage, (BaseHashCacheStrategy *)this);
	}

	// Tears a slab down: removes all hash links, destructs objects,
	// frees the pages, then the slab header.
	void ReturnSlab(BaseSlab *slab)
	{
		_ClearSlab(slab->pages, _SlabSize());
		BaseCacheStrategy<Backend>::_DestructSlab(slab);
		fSlabCache.Free((Slab *)slab);
	}

private:
	size_t _SlabSize() const
	{
		return BaseCacheStrategy<Backend>::SlabSize(0);
	}

	// Registers one Link per object in the hash table. On allocation
	// failure, unregisters the links created so far and fails.
	status_t _PrepareSlab(Slab *slab, void *pages, size_t byteCount,
		uint32_t flags)
	{
		uint8_t *data = (uint8_t *)pages;
		for (uint8_t *it = data;
				it < (data + byteCount); it += fParent->object_size) {
			Link *link = fLinkCache.Alloc(flags);

			if (link == NULL) {
				_ClearSlabRange(data, it);
				return B_NO_MEMORY;
			}

			link->slab = slab;
			link->buffer = it;
			fHashTable.Insert(link);
		}

		return B_OK;
	}

	void _ClearSlab(void *pages, size_t size)
	{
		_ClearSlabRange((uint8_t *)pages, ((uint8_t *)pages) + size);
	}

	// Removes and frees the Links for all objects in [data, end).
	void _ClearSlabRange(uint8_t *data, uint8_t *end)
	{
		for (uint8_t *it = data; it < end; it += fParent->object_size) {
			Link *link = _Linkage(it);
			fHashTable.Remove(link);
			fLinkCache.Free(link);
		}
	}

	TypedCache<Slab, Backend> fSlabCache;
	TypedCache<Link, Backend> fLinkCache;
};
|
||||
|
||||
#endif
|
104
headers/private/kernel/slab/MergedStrategy.h
Normal file
104
headers/private/kernel/slab/MergedStrategy.h
Normal file
@ -0,0 +1,104 @@
|
||||
/*
|
||||
* Copyright 2007, Hugo Santos. All Rights Reserved.
|
||||
* Distributed under the terms of the MIT License.
|
||||
*
|
||||
* Authors:
|
||||
* Hugo Santos, hugosantos@gmail.com
|
||||
*/
|
||||
|
||||
#ifndef _SLAB_MERGED_STRATEGY_H_
|
||||
#define _SLAB_MERGED_STRATEGY_H_
|
||||
|
||||
#include <slab/Strategy.h>
|
||||
|
||||
|
||||
namespace Private {
	// Rounds "object" down to the nearest multiple of "byteCount"
	// (byteCount must be a power of two). The arithmetic is routed
	// through a NULL-based byte offset to stay formally pointer-typed.
	static inline const void *
	LowerBoundary(void *object, size_t byteCount)
	{
		const uint8_t *base = (uint8_t *)NULL;
		size_t offset = ((uint8_t *)object) - base;
		return base + (offset & ~(byteCount - 1));
	}
}
||||
|
||||
|
||||
// This slab strategy includes the ObjectLink at the end of each object and the
// slab at the end of the allocated pages. It uses aligned allocations to
// provide object to slab mapping with zero storage, thus there is only one
// word of overhead per object. This is optimized for small objects.
template<typename Backend>
class MergedLinkCacheStrategy : public BaseCacheStrategy<Backend> {
public:
	typedef typename BaseCacheStrategy<Backend>::Slab Slab;
	typedef cache_object_link Link;

	MergedLinkCacheStrategy(base_cache *parent)
		: BaseCacheStrategy<Backend>(parent) {}

	// One Link is appended to every object.
	static size_t RequiredSpace(size_t objectSize)
	{
		return objectSize + sizeof(Link);
	}

	// Link address -> object start (the link sits at the object's tail).
	void *Object(Link *link) const
	{
		return ((uint8_t *)link) - (fParent->object_size - sizeof(Link));
	}

	// The slab is found by rounding the object address down to the slab
	// boundary — possible because the pages are allocated aligned.
	CacheObjectInfo ObjectInformation(void *object) const
	{
		Slab *slab = _SlabInPages(Private::LowerBoundary(object, _SlabSize()));
		return CacheObjectInfo(slab, _Linkage(object));
	}

	BaseSlab *NewSlab(uint32_t flags)
	{
		typename Backend::AllocationID id;
		void *pages;

		size_t byteCount = _SlabSize();
		if (byteCount > Backend::kMaximumAlignedLength)
			byteCount = Backend::kMaximumAlignedLength;

		// in order to save a pointer per object or a hash table to
		// map objects to slabs we required this set of pages to be
		// aligned in a (pageCount * PAGE_SIZE) boundary.
		if (Backend::AllocatePages(fParent, &id, &pages, byteCount,
				CACHE_ALIGN_TO_TOTAL | flags) < B_OK)
			return NULL;

		_SlabInPages(pages)->id = id;

		return BaseCacheStrategy<Backend>::_ConstructSlab(_SlabInPages(pages),
			pages, sizeof(Slab), _Linkage, this);
	}

	void ReturnSlab(BaseSlab *slab)
	{
		BaseCacheStrategy<Backend>::_DestructSlab(slab);
	}

private:
	// Slab size reserving room for the Slab header at the pages' tail.
	size_t _SlabSize() const
	{
		return BaseCacheStrategy<Backend>::SlabSize(sizeof(Slab));
	}

	// Object start -> its trailing Link.
	Link *_Linkage(void *object) const
	{
		return (Link *)(((uint8_t *)object)
			+ (fParent->object_size - sizeof(Link)));
	}

	// The Slab header lives in the last sizeof(Slab) bytes of the pages.
	Slab *_SlabInPages(const void *pages) const
	{
		return (Slab *)(((uint8_t *)pages) + _SlabSize() - sizeof(Slab));
	}

	// Trampoline for base_cache_construct_slab().
	static Link *_Linkage(void *_this, void *object)
	{
		return ((MergedLinkCacheStrategy *)_this)->_Linkage(object);
	}
};
|
||||
|
||||
#endif
|
103
headers/private/kernel/slab/Slab.h
Normal file
103
headers/private/kernel/slab/Slab.h
Normal file
@ -0,0 +1,103 @@
|
||||
/*
|
||||
* Copyright 2007, Hugo Santos. All Rights Reserved.
|
||||
* Distributed under the terms of the MIT License.
|
||||
*
|
||||
* Authors:
|
||||
* Hugo Santos, hugosantos@gmail.com
|
||||
*/
|
||||
|
||||
#ifndef _SLAB_SLAB_H_
|
||||
#define _SLAB_SLAB_H_
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
||||
#include <slab/Backend.h>
|
||||
#include <slab/Base.h>
|
||||
#include <slab/Depot.h>
|
||||
#include <slab/MergedStrategy.h>
|
||||
#include <slab/HashStrategy.h>
|
||||
#include <slab/Utilities.h>
|
||||
|
||||
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
// Allocation flags accepted by the object_cache API.
enum {
	CACHE_DONT_SLEEP = 1 << 0,		// fail rather than block for memory
	CACHE_ALIGN_TO_TOTAL = 1 << 16,	// align pages to the total slab size
};

// Opaque handle to an object cache (actually a LocalCache instance).
typedef void *object_cache_t;


// Creates an object cache; constructor/destructor (optional) are invoked
// per object with "cookie" as the first argument.
object_cache_t
object_cache_create(const char *name, size_t object_size, size_t alignment,
	void (*_constructor)(void *, void *), void (*_destructor)(void *, void *),
	void *cookie);
void *object_cache_alloc(object_cache_t cache);
void *object_cache_alloc_etc(object_cache_t cache, uint32_t flags);
void object_cache_free(object_cache_t cache, void *object);
void object_cache_destroy(object_cache_t cache);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
||||
|
||||
// Combines any Cache type with a per-CPU magazine depot: allocations are
// served from the local magazine layer first, falling back to the slab
// cache proper only when the depot runs dry.
template<typename CacheType>
class LocalCache : public CacheType, protected base_depot {
public:
	typedef LocalCache<CacheType> ThisType;
	typedef typename CacheType::Constructor Constructor;
	typedef typename CacheType::Destructor Destructor;

	LocalCache(const char *name, size_t objectSize, size_t alignment,
		Constructor _constructor, Destructor _destructor, void *_cookie)
		: CacheType(name, objectSize, alignment, _constructor, _destructor,
			_cookie)
	{
		// Depot setup may fail; check with InitCheck() before use.
		fStatus = base_depot_init(this, _ReturnObject);
	}

	~LocalCache()
	{
		base_depot_destroy(this);
	}

	status_t InitCheck() const { return fStatus; }

	// Try the calling CPU's magazine store first, then the slab cache.
	void *Alloc(uint32_t flags)
	{
		void *object = base_depot_obtain_from_store(this, base_depot_cpu(this));
		if (object == NULL)
			object = CacheType::AllocateObject(flags);
		return object;
	}

	// Return to the magazine store; fall through to the slab cache when
	// the store cannot absorb the object.
	void Free(void *object)
	{
		if (!base_depot_return_to_store(this, base_depot_cpu(this), object))
			CacheType::ReturnObject(object);
	}

	// Flushes all magazines back to the slab cache.
	void Destroy()
	{
		base_depot_make_empty(this);
	}

private:
	void ReturnObject(void *object)
	{
		CacheType::ReturnObject(object);
	}

	// Depot callback; "self" is actually this LocalCache.
	static void _ReturnObject(base_depot *self, void *object)
	{
		static_cast<ThisType *>(self)->ReturnObject(object);
	}

	status_t fStatus;
};
|
||||
|
||||
#endif /* __cplusplus */
|
||||
|
||||
#endif
|
53
headers/private/kernel/slab/Strategy.h
Normal file
53
headers/private/kernel/slab/Strategy.h
Normal file
@ -0,0 +1,53 @@
|
||||
/*
|
||||
* Copyright 2007, Hugo Santos. All Rights Reserved.
|
||||
* Distributed under the terms of the MIT License.
|
||||
*
|
||||
* Authors:
|
||||
* Hugo Santos, hugosantos@gmail.com
|
||||
*/
|
||||
|
||||
#ifndef _SLAB_STRATEGY_H_
|
||||
#define _SLAB_STRATEGY_H_
|
||||
|
||||
#include <slab/Base.h>
|
||||
|
||||
|
||||
// Common functionality shared by all slab strategies: slab sizing and
// construction/destruction through the base cache + backend pair.
template<typename Backend>
class BaseCacheStrategy {
protected:
	typedef cache_object_link ObjectLink;
	typedef cache_slab BaseSlab;

	BaseCacheStrategy(base_cache *parent)
		: fParent(parent) {}

	// Size (in whole backend pages) of a slab that should hold about
	// kMinimumSlabItems objects plus tailSpace bytes of metadata.
	// NOTE(review): the division truncates, so the resulting slab can
	// hold slightly fewer than kMinimumSlabItems objects — confirm
	// whether rounding up was intended.
	size_t SlabSize(size_t tailSpace) const
	{
		size_t pageCount = (kMinimumSlabItems * fParent->object_size
			+ tailSpace) / Backend::kPageSize;
		if (pageCount < 1)
			pageCount = 1;
		return pageCount * Backend::kPageSize;
	}

	// Slab header extended with the backend's allocation handle.
	struct Slab : BaseSlab {
		typename Backend::AllocationID id;
	};

	// Initializes the slab over "pages", excluding tailSpace bytes at
	// the end from object storage.
	BaseSlab *_ConstructSlab(Slab *slab, void *pages, size_t tailSpace,
		ObjectLink *(*getLink)(void *parent, void *object), void *parent)
	{
		return base_cache_construct_slab(fParent, slab, pages,
			SlabSize(tailSpace) - tailSpace, getLink, parent);
	}

	// Destructs all objects, then releases the backing pages.
	void _DestructSlab(BaseSlab *slab)
	{
		base_cache_destruct_slab(fParent, slab);
		Backend::FreePages(fParent, ((Slab *)slab)->id);
	}

	base_cache *fParent;
};
|
||||
|
||||
#endif
|
45
headers/private/kernel/slab/Utilities.h
Normal file
45
headers/private/kernel/slab/Utilities.h
Normal file
@ -0,0 +1,45 @@
|
||||
/*
|
||||
* Copyright 2007, Hugo Santos. All Rights Reserved.
|
||||
* Distributed under the terms of the MIT License.
|
||||
*
|
||||
* Authors:
|
||||
* Hugo Santos, hugosantos@gmail.com
|
||||
*/
|
||||
|
||||
#ifndef _SLAB_UTILITIES_H_
|
||||
#define _SLAB_UTILITIES_H_
|
||||
|
||||
#include <slab/Base.h>
|
||||
#include <slab/MergedStrategy.h>
|
||||
|
||||
|
||||
// Type-safe convenience wrapper around a merged-strategy Cache for a
// single C++ type; subclasses may override ConstructObject()/
// DestructObject() for per-object initialization.
template<typename Type, typename Backend>
class TypedCache : public Cache< MergedLinkCacheStrategy<Backend> > {
public:
	typedef MergedLinkCacheStrategy<Backend> Strategy;
	typedef Cache<Strategy> BaseType;

	TypedCache(const char *name, size_t alignment)
		: BaseType(name, sizeof(Type), alignment, _ConstructObject,
			_DestructObject, this) {}
	virtual ~TypedCache() {}

	Type *Alloc(uint32_t flags) { return (Type *)BaseType::AllocateObject(flags); }
	void Free(Type *object) { BaseType::ReturnObject(object); }

private:
	// Static trampolines registered with the base cache; "cookie" is
	// the TypedCache instance passed in the constructor above.
	static void _ConstructObject(void *cookie, void *object)
	{
		((TypedCache *)cookie)->ConstructObject((Type *)object);
	}

	static void _DestructObject(void *cookie, void *object)
	{
		((TypedCache *)cookie)->DestructObject((Type *)object);
	}

	// Default hooks do nothing; override for typed initialization.
	virtual void ConstructObject(Type *object) {}
	virtual void DestructObject(Type *object) {}
};
|
||||
|
||||
#endif
|
@ -61,6 +61,7 @@ KernelLd kernel_$(TARGET_ARCH) :
|
||||
kernel_util.o
|
||||
kernel_messaging.o
|
||||
kernel_debug.o
|
||||
kernel_slab.o
|
||||
|
||||
lib$(TARGET_ARCH).a
|
||||
kernel_platform_$(TARGET_BOOT_PLATFORM).o
|
||||
@ -91,6 +92,7 @@ KernelLd kernel.so :
|
||||
kernel_util.o
|
||||
kernel_messaging.o
|
||||
kernel_debug.o
|
||||
kernel_slab.o
|
||||
|
||||
lib$(TARGET_ARCH).a
|
||||
kernel_platform_$(TARGET_BOOT_PLATFORM).o
|
||||
@ -124,6 +126,7 @@ SubInclude HAIKU_TOP src system kernel disk_device_manager ;
|
||||
SubInclude HAIKU_TOP src system kernel fs ;
|
||||
SubInclude HAIKU_TOP src system kernel lib ;
|
||||
SubInclude HAIKU_TOP src system kernel messaging ;
|
||||
SubInclude HAIKU_TOP src system kernel slab ;
|
||||
SubInclude HAIKU_TOP src system kernel util ;
|
||||
SubInclude HAIKU_TOP src system kernel vm ;
|
||||
|
||||
|
9
src/system/kernel/slab/Jamfile
Normal file
9
src/system/kernel/slab/Jamfile
Normal file
@ -0,0 +1,9 @@
|
||||
SubDir HAIKU_TOP src system kernel slab ;
|
||||
|
||||
UsePrivateHeaders [ FDirName kernel slab ] ;
|
||||
|
||||
KernelMergeObject kernel_slab.o :
|
||||
Slab.cpp
|
||||
|
||||
: $(TARGET_KERNEL_PIC_CCFLAGS) -Wno-unused
|
||||
;
|
448
src/system/kernel/slab/Slab.cpp
Normal file
448
src/system/kernel/slab/Slab.cpp
Normal file
@ -0,0 +1,448 @@
|
||||
/*
|
||||
* Copyright 2007, Hugo Santos. All Rights Reserved.
|
||||
* Distributed under the terms of the MIT License.
|
||||
*
|
||||
* Authors:
|
||||
* Hugo Santos, hugosantos@gmail.com
|
||||
*/
|
||||
|
||||
#include <Slab.h>
|
||||
|
||||
#include <string.h>
|
||||
|
||||
#include <KernelExport.h>
|
||||
#include <util/AutoLock.h>
|
||||
|
||||
#include <algorithm> // swap
|
||||
#include <new>
|
||||
|
||||
// TODO all of the small allocations we perform here will fallback
|
||||
// to the internal allocator which in the future will use this
|
||||
// same code. We'll have to resolve all of the dependencies
|
||||
// then, for now, it is still not required.
|
||||
|
||||
|
||||
// TODO this value should be dynamically tuned per cache.
static const int kMagazineCapacity = 32;

// Step by which the cache coloring offset advances per new slab.
static const size_t kCacheColorPeriod = 8;

static depot_magazine *_AllocMagazine();
static void _FreeMagazine(depot_magazine *magazine);
|
||||
|
||||
// Detaches and returns the first element of an intrusive singly-linked
// list; "head" is advanced to the following element. The list must not
// be empty.
template<typename Type> static Type *
SListPop(Type* &head)
{
	Type *popped = head;
	head = popped->next;
	return popped;
}
|
||||
|
||||
|
||||
// Prepends "item" to an intrusive singly-linked list: link it in front
// of the current head, then make it the new head.
template<typename Type> static inline void
SListPush(Type* &head, Type *item)
{
	item->next = head;
	head = item;
}
|
||||
|
||||
|
||||
// Allocates byteCount bytes of slab page storage by creating a kernel
// area named after the cache. On success stores the area id in *id and
// the mapped base address in *pages.
status_t
slab_area_backend_allocate(base_cache *cache, area_id *id, void **pages,
	size_t byteCount, uint32_t flags)
{
	// This backend cannot satisfy aligned allocations larger than one
	// page. BUGFIX: this path used to "return NULL", which converts to
	// 0 == B_OK for a status_t and made callers believe the allocation
	// succeeded without *id/*pages being set. Return a real error.
	if ((flags & CACHE_ALIGN_TO_TOTAL) != 0 && byteCount > B_PAGE_SIZE)
		return B_BAD_VALUE;

	dprintf("AreaBackend::AllocatePages(%lu, 0x%lx)\n", byteCount, flags);

	area_id areaId = create_area(cache->name, pages,
		B_ANY_KERNEL_ADDRESS, byteCount, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
	if (areaId < 0)
		return areaId;	// negative area_id values are error codes

	dprintf(" AreaBackend::AllocatePages() = { %ld, %p }\n", areaId, *pages);

	*id = areaId;
	return B_OK;
}
|
||||
|
||||
void
|
||||
slab_area_backend_free(base_cache *cache, area_id area)
|
||||
{
|
||||
dprintf("AreaBackend::DeletePages(%ld)\n", area);
|
||||
delete_area(area);
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
base_cache_init(base_cache *cache, const char *name, size_t objectSize,
|
||||
size_t alignment, base_cache_constructor constructor,
|
||||
base_cache_destructor destructor, void *cookie)
|
||||
{
|
||||
strlcpy(cache->name, name, sizeof(cache->name));
|
||||
|
||||
if (alignment > 0 && (objectSize & (alignment - 1)))
|
||||
cache->object_size = objectSize + alignment
|
||||
- (objectSize & (alignment - 1));
|
||||
else
|
||||
cache->object_size = objectSize;
|
||||
|
||||
cache->cache_color_cycle = 0;
|
||||
|
||||
list_init_etc(&cache->partial, offsetof(cache_slab, link));
|
||||
list_init_etc(&cache->full, offsetof(cache_slab, link));
|
||||
}
|
||||
|
||||
|
||||
// Pops one free object link from the first partially-used slab. The
// caller (Cache::AllocateObject()) guarantees the partial list is
// non-empty; "slab" is dereferenced without a NULL check on that basis.
cache_object_link *
base_cache_allocate_object(base_cache *cache)
{
	cache_slab *slab = (cache_slab *)list_get_first_item(&cache->partial);

	dprintf("BaseCache::AllocateObject() from %p, %lu remaining\n",
		slab, slab->count);

	cache_object_link *link = SListPop(slab->free);
	slab->count--;
	if (slab->count == 0) {
		// move the partial slab to the full list
		list_remove_item(&cache->partial, slab);
		list_add_item(&cache->full, slab);
	}

	return link;
}
|
||||
|
||||
|
||||
// Pushes an object's link back onto its slab's free list and keeps the
// partial/full lists consistent. Returns non-zero when the slab became
// completely unused (the caller then frees the slab itself).
int
base_cache_return_object(base_cache *cache, cache_slab *slab,
	cache_object_link *link)
{
	// We return true if the slab is completely unused.

	SListPush(slab->free, link);
	slab->count++;
	if (slab->count == slab->size) {
		// Slab is now entirely free: detach it from the partial list
		// so the caller can release it.
		list_remove_item(&cache->partial, slab);
		return 1;
	} else if (slab->count == 1) {
		// First object returned to a previously full slab: it becomes
		// partial again.
		list_remove_item(&cache->full, slab);
		list_add_item(&cache->partial, slab);
	}

	return 0;
}
|
||||
|
||||
|
||||
// Initializes a slab over "pages": computes how many objects fit into
// byteCount, applies a cache-coloring offset from the spare bytes,
// constructs every object and threads their links onto the free list.
// getLink(parent, object) maps an object address to its free-list link.
cache_slab *
base_cache_construct_slab(base_cache *cache, cache_slab *slab, void *pages,
	size_t byteCount, cache_object_link *(*getLink)(void *parent, void *object),
	void *parent)
{
	dprintf("BaseCache::ConstructSlab(%p, %p, %lu, %p, %p)\n", slab, pages,
		byteCount, getLink, parent);

	slab->pages = pages;
	slab->count = slab->size = byteCount / cache->object_size;
	slab->free = NULL;

	// Bytes left over after packing whole objects; they bound the
	// coloring offset applied below.
	size_t spareBytes = byteCount - (slab->size * cache->object_size);
	size_t cycle = cache->cache_color_cycle;

	// Rotate the start offset by kCacheColorPeriod per slab so equal
	// offsets in different slabs map to different cache lines; wrap
	// once the offset no longer fits in the spare bytes.
	if (cycle > spareBytes)
		cache->cache_color_cycle = cycle = 0;
	else
		cache->cache_color_cycle += kCacheColorPeriod;

	dprintf(" %lu objects, %lu spare bytes, cycle %lu\n",
		slab->size, spareBytes, cycle);

	uint8_t *data = ((uint8_t *)pages) + cycle;

	// Construct each object (if a constructor is set) and push its
	// link onto the slab's free list.
	for (size_t i = 0; i < slab->size; i++) {
		if (cache->constructor)
			cache->constructor(cache->cookie, data);
		SListPush(slab->free, getLink(parent, data));
		data += cache->object_size;
	}

	return slab;
}
|
||||
|
||||
|
||||
void
|
||||
base_cache_destruct_slab(base_cache *cache, cache_slab *slab)
|
||||
{
|
||||
if (cache->destructor == NULL)
|
||||
return;
|
||||
|
||||
uint8_t *data = (uint8_t *)slab->pages;
|
||||
|
||||
for (size_t i = 0; i < slab->size; i++) {
|
||||
cache->destructor(cache->cookie, data);
|
||||
data += cache->object_size;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// A magazine holds no objects when current_round is zero.
static inline bool
_IsMagazineEmpty(depot_magazine *magazine)
{
	return magazine->current_round == 0;
}


// A magazine is full when every round slot is occupied.
static inline bool
_IsMagazineFull(depot_magazine *magazine)
{
	return magazine->current_round == magazine->round_count;
}


// Pops the topmost object; the magazine must not be empty.
static inline void *
_PopMagazine(depot_magazine *magazine)
{
	return magazine->rounds[--magazine->current_round];
}


// Pushes an object onto the magazine; returns false when it is full.
static inline bool
_PushMagazine(depot_magazine *magazine, void *object)
{
	if (_IsMagazineFull(magazine))
		return false;
	magazine->rounds[magazine->current_round++] = object;
	return true;
}
|
||||
|
||||
|
||||
// Swaps the caller's (empty) magazine for a full one from the depot.
// Returns false when the depot has no full magazines. Called with the
// per-CPU store lock held; takes the depot lock here.
static bool
_ExchangeWithFull(base_depot *depot, depot_magazine* &magazine)
{
	BenaphoreLocker _(depot->lock);

	if (depot->full == NULL)
		return false;

	depot->full_count--;
	depot->empty_count++;

	// Donate the caller's magazine to the empty list and hand back a
	// full one in its place.
	SListPush(depot->empty, magazine);
	magazine = SListPop(depot->full);
	return true;
}
|
||||
|
||||
|
||||
// Swaps the caller's (full or NULL) magazine for an empty one from the
// depot, allocating a fresh magazine if none is available. Returns
// false only when allocation fails. Called with the per-CPU store lock
// held; takes the depot lock here.
static bool
_ExchangeWithEmpty(base_depot *depot, depot_magazine* &magazine)
{
	BenaphoreLocker _(depot->lock);

	if (depot->empty == NULL) {
		// Seed the empty list with a newly allocated magazine; it is
		// popped again right below, so empty_count stays balanced.
		depot->empty = _AllocMagazine();
		if (depot->empty == NULL)
			return false;
	} else {
		depot->empty_count--;
	}

	// Donate the caller's magazine (if any) to the full list.
	if (magazine) {
		SListPush(depot->full, magazine);
		depot->full_count++;
	}

	magazine = SListPop(depot->empty);
	return true;
}
|
||||
|
||||
|
||||
// Allocates a magazine with kMagazineCapacity round slots appended to
// the header, initialized empty. Returns NULL on allocation failure.
static depot_magazine *
_AllocMagazine()
{
	size_t size = sizeof(depot_magazine) + kMagazineCapacity * sizeof(void *);
	depot_magazine *magazine = (depot_magazine *)malloc(size);
	if (magazine == NULL)
		return NULL;

	magazine->next = NULL;
	magazine->current_round = 0;
	magazine->round_count = kMagazineCapacity;
	return magazine;
}
|
||||
|
||||
|
||||
// Releases a magazine's storage; the rounds it held must already have
// been returned (see _EmptyMagazine()).
static void
_FreeMagazine(depot_magazine *magazine)
{
	free(magazine);
}
|
||||
|
||||
|
||||
// Returns every object the magazine still holds to the underlying
// cache via the depot's callback, then frees the magazine itself.
static void
_EmptyMagazine(base_depot *depot, depot_magazine *magazine)
{
	for (uint16_t i = 0; i < magazine->current_round; i++)
		depot->return_object(depot, magazine->rounds[i]);
	_FreeMagazine(magazine);
}
|
||||
|
||||
|
||||
// Initializes a magazine depot: empty full/empty lists, one CPU store
// per CPU, and the callback used to hand objects back to the cache.
// Returns B_OK, a benaphore error, or B_NO_MEMORY.
status_t
base_depot_init(base_depot *depot,
	void (*return_object)(base_depot *depot, void *object))
{
	depot->full = NULL;
	depot->empty = NULL;
	depot->full_count = depot->empty_count = 0;

	status_t status = benaphore_init(&depot->lock, "depot");
	if (status < B_OK)
		return status;

	depot->stores = new (std::nothrow) depot_cpu_store[smp_get_num_cpus()];
	if (depot->stores == NULL) {
		benaphore_destroy(&depot->lock);
		return B_NO_MEMORY;
	}

	for (int i = 0; i < smp_get_num_cpus(); i++) {
		// NOTE(review): the per-store benaphore_init() result is not
		// checked — confirm whether it can fail here.
		benaphore_init(&depot->stores[i].lock, "cpu store");
		depot->stores[i].loaded = depot->stores[i].previous = NULL;
	}

	depot->return_object = return_object;

	return B_OK;
}
|
||||
|
||||
|
||||
// Tears the depot down: flushes all magazines back to the cache, then
// destroys the per-CPU store locks, the store array and the depot lock.
void
base_depot_destroy(base_depot *depot)
{
	base_depot_make_empty(depot);

	for (int i = 0; i < smp_get_num_cpus(); i++) {
		benaphore_destroy(&depot->stores[i].lock);
	}

	delete [] depot->stores;

	benaphore_destroy(&depot->lock);
}
|
||||
|
||||
|
||||
// Tries to obtain an object from a CPU's magazine store. Returns NULL
// when the magazine layer cannot help and the caller must fall back to
// the slab cache.
void *
base_depot_obtain_from_store(base_depot *depot, depot_cpu_store *store)
{
	BenaphoreLocker _(store->lock);

	// To better understand both the Alloc() and Free() logic refer to
	// Bonwick's ``Magazines and Vmem'' [in 2001 USENIX proceedings]

	// In a nutshell, we try to get an object from the loaded magazine
	// if it's not empty, or from the previous magazine if it's full
	// and finally from the Slab if the magazine depot has no full magazines.

	if (store->loaded == NULL)
		return NULL;

	while (true) {
		if (!_IsMagazineEmpty(store->loaded))
			return _PopMagazine(store->loaded);

		// Loaded is empty: promote "previous" if it has objects (full,
		// or refillable from the depot's full list), then retry.
		if (store->previous && (_IsMagazineFull(store->previous)
				|| _ExchangeWithFull(depot, store->previous)))
			std::swap(store->previous, store->loaded);
		else
			return NULL;
	}
}
|
||||
|
||||
|
||||
// Tries to return an object to a CPU's magazine store. Returns non-zero
// on success; zero means the caller must return the object to the slab
// cache directly.
int
base_depot_return_to_store(base_depot *depot, depot_cpu_store *store,
	void *object)
{
	BenaphoreLocker _(store->lock);

	// We try to add the object to the loaded magazine if we have one
	// and it's not full, or to the previous one if it is empty. If
	// the magazine depot doesn't provide us with a new empty magazine
	// we return the object directly to the slab.

	while (true) {
		if (store->loaded && _PushMagazine(store->loaded, object))
			return 1;

		// Loaded is full (or absent): bring in an empty magazine —
		// either the local "previous" one or a fresh one from the
		// depot — and retry the push.
		if ((store->previous && _IsMagazineEmpty(store->previous))
				|| _ExchangeWithEmpty(depot, store->previous))
			std::swap(store->loaded, store->previous);
		else
			return 0;
	}
}
|
||||
|
||||
|
||||
// Flushes every magazine — per-CPU and depot-level — back to the
// underlying cache and frees them all.
void
base_depot_make_empty(base_depot *depot)
{
	// TODO locking

	for (int i = 0; i < smp_get_num_cpus(); i++) {
		if (depot->stores[i].loaded)
			_EmptyMagazine(depot, depot->stores[i].loaded);
		if (depot->stores[i].previous)
			_EmptyMagazine(depot, depot->stores[i].previous);
		depot->stores[i].loaded = depot->stores[i].previous = NULL;
	}

	while (depot->full)
		_EmptyMagazine(depot, SListPop(depot->full));

	while (depot->empty)
		_EmptyMagazine(depot, SListPop(depot->empty));
}
|
||||
|
||||
|
||||
// Concrete cache type behind the C object_cache API: merged-link
// strategy over the area backend, fronted by a per-CPU magazine depot.
typedef MergedLinkCacheStrategy<AreaBackend> AreaMergedCacheStrategy;
typedef Cache<AreaMergedCacheStrategy> AreaMergedCache;
typedef LocalCache<AreaMergedCache> AreaLocalCache;
|
||||
|
||||
// C API: creates an object cache backed by an AreaLocalCache. Returns
// NULL on allocation failure; the handle is opaque to C callers.
object_cache_t
object_cache_create(const char *name, size_t object_size, size_t alignment,
	void (*_constructor)(void *, void *), void (*_destructor)(void *, void *),
	void *cookie)
{
	AreaLocalCache *cache = new (std::nothrow) AreaLocalCache(name,
		object_size, alignment, _constructor, _destructor, cookie);
	return cache;
}
|
||||
|
||||
|
||||
void *
|
||||
object_cache_alloc(object_cache_t cache)
|
||||
{
|
||||
return ((AreaLocalCache *)cache)->Alloc(0);
|
||||
}
|
||||
|
||||
|
||||
void *
|
||||
object_cache_alloc_etc(object_cache_t cache, uint32_t flags)
|
||||
{
|
||||
return ((AreaLocalCache *)cache)->Alloc(flags);
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
object_cache_free(object_cache_t cache, void *object)
|
||||
{
|
||||
((AreaLocalCache *)cache)->Free(object);
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
object_cache_destroy(object_cache_t cache)
|
||||
{
|
||||
delete (AreaLocalCache *)cache;
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user