More slab fixes; also introduced a new strategy optimized for medium-sized buffers (64 <= x < 512) whose lengths are not powers of 2 (it has an overhead of 2 words per buffer).

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@20839 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Hugo Santos 2007-04-26 11:38:24 +00:00
parent b821eb28c8
commit e6fb3d3947
8 changed files with 192 additions and 66 deletions

View File

@ -32,7 +32,7 @@ enum {
static const int kMinimumSlabItems = 32;
typedef void (*base_cache_constructor)(void *cookie, void *object);
typedef status_t (*base_cache_constructor)(void *cookie, void *object);
typedef void (*base_cache_destructor)(void *cookie, void *object);
/* base Slab implementation, opaque to the backend used.
@ -79,9 +79,15 @@ cache_object_link *base_cache_allocate_object_with_new_slab(base_cache *cache,
int base_cache_return_object(base_cache *cache, cache_slab *slab,
cache_object_link *link);
typedef status_t (*base_cache_owner_prepare)(void *parent,
cache_slab *slab, void *object);
typedef void (*base_cache_owner_unprepare)(void *parent, cache_slab *slab,
void *object);
cache_slab *base_cache_construct_slab(base_cache *cache, cache_slab *slab,
void *pages, size_t byte_count, cache_object_link *(*get_link)(
void *parent, void *object), void *parent);
void *pages, size_t byte_count, void *parent,
cache_object_link *(*get_link)(void *parent, void *object),
base_cache_owner_prepare prepare, base_cache_owner_unprepare unprepare);
void base_cache_destruct_slab(base_cache *cache, cache_slab *slab);
#ifdef __cplusplus
@ -103,8 +109,8 @@ public:
: fStrategy(this)
{
if (benaphore_init(&fLock, name) >= B_OK) {
base_cache_init(this, name, objectSize, alignment, constructor,
destructor, cookie);
base_cache_init(this, name, Strategy::RequiredSpace(objectSize),
alignment, constructor, destructor, cookie);
register_low_memory_handler(_LowMemory, this, 0);
}
}

View File

@ -126,16 +126,17 @@ struct HashCacheStrategy : BaseCacheStrategy<Backend>, BaseHashCacheStrategy {
return NULL;
}
if (_PrepareSlab(slab, pages, byteCount, flags) < B_OK) {
Backend::FreePages(Parent(), slab->id);
fSlabCache.Free(slab);
return NULL;
}
// it's very important that we cast this to BaseHashCacheStrategy
// so we get the proper instance offset through void *
return BaseCacheStrategy<Backend>::_ConstructSlab(slab, pages,
_SlabSize(), _Linkage, (BaseHashCacheStrategy *)this);
cache_slab *result = BaseCacheStrategy<Backend>::_ConstructSlab(slab,
pages, _SlabSize(), (BaseHashCacheStrategy *)this, _Linkage,
_PrepareObject, _UnprepareObject);
if (result == NULL) {
Backend::FreePages(Parent(), slab->id);
fSlabCache.Free(slab);
}
return result;
}
void ReturnSlab(BaseSlab *slab)
@ -153,39 +154,43 @@ private:
base_cache *Parent() const { return BaseCacheStrategy<Backend>::Parent(); }
status_t _PrepareSlab(Slab *slab, void *pages, size_t byteCount,
uint32_t flags)
static status_t _PrepareObject(void *_self, cache_slab *slab, void *object)
{
uint8_t *data = (uint8_t *)pages;
for (uint8_t *it = data;
it < (data + byteCount); it += Parent()->object_size) {
Link *link = fLinkCache.Alloc(flags);
BaseHashCacheStrategy *base = (BaseHashCacheStrategy *)_self;
Strategy *self = (Strategy *)base;
if (link == NULL) {
_ClearSlabRange(data, it);
return B_NO_MEMORY;
}
Link *link = self->fLinkCache.Alloc(CACHE_DONT_SLEEP);
if (link == NULL)
return B_NO_MEMORY;
link->slab = slab;
link->buffer = it;
fHashTable.Insert(link);
}
link->slab = slab;
link->buffer = object;
self->fHashTable.Insert(link);
return B_OK;
}
void _ClearSlab(void *pages, size_t size)
static void _UnprepareObject(void *_self, cache_slab *slab, void *object)
{
_ClearSlabRange((uint8_t *)pages, ((uint8_t *)pages) + size);
BaseHashCacheStrategy *base = (BaseHashCacheStrategy *)_self;
((Strategy *)base)->_UnprepareObject(object);
}
void _ClearSlabRange(uint8_t *data, uint8_t *end)
void _UnprepareObject(void *object)
{
for (uint8_t *it = data; it < end; it += Parent()->object_size) {
Link *link = _Linkage(it);
fHashTable.Remove(link);
fLinkCache.Free(link);
}
Link *link = _Linkage(object);
fHashTable.Remove(link);
fLinkCache.Free(link);
}
void _ClearSlab(void *pages, size_t size)
{
uint8_t *data = (uint8_t *)pages;
uint8_t *end = data + size;
for (uint8_t *it = data; it < end; it += Parent()->object_size)
_UnprepareObject(it);
}
TypedCache<Slab, Backend> fSlabCache;

View File

@ -12,16 +12,6 @@
#include <slab/Strategy.h>
namespace Private {
static inline const void *
LowerBoundary(void *object, size_t byteCount)
{
const uint8_t *null = (uint8_t *)NULL;
return null + ((((uint8_t *)object) - null) & ~(byteCount - 1));
}
}
// This slab strategy includes the ObjectLink at the end of each object and the
// slab at the end of the allocated pages. It uses aligned allocations to
// provide object to slab mapping with zero storage, thus there is only one
@ -46,9 +36,16 @@ public:
return ((uint8_t *)link) - (Parent()->object_size - sizeof(Link));
}
static inline const void *
LowerBoundary(void *object, size_t byteCount)
{
const uint8_t *null = (uint8_t *)NULL;
return null + ((((uint8_t *)object) - null) & ~(byteCount - 1));
}
CacheObjectInfo ObjectInformation(void *object) const
{
Slab *slab = _SlabInPages(Private::LowerBoundary(object, _SlabSize()));
Slab *slab = _SlabInPages(LowerBoundary(object, _SlabSize()));
return CacheObjectInfo(slab, _Linkage(object));
}
@ -68,7 +65,7 @@ public:
_SlabInPages(pages)->id = id;
return BaseCacheStrategy<Backend>::_ConstructSlab(_SlabInPages(pages),
pages, _SlabSize() - sizeof(Slab), _Linkage, this);
pages, byteCount - sizeof(Slab), this, _Linkage, NULL, NULL);
}
void ReturnSlab(BaseSlab *slab)
@ -104,4 +101,91 @@ private:
}
};
// This slab strategy includes the ObjectLink at the end of each object and the
// slab at the end of the allocated pages. It maintains a pointer to the owning
// slab in the ObjectLink. This is optimized for medium-sized objects whose
// length is not a power of 2: unlike MergedLinkCacheStrategy it does not rely
// on power-of-2-aligned allocations to map an object back to its slab, at the
// cost of one extra pointer (the explicit slab back-pointer) per object in
// addition to the ObjectLink itself.
template<typename Backend>
class MergedLinkAndSlabCacheStrategy : public BaseCacheStrategy<Backend> {
public:
typedef MergedLinkAndSlabCacheStrategy<Backend> Strategy;
typedef typename BaseCacheStrategy<Backend>::BaseSlab BaseSlab;
typedef typename BaseCacheStrategy<Backend>::Slab Slab;
// Per-object trailer: the generic free-list link plus an explicit
// back-pointer to the owning slab (filled in by _PrepareObject).
struct Link : cache_object_link {
cache_slab *slab;
};
MergedLinkAndSlabCacheStrategy(base_cache *parent)
: BaseCacheStrategy<Backend>(parent) {}
// Space the cache must reserve per object: the user-visible object
// followed by the Link trailer. The result is what base_cache_init
// receives as the object size (see the LocalCache constructor), so
// Parent()->object_size below already includes sizeof(Link).
static size_t RequiredSpace(size_t objectSize)
{
return objectSize + sizeof(Link);
}
// Maps a free-list link back to the start of its object; inverse of
// _Linkage(), which places the Link at the end of the object slot.
void *Object(cache_object_link *_link) const
{
Link *link = static_cast<Link *>(_link);
return ((uint8_t *)link) - (Parent()->object_size - sizeof(Link));
}
// O(1) object -> (slab, link) lookup via the stored back-pointer;
// this is the operation the extra word per object pays for.
CacheObjectInfo ObjectInformation(void *object) const
{
Link *link = _Linkage(object);
return CacheObjectInfo(link->slab, link);
}
// Allocates a page run from the backend and carves it into objects.
// The Slab header lives in the last sizeof(Slab) bytes of the run
// (see _SlabInPages), so only size - sizeof(Slab) bytes hold objects.
// _PrepareObject wires each object's slab back-pointer; no unprepare
// callback is needed since preparation has nothing to undo.
BaseSlab *NewSlab(uint32_t flags)
{
typename Backend::AllocationID id;
void *pages;
size_t size = _SlabSize();
if (Backend::AllocatePages(Parent(), &id, &pages, size, flags) < B_OK)
return NULL;
_SlabInPages(pages)->id = id;
return BaseCacheStrategy<Backend>::_ConstructSlab(_SlabInPages(pages),
pages, size - sizeof(Slab), this, _Linkage, _PrepareObject, NULL);
}
// Destroys the slab's objects and returns its pages to the backend.
void ReturnSlab(BaseSlab *slab)
{
BaseCacheStrategy<Backend>::_DestructSlab(slab);
}
private:
// Total byte size of one slab allocation, including the trailing
// Slab header accounted for as tail space.
size_t _SlabSize() const
{
return BaseCacheStrategy<Backend>::SlabSize(sizeof(Slab));
}
base_cache *Parent() const { return BaseCacheStrategy<Backend>::Parent(); }
// The Link trailer sits in the last sizeof(Link) bytes of the
// object slot (object_size already includes the trailer, see
// RequiredSpace).
Link *_Linkage(void *_object) const
{
uint8_t *object = (uint8_t *)_object;
return (Link *)(object + (Parent()->object_size - sizeof(Link)));
}
// The Slab header occupies the tail of the page run.
Slab *_SlabInPages(const void *pages) const
{
return (Slab *)(((uint8_t *)pages) + _SlabSize() - sizeof(Slab));
}
// C-style trampoline handed to base_cache_construct_slab; _this is
// the Strategy pointer passed as the slab's parent in NewSlab.
static cache_object_link *_Linkage(void *_this, void *object)
{
return static_cast<Strategy *>(_this)->_Linkage(object);
}
// Per-object prepare hook: records the owning slab in the object's
// Link trailer. Cannot fail, hence the unconditional B_OK.
static status_t _PrepareObject(void *_this, cache_slab *slab, void *object)
{
static_cast<Strategy *>(_this)->_Linkage(object)->slab = slab;
return B_OK;
}
};
#endif

View File

@ -27,7 +27,7 @@ typedef void *object_cache_t;
object_cache_t
object_cache_create(const char *name, size_t object_size, size_t alignment,
void (*_constructor)(void *, void *), void (*_destructor)(void *, void *),
base_cache_constructor constructor, base_cache_destructor destructor,
void *cookie);
void *object_cache_alloc(object_cache_t cache);
void *object_cache_alloc_etc(object_cache_t cache, uint32_t flags);

View File

@ -23,8 +23,8 @@ protected:
size_t SlabSize(size_t tailSpace) const
{
size_t pageCount = (kMinimumSlabItems * fParent->object_size
+ tailSpace) / Backend::kPageSize;
size_t pageCount = ((kMinimumSlabItems * fParent->object_size
+ tailSpace) + Backend::kPageSize / 2) / Backend::kPageSize;
if (pageCount < 1)
pageCount = 1;
return pageCount * Backend::kPageSize;
@ -35,10 +35,11 @@ protected:
};
BaseSlab *_ConstructSlab(Slab *slab, void *pages, size_t byteCount,
ObjectLink *(*getLink)(void *parent, void *object), void *parent)
void *parent, ObjectLink *(*getLink)(void *parent, void *object),
base_cache_owner_prepare prepare, base_cache_owner_unprepare unprepare)
{
return base_cache_construct_slab(fParent, slab, pages, byteCount,
getLink, parent);
parent, getLink, prepare, unprepare);
}
void _DestructSlab(BaseSlab *slab)

View File

@ -28,9 +28,9 @@ public:
void Free(Type *object) { BaseType::ReturnObject(object); }
private:
static void _ConstructObject(void *cookie, void *object)
static status_t _ConstructObject(void *cookie, void *object)
{
((TypedCache *)cookie)->ConstructObject((Type *)object);
return ((TypedCache *)cookie)->ConstructObject((Type *)object);
}
static void _DestructObject(void *cookie, void *object)
@ -38,7 +38,7 @@ private:
((TypedCache *)cookie)->DestructObject((Type *)object);
}
virtual void ConstructObject(Type *object) {}
virtual status_t ConstructObject(Type *object) { return B_OK; }
virtual void DestructObject(Type *object) {}
};

View File

@ -65,7 +65,7 @@ struct net_buffer_private : net_buffer {
};
typedef MergedLinkCacheStrategy<AreaBackend> AreaMergedCacheStrategy;
typedef MergedLinkAndSlabCacheStrategy<AreaBackend> AreaMergedCacheStrategy;
typedef HashCacheStrategy<AreaBackend> AreaHashCacheStrategy;
typedef Cache<AreaMergedCacheStrategy> NetBufferCache;

View File

@ -91,14 +91,15 @@ base_cache_init(base_cache *cache, const char *name, size_t objectSize,
{
strlcpy(cache->name, name, sizeof(cache->name));
TRACE_CACHE(cache, "init %lu, %lu", objectSize, alignment);
if (alignment > 0 && (objectSize & (alignment - 1)))
cache->object_size = objectSize + alignment
- (objectSize & (alignment - 1));
else
cache->object_size = objectSize;
TRACE_CACHE(cache, "init %lu, %lu -> %lu", objectSize, alignment,
cache->object_size);
cache->cache_color_cycle = 0;
list_init_etc(&cache->empty, offsetof(cache_slab, link));
@ -190,10 +191,12 @@ base_cache_allocate_object(base_cache *cache)
} else
slab = (cache_slab *)list_get_first_item(&cache->partial);
TRACE_CACHE(cache, "allocate from %p, %lu remaining.", slab, slab->count);
cache_object_link *link = SListPop(slab->free);
slab->count--;
TRACE_CACHE(cache, "allocate %p from %p, %lu remaining.", link, slab,
slab->count);
if (slab->count == 0) {
// move the partial slab to the full list
list_remove_item(&cache->partial, slab);
@ -243,8 +246,9 @@ base_cache_return_object(base_cache *cache, cache_slab *slab,
cache_slab *
base_cache_construct_slab(base_cache *cache, cache_slab *slab, void *pages,
size_t byteCount, cache_object_link *(*getLink)(void *parent, void *object),
void *parent)
size_t byteCount, void *parent,
cache_object_link *(*getLink)(void *parent, void *object),
base_cache_owner_prepare prepare, base_cache_owner_unprepare unprepare)
{
TRACE_CACHE(cache, "construct (%p, %p, %lu)", slab, pages, byteCount);
@ -266,8 +270,34 @@ base_cache_construct_slab(base_cache *cache, cache_slab *slab, void *pages,
uint8_t *data = ((uint8_t *)pages) + cycle;
for (size_t i = 0; i < slab->size; i++) {
if (cache->constructor)
cache->constructor(cache->cookie, data);
if (prepare || cache->constructor) {
bool failedOnFirst = false;
status_t status = B_OK;
if (prepare)
status = prepare(parent, slab, data);
if (status < B_OK)
failedOnFirst = true;
else if (cache->constructor)
status = cache->constructor(cache->cookie, data);
if (status < B_OK) {
if (!failedOnFirst && unprepare)
unprepare(parent, slab, data);
data = ((uint8_t *)pages) + cycle;
for (size_t j = 0; j < i; j++) {
if (cache->destructor)
cache->destructor(cache->cookie, data);
if (unprepare)
unprepare(parent, slab, data);
data += cache->object_size;
}
return NULL;
}
}
SListPush(slab->free, getLink(parent, data));
data += cache->object_size;
}
@ -521,8 +551,8 @@ typedef LocalCache<AreaMergedCache> AreaLocalCache;
object_cache_t
object_cache_create(const char *name, size_t object_size, size_t alignment,
void (*_constructor)(void *, void *), void (*_destructor)(void *, void *),
void *cookie)
status_t (*_constructor)(void *, void *), void (*_destructor)(void *,
void *), void *cookie)
{
return new (std::nothrow) AreaLocalCache(name, object_size, alignment,
_constructor, _destructor, cookie);