Always unlock the object cache while allocating memory. This is necessary for
the CACHE_DONT_SLEEP flag to actually work, since otherwise the thread could
block on the mutex held by a thread that is allocating memory. We use two
condition variables (one for callers that may sleep, one for those that may
not) to prevent multiple threads from allocating slabs at the same time.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@35206 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold 2010-01-20 18:40:16 +00:00
parent 7bca271d9e
commit 08d66c1288
7 changed files with 72 additions and 37 deletions
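
The heart of the change is the rewritten object_cache_reserve_internal() in the
Slab.cpp hunk below: a thread that needs to grow the cache first waits on any
resize that is already in flight (a CACHE_DONT_SLEEP caller only ever waits on
another non-sleeping resize), then publishes its own ObjectCacheResizeEntry,
drops the cache mutex around the actual slab allocation, and finally notifies
everyone who queued behind it. What follows is a rough user-space sketch of
that pattern, not the kernel code itself: it uses std::mutex and
std::condition_variable in place of Haiku's kernel primitives, all names are
made up, and the shared_ptr lifetime handling stands in for the kernel's
stack-allocated ConditionVariable entries.

#include <condition_variable>
#include <cstddef>
#include <memory>
#include <mutex>

// One such entry exists per resize "flavor" that is currently in flight.
struct ResizeEntry {
	std::condition_variable condition;
	bool done = false;
};

struct CacheSketch {
	std::mutex lock;
	std::size_t freeObjects = 0;

	// Callers that may sleep queue behind the first entry; non-sleeping
	// callers only ever queue behind the second one.
	std::shared_ptr<ResizeEntry> resizeCanWait;
	std::shared_ptr<ResizeEntry> resizeDontWait;

	// Stand-in for the real slab/page allocation; pretend every slab holds
	// eight objects and that allocation always succeeds.
	bool AllocateSlab(bool /*dontSleep*/, std::size_t* addedObjects)
	{
		*addedObjects = 8;
		return true;
	}

	bool Reserve(std::size_t count, bool dontSleep)
	{
		std::unique_lock<std::mutex> locker(lock);

		// If someone else is already adding slabs, wait for that to finish
		// first; a dontSleep caller never waits on a sleeping resize.
		while (freeObjects < count) {
			std::shared_ptr<ResizeEntry> entry;
			if (resizeDontWait != nullptr)
				entry = resizeDontWait;
			else if (resizeCanWait != nullptr && !dontSleep)
				entry = resizeCanWait;
			else
				break;

			entry->condition.wait(locker,
				[&entry] { return entry->done; });
		}

		if (freeObjects >= count)
			return true;

		// Publish our own entry so that later callers queue behind us.
		std::shared_ptr<ResizeEntry>& slot
			= dontSleep ? resizeDontWait : resizeCanWait;
		slot = std::make_shared<ResizeEntry>();
		std::shared_ptr<ResizeEntry> myEntry = slot;

		bool success = true;
		while (freeObjects < count) {
			// Drop the cache lock around the actual allocation, so a
			// dontSleep caller is never stuck on a mutex held by a
			// sleeping allocator.
			locker.unlock();
			std::size_t added = 0;
			bool ok = AllocateSlab(dontSleep, &added);
			locker.lock();

			if (!ok) {
				success = false;
				break;
			}
			freeObjects += added;
		}

		// Unpublish the entry and wake everyone who queued behind us.
		slot.reset();
		myEntry->done = true;
		myEntry->condition.notify_all();
		return success;
	}
};

A caller that must not block indefinitely would invoke Reserve(count, true); it
then never sleeps on a mutex held by a thread that is off allocating pages,
which is what the commit message means by making CACHE_DONT_SLEEP actually
work.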


@@ -77,24 +77,22 @@ HashedObjectCache::Create(const char* name, size_t object_size,
 slab*
-HashedObjectCache::CreateSlab(uint32 flags, bool unlockWhileAllocating)
+HashedObjectCache::CreateSlab(uint32 flags)
 {
 	if (!check_cache_quota(this))
 		return NULL;
-	if (unlockWhileAllocating)
-		Unlock();
+	Unlock();
 	slab* slab = allocate_slab(flags);
-	if (unlockWhileAllocating)
-		Lock();
+	Lock();
 	if (slab == NULL)
 		return NULL;
 	void* pages;
-	if ((this->*allocate_pages)(&pages, flags, unlockWhileAllocating) == B_OK) {
+	if ((this->*allocate_pages)(&pages, flags) == B_OK) {
 		if (InitSlab(slab, pages, slab_size, flags))
 			return slab;


@@ -23,8 +23,7 @@ struct HashedObjectCache : ObjectCache {
 				object_cache_destructor destructor,
 				object_cache_reclaimer reclaimer);
-	virtual slab*	CreateSlab(uint32 flags,
-					bool unlockWhileAllocating);
+	virtual slab*	CreateSlab(uint32 flags);
 	virtual void	ReturnSlab(slab* slab);
 	virtual slab*	ObjectSlab(void* object) const;


@@ -85,6 +85,8 @@ ObjectCache::Init(const char* name, size_t objectSize,
 	this->flags = flags;
 	resize_request = NULL;
+	resize_entry_can_wait = NULL;
+	resize_entry_dont_wait = NULL;
 	// no gain in using the depot in single cpu setups
 	if (smp_get_num_cpus() == 1)
@@ -295,8 +297,7 @@ ObjectCache::SetKernelArgs(kernel_args* args)
 status_t
-ObjectCache::AllocatePages(void** pages, uint32 flags,
-	bool unlockWhileAllocating)
+ObjectCache::AllocatePages(void** pages, uint32 flags)
 {
 	TRACE_CACHE(cache, "allocate pages (%lu, 0x0%lx)", slab_size, flags);
@@ -309,8 +310,7 @@ ObjectCache::AllocatePages(void** pages, uint32 flags,
 		&& slab_size != B_PAGE_SIZE)
 		addressSpec = B_ANY_KERNEL_BLOCK_ADDRESS;
-	if (unlockWhileAllocating)
-		Unlock();
+	Unlock();
 	// if we are allocating, it is because we need the pages immediatly
 	// so we lock them. when moving the slab to the empty list we should
@@ -320,8 +320,7 @@ ObjectCache::AllocatePages(void** pages, uint32 flags,
 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0,
 		(flags & CACHE_DONT_SLEEP) != 0 ? CREATE_AREA_DONT_WAIT : 0);
-	if (unlockWhileAllocating)
-		Lock();
+	Lock();
 	if (areaId < 0)
 		return areaId;
@@ -353,20 +352,17 @@ ObjectCache::FreePages(void* pages)
 status_t
-ObjectCache::EarlyAllocatePages(void** pages, uint32 flags,
-	bool unlockWhileAllocating)
+ObjectCache::EarlyAllocatePages(void** pages, uint32 flags)
 {
 	TRACE_CACHE(this, "early allocate pages (%lu, 0x0%lx)", slab_size,
 		flags);
-	if (unlockWhileAllocating)
-		Unlock();
+	Unlock();
 	addr_t base = vm_allocate_early(sKernelArgs, slab_size,
 		slab_size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
-	if (unlockWhileAllocating)
-		Lock();
+	Lock();
 	*pages = (void*)base;


@@ -8,6 +8,7 @@
 #define OBJECT_CACHE_H
+#include <condition_variable.h>
 #include <lock.h>
 #include <slab/ObjectDepot.h>
 #include <slab/Slab.h>
@@ -31,6 +32,10 @@ struct slab : DoublyLinkedListLinkImpl<slab> {
 typedef DoublyLinkedList<slab> SlabList;
+struct ObjectCacheResizeEntry {
+	ConditionVariable	condition;
+};
 struct ObjectCache : DoublyLinkedListLinkImpl<ObjectCache> {
 	char name[32];
 	mutex lock;
@@ -53,13 +58,16 @@ struct ObjectCache : DoublyLinkedListLinkImpl<ObjectCache> {
 	ResizeRequest*	resize_request;
+	ObjectCacheResizeEntry*	resize_entry_can_wait;
+	ObjectCacheResizeEntry*	resize_entry_dont_wait;
 	void*			cookie;
 	object_cache_constructor constructor;
 	object_cache_destructor destructor;
 	object_cache_reclaimer reclaimer;
 	status_t (ObjectCache::*allocate_pages)(void** pages,
-		uint32 flags, bool unlockWhileAllocating);
+		uint32 flags);
 	void (ObjectCache::*free_pages)(void* pages);
 	object_depot	depot;
@@ -76,8 +84,7 @@ public:
 	void			InitPostArea();
 	void			Delete();
-	virtual slab*	CreateSlab(uint32 flags,
-					bool unlockWhileAllocating) = 0;
+	virtual slab*	CreateSlab(uint32 flags) = 0;
 	virtual void	ReturnSlab(slab* slab) = 0;
 	virtual slab*	ObjectSlab(void* object) const = 0;
@@ -95,11 +102,9 @@ public:
 	void			Unlock()	{ mutex_unlock(&lock); }
 	static void		SetKernelArgs(kernel_args* args);
-	status_t		AllocatePages(void** pages, uint32 flags,
-					bool unlockWhileAllocating);
+	status_t		AllocatePages(void** pages, uint32 flags);
 	void			FreePages(void* pages);
-	status_t		EarlyAllocatePages(void** pages, uint32 flags,
-					bool unlockWhileAllocating);
+	status_t		EarlyAllocatePages(void** pages, uint32 flags);
 	void			EarlyFreePages(void* pages);
 private:


@@ -316,17 +316,55 @@ increase_object_reserve(ObjectCache* cache)
 */
 static status_t
 object_cache_reserve_internal(ObjectCache* cache, size_t objectCount,
-	uint32 flags, bool unlockWhileAllocating)
+	uint32 flags)
 {
+	// If someone else is already adding slabs, we wait for that to be finished
+	// first.
+	while (true) {
+		if (objectCount <= cache->total_objects - cache->used_count)
+			return B_OK;
+
+		ObjectCacheResizeEntry* resizeEntry = NULL;
+		if (cache->resize_entry_dont_wait != NULL) {
+			resizeEntry = cache->resize_entry_dont_wait;
+		} else if (cache->resize_entry_can_wait != NULL
+				&& (flags & CACHE_DONT_SLEEP) == 0) {
+			resizeEntry = cache->resize_entry_can_wait;
+		} else
+			break;
+
+		ConditionVariableEntry entry;
+		resizeEntry->condition.Add(&entry);
+
+		cache->Unlock();
+		entry.Wait();
+		cache->Lock();
+	}
+
+	// prepare the resize entry others can wait on
+	ObjectCacheResizeEntry*& resizeEntry = (flags & CACHE_DONT_SLEEP) != 0
+		? cache->resize_entry_dont_wait : cache->resize_entry_can_wait;
+	ObjectCacheResizeEntry myResizeEntry;
+	resizeEntry = &myResizeEntry;
+	resizeEntry->condition.Init(cache, "wait for slabs");
+
+	// add new slabs until there are as many free ones as requested
 	while (objectCount > cache->total_objects - cache->used_count) {
-		slab* newSlab = cache->CreateSlab(flags, unlockWhileAllocating);
-		if (newSlab == NULL)
+		slab* newSlab = cache->CreateSlab(flags);
+		if (newSlab == NULL) {
+			resizeEntry->condition.NotifyAll();
+			resizeEntry = NULL;
 			return B_NO_MEMORY;
+		}
 		cache->empty.Add(newSlab);
 		cache->empty_count++;
 	}
+
+	resizeEntry->condition.NotifyAll();
+	resizeEntry = NULL;
+
 	return B_OK;
 }
@@ -422,7 +460,7 @@ object_cache_resizer(void*)
 		MutexLocker cacheLocker(cache->lock);
 		status_t error = object_cache_reserve_internal(cache,
-			cache->min_object_reserve, 0, true);
+			cache->min_object_reserve, 0);
 		if (error != B_OK) {
 			dprintf("object cache resizer: Failed to resize object cache "
 				"%p!\n", cache);
@@ -557,7 +595,7 @@ object_cache_alloc(object_cache* cache, uint32 flags)
 	if (cache->partial.IsEmpty()) {
 		if (cache->empty.IsEmpty()) {
-			if (object_cache_reserve_internal(cache, 1, flags, false) < B_OK) {
+			if (object_cache_reserve_internal(cache, 1, flags) < B_OK) {
 				T(Alloc(cache, flags, NULL));
 				return NULL;
 			}
@@ -625,7 +663,7 @@ object_cache_reserve(object_cache* cache, size_t objectCount, uint32 flags)
 	T(Reserve(cache, objectCount, flags));
 	MutexLocker _(cache->lock);
-	return object_cache_reserve_internal(cache, objectCount, flags, false);
+	return object_cache_reserve_internal(cache, objectCount, flags);
 }


@@ -40,13 +40,13 @@ SmallObjectCache::Create(const char* name, size_t object_size,
 slab*
-SmallObjectCache::CreateSlab(uint32 flags, bool unlockWhileAllocating)
+SmallObjectCache::CreateSlab(uint32 flags)
 {
 	if (!check_cache_quota(this))
 		return NULL;
 	void* pages;
-	if ((this->*allocate_pages)(&pages, flags, unlockWhileAllocating) != B_OK)
+	if ((this->*allocate_pages)(&pages, flags) != B_OK)
 		return NULL;
 	return InitSlab(slab_in_pages(pages, slab_size), pages,


@@ -19,8 +19,7 @@ struct SmallObjectCache : ObjectCache {
 				object_cache_destructor destructor,
 				object_cache_reclaimer reclaimer);
-	virtual slab*	CreateSlab(uint32 flags,
-					bool unlockWhileAllocating);
+	virtual slab*	CreateSlab(uint32 flags);
 	virtual void	ReturnSlab(slab* slab);
 	virtual slab*	ObjectSlab(void* object) const;
 };