diff --git a/src/system/kernel/slab/HashedObjectCache.cpp b/src/system/kernel/slab/HashedObjectCache.cpp
index b90b129d5b..68ea8e24ac 100644
--- a/src/system/kernel/slab/HashedObjectCache.cpp
+++ b/src/system/kernel/slab/HashedObjectCache.cpp
@@ -77,24 +77,22 @@ HashedObjectCache::Create(const char* name, size_t object_size,
 
 
 slab*
-HashedObjectCache::CreateSlab(uint32 flags, bool unlockWhileAllocating)
+HashedObjectCache::CreateSlab(uint32 flags)
 {
 	if (!check_cache_quota(this))
 		return NULL;
 
-	if (unlockWhileAllocating)
-		Unlock();
+	Unlock();
 
 	slab* slab = allocate_slab(flags);
 
-	if (unlockWhileAllocating)
-		Lock();
+	Lock();
 
 	if (slab == NULL)
 		return NULL;
 
 	void* pages;
-	if ((this->*allocate_pages)(&pages, flags, unlockWhileAllocating) == B_OK) {
+	if ((this->*allocate_pages)(&pages, flags) == B_OK) {
 		if (InitSlab(slab, pages, slab_size, flags))
 			return slab;
diff --git a/src/system/kernel/slab/HashedObjectCache.h b/src/system/kernel/slab/HashedObjectCache.h
index 11a069daad..6fc5acbfb8 100644
--- a/src/system/kernel/slab/HashedObjectCache.h
+++ b/src/system/kernel/slab/HashedObjectCache.h
@@ -23,8 +23,7 @@ struct HashedObjectCache : ObjectCache {
 						object_cache_destructor destructor,
 						object_cache_reclaimer reclaimer);
 
-	virtual slab*		CreateSlab(uint32 flags,
-							bool unlockWhileAllocating);
+	virtual slab*		CreateSlab(uint32 flags);
 	virtual void		ReturnSlab(slab* slab);
 	virtual slab*		ObjectSlab(void* object) const;
diff --git a/src/system/kernel/slab/ObjectCache.cpp b/src/system/kernel/slab/ObjectCache.cpp
index ebb5b4bd22..1d4773ef62 100644
--- a/src/system/kernel/slab/ObjectCache.cpp
+++ b/src/system/kernel/slab/ObjectCache.cpp
@@ -85,6 +85,8 @@ ObjectCache::Init(const char* name, size_t objectSize,
 	this->flags = flags;
 
 	resize_request = NULL;
+	resize_entry_can_wait = NULL;
+	resize_entry_dont_wait = NULL;
 
 	// no gain in using the depot in single cpu setups
 	if (smp_get_num_cpus() == 1)
@@ -295,8 +297,7 @@ ObjectCache::SetKernelArgs(kernel_args* args)
 
 
 status_t
-ObjectCache::AllocatePages(void** pages, uint32 flags,
-	bool unlockWhileAllocating)
+ObjectCache::AllocatePages(void** pages, uint32 flags)
 {
 	TRACE_CACHE(cache, "allocate pages (%lu, 0x0%lx)", slab_size, flags);
 
@@ -309,8 +310,7 @@ ObjectCache::AllocatePages(void** pages, uint32 flags,
 		&& slab_size != B_PAGE_SIZE)
 		addressSpec = B_ANY_KERNEL_BLOCK_ADDRESS;
 
-	if (unlockWhileAllocating)
-		Unlock();
+	Unlock();
 
 	// if we are allocating, it is because we need the pages immediatly
 	// so we lock them. when moving the slab to the empty list we should
@@ -320,8 +320,7 @@ ObjectCache::AllocatePages(void** pages, uint32 flags,
 		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0,
 		(flags & CACHE_DONT_SLEEP) != 0 ? CREATE_AREA_DONT_WAIT : 0);
 
-	if (unlockWhileAllocating)
-		Lock();
+	Lock();
 
 	if (areaId < 0)
 		return areaId;
@@ -353,20 +352,17 @@ ObjectCache::FreePages(void* pages)
 
 
 status_t
-ObjectCache::EarlyAllocatePages(void** pages, uint32 flags,
-	bool unlockWhileAllocating)
+ObjectCache::EarlyAllocatePages(void** pages, uint32 flags)
 {
 	TRACE_CACHE(this, "early allocate pages (%lu, 0x0%lx)", slab_size,
 		flags);
 
-	if (unlockWhileAllocating)
-		Unlock();
+	Unlock();
 
 	addr_t base = vm_allocate_early(sKernelArgs, slab_size,
 		slab_size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
 
-	if (unlockWhileAllocating)
-		Lock();
+	Lock();
 
 	*pages = (void*)base;
diff --git a/src/system/kernel/slab/ObjectCache.h b/src/system/kernel/slab/ObjectCache.h
index 9205886c53..bf79a940f1 100644
--- a/src/system/kernel/slab/ObjectCache.h
+++ b/src/system/kernel/slab/ObjectCache.h
@@ -8,6 +8,7 @@
 #define OBJECT_CACHE_H
 
 
+#include <condition_variable.h>
 #include 
 #include 
 #include 
@@ -31,6 +32,10 @@ struct slab : DoublyLinkedListLinkImpl<slab> {
 
 typedef DoublyLinkedList<slab> SlabList;
 
+struct ObjectCacheResizeEntry {
+	ConditionVariable	condition;
+};
+
 struct ObjectCache : DoublyLinkedListLinkImpl<ObjectCache> {
 	char name[32];
 	mutex lock;
@@ -53,13 +58,16 @@ struct ObjectCache : DoublyLinkedListLinkImpl<ObjectCache> {
 
 	ResizeRequest* resize_request;
 
+	ObjectCacheResizeEntry* resize_entry_can_wait;
+	ObjectCacheResizeEntry* resize_entry_dont_wait;
+
 	void* cookie;
 	object_cache_constructor constructor;
 	object_cache_destructor destructor;
 	object_cache_reclaimer reclaimer;
 
 	status_t (ObjectCache::*allocate_pages)(void** pages,
-		uint32 flags, bool unlockWhileAllocating);
+		uint32 flags);
 	void (ObjectCache::*free_pages)(void* pages);
 
 	object_depot depot;
@@ -76,8 +84,7 @@ public:
 			void InitPostArea();
 			void Delete();
 
-	virtual slab* CreateSlab(uint32 flags,
-				bool unlockWhileAllocating) = 0;
+	virtual slab* CreateSlab(uint32 flags) = 0;
 	virtual void ReturnSlab(slab* slab) = 0;
 	virtual slab* ObjectSlab(void* object) const = 0;
@@ -95,11 +102,9 @@ public:
 			void Unlock() { mutex_unlock(&lock); }
 
 	static void SetKernelArgs(kernel_args* args);
-			status_t AllocatePages(void** pages, uint32 flags,
-				bool unlockWhileAllocating);
+			status_t AllocatePages(void** pages, uint32 flags);
 			void FreePages(void* pages);
-			status_t EarlyAllocatePages(void** pages, uint32 flags,
-				bool unlockWhileAllocating);
+			status_t EarlyAllocatePages(void** pages, uint32 flags);
 			void EarlyFreePages(void* pages);
 
 private:
diff --git a/src/system/kernel/slab/Slab.cpp b/src/system/kernel/slab/Slab.cpp
index 57cc168cb0..c7d8a91f3d 100644
--- a/src/system/kernel/slab/Slab.cpp
+++ b/src/system/kernel/slab/Slab.cpp
@@ -316,17 +316,55 @@ increase_object_reserve(ObjectCache* cache)
 */
 static status_t
 object_cache_reserve_internal(ObjectCache* cache, size_t objectCount,
-	uint32 flags, bool unlockWhileAllocating)
+	uint32 flags)
 {
+	// If someone else is already adding slabs, we wait for that to be finished
+	// first.
+	while (true) {
+		if (objectCount <= cache->total_objects - cache->used_count)
+			return B_OK;
+
+		ObjectCacheResizeEntry* resizeEntry = NULL;
+		if (cache->resize_entry_dont_wait != NULL) {
+			resizeEntry = cache->resize_entry_dont_wait;
+		} else if (cache->resize_entry_can_wait != NULL
+				&& (flags & CACHE_DONT_SLEEP) == 0) {
+			resizeEntry = cache->resize_entry_can_wait;
+		} else
+			break;
+
+		ConditionVariableEntry entry;
+		resizeEntry->condition.Add(&entry);
+
+		cache->Unlock();
+		entry.Wait();
+		cache->Lock();
+	}
+
+	// prepare the resize entry others can wait on
+	ObjectCacheResizeEntry*& resizeEntry = (flags & CACHE_DONT_SLEEP) != 0
+		? cache->resize_entry_dont_wait : cache->resize_entry_can_wait;
+
+	ObjectCacheResizeEntry myResizeEntry;
+	resizeEntry = &myResizeEntry;
+	resizeEntry->condition.Init(cache, "wait for slabs");
+
+	// add new slabs until there are as many free ones as requested
 	while (objectCount > cache->total_objects - cache->used_count) {
-		slab* newSlab = cache->CreateSlab(flags, unlockWhileAllocating);
-		if (newSlab == NULL)
+		slab* newSlab = cache->CreateSlab(flags);
+		if (newSlab == NULL) {
+			resizeEntry->condition.NotifyAll();
+			resizeEntry = NULL;
 			return B_NO_MEMORY;
+		}
 
 		cache->empty.Add(newSlab);
 		cache->empty_count++;
 	}
 
+	resizeEntry->condition.NotifyAll();
+	resizeEntry = NULL;
+
 	return B_OK;
 }
@@ -422,7 +460,7 @@ object_cache_resizer(void*)
 		MutexLocker cacheLocker(cache->lock);
 		status_t error = object_cache_reserve_internal(cache,
-			cache->min_object_reserve, 0, true);
+			cache->min_object_reserve, 0);
 		if (error != B_OK) {
 			dprintf("object cache resizer: Failed to resize object cache "
 				"%p!\n", cache);
@@ -557,7 +595,7 @@ object_cache_alloc(object_cache* cache, uint32 flags)
 
 	if (cache->partial.IsEmpty()) {
 		if (cache->empty.IsEmpty()) {
-			if (object_cache_reserve_internal(cache, 1, flags, false) < B_OK) {
+			if (object_cache_reserve_internal(cache, 1, flags) < B_OK) {
 				T(Alloc(cache, flags, NULL));
 				return NULL;
 			}
@@ -625,7 +663,7 @@ object_cache_reserve(object_cache* cache, size_t objectCount, uint32 flags)
 	T(Reserve(cache, objectCount, flags));
 
 	MutexLocker _(cache->lock);
-	return object_cache_reserve_internal(cache, objectCount, flags, false);
+	return object_cache_reserve_internal(cache, objectCount, flags);
 }
diff --git a/src/system/kernel/slab/SmallObjectCache.cpp b/src/system/kernel/slab/SmallObjectCache.cpp
index 5af322cbe4..cfa45e2638 100644
--- a/src/system/kernel/slab/SmallObjectCache.cpp
+++ b/src/system/kernel/slab/SmallObjectCache.cpp
@@ -40,13 +40,13 @@ SmallObjectCache::Create(const char* name, size_t object_size,
 
 
 slab*
-SmallObjectCache::CreateSlab(uint32 flags, bool unlockWhileAllocating)
+SmallObjectCache::CreateSlab(uint32 flags)
 {
 	if (!check_cache_quota(this))
 		return NULL;
 
 	void* pages;
-	if ((this->*allocate_pages)(&pages, flags, unlockWhileAllocating) != B_OK)
+	if ((this->*allocate_pages)(&pages, flags) != B_OK)
 		return NULL;
 
 	return InitSlab(slab_in_pages(pages, slab_size), pages,
diff --git a/src/system/kernel/slab/SmallObjectCache.h b/src/system/kernel/slab/SmallObjectCache.h
index 849798d770..5bc60d36a3 100644
--- a/src/system/kernel/slab/SmallObjectCache.h
+++ b/src/system/kernel/slab/SmallObjectCache.h
@@ -19,8 +19,7 @@ struct SmallObjectCache : ObjectCache {
 						object_cache_destructor destructor,
 						object_cache_reclaimer reclaimer);
 
-	virtual slab*		CreateSlab(uint32 flags,
-							bool unlockWhileAllocating);
+	virtual slab*		CreateSlab(uint32 flags);
 	virtual void		ReturnSlab(slab* slab);
 	virtual slab*		ObjectSlab(void* object) const;
 };
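
Note (not part of the patch): the Slab.cpp hunk replaces the old "unlock while allocating" flag with a resize entry published on the cache. The first caller that needs to grow the cache installs the entry and allocates slabs with the cache unlocked; later callers find the entry and wait on its condition variable instead of allocating concurrently, and callers with CACHE_DONT_SLEEP only ever wait on another non-sleeping resize. The sketch below is a minimal user-space analogue of that idea using the standard library; names such as Cache, reserve and addSlab are illustrative only, and the two separate can_wait/dont_wait entries are collapsed into a single flag.

// Simplified user-space analogue of the resize-entry synchronization that the
// Slab.cpp hunk introduces. All names here (Cache, reserve, addSlab) are
// illustrative; the kernel code uses Haiku's ConditionVariable and the cache
// mutex, and keeps two entries (can_wait/dont_wait) instead of a single flag.
#include <condition_variable>
#include <cstddef>
#include <mutex>

struct Cache {
	std::mutex lock;
	std::condition_variable resizeDone;	// stands in for ObjectCacheResizeEntry
	bool resizeInProgress = false;		// stands in for resize_entry_* != NULL
	std::size_t totalObjects = 0;
	std::size_t usedObjects = 0;

	void addSlab() { totalObjects += 8; }	// placeholder for CreateSlab()

	// Ensure at least objectCount objects are free; only one thread grows the
	// cache at a time, the others wait on the condition variable.
	void reserve(std::size_t objectCount)
	{
		std::unique_lock<std::mutex> locker(lock);

		// Wait for an in-progress resize (or for enough objects to be free).
		resizeDone.wait(locker, [&] {
			return !resizeInProgress
				|| objectCount <= totalObjects - usedObjects;
		});
		if (objectCount <= totalObjects - usedObjects)
			return;

		// We are the resizer now; publish that fact so later callers wait
		// in the loop above instead of growing the cache concurrently.
		resizeInProgress = true;
		while (objectCount > totalObjects - usedObjects) {
			// The real code drops the cache lock around the slab allocation;
			// omitted here for brevity.
			addSlab();
		}
		resizeInProgress = false;
		resizeDone.notify_all();
	}
};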