mmlr (distracted) + bonefish:

* Turn the VMCache::consumers C list into a DoublyLinkedList.
* Use object caches for the different VMCache types and the VMCacheRefs.
  The purpose is to reduce slab area fragmentation.
* Requires the introduction of a pure virtual VMCache::DeleteObject()
  method, implemented in the derived classes.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@43133 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold 2011-11-02 21:14:11 +00:00
parent ef9d9a1720
commit f8154d172d
14 changed files with 158 additions and 62 deletions
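
The heart of the first bullet is Haiku's intrusive DoublyLinkedList from util/DoublyLinkedList.h: the element embeds its own links, so list membership costs no allocation and insertion/removal cannot fail. The following standalone sketch is a simplified stand-in for that template, not the kernel code itself; it only mirrors the operations this commit uses (Add(), Remove(), RemoveHead(), Head(), Tail(), IsEmpty()):

#include <cstddef>
#include <cstdio>

// The element inherits its next/previous pointers, so it can sit in at
// most one such list at a time, without any per-node allocation.
template<typename Element>
class DoublyLinkedListLinkImpl {
public:
	DoublyLinkedListLinkImpl() : fNext(NULL), fPrevious(NULL) {}

	Element*	fNext;
	Element*	fPrevious;
};

template<typename Element>
class DoublyLinkedList {
public:
	DoublyLinkedList() : fHead(NULL), fTail(NULL) {}

	bool IsEmpty() const	{ return fHead == NULL; }
	Element* Head() const	{ return fHead; }
	Element* Tail() const	{ return fTail; }

	void Add(Element* element)
	{
		// append at the tail
		element->fNext = NULL;
		element->fPrevious = fTail;
		if (fTail != NULL)
			fTail->fNext = element;
		else
			fHead = element;
		fTail = element;
	}

	void Remove(Element* element)
	{
		// unlink, fixing up head/tail at the boundaries
		if (element->fPrevious != NULL)
			element->fPrevious->fNext = element->fNext;
		else
			fHead = element->fNext;
		if (element->fNext != NULL)
			element->fNext->fPrevious = element->fPrevious;
		else
			fTail = element->fPrevious;
	}

	Element* RemoveHead()
	{
		Element* element = fHead;
		if (element != NULL)
			Remove(element);
		return element;
	}

private:
	Element*	fHead;
	Element*	fTail;
};

// Mirrors the VMCache change below: the cache type inherits the link
// and owns a list of its consumer caches.
struct Cache : DoublyLinkedListLinkImpl<Cache> {
	DoublyLinkedList<Cache> consumers;
};

int main()
{
	Cache source, a, b;
	source.consumers.Add(&a);
	source.consumers.Add(&b);
	source.consumers.Remove(&a);
	printf("head is b: %d\n", source.consumers.Head() == &b);
	printf("empty after removal: %d\n",
		source.consumers.RemoveHead() == &b && source.consumers.IsEmpty());
	return 0;
}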

headers/private/kernel/vm/VMCache.h

@ -12,7 +12,7 @@
#include <debug.h>
#include <kernel.h>
#include <util/list.h>
#include <util/DoublyLinkedList.h>
#include <vm/vm.h>
#include <vm/vm_types.h>
@ -20,6 +20,7 @@
struct kernel_args;
class ObjectCache;
enum {
@ -34,6 +35,14 @@ enum {
};
extern ObjectCache* gCacheRefObjectCache;
extern ObjectCache* gAnonymousCacheObjectCache;
extern ObjectCache* gAnonymousNoSwapCacheObjectCache;
extern ObjectCache* gVnodeCacheObjectCache;
extern ObjectCache* gDeviceCacheObjectCache;
extern ObjectCache* gNullCacheObjectCache;
struct VMCachePagesTreeDefinition {
typedef page_num_t KeyType;
typedef vm_page NodeType;
@ -63,7 +72,10 @@ struct VMCachePagesTreeDefinition {
typedef IteratableSplayTree<VMCachePagesTreeDefinition> VMCachePagesTree;
struct VMCache {
struct VMCache : public DoublyLinkedListLinkImpl<VMCache> {
public:
typedef DoublyLinkedList<VMCache> ConsumerList;
public:
VMCache();
virtual ~VMCache();
@ -163,10 +175,12 @@ public:
virtual void Dump(bool showPages) const;
protected:
virtual void DeleteObject() = 0;
public:
VMArea* areas;
list_link consumer_link;
list consumers;
ConsumerList consumers;
// list of caches that use this cache as a source
VMCachePagesTree pages;
VMCache* source;

src/system/kernel/cache/file_cache.cpp

@ -290,7 +290,7 @@ reserve_pages(file_cache_ref* ref, vm_page_reservation* reservation,
VMCache* cache = ref->cache;
cache->Lock();
if (list_is_empty(&cache->consumers) && cache->areas == NULL
if (cache->consumers.IsEmpty() && cache->areas == NULL
&& access_is_sequential(ref)) {
// we are not mapped, and we're accessed sequentially

src/system/kernel/cache/vnode_store.cpp

@ -1,5 +1,5 @@
/*
* Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*/
@ -10,6 +10,7 @@
#include <string.h>
#include <file_cache.h>
#include <slab/Slab.h>
#include <vfs.h>
#include <vm/vm.h>
@ -164,3 +165,10 @@ VMVnodeCache::Dump(bool showPages) const
kprintf(" vnode: %p <%" B_PRIdDEV ", %" B_PRIdINO ">\n", fVnode,
fDevice, fInode);
}
void
VMVnodeCache::DeleteObject()
{
object_cache_delete(gVnodeCacheObjectCache, this);
}
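
Why a virtual DeleteObject() instead of `delete this`: the base class cannot know which object cache a concrete cache type was allocated from, so each subclass returns itself to its own slab. A standalone sketch of the shape of this pattern (simplified; the real subclasses call object_cache_delete() on their global cache rather than delete):

#include <cstdio>

class Cache {
public:
	virtual ~Cache() {}

	// Called once the cache is torn down, in place of `delete this`;
	// virtual dispatch reaches the most-derived class, which alone
	// knows where its storage came from.
	void Delete()
	{
		printf("tearing down shared state\n");
		DeleteObject();
	}

protected:
	virtual void DeleteObject() = 0;
};

class VnodeCache : public Cache {
protected:
	virtual void DeleteObject()
	{
		// stands in for: object_cache_delete(gVnodeCacheObjectCache, this);
		delete this;
	}
};

int main()
{
	Cache* cache = new VnodeCache;
	cache->Delete();	// frees via the subclass's allocator
	return 0;
}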

src/system/kernel/cache/vnode_store.h

@ -1,5 +1,5 @@
/*
* Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*/
@ -48,6 +48,9 @@ public:
void VnodeDeleted() { fVnodeDeleted = true; }
protected:
virtual void DeleteObject();
private:
struct vnode* fVnode;
file_cache_ref* fFileCacheRef;

src/system/kernel/vm/VMAnonymousCache.cpp

@ -1,6 +1,6 @@
/*
* Copyright 2008, Zhao Shuai, upczhsh@163.com.
* Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
@ -846,6 +846,13 @@ VMAnonymousCache::Merge(VMCache* _source)
}
void
VMAnonymousCache::DeleteObject()
{
object_cache_delete(gAnonymousCacheObjectCache, this);
}
void
VMAnonymousCache::_SwapBlockBuild(off_t startPageIndex,
swap_addr_t startSlotIndex, uint32 count)

src/system/kernel/vm/VMAnonymousCache.h

@ -1,5 +1,5 @@
/*
* Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
@ -64,6 +64,9 @@ public:
virtual void Merge(VMCache* source);
protected:
virtual void DeleteObject();
private:
class WriteCallback;
friend class WriteCallback;

src/system/kernel/vm/VMAnonymousNoSwapCache.cpp

@ -1,5 +1,5 @@
/*
* Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
@ -14,6 +14,7 @@
#include <arch_config.h>
#include <heap.h>
#include <KernelExport.h>
#include <slab/Slab.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
@ -183,3 +184,10 @@ VMAnonymousNoSwapCache::MergeStore(VMCache* _source)
committed_size = actualSize;
}
}
void
VMAnonymousNoSwapCache::DeleteObject()
{
object_cache_delete(gAnonymousNoSwapCacheObjectCache, this);
}

src/system/kernel/vm/VMAnonymousNoSwapCache.h

@ -1,5 +1,5 @@
/*
* Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
@ -37,6 +37,9 @@ public:
virtual void MergeStore(VMCache* source);
protected:
virtual void DeleteObject();
private:
bool fCanOvercommit;
bool fHasPrecommitted;

src/system/kernel/vm/VMCache.cpp

@ -20,6 +20,7 @@
#include <heap.h>
#include <int.h>
#include <kernel.h>
#include <slab/Slab.h>
#include <smp.h>
#include <tracing.h>
#include <util/khash.h>
@ -32,6 +33,13 @@
#include <vm/VMAddressSpace.h>
#include <vm/VMArea.h>
// needed for the factory only
#include "VMAnonymousCache.h"
#include "VMAnonymousNoSwapCache.h"
#include "VMDeviceCache.h"
#include "VMNullCache.h"
#include "../cache/vnode_store.h"
//#define TRACE_VM_CACHE
#ifdef TRACE_VM_CACHE
@ -47,6 +55,13 @@ VMCache* gDebugCacheList;
static mutex sCacheListLock = MUTEX_INITIALIZER("global VMCache list");
// The lock is also needed when the debug feature is disabled.
ObjectCache* gCacheRefObjectCache;
ObjectCache* gAnonymousCacheObjectCache;
ObjectCache* gAnonymousNoSwapCacheObjectCache;
ObjectCache* gVnodeCacheObjectCache;
ObjectCache* gDeviceCacheObjectCache;
ObjectCache* gNullCacheObjectCache;
struct VMCache::PageEventWaiter {
Thread* thread;
@ -465,6 +480,30 @@ command_cache_stack(int argc, char** argv)
status_t
vm_cache_init(kernel_args* args)
{
// Create object caches for the structures we allocate here.
gCacheRefObjectCache = create_object_cache("cache refs", sizeof(VMCacheRef),
0, NULL, NULL, NULL);
gAnonymousCacheObjectCache = create_object_cache("anon caches",
sizeof(VMAnonymousCache), 0, NULL, NULL, NULL);
gAnonymousNoSwapCacheObjectCache = create_object_cache(
"anon no-swap caches", sizeof(VMAnonymousNoSwapCache), 0, NULL, NULL,
NULL);
gVnodeCacheObjectCache = create_object_cache("vnode caches",
sizeof(VMVnodeCache), 0, NULL, NULL, NULL);
gDeviceCacheObjectCache = create_object_cache("device caches",
sizeof(VMDeviceCache), 0, NULL, NULL, NULL);
gNullCacheObjectCache = create_object_cache("null caches",
sizeof(VMNullCache), 0, NULL, NULL, NULL);
if (gCacheRefObjectCache == NULL || gAnonymousCacheObjectCache == NULL
|| gAnonymousNoSwapCacheObjectCache == NULL
|| gVnodeCacheObjectCache == NULL
|| gDeviceCacheObjectCache == NULL
|| gNullCacheObjectCache == NULL) {
panic("vm_cache_init(): Failed to create object caches!");
return B_NO_MEMORY;
}
return B_OK;
}
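
For readers unfamiliar with the slab API: the create_object_cache() arguments here are the cache name, the object size, the alignment (0 for the default), and then a cookie plus constructor/destructor hooks, all unused here. A hedged, kernel-only sketch of the lifecycle these caches enable, using only calls that appear in this commit (not standalone code; allocationFlags stands in for the caller's heap flags):

// Kernel-only sketch, assuming <slab/Slab.h> as included by this commit.
// One slab cache per concrete type keeps equally sized objects packed
// into the same areas, which is what reduces slab area fragmentation.
ObjectCache* cache = create_object_cache("null caches",
	sizeof(VMNullCache), 0, NULL, NULL, NULL);

// Allocate: the slab placement new draws from the cache and runs the
// constructor; it yields NULL on failure instead of throwing.
VMNullCache* object = new(cache, allocationFlags) VMNullCache;

// Free: object_cache_delete() runs the destructor and returns the
// memory to the cache.
object_cache_delete(cache, object);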
@ -558,9 +597,8 @@ VMCacheRef::VMCacheRef(VMCache* cache)
bool
VMCache::_IsMergeable() const
{
return (areas == NULL && temporary
&& !list_is_empty(const_cast<list*>(&consumers))
&& consumers.link.next == consumers.link.prev);
return areas == NULL && temporary && !consumers.IsEmpty()
&& consumers.Head() == consumers.Tail();
}
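
The rewritten predicate states the intent directly: a cache is mergeable only when it has exactly one consumer, i.e. the list is non-empty and its head and tail coincide, where the old code had to infer this from the raw link pointers. A tiny self-contained check of that equivalence (plain C++, nothing Haiku-specific):

#include <cassert>
#include <list>

int main()
{
	std::list<int> consumers;
	assert(consumers.empty());	// zero elements

	consumers.push_back(1);
	// exactly one element: head and tail are the same node
	assert(!consumers.empty()
		&& &consumers.front() == &consumers.back());

	consumers.push_back(2);
	// more than one element: head and tail differ
	assert(&consumers.front() != &consumers.back());
	return 0;
}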
@ -573,7 +611,7 @@ VMCache::VMCache()
VMCache::~VMCache()
{
delete fCacheRef;
object_cache_delete(gCacheRefObjectCache, fCacheRef);
}
@ -582,10 +620,6 @@ VMCache::Init(uint32 cacheType, uint32 allocationFlags)
{
mutex_init(&fLock, "VMCache");
VMCache dummyCache;
list_init_etc(&consumers, offset_of_member(dummyCache, consumer_link));
// TODO: This is disgusting! Use DoublyLinkedList!
areas = NULL;
fRefCount = 1;
source = NULL;
@ -604,7 +638,7 @@ VMCache::Init(uint32 cacheType, uint32 allocationFlags)
// initialize in case the following fails
#endif
fCacheRef = new(malloc_flags(allocationFlags)) VMCacheRef(this);
fCacheRef = new(gCacheRefObjectCache, allocationFlags) VMCacheRef(this);
if (fCacheRef == NULL)
return B_NO_MEMORY;
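
The new(gCacheRefObjectCache, allocationFlags) expression above is a placement operator new overload provided by <slab/Slab.h>. Roughly, and this is a hypothetical sketch rather than the exact kernel definition, it forwards to the slab allocator and is non-throwing, which is why the NULL check that follows is sufficient:

// Hypothetical sketch of the slab placement new; the real declaration
// lives in <slab/Slab.h>. Returning NULL (not throwing) on failure is
// what makes the `if (fCacheRef == NULL)` check above meaningful.
inline void*
operator new(size_t size, ObjectCache* objectCache, uint32 flags) throw()
{
	return object_cache_alloc(objectCache, flags);
}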
@ -628,7 +662,7 @@ VMCache::Delete()
{
if (areas != NULL)
panic("cache %p to be deleted still has areas", this);
if (!list_is_empty(&consumers))
if (!consumers.IsEmpty())
panic("cache %p to be deleted still has consumers", this);
T(Delete(this));
@ -672,7 +706,7 @@ VMCache::Delete()
mutex_unlock(&sCacheListLock);
delete this;
DeleteObject();
}
@ -680,7 +714,7 @@ void
VMCache::Unlock(bool consumerLocked)
{
while (fRefCount == 1 && _IsMergeable()) {
VMCache* consumer = (VMCache*)list_get_first_item(&consumers);
VMCache* consumer = consumers.Head();
if (consumerLocked) {
_MergeWithOnlyConsumer();
} else if (consumer->TryLock()) {
@ -698,7 +732,7 @@ VMCache::Unlock(bool consumerLocked)
if (consumerLockedTemp) {
if (fRefCount == 1 && _IsMergeable()
&& consumer == list_get_first_item(&consumers)) {
&& consumer == consumers.Head()) {
// nothing has changed in the meantime -- merge
_MergeWithOnlyConsumer();
}
@ -901,7 +935,7 @@ VMCache::AddConsumer(VMCache* consumer)
T(AddConsumer(this, consumer));
consumer->source = this;
list_add_item(&consumers, consumer);
consumers.Add(consumer);
AcquireRefLocked();
AcquireStoreRef();
@ -1312,9 +1346,8 @@ VMCache::Dump(bool showPages) const
}
kprintf(" consumers:\n");
VMCache* consumer = NULL;
while ((consumer = (VMCache*)list_get_next_item((list*)&consumers,
consumer)) != NULL) {
for (ConsumerList::ConstIterator it = consumers.GetIterator();
VMCache* consumer = it.Next();) {
kprintf("\t%p\n", consumer);
}
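
The replacement loop packs the whole traversal into the for-statement: the element pointer is declared in the condition, so iteration stops when Next() returns NULL and the variable stays scoped to the loop body. A standalone illustration of the same shape:

#include <cstdio>

// Minimal Haiku-style iterator: Next() returns the next element, or
// NULL once the sequence is exhausted.
struct Iterator {
	const char**	current;
	const char**	end;

	Iterator(const char** c, const char** e) : current(c), end(e) {}
	const char* Next() { return current != end ? *current++ : NULL; }
};

int main()
{
	const char* caches[] = { "cache A", "cache B", "cache C" };
	// The declaration in the condition both tests for NULL and limits
	// the scope of `name` to the body.
	for (Iterator it(caches, caches + 3); const char* name = it.Next();)
		printf("\t%s\n", name);
	return 0;
}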
@ -1365,7 +1398,7 @@ VMCache::_NotifyPageEvents(vm_page* page, uint32 events)
void
VMCache::_MergeWithOnlyConsumer()
{
VMCache* consumer = (VMCache*)list_remove_head_item(&consumers);
VMCache* consumer = consumers.RemoveHead();
TRACE(("merge vm cache %p (ref == %ld) with vm cache %p\n",
this, this->fRefCount, consumer));
@ -1381,8 +1414,8 @@ VMCache::_MergeWithOnlyConsumer()
newSource->Lock();
list_remove_item(&newSource->consumers, this);
list_add_item(&newSource->consumers, consumer);
newSource->consumers.Remove(this);
newSource->consumers.Add(consumer);
consumer->source = newSource;
source = NULL;
@ -1416,7 +1449,7 @@ VMCache::_RemoveConsumer(VMCache* consumer)
// remove the consumer from the cache, but keep its reference until later
Lock();
list_remove_item(&consumers, consumer);
consumers.Remove(consumer);
consumer->source = NULL;
ReleaseRefAndUnlock();
@ -1427,15 +1460,6 @@ VMCache::_RemoveConsumer(VMCache* consumer)
// TODO: Move to own source file!
#include <heap.h>
#include "VMAnonymousCache.h"
#include "VMAnonymousNoSwapCache.h"
#include "VMDeviceCache.h"
#include "VMNullCache.h"
#include "../cache/vnode_store.h"
/*static*/ status_t
VMCacheFactory::CreateAnonymousCache(VMCache*& _cache, bool canOvercommit,
int32 numPrecommittedPages, int32 numGuardPages, bool swappable,
@ -1449,7 +1473,7 @@ VMCacheFactory::CreateAnonymousCache(VMCache*& _cache, bool canOvercommit,
#if ENABLE_SWAP_SUPPORT
if (swappable) {
VMAnonymousCache* cache
= new(malloc_flags(allocationFlags)) VMAnonymousCache;
= new(gAnonymousCacheObjectCache, allocationFlags) VMAnonymousCache;
if (cache == NULL)
return B_NO_MEMORY;
@ -1468,7 +1492,8 @@ VMCacheFactory::CreateAnonymousCache(VMCache*& _cache, bool canOvercommit,
#endif
VMAnonymousNoSwapCache* cache
= new(malloc_flags(allocationFlags)) VMAnonymousNoSwapCache;
= new(gAnonymousNoSwapCacheObjectCache, allocationFlags)
VMAnonymousNoSwapCache;
if (cache == NULL)
return B_NO_MEMORY;
@ -1493,7 +1518,8 @@ VMCacheFactory::CreateVnodeCache(VMCache*& _cache, struct vnode* vnode)
| HEAP_DONT_LOCK_KERNEL_SPACE;
// Note: Vnode cache creation is never VIP.
VMVnodeCache* cache = new(malloc_flags(allocationFlags)) VMVnodeCache;
VMVnodeCache* cache
= new(gVnodeCacheObjectCache, allocationFlags) VMVnodeCache;
if (cache == NULL)
return B_NO_MEMORY;
@ -1517,7 +1543,8 @@ VMCacheFactory::CreateDeviceCache(VMCache*& _cache, addr_t baseAddress)
| HEAP_DONT_LOCK_KERNEL_SPACE;
// Note: Device cache creation is never VIP.
VMDeviceCache* cache = new(malloc_flags(allocationFlags)) VMDeviceCache;
VMDeviceCache* cache
= new(gDeviceCacheObjectCache, allocationFlags) VMDeviceCache;
if (cache == NULL)
return B_NO_MEMORY;
@ -1542,7 +1569,8 @@ VMCacheFactory::CreateNullCache(int priority, VMCache*& _cache)
if (priority >= VM_PRIORITY_VIP)
allocationFlags |= HEAP_PRIORITY_VIP;
VMNullCache* cache = new(malloc_flags(allocationFlags)) VMNullCache;
VMNullCache* cache
= new(gNullCacheObjectCache, allocationFlags) VMNullCache;
if (cache == NULL)
return B_NO_MEMORY;

src/system/kernel/vm/VMDeviceCache.cpp

@ -9,6 +9,8 @@
#include "VMDeviceCache.h"
#include <slab/Slab.h>
status_t
VMDeviceCache::Init(addr_t baseAddress, uint32 allocationFlags)
@ -34,3 +36,10 @@ VMDeviceCache::Write(off_t offset, const iovec* vecs, size_t count,
// no place to write, this will cause the page daemon to skip this store
return B_OK;
}
void
VMDeviceCache::DeleteObject()
{
object_cache_delete(gDeviceCacheObjectCache, this);
}

src/system/kernel/vm/VMDeviceCache.h

@ -1,5 +1,5 @@
/*
* Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2005-2007, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
@ -25,6 +25,9 @@ public:
size_t count, uint32 flags,
size_t* _numBytes);
protected:
virtual void DeleteObject();
private:
addr_t fBaseAddress;
};

src/system/kernel/vm/VMNullCache.cpp

@ -1,14 +1,24 @@
/*
* Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#include "VMNullCache.h"
#include <slab/Slab.h>
status_t
VMNullCache::Init(uint32 allocationFlags)
{
return VMCache::Init(CACHE_TYPE_NULL, allocationFlags);
}
void
VMNullCache::DeleteObject()
{
object_cache_delete(gNullCacheObjectCache, this);
}

src/system/kernel/vm/VMNullCache.h

@ -1,5 +1,5 @@
/*
* Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2005-2007, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
@ -16,6 +16,9 @@
class VMNullCache : public VMCache {
public:
status_t Init(uint32 allocationFlags);
protected:
virtual void DeleteObject();
};

src/system/kernel/vm/vm.cpp

@ -618,7 +618,7 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
// If no one else uses the area's cache, we can resize it, too.
if (cache->areas == area && area->cache_next == NULL
&& list_is_empty(&cache->consumers)
&& cache->consumers.IsEmpty()
&& cache->type == CACHE_TYPE_RAM) {
// Since VMCache::Resize() can temporarily drop the lock, we must
// unlock all lower caches to prevent locking order inversion.
@ -2442,7 +2442,7 @@ vm_set_area_protection(team_id team, area_id areaID, uint32 newProtection,
// Make sure the area (respectively, if we're going to call
// vm_copy_on_write_area(), all areas of the cache) doesn't have any
// wired ranges.
if (!isWritable && becomesWritable && !list_is_empty(&cache->consumers)) {
if (!isWritable && becomesWritable && !cache->consumers.IsEmpty()) {
for (VMArea* otherArea = cache->areas; otherArea != NULL;
otherArea = otherArea->cache_next) {
if (wait_if_area_is_wired(otherArea, &locker, &cacheLocker)) {
@ -2490,7 +2490,7 @@ vm_set_area_protection(team_id team, area_id areaID, uint32 newProtection,
} else if (!isWritable && becomesWritable) {
// !writable -> writable
if (!list_is_empty(&cache->consumers)) {
if (!cache->consumers.IsEmpty()) {
// There are consumers -- we have to insert a new cache. Fortunately
// vm_copy_on_write_area() does everything that's needed.
changePageProtection = false;
@ -2885,9 +2885,8 @@ dump_cache_tree_recursively(VMCache* cache, int level,
kprintf("%p\n", cache);
// recursively print its consumers
VMCache* consumer = NULL;
while ((consumer = (VMCache*)list_get_next_item(&cache->consumers,
consumer)) != NULL) {
for (VMCache::ConsumerList::Iterator it = cache->consumers.GetIterator();
VMCache* consumer = it.Next();) {
dump_cache_tree_recursively(consumer, level + 1, highlightCache);
}
}
@ -2947,9 +2946,8 @@ update_cache_info_recursively(VMCache* cache, cache_info& info)
info.committed += cache->committed_size;
// recurse
VMCache* consumer = NULL;
while ((consumer = (VMCache*)list_get_next_item(&cache->consumers,
consumer)) != NULL) {
for (VMCache::ConsumerList::Iterator it = cache->consumers.GetIterator();
VMCache* consumer = it.Next();) {
update_cache_info_recursively(consumer, info);
}
}
@ -3012,9 +3010,8 @@ dump_caches_recursively(VMCache* cache, cache_info& info, int level)
kputs("\n");
// recurse
VMCache* consumer = NULL;
while ((consumer = (VMCache*)list_get_next_item(&cache->consumers,
consumer)) != NULL) {
for (VMCache::ConsumerList::Iterator it = cache->consumers.GetIterator();
VMCache* consumer = it.Next();) {
dump_caches_recursively(consumer, info, level + 1);
}
}
@ -3687,7 +3684,7 @@ vm_init(kernel_args* args)
// initialize the free page list and physical page mapper
vm_page_init(args);
// initialize the hash table that stores the pages mapped to caches
// initialize the cache allocators
vm_cache_init(args);
{