/*
 * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include <vm_cache.h>

#include <stddef.h>
#include <stdlib.h>

#include <arch/cpu.h>
#include <condition_variable.h>
#include <debug.h>
#include <heap.h>
#include <int.h>
#include <kernel.h>
#include <smp.h>
#include <tracing.h>
#include <util/khash.h>
#include <util/AutoLock.h>
#include <vfs.h>
#include <vm.h>
#include <vm_page.h>
#include <vm_priv.h>
#include <vm_types.h>

//#define TRACE_VM_CACHE
#ifdef TRACE_VM_CACHE
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


#if DEBUG_CACHE_LIST
VMCache* gDebugCacheList;
#endif
static mutex sCacheListLock = MUTEX_INITIALIZER("global vm_cache list");
	// The lock is also needed when the debug feature is disabled.

#if VM_CACHE_TRACING

namespace VMCacheTracing {

class VMCacheTraceEntry : public AbstractTraceEntry {
	public:
		VMCacheTraceEntry(VMCache* cache)
			:
			fCache(cache)
		{
		}

	protected:
		VMCache*	fCache;
};


class Create : public VMCacheTraceEntry {
	public:
		Create(VMCache* cache)
			:
			VMCacheTraceEntry(cache)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache create: -> cache: %p", fCache);
		}
};


class Delete : public VMCacheTraceEntry {
	public:
		Delete(VMCache* cache)
			:
			VMCacheTraceEntry(cache)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache delete: cache: %p", fCache);
		}
};


class SetMinimalCommitment : public VMCacheTraceEntry {
	public:
		SetMinimalCommitment(VMCache* cache, off_t commitment)
			:
			VMCacheTraceEntry(cache),
			fOldCommitment(cache->committed_size),
			fCommitment(commitment)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache set min commitment: cache: %p, "
				"commitment: %lld -> %lld", fCache, fOldCommitment,
				fCommitment);
		}

	private:
		off_t	fOldCommitment;
		off_t	fCommitment;
};


class Resize : public VMCacheTraceEntry {
	public:
		Resize(VMCache* cache, off_t size)
			:
			VMCacheTraceEntry(cache),
			fOldSize(cache->virtual_end),
			fSize(size)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache resize: cache: %p, size: %lld -> %lld", fCache,
				fOldSize, fSize);
		}

	private:
		off_t	fOldSize;
		off_t	fSize;
};


class AddConsumer : public VMCacheTraceEntry {
	public:
		AddConsumer(VMCache* cache, VMCache* consumer)
			:
			VMCacheTraceEntry(cache),
			fConsumer(consumer)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache add consumer: cache: %p, consumer: %p", fCache,
				fConsumer);
		}

	private:
		VMCache*	fConsumer;
};


class RemoveConsumer : public VMCacheTraceEntry {
	public:
		RemoveConsumer(VMCache* cache, VMCache* consumer)
			:
			VMCacheTraceEntry(cache),
			fConsumer(consumer)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache remove consumer: cache: %p, consumer: %p",
				fCache, fConsumer);
		}

	private:
		VMCache*	fConsumer;
};


class Merge : public VMCacheTraceEntry {
	public:
		Merge(VMCache* cache, VMCache* consumer)
			:
			VMCacheTraceEntry(cache),
			fConsumer(consumer)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache merge with consumer: cache: %p, consumer: %p",
				fCache, fConsumer);
		}

	private:
		VMCache*	fConsumer;
};


class InsertArea : public VMCacheTraceEntry {
	public:
		InsertArea(VMCache* cache, vm_area* area)
			:
			VMCacheTraceEntry(cache),
			fArea(area)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache insert area: cache: %p, area: %p", fCache,
				fArea);
		}

	private:
		vm_area*	fArea;
};


class RemoveArea : public VMCacheTraceEntry {
	public:
		RemoveArea(VMCache* cache, vm_area* area)
			:
			VMCacheTraceEntry(cache),
			fArea(area)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache remove area: cache: %p, area: %p", fCache,
				fArea);
		}

	private:
		vm_area*	fArea;
};

}	// namespace VMCacheTracing

#	define T(x) new(std::nothrow) VMCacheTracing::x;

#	if VM_CACHE_TRACING >= 2

namespace VMCacheTracing {

class InsertPage : public VMCacheTraceEntry {
	public:
		InsertPage(VMCache* cache, vm_page* page, off_t offset)
			:
			VMCacheTraceEntry(cache),
			fPage(page),
			fOffset(offset)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache insert page: cache: %p, page: %p, offset: %lld",
				fCache, fPage, fOffset);
		}

	private:
		vm_page*	fPage;
		off_t		fOffset;
};


class RemovePage : public VMCacheTraceEntry {
	public:
		RemovePage(VMCache* cache, vm_page* page)
			:
			VMCacheTraceEntry(cache),
			fPage(page)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache remove page: cache: %p, page: %p", fCache,
				fPage);
		}

	private:
		vm_page*	fPage;
};

}	// namespace VMCacheTracing

#	define T2(x) new(std::nothrow) VMCacheTracing::x;
#	else
#	define T2(x) ;
#	endif

#else
#	define T(x) ;
#	define T2(x) ;
#endif


// #pragma mark -


status_t
vm_cache_init(kernel_args* args)
{
	return B_OK;
}


VMCache*
vm_cache_acquire_locked_page_cache(vm_page* page, bool dontWait)
{
	mutex_lock(&sCacheListLock);

	while (dontWait) {
		vm_cache* cache = page->cache;
		if (cache == NULL || !cache->TryLock()) {
			mutex_unlock(&sCacheListLock);
			return NULL;
		}

		if (cache == page->cache) {
			cache->AcquireRefLocked();
			mutex_unlock(&sCacheListLock);
			return cache;
		}

		// the cache changed in the meantime
		cache->Unlock();
	}

	while (true) {
		vm_cache* cache = page->cache;
		if (cache == NULL) {
			mutex_unlock(&sCacheListLock);
			return NULL;
		}

		// TODO: this is problematic, as it requires the caller not to have
		// a lock on this cache (it might be called via
		// vm_page_allocate_page(..., false)).
		if (!cache->SwitchLock(&sCacheListLock)) {
			// cache has been deleted
			mutex_lock(&sCacheListLock);
			continue;
		}

		if (cache == page->cache) {
			cache->AcquireRefLocked();
			return cache;
		}

		// the cache changed in the meantime
		cache->Unlock();
		mutex_lock(&sCacheListLock);
	}
}
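

// Typical caller pattern (an illustrative sketch only, not part of this
// file; the function name below is hypothetical). With dontWait == true the
// function fails fast instead of blocking, which is what lock-sensitive
// callers like the page allocator path would want.
#if 0
static void
example_touch_page_cache(vm_page* page)
{
	// Try to pin down the cache the page currently belongs to. On success
	// the returned cache is locked and referenced; page->cache may change
	// concurrently, which is why the helper re-checks it internally.
	VMCache* cache = vm_cache_acquire_locked_page_cache(page, true);
	if (cache == NULL)
		return;
			// page has no cache, or someone else holds the cache lock

	// ... operate on the page while its cache is locked ...

	cache->ReleaseRefAndUnlock();
}
#endif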


// #pragma mark - VMCache


bool
VMCache::_IsMergeable() const
{
	return (areas == NULL && temporary
		&& !list_is_empty(const_cast<list*>(&consumers))
		&& consumers.link.next == consumers.link.prev);
}


VMCache::VMCache()
{
}


VMCache::~VMCache()
{
}


status_t
VMCache::Init(uint32 cacheType)
{
	mutex_init(&fLock, "vm_cache");
	VMCache dummyCache;
	list_init_etc(&consumers, offset_of_member(dummyCache, consumer_link));
	areas = NULL;
	fRefCount = 1;
	source = NULL;
	virtual_base = 0;
	virtual_end = 0;
	committed_size = 0;
	temporary = 0;
	scan_skip = 0;
	page_count = 0;
	type = cacheType;

#if DEBUG_CACHE_LIST
	mutex_lock(&sCacheListLock);

	if (gDebugCacheList)
		gDebugCacheList->debug_previous = this;
	debug_previous = NULL;
	debug_next = gDebugCacheList;
	gDebugCacheList = this;

	mutex_unlock(&sCacheListLock);
#endif

	return B_OK;
}


void
VMCache::Delete()
{
	if (areas != NULL)
		panic("cache %p to be deleted still has areas", this);
	if (!list_is_empty(&consumers))
		panic("cache %p to be deleted still has consumers", this);

	T(Delete(this));

	// free all of the pages in the cache
	while (vm_page* page = pages.Root()) {
		if (!page->mappings.IsEmpty() || page->wired_count != 0) {
			panic("remove page %p from cache %p: page still has mappings!\n",
				page, this);
		}

		// remove it
		pages.Remove(page);
		page->cache = NULL;
		// TODO: we also need to remove all of the page's mappings!

		TRACE(("vm_cache_release_ref: freeing page 0x%lx\n",
			page->physical_page_number));
		vm_page_free(this, page);
	}

	// remove the ref to the source
	if (source)
		source->_RemoveConsumer(this);

	// We lock and unlock the sCacheListLock, even if the DEBUG_CACHE_LIST is
	// not enabled. This synchronization point is needed for
	// vm_cache_acquire_locked_page_cache().
	mutex_lock(&sCacheListLock);

#if DEBUG_CACHE_LIST
	if (debug_previous)
		debug_previous->debug_next = debug_next;
	if (debug_next)
		debug_next->debug_previous = debug_previous;
	if (this == gDebugCacheList)
		gDebugCacheList = debug_next;
#endif

	mutex_destroy(&fLock);

	mutex_unlock(&sCacheListLock);

	delete this;
}


void
VMCache::Unlock()
{
	while (fRefCount == 1 && _IsMergeable()) {
		VMCache* consumer = (VMCache*)list_get_first_item(&consumers);
		if (consumer->TryLock()) {
			_MergeWithOnlyConsumer();
		} else {
			// Someone else has locked the consumer ATM. Unlock this cache and
			// wait for the consumer lock. Increment the cache's ref count
			// temporarily, so that no one else will try what we are doing or
			// delete the cache.
			fRefCount++;
			bool consumerLocked = consumer->SwitchLock(&fLock);
			Lock();
			fRefCount--;

			if (consumerLocked) {
				if (fRefCount == 1 && _IsMergeable()
						&& consumer == list_get_first_item(&consumers)) {
					_MergeWithOnlyConsumer();
				} else {
					// something changed, get rid of the consumer lock
					consumer->Unlock();
				}
			}
		}
	}

	if (fRefCount == 0) {
		// delete this cache
		Delete();
	} else
		mutex_unlock(&fLock);
}


void
VMCache::AcquireRefLocked()
{
	// TODO: Inline!
	ASSERT_LOCKED_MUTEX(&fLock);

	fRefCount++;
}


void
VMCache::AcquireRef()
{
	Lock();
	fRefCount++;
	Unlock();
}


void
VMCache::ReleaseRefLocked()
{
	// TODO: Inline!
	ASSERT_LOCKED_MUTEX(&fLock);

	fRefCount--;
}


void
VMCache::ReleaseRef()
{
	Lock();
	fRefCount--;
	Unlock();
}
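

// Reference counting in a nutshell (an illustrative sketch only; the caller
// name is hypothetical): a cache is kept alive by its ref count, and
// Unlock() deletes the cache once the count has dropped to zero. A minimal
// sketch, assuming the caller starts with a referenced but unlocked cache:
#if 0
static void
example_use_cache(VMCache* cache)
{
	cache->AcquireRef();
			// make sure the cache survives while we work with it

	cache->Lock();
	// ... inspect or modify the cache ...
	cache->Unlock();

	cache->ReleaseRef();
			// may merge or delete the cache if this was the last reference
}
#endif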


vm_page*
VMCache::LookupPage(off_t offset)
{
	AssertLocked();

	vm_page* page = pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));

#if KDEBUG
	if (page != NULL && page->cache != this)
		panic("page %p not in cache %p\n", page, this);
#endif

	return page;
}


void
VMCache::InsertPage(vm_page* page, off_t offset)
{
	TRACE(("VMCache::InsertPage(): cache %p, page %p, offset %Ld\n",
		this, page, offset));
	AssertLocked();

	if (page->cache != NULL) {
		panic("insert page %p into cache %p: page cache is set to %p\n",
			page, this, page->cache);
	}

	T2(InsertPage(this, page, offset));

	page->cache_offset = (page_num_t)(offset >> PAGE_SHIFT);
	page_count++;
	page->usage_count = 2;
	page->cache = this;

#if KDEBUG
	vm_page* otherPage = pages.Lookup(page->cache_offset);
	if (otherPage != NULL) {
		panic("VMCache::InsertPage(): there's already page %p with cache "
			"offset %lu in cache %p; inserting page %p", otherPage,
			page->cache_offset, this, page);
	}
#endif	// KDEBUG

	pages.Insert(page);
}


/*!	Removes the vm_page from this cache. Of course, the page must
	really be in this cache or evil things will happen.
	The cache lock must be held.
*/
void
VMCache::RemovePage(vm_page* page)
{
	TRACE(("VMCache::RemovePage(): cache %p, page %p\n", this, page));
	AssertLocked();

	if (page->cache != this) {
		panic("remove page %p from cache %p: page cache is set to %p\n", page,
			this, page->cache);
	}

	T2(RemovePage(this, page));

	pages.Remove(page);
	page->cache = NULL;
	page_count--;
}


/*!	Makes this cache the source of the \a consumer cache,
	and adds the \a consumer to its list.
	This also grabs a reference to the source cache.
	Assumes you have the cache and the consumer's lock held.
*/
void
VMCache::AddConsumer(VMCache* consumer)
{
	TRACE(("add consumer vm cache %p to cache %p\n", consumer, this));
	AssertLocked();
	consumer->AssertLocked();

	T(AddConsumer(this, consumer));

	consumer->source = this;
	list_add_item(&consumers, consumer);

	AcquireRefLocked();
	AcquireStoreRef();
}
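

// How a copy-on-write chain is stacked on top of AddConsumer() (an
// illustrative sketch only; the real logic lives in the VM's
// copy-on-write code, and the function name below is hypothetical):
#if 0
static void
example_make_cow_chain(VMCache* lowerCache, VMCache* upperCache)
{
	// Both caches must be locked. Afterwards lowerCache is the source of
	// upperCache: page lookups that miss in upperCache fall through to
	// lowerCache, and AddConsumer() has taken a ref plus a store ref on
	// the source on the consumer's behalf.
	lowerCache->AddConsumer(upperCache);
}
#endif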


/*!	Adds the \a area to this cache.
	Assumes you have the cache locked.
*/
status_t
VMCache::InsertAreaLocked(vm_area* area)
{
	TRACE(("VMCache::InsertAreaLocked(cache %p, area %p)\n", this, area));
	AssertLocked();

	T(InsertArea(this, area));

	area->cache_next = areas;
	if (area->cache_next)
		area->cache_next->cache_prev = area;
	area->cache_prev = NULL;
	areas = area;

	AcquireStoreRef();

	return B_OK;
}


status_t
VMCache::RemoveArea(vm_area* area)
{
	TRACE(("VMCache::RemoveArea(cache %p, area %p)\n", this, area));

	T(RemoveArea(this, area));

	// We release the store reference first, since otherwise we would reverse
	// the locking order or even deadlock ourselves (... -> free_vnode() -> ...
	// -> bfs_remove_vnode() -> ... -> file_cache_set_size() -> mutex_lock()).
	// Also cf. _RemoveConsumer().
	ReleaseStoreRef();

	AutoLocker<VMCache> locker(this);

	if (area->cache_prev)
		area->cache_prev->cache_next = area->cache_next;
	if (area->cache_next)
		area->cache_next->cache_prev = area->cache_prev;
	if (areas == area)
		areas = area->cache_next;

	return B_OK;
}


status_t
VMCache::WriteModified()
{
	TRACE(("VMCache::WriteModified(cache = %p)\n", this));

	if (temporary)
		return B_OK;

	Lock();
	status_t status = vm_page_write_modified_pages(this);
	Unlock();

	return status;
}


/*!	Commits the memory to the store if the \a commitment is larger than
	what's committed already.
	Assumes you have the cache's lock held.
*/
status_t
VMCache::SetMinimalCommitment(off_t commitment)
{
	TRACE(("VMCache::SetMinimalCommitment(cache %p, commitment %Ld)\n",
		this, commitment));
	AssertLocked();

	T(SetMinimalCommitment(this, commitment));

	status_t status = B_OK;

	// If we don't have enough committed space to cover through to the new end
	// of the area...
	if (committed_size < commitment) {
		// ToDo: should we check if the cache's virtual size is large
		// enough for a commitment of that size?

		// try to commit more memory
		status = Commit(commitment);
	}

	return status;
}


/*!	This function updates the size field of the cache.
	If needed, it will free up all pages that don't belong to the cache anymore.
	The cache lock must be held when you call it.
	Since removed pages don't belong to the cache any longer, they are not
	written back before they will be removed.

	Note, this function may temporarily release the cache lock in case it
	has to wait for busy pages.
*/
status_t
VMCache::Resize(off_t newSize)
{
	TRACE(("VMCache::Resize(cache %p, newSize %Ld) old size %Ld\n",
		this, newSize, this->virtual_end));
	this->AssertLocked();

	T(Resize(this, newSize));

	status_t status = Commit(newSize - virtual_base);
	if (status != B_OK)
		return status;

	uint32 oldPageCount = (uint32)((virtual_end + B_PAGE_SIZE - 1)
		>> PAGE_SHIFT);
	uint32 newPageCount = (uint32)((newSize + B_PAGE_SIZE - 1) >> PAGE_SHIFT);

	if (newPageCount < oldPageCount) {
		// we need to remove all pages in the cache outside of the new virtual
		// size
		for (VMCachePagesTree::Iterator it
					= pages.GetIterator(newPageCount, true, true);
				vm_page* page = it.Next();) {
			if (page->state == PAGE_STATE_BUSY) {
				if (page->busy_writing) {
					// We cannot wait for the page to become available
					// as we might cause a deadlock this way
					page->busy_writing = false;
						// this will notify the writer to free the page
				} else {
					// wait for page to become unbusy
					ConditionVariableEntry entry;
					entry.Add(page);
					Unlock();
					entry.Wait();
					Lock();

					// restart from the start of the list
					it = pages.GetIterator(newPageCount, true, true);
				}
				continue;
			}

			// remove the page and put it into the free queue
			vm_remove_all_page_mappings(page, NULL);
			ASSERT(page->wired_count == 0);
				// TODO: Find a real solution! Unmapping is probably fine, but
				// we have no way of unmapping wired pages here.
			RemovePage(page);
			vm_page_free(this, page);
				// Note: When iterating through a IteratableSplayTree
				// removing the current node is safe.
		}
	}

	virtual_end = newSize;
	return B_OK;
}


/*!	You have to call this function with the VMCache lock held. */
status_t
VMCache::FlushAndRemoveAllPages()
{
	ASSERT_LOCKED_MUTEX(&fLock);

	while (page_count > 0) {
		// write back modified pages
		status_t status = vm_page_write_modified_pages(this);
		if (status != B_OK)
			return status;

		// remove pages
		for (VMCachePagesTree::Iterator it = pages.GetIterator();
				vm_page* page = it.Next();) {
			if (page->state == PAGE_STATE_BUSY) {
				// wait for page to become unbusy
				ConditionVariableEntry entry;
				entry.Add(page);
				Unlock();
				entry.Wait();
				Lock();

				// restart from the start of the list
				it = pages.GetIterator();
				continue;
			}

			// skip modified pages -- they will be written back in the next
			// iteration
			if (page->state == PAGE_STATE_MODIFIED)
				continue;

			// We can't remove mapped pages.
			if (page->wired_count > 0 || !page->mappings.IsEmpty())
				return B_BUSY;

			RemovePage(page);
			vm_page_free(this, page);
				// Note: When iterating through a IteratableSplayTree
				// removing the current node is safe.
		}
	}

	return B_OK;
}


status_t
VMCache::Commit(off_t size)
{
	committed_size = size;
	return B_OK;
}


bool
VMCache::HasPage(off_t offset)
{
	return offset >= virtual_base && offset <= virtual_end;
}


status_t
VMCache::Read(off_t offset, const iovec *vecs, size_t count, uint32 flags,
	size_t *_numBytes)
{
	return B_ERROR;
}


status_t
VMCache::Write(off_t offset, const iovec *vecs, size_t count, uint32 flags,
	size_t *_numBytes)
{
	return B_ERROR;
}


status_t
VMCache::WriteAsync(off_t offset, const iovec* vecs, size_t count,
	size_t numBytes, uint32 flags, AsyncIOCallback* callback)
{
	// Not supported, fall back to the synchronous hook.
	size_t transferred = numBytes;
	status_t error = Write(offset, vecs, count, flags, &transferred);

	if (callback != NULL)
		callback->IOFinished(error, transferred != numBytes, transferred);

	return error;
}
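

// The Read()/Write() hooks above are only fallback implementations; concrete
// stores override them. A minimal sketch of such an override (hypothetical
// class and helper, for illustration only):
#if 0
class ExampleBackedCache : public VMCache {
public:
	virtual status_t Write(off_t offset, const iovec* vecs, size_t count,
		uint32 flags, size_t* _numBytes)
	{
		// push the vecs to the backing store at the given offset and report
		// back via *_numBytes how much was actually transferred; the helper
		// below is hypothetical
		return write_to_backing_store(offset, vecs, count, _numBytes);
	}
};
#endif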


/*!	\brief Returns whether the cache can write the page at the given offset.

	The cache must be locked when this function is invoked.

	@param offset The page offset.
	@return \c true, if the page can be written, \c false otherwise.
*/
bool
VMCache::CanWritePage(off_t offset)
{
	return false;
}


status_t
VMCache::Fault(struct vm_address_space *aspace, off_t offset)
{
	return B_BAD_ADDRESS;
}


void
VMCache::Merge(VMCache* source)
{
	for (VMCachePagesTree::Iterator it = source->pages.GetIterator();
			vm_page* page = it.Next();) {
		// Note: Removing the current node while iterating through a
		// IteratableSplayTree is safe.
		vm_page* consumerPage = LookupPage(
			(off_t)page->cache_offset << PAGE_SHIFT);
		if (consumerPage == NULL) {
			// the page is not yet in the consumer cache - move it upwards
			source->RemovePage(page);
			InsertPage(page, (off_t)page->cache_offset << PAGE_SHIFT);
#if DEBUG_PAGE_CACHE_TRANSITIONS
		} else {
			page->debug_flags = 0;
			if (consumerPage->state == PAGE_STATE_BUSY)
				page->debug_flags |= 0x1;
			if (consumerPage->type == PAGE_TYPE_DUMMY)
				page->debug_flags |= 0x2;
			page->collided_page = consumerPage;
			consumerPage->collided_page = page;
#endif	// DEBUG_PAGE_CACHE_TRANSITIONS
		}
	}
}


status_t
VMCache::AcquireUnreferencedStoreRef()
{
	return B_OK;
}


void
VMCache::AcquireStoreRef()
{
}


void
VMCache::ReleaseStoreRef()
{
}


/*!	Merges the given cache with its only consumer.
	The caller must hold both the cache's and the consumer's lock. The method
	will unlock the consumer lock.
*/
void
VMCache::_MergeWithOnlyConsumer()
{
	VMCache* consumer = (VMCache*)list_remove_head_item(&consumers);

	TRACE(("merge vm cache %p (ref == %ld) with vm cache %p\n",
		this, this->fRefCount, consumer));

	T(Merge(this, consumer));

	// merge the cache
	consumer->Merge(this);

	// The remaining consumer has got a new source.
	if (source != NULL) {
		VMCache* newSource = source;

		newSource->Lock();

		list_remove_item(&newSource->consumers, this);
		list_add_item(&newSource->consumers, consumer);
		consumer->source = newSource;
		source = NULL;

		newSource->Unlock();
	} else
		consumer->source = NULL;

	// Release the reference the cache's consumer owned. The consumer takes
	// over the cache's ref to its source (if any) instead.
	ReleaseRefLocked();

	consumer->Unlock();
}


/*!	Removes the \a consumer from this cache.
	It will also release the reference to the cache owned by the consumer.
	Assumes you have the consumer's cache lock held. This cache must not be
	locked.
*/
void
VMCache::_RemoveConsumer(VMCache* consumer)
{
	TRACE(("remove consumer vm cache %p from cache %p\n", consumer, this));
	consumer->AssertLocked();

	T(RemoveConsumer(this, consumer));

	// Remove the store ref before locking the cache. Otherwise we'd call into
	// the VFS while holding the cache lock, which would reverse the usual
	// locking order.
	ReleaseStoreRef();

	// remove the consumer from the cache, but keep its reference until later
	Lock();
	list_remove_item(&consumers, consumer);
	consumer->source = NULL;

	ReleaseRefAndUnlock();
}


// #pragma mark - VMCacheFactory
	// TODO: Move to own source file!


#include <heap.h>

#include "VMAnonymousCache.h"
#include "VMAnonymousNoSwapCache.h"
#include "VMDeviceCache.h"
#include "VMNullCache.h"
#include "../cache/vnode_store.h"


/*static*/ status_t
VMCacheFactory::CreateAnonymousCache(VMCache*& _cache, bool canOvercommit,
	int32 numPrecommittedPages, int32 numGuardPages, bool swappable)
{
#if ENABLE_SWAP_SUPPORT
	if (swappable) {
		VMAnonymousCache* cache = new(nogrow) VMAnonymousCache;
		if (cache == NULL)
			return B_NO_MEMORY;

		status_t error = cache->Init(canOvercommit, numPrecommittedPages,
			numGuardPages);
		if (error != B_OK) {
			cache->Delete();
			return error;
		}

		T(Create(cache));

		_cache = cache;
		return B_OK;
	}
#endif

	VMAnonymousNoSwapCache* cache = new(nogrow) VMAnonymousNoSwapCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(canOvercommit, numPrecommittedPages,
		numGuardPages);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}
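

// Factory usage (an illustrative sketch only; the function name below is
// hypothetical): this is roughly how the cache for a new anonymous area
// would be obtained.
#if 0
static status_t
example_create_area_cache(VMCache*& _cache)
{
	// overcommitting, no precommitted or guard pages, not swappable
	status_t error = VMCacheFactory::CreateAnonymousCache(_cache, true, 0, 0,
		false);
	if (error != B_OK)
		return error;

	_cache->temporary = 1;
			// anonymous memory has no file behind it
	return B_OK;
}
#endif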


/*static*/ status_t
VMCacheFactory::CreateVnodeCache(VMCache*& _cache, struct vnode* vnode)
{
	VMVnodeCache* cache = new(nogrow) VMVnodeCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(vnode);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}


/*static*/ status_t
VMCacheFactory::CreateDeviceCache(VMCache*& _cache, addr_t baseAddress)
{
	VMDeviceCache* cache = new(nogrow) VMDeviceCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(baseAddress);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}


/*static*/ status_t
VMCacheFactory::CreateNullCache(VMCache*& _cache)
{
	VMNullCache* cache = new(nogrow) VMNullCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init();
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}