/*
 * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
|
2009-12-02 21:05:10 +03:00
|
|
|
#ifndef _KERNEL_VM_VM_CACHE_H
|
|
|
|
#define _KERNEL_VM_VM_CACHE_H
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2003-10-17 18:42:45 +04:00
|
|
|
|
2010-01-01 20:09:23 +03:00
|
|
|
#include <debug.h>
|
2002-07-09 16:24:59 +04:00
|
|
|
#include <kernel.h>
|
2011-11-03 01:14:11 +04:00
|
|
|
#include <util/DoublyLinkedList.h>
|
2009-12-02 21:05:10 +03:00
|
|
|
#include <vm/vm.h>
|
2009-12-07 17:14:21 +03:00
|
|
|
#include <vm/vm_types.h>
|
|
|
|
|
|
|
|
#include "kernel_debug_config.h"
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2007-09-27 16:21:33 +04:00
|
|
|
|
2003-05-03 20:03:26 +04:00
|
|
|
// Forward declarations; only pointers to these types are used in this header.
struct kernel_args;
struct ObjectCache;
|
2003-05-03 20:03:26 +04:00
|
|
|
|
|
|
|
|
2009-12-07 17:14:21 +03:00
|
|
|
// Values stored in VMCache::type (see the "type : 6" bit field below).
enum {
	CACHE_TYPE_RAM = 0,
	CACHE_TYPE_VNODE,
	CACHE_TYPE_DEVICE,
	CACHE_TYPE_NULL
};
|
|
|
|
|
2009-12-07 18:42:08 +03:00
|
|
|
// Event flags for WaitForPageEvents()/NotifyPageEvents().
enum {
	PAGE_EVENT_NOT_BUSY	= 0x01		// page not busy anymore
};
|
|
|
|
|
|
|
|
|
2011-11-03 01:14:11 +04:00
|
|
|
// Object caches used to allocate the various VMCache-related objects
// (presumably slab allocators; defined elsewhere — confirm against the
// corresponding .cpp).
extern ObjectCache* gCacheRefObjectCache;
extern ObjectCache* gAnonymousCacheObjectCache;
extern ObjectCache* gAnonymousNoSwapCacheObjectCache;
extern ObjectCache* gVnodeCacheObjectCache;
extern ObjectCache* gDeviceCacheObjectCache;
extern ObjectCache* gNullCacheObjectCache;
|
|
|
|
|
|
|
|
|
2009-12-07 17:14:21 +03:00
|
|
|
struct VMCachePagesTreeDefinition {
|
|
|
|
typedef page_num_t KeyType;
|
|
|
|
typedef vm_page NodeType;
|
|
|
|
|
|
|
|
static page_num_t GetKey(const NodeType* node)
|
|
|
|
{
|
|
|
|
return node->cache_offset;
|
|
|
|
}
|
|
|
|
|
|
|
|
static SplayTreeLink<NodeType>* GetLink(NodeType* node)
|
|
|
|
{
|
|
|
|
return &node->cache_link;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int Compare(page_num_t key, const NodeType* node)
|
|
|
|
{
|
|
|
|
return key == node->cache_offset ? 0
|
|
|
|
: (key < node->cache_offset ? -1 : 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
static NodeType** GetListLink(NodeType* node)
|
|
|
|
{
|
|
|
|
return &node->cache_next;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
typedef IteratableSplayTree<VMCachePagesTreeDefinition> VMCachePagesTree;
|
|
|
|
|
2010-01-19 11:34:14 +03:00
|
|
|
|
2011-11-03 01:14:11 +04:00
|
|
|
/*!	Base class of all VM caches. A cache manages a set of pages (indexed by
	offset in a splay tree), is referenced by areas, and may form a chain via
	\c source/\c consumers. The cache is protected by its own mutex (fLock)
	and is reference counted.
*/
struct VMCache : public DoublyLinkedListLinkImpl<VMCache> {
public:
	typedef DoublyLinkedList<VMCache> ConsumerList;

public:
								VMCache();
	virtual						~VMCache();

			status_t			Init(uint32 cacheType, uint32 allocationFlags);

	virtual	void				Delete();

	// locking
	inline	bool				Lock();
	inline	bool				TryLock();
	inline	bool				SwitchLock(mutex* from);
	inline	bool				SwitchFromReadLock(rw_lock* from);
			void				Unlock(bool consumerLocked = false);
	inline	void				AssertLocked();

	// reference counting
	inline	void				AcquireRefLocked();
	inline	void				AcquireRef();
	inline	void				ReleaseRefLocked();
	inline	void				ReleaseRef();
	inline	void				ReleaseRefAndUnlock(
									bool consumerLocked = false);

	inline	VMCacheRef*			CacheRef() const	{ return fCacheRef; }

	// page event waiting/notification
			void				WaitForPageEvents(vm_page* page, uint32 events,
									bool relock);
			void				NotifyPageEvents(vm_page* page, uint32 events)
									{ if (fPageEventWaiters != NULL)
										_NotifyPageEvents(page, events); }
	inline	void				MarkPageUnbusy(vm_page* page);

	// page management
			vm_page*			LookupPage(off_t offset);
			void				InsertPage(vm_page* page, off_t offset);
			void				RemovePage(vm_page* page);
			void				MovePage(vm_page* page);
			void				MoveAllPages(VMCache* fromCache);

	inline	page_num_t			WiredPagesCount() const;
	inline	void				IncrementWiredPagesCount();
	inline	void				DecrementWiredPagesCount();

	virtual	int32				GuardSize()	{ return 0; }

			void				AddConsumer(VMCache* consumer);

	// area management
			status_t			InsertAreaLocked(VMArea* area);
			status_t			RemoveArea(VMArea* area);
			void				TransferAreas(VMCache* fromCache);
			uint32				CountWritableAreas(VMArea* ignoreArea) const;

			status_t			WriteModified();
			status_t			SetMinimalCommitment(off_t commitment,
									int priority);
	virtual	status_t			Resize(off_t newSize, int priority);

			status_t			FlushAndRemoveAllPages();

			void*				UserData()	{ return fUserData; }
			void				SetUserData(void* data)	{ fUserData = data; }
									// Settable by the lock owner and valid as
									// long as the lock is owned.

			// for debugging only
			int32				RefCount() const
									{ return fRefCount; }

			// backing store operations
	virtual	status_t			Commit(off_t size, int priority);
	virtual	bool				HasPage(off_t offset);

	virtual	status_t			Read(off_t offset, const generic_io_vec *vecs,
									size_t count, uint32 flags,
									generic_size_t *_numBytes);
	virtual	status_t			Write(off_t offset, const generic_io_vec *vecs,
									size_t count, uint32 flags,
									generic_size_t *_numBytes);
	virtual	status_t			WriteAsync(off_t offset,
									const generic_io_vec* vecs, size_t count,
									generic_size_t numBytes, uint32 flags,
									AsyncIOCallback* callback);
	virtual	bool				CanWritePage(off_t offset);

	virtual	int32				MaxPagesPerWrite() const
									{ return -1; }	// no restriction
	virtual	int32				MaxPagesPerAsyncWrite() const
									{ return -1; }	// no restriction

	virtual	status_t			Fault(struct VMAddressSpace *aspace,
									off_t offset);

	virtual	void				Merge(VMCache* source);

	virtual	status_t			AcquireUnreferencedStoreRef();
	virtual	void				AcquireStoreRef();
	virtual	void				ReleaseStoreRef();

	virtual	bool				DebugHasPage(off_t offset);
			vm_page*			DebugLookupPage(off_t offset);

	virtual	void				Dump(bool showPages) const;

protected:
	virtual	void				DeleteObject() = 0;
									// deallocation hook for subclasses

public:
			VMArea*				areas;
			ConsumerList		consumers;
									// list of caches that use this cache as
									// a source
			VMCachePagesTree	pages;
			VMCache*			source;
			off_t				virtual_base;
			off_t				virtual_end;
			off_t				committed_size;
									// TODO: Remove!
			uint32				page_count;
			uint32				temporary : 1;
			uint32				type : 6;
									// one of the CACHE_TYPE_* constants

#if DEBUG_CACHE_LIST
			VMCache*			debug_previous;
			VMCache*			debug_next;
#endif

private:
			struct PageEventWaiter;
			friend struct VMCacheRef;

private:
			void				_NotifyPageEvents(vm_page* page,
									uint32 events);

	inline	bool				_IsMergeable() const;

			void				_MergeWithOnlyConsumer();
			void				_RemoveConsumer(VMCache* consumer);

private:
			int32				fRefCount;
			mutex				fLock;
			PageEventWaiter*	fPageEventWaiters;
			void*				fUserData;
			VMCacheRef*			fCacheRef;
			page_num_t			fWiredPagesCount;
};
|
|
|
|
|
|
|
|
|
|
|
|
#if DEBUG_CACHE_LIST
// Global list of all caches, available only with DEBUG_CACHE_LIST builds
// (linked via VMCache::debug_previous/debug_next).
extern VMCache* gDebugCacheList;
#endif
|
|
|
|
|
|
|
|
|
|
|
|
class VMCacheFactory {
|
|
|
|
public:
|
|
|
|
static status_t CreateAnonymousCache(VMCache*& cache,
|
|
|
|
bool canOvercommit, int32 numPrecommittedPages,
|
2010-01-26 17:44:58 +03:00
|
|
|
int32 numGuardPages, bool swappable,
|
|
|
|
int priority);
|
2009-12-07 17:14:21 +03:00
|
|
|
static status_t CreateVnodeCache(VMCache*& cache,
|
|
|
|
struct vnode* vnode);
|
|
|
|
static status_t CreateDeviceCache(VMCache*& cache,
|
|
|
|
addr_t baseAddress);
|
2010-01-26 17:44:58 +03:00
|
|
|
static status_t CreateNullCache(int priority, VMCache*& cache);
|
2009-12-07 17:14:21 +03:00
|
|
|
};
|
|
|
|
|
|
|
|
|
2010-01-01 20:09:23 +03:00
|
|
|
|
|
|
|
bool
|
|
|
|
VMCache::Lock()
|
|
|
|
{
|
|
|
|
return mutex_lock(&fLock) == B_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
bool
|
|
|
|
VMCache::TryLock()
|
|
|
|
{
|
|
|
|
return mutex_trylock(&fLock) == B_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
bool
|
|
|
|
VMCache::SwitchLock(mutex* from)
|
|
|
|
{
|
|
|
|
return mutex_switch_lock(from, &fLock) == B_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-01-07 18:32:28 +03:00
|
|
|
bool
|
|
|
|
VMCache::SwitchFromReadLock(rw_lock* from)
|
|
|
|
{
|
|
|
|
return mutex_switch_from_read_lock(from, &fLock) == B_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-01-01 20:09:23 +03:00
|
|
|
void
|
|
|
|
VMCache::AssertLocked()
|
|
|
|
{
|
|
|
|
ASSERT_LOCKED_MUTEX(&fLock);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
VMCache::AcquireRefLocked()
|
|
|
|
{
|
|
|
|
ASSERT_LOCKED_MUTEX(&fLock);
|
|
|
|
|
|
|
|
fRefCount++;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
VMCache::AcquireRef()
|
|
|
|
{
|
|
|
|
Lock();
|
|
|
|
fRefCount++;
|
|
|
|
Unlock();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
VMCache::ReleaseRefLocked()
|
|
|
|
{
|
|
|
|
ASSERT_LOCKED_MUTEX(&fLock);
|
|
|
|
|
|
|
|
fRefCount--;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
VMCache::ReleaseRef()
|
|
|
|
{
|
|
|
|
Lock();
|
|
|
|
fRefCount--;
|
|
|
|
Unlock();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void
|
2010-01-19 06:02:11 +03:00
|
|
|
VMCache::ReleaseRefAndUnlock(bool consumerLocked)
|
2010-01-01 20:09:23 +03:00
|
|
|
{
|
|
|
|
ReleaseRefLocked();
|
2010-01-19 06:02:11 +03:00
|
|
|
Unlock(consumerLocked);
|
2010-01-01 20:09:23 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-01-29 13:00:45 +03:00
|
|
|
void
|
|
|
|
VMCache::MarkPageUnbusy(vm_page* page)
|
|
|
|
{
|
2010-02-16 19:49:52 +03:00
|
|
|
ASSERT(page->busy);
|
2010-01-29 13:00:45 +03:00
|
|
|
page->busy = false;
|
|
|
|
NotifyPageEvents(page, PAGE_EVENT_NOT_BUSY);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-07-10 19:08:13 +04:00
|
|
|
/*!	Returns the number of wired pages in this cache. */
page_num_t
VMCache::WiredPagesCount() const
{
	return fWiredPagesCount;
}
|
|
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
VMCache::IncrementWiredPagesCount()
|
|
|
|
{
|
|
|
|
ASSERT(fWiredPagesCount < page_count);
|
|
|
|
|
|
|
|
fWiredPagesCount++;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
VMCache::DecrementWiredPagesCount()
|
|
|
|
{
|
|
|
|
ASSERT(fWiredPagesCount > 0);
|
|
|
|
|
|
|
|
fWiredPagesCount--;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// vm_page methods implemented here to avoid VMCache.h inclusion in vm_types.h
|
|
|
|
|
|
|
|
inline void
|
|
|
|
vm_page::IncrementWiredCount()
|
|
|
|
{
|
|
|
|
if (fWiredCount++ == 0)
|
|
|
|
cache_ref->cache->IncrementWiredPagesCount();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
inline void
|
|
|
|
vm_page::DecrementWiredCount()
|
|
|
|
{
|
2011-11-14 00:19:59 +04:00
|
|
|
ASSERT(fWiredCount > 0);
|
|
|
|
|
2010-07-10 19:08:13 +04:00
|
|
|
if (--fWiredCount == 0)
|
|
|
|
cache_ref->cache->DecrementWiredPagesCount();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2003-10-17 18:42:45 +04:00
|
|
|
#ifdef __cplusplus
|
|
|
|
extern "C" {
|
|
|
|
#endif
|
|
|
|
|
2004-11-08 17:16:35 +03:00
|
|
|
status_t vm_cache_init(struct kernel_args *args);
|
2009-12-23 01:00:35 +03:00
|
|
|
void vm_cache_init_post_heap();
|
2008-07-23 00:36:32 +04:00
|
|
|
struct VMCache *vm_cache_acquire_locked_page_cache(struct vm_page *page,
|
|
|
|
bool dontWait);
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2003-10-17 18:42:45 +04:00
|
|
|
#ifdef __cplusplus
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2009-12-07 17:14:21 +03:00
|
|
|
|
2009-12-02 21:05:10 +03:00
|
|
|
#endif /* _KERNEL_VM_VM_CACHE_H */
|