* Cleanup, no functional change - this is now a pure C++ header.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@33596 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler 2009-10-15 10:13:02 +00:00
parent 8f10aa5da6
commit 9b912c694a

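For context, the guard pattern this commit drops looks roughly like the sketch below (an illustrative fragment with made-up names, not the Haiku header itself): a header meant to be included from both C and C++ keeps its C++-only declarations behind an #ifdef __cplusplus check, so C translation units only ever see the plain-C part. Once every includer is C++, the guard is dead weight and the C++-only #include <util/SplayTree.h> can move up with the other includes, which is what the diff below does.

/* hypothetical mixed C/C++ header -- the style this commit retires */
#ifndef _EXAMPLE_VM_TYPES_H
#define _EXAMPLE_VM_TYPES_H

#include <stdint.h>

/* plain-C part: visible to every includer */
typedef uint32_t example_page_num_t;

#ifdef __cplusplus
/* C++-only part: hidden from C translation units */
#include <map>

class ExamplePageSet {
public:
	void Insert(example_page_num_t page) { fPages[page] = true; }
	bool Has(example_page_num_t page) const { return fPages.count(page) != 0; }

private:
	std::map<example_page_num_t, bool> fPages;
};
#endif	/* __cplusplus */

#endif	/* _EXAMPLE_VM_TYPES_H */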

@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
+ * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
  * Distributed under the terms of the MIT License.
  *
  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
@@ -15,21 +15,19 @@
 #include <kernel.h>
 #include <lock.h>
 #include <util/DoublyLinkedQueue.h>
+#include <util/SplayTree.h>
 #include <sys/uio.h>
 #include "kernel_debug_config.h"
-#ifdef __cplusplus
-#include <util/SplayTree.h>
 class AsyncIOCallback;
 struct vm_page_mapping;
 struct VMCache;
 typedef DoublyLinkedListLink<vm_page_mapping> vm_page_mapping_link;
 typedef struct vm_page_mapping {
 	vm_page_mapping_link page_link;
 	vm_page_mapping_link area_link;
@@ -44,7 +42,8 @@ class DoublyLinkedPageLink {
 		return &element->page_link;
 	}
-	inline const vm_page_mapping_link *operator()(const vm_page_mapping *element) const
+	inline const vm_page_mapping_link *operator()(
+		const vm_page_mapping *element) const
 	{
 		return &element->page_link;
 	}
@@ -57,52 +56,55 @@ class DoublyLinkedAreaLink {
 		return &element->area_link;
 	}
-	inline const vm_page_mapping_link *operator()(const vm_page_mapping *element) const
+	inline const vm_page_mapping_link *operator()(
+		const vm_page_mapping *element) const
 	{
 		return &element->area_link;
 	}
 };
-typedef class DoublyLinkedQueue<vm_page_mapping, DoublyLinkedPageLink> vm_page_mappings;
-typedef class DoublyLinkedQueue<vm_page_mapping, DoublyLinkedAreaLink> vm_area_mappings;
+typedef class DoublyLinkedQueue<vm_page_mapping, DoublyLinkedPageLink>
+	vm_page_mappings;
+typedef class DoublyLinkedQueue<vm_page_mapping, DoublyLinkedAreaLink>
+	vm_area_mappings;
 typedef uint32 page_num_t;
 struct vm_page {
-	struct vm_page *queue_prev;
-	struct vm_page *queue_next;
+	struct vm_page* queue_prev;
+	struct vm_page* queue_next;
 	addr_t physical_page_number;
-	VMCache *cache;
+	VMCache* cache;
 	page_num_t cache_offset;
 		// in page size units
 	SplayTreeLink<vm_page> cache_link;
-	vm_page *cache_next;
+	vm_page* cache_next;
 	vm_page_mappings mappings;
 #if DEBUG_PAGE_QUEUE
 	void* queue;
 #endif
 #if DEBUG_PAGE_CACHE_TRANSITIONS
 	uint32 debug_flags;
-	struct vm_page *collided_page;
+	struct vm_page* collided_page;
 #endif
 	uint8 type : 2;
 	uint8 state : 3;
 	uint8 is_cleared : 1;
 		// is currently only used in vm_page_allocate_page_run()
 	uint8 busy_writing : 1;
 	uint8 merge_swap : 1;
 		// used in VMAnonymousCache::Merge()
 	int8 usage_count;
 	uint16 wired_count;
 };
 enum {
@@ -159,108 +161,110 @@ typedef IteratableSplayTree<VMCachePagesTreeDefinition> VMCachePagesTree;
 struct VMCache {
 public:
 	VMCache();
 	virtual ~VMCache();
 	status_t Init(uint32 cacheType);
 	virtual void Delete();
 	bool Lock()
 		{ return mutex_lock(&fLock) == B_OK; }
 	bool TryLock()
 		{ return mutex_trylock(&fLock) == B_OK; }
 	bool SwitchLock(mutex* from)
-		{ return mutex_switch_lock(from, &fLock) == B_OK; }
+		{ return mutex_switch_lock(from, &fLock)
+			== B_OK; }
 	void Unlock();
 	void AssertLocked()
 		{ ASSERT_LOCKED_MUTEX(&fLock); }
 	void AcquireRefLocked();
 	void AcquireRef();
 	void ReleaseRefLocked();
 	void ReleaseRef();
 	void ReleaseRefAndUnlock()
 		{ ReleaseRefLocked(); Unlock(); }
 	vm_page* LookupPage(off_t offset);
 	void InsertPage(vm_page* page, off_t offset);
 	void RemovePage(vm_page* page);
 	void AddConsumer(VMCache* consumer);
 	status_t InsertAreaLocked(vm_area* area);
 	status_t RemoveArea(vm_area* area);
 	status_t WriteModified();
 	status_t SetMinimalCommitment(off_t commitment);
 	status_t Resize(off_t newSize);
 	status_t FlushAndRemoveAllPages();
 		// for debugging only
 	mutex* GetLock()
 		{ return &fLock; }
 	int32 RefCount() const
 		{ return fRefCount; }
 	// backing store operations
 	virtual status_t Commit(off_t size);
 	virtual bool HasPage(off_t offset);
 	virtual status_t Read(off_t offset, const iovec *vecs, size_t count,
 		uint32 flags, size_t *_numBytes);
 	virtual status_t Write(off_t offset, const iovec *vecs, size_t count,
 		uint32 flags, size_t *_numBytes);
 	virtual status_t WriteAsync(off_t offset, const iovec* vecs,
 		size_t count, size_t numBytes, uint32 flags,
 		AsyncIOCallback* callback);
 	virtual bool CanWritePage(off_t offset);
 	virtual int32 MaxPagesPerWrite() const
 		{ return -1; } // no restriction
 	virtual int32 MaxPagesPerAsyncWrite() const
 		{ return -1; } // no restriction
-	virtual status_t Fault(struct vm_address_space *aspace, off_t offset);
+	virtual status_t Fault(struct vm_address_space *aspace,
+		off_t offset);
 	virtual void Merge(VMCache* source);
 	virtual status_t AcquireUnreferencedStoreRef();
 	virtual void AcquireStoreRef();
 	virtual void ReleaseStoreRef();
 private:
 	inline bool _IsMergeable() const;
 	void _MergeWithOnlyConsumer();
 	void _RemoveConsumer(VMCache* consumer);
 public:
-	struct vm_area *areas;
+	struct vm_area* areas;
 	struct list_link consumer_link;
 	struct list consumers;
 		// list of caches that use this cache as a source
 	VMCachePagesTree pages;
-	VMCache *source;
+	VMCache* source;
 	off_t virtual_base;
 	off_t virtual_end;
 	off_t committed_size;
 		// TODO: Remove!
 	uint32 page_count;
 	uint32 temporary : 1;
 	uint32 scan_skip : 1;
 	uint32 type : 6;
 #if DEBUG_CACHE_LIST
 	struct VMCache* debug_previous;
 	struct VMCache* debug_next;
 #endif
 private:
 	int32 fRefCount;
 	mutex fLock;
 };
 typedef VMCache vm_cache;
@@ -274,39 +278,40 @@ extern vm_cache* gDebugCacheList;
 class VMCacheFactory {
 public:
 	static status_t CreateAnonymousCache(VMCache*& cache,
 		bool canOvercommit, int32 numPrecommittedPages,
 		int32 numGuardPages, bool swappable);
-	static status_t CreateVnodeCache(VMCache*& cache, struct vnode* vnode);
-	static status_t CreateDeviceCache(VMCache*& cache, addr_t baseAddress);
+	static status_t CreateVnodeCache(VMCache*& cache,
+		struct vnode* vnode);
+	static status_t CreateDeviceCache(VMCache*& cache,
+		addr_t baseAddress);
 	static status_t CreateNullCache(VMCache*& cache);
 };
 struct vm_area {
-	char *name;
+	char* name;
 	area_id id;
 	addr_t base;
 	addr_t size;
 	uint32 protection;
 	uint16 wiring;
 	uint16 memory_type;
-	VMCache *cache;
+	VMCache* cache;
 	vint32 no_cache_change;
 	off_t cache_offset;
 	uint32 cache_type;
 	vm_area_mappings mappings;
-	uint8 *page_protections;
-	struct vm_address_space *address_space;
-	struct vm_area *address_space_next;
-	struct vm_area *cache_next;
-	struct vm_area *cache_prev;
-	struct vm_area *hash_next;
+	uint8* page_protections;
+	struct vm_address_space* address_space;
+	struct vm_area* address_space_next;
+	struct vm_area* cache_next;
+	struct vm_area* cache_prev;
+	struct vm_area* hash_next;
 };
-#endif // __cplusplus
 enum {
 	VM_ASPACE_STATE_NORMAL = 0,
@@ -314,18 +319,19 @@ enum {
 };
 struct vm_address_space {
-	struct vm_area *areas;
-	struct vm_area *area_hint;
+	struct vm_area* areas;
+	struct vm_area* area_hint;
 	rw_lock lock;
 	addr_t base;
 	addr_t size;
 	int32 change_count;
 	vm_translation_map translation_map;
 	team_id id;
 	int32 ref_count;
 	int32 fault_count;
 	int32 state;
-	struct vm_address_space *hash_next;
+	struct vm_address_space* hash_next;
 };
-#endif /* _KERNEL_VM_TYPES_H */
+#endif // _KERNEL_VM_TYPES_H