Merged branch haiku/branches/developer/bonefish/vm into trunk. This
introduces the following relevant changes:

* VMCache:
  - Renamed vm_cache to VMCache, merged it with vm_store, and made it a
    C++ class with virtual methods (replacing the store operations).
    Turned the different store implementations into subclasses.
  - Introduced a MergeStore() callback and changed the semantics of
    Commit().
  - Changed the locking and referencing semantics: a reference can only
    be acquired or released with the cache locked. An unreferenced cache
    is deleted, and a mergeable cache merged, when it is unlocked. This
    removes the "busy" state of a cache and simplifies the page fault
    code.
* Added VMAnonymousCache, which will implement swap support (work by
  Zhao Shuai). It is not yet integrated or used, though.
* Enabled the mutex/recursive lock holder asserts.
* Fixed DoublyLinkedList::Swap().
* Generalized the low memory handler to a low resource handler, and made
  semaphores and reserved memory handled resources as well. Made
  vm_try_reserve_memory() optionally wait (with timeout), and used that
  feature to reserve memory for areas.
...

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@26572 a95241bf-73f2-0310-859d-f6bbb57e9c96
Parent: 61a2483a53
Commit: 5c99d63970
@@ -61,6 +61,10 @@ public:
 			void Add(ConditionVariableEntry* entry);
 
+			status_t Wait(uint32 flags = 0, bigtime_t timeout = 0);
+				// all-in one, i.e. doesn't need a
+				// ConditionVariableEntry
+
 			const void* Object() const { return fObject; }
 
 	static	void ListAll();
@@ -49,8 +49,6 @@ extern status_t file_map_init(void);
 extern status_t file_cache_init_post_boot_device(void);
 extern status_t file_cache_init(void);
 
-extern vm_store *vm_create_vnode_store(struct vnode *vnode);
-
 #ifdef __cplusplus
 }
 #endif
@@ -44,4 +44,21 @@ status_t heap_init_post_thread();
 }
 #endif
 
+
+#ifdef __cplusplus
+
+#include <new>
+
+static const struct nogrow_t {
+} nogrow = {};
+
+inline void*
+operator new(size_t size, const nogrow_t& nogrow)
+{
+    return malloc_nogrow(size);
+}
+
+#endif /* __cplusplus */
+
+
 #endif /* _KERNEL_MEMHEAP_H */
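A note on this new memheap.h block: nogrow_t is a tag type enabling a placement-style operator new that allocates via malloc_nogrow(), i.e. it fails instead of growing the kernel heap, which matters on low-resource paths. A minimal, hedged usage sketch — MyObject is a hypothetical type, not from this commit:

    // new(nogrow) yields NULL on allocation failure instead of growing
    // the heap, so the result must be checked (hypothetical caller).
    MyObject* object = new(nogrow) MyObject;
    if (object == NULL)
        return B_NO_MEMORY;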
@@ -53,13 +53,13 @@ typedef struct rw_lock {
 #define RW_LOCK_FLAG_CLONE_NAME 0x1
 
 
-#if 0 && KDEBUG // XXX disable this for now, it causes problems when including thread.h here
-#	include <thread.h>
-#define ASSERT_LOCKED_RECURSIVE(r) { ASSERT(thread_get_current_thread_id() == (r)->holder); }
-#define ASSERT_LOCKED_MUTEX(m) { ASSERT(thread_get_current_thread_id() == (m)->holder); }
+#if KDEBUG
+#	define ASSERT_LOCKED_RECURSIVE(r) \
+		{ ASSERT(find_thread(NULL) == (r)->lock.holder); }
+#	define ASSERT_LOCKED_MUTEX(m) { ASSERT(find_thread(NULL) == (m)->holder); }
 #else
-#define ASSERT_LOCKED_RECURSIVE(r)
-#define ASSERT_LOCKED_MUTEX(m)
+#	define ASSERT_LOCKED_RECURSIVE(r) do {} while (false)
+#	define ASSERT_LOCKED_MUTEX(m) do {} while (false)
 #endif
@@ -102,10 +102,15 @@ extern void mutex_init(mutex* lock, const char* name);
 	// name is *not* cloned nor freed in mutex_destroy()
 extern void mutex_init_etc(mutex* lock, const char* name, uint32 flags);
 extern void mutex_destroy(mutex* lock);
+extern status_t mutex_switch_lock(mutex* from, mutex* to);
+	// Unlocks "from" and locks "to" such that unlocking and starting to wait
+	// for the lock is atomically. I.e. if "from" guards the object "to" belongs
+	// to, the operation is safe as long as "from" is held while destroying
+	// "to".
 
 // implementation private:
 extern status_t _mutex_lock(mutex* lock, bool threadsLocked);
-extern void _mutex_unlock(mutex* lock);
+extern void _mutex_unlock(mutex* lock, bool threadsLocked);
 extern status_t _mutex_trylock(mutex* lock);
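For illustration, the declared mutex_switch_lock() contract in use — a hedged sketch; listLock and lookup_cache() are hypothetical stand-ins for the commit's cache users, and VMCache::SwitchLock() (introduced below in vm_types.h) wraps this call:

    // Hold listLock, find the object, then atomically trade listLock
    // for the object's own lock, so no third thread can delete the
    // object in the window between the two operations.
    mutex_lock(&listLock);
    VMCache* cache = lookup_cache();        // hypothetical lookup
    if (cache->SwitchLock(&listLock)) {     // wraps mutex_switch_lock()
        // ... cache is locked, listLock is already released ...
        cache->Unlock();
    }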
@@ -151,12 +156,10 @@ mutex_trylock(mutex* lock)
 static inline void
 mutex_unlock(mutex* lock)
 {
-#ifdef KDEBUG
-    _mutex_unlock(lock);
-#else
+#if !defined(KDEBUG)
     if (atomic_add(&lock->count, 1) < -1)
-        _mutex_unlock(lock);
 #endif
+        _mutex_unlock(lock, false);
 }
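Reading the reworked inline: the #endif is placed so that the single _mutex_unlock(lock, false) line serves as the body of the if in release builds and as an unconditional statement in KDEBUG builds. Spelled out, the two preprocessed results are, as a sketch:

    // With KDEBUG (always take the checked slow path):
    //     _mutex_unlock(lock, false);
    // Without KDEBUG (fast path; slow path only when the count shows
    // waiters, i.e. it was < -1 before the atomic increment):
    //     if (atomic_add(&lock->count, 1) < -1)
    //         _mutex_unlock(lock, false);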
headers/private/kernel/low_resource_manager.h (new file, 53 lines)
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2005-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
+ * Distributed under the terms of the MIT License.
+ */
+#ifndef _KERNEL_LOW_RESOURCE_MANAGER_H
+#define _KERNEL_LOW_RESOURCE_MANAGER_H
+
+
+#include <SupportDefs.h>
+
+
+/* warning levels for low resource handlers */
+enum {
+    B_NO_LOW_RESOURCE = 0,
+    B_LOW_RESOURCE_NOTE,
+    B_LOW_RESOURCE_WARNING,
+    B_LOW_RESOURCE_CRITICAL,
+};
+
+enum {
+    B_KERNEL_RESOURCE_PAGES = 0x01,      /* physical pages */
+    B_KERNEL_RESOURCE_MEMORY = 0x02,     /* reservable memory */
+    B_KERNEL_RESOURCE_SEMAPHORES = 0x04, /* semaphores */
+
+    B_ALL_KERNEL_RESOURCES = B_KERNEL_RESOURCE_PAGES
+        | B_KERNEL_RESOURCE_MEMORY
+        | B_KERNEL_RESOURCE_SEMAPHORES
+};
+
+typedef void (*low_resource_func)(void *data, uint32 resources, int32 level);
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+status_t low_resource_manager_init(void);
+status_t low_resource_manager_init_post_thread(void);
+int32 low_resource_state(uint32 resources);
+void low_resource(uint32 resource, uint64 requirements, uint32 flags,
+    uint32 timeout);
+
+// these calls might get public some day
+status_t register_low_resource_handler(low_resource_func function, void *data,
+    uint32 resources, int32 priority);
+status_t unregister_low_resource_handler(low_resource_func function,
+    void *data);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _KERNEL_LOW_RESOURCE_MANAGER_H */
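To illustrate the new header's API: a subsystem registers one callback for the resources it can relieve, and the manager calls it back with the subset that is currently low plus the severity level. A hedged sketch — my_handler and sMyState are hypothetical, but the mask and priority 0 match this commit's block cache and vnode users:

    static void
    my_handler(void *data, uint32 resources, int32 level)
    {
        // 'resources' is the subset of the registered mask that is low
        if ((resources & B_KERNEL_RESOURCE_MEMORY) != 0
            && level >= B_LOW_RESOURCE_WARNING) {
            // free caches, shrink pools, ...
        }
    }

    // e.g. in an init hook:
    register_low_resource_handler(&my_handler, &sMyState,
        B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY, 0);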
@@ -484,34 +484,20 @@ void
 DOUBLY_LINKED_LIST_CLASS_NAME::Swap(Element *a, Element *b)
 {
     if (a && b && a != b) {
-        Link *aLink = sGetLink(a);
-        Link *bLink = sGetLink(b);
-        Element *aPrev = aLink->previous;
-        Element *bPrev = bLink->previous;
-        Element *aNext = aLink->next;
-        Element *bNext = bLink->next;
-        // place a
-        if (bPrev)
-            sGetLink(bPrev)->next = a;
-        else
-            fFirst = a;
-        if (bNext)
-            sGetLink(bNext)->previous = a;
-        else
-            fLast = a;
-        aLink->previous = bPrev;
-        aLink->next = bNext;
-        // place b
-        if (aPrev)
-            sGetLink(aPrev)->next = b;
-        else
-            fFirst = b;
-        if (aNext)
-            sGetLink(aNext)->previous = b;
-        else
-            fLast = b;
-        bLink->previous = aPrev;
-        bLink->next = aNext;
+        Element *aNext = sGetLink(a)->next;
+        Element *bNext = sGetLink(b)->next;
+        if (a == bNext) {
+            Remove(a);
+            Insert(b, a);
+        } else if (b == aNext) {
+            Remove(b);
+            Insert(a, b);
+        } else {
+            Remove(a);
+            Remove(b);
+            Insert(aNext, b);
+            Insert(bNext, a);
+        }
     }
 }
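Why this fixes Swap(): the old pointer-splicing version corrupted the list for adjacent elements — with a directly followed by b, bPrev == a, so the "place a" step wrote sGetLink(a)->next = a, a self-link. The rewrite routes the adjacent cases through Remove()/Insert(), which cannot produce self-links. A hedged trace of the "b == aNext" branch on a list x -> a -> b -> y:

    // Remove(b):      x -> a -> y
    // Insert(a, b):   x -> b -> a -> y   (insert b before a)
    // Net effect: a and b swapped, all neighbor links consistent.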
@@ -37,7 +37,7 @@ struct net_stat;
 struct pollfd;
 struct selectsync;
 struct select_info;
-struct vm_cache;
+struct VMCache;
 struct vnode;
@@ -97,7 +97,7 @@ status_t vfs_read_pages(struct vnode *vnode, void *cookie, off_t pos,
     const iovec *vecs, size_t count, size_t *_numBytes, bool fsReenter);
 status_t vfs_write_pages(struct vnode *vnode, void *cookie, off_t pos,
     const iovec *vecs, size_t count, size_t *_numBytes, bool fsReenter);
-status_t vfs_get_vnode_cache(struct vnode *vnode, struct vm_cache **_cache,
+status_t vfs_get_vnode_cache(struct vnode *vnode, struct VMCache **_cache,
     bool allocate);
 status_t vfs_get_file_map(struct vnode *vnode, off_t offset, size_t size,
     struct file_io_vec *vecs, size_t *_count);
@@ -17,7 +17,7 @@
 struct kernel_args;
 struct team;
 struct vm_page;
-struct vm_cache;
+struct VMCache;
 struct vm_area;
 struct vm_address_space;
 struct vnode;
@@ -60,8 +60,8 @@ area_id vm_map_physical_memory(team_id team, const char *name, void **address,
 area_id vm_map_file(team_id aid, const char *name, void **address,
     uint32 addressSpec, addr_t size, uint32 protection, uint32 mapping,
     int fd, off_t offset);
-struct vm_cache *vm_area_get_locked_cache(struct vm_area *area);
-void vm_area_put_locked_cache(struct vm_cache *cache);
+struct VMCache *vm_area_get_locked_cache(struct vm_area *area);
+void vm_area_put_locked_cache(struct VMCache *cache);
 area_id vm_create_null_area(team_id team, const char *name, void **address,
     uint32 addressSpec, addr_t size);
 area_id vm_copy_area(team_id team, const char *name, void **_address,
@@ -70,7 +70,7 @@ area_id vm_clone_area(team_id team, const char *name, void **address,
     uint32 addressSpec, uint32 protection, uint32 mapping,
     area_id sourceArea, bool kernel);
 status_t vm_delete_area(team_id teamID, area_id areaID, bool kernel);
-status_t vm_create_vnode_cache(struct vnode *vnode, struct vm_cache **_cache);
+status_t vm_create_vnode_cache(struct vnode *vnode, struct VMCache **_cache);
 struct vm_area *vm_area_lookup(struct vm_address_space *addressSpace,
     addr_t address);
 status_t vm_set_area_memory_type(area_id id, addr_t physicalBase, uint32 type);
@@ -87,6 +87,7 @@ status_t vm_get_physical_page(addr_t paddr, addr_t *vaddr, uint32 flags);
 status_t vm_put_physical_page(addr_t vaddr);
 
 off_t vm_available_memory(void);
+off_t vm_available_not_needed_memory(void);
 
 // user syscalls
 area_id _user_create_area(const char *name, void **address, uint32 addressSpec,
@@ -15,31 +15,14 @@
 struct kernel_args;
 
-//typedef struct vm_store vm_store;
-
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 status_t vm_cache_init(struct kernel_args *args);
-struct vm_cache *vm_cache_create(struct vm_store *store);
-void vm_cache_acquire_ref(struct vm_cache *cache);
-void vm_cache_release_ref(struct vm_cache *cache);
-struct vm_cache *vm_cache_acquire_page_cache_ref(struct vm_page *page);
-struct vm_page *vm_cache_lookup_page(struct vm_cache *cache, off_t page);
-void vm_cache_insert_page(struct vm_cache *cache, struct vm_page *page,
-    off_t offset);
-void vm_cache_remove_page(struct vm_cache *cache, struct vm_page *page);
-void vm_cache_remove_consumer(struct vm_cache *cache, struct vm_cache *consumer);
-void vm_cache_add_consumer_locked(struct vm_cache *cache,
-    struct vm_cache *consumer);
-status_t vm_cache_write_modified(struct vm_cache *cache, bool fsReenter);
-status_t vm_cache_set_minimal_commitment_locked(struct vm_cache *cache,
-    off_t commitment);
-status_t vm_cache_resize(struct vm_cache *cache, off_t newSize);
-status_t vm_cache_insert_area_locked(struct vm_cache *cache, vm_area *area);
-status_t vm_cache_remove_area(struct vm_cache *cache, struct vm_area *area);
+struct VMCache *vm_cache_acquire_locked_page_cache(struct vm_page *page,
+    bool dontWait);
 
 #ifdef __cplusplus
 }
@@ -1,40 +0,0 @@
-/*
- * Copyright 2005-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
- * Distributed under the terms of the MIT License.
- */
-#ifndef _KERNEL_VM_LOW_MEMORY_H
-#define _KERNEL_VM_LOW_MEMORY_H
-
-
-#include <SupportDefs.h>
-
-
-/* warning levels for low memory handlers */
-enum {
-    B_NO_LOW_MEMORY = 0,
-    B_LOW_MEMORY_NOTE,
-    B_LOW_MEMORY_WARNING,
-    B_LOW_MEMORY_CRITICAL,
-};
-
-typedef void (*low_memory_func)(void *data, int32 level);
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-status_t vm_low_memory_init(void);
-status_t vm_low_memory_init_post_thread(void);
-int32 vm_low_memory_state(void);
-void vm_low_memory(size_t requirements);
-
-// these calls might get public some day
-status_t register_low_memory_handler(low_memory_func function, void *data,
-    int32 priority);
-status_t unregister_low_memory_handler(low_memory_func function, void *data);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _KERNEL_VM_LOW_MEMORY_H */
@@ -26,7 +26,7 @@ status_t vm_page_init_post_thread(struct kernel_args *args);
 
 status_t vm_mark_page_inuse(addr_t page);
 status_t vm_mark_page_range_inuse(addr_t startPage, addr_t length);
-void vm_page_free(struct vm_cache *cache, struct vm_page *page);
+void vm_page_free(struct VMCache *cache, struct vm_page *page);
 status_t vm_page_set_state(struct vm_page *page, int state);
 void vm_page_requeue(struct vm_page *page, bool tail);
@@ -35,11 +35,11 @@ size_t vm_page_num_pages(void);
 size_t vm_page_num_free_pages(void);
 size_t vm_page_num_available_pages(void);
 
-status_t vm_page_write_modified_page_range(struct vm_cache *cache,
+status_t vm_page_write_modified_page_range(struct VMCache *cache,
     uint32 firstPage, uint32 endPage, bool fsReenter);
-status_t vm_page_write_modified_pages(struct vm_cache *cache, bool fsReenter);
+status_t vm_page_write_modified_pages(struct VMCache *cache, bool fsReenter);
 void vm_page_schedule_write_page(struct vm_page *page);
-void vm_page_schedule_write_page_range(struct vm_cache *cache,
+void vm_page_schedule_write_page_range(struct VMCache *cache,
     uint32 firstPage, uint32 endPage);
 
 void vm_page_unreserve_pages(uint32 count);
@@ -33,7 +33,7 @@ extern "C" {
 status_t vm_page_fault(addr_t address, addr_t faultAddress, bool isWrite,
     bool isUser, addr_t *newip);
 void vm_unreserve_memory(size_t bytes);
-status_t vm_try_reserve_memory(size_t bytes);
+status_t vm_try_reserve_memory(size_t bytes, bigtime_t timeout);
 void vm_schedule_page_scanner(uint32 target);
 status_t vm_daemon_init(void);
@@ -13,6 +13,7 @@
 #include <arch/vm_translation_map.h>
+#include <condition_variable.h>
 #include <kernel.h>
 #include <lock.h>
 #include <util/DoublyLinkedQueue.h>
 
 #include <sys/uio.h>
@@ -34,6 +35,7 @@
 #include <util/SplayTree.h>
 
 struct vm_page_mapping;
+struct VMCache;
 typedef DoublyLinkedListLink<vm_page_mapping> vm_page_mapping_link;
 
 typedef struct vm_page_mapping {
@@ -80,7 +82,7 @@ struct vm_page {
 
     addr_t physical_page_number;
 
-    struct vm_cache *cache;
+    VMCache *cache;
     page_num_t cache_offset;
         // in page size units
 
@@ -165,36 +167,121 @@ struct VMCachePagesTreeDefinition {
 
 typedef IteratableSplayTree<VMCachePagesTreeDefinition> VMCachePagesTree;
 
-struct vm_cache {
-    mutex lock;
+struct VMCache {
+public:
+                VMCache();
+    virtual     ~VMCache();
+
+    status_t    Init(uint32 cacheType);
+
+    virtual void Delete();
+
+    bool        Lock()
+                    { return mutex_lock(&fLock) == B_OK; }
+    bool        TryLock()
+                    { return mutex_trylock(&fLock) == B_OK; }
+    bool        SwitchLock(mutex* from)
+                    { return mutex_switch_lock(from, &fLock) == B_OK; }
+    void        Unlock();
+    void        AssertLocked()
+                    { ASSERT_LOCKED_MUTEX(&fLock); }
+
+    void        AcquireRefLocked();
+    void        AcquireRef();
+    void        ReleaseRefLocked();
+    void        ReleaseRef();
+    void        ReleaseRefAndUnlock()
+                    { ReleaseRefLocked(); Unlock(); }
+
+    vm_page*    LookupPage(off_t offset);
+    void        InsertPage(vm_page* page, off_t offset);
+    void        RemovePage(vm_page* page);
+
+    void        AddConsumer(VMCache* consumer);
+
+    status_t    InsertAreaLocked(vm_area* area);
+    status_t    RemoveArea(vm_area* area);
+
+    status_t    WriteModified(bool fsReenter);
+    status_t    SetMinimalCommitment(off_t commitment);
+    status_t    Resize(off_t newSize);
+
+    // for debugging only
+    mutex*      GetLock()
+                    { return &fLock; }
+    int32       RefCount() const
+                    { return fRefCount; }
+
+    // backing store operations
+    virtual status_t Commit(off_t size);
+    virtual bool     HasPage(off_t offset);
+
+    virtual status_t Read(off_t offset, const iovec *vecs, size_t count,
+                        size_t *_numBytes, bool fsReenter);
+    virtual status_t Write(off_t offset, const iovec *vecs, size_t count,
+                        size_t *_numBytes, bool fsReenter);
+
+    virtual status_t Fault(struct vm_address_space *aspace, off_t offset);
+
+    virtual void     MergeStore(VMCache* source);
+
+    virtual status_t AcquireUnreferencedStoreRef();
+    virtual void     AcquireStoreRef();
+    virtual void     ReleaseStoreRef();
+
+private:
+    inline bool _IsMergeable() const;
+
+    void        _MergeWithOnlyConsumer();
+    void        _RemoveConsumer(VMCache* consumer);
+
+
+public:
     struct vm_area *areas;
-    vint32 ref_count;
     struct list_link consumer_link;
     struct list consumers;
         // list of caches that use this cache as a source
     VMCachePagesTree pages;
-    struct vm_cache *source;
-    struct vm_store *store;
+    VMCache *source;
     off_t virtual_base;
-    off_t virtual_size;
-        // the size is absolute, and independent from virtual_base
+    off_t virtual_end;
     off_t committed_size;
+        // TODO: Remove!
     uint32 page_count;
     uint32 temporary : 1;
     uint32 scan_skip : 1;
-    uint32 busy : 1;
-    uint32 type : 5;
+    uint32 type : 6;
 
 #if DEBUG_CACHE_LIST
-    struct vm_cache* debug_previous;
-    struct vm_cache* debug_next;
+    struct VMCache* debug_previous;
+    struct VMCache* debug_next;
 #endif
+
+private:
+    int32 fRefCount;
+    mutex fLock;
 };
 
+typedef VMCache vm_cache;
+    // TODO: Remove!
+
+
 #if DEBUG_CACHE_LIST
 extern vm_cache* gDebugCacheList;
 #endif
 
+
+class VMCacheFactory {
+public:
+    static status_t CreateAnonymousCache(VMCache*& cache,
+        bool canOvercommit, int32 numPrecommittedPages,
+        int32 numGuardPages);
+    static status_t CreateVnodeCache(VMCache*& cache, struct vnode* vnode);
+    static status_t CreateDeviceCache(VMCache*& cache, addr_t baseAddress);
+    static status_t CreateNullCache(VMCache*& cache);
+};
+
+
 struct vm_area {
     char *name;
     area_id id;
@@ -204,7 +291,7 @@ struct vm_area {
     uint16 wiring;
     uint16 memory_type;
 
-    struct vm_cache *cache;
+    VMCache *cache;
     vint32 no_cache_change;
     off_t cache_offset;
     uint32 cache_type;
@@ -239,25 +326,4 @@ struct vm_address_space {
     struct vm_address_space *hash_next;
 };
 
-struct vm_store {
-    struct vm_store_ops *ops;
-    struct vm_cache *cache;
-    off_t committed_size;
-};
-
-typedef struct vm_store_ops {
-    void (*destroy)(struct vm_store *backingStore);
-    status_t (*commit)(struct vm_store *backingStore, off_t size);
-    bool (*has_page)(struct vm_store *backingStore, off_t offset);
-    status_t (*read)(struct vm_store *backingStore, off_t offset,
-        const iovec *vecs, size_t count, size_t *_numBytes, bool fsReenter);
-    status_t (*write)(struct vm_store *backingStore, off_t offset,
-        const iovec *vecs, size_t count, size_t *_numBytes, bool fsReenter);
-    status_t (*fault)(struct vm_store *backingStore,
-        struct vm_address_space *aspace, off_t offset);
-    status_t (*acquire_unreferenced_ref)(struct vm_store *backingStore);
-    void (*acquire_ref)(struct vm_store *backingStore);
-    void (*release_ref)(struct vm_store *backingStore);
-} vm_store_ops;
-
 #endif /* _KERNEL_VM_TYPES_H */
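The new contract in practice: references are only taken or dropped while the cache is locked, and deletion or merging happens at unlock time, which is why the old busy flag can go away. A hedged sketch of a typical consumer, using only API introduced above (page and offset are hypothetical inputs):

    // Look up a page's cache, locked and referenced in one step.
    VMCache* cache = vm_cache_acquire_locked_page_cache(page, true);
    if (cache != NULL) {
        vm_page* other = cache->LookupPage(offset);
        // ... work with the cache's pages ...
        cache->ReleaseRefAndUnlock();
            // drops the reference while still locked, then unlocks; an
            // unreferenced cache is deleted here, a mergeable one merged
    }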
@@ -29,6 +29,7 @@ KernelMergeObject kernel_core.o :
     kernel_daemon.cpp
     linkhack.c
     lock.cpp
+    low_resource_manager.cpp
     main.cpp
     module.cpp
     Notifications.cpp
src/system/kernel/cache/block_cache.cpp (33 changes)
@@ -17,7 +17,7 @@
 
 #include <condition_variable.h>
 #include <lock.h>
-#include <vm_low_memory.h>
+#include <low_resource_manager.h>
 #include <slab/Slab.h>
 #include <tracing.h>
 #include <util/kernel_cpp.h>
@@ -129,7 +129,7 @@ struct block_cache : DoublyLinkedListLinkImpl<block_cache> {
     void Free(void *buffer);
     void *Allocate();
 
-    static void LowMemoryHandler(void *data, int32 level);
+    static void LowMemoryHandler(void *data, uint32 resources, int32 level);
 
 private:
     cached_block *_GetUnusedBlock();
@@ -793,7 +793,8 @@ block_cache::block_cache(int _fd, off_t numBlocks, size_t blockSize,
         return;
 
     mutex_init(&lock, "block cache");
-    register_low_memory_handler(&block_cache::LowMemoryHandler, this, 0);
+    register_low_resource_handler(&block_cache::LowMemoryHandler, this,
+        B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY, 0);
 }
@@ -802,7 +803,7 @@ block_cache::~block_cache()
 {
     deleting = true;
 
-    unregister_low_memory_handler(&block_cache::LowMemoryHandler, this);
+    unregister_low_resource_handler(&block_cache::LowMemoryHandler, this);
 
     mutex_destroy(&lock);
 
@@ -965,7 +966,7 @@ block_cache::RemoveUnusedBlocks(int32 maxAccessed, int32 count)
 
 
 void
-block_cache::LowMemoryHandler(void *data, int32 level)
+block_cache::LowMemoryHandler(void *data, uint32 resources, int32 level)
 {
     block_cache *cache = (block_cache *)data;
     MutexLocker locker(&cache->lock);
@@ -984,18 +985,19 @@ block_cache::LowMemoryHandler(void *data, int32 level)
 
     int32 free = 1;
     int32 accessed = 1;
-    switch (vm_low_memory_state()) {
-        case B_NO_LOW_MEMORY:
+    switch (low_resource_state(
+            B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY)) {
+        case B_NO_LOW_RESOURCE:
             return;
-        case B_LOW_MEMORY_NOTE:
+        case B_LOW_RESOURCE_NOTE:
             free = 50;
             accessed = 2;
             break;
-        case B_LOW_MEMORY_WARNING:
+        case B_LOW_RESOURCE_WARNING:
             free = 200;
             accessed = 10;
             break;
-        case B_LOW_MEMORY_CRITICAL:
+        case B_LOW_RESOURCE_CRITICAL:
             free = LONG_MAX;
             accessed = LONG_MAX;
             break;
@@ -1052,16 +1054,17 @@ if (block->original_data != NULL || block->parent_data != NULL)
     // (if there is enough memory left, we don't free any)
 
     int32 free = 1;
-    switch (vm_low_memory_state()) {
-        case B_NO_LOW_MEMORY:
+    switch (low_resource_state(
+            B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY)) {
+        case B_NO_LOW_RESOURCE:
             return;
-        case B_LOW_MEMORY_NOTE:
+        case B_LOW_RESOURCE_NOTE:
             free = 1;
             break;
-        case B_LOW_MEMORY_WARNING:
+        case B_LOW_RESOURCE_WARNING:
             free = 5;
             break;
-        case B_LOW_MEMORY_CRITICAL:
+        case B_LOW_RESOURCE_CRITICAL:
             free = 20;
             break;
     }
src/system/kernel/cache/file_cache.cpp (81 changes)
@@ -16,13 +16,13 @@
 #include <condition_variable.h>
 #include <file_cache.h>
 #include <generic_syscall.h>
+#include <low_resource_manager.h>
 #include <util/AutoLock.h>
 #include <util/kernel_cpp.h>
 #include <vfs.h>
 #include <vm.h>
 #include <vm_page.h>
 #include <vm_cache.h>
-#include <vm_low_memory.h>
 
 
 //#define TRACE_FILE_CACHE
@@ -117,9 +117,9 @@ push_access(file_cache_ref *ref, off_t offset, size_t bytes, bool isWrite)
 static void
 reserve_pages(file_cache_ref *ref, size_t reservePages, bool isWrite)
 {
-    if (vm_low_memory_state() != B_NO_LOW_MEMORY) {
+    if (low_resource_state(B_KERNEL_RESOURCE_PAGES) != B_NO_LOW_RESOURCE) {
         vm_cache *cache = ref->cache;
-        mutex_lock(&cache->lock);
+        cache->Lock();
 
         if (list_is_empty(&cache->consumers) && cache->areas == NULL
             && access_is_sequential(ref)) {
@@ -144,14 +144,14 @@ reserve_pages(file_cache_ref *ref, size_t reservePages, bool isWrite)
                     (page = it.Next()) != NULL && left > 0;) {
                 if (page->state != PAGE_STATE_MODIFIED
                     && page->state != PAGE_STATE_BUSY) {
-                    vm_cache_remove_page(cache, page);
+                    cache->RemovePage(page);
                     vm_page_set_state(page, PAGE_STATE_FREE);
                     left--;
                 }
             }
         }
-        mutex_unlock(&cache->lock);
+        cache->Unlock();
     }
 
     vm_page_reserve_pages(reservePages);
@@ -194,7 +194,7 @@ read_into_cache(file_cache_ref *ref, void *cookie, off_t offset,
 
         busyConditions[pageIndex - 1].Publish(page, "page");
 
-        vm_cache_insert_page(cache, page, offset + pos);
+        cache->InsertPage(page, offset + pos);
 
         addr_t virtualAddress;
         if (vm_get_physical_page(page->physical_page_number * B_PAGE_SIZE,
@@ -206,7 +206,7 @@ read_into_cache(file_cache_ref *ref, void *cookie, off_t offset,
     }
 
     push_access(ref, offset, bufferSize, false);
-    mutex_unlock(&cache->lock);
+    cache->Unlock();
     vm_page_unreserve_pages(lastReservedPages);
 
     // read file into reserved pages
@@ -227,11 +227,11 @@ read_into_cache(file_cache_ref *ref, void *cookie, off_t offset,
         }
     }
 
-    mutex_lock(&cache->lock);
+    cache->Lock();
 
     for (int32 i = 0; i < pageIndex; i++) {
         busyConditions[i].Unpublish();
-        vm_cache_remove_page(cache, pages[i]);
+        cache->RemovePage(pages[i]);
         vm_page_set_state(pages[i], PAGE_STATE_FREE);
     }
 
@@ -261,7 +261,7 @@ read_into_cache(file_cache_ref *ref, void *cookie, off_t offset,
     }
 
     reserve_pages(ref, reservePages, false);
-    mutex_lock(&cache->lock);
+    cache->Lock();
 
     // make the pages accessible in the cache
     for (int32 i = pageIndex; i-- > 0;) {
@@ -290,7 +290,7 @@ read_from_file(file_cache_ref *ref, void *cookie, off_t offset,
     vec.iov_len = bufferSize;
 
     push_access(ref, offset, bufferSize, false);
-    mutex_unlock(&ref->cache->lock);
+    ref->cache->Unlock();
    vm_page_unreserve_pages(lastReservedPages);
 
     status_t status = vfs_read_pages(ref->vnode, cookie, offset + pageOffset,
@@ -298,7 +298,7 @@ read_from_file(file_cache_ref *ref, void *cookie, off_t offset,
     if (status == B_OK)
         reserve_pages(ref, reservePages, false);
 
-    mutex_lock(&ref->cache->lock);
+    ref->cache->Lock();
 
     return status;
 }
@@ -338,7 +338,7 @@ write_to_cache(file_cache_ref *ref, void *cookie, off_t offset,
             PAGE_STATE_FREE, true);
         busyConditions[pageIndex - 1].Publish(page, "page");
 
-        vm_cache_insert_page(ref->cache, page, offset + pos);
+        ref->cache->InsertPage(page, offset + pos);
 
         addr_t virtualAddress;
         vm_get_physical_page(page->physical_page_number * B_PAGE_SIZE,
@@ -349,7 +349,7 @@ write_to_cache(file_cache_ref *ref, void *cookie, off_t offset,
     }
 
     push_access(ref, offset, bufferSize, true);
-    mutex_unlock(&ref->cache->lock);
+    ref->cache->Unlock();
     vm_page_unreserve_pages(lastReservedPages);
 
     // copy contents (and read in partially written pages first)
@@ -373,7 +373,7 @@ write_to_cache(file_cache_ref *ref, void *cookie, off_t offset,
         addr_t last = (addr_t)vecs[vecCount - 1].iov_base
             + vecs[vecCount - 1].iov_len - B_PAGE_SIZE;
 
-        if (offset + pageOffset + bufferSize == ref->cache->virtual_size) {
+        if (offset + pageOffset + bufferSize == ref->cache->virtual_end) {
             // the space in the page after this write action needs to be cleaned
             memset((void *)(last + lastPageOffset), 0,
                 B_PAGE_SIZE - lastPageOffset);
@@ -431,7 +431,7 @@ write_to_cache(file_cache_ref *ref, void *cookie, off_t offset,
     if (status == B_OK)
         reserve_pages(ref, reservePages, true);
 
-    mutex_lock(&ref->cache->lock);
+    ref->cache->Lock();
 
     // unmap the pages again
 
@@ -480,7 +480,7 @@ write_to_file(file_cache_ref *ref, void *cookie, off_t offset, int32 pageOffset,
     vec.iov_len = bufferSize;
 
     push_access(ref, offset, bufferSize, true);
-    mutex_unlock(&ref->cache->lock);
+    ref->cache->Unlock();
     vm_page_unreserve_pages(lastReservedPages);
 
     status_t status = B_OK;
@@ -508,7 +508,7 @@ write_to_file(file_cache_ref *ref, void *cookie, off_t offset, int32 pageOffset,
     if (status == B_OK)
         reserve_pages(ref, reservePages, true);
 
-    mutex_lock(&ref->cache->lock);
+    ref->cache->Lock();
 
     return status;
 }
@@ -551,7 +551,7 @@ cache_io(void *_cacheRef, void *cookie, off_t offset, addr_t buffer,
 
     file_cache_ref *ref = (file_cache_ref *)_cacheRef;
     vm_cache *cache = ref->cache;
-    off_t fileSize = cache->virtual_size;
+    off_t fileSize = cache->virtual_end;
     bool useBuffer = buffer != 0;
 
     TRACE(("cache_io(ref = %p, offset = %Ld, buffer = %p, size = %lu, %s)\n",
@@ -579,14 +579,18 @@ cache_io(void *_cacheRef, void *cookie, off_t offset, addr_t buffer,
     if (doWrite) {
         // in low memory situations, we bypass the cache beyond a
         // certain I/O size
-        if (size >= BYPASS_IO_SIZE && vm_low_memory_state() != B_NO_LOW_MEMORY)
+        if (size >= BYPASS_IO_SIZE
+            && low_resource_state(B_KERNEL_RESOURCE_PAGES)
+                != B_NO_LOW_RESOURCE) {
             function = write_to_file;
-        else
+        } else
             function = write_to_cache;
     } else {
-        if (size >= BYPASS_IO_SIZE && vm_low_memory_state() != B_NO_LOW_MEMORY)
+        if (size >= BYPASS_IO_SIZE
+            && low_resource_state(B_KERNEL_RESOURCE_PAGES)
+                != B_NO_LOW_RESOURCE) {
             function = read_from_file;
-        else
+        } else
             function = read_into_cache;
     }
 
@@ -604,11 +608,11 @@ cache_io(void *_cacheRef, void *cookie, off_t offset, addr_t buffer,
     size_t reservePages = 0;
 
     reserve_pages(ref, lastReservedPages, doWrite);
-    MutexLocker locker(cache->lock);
+    AutoLocker<VMCache> locker(cache);
 
     while (bytesLeft > 0) {
         // check if this page is already in memory
-        vm_page *page = vm_cache_lookup_page(cache, offset);
+        vm_page *page = cache->LookupPage(offset);
         if (page != NULL) {
             // The page may be busy - since we need to unlock the cache sometime
             // in the near future, we need to satisfy the request of the pages
@@ -764,9 +768,8 @@ cache_prefetch_vnode(struct vnode *vnode, off_t offset, size_t size)
     if (vfs_get_vnode_cache(vnode, &cache, false) != B_OK)
         return;
 
-    file_cache_ref *ref
-        = (file_cache_ref *)((vnode_store *)cache->store)->file_cache_ref;
-    off_t fileSize = cache->virtual_size;
+    file_cache_ref *ref = ((VMVnodeCache*)cache)->FileCacheRef();
+    off_t fileSize = cache->virtual_end;
 
     if (size > fileSize)
         size = fileSize;
@@ -776,7 +779,8 @@ cache_prefetch_vnode(struct vnode *vnode, off_t offset, size_t size)
         size = 4 * 1024 * 1024;
 
     cache_io(ref, NULL, offset, 0, &size, false);
-    vm_cache_release_ref(cache);
+    cache->Lock();
+    cache->ReleaseRefAndUnlock();
 }
@@ -806,10 +810,9 @@ cache_node_opened(struct vnode *vnode, int32 fdType, vm_cache *cache,
 
     off_t size = -1;
     if (cache != NULL) {
-        file_cache_ref *ref = (file_cache_ref *)
-            ((vnode_store *)cache->store)->file_cache_ref;
+        file_cache_ref *ref = ((VMVnodeCache*)cache)->FileCacheRef();
         if (ref != NULL)
-            size = cache->virtual_size;
+            size = cache->virtual_end;
     }
 
     sCacheModule->node_opened(vnode, fdType, mountID, parentID, vnodeID, name,
@@ -899,8 +902,8 @@ file_cache_create(dev_t mountID, ino_t vnodeID, off_t size)
     if (vfs_get_vnode_cache(ref->vnode, &ref->cache, true) != B_OK)
         goto err1;
 
-    ref->cache->virtual_size = size;
-    ((vnode_store *)ref->cache->store)->file_cache_ref = ref;
+    ref->cache->virtual_end = size;
+    ((VMVnodeCache*)ref->cache)->SetFileCacheRef(ref);
     return ref;
 
 err1:
@@ -919,7 +922,7 @@ file_cache_delete(void *_cacheRef)
 
     TRACE(("file_cache_delete(ref = %p)\n", ref));
 
-    vm_cache_release_ref(ref->cache);
+    ref->cache->ReleaseRef();
     delete ref;
 }
@@ -934,9 +937,9 @@ file_cache_set_size(void *_cacheRef, off_t newSize)
     if (ref == NULL)
         return B_OK;
 
-    MutexLocker _(ref->cache->lock);
+    AutoLocker<VMCache> _(ref->cache);
 
-    off_t offset = ref->cache->virtual_size;
+    off_t offset = ref->cache->virtual_end;
     off_t size = newSize;
     if (offset > newSize) {
         size = offset - newSize;
@@ -944,7 +947,7 @@ file_cache_set_size(void *_cacheRef, off_t newSize)
     } else
         size = newSize - offset;
 
-    return vm_cache_resize(ref->cache, newSize);
+    return ref->cache->Resize(newSize);
 }
@@ -955,7 +958,7 @@ file_cache_sync(void *_cacheRef)
     if (ref == NULL)
         return B_BAD_VALUE;
 
-    return vm_cache_write_modified(ref->cache, true);
+    return ref->cache->WriteModified(true);
 }
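Note the locker swap in cache_io() and file_cache_set_size(): MutexLocker on the cache's embedded mutex becomes AutoLocker<VMCache>, which works because VMCache itself now exposes Lock()/Unlock(). A hedged sketch of the idiom (offset is a hypothetical input):

    // AutoLocker<T> calls T::Lock() in its constructor and T::Unlock()
    // in its destructor, so every return path unlocks the cache.
    AutoLocker<VMCache> locker(ref->cache);
    vm_page* page = ref->cache->LookupPage(offset);
    if (page == NULL)
        return B_ERROR;  // hypothetical early exit; still unlocks correctly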
src/system/kernel/cache/file_map.cpp (1 change)
@@ -20,7 +20,6 @@
 #include <vm.h>
 #include <vm_page.h>
 #include <vm_cache.h>
-#include <vm_low_memory.h>
 
 
 //#define TRACE_FILE_MAP
src/system/kernel/cache/vnode_store.cpp (106 changes)
@@ -1,9 +1,9 @@
 /*
+ * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
  * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
  * Distributed under the terms of the MIT License.
  */
 
-
 #include "vnode_store.h"
 
 #include <stdlib.h>
@@ -13,25 +13,24 @@
 #include <vfs.h>
 
 
-static void
-store_destroy(struct vm_store *store)
-{
-    free(store);
-}
-
-
-static status_t
-store_commit(struct vm_store *_store, off_t size)
+status_t
+VMVnodeCache::Init(struct vnode *vnode)
 {
-    vnode_store *store = (vnode_store *)_store;
+    status_t error = VMCache::Init(CACHE_TYPE_VNODE);
+    if (error != B_OK)
+        return error;
+
+    fVnode = vnode;
+    fFileCacheRef = NULL;
+
+    vfs_vnode_to_node_ref(fVnode, &fDevice, &fInode);
 
-    store->vm.committed_size = size;
     return B_OK;
 }
 
 
-static bool
-store_has_page(struct vm_store *_store, off_t offset)
+bool
+VMVnodeCache::HasPage(off_t offset)
 {
     // We always pretend to have the page - even if it's beyond the size of
     // the file. The read function will only cut down the size of the read,
@@ -40,14 +39,13 @@ store_has_page(struct vm_store *_store, off_t offset)
 }
 
 
-static status_t
-store_read(struct vm_store *_store, off_t offset, const iovec *vecs,
-    size_t count, size_t *_numBytes, bool fsReenter)
+status_t
+VMVnodeCache::Read(off_t offset, const iovec *vecs, size_t count,
+    size_t *_numBytes, bool fsReenter)
 {
-    vnode_store *store = (vnode_store *)_store;
     size_t bytesUntouched = *_numBytes;
 
-    status_t status = vfs_read_pages(store->vnode, NULL, offset, vecs, count,
+    status_t status = vfs_read_pages(fVnode, NULL, offset, vecs, count,
         _numBytes, fsReenter);
 
     bytesUntouched -= *_numBytes;
@@ -73,79 +71,47 @@ store_read(struct vm_store *_store, off_t offset, const iovec *vecs,
 }
 
 
-static status_t
-store_write(struct vm_store *_store, off_t offset, const iovec *vecs,
-    size_t count, size_t *_numBytes, bool fsReenter)
+status_t
+VMVnodeCache::Write(off_t offset, const iovec *vecs, size_t count,
+    size_t *_numBytes, bool fsReenter)
 {
-    vnode_store *store = (vnode_store *)_store;
-    return vfs_write_pages(store->vnode, NULL, offset, vecs, count, _numBytes,
+    return vfs_write_pages(fVnode, NULL, offset, vecs, count, _numBytes,
         fsReenter);
 }
 
 
-static status_t
-store_acquire_unreferenced_ref(struct vm_store *_store)
+status_t
+VMVnodeCache::Fault(struct vm_address_space *aspace, off_t offset)
+{
+    return B_BAD_HANDLER;
+}
+
+
+status_t
+VMVnodeCache::AcquireUnreferencedStoreRef()
 {
-    vnode_store *store = (vnode_store *)_store;
     struct vnode *vnode;
-    status_t status = vfs_get_vnode(store->device, store->inode, false, &vnode);
+    status_t status = vfs_get_vnode(fDevice, fInode, false, &vnode);
 
     // If successful, update the store's vnode pointer, so that release_ref()
     // won't use a stale pointer.
     if (status == B_OK)
-        store->vnode = vnode;
+        fVnode = vnode;
 
     return status;
 }
 
 
-static void
-store_acquire_ref(struct vm_store *_store)
+void
+VMVnodeCache::AcquireStoreRef()
 {
-    vnode_store *store = (vnode_store *)_store;
-    vfs_acquire_vnode(store->vnode);
+    vfs_acquire_vnode(fVnode);
 }
 
 
-static void
-store_release_ref(struct vm_store *_store)
+void
+VMVnodeCache::ReleaseStoreRef()
 {
-    vnode_store *store = (vnode_store *)_store;
-    vfs_put_vnode(store->vnode);
-}
-
-
-static vm_store_ops sStoreOps = {
-    &store_destroy,
-    &store_commit,
-    &store_has_page,
-    &store_read,
-    &store_write,
-    NULL, /* fault */
-    &store_acquire_unreferenced_ref,
-    &store_acquire_ref,
-    &store_release_ref
-};
-
-
-// #pragma mark -
-
-
-extern "C" vm_store *
-vm_create_vnode_store(struct vnode *vnode)
-{
-    vnode_store *store = (vnode_store *)malloc(sizeof(struct vnode_store));
-    if (store == NULL)
-        return NULL;
-
-    store->vm.ops = &sStoreOps;
-    store->vm.cache = NULL;
-    store->vm.committed_size = 0;
-
-    store->vnode = vnode;
-    store->file_cache_ref = NULL;
-
-    vfs_vnode_to_node_ref(vnode, &store->device, &store->inode);
-    return &store->vm;
+    vfs_put_vnode(fVnode);
 }
src/system/kernel/cache/vnode_store.h (38 changes)
@@ -1,4 +1,5 @@
 /*
+ * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
  * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
  * Distributed under the terms of the MIT License.
  */
@@ -9,12 +10,37 @@
 #include <vm_types.h>
 
 
-struct vnode_store {
-    vm_store vm;
-    struct vnode* vnode;
-    dev_t device;
-    ino_t inode;
-    void* file_cache_ref;
+struct file_cache_ref;
+
+
+class VMVnodeCache : public VMCache {
+public:
+    status_t Init(struct vnode *vnode);
+
+    virtual bool HasPage(off_t offset);
+
+    virtual status_t Read(off_t offset, const iovec *vecs, size_t count,
+        size_t *_numBytes, bool fsReenter);
+    virtual status_t Write(off_t offset, const iovec *vecs, size_t count,
+        size_t *_numBytes, bool fsReenter);
+
+    virtual status_t Fault(struct vm_address_space *aspace, off_t offset);
+
+    virtual status_t AcquireUnreferencedStoreRef();
+    virtual void AcquireStoreRef();
+    virtual void ReleaseStoreRef();
+
+    void SetFileCacheRef(file_cache_ref* ref)
+        { fFileCacheRef = ref; }
+    file_cache_ref* FileCacheRef() const
+        { return fFileCacheRef; }
+
+private:
+    struct vnode*   fVnode;
+    dev_t           fDevice;
+    ino_t           fInode;
+    file_cache_ref* fFileCacheRef;
 };
 
 
 #endif /* VNODE_STORE_H */
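VMVnodeCache shows the pattern for the other store-to-subclass conversions: override just the backing-store virtuals that differ from the VMCache defaults. A hedged sketch of a minimal further subclass — MyDeviceCache is hypothetical, not this commit's device cache, and CACHE_TYPE_DEVICE is assumed to exist alongside the CACHE_TYPE_VNODE used above:

    class MyDeviceCache : public VMCache {
    public:
        status_t Init(addr_t baseAddress)
        {
            fBaseAddress = baseAddress;  // fixed physical backing
            return VMCache::Init(CACHE_TYPE_DEVICE);
        }

        virtual bool HasPage(off_t offset)
        {
            return true;  // device memory backs every offset
        }

    private:
        addr_t fBaseAddress;
    };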
@@ -249,6 +249,15 @@ ConditionVariable::Add(ConditionVariableEntry* entry)
 }
 
 
+status_t
+ConditionVariable::Wait(uint32 flags, bigtime_t timeout)
+{
+    ConditionVariableEntry entry;
+    Add(&entry);
+    return entry.Wait(flags, timeout);
+}
+
+
 /*static*/ void
 ConditionVariable::ListAll()
 {
@@ -283,8 +292,6 @@ ConditionVariable::Dump() const
 void
 ConditionVariable::_Notify(bool all, bool threadsLocked)
 {
-    ASSERT(fObject != NULL);
-
     InterruptsLocker _;
     SpinLocker threadLocker(threadsLocked ? NULL : &thread_spinlock);
     SpinLocker locker(sConditionVariablesLock);
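The added ConditionVariable::Wait() folds the usual create-entry/Add()/Wait() sequence into one call, exactly as the low resource manager below uses it. A hedged usage sketch (sDoneCondition is a hypothetical, already Init()ed condition variable):

    // Waiter: no explicit ConditionVariableEntry needed any more.
    status_t error = sDoneCondition.Wait(B_RELATIVE_TIMEOUT, 1000000);
        // blocks up to 1 s; B_OK if notified in time

    // Notifier (unchanged): wake all current waiters.
    sDoneCondition.NotifyAll();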
@@ -566,7 +566,7 @@ static status_t
 new_node(struct devfs *fs, const char *path, struct devfs_vnode **_node,
     struct devfs_vnode **_dir)
 {
-    ASSERT_LOCKED_MUTEX(&fs->lock);
+    ASSERT_LOCKED_RECURSIVE(&fs->lock);
 
     // copy the path over to a temp buffer so we can munge it
     KPath tempPath(path);
@@ -118,7 +118,10 @@ find_image_at_address(addr_t address)
     struct hash_iterator iterator;
     struct elf_image_info *image;
 
-    ASSERT_LOCKED_MUTEX(&sImageMutex);
+#if KDEBUG
+    if (!debug_debugger_running())
+        ASSERT_LOCKED_MUTEX(&sImageMutex);
+#endif
 
     hash_open(sImagesHash, &iterator);
 
@@ -40,6 +40,7 @@
 #include <khash.h>
 #include <KPath.h>
 #include <lock.h>
+#include <low_resource_manager.h>
 #include <syscalls.h>
 #include <syscall_restart.h>
 #include <util/AutoLock.h>
@@ -47,7 +48,6 @@
 #include <vfs.h>
 #include <vm.h>
 #include <vm_cache.h>
-#include <vm_low_memory.h>
 
 #include "fifo.h"
 
@@ -767,7 +767,7 @@ free_vnode(struct vnode *vnode, bool reenter)
 
     // if we have a vm_cache attached, remove it
     if (vnode->cache)
-        vm_cache_release_ref(vnode->cache);
+        vnode->cache->ReleaseRef();
 
     vnode->cache = NULL;
 
@@ -814,10 +814,12 @@ dec_vnode_ref_count(struct vnode *vnode, bool alwaysFree, bool reenter)
     } else {
         list_add_item(&sUnusedVnodeList, vnode);
         if (++sUnusedVnodes > kMaxUnusedVnodes
-            && vm_low_memory_state() != B_NO_LOW_MEMORY) {
+            && low_resource_state(
+                    B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY)
+                != B_NO_LOW_RESOURCE) {
             // there are too many unused vnodes so we free the oldest one
-            // ToDo: evaluate this mechanism
-            vnode = (struct vnode *)list_remove_head_item(&sUnusedVnodeList);
+            // TODO: evaluate this mechanism
+            vnode = (struct vnode*)list_remove_head_item(&sUnusedVnodeList);
             vnode->busy = true;
             freeNode = true;
             sUnusedVnodes--;
@@ -1015,21 +1017,21 @@ put_vnode(struct vnode *vnode)
 
 
 static void
-vnode_low_memory_handler(void */*data*/, int32 level)
+vnode_low_resource_handler(void */*data*/, uint32 resources, int32 level)
 {
-    TRACE(("vnode_low_memory_handler(level = %ld)\n", level));
+    TRACE(("vnode_low_resource_handler(level = %ld)\n", level));
 
     uint32 count = 1;
     switch (level) {
-        case B_NO_LOW_MEMORY:
+        case B_NO_LOW_RESOURCE:
             return;
-        case B_LOW_MEMORY_NOTE:
+        case B_LOW_RESOURCE_NOTE:
             count = sUnusedVnodes / 100;
             break;
-        case B_LOW_MEMORY_WARNING:
+        case B_LOW_RESOURCE_WARNING:
             count = sUnusedVnodes / 10;
             break;
-        case B_LOW_MEMORY_CRITICAL:
+        case B_LOW_RESOURCE_CRITICAL:
             count = sUnusedVnodes;
             break;
     }
@@ -1054,7 +1056,7 @@ vnode_low_memory_handler(void */*data*/, int32 level)
         mutex_unlock(&sVnodeMutex);
 
         if (vnode->cache != NULL)
-            vm_cache_write_modified(vnode->cache, false);
+            vnode->cache->WriteModified(false);
 
         dec_vnode_ref_count(vnode, true, false);
             // this should free the vnode when it's still unused
@@ -2788,7 +2790,7 @@ dump_vnode_caches(int argc, char **argv)
             continue;
 
         kprintf("%p%4ld%10Ld %p %8Ld%8ld\n", vnode, vnode->device, vnode->id,
-            vnode->cache, (vnode->cache->virtual_size + B_PAGE_SIZE - 1)
+            vnode->cache, (vnode->cache->virtual_end + B_PAGE_SIZE - 1)
                 / B_PAGE_SIZE, vnode->cache->page_count);
     }
 
@@ -3868,7 +3870,8 @@ vfs_disconnect_vnode(dev_t mountID, ino_t vnodeID)
 extern "C" void
 vfs_free_unused_vnodes(int32 level)
 {
-    vnode_low_memory_handler(NULL, level);
+    vnode_low_resource_handler(NULL,
+        B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY, level);
 }
 
@@ -3914,7 +3917,7 @@ extern "C" status_t
 vfs_get_vnode_cache(struct vnode *vnode, vm_cache **_cache, bool allocate)
 {
     if (vnode->cache != NULL) {
-        vm_cache_acquire_ref(vnode->cache);
+        vnode->cache->AcquireRef();
         *_cache = vnode->cache;
         return B_OK;
     }
@@ -3940,12 +3943,13 @@ vfs_get_vnode_cache(struct vnode *vnode, vm_cache **_cache, bool allocate)
             status = B_BAD_VALUE;
     }
 
-    mutex_unlock(&sVnodeMutex);
-
     if (status == B_OK) {
-        vm_cache_acquire_ref(vnode->cache);
+        vnode->cache->AcquireRef();
         *_cache = vnode->cache;
     }
 
+    mutex_unlock(&sVnodeMutex);
     return status;
 }
@@ -4392,7 +4396,8 @@ vfs_init(kernel_args *args)
     add_debugger_command("vnode_usage", &dump_vnode_usage, "info about vnode usage");
 #endif
 
-    register_low_memory_handler(&vnode_low_memory_handler, NULL, 0);
+    register_low_resource_handler(&vnode_low_resource_handler, NULL,
+        B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY, 0);
 
     file_map_init();
 
@@ -6707,7 +6712,7 @@ fs_sync(dev_t device)
         put_vnode(previousVnode);
 
         if (vnode->cache != NULL)
-            vm_cache_write_modified(vnode->cache, false);
+            vnode->cache->WriteModified(false);
 
         // the next vnode might change until we lock the vnode list again,
         // but this vnode won't go away since we keep a reference to it.
@@ -439,6 +439,20 @@ mutex_destroy(mutex* lock)
 }
 
 
+status_t
+mutex_switch_lock(mutex* from, mutex* to)
+{
+    InterruptsSpinLocker locker(thread_spinlock);
+
+#if !defined(KDEBUG)
+    if (atomic_add(&from->count, 1) < -1)
+#endif
+        _mutex_unlock(from, true);
+
+    return mutex_lock_threads_locked(to);
+}
+
+
 status_t
 _mutex_lock(mutex* lock, bool threadsLocked)
 {
@@ -496,9 +510,10 @@ _mutex_lock(mutex* lock, bool threadsLocked)
 
 
 void
-_mutex_unlock(mutex* lock)
+_mutex_unlock(mutex* lock, bool threadsLocked)
 {
-    InterruptsSpinLocker _(thread_spinlock);
+    // lock only, if !threadsLocked
+    InterruptsSpinLocker locker(thread_spinlock, false, !threadsLocked);
 
 #ifdef KDEBUG
     if (thread_get_current_thread_id() != lock->holder) {
src/system/kernel/low_resource_manager.cpp (new file, 393 lines)
@@ -0,0 +1,393 @@
+/*
+ * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2005-2008, Axel Dörfler, axeld@pinc-software.de.
+ * Distributed under the terms of the MIT License.
+ */
+
+
+#include <low_resource_manager.h>
+
+#include <new>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <KernelExport.h>
+
+#include <condition_variable.h>
+#include <elf.h>
+#include <lock.h>
+#include <sem.h>
+#include <util/AutoLock.h>
+#include <util/DoublyLinkedList.h>
+#include <vm_page.h>
+#include <vm_priv.h>
+
+
+//#define TRACE_LOW_RESOURCE_MANAGER
+#ifdef TRACE_LOW_RESOURCE_MANAGER
+#	define TRACE(x) dprintf x
+#else
+#	define TRACE(x) ;
+#endif
+
+
+struct low_resource_handler
+        : public DoublyLinkedListLinkImpl<low_resource_handler> {
+    low_resource_func function;
+    void *data;
+    uint32 resources;
+    int32 priority;
+};
+
+typedef DoublyLinkedList<low_resource_handler> HandlerList;
+
+
+static const bigtime_t kLowResourceInterval = 3000000;  // 3 secs
+static const bigtime_t kWarnResourceInterval = 500000;  // 0.5 secs
+
+// page limits
+static const size_t kNotePagesLimit = 2048;
+static const size_t kWarnPagesLimit = 256;
+static const size_t kCriticalPagesLimit = 32;
+
+// memory limits
+static const off_t kMinNoteMemoryLimit = 16 * 1024 * 1024;
+static const off_t kMinWarnMemoryLimit = 4 * 1024 * 1024;
+static const off_t kMinCriticalMemoryLimit = 1 * 1024 * 1024;
+static off_t sNoteMemoryLimit;
+static off_t sWarnMemoryLimit;
+static off_t sCriticalMemoryLimit;
+
+
+static int32 sLowPagesState = B_NO_LOW_RESOURCE;
+static int32 sLowMemoryState = B_NO_LOW_RESOURCE;
+static int32 sLowSemaphoresState = B_NO_LOW_RESOURCE;
+static uint32 sLowResources = 0;  // resources that are not B_NO_LOW_RESOURCE
+static bigtime_t sLastMeasurement;
+
+static recursive_lock sLowResourceLock
+    = RECURSIVE_LOCK_INITIALIZER("low resource");
+static sem_id sLowResourceWaitSem;
+static HandlerList sLowResourceHandlers;
+
+static ConditionVariable sLowResourceWaiterCondition;
+
+
+static int32
+low_resource_state_no_update(uint32 resources)
+{
+    int32 state = B_NO_LOW_RESOURCE;
+
+    if ((resources & B_KERNEL_RESOURCE_PAGES) != 0)
+        state = max_c(state, sLowPagesState);
+    if ((resources & B_KERNEL_RESOURCE_MEMORY) != 0)
+        state = max_c(state, sLowMemoryState);
+    if ((resources & B_KERNEL_RESOURCE_SEMAPHORES) != 0)
+        state = max_c(state, sLowSemaphoresState);
+
+    return state;
+}
+
+
+/*! Calls low resource handlers for the given resources.
+    sLowResourceLock must be held.
+*/
+static void
+call_handlers(uint32 lowResources)
+{
+    if (sLowResourceHandlers.IsEmpty())
+        return;
+
+    // Add a marker, so we can drop the lock while calling the handlers and
+    // still iterate safely.
+    low_resource_handler marker;
+    sLowResourceHandlers.Insert(&marker, false);
+
+    while (low_resource_handler *handler
+            = sLowResourceHandlers.GetNext(&marker)) {
+        // swap with handler
+        sLowResourceHandlers.Swap(&marker, handler);
+        marker.priority = handler->priority;
+
+        int32 resources = handler->resources & lowResources;
+        if (resources != 0) {
+            recursive_lock_unlock(&sLowResourceLock);
+            handler->function(handler->data, resources,
+                low_resource_state_no_update(resources));
+            recursive_lock_lock(&sLowResourceLock);
+        }
+    }
+
+    // remove marker
+    sLowResourceHandlers.Remove(&marker);
+}
+
+
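A note on call_handlers(): it drops sLowResourceLock around each callback, so it cannot iterate naively. Instead a marker element is kept in the list, and Swap() — the function fixed by this very commit — moves the marker past each handler before the lock is released. The invariant, sketched:

    // Before unlocking: ... -> handler -> marker -> ...   (after Swap)
    // While the lock is dropped the handler may even unregister itself;
    // the marker stays in place, so GetNext(&marker) resumes at whatever
    // element follows the marker once the lock is retaken.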
+static void
+compute_state(void)
+{
+    sLastMeasurement = system_time();
+
+    sLowResources = B_ALL_KERNEL_RESOURCES;
+
+    // free pages state
+    uint32 freePages = vm_page_num_free_pages();
+
+    if (freePages < kCriticalPagesLimit) {
+        sLowPagesState = B_LOW_RESOURCE_CRITICAL;
+    } else if (freePages < kWarnPagesLimit) {
+        sLowPagesState = B_LOW_RESOURCE_WARNING;
+    } else if (freePages < kNotePagesLimit) {
+        sLowPagesState = B_LOW_RESOURCE_NOTE;
+    } else {
+        sLowPagesState = B_NO_LOW_RESOURCE;
+        sLowResources &= ~B_KERNEL_RESOURCE_PAGES;
+    }
+
+    // free memory state
+    off_t freeMemory = vm_available_not_needed_memory();
+
+    if (freeMemory < sCriticalMemoryLimit) {
+        sLowMemoryState = B_LOW_RESOURCE_CRITICAL;
+    } else if (freeMemory < sWarnMemoryLimit) {
+        sLowMemoryState = B_LOW_RESOURCE_WARNING;
+    } else if (freeMemory < sNoteMemoryLimit) {
+        sLowMemoryState = B_LOW_RESOURCE_NOTE;
+    } else {
+        sLowMemoryState = B_NO_LOW_RESOURCE;
+        sLowResources &= ~B_KERNEL_RESOURCE_MEMORY;
+    }
+
+    // free semaphores state
+    uint32 maxSems = sem_max_sems();
+    uint32 freeSems = maxSems - sem_used_sems();
+
+    if (freeSems < maxSems >> 16) {
+        sLowSemaphoresState = B_LOW_RESOURCE_CRITICAL;
+    } else if (freeSems < maxSems >> 8) {
+        sLowSemaphoresState = B_LOW_RESOURCE_WARNING;
+    } else if (freeSems < maxSems >> 4) {
+        sLowSemaphoresState = B_LOW_RESOURCE_NOTE;
+    } else {
+        sLowSemaphoresState = B_NO_LOW_RESOURCE;
+        sLowResources &= ~B_KERNEL_RESOURCE_SEMAPHORES;
+    }
+}
+
+
+static int32
+low_resource_manager(void *)
+{
+    bigtime_t timeout = kLowResourceInterval;
+    while (true) {
+        int32 state = low_resource_state_no_update(B_ALL_KERNEL_RESOURCES);
+        if (state != B_LOW_RESOURCE_CRITICAL) {
+            acquire_sem_etc(sLowResourceWaitSem, 1, B_RELATIVE_TIMEOUT,
+                timeout);
+        }
+
+        RecursiveLocker _(&sLowResourceLock);
+
+        compute_state();
+        state = low_resource_state_no_update(B_ALL_KERNEL_RESOURCES);
+
+        TRACE(("low_resource_manager: state = %ld, %ld free pages, %lld free "
+            "memory, %lu free semaphores\n", state, vm_page_num_free_pages(),
+            vm_available_not_needed_memory(),
+            sem_max_sems() - sem_used_sems()));
+
+        if (state < B_LOW_RESOURCE_NOTE)
+            continue;
+
+        call_handlers(sLowResources);
+
+        if (state == B_LOW_RESOURCE_WARNING)
+            timeout = kWarnResourceInterval;
+        else
+            timeout = kLowResourceInterval;
+
+        sLowResourceWaiterCondition.NotifyAll();
+    }
+    return 0;
+}
+
+
static int
|
||||
dump_handlers(int argc, char **argv)
|
||||
{
|
||||
HandlerList::Iterator iterator = sLowResourceHandlers.GetIterator();
|
||||
kprintf("function data resources prio function-name\n");
|
||||
|
||||
while (iterator.HasNext()) {
|
||||
low_resource_handler *handler = iterator.Next();
|
||||
|
||||
const char* symbol = NULL;
|
||||
elf_debug_lookup_symbol_address((addr_t)handler->function, NULL,
|
||||
&symbol, NULL, NULL);
|
||||
|
||||
char resources[16];
|
||||
snprintf(resources, sizeof(resources), "%c %c %c",
|
||||
handler->resources & B_KERNEL_RESOURCE_PAGES ? 'p' : ' ',
|
||||
handler->resources & B_KERNEL_RESOURCE_MEMORY ? 'm' : ' ',
|
||||
handler->resources & B_KERNEL_RESOURCE_SEMAPHORES ? 's' : ' ');
|
||||
|
||||
kprintf("%p %p %s %4ld %s\n", handler->function, handler->data,
|
||||
resources, handler->priority, symbol);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}


// #pragma mark - private kernel API


/*!	Notifies the low resource manager that a resource is lacking. If \a flags
	and \a timeout specify a timeout, the function will wait until the low
	resource manager has finished its next iteration of calling low resource
	handlers, or until the timeout occurs (whichever happens first).
*/
void
low_resource(uint32 resource, uint64 requirements, uint32 flags, uint32 timeout)
{
	// TODO: take requirements into account

	switch (resource) {
		case B_KERNEL_RESOURCE_PAGES:
			vm_schedule_page_scanner(requirements);
			break;
		case B_KERNEL_RESOURCE_MEMORY:
			break;
		case B_KERNEL_RESOURCE_SEMAPHORES:
			break;
	}

	release_sem(sLowResourceWaitSem);

	if ((flags & B_RELATIVE_TIMEOUT) == 0 || timeout > 0)
		sLowResourceWaiterCondition.Wait(flags, timeout);
}
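
// Illustrative call (mirroring the page stealer in the vm_page.cpp hunks
// below): request pages with a relative timeout of 0, i.e. just kick the
// manager without blocking:
//
//	low_resource(B_KERNEL_RESOURCE_PAGES, count, B_RELATIVE_TIMEOUT, 0);
//
// With a non-zero relative timeout the caller would instead block in
// sLowResourceWaiterCondition until the next handler iteration finishes.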


int32
low_resource_state(uint32 resources)
{
	recursive_lock_lock(&sLowResourceLock);

	if (system_time() - sLastMeasurement > 500000)
		compute_state();

	int32 state = low_resource_state_no_update(resources);

	recursive_lock_unlock(&sLowResourceLock);

	return state;
}


status_t
low_resource_manager_init(void)
{
	new(&sLowResourceHandlers) HandlerList;
		// static initializers do not work in the kernel,
		// so we have to do it here, manually

	sLowResourceWaiterCondition.Init(NULL, "low resource waiters");

	// compute the free memory limits
	off_t totalMemory = (off_t)vm_page_num_pages() * B_PAGE_SIZE;
	sNoteMemoryLimit = totalMemory / 16;
	if (sNoteMemoryLimit < kMinNoteMemoryLimit) {
		sNoteMemoryLimit = kMinNoteMemoryLimit;
		sWarnMemoryLimit = kMinWarnMemoryLimit;
		sCriticalMemoryLimit = kMinCriticalMemoryLimit;
	} else {
		sWarnMemoryLimit = totalMemory / 64;
		sCriticalMemoryLimit = totalMemory / 256;
	}

	return B_OK;
}


status_t
low_resource_manager_init_post_thread(void)
{
	sLowResourceWaitSem = create_sem(0, "low resource wait");
	if (sLowResourceWaitSem < B_OK)
		return sLowResourceWaitSem;

	thread_id thread = spawn_kernel_thread(&low_resource_manager,
		"low resource manager", B_LOW_PRIORITY, NULL);
	send_signal_etc(thread, SIGCONT, B_DO_NOT_RESCHEDULE);

	add_debugger_command("low_resource", &dump_handlers,
		"Dump list of low resource handlers");
	return B_OK;
}


status_t
unregister_low_resource_handler(low_resource_func function, void *data)
{
	TRACE(("unregister_low_resource_handler(function = %p, data = %p)\n",
		function, data));

	RecursiveLocker locker(&sLowResourceLock);
	HandlerList::Iterator iterator = sLowResourceHandlers.GetIterator();

	while (iterator.HasNext()) {
		low_resource_handler *handler = iterator.Next();

		if (handler->function == function && handler->data == data) {
			sLowResourceHandlers.Remove(handler);
			free(handler);
			return B_OK;
		}
	}

	return B_ENTRY_NOT_FOUND;
}


/*!	Registers a low resource handler. The higher the \a priority, the earlier
	the handler will be called in low resource situations.
*/
status_t
register_low_resource_handler(low_resource_func function, void *data,
	uint32 resources, int32 priority)
{
	TRACE(("register_low_resource_handler(function = %p, data = %p)\n",
		function, data));

	low_resource_handler *newHandler = (low_resource_handler *)malloc(
		sizeof(low_resource_handler));
	if (newHandler == NULL)
		return B_NO_MEMORY;

	newHandler->function = function;
	newHandler->data = data;
	newHandler->resources = resources;
	newHandler->priority = priority;

	RecursiveLocker locker(&sLowResourceLock);

	// sort it in after priority (higher priority comes first)

	HandlerList::ReverseIterator iterator
		= sLowResourceHandlers.GetReverseIterator();
	low_resource_handler *last = NULL;
	while (iterator.HasNext()) {
		low_resource_handler *handler = iterator.Next();

		if (handler->priority >= priority) {
			sLowResourceHandlers.Insert(last, newHandler);
			return B_OK;
		}
		last = handler;
	}

	sLowResourceHandlers.Add(newHandler, false);
	return B_OK;
}
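
// Illustrative pairing, taken from the Slab.cpp hunks below: the object
// cache registers its reclaim callback for pages and memory at priority 5,
// and removes it again when the cache is destroyed:
//
//	register_low_resource_handler(object_cache_low_memory, cache,
//		B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY, 5);
//	...
//	unregister_low_resource_handler(object_cache_low_memory, cache);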
@ -32,6 +32,7 @@
#include <kscheduler.h>
#include <ksyscalls.h>
#include <lock.h>
#include <low_resource_manager.h>
#include <messaging.h>
#include <Notifications.h>
#include <port.h>
@ -128,6 +129,7 @@ _start(kernel_args *bootKernelArgs, int currentCPU)
	vm_init(&sKernelArgs);
		// Before vm_init_post_sem() is called, we have to make sure that
		// the boot loader allocated region is not used anymore
	low_resource_manager_init();

	// now we can use the heap and create areas
	arch_platform_init_post_vm(&sKernelArgs);
@ -174,6 +176,7 @@ _start(kernel_args *bootKernelArgs, int currentCPU)

	TRACE("init VM threads\n");
	vm_init_post_thread(&sKernelArgs);
	low_resource_manager_init_post_thread();
	TRACE("init ELF loader\n");
	elf_init(&sKernelArgs);
	TRACE("init scheduler\n");
@ -25,7 +25,6 @@
|
||||
#include <util/AutoLock.h>
|
||||
#include <util/DoublyLinkedList.h>
|
||||
#include <vfs.h>
|
||||
#include <vm_low_memory.h>
|
||||
#include <vm_page.h>
|
||||
#include <boot/kernel_args.h>
|
||||
#include <syscall_restart.h>
|
||||
|
@ -6,6 +6,7 @@
|
||||
*/
|
||||
|
||||
|
||||
#include <Slab.h>
|
||||
#include "slab_private.h"
|
||||
|
||||
#include <algorithm>
|
||||
@ -17,14 +18,13 @@
|
||||
|
||||
#include <Depot.h>
|
||||
#include <kernel.h>
|
||||
#include <Slab.h>
|
||||
#include <low_resource_manager.h>
|
||||
#include <smp.h>
|
||||
#include <tracing.h>
|
||||
#include <util/AutoLock.h>
|
||||
#include <util/DoublyLinkedList.h>
|
||||
#include <util/OpenHashTable.h>
|
||||
#include <vm.h>
|
||||
#include <vm_low_memory.h>
|
||||
|
||||
|
||||
// TODO kMagazineCapacity should be dynamically tuned per cache.
|
||||
@ -477,9 +477,9 @@ early_free_pages(object_cache *cache, void *pages)
|
||||
|
||||
|
||||
static void
|
||||
object_cache_low_memory(void *_self, int32 level)
|
||||
object_cache_low_memory(void *_self, uint32 resources, int32 level)
|
||||
{
|
||||
if (level == B_NO_LOW_MEMORY)
|
||||
if (level == B_NO_LOW_RESOURCE)
|
||||
return;
|
||||
|
||||
object_cache *cache = (object_cache *)_self;
|
||||
@ -497,11 +497,11 @@ object_cache_low_memory(void *_self, int32 level)
|
||||
size_t minimumAllowed;
|
||||
|
||||
switch (level) {
|
||||
case B_LOW_MEMORY_NOTE:
|
||||
case B_LOW_RESOURCE_NOTE:
|
||||
minimumAllowed = cache->pressure / 2 + 1;
|
||||
break;
|
||||
|
||||
case B_LOW_MEMORY_WARNING:
|
||||
case B_LOW_RESOURCE_WARNING:
|
||||
cache->pressure /= 2;
|
||||
minimumAllowed = 0;
|
||||
break;
|
||||
@ -594,7 +594,8 @@ object_cache_init(object_cache *cache, const char *name, size_t objectSize,
|
||||
cache->free_pages = area_free_pages;
|
||||
}
|
||||
|
||||
register_low_memory_handler(object_cache_low_memory, cache, 5);
|
||||
register_low_resource_handler(object_cache_low_memory, cache,
|
||||
B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY, 5);
|
||||
|
||||
MutexLocker _(sObjectCacheListLock);
|
||||
sObjectCaches.Add(cache);
|
||||
@ -745,7 +746,7 @@ delete_object_cache(object_cache *cache)
|
||||
if (!(cache->flags & CACHE_NO_DEPOT))
|
||||
object_depot_destroy(&cache->depot);
|
||||
|
||||
unregister_low_memory_handler(object_cache_low_memory, cache);
|
||||
unregister_low_resource_handler(object_cache_low_memory, cache);
|
||||
|
||||
if (!cache->full.IsEmpty())
|
||||
panic("cache destroy: still has full slabs");
|
||||
|
@ -7,11 +7,11 @@ KernelMergeObject kernel_vm.o :
|
||||
vm_address_space.cpp
|
||||
vm_cache.cpp
|
||||
vm_daemons.cpp
|
||||
vm_low_memory.cpp
|
||||
vm_page.cpp
|
||||
vm_store_anonymous_noswap.cpp
|
||||
vm_store_device.c
|
||||
vm_store_null.c
|
||||
VMAnonymousCache.cpp
|
||||
VMAnonymousNoSwapCache.cpp
|
||||
VMDeviceCache.cpp
|
||||
VMNullCache.cpp
|
||||
#vm_tests.c
|
||||
|
||||
: $(TARGET_KERNEL_PIC_CCFLAGS)
|
||||
|
src/system/kernel/vm/VMAnonymousCache.cpp (new file, 727 lines)
@ -0,0 +1,727 @@
/*
 * Copyright 2008, Zhao Shuai, upczhsh@163.com.
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include "VMAnonymousCache.h"

#include <stdlib.h>

#include <arch_config.h>
#include <heap.h>
#include <KernelExport.h>
#include <slab/Slab.h>
#include <vfs.h>
#include <vm.h>
#include <vm_priv.h>
#include <util/DoublyLinkedList.h>
#include <util/OpenHashTable.h>

//#define TRACE_STORE
#ifdef TRACE_STORE
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

#define SWAP_BLOCK_PAGES 32
#define SWAP_BLOCK_SHIFT 5	/* 1 << SWAP_BLOCK_SHIFT == SWAP_BLOCK_PAGES */
#define SWAP_BLOCK_MASK (SWAP_BLOCK_PAGES - 1)

#define SWAP_PAGE_NONE (~(swap_addr_t)0)

// bitmap allocation macros
#define MAP_SHIFT 5
#define NUM_BITS_PER_WORD 32	// number of bits per word

#define TESTBIT(map, i) \
	(((map)[(i) >> MAP_SHIFT] & (1 << (i) % NUM_BITS_PER_WORD)))
#define SETBIT(map, i) \
	(((map)[(i) >> MAP_SHIFT] |= (1 << (i) % NUM_BITS_PER_WORD)))
#define CLEARBIT(map, i) \
	(((map)[(i) >> MAP_SHIFT] &= ~(1 << (i) % NUM_BITS_PER_WORD)))
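
// Worked example: with MAP_SHIFT == 5 each bitmap word holds 32 page bits,
// so page index 37 lives in word 37 >> 5 == 1 at bit 37 % 32 == 5;
// SETBIT(map, 37) thus ORs map[1] with 1 << 5, and TESTBIT(map, 37)
// masks the same bit.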

// The stack functionality looks like a good candidate to put into its own
// store. I have not done this because once we have a swap file backing up
// the memory, it would probably not be a good idea to separate this
// anymore.

struct swap_file : DoublyLinkedListLinkImpl<swap_file> {
	struct vnode	*vnode;
	swap_addr_t		first_page;
	swap_addr_t		last_page;
	swap_addr_t		used;	// # of pages used
	uint32			*maps;	// bitmap for the pages
	swap_addr_t		hint;	// next free page
};

struct swap_hash_key {
	VMAnonymousCache	*cache;
	off_t				cache_offset;
};

// Each swap block contains SWAP_BLOCK_PAGES pages
struct swap_block : HashTableLink<swap_block> {
	swap_hash_key	key;
	uint32			used;
	swap_addr_t		swap_pages[SWAP_BLOCK_PAGES];
};

static swap_addr_t swap_page_alloc(uint32 npages);
static void swap_page_dealloc(swap_addr_t pageIndex, uint32 npages);
static swap_file *find_swap_file(swap_addr_t pageIndex);
static off_t swap_space_reserve(off_t amount);
static void swap_space_unreserve(off_t amount);

static object_cache *sSwapBlockCache;

struct SwapHashTableDefinition {
	typedef swap_hash_key KeyType;
	typedef swap_block ValueType;

	SwapHashTableDefinition() {}

	size_t HashKey(const swap_hash_key& key) const
	{
		// hash on the swap block index, i.e. the offset shifted right by
		// SWAP_BLOCK_SHIFT, mixed with the cache pointer
		off_t cacheOffset = key.cache_offset >> SWAP_BLOCK_SHIFT;
		VMAnonymousCache *cache = key.cache;
		return cacheOffset ^ (size_t)(addr_t)cache;
	}

	size_t Hash(const swap_block *value) const
	{
		return HashKey(value->key);
	}

	bool Compare(const swap_hash_key& key, const swap_block *value) const
	{
		return (key.cache_offset & ~(off_t)SWAP_BLOCK_MASK)
				== (value->key.cache_offset & ~(off_t)SWAP_BLOCK_MASK)
			&& key.cache == value->key.cache;
	}

	HashTableLink<swap_block> *GetLink(swap_block *value) const
	{
		return value;
	}
};

typedef OpenHashTable<SwapHashTableDefinition> SwapHashTable;
typedef DoublyLinkedList<swap_file> SwapFileList;

static SwapHashTable sSwapHashTable;
static mutex sSwapHashLock;

static SwapFileList sSwapFileList;
static mutex sSwapFileListLock;
static swap_file *sSwapFileAlloc = NULL;	// allocate from here
static uint32 sSwapFileCount = 0;

static off_t sAvailSwapSpace = 0;
static mutex sAvailSwapSpaceLock;


VMAnonymousCache::~VMAnonymousCache()
{
	swap_space_unreserve(fCommittedSwapSize);
	vm_unreserve_memory(committed_size);
}


status_t
VMAnonymousCache::Init(bool canOvercommit, int32 numPrecommittedPages,
	int32 numGuardPages)
{
	TRACE(("VMAnonymousCache::Init(canOvercommit = %s, numGuardPages = %ld) "
		"at %p\n", canOvercommit ? "yes" : "no", numGuardPages, this));

	status_t error = VMCache::Init(CACHE_TYPE_RAM);
	if (error != B_OK)
		return error;

	fCanOvercommit = canOvercommit;
	fHasPrecommitted = false;
	fPrecommittedPages = min_c(numPrecommittedPages, 255);
	fGuardedSize = numGuardPages * B_PAGE_SIZE;
	fCommittedSwapSize = 0;

	return B_OK;
}


status_t
VMAnonymousCache::Commit(off_t size)
{
	// if we can overcommit, we don't commit here, but in Fault()
	if (fCanOvercommit) {
		if (fHasPrecommitted)
			return B_OK;

		// pre-commit some pages to make a later failure less probable
		fHasPrecommitted = true;
		uint32 precommitted = fPrecommittedPages * B_PAGE_SIZE;
		if (size > precommitted)
			size = precommitted;
	}

	return _Commit(size);
}


bool
VMAnonymousCache::HasPage(off_t offset)
{
	offset >>= PAGE_SHIFT;
	swap_addr_t pageIndex = _SwapBlockGetAddress(offset);
	if (pageIndex != SWAP_PAGE_NONE)
		return true;
	return false;
}


status_t
VMAnonymousCache::Read(off_t offset, const iovec *vecs, size_t count,
	size_t *_numBytes, bool fsReenter)
{
	off_t cacheOffset = offset >> PAGE_SHIFT;

	for (uint32 i = 0, j = 0; i < count; i = j) {
		swap_addr_t startPageIndex = _SwapBlockGetAddress(cacheOffset + i);
		// extend the run as long as the swap pages are contiguous
		for (j = i + 1; j < count; j++) {
			swap_addr_t pageIndex = _SwapBlockGetAddress(cacheOffset + j);
			if (pageIndex != startPageIndex + j - i)
				break;
		}

		swap_file *swapFile = find_swap_file(startPageIndex);
		if (swapFile == NULL) {
			panic("can't find swap file for page index %ld\n", startPageIndex);
			return B_ERROR;
		}

		off_t pos = (startPageIndex - swapFile->first_page) * PAGE_SIZE;

		status_t status = vfs_read_pages(swapFile->vnode, NULL, pos, vecs + i,
			j - i, _numBytes, fsReenter);
		if (status != B_OK)
			return status;
	}

	return B_OK;
}


status_t
VMAnonymousCache::Write(off_t offset, const iovec *vecs, size_t count,
	size_t *_numBytes, bool fsReenter)
{
	offset >>= PAGE_SHIFT;

	uint32 n = count;
	for (uint32 i = 0; i < count; i += n) {
		swap_addr_t pageIndex;
		// try to allocate n pages; if that fails, retry with half as many
		while ((pageIndex = swap_page_alloc(n)) == SWAP_PAGE_NONE
				&& n >= 2)
			n >>= 1;
		if (pageIndex == SWAP_PAGE_NONE)
			panic("can't allocate swap space\n");

		for (uint32 j = 0; j < n; j++)
			_SwapBlockBuild(offset + i + j, pageIndex + j);

		swap_file *swapFile = find_swap_file(pageIndex);
		if (swapFile == NULL) {
			panic("can't find swap file for page index %ld\n", pageIndex);
			return B_ERROR;
		}

		off_t pos = (pageIndex - swapFile->first_page) * PAGE_SIZE;

		status_t status = vfs_write_pages(swapFile->vnode, NULL, pos, vecs + i,
			n, _numBytes, fsReenter);
		if (status != B_OK)
			return status;
	}

	return B_OK;
}
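
// Worked example of the halving strategy: for count == 8 vecs, a failing
// swap_page_alloc(8) drops n to 4 (then 2, then 1 if needed); the outer loop
// advances i by the n that actually succeeded, so every vec is written
// exactly once, just in smaller, possibly non-contiguous swap runs.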


status_t
VMAnonymousCache::Fault(struct vm_address_space *aspace, off_t offset)
{
	if (fCanOvercommit) {
		if (fGuardedSize > 0) {
			uint32 guardOffset;

#ifdef STACK_GROWS_DOWNWARDS
			guardOffset = 0;
#elif defined(STACK_GROWS_UPWARDS)
			guardOffset = virtual_size - fGuardedSize;
#else
#	error Stack direction has not been defined in arch_config.h
#endif

			// report stack fault, guard page hit!
			if (offset >= guardOffset && offset < guardOffset + fGuardedSize) {
				TRACE(("stack overflow!\n"));
				return B_BAD_ADDRESS;
			}
		}

		if (fPrecommittedPages == 0) {
			// try to commit additional swap space/memory
			if (swap_space_reserve(PAGE_SIZE) == PAGE_SIZE)
				fCommittedSwapSize += PAGE_SIZE;
			else if (vm_try_reserve_memory(B_PAGE_SIZE, 0) != B_OK)
				return B_NO_MEMORY;

			committed_size += B_PAGE_SIZE;
		} else
			fPrecommittedPages--;
	}

	// This will cause vm_soft_fault() to handle the fault
	return B_BAD_HANDLER;
}


void
VMAnonymousCache::MergeStore(VMCache* _source)
{
	VMAnonymousCache* source = dynamic_cast<VMAnonymousCache*>(_source);
	if (source == NULL) {
		panic("VMAnonymousCache::MergeStore(): merge with incompatible cache "
			"%p requested", _source);
		return;
	}

	// take over the source's committed size
	fCommittedSwapSize += source->fCommittedSwapSize;
	source->fCommittedSwapSize = 0;
	committed_size += source->committed_size;
	source->committed_size = 0;

	off_t actualSize = virtual_end - virtual_base;
	if (committed_size > actualSize)
		_Commit(actualSize);

	// TODO: iterate over swap blocks
	for (off_t offset = source->virtual_base; offset < source->virtual_end;
			offset += PAGE_SIZE) {
		swap_addr_t sourceSwapIndex = source->_SwapBlockGetAddress(offset);

		if (sourceSwapIndex == SWAP_PAGE_NONE) {
			// this page is not swapped out
			continue;
		}

		if (LookupPage(offset)) {
			// this page is shadowed and we can find it in the new cache,
			// free the swap space
			swap_page_dealloc(sourceSwapIndex, 1);
		} else {
			swap_addr_t swapIndex = _SwapBlockGetAddress(offset);

			if (swapIndex == SWAP_PAGE_NONE) {
				// the page is not shadowed,
				// assign the swap address to the new cache
				_SwapBlockBuild(offset, sourceSwapIndex);
			} else {
				// the page is shadowed and is also swapped out
				swap_page_dealloc(sourceSwapIndex, 1);
			}
		}
		source->_SwapBlockFree(offset);
	}
}


void
VMAnonymousCache::_SwapBlockBuild(off_t cacheOffset, swap_addr_t pageIndex)
{
	mutex_lock(&sSwapHashLock);

	swap_hash_key key = { this, cacheOffset };

	swap_block *swap = sSwapHashTable.Lookup(key);
	if (swap == NULL) {
		swap = (swap_block *)object_cache_alloc(sSwapBlockCache,
			CACHE_DONT_SLEEP);
		if (swap == NULL) {
			// TODO: wait until memory can be allocated
			mutex_unlock(&sSwapHashLock);
			return;
		}

		swap->key.cache = this;
		swap->key.cache_offset = cacheOffset & ~(off_t)SWAP_BLOCK_MASK;
		swap->used = 0;
		for (uint32 i = 0; i < SWAP_BLOCK_PAGES; i++)
			swap->swap_pages[i] = SWAP_PAGE_NONE;

		sSwapHashTable.Insert(swap);
	}

	swap_addr_t blockIndex = cacheOffset & SWAP_BLOCK_MASK;
	swap->swap_pages[blockIndex] = pageIndex;

	swap->used++;

	mutex_unlock(&sSwapHashLock);
}


void
VMAnonymousCache::_SwapBlockFree(off_t cacheOffset)
{
	mutex_lock(&sSwapHashLock);

	swap_hash_key key = { this, cacheOffset };

	swap_block *swap = sSwapHashTable.Lookup(key);
	if (swap != NULL) {
		swap_addr_t pageIndex = swap->swap_pages[cacheOffset & SWAP_BLOCK_MASK];
		if (pageIndex != SWAP_PAGE_NONE) {
			swap->swap_pages[cacheOffset & SWAP_BLOCK_MASK] = SWAP_PAGE_NONE;
			swap->used--;
			if (swap->used == 0) {
				sSwapHashTable.Remove(swap);
				object_cache_free(sSwapBlockCache, swap);
			}
		}
	}

	mutex_unlock(&sSwapHashLock);
}


swap_addr_t
VMAnonymousCache::_SwapBlockGetAddress(off_t cacheOffset)
{
	mutex_lock(&sSwapHashLock);

	swap_hash_key key = { this, cacheOffset };
	swap_block *swap = sSwapHashTable.Lookup(key);
	swap_addr_t pageIndex = SWAP_PAGE_NONE;

	if (swap != NULL) {
		swap_addr_t blockIndex = cacheOffset & SWAP_BLOCK_MASK;
		pageIndex = swap->swap_pages[blockIndex];
	}

	mutex_unlock(&sSwapHashLock);

	return pageIndex;
}


status_t
VMAnonymousCache::_Commit(off_t size)
{
	// Basic strategy: reserve swap space first, only when running out of swap
	// space, reserve real memory.

	off_t committedMemory = committed_size - fCommittedSwapSize;

	// Regardless of whether we're asked to grow or shrink the commitment,
	// we always try to reserve as much as possible of the final commitment
	// in the swap space.
	if (size > fCommittedSwapSize) {
		fCommittedSwapSize += swap_space_reserve(size - committed_size);
		committed_size = fCommittedSwapSize + committedMemory;
	}

	if (committed_size == size)
		return B_OK;

	if (committed_size > size) {
		// The commitment shrinks -- unreserve real memory first.
		off_t toUnreserve = committed_size - size;
		if (committedMemory > 0) {
			off_t unreserved = min_c(toUnreserve, committedMemory);
			vm_unreserve_memory(unreserved);
			committedMemory -= unreserved;
			committed_size -= unreserved;
			toUnreserve -= unreserved;
		}

		// Unreserve swap space.
		if (toUnreserve > 0) {
			swap_space_unreserve(toUnreserve);
			fCommittedSwapSize -= toUnreserve;
			committed_size -= toUnreserve;
		}

		return B_OK;
	}

	// The commitment grows -- we have already tried to reserve swap space at
	// the start of the method, so we try to reserve real memory, now.

	off_t toReserve = size - committed_size;
	if (vm_try_reserve_memory(toReserve, 1000000) != B_OK)
		return B_NO_MEMORY;

	committed_size = size;
	return B_OK;
}
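
// Worked example: with committed_size == 4 MB of which fCommittedSwapSize ==
// 1 MB, _Commit(6 MB) first asks swap_space_reserve() for the missing 2 MB;
// if swap grants it in full the commitment already equals 6 MB, otherwise
// the remainder is reserved as real memory with a one second timeout.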


static swap_file *
find_swap_file(swap_addr_t pageIndex)
{
	for (SwapFileList::Iterator it = sSwapFileList.GetIterator();
			swap_file *swapFile = it.Next();) {
		if (pageIndex >= swapFile->first_page
				&& pageIndex < swapFile->last_page)
			return swapFile;
	}

	return NULL;
}


static swap_addr_t
swap_page_alloc(uint32 npages)
{
	swap_addr_t hint = 0;
	swap_addr_t j;

	if (sSwapFileList.IsEmpty())
		return SWAP_PAGE_NONE;

	mutex_lock(&sSwapFileListLock);
	for (j = 0; j < sSwapFileCount; j++) {
		if (sSwapFileAlloc == NULL)
			sSwapFileAlloc = sSwapFileList.First();

		hint = sSwapFileAlloc->hint;
		swap_addr_t pageCount = sSwapFileAlloc->last_page
			- sSwapFileAlloc->first_page;

		// first-fit search: restart the run whenever a used page is hit
		swap_addr_t i = 0;
		while (i < npages && (hint + npages) < pageCount) {
			if (TESTBIT(sSwapFileAlloc->maps, hint + i)) {
				hint++;
				i = 0;
			} else
				i++;
		}

		if (i == npages)
			break;

		// this swap_file is full, find another
		sSwapFileAlloc = sSwapFileList.GetNext(sSwapFileAlloc);
	}

	if (j == sSwapFileCount) {
		panic("swap space exhausted\n");
		mutex_unlock(&sSwapFileListLock);
		return SWAP_PAGE_NONE;
	}

	swap_addr_t pageIndex = sSwapFileAlloc->first_page + hint;

	for (uint32 i = 0; i < npages; i++)
		SETBIT(sSwapFileAlloc->maps, hint + i);
	if (hint == sSwapFileAlloc->hint)
		sSwapFileAlloc->hint += npages;

	sSwapFileAlloc->used += npages;

	// if this swap file has used more than 90 percent of its pages,
	// switch to another
	if (sSwapFileAlloc->used
			> 9 * (sSwapFileAlloc->last_page - sSwapFileAlloc->first_page) / 10)
		sSwapFileAlloc = sSwapFileList.GetNext(sSwapFileAlloc);

	mutex_unlock(&sSwapFileListLock);
	return pageIndex;
}
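
// Illustrative trace of the first-fit scan: for map bits 1,1,0,1,1,0,0,...
// and npages == 2, every set bit restarts the run, so the scan settles on
// hint == 5 and marks pages 5 and 6 of this swap file as used.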


static void
swap_page_dealloc(swap_addr_t pageIndex, uint32 npages)
{
	mutex_lock(&sSwapFileListLock);
	swap_file *swapFile = find_swap_file(pageIndex);

	pageIndex -= swapFile->first_page;

	for (uint32 i = 0; i < npages; i++)
		CLEARBIT(swapFile->maps, pageIndex + i);

	if (swapFile->hint > pageIndex)
		swapFile->hint = pageIndex;

	swapFile->used -= npages;
	mutex_unlock(&sSwapFileListLock);
}


static off_t
swap_space_reserve(off_t amount)
{
	mutex_lock(&sAvailSwapSpaceLock);
	if (sAvailSwapSpace >= amount)
		sAvailSwapSpace -= amount;
	else {
		// grant only what is left; read the remainder before zeroing it
		amount = sAvailSwapSpace;
		sAvailSwapSpace = 0;
	}
	mutex_unlock(&sAvailSwapSpaceLock);

	return amount;
}
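
// Note the partial-grant contract: the returned amount may be smaller than
// requested. _Commit() above adds whatever was granted to fCommittedSwapSize
// and covers the rest with vm_try_reserve_memory().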


static void
swap_space_unreserve(off_t amount)
{
	mutex_lock(&sAvailSwapSpaceLock);
	sAvailSwapSpace += amount;
	mutex_unlock(&sAvailSwapSpaceLock);
}


status_t
swap_file_add(char *path)
{
	vnode *node = NULL;
	status_t status = vfs_get_vnode_from_path(path, true, &node);
	if (status != B_OK)
		return status;

	swap_file *swap = (swap_file *)malloc(sizeof(swap_file));
	if (swap == NULL) {
		vfs_put_vnode(node);
		return B_NO_MEMORY;
	}

	swap->vnode = node;

	struct stat st;
	status = vfs_stat_vnode(node, &st);
	if (status != B_OK) {
		free(swap);
		vfs_put_vnode(node);
		return status;
	}

	if (!(S_ISREG(st.st_mode) || S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode))) {
		free(swap);
		vfs_put_vnode(node);
		return B_ERROR;
	}

	int32 pageCount = st.st_size >> PAGE_SHIFT;
	swap->used = 0;

	// allocate and clear the page bitmap: one uint32 word per 32 pages
	size_t mapSize = (pageCount + NUM_BITS_PER_WORD - 1) / NUM_BITS_PER_WORD
		* sizeof(uint32);
	swap->maps = (uint32 *)malloc(mapSize);
	if (swap->maps == NULL) {
		free(swap);
		vfs_put_vnode(node);
		return B_NO_MEMORY;
	}
	memset(swap->maps, 0, mapSize);
	swap->hint = 0;

	// set start page index and add this file to swap file list
	mutex_lock(&sSwapFileListLock);
	if (sSwapFileList.IsEmpty()) {
		swap->first_page = 0;
		swap->last_page = pageCount;
	} else {
		// leave one page gap between two swap files
		swap->first_page = sSwapFileList.Last()->last_page + 1;
		swap->last_page = swap->first_page + pageCount;
	}
	sSwapFileList.Add(swap);
	sSwapFileCount++;
	mutex_unlock(&sSwapFileListLock);

	mutex_lock(&sAvailSwapSpaceLock);
	sAvailSwapSpace += pageCount * PAGE_SIZE;
	mutex_unlock(&sAvailSwapSpaceLock);

	return B_OK;
}


status_t
swap_file_delete(char *path)
{
	vnode *node = NULL;
	status_t status = vfs_get_vnode_from_path(path, true, &node);
	if (status != B_OK)
		return status;

	mutex_lock(&sSwapFileListLock);

	swap_file *swapFile = NULL;
	for (SwapFileList::Iterator it = sSwapFileList.GetIterator();
			(swapFile = it.Next()) != NULL;) {
		if (swapFile->vnode == node)
			break;
	}

	vfs_put_vnode(node);

	if (swapFile == NULL) {
		mutex_unlock(&sSwapFileListLock);
		return B_ERROR;
	}

	// if this file is currently used, we can't delete
	// TODO: mark this swap file deleting, and remove it after releasing
	// all the swap space
	if (swapFile->used > 0) {
		mutex_unlock(&sSwapFileListLock);
		return B_ERROR;
	}

	sSwapFileList.Remove(swapFile);
	sSwapFileCount--;
	mutex_unlock(&sSwapFileListLock);

	mutex_lock(&sAvailSwapSpaceLock);
	sAvailSwapSpace -= (swapFile->last_page - swapFile->first_page) * PAGE_SIZE;
	mutex_unlock(&sAvailSwapSpaceLock);

	vfs_put_vnode(swapFile->vnode);
	free(swapFile->maps);
	free(swapFile);

	return B_OK;
}


void
swap_init(void)
{
	// create swap block cache
	sSwapBlockCache = create_object_cache("swapblock", sizeof(swap_block),
		sizeof(void*), NULL, NULL, NULL);
	if (sSwapBlockCache == NULL)
		panic("can't create object cache for swap blocks\n");

	// init swap hash table
	sSwapHashTable.Init();
	mutex_init(&sSwapHashLock, "swaphash");

	// init swap file list
	mutex_init(&sSwapFileListLock, "swaplist");
	sSwapFileAlloc = NULL;
	sSwapFileCount = 0;

	// init available swap space
	mutex_init(&sAvailSwapSpaceLock, "avail swap space");
	sAvailSwapSpace = 0;
}
src/system/kernel/vm/VMAnonymousCache.h (new file, 52 lines)
@ -0,0 +1,52 @@
/*
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_VM_STORE_ANONYMOUS_H
#define _KERNEL_VM_STORE_ANONYMOUS_H


#include <vm_types.h>


typedef page_num_t swap_addr_t;
struct swap_block;

class VMAnonymousCache : public VMCache {
public:
	virtual ~VMAnonymousCache();

	status_t Init(bool canOvercommit, int32 numPrecommittedPages,
		int32 numGuardPages);

	virtual status_t Commit(off_t size);
	virtual bool HasPage(off_t offset);

	virtual status_t Read(off_t offset, const iovec *vecs, size_t count,
		size_t *_numBytes, bool fsReenter);
	virtual status_t Write(off_t offset, const iovec *vecs, size_t count,
		size_t *_numBytes, bool fsReenter);

	virtual status_t Fault(struct vm_address_space *aspace, off_t offset);

	virtual void MergeStore(VMCache* source);

private:
	void _SwapBlockBuild(off_t cacheOffset, swap_addr_t pageIndex);
	void _SwapBlockFree(off_t cacheOffset);
	swap_addr_t _SwapBlockGetAddress(off_t cacheOffset);
	status_t _Commit(off_t size);

private:
	bool	fCanOvercommit;
	bool	fHasPrecommitted;
	uint8	fPrecommittedPages;
	int32	fGuardedSize;
	off_t	fCommittedSwapSize;
};


#endif	/* _KERNEL_VM_STORE_ANONYMOUS_H */
src/system/kernel/vm/VMAnonymousNoSwapCache.cpp (new file, 172 lines)
@ -0,0 +1,172 @@
/*
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include "VMAnonymousNoSwapCache.h"

#include <stdlib.h>

#include <arch_config.h>
#include <heap.h>
#include <KernelExport.h>
#include <vm_priv.h>


//#define TRACE_STORE
#ifdef TRACE_STORE
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

// The stack functionality looks like a good candidate to put into its own
// store. I have not done this because once we have a swap file backing up
// the memory, it would probably not be a good idea to separate this
// anymore.


VMAnonymousNoSwapCache::~VMAnonymousNoSwapCache()
{
	vm_unreserve_memory(committed_size);
}


status_t
VMAnonymousNoSwapCache::Init(bool canOvercommit, int32 numPrecommittedPages,
	int32 numGuardPages)
{
	TRACE(("VMAnonymousNoSwapCache::Init(canOvercommit = %s, numGuardPages = "
		"%ld) at %p\n", canOvercommit ? "yes" : "no", numGuardPages, this));

	status_t error = VMCache::Init(CACHE_TYPE_RAM);
	if (error != B_OK)
		return error;

	fCanOvercommit = canOvercommit;
	fHasPrecommitted = false;
	fPrecommittedPages = min_c(numPrecommittedPages, 255);
	fGuardedSize = numGuardPages * B_PAGE_SIZE;

	return B_OK;
}


status_t
VMAnonymousNoSwapCache::Commit(off_t size)
{
	// if we can overcommit, we don't commit here, but in Fault()
	if (fCanOvercommit) {
		if (fHasPrecommitted)
			return B_OK;

		// pre-commit some pages to make a later failure less probable
		fHasPrecommitted = true;
		uint32 precommitted = fPrecommittedPages * B_PAGE_SIZE;
		if (size > precommitted)
			size = precommitted;
	}

	// Check to see how much we could commit - we need real memory

	if (size > committed_size) {
		// try to commit
		if (vm_try_reserve_memory(size - committed_size, 1000000) != B_OK)
			return B_NO_MEMORY;
	} else {
		// we can release some
		vm_unreserve_memory(committed_size - size);
	}

	committed_size = size;
	return B_OK;
}


bool
VMAnonymousNoSwapCache::HasPage(off_t offset)
{
	return false;
}


status_t
VMAnonymousNoSwapCache::Read(off_t offset, const iovec *vecs, size_t count,
	size_t *_numBytes, bool fsReenter)
{
	panic("anonymous_store: read called. Invalid!\n");
	return B_ERROR;
}


status_t
VMAnonymousNoSwapCache::Write(off_t offset, const iovec *vecs, size_t count,
	size_t *_numBytes, bool fsReenter)
{
	// no place to write, this will cause the page daemon to skip this store
	return B_ERROR;
}


status_t
VMAnonymousNoSwapCache::Fault(struct vm_address_space *aspace, off_t offset)
{
	if (fCanOvercommit) {
		if (fGuardedSize > 0) {
			uint32 guardOffset;

#ifdef STACK_GROWS_DOWNWARDS
			guardOffset = 0;
#elif defined(STACK_GROWS_UPWARDS)
			guardOffset = virtual_size - fGuardedSize;
#else
#	error Stack direction has not been defined in arch_config.h
#endif

			// report stack fault, guard page hit!
			if (offset >= guardOffset && offset < guardOffset + fGuardedSize) {
				TRACE(("stack overflow!\n"));
				return B_BAD_ADDRESS;
			}
		}

		if (fPrecommittedPages == 0) {
			// try to commit additional memory
			if (vm_try_reserve_memory(B_PAGE_SIZE, 0) != B_OK)
				return B_NO_MEMORY;

			committed_size += B_PAGE_SIZE;
		} else
			fPrecommittedPages--;
	}

	// This will cause vm_soft_fault() to handle the fault
	return B_BAD_HANDLER;
}


void
VMAnonymousNoSwapCache::MergeStore(VMCache* _source)
{
	VMAnonymousNoSwapCache* source
		= dynamic_cast<VMAnonymousNoSwapCache*>(_source);
	if (source == NULL) {
		panic("VMAnonymousNoSwapCache::MergeStore(): merge with incompatible "
			"cache %p requested", _source);
		return;
	}

	// take over the source's committed size
	committed_size += source->committed_size;
	source->committed_size = 0;

	off_t actualSize = virtual_end - virtual_base;
	if (committed_size > actualSize) {
		vm_unreserve_memory(committed_size - actualSize);
		committed_size = actualSize;
	}
}
src/system/kernel/vm/VMAnonymousNoSwapCache.h (new file, 42 lines)
@ -0,0 +1,42 @@
/*
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_VM_STORE_ANONYMOUS_NO_SWAP_H
#define _KERNEL_VM_STORE_ANONYMOUS_NO_SWAP_H


#include <vm_types.h>


class VMAnonymousNoSwapCache : public VMCache {
public:
	virtual ~VMAnonymousNoSwapCache();

	status_t Init(bool canOvercommit, int32 numPrecommittedPages,
		int32 numGuardPages);

	virtual status_t Commit(off_t size);
	virtual bool HasPage(off_t offset);

	virtual status_t Read(off_t offset, const iovec *vecs, size_t count,
		size_t *_numBytes, bool fsReenter);
	virtual status_t Write(off_t offset, const iovec *vecs, size_t count,
		size_t *_numBytes, bool fsReenter);

	virtual status_t Fault(struct vm_address_space *aspace, off_t offset);

	virtual void MergeStore(VMCache* source);

private:
	bool	fCanOvercommit;
	bool	fHasPrecommitted;
	uint8	fPrecommittedPages;
	int32	fGuardedSize;
};


#endif	/* _KERNEL_VM_STORE_ANONYMOUS_NO_SWAP_H */
src/system/kernel/vm/VMDeviceCache.cpp (new file, 43 lines)
@ -0,0 +1,43 @@
/*
 * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include "VMDeviceCache.h"


status_t
VMDeviceCache::Init(addr_t baseAddress)
{
	fBaseAddress = baseAddress;
	return VMCache::Init(CACHE_TYPE_DEVICE);
}


bool
VMDeviceCache::HasPage(off_t offset)
{
	// this should never be called
	return false;
}


status_t
VMDeviceCache::Read(off_t offset, const iovec *vecs, size_t count,
	size_t *_numBytes, bool fsReenter)
{
	panic("device_store: read called. Invalid!\n");
	return B_ERROR;
}


status_t
VMDeviceCache::Write(off_t offset, const iovec *vecs, size_t count,
	size_t *_numBytes, bool fsReenter)
{
	// no place to write, this will cause the page daemon to skip this store
	return B_OK;
}
src/system/kernel/vm/VMDeviceCache.h (new file, 31 lines)
@ -0,0 +1,31 @@
/*
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2005-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_VM_STORE_DEVICE_H
#define _KERNEL_VM_STORE_DEVICE_H


#include <vm_types.h>


class VMDeviceCache : public VMCache {
public:
	status_t Init(addr_t baseAddress);

	virtual bool HasPage(off_t offset);

	virtual status_t Read(off_t offset, const iovec *vecs, size_t count,
		size_t *_numBytes, bool fsReenter);
	virtual status_t Write(off_t offset, const iovec *vecs, size_t count,
		size_t *_numBytes, bool fsReenter);

private:
	addr_t	fBaseAddress;
};


#endif	/* _KERNEL_VM_STORE_DEVICE_H */
src/system/kernel/vm/VMNullCache.cpp (new file, 13 lines)
@ -0,0 +1,13 @@
/*
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */

#include "VMNullCache.h"


status_t
VMNullCache::Init()
{
	return VMCache::Init(CACHE_TYPE_NULL);
}
@ -1,4 +1,5 @@
/*
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2005-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
@ -8,13 +9,13 @@
#ifndef _KERNEL_VM_STORE_NULL_H
#define _KERNEL_VM_STORE_NULL_H


#include <vm_types.h>


#ifdef __cplusplus
extern "C"
#endif
struct vm_store *vm_store_create_null(void);
class VMNullCache : public VMCache {
public:
	status_t Init();
};


#endif	/* _KERNEL_VM_STORE_NULL_H */
(two file diffs suppressed because they are too large)
@ -13,7 +13,6 @@
#include <vm.h>
#include <vm_priv.h>
#include <vm_cache.h>
#include <vm_low_memory.h>
#include <vm_page.h>


@ -61,21 +60,12 @@ PageCacheLocker::Lock(vm_page* page, bool dontWait)
		return false;

	// Grab a reference to this cache.
	vm_cache* cache = vm_cache_acquire_page_cache_ref(page);
	vm_cache* cache = vm_cache_acquire_locked_page_cache(page, dontWait);
	if (cache == NULL)
		return false;

	if (dontWait) {
		if (mutex_trylock(&cache->lock) != B_OK) {
			vm_cache_release_ref(cache);
			return false;
		}
	} else
		mutex_lock(&cache->lock);

	if (cache != page->cache || _IgnorePage(page)) {
		mutex_unlock(&cache->lock);
		vm_cache_release_ref(cache);
	if (_IgnorePage(page)) {
		cache->ReleaseRefAndUnlock();
		return false;
	}

@ -90,9 +80,7 @@ PageCacheLocker::Unlock()
	if (fPage == NULL)
		return;

	vm_cache* cache = fPage->cache;
	mutex_unlock(&cache->lock);
	vm_cache_release_ref(cache);
	fPage->cache->ReleaseRefAndUnlock();

	fPage = NULL;
}
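// Descriptive note on the hunk above: vm_cache_acquire_locked_page_cache()
// returns the page's cache already referenced and locked, which is why a
// single ReleaseRefAndUnlock() suffices on every exit path.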
@ -1,262 +0,0 @@
/*
 * Copyright 2005-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


#include <vm_low_memory.h>

#include <new>
#include <signal.h>
#include <stdlib.h>

#include <KernelExport.h>

#include <elf.h>
#include <lock.h>
#include <sem.h>
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <vm_page.h>
#include <vm_priv.h>


//#define TRACE_LOW_MEMORY
#ifdef TRACE_LOW_MEMORY
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


struct low_memory_handler : public DoublyLinkedListLinkImpl<low_memory_handler> {
	low_memory_func	function;
	void			*data;
	int32			priority;
};

typedef DoublyLinkedList<low_memory_handler> HandlerList;


static const bigtime_t kLowMemoryInterval = 3000000;	// 3 secs
static const bigtime_t kWarnMemoryInterval = 500000;	// 0.5 secs

// page limits
static const size_t kNoteLimit = 2048;
static const size_t kWarnLimit = 256;
static const size_t kCriticalLimit = 32;


static int32 sLowMemoryState = B_NO_LOW_MEMORY;
static bigtime_t sLastMeasurement;

static mutex sLowMemoryMutex = MUTEX_INITIALIZER("low memory");
static sem_id sLowMemoryWaitSem;
static HandlerList sLowMemoryHandlers;


static void
call_handlers(int32 level)
{
	MutexLocker locker(&sLowMemoryMutex);
	HandlerList::Iterator iterator = sLowMemoryHandlers.GetIterator();

	while (iterator.HasNext()) {
		low_memory_handler *handler = iterator.Next();

		handler->function(handler->data, level);
	}
}


static int32
compute_state(void)
{
	sLastMeasurement = system_time();

	uint32 freePages = vm_page_num_free_pages();

	if (freePages > kNoteLimit) {
		// TODO: work-around for a missing general low resource handler
		if (sem_used_sems() * 6 > sem_max_sems() * 5)
			return B_LOW_MEMORY_WARNING;
		if (sem_used_sems() * 3 > sem_max_sems() * 2)
			return B_LOW_MEMORY_NOTE;
	}

	if (freePages >= kNoteLimit)
		return B_NO_LOW_MEMORY;

	// specify low memory level
	if (freePages < kCriticalLimit)
		return B_LOW_MEMORY_CRITICAL;
	if (freePages < kWarnLimit)
		return B_LOW_MEMORY_WARNING;

	return B_LOW_MEMORY_NOTE;
}


static int32
low_memory(void *)
{
	bigtime_t timeout = kLowMemoryInterval;
	while (true) {
		if (sLowMemoryState != B_LOW_MEMORY_CRITICAL) {
			acquire_sem_etc(sLowMemoryWaitSem, 1, B_RELATIVE_TIMEOUT,
				timeout);
		}

		sLowMemoryState = compute_state();

		TRACE(("vm_low_memory: state = %ld, %ld free pages\n",
			sLowMemoryState, vm_page_num_free_pages()));

		if (sLowMemoryState < B_LOW_MEMORY_NOTE)
			continue;

		call_handlers(sLowMemoryState);

		if (sLowMemoryState == B_LOW_MEMORY_WARNING)
			timeout = kWarnMemoryInterval;
		else
			timeout = kLowMemoryInterval;
	}
	return 0;
}


static int
dump_handlers(int argc, char **argv)
{
	HandlerList::Iterator iterator = sLowMemoryHandlers.GetIterator();
	kprintf("function data prio function-name\n");

	while (iterator.HasNext()) {
		low_memory_handler *handler = iterator.Next();

		const char* symbol = NULL;
		elf_debug_lookup_symbol_address((addr_t)handler->function, NULL,
			&symbol, NULL, NULL);

		kprintf("%p %p %3ld %s\n", handler->function, handler->data,
			handler->priority, symbol);
	}

	return 0;
}


// #pragma mark - private kernel API


void
vm_low_memory(size_t requirements)
{
	// TODO: take requirements into account

	vm_schedule_page_scanner(requirements);
	release_sem(sLowMemoryWaitSem);
}


int32
vm_low_memory_state(void)
{
	if (system_time() - sLastMeasurement > 500000)
		sLowMemoryState = compute_state();

	return sLowMemoryState;
}


status_t
vm_low_memory_init(void)
{
	new(&sLowMemoryHandlers) HandlerList;
		// static initializers do not work in the kernel,
		// so we have to do it here, manually

	return B_OK;
}


status_t
vm_low_memory_init_post_thread(void)
{
	sLowMemoryWaitSem = create_sem(0, "low memory wait");
	if (sLowMemoryWaitSem < B_OK)
		return sLowMemoryWaitSem;

	thread_id thread = spawn_kernel_thread(&low_memory, "low memory handler",
		B_LOW_PRIORITY, NULL);
	send_signal_etc(thread, SIGCONT, B_DO_NOT_RESCHEDULE);

	add_debugger_command("low_memory", &dump_handlers, "Dump list of low memory handlers");
	return B_OK;
}


status_t
unregister_low_memory_handler(low_memory_func function, void *data)
{
	TRACE(("unregister_low_memory_handler(function = %p, data = %p)\n",
		function, data));

	MutexLocker locker(&sLowMemoryMutex);
	HandlerList::Iterator iterator = sLowMemoryHandlers.GetIterator();

	while (iterator.HasNext()) {
		low_memory_handler *handler = iterator.Next();

		if (handler->function == function && handler->data == data) {
			sLowMemoryHandlers.Remove(handler);
			free(handler);
			return B_OK;
		}
	}

	return B_ENTRY_NOT_FOUND;
}


/*!	Registers a low memory handler. The higher the \a priority, the earlier
	the handler will be called in low memory situations.
*/
status_t
register_low_memory_handler(low_memory_func function, void *data,
	int32 priority)
{
	TRACE(("register_low_memory_handler(function = %p, data = %p)\n",
		function, data));

	low_memory_handler *newHandler = (low_memory_handler *)malloc(
		sizeof(low_memory_handler));
	if (newHandler == NULL)
		return B_NO_MEMORY;

	newHandler->function = function;
	newHandler->data = data;
	newHandler->priority = priority;

	MutexLocker locker(&sLowMemoryMutex);

	// sort it in after priority (higher priority comes first)

	HandlerList::ReverseIterator iterator
		= sLowMemoryHandlers.GetReverseIterator();
	low_memory_handler *last = NULL;
	while (iterator.HasNext()) {
		low_memory_handler *handler = iterator.Next();

		if (handler->priority >= priority) {
			sLowMemoryHandlers.Insert(last, newHandler);
			return B_OK;
		}
		last = handler;
	}

	sLowMemoryHandlers.Add(newHandler, false);
	return B_OK;
}
@ -18,12 +18,12 @@
|
||||
#include <boot/kernel_args.h>
|
||||
#include <condition_variable.h>
|
||||
#include <kernel.h>
|
||||
#include <low_resource_manager.h>
|
||||
#include <thread.h>
|
||||
#include <tracing.h>
|
||||
#include <util/AutoLock.h>
|
||||
#include <vm.h>
|
||||
#include <vm_address_space.h>
|
||||
#include <vm_low_memory.h>
|
||||
#include <vm_priv.h>
|
||||
#include <vm_page.h>
|
||||
#include <vm_cache.h>
|
||||
@ -846,7 +846,6 @@ page_scrubber(void *unused)
|
||||
static status_t
|
||||
write_page(vm_page *page, bool fsReenter)
|
||||
{
|
||||
vm_store *store = page->cache->store;
|
||||
size_t length = B_PAGE_SIZE;
|
||||
status_t status;
|
||||
iovec vecs[1];
|
||||
@ -859,7 +858,7 @@ write_page(vm_page *page, bool fsReenter)
|
||||
panic("could not map page!");
|
||||
vecs->iov_len = B_PAGE_SIZE;
|
||||
|
||||
status = store->ops->write(store, (off_t)page->cache_offset << PAGE_SHIFT,
|
||||
status = page->cache->Write((off_t)page->cache_offset << PAGE_SHIFT,
|
||||
vecs, 1, &length, fsReenter);
|
||||
|
||||
vm_put_physical_page((addr_t)vecs[0].iov_base);
|
||||
@ -991,18 +990,18 @@ page_writer(void* /*unused*/)
|
||||
|
||||
vm_cache *cache = page->cache;
|
||||
// TODO: write back temporary ones as soon as we have swap file support
|
||||
if (cache->temporary/* && vm_low_memory_state() == B_NO_LOW_MEMORY*/)
|
||||
if (cache->temporary
|
||||
/*&& low_resource_state(B_KERNEL_RESOURCE_PAGES)
|
||||
== B_NO_LOW_RESOURCE*/) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (cache->store->ops->acquire_unreferenced_ref != NULL) {
|
||||
// we need our own reference to the store, as it might
|
||||
// currently be destructed
|
||||
if (cache->store->ops->acquire_unreferenced_ref(cache->store)
|
||||
!= B_OK) {
|
||||
cacheLocker.Unlock();
|
||||
thread_yield(true);
|
||||
continue;
|
||||
}
|
||||
// we need our own reference to the store, as it might
|
||||
// currently be destructed
|
||||
if (cache->AcquireUnreferencedStoreRef() != B_OK) {
|
||||
cacheLocker.Unlock();
|
||||
thread_yield(true);
|
||||
continue;
|
||||
}
|
||||
|
||||
InterruptsSpinLocker locker(sPageLock);
|
||||
@ -1010,8 +1009,7 @@ page_writer(void* /*unused*/)
|
||||
// state might have change while we were locking the cache
|
||||
if (page->state != PAGE_STATE_MODIFIED) {
|
||||
// release the cache reference first
|
||||
if (cache->store->ops->release_ref != NULL)
|
||||
cache->store->ops->release_ref(cache->store);
|
||||
cache->ReleaseStoreRef();
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -1025,7 +1023,7 @@ page_writer(void* /*unused*/)
|
||||
|
||||
//dprintf("write page %p, cache %p (%ld)\n", page, page->cache, page->cache->ref_count);
|
||||
vm_clear_map_flags(page, PAGE_MODIFIED);
|
||||
vm_cache_acquire_ref(cache);
|
||||
cache->AcquireRefLocked();
|
||||
u.pages[numPages++] = page;
|
||||
}
|
||||
|
||||
@ -1044,7 +1042,7 @@ page_writer(void* /*unused*/)
|
||||
|
||||
for (uint32 i = 0; i < numPages; i++) {
|
||||
vm_cache *cache = u.pages[i]->cache;
|
||||
mutex_lock(&cache->lock);
|
||||
cache->Lock();
|
||||
|
||||
if (writeStatus[i] == B_OK) {
|
||||
// put it into the active queue
|
||||
@ -1062,7 +1060,7 @@ page_writer(void* /*unused*/)
|
||||
if (!u.pages[i]->busy_writing) {
|
||||
// someone has cleared the busy_writing flag which tells
|
||||
// us our page has gone invalid
|
||||
vm_cache_remove_page(cache, u.pages[i]);
|
||||
cache->RemovePage(u.pages[i]);
|
||||
} else
|
||||
u.pages[i]->busy_writing = false;
|
||||
}
|
||||
@ -1070,7 +1068,7 @@ page_writer(void* /*unused*/)
|
||||
busyConditions[i].Unpublish();
|
||||
|
||||
u.caches[i] = cache;
|
||||
mutex_unlock(&cache->lock);
|
||||
cache->Unlock();
|
||||
}
|
||||
|
||||
for (uint32 i = 0; i < numPages; i++) {
|
||||
@ -1078,9 +1076,8 @@ page_writer(void* /*unused*/)
|
||||
|
||||
// We release the cache references after all pages were made
|
||||
// unbusy again - otherwise releasing a vnode could deadlock.
|
||||
if (cache->store->ops->release_ref != NULL)
|
||||
cache->store->ops->release_ref(cache->store);
|
||||
vm_cache_release_ref(cache);
|
||||
cache->ReleaseStoreRef();
|
||||
cache->ReleaseRef();
|
||||
}
|
||||
}
|
||||
|
||||
@ -1147,45 +1144,11 @@ static bool
steal_page(vm_page *page, bool stealActive)
{
	// try to lock the page's cache

	class PageCacheTryLocker {
	public:
		PageCacheTryLocker(vm_page *page)
			:
			fIsLocked(false),
			fOwnsLock(false)
		{
			fCache = vm_cache_acquire_page_cache_ref(page);
			if (fCache != NULL) {
				if (mutex_trylock(&fCache->lock) != B_OK)
					return;

				fOwnsLock = true;

				if (fCache == page->cache)
					fIsLocked = true;
			}
		}

		~PageCacheTryLocker()
		{
			if (fOwnsLock)
				mutex_unlock(&fCache->lock);
			if (fCache != NULL)
				vm_cache_release_ref(fCache);
		}

		bool IsLocked() { return fIsLocked; }

	private:
		vm_cache *fCache;
		bool fIsLocked;
		bool fOwnsLock;
	} cacheLocker(page);

	if (!cacheLocker.IsLocked())
	if (vm_cache_acquire_locked_page_cache(page, false) == NULL)
		return false;

	AutoLocker<VMCache> cacheLocker(page->cache, true, false);

	// check again if that page is still a candidate
	if (page->state != PAGE_STATE_INACTIVE
		&& (!stealActive || page->state != PAGE_STATE_ACTIVE
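The deleted PageCacheTryLocker shows why locking a page's cache needs care: between taking a reference on page->cache and acquiring its lock, the page can migrate to another cache, which is why the old code re-checked fCache == page->cache after the try-lock. The replacement, vm_cache_acquire_locked_page_cache(), presumably encapsulates the same dance. A standalone illustration of the race check, with mock types rather than kernel code:

#include <mutex>

struct Cache { std::mutex lock; };
struct Page { Cache* cache; };

bool tryLockPageCache(Page& page)
{
	Cache* cache = page.cache;			// snapshot, may go stale
	if (cache == nullptr || !cache->lock.try_lock())
		return false;
	if (cache != page.cache) {			// page moved while we locked
		cache->lock.unlock();
		return false;
	}
	return true;						// locked the page's current cache
}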
@ -1210,7 +1173,7 @@ steal_page(vm_page *page, bool stealActive)
	//dprintf("  steal page %p from cache %p%s\n", page, page->cache,
	//	page->state == PAGE_STATE_INACTIVE ? "" : " (ACTIVE)");

	vm_cache_remove_page(page->cache, page);
	page->cache->RemovePage(page);

	InterruptsSpinLocker _(sPageLock);
	remove_page_from_queue(page->state == PAGE_STATE_ACTIVE
@ -1289,7 +1252,7 @@ steal_pages(vm_page **pages, size_t count, bool reserve)
			freeConditionEntry.Add(&sFreePageQueue);
			locker.Unlock();

			vm_low_memory(count);
			low_resource(B_KERNEL_RESOURCE_PAGES, count, B_RELATIVE_TIMEOUT, 0);
			//snooze(50000);
				// sleep for 50ms
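In line with the commit message, vm_low_memory(count) becomes low_resource() with an explicit resource kind, so the same handler can also serve semaphores and reserved memory. A rough standalone sketch of such a dispatch; everything except the pages kind is illustrative here, not the kernel's actual definitions:

#include <cstdint>

enum ResourceKind { kPages, kSemaphores, kMemory };

void lowResource(ResourceKind kind, uint64_t howMuch)
{
	(void)howMuch;	// amount requested; unused in this sketch
	switch (kind) {
		case kPages:		/* free cached pages */			break;
		case kSemaphores:	/* reclaim unused semaphores */	break;
		case kMemory:		/* shrink caches */				break;
	}
}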
@ -1316,7 +1279,7 @@ steal_pages(vm_page **pages, size_t count, bool reserve)
	at this offset is not included.
*/
status_t
vm_page_write_modified_page_range(struct vm_cache *cache, uint32 firstPage,
vm_page_write_modified_page_range(struct VMCache *cache, uint32 firstPage,
	uint32 endPage, bool fsReenter)
{
	// TODO: join adjacent pages into one vec list
@ -1357,9 +1320,9 @@ vm_page_write_modified_page_range(struct vm_cache *cache, uint32 firstPage,
		// clear the modified flag
		vm_clear_map_flags(page, PAGE_MODIFIED);

		mutex_unlock(&cache->lock);
		cache->Unlock();
		status_t status = write_page(page, fsReenter);
		mutex_lock(&cache->lock);
		cache->Lock();

		InterruptsSpinLocker locker(&sPageLock);
@ -1378,7 +1341,7 @@ vm_page_write_modified_page_range(struct vm_cache *cache, uint32 firstPage,
			if (!page->busy_writing) {
				// someone has cleared the busy_writing flag which tells
				// us our page has gone invalid
				vm_cache_remove_page(cache, page);
				cache->RemovePage(page);
			} else {
				if (!dequeuedPage)
					set_page_state_nolock(page, PAGE_STATE_MODIFIED);
@ -1401,7 +1364,7 @@ status_t
vm_page_write_modified_pages(vm_cache *cache, bool fsReenter)
{
	return vm_page_write_modified_page_range(cache, 0,
		(cache->virtual_size + B_PAGE_SIZE - 1) >> PAGE_SHIFT, fsReenter);
		(cache->virtual_end + B_PAGE_SIZE - 1) >> PAGE_SHIFT, fsReenter);
}
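The changed line converts the cache's byte size to a page count, rounding up; only the field name changes (virtual_size to virtual_end). A quick standalone check of the arithmetic, assuming the usual 4 KiB pages:

#include <cstdint>
#include <cstdio>

int main()
{
	const uint64_t B_PAGE_SIZE = 4096;
	const int PAGE_SHIFT = 12;
	uint64_t virtualEnd = 10000;	// example cache size in bytes
	uint64_t endPage = (virtualEnd + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
	printf("%llu bytes -> %llu pages\n",
		(unsigned long long)virtualEnd, (unsigned long long)endPage);
	// prints: 10000 bytes -> 3 pages (pages 0..2 cover bytes 0..12287)
	return 0;
}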
@ -1423,7 +1386,7 @@ vm_page_schedule_write_page(vm_page *page)
/*!	Cache must be locked.
*/
void
vm_page_schedule_write_page_range(struct vm_cache *cache, uint32 firstPage,
vm_page_schedule_write_page_range(struct VMCache *cache, uint32 firstPage,
	uint32 endPage)
{
	uint32 modified = 0;
@ -1,190 +0,0 @@
/*
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "vm_store_anonymous_noswap.h"

#include <heap.h>
#include <KernelExport.h>
#include <vm_priv.h>
#include <arch_config.h>

#include <stdlib.h>


//#define TRACE_STORE
#ifdef TRACE_STORE
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

// The stack functionality looks like a good candidate to put into its own
// store. I have not done this because once we have a swap file backing up
// the memory, it would probably not be a good idea to separate this
// anymore.

typedef struct anonymous_store {
	vm_store	vm;
	bool		can_overcommit;
	bool		has_precommitted;
	uint8		precommitted_pages;
	int32		guarded_size;
} anonymous_store;


static void
anonymous_destroy(struct vm_store *store)
{
	vm_unreserve_memory(store->committed_size);
	free(store);
}


static status_t
anonymous_commit(struct vm_store *_store, off_t size)
{
	anonymous_store *store = (anonymous_store *)_store;

	size -= store->vm.cache->virtual_base;
		// anonymous stores don't need to span over their whole source

	// if we can overcommit, we don't commit here, but in anonymous_fault()
	if (store->can_overcommit) {
		if (store->has_precommitted)
			return B_OK;

		// pre-commit some pages to make a later failure less probable
		store->has_precommitted = true;
		uint32 precommitted = store->precommitted_pages * B_PAGE_SIZE;
		if (size > precommitted)
			size = precommitted;
	}

	if (size == store->vm.committed_size)
		return B_OK;

	// Check to see how much we could commit - we need real memory

	if (size > store->vm.committed_size) {
		// try to commit
		if (vm_try_reserve_memory(size - store->vm.committed_size) != B_OK)
			return B_NO_MEMORY;
	} else {
		// we can release some
		vm_unreserve_memory(store->vm.committed_size - size);
	}

	store->vm.committed_size = size;
	return B_OK;
}
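This commit hook implements the overcommit policy: an overcommitting store only pre-commits a handful of pages up front and accounts for the rest lazily in the fault hook. A standalone model of just that decision, with illustrative names only:

#include <algorithm>
#include <cstdint>

struct AnonStore {
	int64_t committedSize = 0;
	bool    canOvercommit = false;
	bool    hasPrecommitted = false;
	uint8_t precommittedPages = 0;
};

const int64_t kPageSize = 4096;

// Returns the size that would actually be committed for a request,
// mirroring the "pre-commit a few pages, overcommit the rest" policy.
int64_t commitTarget(AnonStore& store, int64_t requested)
{
	if (store.canOvercommit) {
		if (store.hasPrecommitted)
			return store.committedSize;	// nothing more to do up front
		store.hasPrecommitted = true;
		// only pre-commit a few pages to make later faults less likely
		// to fail; the rest is committed page by page in the fault hook
		return std::min(requested,
			(int64_t)store.precommittedPages * kPageSize);
	}
	return requested;	// no overcommit: commit the full size now
}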


static bool
anonymous_has_page(struct vm_store *store, off_t offset)
{
	return false;
}


static status_t
anonymous_read(struct vm_store *store, off_t offset, const iovec *vecs,
	size_t count, size_t *_numBytes, bool fsReenter)
{
	panic("anonymous_store: read called. Invalid!\n");
	return B_ERROR;
}


static status_t
anonymous_write(struct vm_store *store, off_t offset, const iovec *vecs,
	size_t count, size_t *_numBytes, bool fsReenter)
{
	// no place to write, this will cause the page daemon to skip this store
	return B_ERROR;
}


static status_t
anonymous_fault(struct vm_store *_store, struct vm_address_space *aspace,
	off_t offset)
{
	anonymous_store *store = (anonymous_store *)_store;

	if (store->can_overcommit) {
		if (store->guarded_size > 0) {
			uint32 guardOffset;

#ifdef STACK_GROWS_DOWNWARDS
			guardOffset = 0;
#elif defined(STACK_GROWS_UPWARDS)
			guardOffset = store->vm.cache->virtual_size - store->guarded_size;
#else
#	error Stack direction has not been defined in arch_config.h
#endif

			// report stack fault, guard page hit!
			if (offset >= guardOffset
				&& offset < guardOffset + store->guarded_size) {
				TRACE(("stack overflow!\n"));
				return B_BAD_ADDRESS;
			}
		}

		if (store->precommitted_pages == 0) {
			// try to commit additional memory
			if (vm_try_reserve_memory(B_PAGE_SIZE) != B_OK)
				return B_NO_MEMORY;

			store->vm.committed_size += B_PAGE_SIZE;
		} else
			store->precommitted_pages--;
	}

	// This will cause vm_soft_fault() to handle the fault
	return B_BAD_HANDLER;
}
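The fault hook rejects accesses that land in the guard pages, whose position depends on the stack growth direction: offset 0 for a downward-growing stack, the top of the area otherwise. A standalone model of the range check (hypothetical helper, not kernel code):

#include <cstdint>
#include <cstdio>

// Any fault landing in [guardOffset, guardOffset + guardedSize) is an
// overflow of the guarded (stack) area.
bool hitsGuardPages(uint64_t faultOffset, uint64_t areaSize,
	uint64_t guardedSize, bool stackGrowsDown)
{
	uint64_t guardOffset = stackGrowsDown ? 0 : areaSize - guardedSize;
	return faultOffset >= guardOffset
		&& faultOffset < guardOffset + guardedSize;
}

int main()
{
	// 64 KiB stack area with one 4 KiB guard page at the bottom:
	printf("%d\n", hitsGuardPages(0x0800, 0x10000, 0x1000, true));	// 1
	printf("%d\n", hitsGuardPages(0x2000, 0x10000, 0x1000, true));	// 0
	return 0;
}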


static vm_store_ops anonymous_ops = {
	&anonymous_destroy,
	&anonymous_commit,
	&anonymous_has_page,
	&anonymous_read,
	&anonymous_write,
	&anonymous_fault,
	NULL,		// acquire unreferenced ref
	NULL,		// acquire ref
	NULL		// release ref
};


/*!	Create a new vm_store that uses anonymous noswap memory */
vm_store *
vm_store_create_anonymous_noswap(bool canOvercommit,
	int32 numPrecommittedPages, int32 numGuardPages)
{
	anonymous_store *store = (anonymous_store *)malloc_nogrow(
		sizeof(anonymous_store));
	if (store == NULL)
		return NULL;

	TRACE(("vm_store_create_anonymous(canOvercommit = %s, numGuardPages = %ld) at %p\n",
		canOvercommit ? "yes" : "no", numGuardPages, store));

	store->vm.ops = &anonymous_ops;
	store->vm.cache = NULL;
	store->vm.committed_size = 0;
	store->can_overcommit = canOvercommit;
	store->has_precommitted = false;
	store->precommitted_pages = min_c(numPrecommittedPages, 255);
	store->guarded_size = numGuardPages * B_PAGE_SIZE;

	return &store->vm;
}
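These deleted files implement stores as C function-pointer tables (vm_store_ops). Per the commit message, each store became a VMCache subclass with virtual methods replacing the table entries. A hedged structural sketch of that mapping; the class and method names here are illustrative, not the actual new VMCache interface:

#include <cstdint>

class Cache {
public:
	virtual ~Cache() {}							// replaces ops->destroy
	virtual int  Commit(int64_t size) = 0;		// replaces &xxx_commit
	virtual bool HasPage(int64_t offset) = 0;	// replaces &xxx_has_page
};

class NullCache : public Cache {
public:
	int  Commit(int64_t size) override { fCommitted = size; return 0; }
	bool HasPage(int64_t) override { return true; }
private:
	int64_t fCommitted = 0;
};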

@ -1,26 +0,0 @@
/*
 * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_VM_STORE_ANONYMOUS_H
#define _KERNEL_VM_STORE_ANONYMOUS_H


#include <vm_types.h>


#ifdef __cplusplus
extern "C" {
#endif

vm_store *vm_store_create_anonymous_noswap(bool canOvercommit,
	int32 numPrecommittedPages, int32 numGuardPages);

#ifdef __cplusplus
}
#endif

#endif	/* _KERNEL_VM_STORE_ANONYMOUS_H */

@ -1,103 +0,0 @@
/*
 * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "vm_store_device.h"

#include <heap.h>
#include <KernelExport.h>
#include <vm_priv.h>

#include <stdlib.h>


struct device_store {
	struct vm_store	vm;
	addr_t			base_address;
};


static void
device_destroy(struct vm_store *store)
{
	free(store);
}


static status_t
device_commit(struct vm_store *store, off_t size)
{
	store->committed_size = size;
	return B_OK;
}


static bool
device_has_page(struct vm_store *store, off_t offset)
{
	// this should never be called
	return false;
}


static status_t
device_read(struct vm_store *store, off_t offset, const iovec *vecs,
	size_t count, size_t *_numBytes, bool fsReenter)
{
	panic("device_store: read called. Invalid!\n");
	return B_ERROR;
}


static status_t
device_write(struct vm_store *store, off_t offset, const iovec *vecs,
	size_t count, size_t *_numBytes, bool fsReenter)
{
	// no place to write, this will cause the page daemon to skip this store
	return B_OK;
}


static status_t
device_fault(struct vm_store *_store, struct vm_address_space *aspace,
	off_t offset)
{
	// devices are mapped in completely, so we shouldn't experience faults
	return B_BAD_ADDRESS;
}


static vm_store_ops device_ops = {
	&device_destroy,
	&device_commit,
	&device_has_page,
	&device_read,
	&device_write,
	&device_fault,
	NULL,		// acquire unreferenced ref
	NULL,		// acquire ref
	NULL		// release ref
};


struct vm_store *
vm_store_create_device(addr_t baseAddress)
{
	struct device_store *store = malloc_nogrow(sizeof(struct device_store));
	if (store == NULL)
		return NULL;

	store->vm.ops = &device_ops;
	store->vm.cache = NULL;
	store->vm.committed_size = 0;

	store->base_address = baseAddress;

	return &store->vm;
}

@ -1,20 +0,0 @@
/*
 * Copyright 2005-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_VM_STORE_DEVICE_H
#define _KERNEL_VM_STORE_DEVICE_H


#include <vm_types.h>


#ifdef __cplusplus
extern "C"
#endif
struct vm_store *vm_store_create_device(addr_t base_addr);

#endif	/* _KERNEL_VM_STORE_DEVICE_H */

@ -1,92 +0,0 @@
/*
 * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "vm_store_null.h"

#include <heap.h>
#include <stdlib.h>


static void
null_destroy(struct vm_store *store)
{
	free(store);
}


static status_t
null_commit(struct vm_store *store, off_t size)
{
	store->committed_size = size;
	return B_OK;
}


static bool
null_has_page(struct vm_store *store, off_t offset)
{
	return true; // we always have the page, man
}


static status_t
null_read(struct vm_store *store, off_t offset, const iovec *vecs,
	size_t count, size_t *_numBytes, bool fsReenter)
{
	return B_ERROR;
}


static status_t
null_write(struct vm_store *store, off_t offset, const iovec *vecs,
	size_t count, size_t *_numBytes, bool fsReenter)
{
	return B_ERROR;
}


static status_t
null_fault(struct vm_store *store, struct vm_address_space *aspace,
	off_t offset)
{
	/* we can't fault on this region, that's pretty much the point of the
	   null store object */
	return B_BAD_ADDRESS;
}


static vm_store_ops null_ops = {
	&null_destroy,
	&null_commit,
	&null_has_page,
	&null_read,
	&null_write,
	&null_fault,
	NULL,		// acquire unreferenced ref
	NULL,		// acquire ref
	NULL		// release ref
};


struct vm_store *
vm_store_create_null(void)
{
	struct vm_store *store;

	store = malloc_nogrow(sizeof(struct vm_store));
	if (store == NULL)
		return NULL;

	store->ops = &null_ops;
	store->cache = NULL;
	store->committed_size = 0;

	return store;
}