* sMappingLock is now a mutex instead of a spinlock.
* The vm_translation_map is now correctly locked in all of the vm_* mapping functions.
* Removed the old vm_daemons.c file - there is now a new vm_daemons.cpp which contains the beginnings of our new page daemon. So far it's pretty static and not much tested. What it currently does is rescan all pages in the system with a two-handed clock algorithm and push pages onto the modified and inactive lists.
* These inactive pages aren't really stolen yet, even though their mappings are removed (i.e. their next access will cause a page fault). This should slow down Haiku a bit more - great, huh? :-)
* The page daemon currently only runs in low memory situations, though.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22156 a95241bf-73f2-0310-859d-f6bbb57e9c96
Parent: d8badf6791
Commit: 87689e25ea
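For orientation, here is a minimal, self-contained sketch of the two-handed clock scan described above. The simplified Page struct, the global page vector, and the helper names are illustrative assumptions only; the actual logic lives in clear_page_activation(), check_page_activation() and page_daemon() in the new vm_daemons.cpp further down.

#include <cstdint>
#include <vector>

struct Page {
    bool accessed = false;   // stand-in for the PAGE_ACCESSED flag
    int8_t usageCount = 0;   // mirrors vm_page::usage_count aging
};

static std::vector<Page> sPages(1024);   // stand-in for the global page array

// Leading hand: clear the accessed bit so the trailing hand can tell
// whether the page was touched while the hands swept past it.
static void clear_hand(uint32_t index)
{
    sPages[index].accessed = false;
}

// Trailing hand: recently used pages age up; idle pages age down and
// eventually become candidates for the modified/inactive lists.
// Returns true if the page counts toward the daemon's free target.
static bool check_hand(uint32_t index)
{
    Page& page = sPages[index];
    if (page.accessed) {
        if (page.usageCount < 127)
            page.usageCount++;
        return false;
    }
    if (page.usageCount > -128)
        page.usageCount--;
    if (page.usageCount < 0) {
        // here the real code removes the page's mappings and moves it
        // to the modified or inactive list
        return true;
    }
    return false;
}

// One scan pass; the hands start half a revolution apart and wrap around.
void scan_pages(uint32_t pagesToScan, uint32_t freeTarget)
{
    static uint32_t clearPage = 0;
    static uint32_t checkPage = sPages.size() / 2;

    for (uint32_t i = 0; i < pagesToScan && freeTarget > 0; i++) {
        clear_hand(clearPage);
        if (check_hand(checkPage))
            freeTarget--;

        clearPage = (clearPage + 1) % sPages.size();
        checkPage = (checkPage + 1) % sPages.size();
    }
}

Keeping the hands half a revolution apart gives every page a fixed grace period between having its accessed bit cleared and being inspected again.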
@@ -69,7 +69,7 @@ status_t vm_create_vnode_cache(void *vnode, vm_cache **_cache);
 vm_area *vm_area_lookup(vm_address_space *addressSpace, addr_t address);
 status_t vm_set_area_memory_type(area_id id, addr_t physicalBase, uint32 type);
 status_t vm_get_page_mapping(team_id team, addr_t vaddr, addr_t *paddr);
-int32 vm_test_map_activation(vm_page *page);
+int32 vm_test_map_activation(vm_page *page, bool *_modified);
 void vm_clear_map_activation(vm_page *page);
 void vm_remove_all_page_mappings(vm_page *page);
 status_t vm_unmap_pages(vm_area *area, addr_t base, size_t length);
@@ -44,15 +44,12 @@ extern "C" {
 #endif

 // Should only be used by vm internals
-status_t vm_page_fault(addr_t address, addr_t faultAddress, bool isWrite, bool isUser, addr_t *newip);
+status_t vm_page_fault(addr_t address, addr_t faultAddress, bool isWrite,
+    bool isUser, addr_t *newip);
 void vm_unreserve_memory(size_t bytes);
 status_t vm_try_reserve_memory(size_t bytes);
 status_t vm_daemon_init(void);

-// used by the page daemon to walk the list of address spaces
-void vm_address_space_walk_start(struct hash_iterator *i);
-vm_address_space *vm_address_space_walk_next(struct hash_iterator *i);
-
 #ifdef __cplusplus
 }
 #endif
@@ -6,7 +6,7 @@ KernelMergeObject kernel_vm.o :
     vm.cpp
     vm_address_space.c
     vm_cache.cpp
-    vm_daemons.c
+    vm_daemons.cpp
     vm_low_memory.cpp
     vm_page.cpp
     vm_store_anonymous_noswap.c
@@ -191,7 +191,7 @@ public:
 static area_id sNextAreaID;
 static hash_table *sAreaHash;
 static sem_id sAreaHashLock;
-static spinlock sMappingLock;
+static mutex sMappingLock;
 static mutex sAreaCacheLock;

 static off_t sAvailableMemory;
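The spinlock-to-mutex switch above is what makes the locking in the mapping functions below legal: the removed TODO comments ("this can't work... this has to be a mutex") point out that the per-page mapping walks need to take the translation map lock, which may block, and blocking is not allowed while a spinlock is held with interrupts disabled. A rough sketch of the resulting pattern, using std::mutex and std::lock_guard purely as stand-ins for the kernel's mutex and MutexLocker (this is not Haiku kernel code):

#include <mutex>

struct TranslationMap {              // stand-in for vm_translation_map
    std::mutex lock;                 // map->ops->lock()/unlock() may block
};

static std::mutex sMappingLock;      // was: static spinlock sMappingLock;

void walk_page_mappings(TranslationMap* map)
{
    // plays the role of: MutexLocker locker(sMappingLock);
    std::lock_guard<std::mutex> locker(sMappingLock);

    // Because sMappingLock is now a blocking lock, it is fine to take the
    // (possibly blocking) translation map lock while holding it, which the
    // removed disable_interrupts()/acquire_spinlock() pattern could not allow.
    std::lock_guard<std::mutex> mapLocker(map->lock);   // map->ops->lock(map);

    // ... query/clear/unmap page table entries here ...
}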
@@ -2354,13 +2354,12 @@ vm_get_page_mapping(team_id team, addr_t vaddr, addr_t *paddr)


 int32
-vm_test_map_activation(vm_page *page)
+vm_test_map_activation(vm_page *page, bool *_modified)
 {
     int32 activation = 0;
+    bool modified = false;

-    // TODO: this can't work... (we need to lock the map, so this has to be a mutex)
-    cpu_status state = disable_interrupts();
-    acquire_spinlock(&sMappingLock);
+    MutexLocker locker(sMappingLock);

     vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
     vm_page_mapping *mapping;
@@ -2370,17 +2369,19 @@ vm_test_map_activation(vm_page *page)

         addr_t physicalAddress;
         uint32 flags;
-//      map->ops->lock(map);
+        map->ops->lock(map);
         addr_t address = area->base + (page->cache_offset << PAGE_SHIFT);
         map->ops->query_interrupt(map, address, &physicalAddress, &flags);
-//      map->ops->unlock(map);
+        map->ops->unlock(map);

         if (flags & PAGE_ACCESSED)
             activation++;
+        if (flags & PAGE_MODIFIED)
+            modified = true;
     }

-    release_spinlock(&sMappingLock);
-    restore_interrupts(state);
+    if (_modified != NULL)
+        *_modified = modified;

     return activation;
 }
@@ -2389,9 +2390,7 @@ vm_test_map_activation(vm_page *page)
 void
 vm_clear_map_activation(vm_page *page)
 {
-    // TODO: this can't work... (we need to lock the map, so this has to be a mutex)
-    cpu_status state = disable_interrupts();
-    acquire_spinlock(&sMappingLock);
+    MutexLocker locker(sMappingLock);

     vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
     vm_page_mapping *mapping;
@@ -2399,23 +2398,18 @@ vm_clear_map_activation(vm_page *page)
         vm_area *area = mapping->area;
         vm_translation_map *map = &area->address_space->translation_map;

-//      map->ops->lock(map);
+        map->ops->lock(map);
         addr_t address = area->base + (page->cache_offset << PAGE_SHIFT);
         map->ops->clear_flags(map, address, PAGE_ACCESSED);
-//      map->ops->unlock(map);
+        map->ops->unlock(map);
     }
-
-    release_spinlock(&sMappingLock);
-    restore_interrupts(state);
 }


 void
 vm_remove_all_page_mappings(vm_page *page)
 {
-    // TODO: this can't work... (we need to lock the map, so this has to be a mutex)
-    cpu_status state = disable_interrupts();
-    acquire_spinlock(&sMappingLock);
+    MutexLocker locker(sMappingLock);

     vm_page_mappings queue;
     queue.MoveFrom(&page->mappings);
@@ -2426,16 +2420,15 @@ vm_remove_all_page_mappings(vm_page *page)
         vm_area *area = mapping->area;
         vm_translation_map *map = &area->address_space->translation_map;

-//      map->ops->lock(map);
+        map->ops->lock(map);
         addr_t base = area->base + (page->cache_offset << PAGE_SHIFT);
         map->ops->unmap(map, base, base + (B_PAGE_SIZE - 1));
-//      map->ops->unlock(map);
+        map->ops->unlock(map);

         area->mappings.Remove(mapping);
     }

-    release_spinlock(&sMappingLock);
-    restore_interrupts(state);
+    locker.Unlock();

     // free now unused mappings

@@ -2476,6 +2469,7 @@ vm_unmap_pages(vm_area *area, addr_t base, size_t size)
     }

     map->ops->unmap(map, base, end);
+    map->ops->unlock(map);

     if (area->wiring == B_NO_LOCK) {
         uint32 startOffset = (area->cache_offset + base - area->base)
@@ -2484,15 +2478,16 @@ vm_unmap_pages(vm_area *area, addr_t base, size_t size)
         vm_page_mapping *mapping;
         vm_area_mappings queue;

-        cpu_status state = disable_interrupts();
-        acquire_spinlock(&sMappingLock);
+        mutex_lock(&sMappingLock);
+        map->ops->lock(map);

         vm_area_mappings::Iterator iterator = area->mappings.GetIterator();
         while (iterator.HasNext()) {
             mapping = iterator.Next();

             vm_page *page = mapping->page;
-            if (page->cache_offset < startOffset || page->cache_offset >= endOffset)
+            if (page->cache_offset < startOffset
+                || page->cache_offset >= endOffset)
                 continue;

             mapping->page->mappings.Remove(mapping);
@@ -2501,15 +2496,14 @@ vm_unmap_pages(vm_area *area, addr_t base, size_t size)
             queue.Add(mapping);
         }

-        release_spinlock(&sMappingLock);
-        restore_interrupts(state);
+        map->ops->unlock(map);
+        mutex_unlock(&sMappingLock);

         while ((mapping = queue.RemoveHead()) != NULL) {
             free(mapping);
         }
     }

-    map->ops->unlock(map);
     return B_OK;
 }

@@ -2532,24 +2526,19 @@ vm_map_page(vm_area *area, vm_page *page, addr_t address, uint32 protection)
     map->ops->lock(map);
     map->ops->map(map, address, page->physical_page_number * B_PAGE_SIZE,
         protection);
+    map->ops->unlock(map);

     if (area->wiring != B_NO_LOCK) {
         page->wired_count++;
             // TODO: needs to be atomic on all platforms!
     } else {
         // insert mapping into lists
-        cpu_status state = disable_interrupts();
-        acquire_spinlock(&sMappingLock);
+        MutexLocker locker(sMappingLock);

         page->mappings.Add(mapping);
         area->mappings.Add(mapping);
-
-        release_spinlock(&sMappingLock);
-        restore_interrupts(state);
     }

-    map->ops->unlock(map);
-
     if (page->state != PAGE_STATE_MODIFIED)
         vm_page_set_state(page, PAGE_STATE_ACTIVE);

@@ -3519,6 +3508,7 @@ vm_init_post_sem(kernel_args *args)

     sAreaHashLock = create_sem(WRITE_COUNT, "area hash");
     mutex_init(&sAreaCacheLock, "area->cache");
+    mutex_init(&sMappingLock, "page mappings");

     slab_init_post_sem();

@@ -313,27 +313,6 @@ vm_create_address_space(team_id id, addr_t base, addr_t size,
 }


-void
-vm_address_space_walk_start(struct hash_iterator *iterator)
-{
-    hash_open(sAddressSpaceTable, iterator);
-}
-
-
-vm_address_space *
-vm_address_space_walk_next(struct hash_iterator *iterator)
-{
-    vm_address_space *aspace;
-
-    acquire_sem_etc(sAddressSpaceHashSem, READ_COUNT, 0, 0);
-    aspace = hash_next(sAddressSpaceTable, iterator);
-    if (aspace)
-        atomic_add(&aspace->ref_count, 1);
-    release_sem_etc(sAddressSpaceHashSem, READ_COUNT, 0);
-    return aspace;
-}
-
-
 status_t
 vm_address_space_init(void)
 {
@@ -327,6 +327,7 @@ vm_cache_insert_page(vm_cache *cache, vm_page *page, off_t offset)
     cache->page_count++;

     page->cache = cache;
+    page->usage_count = 2;

     state = disable_interrupts();
     acquire_spinlock(&sPageCacheTableLock);
src/system/kernel/vm/vm_daemons.c (deleted, 226 lines)
@@ -1,226 +0,0 @@
-/*
- * Copyright 2004-2005, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
- * Distributed under the terms of the MIT License.
- *
- * Copyright 2001, Travis Geiselbrecht. All rights reserved.
- * Distributed under the terms of the NewOS License.
- */
-
-
-#include <OS.h>
-
-#include <thread.h>
-#include <vm.h>
-#include <vm_priv.h>
-#include <vm_cache.h>
-#include <vm_page.h>
-#include <vm_address_space.h>
-
-
-bool trimming_cycle;
-static addr_t free_memory_low_water;
-static addr_t free_memory_high_water;
-
-
-#if 0
-static void
-scan_pages(vm_address_space *aspace, addr_t free_target)
-{
-    vm_area *firstArea;
-    vm_area *area;
-    vm_page *page;
-    addr_t va;
-    addr_t pa;
-    uint32 flags, flags2;
-//  int err;
-    int quantum = PAGE_SCAN_QUANTUM;
-
-//  dprintf("scan_pages called on aspace 0x%x, id 0x%x, free_target %d\n", aspace, aspace->id, free_target);
-
-    acquire_sem_etc(aspace->sem, READ_COUNT, 0, 0);
-
-    firstArea = aspace->areas;
-    while (firstArea && (firstArea->base + (firstArea->size - 1)) < aspace->scan_va)
-        firstArea = firstArea->address_space_next;
-
-    if (!firstArea)
-        firstArea = aspace->areas;
-
-    if (!firstArea) {
-        release_sem_etc(aspace->sem, READ_COUNT, 0);
-        return;
-    }
-
-    area = firstArea;
-    for (;;) {
-        // ignore reserved ranges
-        while (area != NULL && area->id == RESERVED_AREA_ID)
-            area = area->address_space_next;
-        if (area == NULL)
-            break;
-
-        // scan the pages in this area
-        mutex_lock(&area->cache_ref->lock);
-        if (!area->cache_ref->cache->scan_skip) {
-            for(va = area->base; va < (area->base + area->size); va += B_PAGE_SIZE) {
-                aspace->translation_map.ops->lock(&aspace->translation_map);
-                aspace->translation_map.ops->query(&aspace->translation_map, va, &pa, &flags);
-                if ((flags & PAGE_PRESENT) == 0) {
-                    aspace->translation_map.ops->unlock(&aspace->translation_map);
-                    continue;
-                }
-
-                page = vm_lookup_page(pa / B_PAGE_SIZE);
-                if (!page) {
-                    aspace->translation_map.ops->unlock(&aspace->translation_map);
-                    continue;
-                }
-
-                // see if this page is busy, if it is lets forget it and move on
-                if (page->state == PAGE_STATE_BUSY || page->state == PAGE_STATE_WIRED) {
-                    aspace->translation_map.ops->unlock(&aspace->translation_map);
-                    continue;
-                }
-
-                flags2 = 0;
-                if (free_target > 0) {
-                    // look for a page we can steal
-                    if (!(flags & PAGE_ACCESSED) && page->state == PAGE_STATE_ACTIVE) {
-                        // unmap the page
-                        aspace->translation_map.ops->unmap(&aspace->translation_map, va, va + B_PAGE_SIZE);
-
-                        // flush the tlbs of all cpus
-                        aspace->translation_map.ops->flush(&aspace->translation_map);
-
-                        // re-query the flags on the old pte, to make sure we have accurate modified bit data
-                        aspace->translation_map.ops->query(&aspace->translation_map, va, &pa, &flags2);
-
-                        // clear the modified and accessed bits on the entries
-                        aspace->translation_map.ops->clear_flags(&aspace->translation_map, va, PAGE_MODIFIED|PAGE_ACCESSED);
-
-                        // decrement the ref count on the page. If we just unmapped it for the last time,
-                        // put the page on the inactive list
-                        if (atomic_add(&page->ref_count, -1) == 1) {
-                            vm_page_set_state(page, PAGE_STATE_INACTIVE);
-                            free_target--;
-                        }
-                    }
-                }
-
-                // if the page is modified, but the state is active or inactive, put it on the modified list
-                if (((flags & PAGE_MODIFIED) || (flags2 & PAGE_MODIFIED))
-                    && (page->state == PAGE_STATE_ACTIVE || page->state == PAGE_STATE_INACTIVE)) {
-                    vm_page_set_state(page, PAGE_STATE_MODIFIED);
-                }
-
-                aspace->translation_map.ops->unlock(&aspace->translation_map);
-                if (--quantum == 0)
-                    break;
-            }
-        }
-        mutex_unlock(&area->cache_ref->lock);
-        // move to the next area, wrapping around and stopping if we get back to the first area
-        area = area->address_space_next ? area->address_space_next : aspace->areas;
-        if (area == firstArea)
-            break;
-
-        if (quantum == 0)
-            break;
-    }
-
-    aspace->scan_va = area ? (firstArea->base + firstArea->size) : aspace->base;
-    release_sem_etc(aspace->sem, READ_COUNT, 0);
-
-//  dprintf("exiting scan_pages\n");
-}
-
-
-static int32
-page_daemon(void *unused)
-{
-    struct hash_iterator i;
-    vm_address_space *old_aspace;
-    vm_address_space *aspace;
-    addr_t mapped_size;
-    addr_t free_memory_target;
-    int faults_per_second;
-    bigtime_t now;
-
-    dprintf("page daemon starting\n");
-    (void)unused;
-
-    for (;;) {
-        snooze(PAGE_DAEMON_INTERVAL);
-
-        // scan through all of the address spaces
-        vm_address_space_walk_start(&i);
-        aspace = vm_address_space_walk_next(&i);
-        while (aspace) {
-            mapped_size = aspace->translation_map.ops->get_mapped_size(&aspace->translation_map);
-
-//          dprintf("page_daemon: looking at aspace 0x%x, id 0x%x, mapped size %d\n", aspace, aspace->id, mapped_size);
-
-            now = system_time();
-            if (now - aspace->last_working_set_adjust > WORKING_SET_ADJUST_INTERVAL) {
-                faults_per_second = (aspace->fault_count * 1000000) / (now - aspace->last_working_set_adjust);
-//              dprintf("  faults_per_second = %d\n", faults_per_second);
-                aspace->last_working_set_adjust = now;
-                aspace->fault_count = 0;
-
-                if (faults_per_second > MAX_FAULTS_PER_SECOND
-                    && mapped_size >= aspace->working_set_size
-                    && aspace->working_set_size < aspace->max_working_set) {
-
-                    aspace->working_set_size += WORKING_SET_INCREMENT;
-//                  dprintf("  new working set size = %d\n", aspace->working_set_size);
-                } else if (faults_per_second < MIN_FAULTS_PER_SECOND
-                    && mapped_size <= aspace->working_set_size
-                    && aspace->working_set_size > aspace->min_working_set) {
-
-                    aspace->working_set_size -= WORKING_SET_DECREMENT;
-//                  dprintf("  new working set size = %d\n", aspace->working_set_size);
-                }
-            }
-
-            // decide if we need to enter or leave the trimming cycle
-            if (!trimming_cycle && vm_page_num_free_pages() < free_memory_low_water)
-                trimming_cycle = true;
-            else if (trimming_cycle && vm_page_num_free_pages() > free_memory_high_water)
-                trimming_cycle = false;
-
-            // scan some pages, trying to free some if needed
-            free_memory_target = 0;
-            if (trimming_cycle && mapped_size > aspace->working_set_size)
-                free_memory_target = mapped_size - aspace->working_set_size;
-
-            scan_pages(aspace, free_memory_target);
-
-            // must hold a ref to the old aspace while we grab the next one,
-            // otherwise the iterator becomes out of date.
-            old_aspace = aspace;
-            aspace = vm_address_space_walk_next(&i);
-            vm_put_address_space(old_aspace);
-        }
-    }
-}
-#endif
-
-
-status_t
-vm_daemon_init()
-{
-    thread_id thread;
-
-    trimming_cycle = false;
-
-    // calculate the free memory low and high water at which point we enter/leave trimming phase
-    free_memory_low_water = vm_page_num_pages() / 8;
-    free_memory_high_water = vm_page_num_pages() / 4;
-
-    // create a kernel thread to select pages for pageout
-    //thread = spawn_kernel_thread(&page_daemon, "page daemon", B_FIRST_REAL_TIME_PRIORITY, NULL);
-    //send_signal_etc(thread, SIGCONT, B_DO_NOT_RESCHEDULE);
-
-    return B_OK;
-}
src/system/kernel/vm/vm_daemons.cpp (new file, 233 lines)
@@ -0,0 +1,233 @@
+/*
+ * Copyright 2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
+ * Distributed under the terms of the MIT License.
+ */
+
+
+#include <signal.h>
+
+#include <OS.h>
+
+#include <vm.h>
+#include <vm_priv.h>
+#include <vm_cache.h>
+#include <vm_low_memory.h>
+#include <vm_page.h>
+
+
+static sem_id sPageDaemonSem;
+static uint32 sScanPagesCount;
+static uint32 sNumPages;
+static bigtime_t sScanWaitInterval;
+
+
+class PageCacheLocker {
+public:
+    PageCacheLocker(vm_page* page);
+    ~PageCacheLocker();
+
+    bool IsLocked() { return fPage != NULL; }
+
+    bool Lock(vm_page* page);
+    void Unlock();
+
+private:
+    bool _IgnorePage(vm_page* page);
+
+    vm_page* fPage;
+};
+
+
+PageCacheLocker::PageCacheLocker(vm_page* page)
+    :
+    fPage(NULL)
+{
+    Lock(page);
+}
+
+
+PageCacheLocker::~PageCacheLocker()
+{
+    Unlock();
+}
+
+
+bool
+PageCacheLocker::_IgnorePage(vm_page* page)
+{
+    if (page->state == PAGE_STATE_WIRED || page->state == PAGE_STATE_BUSY
+        || page->state == PAGE_STATE_FREE || page->state == PAGE_STATE_CLEAR
+        || page->state == PAGE_STATE_UNUSED || page->cache == NULL)
+        return true;
+
+    return false;
+}
+
+
+bool
+PageCacheLocker::Lock(vm_page* page)
+{
+    if (_IgnorePage(page))
+        return false;
+
+    vm_cache* cache = page->cache;
+
+    // Grab a reference to this cache - the page does not own a reference
+    // to its cache, so we can't just acquire it the easy way
+    while (true) {
+        int32 count = cache->ref_count;
+        if (count == 0) {
+            cache = NULL;
+            break;
+        }
+
+        if (atomic_test_and_set(&cache->ref_count, count + 1, count) == count)
+            break;
+    }
+
+    if (cache == NULL)
+        return false;
+
+    mutex_lock(&cache->lock);
+
+    if (cache != page->cache || _IgnorePage(page)) {
+        mutex_unlock(&cache->lock);
+        vm_cache_release_ref(cache);
+        return false;
+    }
+
+    fPage = page;
+    return true;
+}
+
+
+void
+PageCacheLocker::Unlock()
+{
+    if (fPage == NULL)
+        return;
+
+    vm_cache* cache = fPage->cache;
+    mutex_unlock(&cache->lock);
+    vm_cache_release_ref(cache);
+
+    fPage = NULL;
+}
+
+
+// #pragma mark -
+
+
+static void
+clear_page_activation(int32 index)
+{
+    vm_page *page = vm_page_at_index(index);
+    PageCacheLocker locker(page);
+    if (!locker.IsLocked())
+        return;
+
+    if (page->state == PAGE_STATE_ACTIVE)
+        vm_clear_map_activation(page);
+}
+
+
+static bool
+check_page_activation(int32 index)
+{
+    vm_page *page = vm_page_at_index(index);
+    PageCacheLocker locker(page);
+    if (!locker.IsLocked())
+        return false;
+
+    bool modified;
+    int32 activation = vm_test_map_activation(page, &modified);
+    if (modified && page->state != PAGE_STATE_MODIFIED) {
+        //dprintf("page %p -> move to modified\n", page);
+        vm_page_set_state(page, PAGE_STATE_MODIFIED);
+    }
+
+    if (activation > 0) {
+        // page is still in active use
+        if (page->usage_count < 0) {
+            if (page->state != PAGE_STATE_MODIFIED)
+                vm_page_set_state(page, PAGE_STATE_ACTIVE);
+            page->usage_count = 1;
+            //dprintf("page %p -> move to active\n", page);
+        } else if (page->usage_count < 127)
+            page->usage_count++;
+
+        return false;
+    }
+
+    if (page->usage_count > -128)
+        page->usage_count--;
+
+    if (page->usage_count < 0) {
+        vm_remove_all_page_mappings(page);
+        if (page->state == PAGE_STATE_MODIFIED) {
+            // TODO: schedule to write back!
+        } else
+            vm_page_set_state(page, PAGE_STATE_INACTIVE);
+        //dprintf("page %p -> move to inactive\n", page);
+    }
+
+    return true;
+}
+
+
+static status_t
+page_daemon(void *unused)
+{
+    uint32 clearPage = 0;
+    uint32 checkPage = sNumPages / 2;
+
+    while (true) {
+        acquire_sem_etc(sPageDaemonSem, 1, B_RELATIVE_TIMEOUT,
+            sScanWaitInterval);
+
+        if (vm_low_memory_state() < B_LOW_MEMORY_NOTE) {
+            // don't do anything if we have enough free memory left
+            continue;
+        }
+
+        uint32 leftToFree = 32;
+            // TODO: make this depending on the low memory state
+
+        for (uint32 i = 0; i < sScanPagesCount && leftToFree > 0; i++) {
+            if (clearPage == 0)
+                dprintf("clear through\n");
+            if (checkPage == 0)
+                dprintf("check through\n");
+            clear_page_activation(clearPage);
+
+            if (check_page_activation(checkPage))
+                leftToFree--;
+
+            if (++clearPage == sNumPages)
+                clearPage = 0;
+            if (++checkPage == sNumPages)
+                checkPage = 0;
+        }
+    }
+    return B_OK;
+}
+
+
+status_t
+vm_daemon_init()
+{
+    sPageDaemonSem = create_sem(0, "page daemon");
+
+    sNumPages = vm_page_num_pages();
+    // TODO: compute those depending on sNumPages and memory pressure!
+    sScanPagesCount = 512;
+    sScanWaitInterval = 1000000;
+
+    // create a kernel thread to select pages for pageout
+    thread_id thread = spawn_kernel_thread(&page_daemon, "page daemon",
+        B_LOW_PRIORITY, NULL);
+    send_signal_etc(thread, SIGCONT, B_DO_NOT_RESCHEDULE);
+
+    return B_OK;
+}
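A side note on PageCacheLocker::Lock() above: since a page does not hold a reference to its cache, the reference has to be grabbed with a compare-and-swap loop that only succeeds while the count is still non-zero. A minimal sketch of that pattern, using std::atomic as a stand-in for the kernel's atomic_test_and_set() (illustrative only, not the kernel code):

#include <atomic>
#include <cstdint>

struct Cache {
    std::atomic<int32_t> refCount{1};   // stand-in for vm_cache::ref_count
};

// Returns true if a reference could be taken, false if the cache is
// already on its way out (count dropped to zero).
bool acquire_cache_ref(Cache* cache)
{
    int32_t count = cache->refCount.load();
    while (count != 0) {
        // Try to bump count -> count + 1; on failure, `count` is reloaded
        // with the current value and the loop retries.
        if (cache->refCount.compare_exchange_weak(count, count + 1))
            return true;
    }
    return false;
}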