haiku/src/system/kernel/vm/vm_daemons.c


/*
 * Copyright 2004-2005, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#include <OS.h>
#include <thread.h>
#include <vm.h>
#include <vm_priv.h>
#include <vm_cache.h>
#include <vm_page.h>
#include <vm_address_space.h>
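
// The page daemon enters the trimming cycle when the number of free pages
// drops below free_memory_low_water and leaves it again once it rises above
// free_memory_high_water (both are page counts, set in vm_daemon_init()).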
bool trimming_cycle;
static addr_t free_memory_low_water;
static addr_t free_memory_high_water;
#if 0
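// Scans up to PAGE_SCAN_QUANTUM pages of the given address space, resuming at
// the area that contains aspace->scan_va. While free_target is positive, pages
// that have not been accessed since the last scan are unmapped and moved to
// the inactive queue; modified pages are moved to the modified queue so they
// can be written back.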
static void
scan_pages(vm_address_space *aspace, addr_t free_target)
{
	vm_area *firstArea;
	vm_area *area;
	vm_page *page;
	addr_t va;
	addr_t pa;
	uint32 flags, flags2;
//	int err;
	int quantum = PAGE_SCAN_QUANTUM;

//	dprintf("scan_pages called on aspace 0x%x, id 0x%x, free_target %d\n", aspace, aspace->id, free_target);

	acquire_sem_etc(aspace->sem, READ_COUNT, 0, 0);

	firstArea = aspace->areas;
	while (firstArea && (firstArea->base + (firstArea->size - 1)) < aspace->scan_va)
		firstArea = firstArea->address_space_next;

	if (!firstArea)
		firstArea = aspace->areas;

	if (!firstArea) {
		release_sem_etc(aspace->sem, READ_COUNT, 0);
		return;
	}

	area = firstArea;

	for (;;) {
		// ignore reserved ranges
		while (area != NULL && area->id == RESERVED_AREA_ID)
			area = area->address_space_next;
		if (area == NULL)
			break;

		// scan the pages in this area
		mutex_lock(&area->cache_ref->lock);
		if (!area->cache_ref->cache->scan_skip) {
			for (va = area->base; va < (area->base + area->size); va += B_PAGE_SIZE) {
				aspace->translation_map.ops->lock(&aspace->translation_map);
				aspace->translation_map.ops->query(&aspace->translation_map, va, &pa, &flags);
				if ((flags & PAGE_PRESENT) == 0) {
					aspace->translation_map.ops->unlock(&aspace->translation_map);
					continue;
				}

				page = vm_lookup_page(pa / B_PAGE_SIZE);
				if (!page) {
					aspace->translation_map.ops->unlock(&aspace->translation_map);
					continue;
				}

				// see if this page is busy; if it is, forget it and move on
				if (page->state == PAGE_STATE_BUSY || page->state == PAGE_STATE_WIRED) {
					aspace->translation_map.ops->unlock(&aspace->translation_map);
					continue;
				}

				flags2 = 0;
				if (free_target > 0) {
					// look for a page we can steal
					if (!(flags & PAGE_ACCESSED) && page->state == PAGE_STATE_ACTIVE) {
						// unmap the page
						aspace->translation_map.ops->unmap(&aspace->translation_map, va, va + B_PAGE_SIZE);

						// flush the TLBs of all CPUs
						aspace->translation_map.ops->flush(&aspace->translation_map);

						// re-query the flags on the old pte, to make sure we have accurate modified bit data
						aspace->translation_map.ops->query(&aspace->translation_map, va, &pa, &flags2);

						// clear the modified and accessed bits on the entries
						aspace->translation_map.ops->clear_flags(&aspace->translation_map, va, PAGE_MODIFIED | PAGE_ACCESSED);

						// decrement the ref count on the page. If we just unmapped it for the last time,
						// put the page on the inactive list
						if (atomic_add(&page->ref_count, -1) == 1) {
							vm_page_set_state(page, PAGE_STATE_INACTIVE);
							free_target--;
						}
					}
				}

				// if the page is modified, but the state is active or inactive, put it on the modified list
				if (((flags & PAGE_MODIFIED) || (flags2 & PAGE_MODIFIED))
					&& (page->state == PAGE_STATE_ACTIVE || page->state == PAGE_STATE_INACTIVE)) {
					vm_page_set_state(page, PAGE_STATE_MODIFIED);
				}

				aspace->translation_map.ops->unlock(&aspace->translation_map);
				if (--quantum == 0)
					break;
			}
		}
		mutex_unlock(&area->cache_ref->lock);

		// move to the next area, wrapping around and stopping if we get back to the first area
		area = area->address_space_next ? area->address_space_next : aspace->areas;
		if (area == firstArea)
			break;
		if (quantum == 0)
			break;
	}

	aspace->scan_va = area ? (firstArea->base + firstArea->size) : aspace->base;
	release_sem_etc(aspace->sem, READ_COUNT, 0);

//	dprintf("exiting scan_pages\n");
}
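
// Kernel daemon loop: every PAGE_DAEMON_INTERVAL it walks all address spaces,
// grows or shrinks each one's working set based on its recent page fault rate,
// toggles the global trimming cycle around the free memory water marks, and
// calls scan_pages() to reclaim pages from address spaces that exceed their
// working set.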
static int32
page_daemon(void *unused)
{
	struct hash_iterator i;
	vm_address_space *old_aspace;
	vm_address_space *aspace;
	addr_t mapped_size;
	addr_t free_memory_target;
	int faults_per_second;
	bigtime_t now;

	dprintf("page daemon starting\n");
	(void)unused;

	for (;;) {
		snooze(PAGE_DAEMON_INTERVAL);

		// scan through all of the address spaces
		vm_address_space_walk_start(&i);
		aspace = vm_address_space_walk_next(&i);
		while (aspace) {
			mapped_size = aspace->translation_map.ops->get_mapped_size(&aspace->translation_map);
//			dprintf("page_daemon: looking at aspace 0x%x, id 0x%x, mapped size %d\n", aspace, aspace->id, mapped_size);

			now = system_time();
			if (now - aspace->last_working_set_adjust > WORKING_SET_ADJUST_INTERVAL) {
				faults_per_second = (aspace->fault_count * 1000000) / (now - aspace->last_working_set_adjust);
//				dprintf(" faults_per_second = %d\n", faults_per_second);
				aspace->last_working_set_adjust = now;
				aspace->fault_count = 0;

				if (faults_per_second > MAX_FAULTS_PER_SECOND
					&& mapped_size >= aspace->working_set_size
					&& aspace->working_set_size < aspace->max_working_set) {
					aspace->working_set_size += WORKING_SET_INCREMENT;
//					dprintf(" new working set size = %d\n", aspace->working_set_size);
				} else if (faults_per_second < MIN_FAULTS_PER_SECOND
					&& mapped_size <= aspace->working_set_size
					&& aspace->working_set_size > aspace->min_working_set) {
					aspace->working_set_size -= WORKING_SET_DECREMENT;
//					dprintf(" new working set size = %d\n", aspace->working_set_size);
				}
			}

			// decide if we need to enter or leave the trimming cycle
			if (!trimming_cycle && vm_page_num_free_pages() < free_memory_low_water)
				trimming_cycle = true;
			else if (trimming_cycle && vm_page_num_free_pages() > free_memory_high_water)
				trimming_cycle = false;

			// scan some pages, trying to free some if needed
			free_memory_target = 0;
			if (trimming_cycle && mapped_size > aspace->working_set_size)
				free_memory_target = mapped_size - aspace->working_set_size;

			scan_pages(aspace, free_memory_target);

			// must hold a ref to the old aspace while we grab the next one,
			// otherwise the iterator becomes out of date.
			old_aspace = aspace;
			aspace = vm_address_space_walk_next(&i);
			vm_put_address_space(old_aspace);
		}
	}
}
#endif
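
// Initializes the free memory water marks used by the (currently disabled)
// page daemon; the kernel thread that would select pages for pageout is not
// started.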
status_t
vm_daemon_init()
{
	thread_id thread;

	trimming_cycle = false;

	// calculate the free memory low and high water at which point we enter/leave trimming phase
	free_memory_low_water = vm_page_num_pages() / 8;
	free_memory_high_water = vm_page_num_pages() / 4;

	// create a kernel thread to select pages for pageout
	//thread = spawn_kernel_thread(&page_daemon, "page daemon", B_FIRST_REAL_TIME_PRIORITY, NULL);
	//send_signal_etc(thread, SIGCONT, B_DO_NOT_RESCHEDULE);

	return B_OK;
}