/*
 * Copyright 2004-2005, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <OS.h>
#include <KernelExport.h>

#include <vm.h>
#include <vm_priv.h>
#include <vm_page.h>
#include <vm_cache.h>
#include <vm_daemons.h>


bool trimming_cycle;

static addr_t free_memory_low_water;
static addr_t free_memory_high_water;


#if 0
static void
scan_pages(vm_address_space *aspace, addr_t free_target)
{
	vm_area *firstArea;
	vm_area *area;
	vm_page *page;
	addr_t va;
	addr_t pa;
	uint32 flags, flags2;
//	int err;
	int quantum = PAGE_SCAN_QUANTUM;

//	dprintf("scan_pages called on aspace 0x%x, id 0x%x, free_target %d\n", aspace, aspace->id, free_target);

	acquire_sem_etc(aspace->sem, READ_COUNT, 0, 0);

	// start at the first area that ends past the last scan position
	firstArea = aspace->areas;
	while (firstArea && (firstArea->base + (firstArea->size - 1)) < aspace->scan_va)
		firstArea = firstArea->address_space_next;

	if (!firstArea)
		firstArea = aspace->areas;

	if (!firstArea) {
		release_sem_etc(aspace->sem, READ_COUNT, 0);
		return;
	}

	area = firstArea;

	for (;;) {
		// ignore reserved ranges
		while (area != NULL && area->id == RESERVED_AREA_ID)
			area = area->address_space_next;
		if (area == NULL)
			break;

		// scan the pages in this area
		mutex_lock(&area->cache_ref->lock);
		if (!area->cache_ref->cache->scan_skip) {
			for (va = area->base; va < (area->base + area->size); va += B_PAGE_SIZE) {
				aspace->translation_map.ops->lock(&aspace->translation_map);
				aspace->translation_map.ops->query(&aspace->translation_map, va, &pa, &flags);
				if ((flags & PAGE_PRESENT) == 0) {
					aspace->translation_map.ops->unlock(&aspace->translation_map);
					continue;
				}

				page = vm_lookup_page(pa / B_PAGE_SIZE);
				if (!page) {
					aspace->translation_map.ops->unlock(&aspace->translation_map);
					continue;
				}

				// see if this page is busy; if it is, let's forget it and move on
				if (page->state == PAGE_STATE_BUSY || page->state == PAGE_STATE_WIRED) {
					aspace->translation_map.ops->unlock(&aspace->translation_map);
					continue;
				}

				flags2 = 0;
				if (free_target > 0) {
					// look for a page we can steal
					if (!(flags & PAGE_ACCESSED) && page->state == PAGE_STATE_ACTIVE) {
						// unmap the page
						aspace->translation_map.ops->unmap(&aspace->translation_map, va, va + B_PAGE_SIZE);

						// flush the TLBs of all CPUs
						aspace->translation_map.ops->flush(&aspace->translation_map);

						// re-query the flags on the old pte, to make sure we have accurate modified bit data
						aspace->translation_map.ops->query(&aspace->translation_map, va, &pa, &flags2);

						// clear the modified and accessed bits on the entries
						aspace->translation_map.ops->clear_flags(&aspace->translation_map, va, PAGE_MODIFIED | PAGE_ACCESSED);

						// decrement the ref count on the page. If we just unmapped it for the last time,
						// put the page on the inactive list
						if (atomic_add(&page->ref_count, -1) == 1) {
							vm_page_set_state(page, PAGE_STATE_INACTIVE);
							free_target--;
						}
					}
				}

				// if the page is modified, but the state is active or inactive, put it on the modified list
				if (((flags & PAGE_MODIFIED) || (flags2 & PAGE_MODIFIED))
					&& (page->state == PAGE_STATE_ACTIVE || page->state == PAGE_STATE_INACTIVE)) {
					vm_page_set_state(page, PAGE_STATE_MODIFIED);
				}

				aspace->translation_map.ops->unlock(&aspace->translation_map);
				if (--quantum == 0)
					break;
			}
		}
		mutex_unlock(&area->cache_ref->lock);

		// move to the next area, wrapping around and stopping if we get back to the first area
		area = area->address_space_next ? area->address_space_next : aspace->areas;
		if (area == firstArea)
			break;

		if (quantum == 0)
			break;
	}
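
	// remember where we stopped, so the next call resumes scanning from here
	// (see the firstArea lookup at the top of this function)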
	aspace->scan_va = area ? (area->base + area->size) : aspace->base;

	release_sem_etc(aspace->sem, READ_COUNT, 0);

//	dprintf("exiting scan_pages\n");
}


static int32
page_daemon(void *unused)
{
	struct hash_iterator i;
	vm_address_space *old_aspace;
	vm_address_space *aspace;
	addr_t mapped_size;
	addr_t free_memory_target;
	int faults_per_second;
	bigtime_t now;

	dprintf("page daemon starting\n");
	(void)unused;

	for (;;) {
		snooze(PAGE_DAEMON_INTERVAL);

		// scan through all of the address spaces
		vm_address_space_walk_start(&i);
		aspace = vm_address_space_walk_next(&i);
		while (aspace) {
			mapped_size = aspace->translation_map.ops->get_mapped_size(&aspace->translation_map);
//			dprintf("page_daemon: looking at aspace 0x%x, id 0x%x, mapped size %d\n", aspace, aspace->id, mapped_size);

			now = system_time();
			if (now - aspace->last_working_set_adjust > WORKING_SET_ADJUST_INTERVAL) {
				// fault_count counts faults since the last adjustment; the elapsed
				// time is in microseconds, hence the factor of 1000000
				faults_per_second = (aspace->fault_count * 1000000) / (now - aspace->last_working_set_adjust);
//				dprintf(" faults_per_second = %d\n", faults_per_second);
				aspace->last_working_set_adjust = now;
				aspace->fault_count = 0;

				if (faults_per_second > MAX_FAULTS_PER_SECOND
					&& mapped_size >= aspace->working_set_size
					&& aspace->working_set_size < aspace->max_working_set) {
					aspace->working_set_size += WORKING_SET_INCREMENT;
//					dprintf(" new working set size = %d\n", aspace->working_set_size);
				} else if (faults_per_second < MIN_FAULTS_PER_SECOND
					&& mapped_size <= aspace->working_set_size
					&& aspace->working_set_size > aspace->min_working_set) {
					aspace->working_set_size -= WORKING_SET_DECREMENT;
//					dprintf(" new working set size = %d\n", aspace->working_set_size);
				}
			}

			// decide if we need to enter or leave the trimming cycle
			if (!trimming_cycle && vm_page_num_free_pages() < free_memory_low_water)
				trimming_cycle = true;
			else if (trimming_cycle && vm_page_num_free_pages() > free_memory_high_water)
				trimming_cycle = false;

			// scan some pages, trying to free some if needed
			free_memory_target = 0;
			if (trimming_cycle && mapped_size > aspace->working_set_size)
				free_memory_target = mapped_size - aspace->working_set_size;

			scan_pages(aspace, free_memory_target);

			// must hold a ref to the old aspace while we grab the next one,
			// otherwise the iterator becomes out of date.
			old_aspace = aspace;
			aspace = vm_address_space_walk_next(&i);
			vm_put_address_space(old_aspace);
		}
	}
}
#endif


status_t
vm_daemon_init(void)
{
//	thread_id thread;

	trimming_cycle = false;

	// calculate the free memory low and high water marks at which we enter/leave the trimming phase
	free_memory_low_water = vm_page_num_pages() / 8;
	free_memory_high_water = vm_page_num_pages() / 4;

	// create a kernel thread to select pages for pageout
//	thread = spawn_kernel_thread(&page_daemon, "page daemon", B_FIRST_REAL_TIME_PRIORITY, NULL);
//	send_signal_etc(thread, SIGCONT, B_DO_NOT_RESCHEDULE);

	return B_OK;
}