* Since the page scanner and thief can work more effectively when no vm_caches
  are locked, there is now a vm_page_reserve_pages() call to ensure upfront
  that a page is there for me when I need it later, even though I may have
  locked some caches by then.
* The vm_soft_fault() routine now makes use of that feature.
* vm_page_allocate_page() now resets vm_page::usage_count, so that the file
  cache no longer needs to do this in read_chunk_into_cache() and
  write_chunk_to_cache().
* In cache_io(), however, it needs to update the usage_count - and it does
  that now. Since non-mapped caches don't have mappings, the page scanner will
  punish the cache pages more strongly than other pages, which is accidentally
  just what we want.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22319 a95241bf-73f2-0310-859d-f6bbb57e9c96
commit 8e0f884c71
parent 8a25e1293b
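For orientation, the calling pattern this change introduces looks roughly like
the sketch below. It is written against the kernel-internal API added in this
commit (vm_page_reserve_pages(), vm_page_unreserve_pages(), and the new
"reserved" argument of vm_page_allocate_page()); the wrapper function itself
and its comments are illustrative, not code from the tree:

	/* Hypothetical caller, modeled on what vm_soft_fault() does below;
	 * not an excerpt from the tree. */
	static status_t
	handle_fault_with_reservation(void)
	{
		// reserve while no vm_cache is locked, so the page scanner and
		// thief can keep stealing pages elsewhere if memory gets tight
		vm_page_reserve_pages(2);

		// ... lock caches, walk the cache chain ...

		// passing "true" draws from the reservation, so the allocation
		// succeeds without waiting even when the free queues run low
		vm_page *page = vm_page_allocate_page(PAGE_STATE_CLEAR, true);

		// ... insert the page into the cache, map it, unlock caches ...

		vm_page_unreserve_pages(2);
		return B_OK;
	}

The usage_count changes follow the same division of labor: the allocator
initializes the count, and only cache_io() has to bump it afterwards, because
file cache pages are never mapped as part of an area.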
@@ -34,7 +34,10 @@ size_t vm_page_num_free_pages(void);
 
 status_t vm_page_write_modified(vm_cache *cache, bool fsReenter);
 
-vm_page *vm_page_allocate_page(int state);
+void vm_page_unreserve_pages(uint32 count);
+void vm_page_reserve_pages(uint32 count);
+
+vm_page *vm_page_allocate_page(int pageState, bool reserved);
 status_t vm_page_allocate_pages(int pageState, vm_page **pages, uint32 numPages);
 vm_page *vm_page_allocate_page_run(int state, addr_t length);
 vm_page *vm_page_at_index(int32 index);
@@ -352,7 +352,7 @@ map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
 		vm_page *page;
 
 		// we need to allocate a pgtable
-		page = vm_page_allocate_page(PAGE_STATE_CLEAR);
+		page = vm_page_allocate_page(PAGE_STATE_CLEAR, false);
 
 		// mark the page WIRED
 		vm_page_set_state(page, PAGE_STATE_WIRED);
src/system/kernel/cache/block_allocator.cpp (vendored): 4 changed lines
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2006, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
+ * Copyright 2005-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
  * Distributed under the terms of the MIT License.
  */
 
@@ -347,7 +347,7 @@ block_range::Allocate(block_cache *cache, block_chunk **_chunk)
 	if (pages[pageBaseIndex] == NULL) {
 		// there are no pages for us yet
 		for (uint32 i = 0; i < numPages; i++) {
-			vm_page *page = vm_page_allocate_page(PAGE_STATE_FREE);
+			vm_page *page = vm_page_allocate_page(PAGE_STATE_FREE, false);
 			if (page == NULL) {
 				// ToDo: handle this gracefully
 				panic("no memory for block!!\n");
src/system/kernel/cache/file_cache.cpp (vendored): 24 changed lines
@@ -538,7 +538,7 @@ read_chunk_into_cache(file_cache_ref *ref, off_t offset, size_t numBytes,
 	// allocate pages for the cache and mark them busy
 	for (size_t pos = 0; pos < numBytes; pos += B_PAGE_SIZE) {
 		vm_page *page = pages[pageIndex++] = vm_page_allocate_page(
-			PAGE_STATE_FREE);
+			PAGE_STATE_FREE, false);
 		if (page == NULL)
 			panic("no more pages!");
 
@@ -612,10 +612,6 @@ read_chunk_into_cache(file_cache_ref *ref, off_t offset, size_t numBytes,
 	// make the pages accessible in the cache
 	for (int32 i = pageIndex; i-- > 0;) {
 		pages[i]->state = PAGE_STATE_ACTIVE;
-		if (pages[i]->usage_count < 0)
-			pages[i]->usage_count = 1;
-		else
-			pages[i]->usage_count++;
 
 		busyConditions[i].Unpublish();
 	}
@@ -697,11 +693,13 @@ write_chunk_to_cache(file_cache_ref *ref, off_t offset, size_t numBytes,
 
 	// allocate pages for the cache and mark them busy
 	for (size_t pos = 0; pos < numBytes; pos += B_PAGE_SIZE) {
-		// ToDo: if space is becoming tight, and this cache is already grown
+		// TODO: if space is becoming tight, and this cache is already grown
 		//	big - shouldn't we better steal the pages directly in that case?
 		//	(a working set like approach for the file cache)
+		// TODO: the pages we allocate here should have been reserved upfront
+		//	in cache_io()
 		vm_page *page = pages[pageIndex++] = vm_page_allocate_page(
-			PAGE_STATE_FREE);
+			PAGE_STATE_FREE, false);
 		busyConditions[pageIndex - 1].Publish(page, "page");
 
 		vm_cache_insert_page(ref->cache, page, offset + pos);
@@ -797,11 +795,6 @@ write_chunk_to_cache(file_cache_ref *ref, off_t offset, size_t numBytes,
 	for (int32 i = pageIndex; i-- > 0;) {
 		busyConditions[i].Unpublish();
 
-		if (pages[i]->usage_count < 0)
-			pages[i]->usage_count = 1;
-		else
-			pages[i]->usage_count++;
-
 		if (writeThrough)
 			pages[i]->state = PAGE_STATE_ACTIVE;
 		else
@@ -960,6 +953,13 @@ cache_io(void *_cacheRef, off_t offset, addr_t buffer, size_t *_size,
 			vm_get_physical_page(page->physical_page_number * B_PAGE_SIZE,
 				&virtualAddress, PHYSICAL_PAGE_CAN_WAIT);
 
+			// Since we don't actually map pages as part of an area, we have
+			// to manually maintain its usage_count
+			if (page->usage_count < 0)
+				page->usage_count = 1;
+			else
+				page->usage_count++;
+
 			// and copy the contents of the page already in memory
 			if (doWrite) {
 				user_memcpy((void *)(virtualAddress + pageOffset),
@@ -1498,7 +1498,7 @@ vm_create_anonymous_area(team_id team, const char *name, void **address,
 #	endif
 				continue;
 #endif
-			vm_page *page = vm_page_allocate_page(PAGE_STATE_CLEAR);
+			vm_page *page = vm_page_allocate_page(PAGE_STATE_CLEAR, false);
 			if (page == NULL) {
 				// this shouldn't really happen, as we reserve the memory upfront
 				panic("couldn't fulfill B_FULL lock!");
@@ -3789,7 +3789,7 @@ fault_find_page(vm_translation_map *map, vm_cache *topCache,
 		if (store->ops->has_page != NULL
 			&& store->ops->has_page(store, cacheOffset)) {
 			// insert a fresh page and mark it busy -- we're going to read it in
-			page = vm_page_allocate_page(PAGE_STATE_FREE);
+			page = vm_page_allocate_page(PAGE_STATE_FREE, true);
 			vm_cache_insert_page(cache, page, cacheOffset);
 
 			ConditionVariable<vm_page> busyCondition;
@@ -3955,7 +3955,7 @@ fault_get_page(vm_translation_map *map, vm_cache *topCache, off_t cacheOffset,
 	if (page == NULL) {
 		// we still haven't found a page, so we allocate a clean one
 
-		page = vm_page_allocate_page(PAGE_STATE_CLEAR);
+		page = vm_page_allocate_page(PAGE_STATE_CLEAR, true);
 		FTRACE(("vm_soft_fault: just allocated page 0x%lx\n", page->physical_page_number));
 
 		// Insert the new page into our cache, and replace it with the dummy page if necessary
@@ -4009,7 +4009,7 @@ fault_get_page(vm_translation_map *map, vm_cache *topCache, off_t cacheOffset,
 		// ToDo: if memory is low, it might be a good idea to steal the page
 		// from our source cache - if possible, that is
 		FTRACE(("get new page, copy it, and put it into the topmost cache\n"));
-		page = vm_page_allocate_page(PAGE_STATE_FREE);
+		page = vm_page_allocate_page(PAGE_STATE_FREE, true);
 #if 0
 		if (cacheOffset == 0x12000)
 			dprintf("%ld: copy page %p to page %p from cache %p to cache %p\n", find_thread(NULL),
@@ -4176,6 +4176,11 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
 	// The top most cache has no fault handler, so let's see if the cache or its sources
 	// already have the page we're searching for (we're going from top to bottom)
 
+	vm_page_reserve_pages(2);
+		// we may need up to 2 pages - reserving them upfront makes sure
+		// we don't have any cache locked, so that the page daemon/thief
+		// can do their job without problems
+
 	vm_translation_map *map = &addressSpace->translation_map;
 	vm_dummy_page dummyPage;
 	dummyPage.cache = NULL;
@@ -4230,6 +4235,7 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
 	}
 
 	vm_cache_release_ref(topCache);
+	vm_page_unreserve_pages(2);
 
 	return status;
 }
@@ -54,6 +54,7 @@ static page_queue sActivePageQueue;
 static vm_page *sPages;
 static addr_t sPhysicalPageOffset;
 static size_t sNumPages;
+static size_t sReservedPages;
 
 static ConditionVariable<page_queue> sFreePageCondition;
 static spinlock sPageLock;
@@ -468,7 +469,7 @@ set_page_state_nolock(vm_page *page, int pageState)
 	}
 
 	if (pageState == PAGE_STATE_CLEAR || pageState == PAGE_STATE_FREE) {
-		if (sFreePageQueue.count + sClearPageQueue.count == 0)
+		if (sFreePageQueue.count + sClearPageQueue.count <= sReservedPages)
 			sFreePageCondition.NotifyAll();
 
 		if (page->cache != NULL)
@@ -1053,11 +1054,43 @@ vm_mark_page_range_inuse(addr_t startPage, addr_t length)
 }
 
 
+void
+vm_page_unreserve_pages(uint32 count)
+{
+	// TODO: we may want to have a "canWait" argument
+	InterruptsSpinLocker locker(sPageLock);
+	ASSERT(sReservedPages >= count);
+
+	sReservedPages -= count;
+
+	if (vm_page_num_free_pages() <= sReservedPages)
+		sFreePageCondition.NotifyAll();
+}
+
+
+void
+vm_page_reserve_pages(uint32 count)
+{
+	InterruptsSpinLocker locker(sPageLock);
+
+	sReservedPages += count;
+	size_t freePages = vm_page_num_free_pages();
+	if (sReservedPages < freePages)
+		return;
+
+	ConditionVariableEntry<page_queue> freeConditionEntry;
+	freeConditionEntry.Add(&sFreePageQueue);
+	vm_low_memory(sReservedPages - freePages);
+
+	// we need to wait until new pages become available
+	locker.Unlock();
+
+	freeConditionEntry.Wait();
+}
+
+
 vm_page *
-vm_page_allocate_page(int pageState)
+vm_page_allocate_page(int pageState, bool reserved)
 {
 	ConditionVariableEntry<page_queue> freeConditionEntry;
 	page_queue *queue;
 	page_queue *otherQueue;
@@ -1079,28 +1112,32 @@ vm_page_allocate_page(int pageState)
 
 	vm_page *page = NULL;
 	while (true) {
-		page = dequeue_page(queue);
-		if (page == NULL) {
-#ifdef DEBUG
-			if (queue->count != 0)
-				panic("queue %p corrupted, count = %d\n", queue, queue->count);
-#endif
-
-			// if the primary queue was empty, grap the page from the
-			// secondary queue
-			page = dequeue_page(otherQueue);
-			if (page == NULL) {
-#ifdef DEBUG
-				if (otherQueue->count != 0) {
-					panic("other queue %p corrupted, count = %d\n", otherQueue,
-						otherQueue->count);
-				}
-#endif
-
-				freeConditionEntry.Add(&sFreePageQueue);
-				vm_low_memory(1);
+		if (reserved || sReservedPages < vm_page_num_free_pages()) {
+			page = dequeue_page(queue);
+			if (page == NULL) {
+#ifdef DEBUG
+				if (queue->count != 0)
+					panic("queue %p corrupted, count = %d\n", queue, queue->count);
+#endif
+
+				// if the primary queue was empty, grap the page from the
+				// secondary queue
+				page = dequeue_page(otherQueue);
 			}
 		}
 
+		if (page == NULL) {
+#ifdef DEBUG
+			if (otherQueue->count != 0) {
+				panic("other queue %p corrupted, count = %d\n", otherQueue,
+					otherQueue->count);
+			}
+#endif
+
+			freeConditionEntry.Add(&sFreePageQueue);
+			vm_low_memory(sReservedPages + 1);
+		}
+
 		if (page != NULL)
 			break;
@@ -1117,6 +1154,7 @@ vm_page_allocate_page(int pageState)
 
 	int oldPageState = page->state;
 	page->state = PAGE_STATE_BUSY;
+	page->usage_count = 2;
 
 	enqueue_page(&sActivePageQueue, page);
 
||||
@ -1142,7 +1180,7 @@ vm_page_allocate_pages(int pageState, vm_page **pages, uint32 numPages)
|
||||
uint32 i;
|
||||
|
||||
for (i = 0; i < numPages; i++) {
|
||||
pages[i] = vm_page_allocate_page(pageState);
|
||||
pages[i] = vm_page_allocate_page(pageState, false);
|
||||
if (pages[i] == NULL) {
|
||||
// allocation failed, we need to free what we already have
|
||||
while (i-- > 0)
|
||||
@@ -1185,6 +1223,7 @@ vm_page_allocate_page_run(int pageState, addr_t length)
 				sPages[start + i].is_cleared
 					= sPages[start + i].state == PAGE_STATE_CLEAR;
 				set_page_state_nolock(&sPages[start + i], PAGE_STATE_BUSY);
+				sPages[i].usage_count = 2;
 			}
 			firstPage = &sPages[start];
 			break;