bonefish+axeld:

* Removed the vm_cache/vm_store ref_count duality that, besides being a bit
  ugly, also created the page daemon cache retrieval problem: now, only areas
  (and cache consumers) retrieve a reference to the store (and therefore, the
  vnode). The page daemon doesn't need to care about this at all anymore, and
  the pseudo references of the vm_cache could be removed again (see the first
  sketch after this list).
* Rearranged vnode deletion such that a vnode's ID can be reused directly after
  fs_remove_vnode() has been called.
* vm_page_allocate_page() no longer panics when it runs out of pages, but just
  waits for new pages to become available using the new sFreePageCondition
  condition variable - to make sure this happens in an acceptable time frame,
  it'll trigger a run of the low memory handlers (see the second sketch after
  this list).
* Implemented a page_thief() that steals inactive pages from caches and puts
  them into the free queue. It runs as a low memory handler (see the third
  sketch after this list).
* The file cache now sets the usage count on the pages it inserts into the
  cache (this still needs some rework, though: cache_io() doesn't do it yet).
* Instead of panicking, the kernel will currently deadlock in low memory
  situations, since BFS does a bit too much in bfs_release_vnode().
* Some minor cleanup.
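
First, a minimal, self-contained sketch of the new reference rule. The types
and names (Store, Cache, InsertArea(), ...) are illustrative stand-ins, not the
kernel's vm_store/vm_cache API: the store - and with it the vnode - is only
referenced while areas or consumer caches are attached to the cache, so a plain
cache reference (which is all the page daemon ever takes) never touches the
vnode.

#include <atomic>
#include <cassert>

struct Store {
    std::atomic<int> refCount{0};   // in the kernel: the vnode reference

    void AcquireRef() { refCount.fetch_add(1); }
    void ReleaseRef() { refCount.fetch_sub(1); }
};

struct Cache {
    explicit Cache(Store* s) : store(s) {}

    // Attaching an area (or a consumer cache) pins the store/vnode ...
    void InsertArea() { areaCount++; store->AcquireRef(); }
    // ... and detaching it releases that reference again.
    void RemoveArea() { assert(areaCount > 0); areaCount--; store->ReleaseRef(); }

    // Plain cache references no longer touch the store at all, so the
    // pseudo references the page daemon used to need are gone.
    void AcquireRef() { refCount.fetch_add(1); }
    void ReleaseRef() { refCount.fetch_sub(1); }

    Store* store;
    std::atomic<int> refCount{1};
    int areaCount = 0;
};

In the diff below, this corresponds to vm_cache_insert_area_locked() and
vm_cache_add_consumer_locked() acquiring the store reference, and
vm_cache_remove_area() and vm_cache_remove_consumer() releasing it.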
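
Second, a minimal sketch of the allocation change, written against standard C++
threading primitives instead of the kernel's ConditionVariable and low memory
manager (PagePool, Allocate(), Free() and the lowMemoryHandler callback are
made-up names): when the free list is empty, the allocator nudges a reclaimer
and blocks on a condition variable until Free() signals new pages, instead of
panicking.

#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>

struct Page { int index; };

class PagePool {
public:
    PagePool(int count, std::function<void()> lowMemoryHandler)
        : fLowMemoryHandler(std::move(lowMemoryHandler))
    {
        for (int i = 0; i < count; i++)
            fFreePages.push_back(Page{i});
    }

    Page Allocate()
    {
        std::unique_lock<std::mutex> lock(fLock);
        while (fFreePages.empty()) {
            // Nudge the reclaimer (the kernel calls vm_low_memory(), which
            // schedules the registered low memory handlers). The handler must
            // return pages from another thread via Free(); it must not call
            // back into the pool from here, since the lock is still held.
            if (fLowMemoryHandler)
                fLowMemoryHandler();
            // Block until Free() reports new pages - the old code would have
            // panicked at this point.
            fFreeCondition.wait(lock);
        }
        Page page = fFreePages.front();
        fFreePages.pop_front();
        return page;
    }

    void Free(Page page)
    {
        {
            std::lock_guard<std::mutex> lock(fLock);
            fFreePages.push_back(page);
        }
        // Wake every thread waiting in Allocate().
        fFreeCondition.notify_all();
    }

private:
    std::mutex fLock;
    std::condition_variable fFreeCondition;
    std::deque<Page> fFreePages;
    std::function<void()> fLowMemoryHandler;
};

In the kernel, the wakeup lives in set_page_state_nolock(): when a page moves
to the free or clear queue while both queues were empty,
sFreePageCondition.NotifyAll() is called.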
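
Third, a standalone sketch of the page_thief() idea, again with simplified
stand-in types (Page, PageState, LowMemoryLevel): the handler walks the active
queue and frees inactive pages whose usage count is low enough, getting more
aggressive as the memory pressure level rises. The real handler additionally
has to acquire the page's cache, lock it, and re-check the page state before it
may actually steal the page.

#include <cstdint>
#include <vector>

enum PageState { kActive, kInactive, kFree };

struct Page {
    PageState state;
    int32_t usageCount;
};

enum LowMemoryLevel { kNote, kWarning, kCritical };

static void PageThief(std::vector<Page>& activeQueue, LowMemoryLevel level)
{
    uint32_t steal = 10;   // how many pages to free
    int32_t score = -20;   // highest usage count a victim may have
    switch (level) {
        case kNote:     steal = 10;  score = -20; break;
        case kWarning:  steal = 50;  score = -5;  break;
        case kCritical: steal = 500; score = -1;  break;
    }

    for (Page& page : activeQueue) {
        if (steal == 0)
            break;
        // Only inactive pages that haven't been touched for a while qualify.
        if (page.state != kInactive || page.usageCount > score)
            continue;
        // Stand-in for vm_cache_remove_page() + vm_page_set_state(..., FREE).
        page.state = kFree;
        steal--;
    }
}

As the diff shows, the real handler is registered with
register_low_memory_handler(page_thief, NULL, 0) in vm_page_init_post_thread().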


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22315 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler 2007-09-26 00:20:23 +00:00
parent f9057a35a0
commit 6d4aea4796
10 changed files with 284 additions and 175 deletions

View File

@ -47,7 +47,7 @@ extern void cache_prefetch(dev_t mountID, ino_t vnodeID, off_t offset, size_t si
extern status_t file_cache_init_post_boot_device(void);
extern status_t file_cache_init(void);
extern vm_store *vm_create_vnode_store(void *vnode);
extern vm_store *vm_create_vnode_store(struct vnode *vnode);
#ifdef __cplusplus
}

View File

@ -31,6 +31,7 @@ struct vm_cache;
struct file_descriptor;
struct selectsync;
struct pollfd;
struct vnode;
/** The I/O context of a process/team, holds the fd array among others */

View File

@ -15,6 +15,7 @@
struct kernel_args;
struct team;
struct vnode;
#ifdef __cplusplus
@ -65,7 +66,7 @@ area_id vm_clone_area(team_id team, const char *name, void **address,
uint32 addressSpec, uint32 protection, uint32 mapping,
area_id sourceArea);
status_t vm_delete_area(team_id aid, area_id id);
status_t vm_create_vnode_cache(void *vnode, vm_cache **_cache);
status_t vm_create_vnode_cache(struct vnode *vnode, vm_cache **_cache);
vm_area *vm_area_lookup(vm_address_space *addressSpace, addr_t address);
status_t vm_set_area_memory_type(area_id id, addr_t physicalBase, uint32 type);
status_t vm_get_page_mapping(team_id team, addr_t vaddr, addr_t *paddr);

View File

@ -612,6 +612,11 @@ read_chunk_into_cache(file_cache_ref *ref, off_t offset, size_t numBytes,
// make the pages accessible in the cache
for (int32 i = pageIndex; i-- > 0;) {
pages[i]->state = PAGE_STATE_ACTIVE;
if (pages[i]->usage_count < 0)
pages[i]->usage_count = 1;
else
pages[i]->usage_count++;
busyConditions[i].Unpublish();
}
@ -792,6 +797,11 @@ write_chunk_to_cache(file_cache_ref *ref, off_t offset, size_t numBytes,
for (int32 i = pageIndex; i-- > 0;) {
busyConditions[i].Unpublish();
if (pages[i]->usage_count < 0)
pages[i]->usage_count = 1;
else
pages[i]->usage_count++;
if (writeThrough)
pages[i]->state = PAGE_STATE_ACTIVE;
else
@ -1248,17 +1258,10 @@ file_cache_create(dev_t mountID, ino_t vnodeID, off_t size, int fd)
if (vfs_lookup_vnode(mountID, vnodeID, &ref->vnode) != B_OK)
goto err2;
// Gets (usually creates) the cache for the node - note, this does grab a
// reference to the node...
// Gets (usually creates) the cache for the node
if (vfs_get_vnode_cache(ref->vnode, &ref->cache, true) != B_OK)
goto err2;
// ... that we don't need, and therefore release it again.
// Our caller already holds a reference to the vnode; it will destroy us
// when the last one goes away (which, of course, can only ever happen if
// we don't grab an extra reference).
vfs_put_vnode(ref->vnode);
ref->cache->virtual_size = size;
((vnode_store *)ref->cache->store)->file_cache_ref = ref;
return ref;

View File

@ -1,5 +1,5 @@
/*
* Copyright 2004-2006, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*/
@ -112,7 +112,7 @@ static vm_store_ops sStoreOps = {
extern "C" vm_store *
vm_create_vnode_store(void *vnode)
vm_create_vnode_store(struct vnode *vnode)
{
vnode_store *store = (vnode_store *)malloc(sizeof(struct vnode_store));
if (store == NULL)

View File

@ -1,5 +1,5 @@
/*
* Copyright 2004, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*/
#ifndef VNODE_STORE_H
@ -10,9 +10,9 @@
struct vnode_store {
vm_store vm;
void *vnode;
void *file_cache_ref;
vm_store vm;
struct vnode* vnode;
void* file_cache_ref;
};
#endif /* VNODE_STORE_H */

View File

@ -685,6 +685,12 @@ free_vnode(struct vnode *vnode, bool reenter)
// count, so that it will neither become negative nor 0.
vnode->ref_count = 2;
// The file system has removed the resources of the vnode now, so we can
// make it available again (and remove the busy vnode from the hash)
mutex_lock(&sVnodeMutex);
hash_remove(sVnodeTable, vnode);
mutex_unlock(&sVnodeMutex);
// TODO: Usually, when the vnode is unreferenced, no one can get hold of the
// cache either (i.e. no one can get a cache reference while we're deleting
// the vnode).. This is, however, not the case for the page daemon. It gets
@ -692,18 +698,15 @@ free_vnode(struct vnode *vnode, bool reenter)
// vnode reference while we're deleting the vnode.
if (!vnode->unpublished) {
if (vnode->remove)
FS_CALL(vnode, remove_vnode)(vnode->mount->cookie, vnode->private_node, reenter);
else
FS_CALL(vnode, put_vnode)(vnode->mount->cookie, vnode->private_node, reenter);
if (vnode->remove) {
FS_CALL(vnode, remove_vnode)(vnode->mount->cookie,
vnode->private_node, reenter);
} else {
FS_CALL(vnode, put_vnode)(vnode->mount->cookie, vnode->private_node,
reenter);
}
}
// The file system has removed the resources of the vnode now, so we can
// make it available again (and remove the busy vnode from the hash)
mutex_lock(&sVnodeMutex);
hash_remove(sVnodeTable, vnode);
mutex_unlock(&sVnodeMutex);
// if we have a vm_cache attached, remove it
if (vnode->cache)
vm_cache_release_ref(vnode->cache);
@ -3116,12 +3119,11 @@ vfs_write_pages(void *_vnode, void *cookie, off_t pos, const iovec *vecs, size_t
}
/** Gets the vnode's vm_cache object. If it didn't have one, it will be
* created if \a allocate is \c true.
* In case it's successful, it will also grab a reference to the cache
* it returns (and therefore, one from the \a vnode in question as well).
*/
/*! Gets the vnode's vm_cache object. If it didn't have one, it will be
created if \a allocate is \c true.
In case it's successful, it will also grab a reference to the cache
it returns.
*/
extern "C" status_t
vfs_get_vnode_cache(void *_vnode, vm_cache **_cache, bool allocate)
{
@ -4222,9 +4224,9 @@ common_fcntl(int fd, int op, uint32 argument, bool kernel)
// O_CLOEXEC is the only flag available at this time
mutex_lock(&context->io_mutex);
fd_set_close_on_exec(context, fd, argument & FD_CLOEXEC);
fd_set_close_on_exec(context, fd, (argument & FD_CLOEXEC) != 0);
mutex_unlock(&context->io_mutex);
status = B_OK;
break;
}

View File

@ -1761,7 +1761,7 @@ vm_create_null_area(team_id team, const char *name, void **address,
The vnode has to be marked busy when calling this function.
*/
status_t
vm_create_vnode_cache(void *vnode, struct vm_cache **_cache)
vm_create_vnode_cache(struct vnode *vnode, struct vm_cache **_cache)
{
status_t status;
@ -1787,11 +1787,10 @@ err1:
}
/** Will map the file at the path specified by \a name to an area in memory.
* The file will be mirrored beginning at the specified \a offset. The \a offset
* and \a size arguments have to be page aligned.
*/
/*! Will map the file at the path specified by \a name to an area in memory.
The file will be mirrored beginning at the specified \a offset. The \a offset
and \a size arguments have to be page aligned.
*/
static area_id
_vm_map_file(team_id team, const char *name, void **_address, uint32 addressSpec,
size_t size, uint32 protection, uint32 mapping, const char *path,
@ -1827,13 +1826,10 @@ _vm_map_file(team_id team, const char *name, void **_address, uint32 addressSpec
// ToDo: this only works for file systems that use the file cache
status = vfs_get_vnode_cache(vnode, &cache, false);
vfs_put_vnode(vnode);
// we don't need this vnode anymore - if the above call was
// successful, the store already has a ref to it
if (status < B_OK)
if (status < B_OK) {
vfs_put_vnode(vnode);
return status;
}
mutex_lock(&cache->lock);
@ -1842,6 +1838,10 @@ _vm_map_file(team_id team, const char *name, void **_address, uint32 addressSpec
mutex_unlock(&cache->lock);
vfs_put_vnode(vnode);
// we don't need this vnode anymore - if the above call was
// successful, the store already has a ref to it
if (status < B_OK || mapping == REGION_PRIVATE_MAP) {
// map_backing_store() cannot know we no longer need the ref
vm_cache_release_ref(cache);
@ -2539,6 +2539,9 @@ vm_map_page(vm_area *area, vm_page *page, addr_t address, uint32 protection)
area->mappings.Add(mapping);
}
if (page->usage_count < 0)
page->usage_count = 1;
if (page->state != PAGE_STATE_MODIFIED)
vm_page_set_state(page, PAGE_STATE_ACTIVE);

View File

@ -80,16 +80,14 @@ page_hash_func(void *_p, const void *_key, uint32 range)
}
/*! Acquires a pseudo reference to a cache yet unreferenced by the caller. The
/*! Acquires a reference to a cache yet unreferenced by the caller. The
caller must make sure, that the cache is not deleted, e.g. by holding the
cache's source cache lock or by holding the page cache table lock while the
cache is still referred to by a page. To get a real reference, the caller
must subsequently call vm_cache_acquire_ref() and decrement the cache's ref
count manually afterwards.
Returns \c true, if the pseudo reference could be acquired.
cache is still referred to by a page.
Returns \c true, if the reference could be acquired.
*/
static inline bool
acquire_unreferenced_cache_pseudo_ref(vm_cache* cache)
acquire_unreferenced_cache_ref(vm_cache* cache)
{
while (true) {
int32 count = cache->ref_count;
@ -102,6 +100,73 @@ acquire_unreferenced_cache_pseudo_ref(vm_cache* cache)
}
static void
delete_cache(vm_cache *cache)
{
if (cache->areas != NULL)
panic("cache %p to be deleted still has areas", cache);
if (!list_is_empty(&cache->consumers))
panic("cache %p to be deleted still has consumers", cache);
#if DEBUG_CACHE_LIST
int state = disable_interrupts();
acquire_spinlock(&sDebugCacheListLock);
if (cache->debug_previous)
cache->debug_previous->debug_next = cache->debug_next;
if (cache->debug_next)
cache->debug_next->debug_previous = cache->debug_previous;
if (cache == gDebugCacheList)
gDebugCacheList = cache->debug_next;
release_spinlock(&sDebugCacheListLock);
restore_interrupts(state);
#endif
// delete the cache's backing store
cache->store->ops->destroy(cache->store);
// free all of the pages in the cache
vm_page *page = cache->page_list;
while (page) {
vm_page *oldPage = page;
int state;
page = page->cache_next;
if (!oldPage->mappings.IsEmpty() || oldPage->wired_count != 0) {
panic("remove page %p from cache %p: page still has mappings!\n",
oldPage, cache);
}
// remove it from the hash table
state = disable_interrupts();
acquire_spinlock(&sPageCacheTableLock);
hash_remove(sPageCacheTable, oldPage);
oldPage->cache = NULL;
// TODO: we also need to remove all of the page's mappings!
release_spinlock(&sPageCacheTableLock);
restore_interrupts(state);
TRACE(("vm_cache_release_ref: freeing page 0x%lx\n",
oldPage->physical_page_number));
vm_page_set_state(oldPage, PAGE_STATE_FREE);
}
// remove the ref to the source
if (cache->source)
vm_cache_remove_consumer(cache->source, cache);
mutex_destroy(&cache->lock);
free(cache);
}
// #pragma mark -
status_t
vm_cache_init(kernel_args *args)
{
@ -181,9 +246,6 @@ vm_cache_acquire_ref(vm_cache *cache)
if (cache == NULL)
panic("vm_cache_acquire_ref: passed NULL\n");
if (cache->store->ops->acquire_ref != NULL)
cache->store->ops->acquire_ref(cache->store);
atomic_add(&cache->ref_count, 1);
}
@ -191,8 +253,6 @@ vm_cache_acquire_ref(vm_cache *cache)
void
vm_cache_release_ref(vm_cache *cache)
{
vm_page *page;
TRACE(("vm_cache_release_ref: cacheRef %p, ref will be %ld\n",
cache, cache->ref_count - 1));
@ -200,10 +260,6 @@ vm_cache_release_ref(vm_cache *cache)
panic("vm_cache_release_ref: passed NULL\n");
if (atomic_add(&cache->ref_count, -1) != 1) {
// the store ref is only released on the "working" refs, not
// on the initial one (this is vnode specific)
if (cache->store->ops->release_ref)
cache->store->ops->release_ref(cache->store);
#if 0
{
// count min references to see if everything is okay
@ -237,64 +293,7 @@ vm_cache_release_ref(vm_cache *cache)
// delete this cache
if (cache->areas != NULL)
panic("cache %p to be deleted still has areas", cache);
if (!list_is_empty(&cache->consumers))
panic("cache %p to be deleted still has consumers", cache);
#if DEBUG_CACHE_LIST
int state = disable_interrupts();
acquire_spinlock(&sDebugCacheListLock);
if (cache->debug_previous)
cache->debug_previous->debug_next = cache->debug_next;
if (cache->debug_next)
cache->debug_next->debug_previous = cache->debug_previous;
if (cache == gDebugCacheList)
gDebugCacheList = cache->debug_next;
release_spinlock(&sDebugCacheListLock);
restore_interrupts(state);
#endif
// delete the cache's backing store
cache->store->ops->destroy(cache->store);
// free all of the pages in the cache
page = cache->page_list;
while (page) {
vm_page *oldPage = page;
int state;
page = page->cache_next;
if (!oldPage->mappings.IsEmpty() || oldPage->wired_count != 0) {
panic("remove page %p from cache %p: page still has mappings!\n",
oldPage, cache);
}
// remove it from the hash table
state = disable_interrupts();
acquire_spinlock(&sPageCacheTableLock);
hash_remove(sPageCacheTable, oldPage);
oldPage->cache = NULL;
// TODO: we also need to remove all of the page's mappings!
release_spinlock(&sPageCacheTableLock);
restore_interrupts(state);
TRACE(("vm_cache_release_ref: freeing page 0x%lx\n",
oldPage->physical_page_number));
vm_page_set_state(oldPage, PAGE_STATE_FREE);
}
// remove the ref to the source
if (cache->source)
vm_cache_remove_consumer(cache->source, cache);
mutex_destroy(&cache->lock);
free(cache);
delete_cache(cache);
}
@ -307,16 +306,10 @@ vm_cache_acquire_page_cache_ref(vm_page* page)
if (cache == NULL)
return NULL;
// get a pseudo reference
if (!acquire_unreferenced_cache_pseudo_ref(cache))
// get a reference
if (!acquire_unreferenced_cache_ref(cache))
return NULL;
locker.Unlock();
// turn it into a real reference
vm_cache_acquire_ref(cache);
atomic_add(&cache->ref_count, -1);
return cache;
}
@ -541,6 +534,9 @@ vm_cache_remove_consumer(vm_cache *cache, vm_cache *consumer)
list_remove_item(&cache->consumers, consumer);
consumer->source = NULL;
if (cache->store->ops->release_ref)
cache->store->ops->release_ref(cache->store);
if (cache->areas == NULL && cache->source != NULL
&& !list_is_empty(&cache->consumers)
&& cache->consumers.link.next == cache->consumers.link.prev) {
@ -549,14 +545,7 @@ vm_cache_remove_consumer(vm_cache *cache, vm_cache *consumer)
consumer = (vm_cache *)list_get_first_item(&cache->consumers);
bool merge = acquire_unreferenced_cache_pseudo_ref(consumer);
if (merge) {
// We managed to increment the reference count, but that's not a
// full reference. We get a real one now and decrement the ref count
// again.
vm_cache_acquire_ref(consumer);
atomic_add(&consumer->ref_count, -1);
}
bool merge = acquire_unreferenced_cache_ref(consumer);
// In case we managed to grab a reference to the consumerRef,
// this doesn't guarantee that we get the cache we wanted
@ -696,6 +685,9 @@ vm_cache_add_consumer_locked(vm_cache *cache, vm_cache *consumer)
list_add_item(&cache->consumers, consumer);
vm_cache_acquire_ref(cache);
if (cache->store->ops->acquire_ref != NULL)
cache->store->ops->acquire_ref(cache->store);
}
@ -714,6 +706,9 @@ vm_cache_insert_area_locked(vm_cache *cache, vm_area *area)
area->cache_prev = NULL;
cache->areas = area;
if (cache->store->ops->acquire_ref != NULL)
cache->store->ops->acquire_ref(cache->store);
return B_OK;
}
@ -730,6 +725,9 @@ vm_cache_remove_area(vm_cache *cache, vm_area *area)
if (cache->areas == area)
cache->areas = area->cache_next;
if (cache->store->ops->release_ref)
cache->store->ops->release_ref(cache->store);
mutex_unlock(&cache->lock);
return B_OK;
}

View File

@ -6,6 +6,9 @@
* Distributed under the terms of the NewOS License.
*/
#include <signal.h>
#include <string.h>
#include <stdlib.h>
#include <KernelExport.h>
#include <OS.h>
@ -16,15 +19,14 @@
#include <condition_variable.h>
#include <kernel.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <vm.h>
#include <vm_address_space.h>
#include <vm_low_memory.h>
#include <vm_priv.h>
#include <vm_page.h>
#include <vm_cache.h>
#include <signal.h>
#include <string.h>
#include <stdlib.h>
//#define TRACE_VM_PAGE
#ifdef TRACE_VM_PAGE
@ -53,6 +55,7 @@ static vm_page *sPages;
static addr_t sPhysicalPageOffset;
static size_t sNumPages;
static ConditionVariable<page_queue> sFreePageCondition;
static spinlock sPageLock;
static sem_id modified_pages_available;
@ -414,8 +417,8 @@ static int dump_free_page_table(int argc, char **argv)
static status_t
set_page_state_nolock(vm_page *page, int pageState)
{
page_queue *from_q = NULL;
page_queue *to_q = NULL;
page_queue *fromQueue = NULL;
page_queue *toQueue = NULL;
switch (page->state) {
case PAGE_STATE_BUSY:
@ -423,16 +426,16 @@ set_page_state_nolock(vm_page *page, int pageState)
case PAGE_STATE_INACTIVE:
case PAGE_STATE_WIRED:
case PAGE_STATE_UNUSED:
from_q = &sActivePageQueue;
fromQueue = &sActivePageQueue;
break;
case PAGE_STATE_MODIFIED:
from_q = &sModifiedPageQueue;
fromQueue = &sModifiedPageQueue;
break;
case PAGE_STATE_FREE:
from_q = &sFreePageQueue;
fromQueue = &sFreePageQueue;
break;
case PAGE_STATE_CLEAR:
from_q = &sClearPageQueue;
fromQueue = &sClearPageQueue;
break;
default:
panic("vm_page_set_state: vm_page %p in invalid state %d\n", page, page->state);
@ -442,10 +445,6 @@ set_page_state_nolock(vm_page *page, int pageState)
if (page->cache != NULL)
panic("free page %p has cache", page);
}
if (pageState == PAGE_STATE_CLEAR || pageState == PAGE_STATE_FREE) {
if (page->cache != NULL)
panic("to be freed page %p has cache", page);
}
switch (pageState) {
case PAGE_STATE_BUSY:
@ -453,22 +452,31 @@ set_page_state_nolock(vm_page *page, int pageState)
case PAGE_STATE_INACTIVE:
case PAGE_STATE_WIRED:
case PAGE_STATE_UNUSED:
to_q = &sActivePageQueue;
toQueue = &sActivePageQueue;
break;
case PAGE_STATE_MODIFIED:
to_q = &sModifiedPageQueue;
toQueue = &sModifiedPageQueue;
break;
case PAGE_STATE_FREE:
to_q = &sFreePageQueue;
toQueue = &sFreePageQueue;
break;
case PAGE_STATE_CLEAR:
to_q = &sClearPageQueue;
toQueue = &sClearPageQueue;
break;
default:
panic("vm_page_set_state: invalid target state %d\n", pageState);
}
if (pageState == PAGE_STATE_CLEAR || pageState == PAGE_STATE_FREE) {
if (sFreePageQueue.count + sClearPageQueue.count == 0)
sFreePageCondition.NotifyAll();
if (page->cache != NULL)
panic("to be freed page %p has cache", page);
}
page->state = pageState;
move_page_to_queue(from_q, to_q, page);
move_page_to_queue(fromQueue, toQueue, page);
return B_OK;
}
@ -661,6 +669,82 @@ write_page(vm_page *page, bool fsReenter)
}
static void
page_thief(void* /*unused*/, int32 level)
{
uint32 steal;
int32 score;
switch (level) {
default:
case B_LOW_MEMORY_NOTE:
steal = 10;
score = -20;
break;
case B_LOW_MEMORY_WARNING:
steal = 50;
score = -5;
break;
case B_LOW_MEMORY_CRITICAL:
steal = 500;
score = -1;
break;
}
vm_page* page = NULL;
InterruptsSpinLocker locker(sPageLock);
while (steal > 0) {
if (!locker.IsLocked())
locker.Lock();
// find a candidate to steal from the inactive queue
for (int32 i = sActivePageQueue.count; i-- > 0;) {
// move page to the head of the queue so that we don't
// scan it again directly
page = dequeue_page(&sActivePageQueue);
enqueue_page(&sActivePageQueue, page);
if (page->state == PAGE_STATE_INACTIVE
&& page->usage_count <= score)
break;
}
if (page == NULL) {
if (score == 0)
break;
score = 0;
continue;
}
locker.Unlock();
// try to lock the page's cache
vm_cache* cache = vm_cache_acquire_page_cache_ref(page);
if (cache == NULL)
continue;
if (mutex_trylock(&cache->lock) != B_OK
|| page->state != PAGE_STATE_INACTIVE) {
vm_cache_release_ref(cache);
continue;
}
// we can now steal this page
vm_cache_remove_page(cache, page);
vm_page_set_state(page, PAGE_STATE_FREE);
steal--;
mutex_unlock(&cache->lock);
vm_cache_release_ref(cache);
}
}
// #pragma mark - private kernel API
@ -904,6 +988,11 @@ vm_page_init_post_thread(kernel_args *args)
tid = thread_create_kernel_thread("pageout daemon", &pageout_daemon, B_FIRST_REAL_TIME_PRIORITY + 1);
thread_resume_thread(tid);
#endif
new (&sFreePageCondition) ConditionVariable<page_queue>;
sFreePageCondition.Publish(&sFreePageQueue, "free page");
register_low_memory_handler(page_thief, NULL, 0);
return B_OK;
}
@ -967,6 +1056,9 @@ vm_mark_page_range_inuse(addr_t startPage, addr_t length)
vm_page *
vm_page_allocate_page(int pageState)
{
// TODO: we may want to have a "canWait" argument
ConditionVariableEntry<page_queue> freeConditionEntry;
page_queue *queue;
page_queue *otherQueue;
@ -983,33 +1075,43 @@ vm_page_allocate_page(int pageState)
return NULL; // invalid
}
cpu_status state = disable_interrupts();
acquire_spinlock(&sPageLock);
InterruptsSpinLocker locker(sPageLock);
vm_page *page = dequeue_page(queue);
if (page == NULL) {
#ifdef DEBUG
if (queue->count != 0)
panic("queue %p corrupted, count = %d\n", queue, queue->count);
#endif
// if the primary queue was empty, grap the page from the
// secondary queue
page = dequeue_page(otherQueue);
vm_page *page = NULL;
while (true) {
page = dequeue_page(queue);
if (page == NULL) {
#ifdef DEBUG
if (otherQueue->count != 0) {
panic("other queue %p corrupted, count = %d\n", otherQueue,
otherQueue->count);
}
if (queue->count != 0)
panic("queue %p corrupted, count = %d\n", queue, queue->count);
#endif
// ToDo: issue "someone" to free up some pages for us, and go into
// wait state until that's done
panic("vm_allocate_page: out of memory! page state = %d\n",
pageState);
// if the primary queue was empty, grap the page from the
// secondary queue
page = dequeue_page(otherQueue);
if (page == NULL) {
#ifdef DEBUG
if (otherQueue->count != 0) {
panic("other queue %p corrupted, count = %d\n", otherQueue,
otherQueue->count);
}
#endif
freeConditionEntry.Add(&sFreePageQueue);
vm_low_memory(1);
}
}
if (page != NULL)
break;
// we need to wait until new pages become available
locker.Unlock();
freeConditionEntry.Wait();
locker.Lock();
}
if (page->cache != NULL)
panic("supposed to be free page %p has cache\n", page);
@ -1018,8 +1120,7 @@ vm_page_allocate_page(int pageState)
enqueue_page(&sActivePageQueue, page);
release_spinlock(&sPageLock);
restore_interrupts(state);
locker.Unlock();
// if needed take the page from the free queue and zero it out
if (pageState == PAGE_STATE_CLEAR && oldPageState != PAGE_STATE_CLEAR)