* Removed the page state PAGE_STATE_BUSY and instead introduced a
  vm_page::busy flag. The obvious advantage is that one can still see what
  state a page is in and even move it between states while it is marked busy
  (see the sketch after this list).
* Removed the vm_page::is_dummy flag. Instead we mark marker pages busy,
  which in all cases has the same effect. Introduced vm_page_is_dummy(),
  which can still check whether a given page is a dummy page (illustrated
  below).
* vm_page_unreserve_pages(): Before adding to the system reserve, make sure
  sUnreservedFreePages is non-negative. Otherwise we'd make nonexistent pages
  available for allocation (a worked example follows below). steal_pages()
  still has the same problem, and it can't be solved that easily.
* map_page(): No longer changes the page state or marks the page unbusy;
  that's the caller's responsibility now.
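
For illustration only -- not part of the commit -- a minimal sketch of the
usage pattern the new flag enables, using the names from the diff below
(WaitForPageEvents(), MarkPageUnbusy(), PAGE_EVENT_NOT_BUSY);
do_lengthy_operation() is a hypothetical placeholder, and the cache lock is
assumed to be held on entry:

	void
	process_page(VMCache* cache, vm_page* page)
	{
		// If someone else is working on the page, wait until they are done.
		while (page->busy)
			cache->WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);

		page->busy = true;
			// The page keeps its real state (PAGE_STATE_MODIFIED, ...) and
			// can even be moved between states while busy -- previously the
			// state field itself was overwritten with PAGE_STATE_BUSY and
			// had to be saved and restored.

		cache->Unlock();
		do_lengthy_operation(page);	// hypothetical blocking work
		cache->Lock();

		cache->MarkPageUnbusy(page);
			// sets page->busy = false and notifies PAGE_EVENT_NOT_BUSY
			// waiters (see VMCache::MarkPageUnbusy() in the first file)
	}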

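Likewise for the second point, a hedged sketch of the marker-page pattern,
mirroring page_writer()/steal_pages() in the last file: markers are
stack-allocated, hence lie outside the sPages array (which is exactly what
vm_page_is_dummy() tests), and are permanently busy, so a single !page->busy
check now skips both markers and genuinely busy pages:

	vm_page marker;
	marker.SetCacheRef(NULL);
	marker.state = PAGE_STATE_UNUSED;
	marker.busy = true;
		// never cleared -- a marker page is always "busy"

	for (vm_page* page = sModifiedPageQueue.Head(); page != NULL;
			page = sModifiedPageQueue.Next(page)) {
		if (page->busy)
			continue;	// a marker, or a page someone is working on
		// ... process page, then insert the marker after it so the scan
		// can be resumed after dropping the queue lock ...
	}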

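A worked example for the vm_page_unreserve_pages() fix: suppose
sUnreservedFreePages has been driven to -3 and a caller now unreserves
count = 5 pages. The first 3 must cancel the deficit, and only the remaining
2 may go into the system reserve; crediting all 5 would advertise 3 pages
that don't physically exist. The new clamping loop in the last file does
exactly this:

	toUnreserve = min(count, -freePages) = min(5, 3) = 3
	sUnreservedFreePages: -3 + 3 = 0;  count: 5 - 3 = 2
		// only the remaining 2 pages refill the system reserve
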
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@35331 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold 2010-01-29 10:00:45 +00:00
parent 1196f305d7
commit 72382fa629
12 changed files with 126 additions and 86 deletions

View File

@@ -90,6 +90,7 @@ public:
 			void				NotifyPageEvents(vm_page* page, uint32 events)
 									{ if (fPageEventWaiters != NULL)
 										_NotifyPageEvents(page, events); }
+	inline	void				MarkPageUnbusy(vm_page* page);
 
 			vm_page*			LookupPage(off_t offset);
 			void				InsertPage(vm_page* page, off_t offset);
@@ -291,6 +292,14 @@ VMCache::ReleaseRefAndUnlock(bool consumerLocked)
 }
 
+
+void
+VMCache::MarkPageUnbusy(vm_page* page)
+{
+	page->busy = false;
+	NotifyPageEvents(page, PAGE_EVENT_NOT_BUSY);
+}
+
 
 #ifdef __cplusplus
 extern "C" {
 #endif

View File

@@ -56,6 +56,7 @@ struct vm_page *vm_page_allocate_page_run_no_base(int state, addr_t count,
 	int priority);
 struct vm_page *vm_page_at_index(int32 index);
 struct vm_page *vm_lookup_page(addr_t pageNumber);
+bool vm_page_is_dummy(struct vm_page *page);
 
 #ifdef __cplusplus
 }

View File

@@ -106,7 +106,7 @@ public:
 #endif
 
 	uint8					state : 3;
-	bool					is_dummy : 1;
+	bool					busy : 1;
 	bool					busy_writing : 1;
 		// used in VMAnonymousCache::Merge()
 	bool					accessed : 1;
@@ -116,7 +116,6 @@ public:
 	int8					usage_count;
 	uint16					wired_count;
 
 	VMCacheRef* CacheRef() const	{ return cache_ref; }
 	void SetCacheRef(VMCacheRef* cacheRef)	{ this->cache_ref = cacheRef; }
@@ -128,7 +127,6 @@ public:
 enum {
 	PAGE_STATE_ACTIVE = 0,
 	PAGE_STATE_INACTIVE,
-	PAGE_STATE_BUSY,
 	PAGE_STATE_MODIFIED,
 	PAGE_STATE_FREE,
 	PAGE_STATE_CLEAR,

View File

@@ -541,6 +541,7 @@ Aperture::AllocateMemory(aperture_memory *memory, uint32 flags)
 	if ((flags & B_APERTURE_NEED_PHYSICAL) != 0) {
 		memory->page = vm_page_allocate_page_run(PAGE_STATE_CLEAR, 0, count,
 			VM_PRIORITY_SYSTEM);
+			// TODO: Mark pages unbusy!
 		if (memory->page == NULL)
 			return B_NO_MEMORY;
 	} else {
@@ -550,8 +551,10 @@ Aperture::AllocateMemory(aperture_memory *memory, uint32 flags)
 			return B_NO_MEMORY;
 
 		vm_page_reserve_pages(count, VM_PRIORITY_SYSTEM);
-		for (uint32 i = 0; i < count; i++)
+		for (uint32 i = 0; i < count; i++) {
 			memory->pages[i] = vm_page_allocate_page(PAGE_STATE_CLEAR);
+			memory->pages[i]->busy = false;
+		}
 		vm_page_unreserve_pages(count);
 	}

View File

@@ -547,6 +547,7 @@ map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
 		// mark the page WIRED
 		vm_page_set_state(page, PAGE_STATE_WIRED);
+		page->busy = false;
 
 		DEBUG_PAGE_ACCESS_END(page);
@@ -595,6 +596,7 @@ map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
 		// mark the page WIRED
 		vm_page_set_state(page, PAGE_STATE_WIRED);
+		page->busy = false;
 
 		DEBUG_PAGE_ACCESS_END(page);

View File

@@ -426,6 +426,7 @@ X86VMTranslationMap::Map(addr_t va, addr_t pa, uint32 attributes)
 	// mark the page WIRED
 	vm_page_set_state(page, PAGE_STATE_WIRED);
+	page->busy = false;
 
 	DEBUG_PAGE_ACCESS_END(page);

View File

@@ -209,8 +209,7 @@ PrecacheIO::IOFinished(status_t status, bool partialTransfer,
 		DEBUG_PAGE_ACCESS_TRANSFER(fPages[i], fAllocatingThread);
 
-		fPages[i]->state = PAGE_STATE_ACTIVE;
-		fCache->NotifyPageEvents(fPages[i], PAGE_EVENT_NOT_BUSY);
+		fCache->MarkPageUnbusy(fPages[i]);
 
 		DEBUG_PAGE_ACCESS_END(fPages[i]);
 	}
@@ -308,8 +307,7 @@ reserve_pages(file_cache_ref* ref, size_t reservePages, bool isWrite)
 			vm_page* page;
 			for (VMCachePagesTree::Iterator it = cache->pages.GetIterator();
 					(page = it.Next()) != NULL && left > 0;) {
-				if (page->state != PAGE_STATE_MODIFIED
-					&& page->state != PAGE_STATE_BUSY) {
+				if (page->state != PAGE_STATE_MODIFIED && !page->busy) {
 					DEBUG_PAGE_ACCESS_START(page);
 					cache->RemovePage(page);
 					vm_page_set_state(page, PAGE_STATE_FREE);
@@ -442,9 +440,7 @@ read_into_cache(file_cache_ref* ref, void* cookie, off_t offset,
 		for (int32 i = pageIndex; i-- > 0;) {
 			DEBUG_PAGE_ACCESS_END(pages[i]);
-
-			pages[i]->state = PAGE_STATE_ACTIVE;
-			cache->NotifyPageEvents(pages[i], PAGE_EVENT_NOT_BUSY);
+			cache->MarkPageUnbusy(pages[i]);
 		}
 
 		return B_OK;
@@ -610,11 +606,9 @@ write_to_cache(file_cache_ref* ref, void* cookie, off_t offset,
 	// make the pages accessible in the cache
 	for (int32 i = pageIndex; i-- > 0;) {
-		ref->cache->NotifyPageEvents(pages[i], PAGE_EVENT_NOT_BUSY);
+		ref->cache->MarkPageUnbusy(pages[i]);
 
-		if (writeThrough)
-			pages[i]->state = PAGE_STATE_ACTIVE;
-		else
+		if (!writeThrough)
 			vm_page_set_state(pages[i], PAGE_STATE_MODIFIED);
 
 		DEBUG_PAGE_ACCESS_END(pages[i]);
@@ -772,7 +766,7 @@ cache_io(void* _cacheRef, void* cookie, off_t offset, addr_t buffer,
 		if (status != B_OK)
 			return status;
 
-		if (page->state == PAGE_STATE_BUSY) {
+		if (page->busy) {
 			cache->WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
 			continue;
 		}
@@ -797,8 +791,7 @@ cache_io(void* _cacheRef, void* cookie, off_t offset, addr_t buffer,
 			// need to unlock the cache temporarily to avoid a potential
 			// deadlock. To make sure that our page doesn't go away, we mark
 			// it busy for the time.
-			uint8 oldPageState = page->state;
-			page->state = PAGE_STATE_BUSY;
+			page->busy = true;
 			locker.Unlock();
 
 			// copy the contents of the page already in memory
@@ -818,14 +811,13 @@ cache_io(void* _cacheRef, void* cookie, off_t offset, addr_t buffer,
 			locker.Lock();
 
-			page->state = oldPageState;
 			if (doWrite && page->state != PAGE_STATE_MODIFIED) {
 				DEBUG_PAGE_ACCESS_START(page);
 				vm_page_set_state(page, PAGE_STATE_MODIFIED);
 				DEBUG_PAGE_ACCESS_END(page);
 			}
 
-			cache->NotifyPageEvents(page, PAGE_EVENT_NOT_BUSY);
+			cache->MarkPageUnbusy(page);
 		}
 
 		if (bytesLeft <= bytesInPage) {

View File

@@ -1352,6 +1352,7 @@ MemoryManager::_MapChunk(VMArea* vmArea, addr_t address, size_t size,
 		vm_page* page = vm_page_allocate_page(PAGE_STATE_FREE);
 		cache->InsertPage(page, offset);
 		vm_page_set_state(page, PAGE_STATE_WIRED);
+		page->busy = false;
 		page->wired_count++;
 		atomic_add(&gMappedPagesCount, 1);

View File

@@ -1058,7 +1058,7 @@ VMCache::Resize(off_t newSize, int priority)
 		for (VMCachePagesTree::Iterator it
 					= pages.GetIterator(newPageCount, true, true);
 				vm_page* page = it.Next();) {
-			if (page->state == PAGE_STATE_BUSY) {
+			if (page->busy) {
 				if (page->busy_writing) {
 					// We cannot wait for the page to become available
 					// as we might cause a deadlock this way
@@ -1107,7 +1107,7 @@ VMCache::FlushAndRemoveAllPages()
 	// remove pages
 	for (VMCachePagesTree::Iterator it = pages.GetIterator();
 			vm_page* page = it.Next();) {
-		if (page->state == PAGE_STATE_BUSY) {
+		if (page->busy) {
 			// wait for page to become unbusy
 			WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);

View File

@@ -515,9 +515,6 @@ map_page(VMArea* area, vm_page* page, addr_t address, uint32 protection)
 	if (page->usage_count < 0)
 		page->usage_count = 1;
 
-	if (page->state != PAGE_STATE_MODIFIED)
-		vm_page_set_state(page, PAGE_STATE_ACTIVE);
-
 	return B_OK;
 }
@@ -1128,8 +1125,16 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
 				vm_page* page = vm_page_allocate_page(newPageState);
 				cache->InsertPage(page, offset);
 				map_page(area, page, address, protection);
-					// TODO: This sets the page state to "active", but it would
-					// make more sense to set it to "wired".
+//				vm_page_set_state(page, PAGE_STATE_WIRED);
+					// TODO: The pages should be PAGE_STATE_WIRED, since there's
+					// no need for the page daemon to play with them (the same
+					// should be considered in vm_soft_fault()). ATM doing that
+					// will result in bad thrashing in systems with little
+					// memory due to the current tuning of the page daemon. It
+					// will age pages way too fast (since it just skips
+					// PAGE_STATE_WIRED pages, while it processes
+					// PAGE_STATE_ACTIVE with wired_count > 0).
+				page->busy = false;
 
 				DEBUG_PAGE_ACCESS_END(page);
@@ -1179,6 +1184,7 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
 				increment_page_wired_count(page);
 				cache->InsertPage(page, offset);
 				vm_page_set_state(page, PAGE_STATE_WIRED);
+				page->busy = false;
 
 				DEBUG_PAGE_ACCESS_END(page);
 			}
@@ -1212,6 +1218,7 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
 			increment_page_wired_count(page);
 			cache->InsertPage(page, offset);
 			vm_page_set_state(page, PAGE_STATE_WIRED);
+			page->busy = false;
 
 			DEBUG_PAGE_ACCESS_END(page);
 		}
@@ -1511,7 +1518,7 @@ pre_map_area_pages(VMArea* area, VMCache* cache)
 			break;
 
 		// skip inactive pages
-		if (page->state == PAGE_STATE_BUSY || page->usage_count <= 0)
+		if (page->busy || page->usage_count <= 0)
 			continue;
 
 		DEBUG_PAGE_ACCESS_START(page);
@@ -1817,7 +1824,7 @@ vm_clone_area(team_id team, const char* name, void** address,
 		// map in all pages from source
 		for (VMCachePagesTree::Iterator it = cache->pages.GetIterator();
 				vm_page* page = it.Next();) {
-			if (page->state != PAGE_STATE_BUSY) {
+			if (!page->busy) {
 				DEBUG_PAGE_ACCESS_START(page);
 				map_page(newArea, page,
 					newArea->Base() + ((page->cache_offset << PAGE_SHIFT)
@@ -2738,7 +2745,7 @@ dump_cache(int argc, char** argv)
 	if (showPages) {
 		for (VMCachePagesTree::Iterator it = cache->pages.GetIterator();
 				vm_page* page = it.Next();) {
-			if (!page->is_dummy) {
+			if (!vm_page_is_dummy(page)) {
 				kprintf("\t%p ppn 0x%lx offset 0x%lx state %u (%s) "
 					"wired_count %u\n", page, page->physical_page_number,
 					page->cache_offset, page->state,
@@ -3690,7 +3697,7 @@ fault_get_page(PageFaultContext& context)
 	for (;;) {
 		page = cache->LookupPage(context.cacheOffset);
-		if (page == NULL || page->state != PAGE_STATE_BUSY) {
+		if (page == NULL || !page->busy) {
 			// Either there is no page or there is one and it is not busy.
 			break;
 		}
@@ -3745,8 +3752,7 @@ fault_get_page(PageFaultContext& context)
 		}
 
 		// mark the page unbusy again
-		page->state = PAGE_STATE_ACTIVE;
-		cache->NotifyPageEvents(page, PAGE_EVENT_NOT_BUSY);
+		cache->MarkPageUnbusy(page);
 
 		DEBUG_PAGE_ACCESS_END(page);
@@ -3769,12 +3775,12 @@ fault_get_page(PageFaultContext& context)
 		// allocate a clean page
 		page = vm_page_allocate_page(PAGE_STATE_CLEAR);
+		page->busy = false;
 		FTRACE(("vm_soft_fault: just allocated page 0x%lx\n",
 			page->physical_page_number));
 
 		// insert the new page into our cache
 		cache->InsertPage(page, context.cacheOffset);
-
 	} else if (page->Cache() != context.topCache && context.isWrite) {
 		// We have a page that has the data we want, but in the wrong cache
 		// object so we need to copy it and stick it into the top cache.
@@ -3784,12 +3790,12 @@ fault_get_page(PageFaultContext& context)
 		// from our source cache -- if possible, that is.
 		FTRACE(("get new page, copy it, and put it into the topmost cache\n"));
 		page = vm_page_allocate_page(PAGE_STATE_FREE);
+		page->busy = false;
 
 		// To not needlessly kill concurrency we unlock all caches but the top
 		// one while copying the page. Lacking another mechanism to ensure that
 		// the source page doesn't disappear, we mark it busy.
-		int sourcePageState = sourcePage->state;
-		sourcePage->state = PAGE_STATE_BUSY;
+		sourcePage->busy = true;
 		context.cacheChainLocker.UnlockKeepRefs(true);
 
 		// copy the page
@@ -3797,8 +3803,7 @@ fault_get_page(PageFaultContext& context)
 			sourcePage->physical_page_number * B_PAGE_SIZE);
 
 		context.cacheChainLocker.RelockCaches(true);
-		sourcePage->state = sourcePageState;
-		sourcePage->Cache()->NotifyPageEvents(sourcePage, PAGE_EVENT_NOT_BUSY);
+		sourcePage->Cache()->MarkPageUnbusy(sourcePage);
 
 		// insert the new page into our cache
 		context.topCache->InsertPage(page, context.cacheOffset);
@@ -3964,8 +3969,6 @@ vm_soft_fault(VMAddressSpace* addressSpace, addr_t originalAddress,
 			// fine, though. We'll simply leave and probably fault again.
 			// To make sure we'll have more luck then, we ensure that the
 			// minimum object reserve is available.
-			if (context.page->state == PAGE_STATE_BUSY)
-				vm_page_set_state(context.page, PAGE_STATE_ACTIVE);
 			DEBUG_PAGE_ACCESS_END(context.page);
 
 			context.UnlockAll();

View File

@@ -65,7 +65,7 @@ PageCacheLocker::~PageCacheLocker()
 bool
 PageCacheLocker::_IgnorePage(vm_page* page)
 {
-	if (page->state == PAGE_STATE_WIRED || page->state == PAGE_STATE_BUSY
+	if (page->busy || page->state == PAGE_STATE_WIRED
 		|| page->state == PAGE_STATE_FREE || page->state == PAGE_STATE_CLEAR
 		|| page->state == PAGE_STATE_UNUSED || page->wired_count > 0)
 		return true;
@@ -257,7 +257,7 @@ clear_page_activation(int32 index)
 	if (!locker.IsLocked())
 		return;
 
-	if (page->state == PAGE_STATE_ACTIVE)
+	if (!page->busy && page->state == PAGE_STATE_ACTIVE)
 		vm_clear_map_flags(page, PAGE_ACCESSED);
 }

View File

@@ -334,8 +334,6 @@ page_state_to_string(int state)
 			return "active";
 		case PAGE_STATE_INACTIVE:
 			return "inactive";
-		case PAGE_STATE_BUSY:
-			return "busy";
 		case PAGE_STATE_MODIFIED:
 			return "modified";
 		case PAGE_STATE_FREE:
@@ -420,10 +418,10 @@ dump_page(int argc, char **argv)
 	kprintf("cache: %p\n", page->Cache());
 	kprintf("cache_offset: %ld\n", page->cache_offset);
 	kprintf("cache_next: %p\n", page->cache_next);
-	kprintf("is dummy: %d\n", page->is_dummy);
 	kprintf("state: %s\n", page_state_to_string(page->state));
 	kprintf("wired_count: %d\n", page->wired_count);
 	kprintf("usage_count: %d\n", page->usage_count);
+	kprintf("busy: %d\n", page->busy);
 	kprintf("busy_writing: %d\n", page->busy_writing);
 	kprintf("accessed: %d\n", page->accessed);
 	kprintf("modified: %d\n", page->modified);
@@ -545,16 +543,20 @@ dump_page_stats(int argc, char **argv)
 {
 	page_num_t swappableModified = 0;
 	page_num_t swappableModifiedInactive = 0;
-	uint32 counter[8];
+	size_t counter[8];
+	size_t busyCounter[8];
 	addr_t i;
 
 	memset(counter, 0, sizeof(counter));
+	memset(busyCounter, 0, sizeof(busyCounter));
 
 	for (i = 0; i < sNumPages; i++) {
 		if (sPages[i].state > 7)
 			panic("page %li at %p has invalid state!\n", i, &sPages[i]);
 
 		counter[sPages[i].state]++;
+		if (sPages[i].busy)
+			busyCounter[sPages[i].state]++;
 
 		if (sPages[i].state == PAGE_STATE_MODIFIED && sPages[i].Cache() != NULL
 			&& sPages[i].Cache()->temporary && sPages[i].wired_count == 0) {
@@ -566,12 +568,20 @@ dump_page_stats(int argc, char **argv)
 	kprintf("page stats:\n");
 	kprintf("total: %lu\n", sNumPages);
-	kprintf("active: %lu\ninactive: %lu\nbusy: %lu\nunused: %lu\n",
-		counter[PAGE_STATE_ACTIVE], counter[PAGE_STATE_INACTIVE],
-		counter[PAGE_STATE_BUSY], counter[PAGE_STATE_UNUSED]);
-	kprintf("wired: %lu\nmodified: %lu\nfree: %lu\nclear: %lu\n",
-		counter[PAGE_STATE_WIRED], counter[PAGE_STATE_MODIFIED],
-		counter[PAGE_STATE_FREE], counter[PAGE_STATE_CLEAR]);
+
+	kprintf("active: %" B_PRIuSIZE " (busy: %" B_PRIuSIZE ")\n",
+		counter[PAGE_STATE_ACTIVE], busyCounter[PAGE_STATE_ACTIVE]);
+	kprintf("inactive: %" B_PRIuSIZE " (busy: %" B_PRIuSIZE ")\n",
+		counter[PAGE_STATE_INACTIVE], busyCounter[PAGE_STATE_INACTIVE]);
+	kprintf("unused: %" B_PRIuSIZE " (busy: %" B_PRIuSIZE ")\n",
+		counter[PAGE_STATE_UNUSED], busyCounter[PAGE_STATE_UNUSED]);
+	kprintf("wired: %" B_PRIuSIZE " (busy: %" B_PRIuSIZE ")\n",
+		counter[PAGE_STATE_WIRED], busyCounter[PAGE_STATE_WIRED]);
+	kprintf("modified: %" B_PRIuSIZE " (busy: %" B_PRIuSIZE ")\n",
+		counter[PAGE_STATE_MODIFIED], busyCounter[PAGE_STATE_MODIFIED]);
+	kprintf("free: %" B_PRIuSIZE "\n", counter[PAGE_STATE_FREE]);
+	kprintf("clear: %" B_PRIuSIZE "\n", counter[PAGE_STATE_CLEAR]);
 	kprintf("unreserved free pages: %" B_PRId32 "\n", sUnreservedFreePages);
 	kprintf("system reserved pages: %" B_PRId32 "\n", sSystemReservedPages);
 	kprintf("page deficit: %lu\n", sPageDeficit);
@@ -626,7 +636,6 @@ free_page(vm_page* page, bool clear)
 	VMPageQueue* fromQueue;
 
 	switch (page->state) {
-		case PAGE_STATE_BUSY:
 		case PAGE_STATE_ACTIVE:
 			fromQueue = &sActivePageQueue;
 			break;
@@ -699,7 +708,6 @@ set_page_state(vm_page *page, int pageState)
 	VMPageQueue* fromQueue;
 
 	switch (page->state) {
-		case PAGE_STATE_BUSY:
 		case PAGE_STATE_ACTIVE:
 			fromQueue = &sActivePageQueue;
 			break;
@@ -726,7 +734,6 @@ set_page_state(vm_page *page, int pageState)
 	VMPageQueue* toQueue;
 
 	switch (pageState) {
-		case PAGE_STATE_BUSY:
 		case PAGE_STATE_ACTIVE:
 			toQueue = &sActivePageQueue;
 			break;
@@ -862,7 +869,8 @@ page_scrubber(void *unused)
 			DEBUG_PAGE_ACCESS_START(page[i]);
 
-			page[i]->state = PAGE_STATE_BUSY;
+			page[i]->state = PAGE_STATE_ACTIVE;
+			page[i]->busy = true;
 			scrubCount++;
 		}
@@ -884,6 +892,7 @@ page_scrubber(void *unused)
 			// and put them into the clear queue
 			for (int32 i = 0; i < scrubCount; i++) {
 				page[i]->state = PAGE_STATE_CLEAR;
+				page[i]->busy = false;
 				DEBUG_PAGE_ACCESS_END(page[i]);
 				sClearPageQueue.PrependUnlocked(page[i]);
 			}
@@ -899,13 +908,6 @@ page_scrubber(void *unused)
 }
 
-static inline bool
-is_marker_page(struct vm_page *page)
-{
-	return page->is_dummy;
-}
-
-
 static void
 remove_page_marker(struct vm_page &marker)
 {
@@ -953,7 +955,7 @@ next_modified_page(struct vm_page &marker)
 		page = sModifiedPageQueue.Head();
 
 	for (; page != NULL; page = sModifiedPageQueue.Next(page)) {
-		if (!is_marker_page(page) && page->state != PAGE_STATE_BUSY) {
+		if (!page->busy) {
 			// insert marker
 			marker.state = PAGE_STATE_MODIFIED;
 			sModifiedPageQueue.InsertAfter(page, &marker);
@@ -1035,7 +1037,6 @@ private:
 	struct VMCache*	fCache;
 	bool			fDequeuedPage;
 	bool			fIsActive;
-	int				fOldPageState;
 };
@@ -1060,7 +1061,7 @@ PageWriteWrapper::SetTo(vm_page* page, bool dequeuedPage)
 {
 	DEBUG_PAGE_ACCESS_CHECK(page);
 
-	if (page->state == PAGE_STATE_BUSY)
+	if (page->busy)
 		panic("setting page write wrapper to busy page");
 
 	if (fIsActive)
@@ -1071,8 +1072,7 @@ PageWriteWrapper::SetTo(vm_page* page, bool dequeuedPage)
 	fDequeuedPage = dequeuedPage;
 	fIsActive = true;
 
-	fOldPageState = fPage->state;
-	fPage->state = PAGE_STATE_BUSY;
+	fPage->busy = true;
 	fPage->busy_writing = true;
 }
@@ -1120,6 +1120,9 @@ PageWriteWrapper::Done(status_t result)
 	DEBUG_PAGE_ACCESS_CHECK(fPage);
 
+	fPage->busy = false;
+		// Set unbusy and notify later by hand, since we might free the page.
+
 	if (result == B_OK) {
 		// put it into the active/inactive queue
 		move_page_to_active_or_inactive_queue(fPage, fDequeuedPage);
@@ -1132,10 +1135,8 @@ PageWriteWrapper::Done(status_t result)
 		if (fDequeuedPage) {
 			fPage->state = PAGE_STATE_MODIFIED;
 			sModifiedPageQueue.AppendUnlocked(fPage);
-		} else {
-			fPage->state = fOldPageState;
+		} else
 			set_page_state(fPage, PAGE_STATE_MODIFIED);
-		}
 
 		if (!fPage->busy_writing) {
 			// The busy_writing flag was cleared. That means the cache has been
@@ -1400,9 +1401,9 @@ page_writer(void* /*unused*/)
 	}
 
 	vm_page marker;
-	marker.is_dummy = true;
 	marker.SetCacheRef(NULL);
 	marker.state = PAGE_STATE_UNUSED;
+	marker.busy = true;
 #if DEBUG_PAGE_QUEUE
 	marker.queue = NULL;
 #endif
@@ -1493,7 +1494,7 @@ page_writer(void* /*unused*/)
 			}
 
 			// state might have changed while we were locking the cache
-			if (page->state != PAGE_STATE_MODIFIED) {
+			if (page->busy || page->state != PAGE_STATE_MODIFIED) {
 				// release the cache reference
 				DEBUG_PAGE_ACCESS_END(page);
 				cache->ReleaseStoreRef();
@@ -1565,7 +1566,7 @@ find_page_candidate(struct vm_page &marker)
 	}
 
 	while (page != NULL) {
-		if (!is_marker_page(page) && page->state == PAGE_STATE_INACTIVE) {
+		if (!page->busy) {
 			// we found a candidate, insert marker
 			marker.state = PAGE_STATE_INACTIVE;
 			sInactivePageQueue.InsertAfter(page, &marker);
@@ -1591,7 +1592,7 @@ steal_page(vm_page *page)
 	MethodDeleter<VMCache> _2(cache, &VMCache::ReleaseRefLocked);
 
 	// check again if that page is still a candidate
-	if (page->state != PAGE_STATE_INACTIVE)
+	if (page->busy || page->state != PAGE_STATE_INACTIVE)
 		return false;
 
 	DEBUG_PAGE_ACCESS_START(page);
@@ -1628,9 +1629,9 @@ steal_pages(vm_page **pages, size_t count)
 {
 	while (true) {
 		vm_page marker;
-		marker.is_dummy = true;
 		marker.SetCacheRef(NULL);
 		marker.state = PAGE_STATE_UNUSED;
+		marker.busy = true;
 #if DEBUG_PAGE_QUEUE
 		marker.queue = NULL;
 #endif
@@ -1765,12 +1766,13 @@ vm_page_write_modified_page_range(struct VMCache* cache, uint32 firstPage,
 		bool dequeuedPage = false;
 
 		if (page != NULL) {
-			if (page->state == PAGE_STATE_MODIFIED) {
+			if (page->busy) {
+				page = NULL;
+			} else if (page->state == PAGE_STATE_MODIFIED) {
 				DEBUG_PAGE_ACCESS_START(page);
 				sModifiedPageQueue.RemoveUnlocked(page);
 				dequeuedPage = true;
-			} else if (page->state == PAGE_STATE_BUSY
-				|| !vm_test_map_modification(page)) {
+			} else if (!vm_test_map_modification(page)) {
 				page = NULL;
 			} else
 				DEBUG_PAGE_ACCESS_START(page);
@@ -1869,7 +1871,7 @@ vm_page_schedule_write_page_range(struct VMCache *cache, uint32 firstPage,
 		if (page->cache_offset >= endPage)
 			break;
 
-		if (page->state == PAGE_STATE_MODIFIED) {
+		if (!page->busy && page->state == PAGE_STATE_MODIFIED) {
 			DEBUG_PAGE_ACCESS_START(page);
 			vm_page_requeue(page, false);
 			modified++;
@@ -1930,7 +1932,6 @@ vm_page_init(kernel_args *args)
 	// initialize the free page table
 	for (uint32 i = 0; i < sNumPages; i++) {
 		sPages[i].physical_page_number = sPhysicalPageOffset + i;
-		sPages[i].is_dummy = false;
 		sPages[i].state = PAGE_STATE_FREE;
 		new(&sPages[i].mappings) vm_page_mappings();
 		sPages[i].wired_count = 0;
@@ -2066,6 +2067,7 @@ vm_mark_page_range_inuse(addr_t startPage, addr_t length)
 					? sFreePageQueue : sClearPageQueue;
 				queue.Remove(page);
 				page->state = PAGE_STATE_UNUSED;
+				page->busy = false;
 				atomic_add(&sUnreservedFreePages, -1);
 				DEBUG_PAGE_ACCESS_END(page);
 				break;
@@ -2074,7 +2076,6 @@ vm_mark_page_range_inuse(addr_t startPage, addr_t length)
 				break;
 			case PAGE_STATE_ACTIVE:
 			case PAGE_STATE_INACTIVE:
-			case PAGE_STATE_BUSY:
 			case PAGE_STATE_MODIFIED:
 			case PAGE_STATE_UNUSED:
 			default:
@@ -2102,6 +2103,25 @@ vm_page_unreserve_pages(uint32 count)
 
 	T(UnreservePages(count));
 
+	while (true) {
+		int32 freePages = sUnreservedFreePages;
+		if (freePages >= 0)
+			break;
+
+		int32 toUnreserve = std::min((int32)count, -freePages);
+		if (atomic_test_and_set(&sUnreservedFreePages,
+				freePages + toUnreserve, freePages) == freePages) {
+			count -= toUnreserve;
+			if (count == 0) {
+				// TODO: Notify waiting system priority reservers.
+				return;
+			}
+			break;
+		}
+
+		// the count changed in the meantime -- retry
+	}
+
 	while (true) {
 		int32 systemReserve = sSystemReservedPages;
 		if (systemReserve >= (int32)kMinimumSystemReserve)
@@ -2113,8 +2133,10 @@ vm_page_unreserve_pages(uint32 count)
 				systemReserve + toUnreserve, systemReserve)
 				== systemReserve) {
 			count -= toUnreserve;
-			if (count == 0)
+			if (count == 0) {
+				// TODO: Notify waiting system priority reservers.
 				return;
+			}
 			break;
 		}
@@ -2314,7 +2336,8 @@ vm_page_allocate_page(int pageState)
 	DEBUG_PAGE_ACCESS_START(page);
 
 	int oldPageState = page->state;
-	page->state = PAGE_STATE_BUSY;
+	page->state = PAGE_STATE_ACTIVE;
+	page->busy = true;
 	page->usage_count = 2;
 	page->accessed = false;
 	page->modified = false;
@@ -2352,7 +2375,8 @@ allocate_page_run(page_num_t start, page_num_t length, int pageState,
 			freePages.Add(&page);
 		}
 
-		page.state = PAGE_STATE_BUSY;
+		page.state = PAGE_STATE_ACTIVE;
+		page.busy = true;
 		page.usage_count = 1;
 		page.accessed = false;
 		page.modified = false;
@@ -2497,6 +2521,13 @@ vm_lookup_page(addr_t pageNumber)
 }
 
 
+bool
+vm_page_is_dummy(struct vm_page *page)
+{
+	return page < sPages || page >= sPages + sNumPages;
+}
+
+
 /*!	Free the page that belonged to a certain cache.
 	You can use vm_page_set_state() manually if you prefer, but only
 	if the page does not equal PAGE_STATE_MODIFIED.
@@ -2538,7 +2569,6 @@ vm_page_requeue(struct vm_page *page, bool tail)
 	VMPageQueue *queue = NULL;
 
 	switch (page->state) {
-		case PAGE_STATE_BUSY:
 		case PAGE_STATE_ACTIVE:
 			queue = &sActivePageQueue;
 			break;