* Removed the page state PAGE_STATE_BUSY and instead introduced a vm_page::busy
  flag. The obvious advantage is that one can still see what state a page is in
  and even move it between states while it is marked busy.
* Removed the vm_page::is_dummy flag. Instead we mark marker pages busy, which
  in all cases has the same effect. Introduced vm_page_is_dummy(), which can
  still check whether a given page is a dummy page.
* vm_page_unreserve_pages(): Before adding to the system reserve, make sure
  sUnreservedFreePages is non-negative. Otherwise we would make nonexistent
  pages available for allocation. steal_pages() still has the same problem,
  and it can't be solved that easily.
* map_page(): No longer changes the page state or marks the page unbusy. That
  is now the caller's responsibility.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@35331 a95241bf-73f2-0310-859d-f6bbb57e9c96
parent 1196f305d7
commit 72382fa629
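The heart of the change: "busy" becomes a flag orthogonal to the page state, and
it is cleared through the new VMCache::MarkPageUnbusy(), which also wakes threads
waiting for PAGE_EVENT_NOT_BUSY. The sketch below only restates the caller-side
protocol that the diff establishes, using names that appear in the diff; the
helper function itself, its name, and its locking context are illustrative
assumptions, not part of the commit.

	// Minimal sketch, assuming the cache is locked and the page belongs to it.
	static void
	process_page_sketch(VMCache* cache, vm_page* page)
	{
		// Mark the page busy while working on it. Its state stays visible and
		// may still change; "busy" is an independent flag, not a page state.
		page->busy = true;

		// ... work that requires the page not to be touched concurrently ...

		// A busy page can still be moved between states, e.g. to modified.
		vm_page_set_state(page, PAGE_STATE_MODIFIED);

		// Clear the flag and notify anyone waiting for PAGE_EVENT_NOT_BUSY.
		cache->MarkPageUnbusy(page);
	}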
@@ -90,6 +90,7 @@ public:
 	void NotifyPageEvents(vm_page* page, uint32 events)
 		{ if (fPageEventWaiters != NULL)
 			_NotifyPageEvents(page, events); }
+	inline void MarkPageUnbusy(vm_page* page);

 	vm_page* LookupPage(off_t offset);
 	void InsertPage(vm_page* page, off_t offset);

@@ -291,6 +292,14 @@ VMCache::ReleaseRefAndUnlock(bool consumerLocked)
 }


+void
+VMCache::MarkPageUnbusy(vm_page* page)
+{
+	page->busy = false;
+	NotifyPageEvents(page, PAGE_EVENT_NOT_BUSY);
+}
+
+
 #ifdef __cplusplus
 extern "C" {
 #endif

@@ -56,6 +56,7 @@ struct vm_page *vm_page_allocate_page_run_no_base(int state, addr_t count,
 	int priority);
 struct vm_page *vm_page_at_index(int32 index);
 struct vm_page *vm_lookup_page(addr_t pageNumber);
+bool vm_page_is_dummy(struct vm_page *page);

 #ifdef __cplusplus
 }

@@ -106,7 +106,7 @@ public:
 #endif

 	uint8 state : 3;
-	bool is_dummy : 1;
+	bool busy : 1;
 	bool busy_writing : 1;
 		// used in VMAnonymousCache::Merge()
 	bool accessed : 1;

@@ -116,7 +116,6 @@ public:
 	int8 usage_count;
 	uint16 wired_count;
-

 	VMCacheRef* CacheRef() const { return cache_ref; }
 	void SetCacheRef(VMCacheRef* cacheRef) { this->cache_ref = cacheRef; }

@@ -128,7 +127,6 @@ public:
 enum {
 	PAGE_STATE_ACTIVE = 0,
 	PAGE_STATE_INACTIVE,
-	PAGE_STATE_BUSY,
 	PAGE_STATE_MODIFIED,
 	PAGE_STATE_FREE,
 	PAGE_STATE_CLEAR,
@@ -541,6 +541,7 @@ Aperture::AllocateMemory(aperture_memory *memory, uint32 flags)
 	if ((flags & B_APERTURE_NEED_PHYSICAL) != 0) {
 		memory->page = vm_page_allocate_page_run(PAGE_STATE_CLEAR, 0, count,
 			VM_PRIORITY_SYSTEM);
+			// TODO: Mark pages unbusy!
 		if (memory->page == NULL)
 			return B_NO_MEMORY;
 	} else {

@@ -550,8 +551,10 @@ Aperture::AllocateMemory(aperture_memory *memory, uint32 flags)
 			return B_NO_MEMORY;

 		vm_page_reserve_pages(count, VM_PRIORITY_SYSTEM);
-		for (uint32 i = 0; i < count; i++)
+		for (uint32 i = 0; i < count; i++) {
 			memory->pages[i] = vm_page_allocate_page(PAGE_STATE_CLEAR);
+			memory->pages[i]->busy = false;
+		}
 		vm_page_unreserve_pages(count);
 	}

@@ -547,6 +547,7 @@ map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)

 	// mark the page WIRED
 	vm_page_set_state(page, PAGE_STATE_WIRED);
+	page->busy = false;

 	DEBUG_PAGE_ACCESS_END(page);

@@ -595,6 +596,7 @@ map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)

 	// mark the page WIRED
 	vm_page_set_state(page, PAGE_STATE_WIRED);
+	page->busy = false;

 	DEBUG_PAGE_ACCESS_END(page);

@@ -426,6 +426,7 @@ X86VMTranslationMap::Map(addr_t va, addr_t pa, uint32 attributes)

 	// mark the page WIRED
 	vm_page_set_state(page, PAGE_STATE_WIRED);
+	page->busy = false;

 	DEBUG_PAGE_ACCESS_END(page);

src/system/kernel/cache/file_cache.cpp
@@ -209,8 +209,7 @@ PrecacheIO::IOFinished(status_t status, bool partialTransfer,

 		DEBUG_PAGE_ACCESS_TRANSFER(fPages[i], fAllocatingThread);

-		fPages[i]->state = PAGE_STATE_ACTIVE;
-		fCache->NotifyPageEvents(fPages[i], PAGE_EVENT_NOT_BUSY);
+		fCache->MarkPageUnbusy(fPages[i]);

 		DEBUG_PAGE_ACCESS_END(fPages[i]);
 	}

@@ -308,8 +307,7 @@ reserve_pages(file_cache_ref* ref, size_t reservePages, bool isWrite)
 		vm_page* page;
 		for (VMCachePagesTree::Iterator it = cache->pages.GetIterator();
 				(page = it.Next()) != NULL && left > 0;) {
-			if (page->state != PAGE_STATE_MODIFIED
-				&& page->state != PAGE_STATE_BUSY) {
+			if (page->state != PAGE_STATE_MODIFIED && !page->busy) {
 				DEBUG_PAGE_ACCESS_START(page);
 				cache->RemovePage(page);
 				vm_page_set_state(page, PAGE_STATE_FREE);

@@ -442,9 +440,7 @@ read_into_cache(file_cache_ref* ref, void* cookie, off_t offset,
 	for (int32 i = pageIndex; i-- > 0;) {
 		DEBUG_PAGE_ACCESS_END(pages[i]);

-		pages[i]->state = PAGE_STATE_ACTIVE;
-
-		cache->NotifyPageEvents(pages[i], PAGE_EVENT_NOT_BUSY);
+		cache->MarkPageUnbusy(pages[i]);
 	}

 	return B_OK;

@@ -610,11 +606,9 @@ write_to_cache(file_cache_ref* ref, void* cookie, off_t offset,

 	// make the pages accessible in the cache
 	for (int32 i = pageIndex; i-- > 0;) {
-		ref->cache->NotifyPageEvents(pages[i], PAGE_EVENT_NOT_BUSY);
+		ref->cache->MarkPageUnbusy(pages[i]);

-		if (writeThrough)
-			pages[i]->state = PAGE_STATE_ACTIVE;
-		else
+		if (!writeThrough)
 			vm_page_set_state(pages[i], PAGE_STATE_MODIFIED);

 		DEBUG_PAGE_ACCESS_END(pages[i]);

@@ -772,7 +766,7 @@ cache_io(void* _cacheRef, void* cookie, off_t offset, addr_t buffer,
 			if (status != B_OK)
 				return status;

-			if (page->state == PAGE_STATE_BUSY) {
+			if (page->busy) {
 				cache->WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
 				continue;
 			}

@@ -797,8 +791,7 @@ cache_io(void* _cacheRef, void* cookie, off_t offset, addr_t buffer,
 			// need to unlock the cache temporarily to avoid a potential
 			// deadlock. To make sure that our page doesn't go away, we mark
 			// it busy for the time.
-			uint8 oldPageState = page->state;
-			page->state = PAGE_STATE_BUSY;
+			page->busy = true;
 			locker.Unlock();

 			// copy the contents of the page already in memory

@@ -818,14 +811,13 @@ cache_io(void* _cacheRef, void* cookie, off_t offset, addr_t buffer,

 			locker.Lock();

-			page->state = oldPageState;
 			if (doWrite && page->state != PAGE_STATE_MODIFIED) {
 				DEBUG_PAGE_ACCESS_START(page);
 				vm_page_set_state(page, PAGE_STATE_MODIFIED);
 				DEBUG_PAGE_ACCESS_END(page);
 			}

-			cache->NotifyPageEvents(page, PAGE_EVENT_NOT_BUSY);
+			cache->MarkPageUnbusy(page);
 		}

 		if (bytesLeft <= bytesInPage) {
@@ -1352,6 +1352,7 @@ MemoryManager::_MapChunk(VMArea* vmArea, addr_t address, size_t size,
 		vm_page* page = vm_page_allocate_page(PAGE_STATE_FREE);
 		cache->InsertPage(page, offset);
 		vm_page_set_state(page, PAGE_STATE_WIRED);
+		page->busy = false;

 		page->wired_count++;
 		atomic_add(&gMappedPagesCount, 1);

@@ -1058,7 +1058,7 @@ VMCache::Resize(off_t newSize, int priority)
 		for (VMCachePagesTree::Iterator it
 					= pages.GetIterator(newPageCount, true, true);
 				vm_page* page = it.Next();) {
-			if (page->state == PAGE_STATE_BUSY) {
+			if (page->busy) {
 				if (page->busy_writing) {
 					// We cannot wait for the page to become available
 					// as we might cause a deadlock this way

@@ -1107,7 +1107,7 @@ VMCache::FlushAndRemoveAllPages()
 	// remove pages
 	for (VMCachePagesTree::Iterator it = pages.GetIterator();
 			vm_page* page = it.Next();) {
-		if (page->state == PAGE_STATE_BUSY) {
+		if (page->busy) {
 			// wait for page to become unbusy
 			WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);

@@ -515,9 +515,6 @@ map_page(VMArea* area, vm_page* page, addr_t address, uint32 protection)
 	if (page->usage_count < 0)
 		page->usage_count = 1;

-	if (page->state != PAGE_STATE_MODIFIED)
-		vm_page_set_state(page, PAGE_STATE_ACTIVE);
-
 	return B_OK;
 }

@@ -1128,8 +1125,16 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
 			vm_page* page = vm_page_allocate_page(newPageState);
 			cache->InsertPage(page, offset);
 			map_page(area, page, address, protection);
-				// TODO: This sets the page state to "active", but it would
-				// make more sense to set it to "wired".
+//			vm_page_set_state(page, PAGE_STATE_WIRED);
+				// TODO: The pages should be PAGE_STATE_WIRED, since there's
+				// no need for the page daemon to play with them (the same
+				// should be considered in vm_soft_fault()). ATM doing that
+				// will result in bad thrashing in systems with little
+				// memory due to the current tuning of the page daemon. It
+				// will age pages way too fast (since it just skips
+				// PAGE_STATE_WIRED pages, while it processes
+				// PAGE_STATE_ACTIVE with wired_count > 0).
+			page->busy = false;

 			DEBUG_PAGE_ACCESS_END(page);

@@ -1179,6 +1184,7 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
 			increment_page_wired_count(page);
 			cache->InsertPage(page, offset);
 			vm_page_set_state(page, PAGE_STATE_WIRED);
+			page->busy = false;

 			DEBUG_PAGE_ACCESS_END(page);
 		}

@@ -1212,6 +1218,7 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
 			increment_page_wired_count(page);
 			cache->InsertPage(page, offset);
 			vm_page_set_state(page, PAGE_STATE_WIRED);
+			page->busy = false;

 			DEBUG_PAGE_ACCESS_END(page);
 		}

@@ -1511,7 +1518,7 @@ pre_map_area_pages(VMArea* area, VMCache* cache)
 			break;

 		// skip inactive pages
-		if (page->state == PAGE_STATE_BUSY || page->usage_count <= 0)
+		if (page->busy || page->usage_count <= 0)
 			continue;

 		DEBUG_PAGE_ACCESS_START(page);

@@ -1817,7 +1824,7 @@ vm_clone_area(team_id team, const char* name, void** address,
 		// map in all pages from source
 		for (VMCachePagesTree::Iterator it = cache->pages.GetIterator();
 				vm_page* page = it.Next();) {
-			if (page->state != PAGE_STATE_BUSY) {
+			if (!page->busy) {
 				DEBUG_PAGE_ACCESS_START(page);
 				map_page(newArea, page,
 					newArea->Base() + ((page->cache_offset << PAGE_SHIFT)
@@ -2738,7 +2745,7 @@ dump_cache(int argc, char** argv)
 	if (showPages) {
 		for (VMCachePagesTree::Iterator it = cache->pages.GetIterator();
 				vm_page* page = it.Next();) {
-			if (!page->is_dummy) {
+			if (!vm_page_is_dummy(page)) {
 				kprintf("\t%p ppn 0x%lx offset 0x%lx state %u (%s) "
 					"wired_count %u\n", page, page->physical_page_number,
 					page->cache_offset, page->state,

@@ -3690,7 +3697,7 @@ fault_get_page(PageFaultContext& context)

 	for (;;) {
 		page = cache->LookupPage(context.cacheOffset);
-		if (page == NULL || page->state != PAGE_STATE_BUSY) {
+		if (page == NULL || !page->busy) {
 			// Either there is no page or there is one and it is not busy.
 			break;
 		}

@@ -3745,8 +3752,7 @@ fault_get_page(PageFaultContext& context)
 		}

 		// mark the page unbusy again
-		page->state = PAGE_STATE_ACTIVE;
-		cache->NotifyPageEvents(page, PAGE_EVENT_NOT_BUSY);
+		cache->MarkPageUnbusy(page);

 		DEBUG_PAGE_ACCESS_END(page);

@@ -3769,12 +3775,12 @@ fault_get_page(PageFaultContext& context)

 		// allocate a clean page
 		page = vm_page_allocate_page(PAGE_STATE_CLEAR);
+		page->busy = false;
 		FTRACE(("vm_soft_fault: just allocated page 0x%lx\n",
 			page->physical_page_number));

 		// insert the new page into our cache
 		cache->InsertPage(page, context.cacheOffset);

 	} else if (page->Cache() != context.topCache && context.isWrite) {
 		// We have a page that has the data we want, but in the wrong cache
 		// object so we need to copy it and stick it into the top cache.

@@ -3784,12 +3790,12 @@ fault_get_page(PageFaultContext& context)
 		// from our source cache -- if possible, that is.
 		FTRACE(("get new page, copy it, and put it into the topmost cache\n"));
 		page = vm_page_allocate_page(PAGE_STATE_FREE);
+		page->busy = false;

 		// To not needlessly kill concurrency we unlock all caches but the top
 		// one while copying the page. Lacking another mechanism to ensure that
 		// the source page doesn't disappear, we mark it busy.
-		int sourcePageState = sourcePage->state;
-		sourcePage->state = PAGE_STATE_BUSY;
+		sourcePage->busy = true;
 		context.cacheChainLocker.UnlockKeepRefs(true);

 		// copy the page

@@ -3797,8 +3803,7 @@ fault_get_page(PageFaultContext& context)
 			sourcePage->physical_page_number * B_PAGE_SIZE);

 		context.cacheChainLocker.RelockCaches(true);
-		sourcePage->state = sourcePageState;
-		sourcePage->Cache()->NotifyPageEvents(sourcePage, PAGE_EVENT_NOT_BUSY);
+		sourcePage->Cache()->MarkPageUnbusy(sourcePage);

 		// insert the new page into our cache
 		context.topCache->InsertPage(page, context.cacheOffset);

@@ -3964,8 +3969,6 @@ vm_soft_fault(VMAddressSpace* addressSpace, addr_t originalAddress,
 			// fine, though. We'll simply leave and probably fault again.
 			// To make sure we'll have more luck then, we ensure that the
 			// minimum object reserve is available.
-			if (context.page->state == PAGE_STATE_BUSY)
-				vm_page_set_state(context.page, PAGE_STATE_ACTIVE);
 			DEBUG_PAGE_ACCESS_END(context.page);

 			context.UnlockAll();

@@ -65,7 +65,7 @@ PageCacheLocker::~PageCacheLocker()
 bool
 PageCacheLocker::_IgnorePage(vm_page* page)
 {
-	if (page->state == PAGE_STATE_WIRED || page->state == PAGE_STATE_BUSY
+	if (page->busy || page->state == PAGE_STATE_WIRED
 		|| page->state == PAGE_STATE_FREE || page->state == PAGE_STATE_CLEAR
 		|| page->state == PAGE_STATE_UNUSED || page->wired_count > 0)
 		return true;

@@ -257,7 +257,7 @@ clear_page_activation(int32 index)
 	if (!locker.IsLocked())
 		return;

-	if (page->state == PAGE_STATE_ACTIVE)
+	if (!page->busy && page->state == PAGE_STATE_ACTIVE)
 		vm_clear_map_flags(page, PAGE_ACCESSED);
 }

@@ -334,8 +334,6 @@ page_state_to_string(int state)
 			return "active";
 		case PAGE_STATE_INACTIVE:
 			return "inactive";
-		case PAGE_STATE_BUSY:
-			return "busy";
 		case PAGE_STATE_MODIFIED:
 			return "modified";
 		case PAGE_STATE_FREE:

@@ -420,10 +418,10 @@ dump_page(int argc, char **argv)
 	kprintf("cache: %p\n", page->Cache());
 	kprintf("cache_offset: %ld\n", page->cache_offset);
 	kprintf("cache_next: %p\n", page->cache_next);
-	kprintf("is dummy: %d\n", page->is_dummy);
 	kprintf("state: %s\n", page_state_to_string(page->state));
 	kprintf("wired_count: %d\n", page->wired_count);
 	kprintf("usage_count: %d\n", page->usage_count);
+	kprintf("busy: %d\n", page->busy);
 	kprintf("busy_writing: %d\n", page->busy_writing);
 	kprintf("accessed: %d\n", page->accessed);
 	kprintf("modified: %d\n", page->modified);

@@ -545,16 +543,20 @@ dump_page_stats(int argc, char **argv)
 {
 	page_num_t swappableModified = 0;
 	page_num_t swappableModifiedInactive = 0;
-	uint32 counter[8];
+	size_t counter[8];
+	size_t busyCounter[8];
 	addr_t i;

 	memset(counter, 0, sizeof(counter));
+	memset(busyCounter, 0, sizeof(busyCounter));

 	for (i = 0; i < sNumPages; i++) {
 		if (sPages[i].state > 7)
 			panic("page %li at %p has invalid state!\n", i, &sPages[i]);

 		counter[sPages[i].state]++;
+		if (sPages[i].busy)
+			busyCounter[sPages[i].state]++;

 		if (sPages[i].state == PAGE_STATE_MODIFIED && sPages[i].Cache() != NULL
 			&& sPages[i].Cache()->temporary && sPages[i].wired_count == 0) {

@@ -566,12 +568,20 @@ dump_page_stats(int argc, char **argv)

 	kprintf("page stats:\n");
 	kprintf("total: %lu\n", sNumPages);
-	kprintf("active: %lu\ninactive: %lu\nbusy: %lu\nunused: %lu\n",
-		counter[PAGE_STATE_ACTIVE], counter[PAGE_STATE_INACTIVE],
-		counter[PAGE_STATE_BUSY], counter[PAGE_STATE_UNUSED]);
-	kprintf("wired: %lu\nmodified: %lu\nfree: %lu\nclear: %lu\n",
-		counter[PAGE_STATE_WIRED], counter[PAGE_STATE_MODIFIED],
-		counter[PAGE_STATE_FREE], counter[PAGE_STATE_CLEAR]);

+	kprintf("active: %" B_PRIuSIZE " (busy: %" B_PRIuSIZE ")\n",
+		counter[PAGE_STATE_ACTIVE], busyCounter[PAGE_STATE_ACTIVE]);
+	kprintf("inactive: %" B_PRIuSIZE " (busy: %" B_PRIuSIZE ")\n",
+		counter[PAGE_STATE_INACTIVE], busyCounter[PAGE_STATE_INACTIVE]);
+	kprintf("unused: %" B_PRIuSIZE " (busy: %" B_PRIuSIZE ")\n",
+		counter[PAGE_STATE_UNUSED], busyCounter[PAGE_STATE_UNUSED]);
+	kprintf("wired: %" B_PRIuSIZE " (busy: %" B_PRIuSIZE ")\n",
+		counter[PAGE_STATE_WIRED], busyCounter[PAGE_STATE_WIRED]);
+	kprintf("modified: %" B_PRIuSIZE " (busy: %" B_PRIuSIZE ")\n",
+		counter[PAGE_STATE_MODIFIED], busyCounter[PAGE_STATE_MODIFIED]);
+	kprintf("free: %" B_PRIuSIZE "\n", counter[PAGE_STATE_FREE]);
+	kprintf("clear: %" B_PRIuSIZE "\n", counter[PAGE_STATE_CLEAR]);

 	kprintf("unreserved free pages: %" B_PRId32 "\n", sUnreservedFreePages);
 	kprintf("system reserved pages: %" B_PRId32 "\n", sSystemReservedPages);
 	kprintf("page deficit: %lu\n", sPageDeficit);
@@ -626,7 +636,6 @@ free_page(vm_page* page, bool clear)
 	VMPageQueue* fromQueue;

 	switch (page->state) {
-		case PAGE_STATE_BUSY:
 		case PAGE_STATE_ACTIVE:
 			fromQueue = &sActivePageQueue;
 			break;

@@ -699,7 +708,6 @@ set_page_state(vm_page *page, int pageState)
 	VMPageQueue* fromQueue;

 	switch (page->state) {
-		case PAGE_STATE_BUSY:
 		case PAGE_STATE_ACTIVE:
 			fromQueue = &sActivePageQueue;
 			break;

@@ -726,7 +734,6 @@ set_page_state(vm_page *page, int pageState)
 	VMPageQueue* toQueue;

 	switch (pageState) {
-		case PAGE_STATE_BUSY:
 		case PAGE_STATE_ACTIVE:
 			toQueue = &sActivePageQueue;
 			break;

@@ -862,7 +869,8 @@ page_scrubber(void *unused)

 			DEBUG_PAGE_ACCESS_START(page[i]);

-			page[i]->state = PAGE_STATE_BUSY;
+			page[i]->state = PAGE_STATE_ACTIVE;
+			page[i]->busy = true;
 			scrubCount++;
 		}

@@ -884,6 +892,7 @@ page_scrubber(void *unused)
 		// and put them into the clear queue
 		for (int32 i = 0; i < scrubCount; i++) {
 			page[i]->state = PAGE_STATE_CLEAR;
+			page[i]->busy = false;
 			DEBUG_PAGE_ACCESS_END(page[i]);
 			sClearPageQueue.PrependUnlocked(page[i]);
 		}

@@ -899,13 +908,6 @@ page_scrubber(void *unused)
 }


-static inline bool
-is_marker_page(struct vm_page *page)
-{
-	return page->is_dummy;
-}
-
-
 static void
 remove_page_marker(struct vm_page &marker)
 {

@@ -953,7 +955,7 @@ next_modified_page(struct vm_page &marker)
 		page = sModifiedPageQueue.Head();

 	for (; page != NULL; page = sModifiedPageQueue.Next(page)) {
-		if (!is_marker_page(page) && page->state != PAGE_STATE_BUSY) {
+		if (!page->busy) {
 			// insert marker
 			marker.state = PAGE_STATE_MODIFIED;
 			sModifiedPageQueue.InsertAfter(page, &marker);

@@ -1035,7 +1037,6 @@ private:
 	struct VMCache* fCache;
 	bool fDequeuedPage;
 	bool fIsActive;
-	int fOldPageState;
 };

@@ -1060,7 +1061,7 @@ PageWriteWrapper::SetTo(vm_page* page, bool dequeuedPage)
 {
 	DEBUG_PAGE_ACCESS_CHECK(page);

-	if (page->state == PAGE_STATE_BUSY)
+	if (page->busy)
 		panic("setting page write wrapper to busy page");

 	if (fIsActive)

@@ -1071,8 +1072,7 @@ PageWriteWrapper::SetTo(vm_page* page, bool dequeuedPage)
 	fDequeuedPage = dequeuedPage;
 	fIsActive = true;

-	fOldPageState = fPage->state;
-	fPage->state = PAGE_STATE_BUSY;
+	fPage->busy = true;
 	fPage->busy_writing = true;
 }

@@ -1120,6 +1120,9 @@ PageWriteWrapper::Done(status_t result)

 	DEBUG_PAGE_ACCESS_CHECK(fPage);

+	fPage->busy = false;
+		// Set unbusy and notify later by hand, since we might free the page.
+
 	if (result == B_OK) {
 		// put it into the active/inactive queue
 		move_page_to_active_or_inactive_queue(fPage, fDequeuedPage);

@@ -1132,10 +1135,8 @@ PageWriteWrapper::Done(status_t result)
 		if (fDequeuedPage) {
 			fPage->state = PAGE_STATE_MODIFIED;
 			sModifiedPageQueue.AppendUnlocked(fPage);
-		} else {
-			fPage->state = fOldPageState;
+		} else
 			set_page_state(fPage, PAGE_STATE_MODIFIED);
 	}

 	if (!fPage->busy_writing) {
 		// The busy_writing flag was cleared. That means the cache has been

@@ -1400,9 +1401,9 @@ page_writer(void* /*unused*/)
 	}

 	vm_page marker;
-	marker.is_dummy = true;
 	marker.SetCacheRef(NULL);
 	marker.state = PAGE_STATE_UNUSED;
+	marker.busy = true;
 #if DEBUG_PAGE_QUEUE
 	marker.queue = NULL;
 #endif

@@ -1493,7 +1494,7 @@ page_writer(void* /*unused*/)
 			}

 			// state might have changed while we were locking the cache
-			if (page->state != PAGE_STATE_MODIFIED) {
+			if (page->busy || page->state != PAGE_STATE_MODIFIED) {
 				// release the cache reference
 				DEBUG_PAGE_ACCESS_END(page);
 				cache->ReleaseStoreRef();

@@ -1565,7 +1566,7 @@ find_page_candidate(struct vm_page &marker)
 	}

 	while (page != NULL) {
-		if (!is_marker_page(page) && page->state == PAGE_STATE_INACTIVE) {
+		if (!page->busy) {
 			// we found a candidate, insert marker
 			marker.state = PAGE_STATE_INACTIVE;
 			sInactivePageQueue.InsertAfter(page, &marker);

@@ -1591,7 +1592,7 @@ steal_page(vm_page *page)
 	MethodDeleter<VMCache> _2(cache, &VMCache::ReleaseRefLocked);

 	// check again if that page is still a candidate
-	if (page->state != PAGE_STATE_INACTIVE)
+	if (page->busy || page->state != PAGE_STATE_INACTIVE)
 		return false;

 	DEBUG_PAGE_ACCESS_START(page);

@@ -1628,9 +1629,9 @@ steal_pages(vm_page **pages, size_t count)
 {
 	while (true) {
 		vm_page marker;
-		marker.is_dummy = true;
 		marker.SetCacheRef(NULL);
 		marker.state = PAGE_STATE_UNUSED;
+		marker.busy = true;
 #if DEBUG_PAGE_QUEUE
 		marker.queue = NULL;
 #endif
@@ -1765,12 +1766,13 @@ vm_page_write_modified_page_range(struct VMCache* cache, uint32 firstPage,

 		bool dequeuedPage = false;
 		if (page != NULL) {
-			if (page->state == PAGE_STATE_MODIFIED) {
+			if (page->busy) {
+				page = NULL;
+			} else if (page->state == PAGE_STATE_MODIFIED) {
 				DEBUG_PAGE_ACCESS_START(page);
 				sModifiedPageQueue.RemoveUnlocked(page);
 				dequeuedPage = true;
-			} else if (page->state == PAGE_STATE_BUSY
-					|| !vm_test_map_modification(page)) {
+			} else if (!vm_test_map_modification(page)) {
 				page = NULL;
 			} else
 				DEBUG_PAGE_ACCESS_START(page);

@@ -1869,7 +1871,7 @@ vm_page_schedule_write_page_range(struct VMCache *cache, uint32 firstPage,
 		if (page->cache_offset >= endPage)
 			break;

-		if (page->state == PAGE_STATE_MODIFIED) {
+		if (!page->busy && page->state == PAGE_STATE_MODIFIED) {
 			DEBUG_PAGE_ACCESS_START(page);
 			vm_page_requeue(page, false);
 			modified++;

@@ -1930,7 +1932,6 @@ vm_page_init(kernel_args *args)
 	// initialize the free page table
 	for (uint32 i = 0; i < sNumPages; i++) {
 		sPages[i].physical_page_number = sPhysicalPageOffset + i;
-		sPages[i].is_dummy = false;
 		sPages[i].state = PAGE_STATE_FREE;
 		new(&sPages[i].mappings) vm_page_mappings();
 		sPages[i].wired_count = 0;

@@ -2066,6 +2067,7 @@ vm_mark_page_range_inuse(addr_t startPage, addr_t length)
 					? sFreePageQueue : sClearPageQueue;
 				queue.Remove(page);
 				page->state = PAGE_STATE_UNUSED;
+				page->busy = false;
 				atomic_add(&sUnreservedFreePages, -1);
 				DEBUG_PAGE_ACCESS_END(page);
 				break;

@@ -2074,7 +2076,6 @@ vm_mark_page_range_inuse(addr_t startPage, addr_t length)
 				break;
 			case PAGE_STATE_ACTIVE:
 			case PAGE_STATE_INACTIVE:
-			case PAGE_STATE_BUSY:
 			case PAGE_STATE_MODIFIED:
 			case PAGE_STATE_UNUSED:
 			default:
@@ -2102,6 +2103,25 @@ vm_page_unreserve_pages(uint32 count)

 	T(UnreservePages(count));

+	while (true) {
+		int32 freePages = sUnreservedFreePages;
+		if (freePages >= 0)
+			break;
+
+		int32 toUnreserve = std::min((int32)count, -freePages);
+		if (atomic_test_and_set(&sUnreservedFreePages,
+				freePages + toUnreserve, freePages) == freePages) {
+			count -= toUnreserve;
+			if (count == 0) {
+				// TODO: Notify waiting system priority reservers.
+				return;
+			}
+			break;
+		}
+
+		// the count changed in the meantime -- retry
+	}
+
 	while (true) {
 		int32 systemReserve = sSystemReservedPages;
 		if (systemReserve >= (int32)kMinimumSystemReserve)

@@ -2113,8 +2133,10 @@ vm_page_unreserve_pages(uint32 count)
 				systemReserve + toUnreserve, systemReserve)
 					== systemReserve) {
 			count -= toUnreserve;
-			if (count == 0)
+			if (count == 0) {
+				// TODO: Notify waiting system priority reservers.
 				return;
+			}
 			break;
 		}

@@ -2314,7 +2336,8 @@ vm_page_allocate_page(int pageState)
 	DEBUG_PAGE_ACCESS_START(page);

 	int oldPageState = page->state;
-	page->state = PAGE_STATE_BUSY;
+	page->state = PAGE_STATE_ACTIVE;
+	page->busy = true;
 	page->usage_count = 2;
 	page->accessed = false;
 	page->modified = false;

@@ -2352,7 +2375,8 @@ allocate_page_run(page_num_t start, page_num_t length, int pageState,
 			freePages.Add(&page);
 		}

-		page.state = PAGE_STATE_BUSY;
+		page.state = PAGE_STATE_ACTIVE;
+		page.busy = true;
 		page.usage_count = 1;
 		page.accessed = false;
 		page.modified = false;

@@ -2497,6 +2521,13 @@ vm_lookup_page(addr_t pageNumber)
 }


+bool
+vm_page_is_dummy(struct vm_page *page)
+{
+	return page < sPages || page >= sPages + sNumPages;
+}
+
+
 /*! Free the page that belonged to a certain cache.
 	You can use vm_page_set_state() manually if you prefer, but only
 	if the page does not equal PAGE_STATE_MODIFIED.

@@ -2538,7 +2569,6 @@ vm_page_requeue(struct vm_page *page, bool tail)
 	VMPageQueue *queue = NULL;

 	switch (page->state) {
-		case PAGE_STATE_BUSY:
 		case PAGE_STATE_ACTIVE:
 			queue = &sActivePageQueue;
 			break;