* Replaced the vm_page_allocate_page*() "pageState" parameter by a more
  general "flags" parameter. It encodes the target state of the page -- so
  that the page isn't unnecessarily put in the wrong page queue first -- a
  flag whether the page should be cleared, and one to indicate whether the
  page should be marked busy.
* Added page state PAGE_STATE_CACHED. Not used yet.
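
For illustration only (this snippet is not part of the commit): a caller now
passes one PAGE_STATE_* constant, optionally OR'ed with the new
VM_PAGE_ALLOC_* bits, instead of a bare page state:

	vm_page* busyPage = vm_page_allocate_page(
		PAGE_STATE_ACTIVE | VM_PAGE_ALLOC_BUSY);
		// appended to the active queue, returned marked busy
	vm_page* pageTable = vm_page_allocate_page(
		PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
		// zero-filled; wired pages aren't appended to any page queue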


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@35333 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold 2010-01-29 15:54:40 +00:00
parent 00d70bc148
commit e65c400299
9 changed files with 115 additions and 89 deletions

View File

@@ -49,10 +49,10 @@ void vm_page_unreserve_pages(uint32 count);
 void vm_page_reserve_pages(uint32 count, int priority);
 bool vm_page_try_reserve_pages(uint32 count, int priority);
-struct vm_page *vm_page_allocate_page(int pageState);
-struct vm_page *vm_page_allocate_page_run(int state, addr_t base,
+struct vm_page *vm_page_allocate_page(uint32 flags);
+struct vm_page *vm_page_allocate_page_run(uint32 flags, addr_t base,
 addr_t length, int priority);
-struct vm_page *vm_page_allocate_page_run_no_base(int state, addr_t count,
+struct vm_page *vm_page_allocate_page_run_no_base(uint32 flags, addr_t count,
 int priority);
 struct vm_page *vm_page_at_index(int32 index);
 struct vm_page *vm_lookup_page(addr_t pageNumber);

View File

@@ -128,13 +128,23 @@ enum {
 PAGE_STATE_ACTIVE = 0,
 PAGE_STATE_INACTIVE,
 PAGE_STATE_MODIFIED,
+PAGE_STATE_CACHED,
 PAGE_STATE_FREE,
 PAGE_STATE_CLEAR,
 PAGE_STATE_WIRED,
-PAGE_STATE_UNUSED
+PAGE_STATE_UNUSED,
+PAGE_STATE_COUNT,
+PAGE_STATE_FIRST_UNQUEUED = PAGE_STATE_WIRED
 };
+#define VM_PAGE_ALLOC_STATE 0x00000007
+#define VM_PAGE_ALLOC_CLEAR 0x00000010
+#define VM_PAGE_ALLOC_BUSY 0x00000020
 #if DEBUG_PAGE_ACCESS
 # include <thread.h>
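
The low three bits of the flags value (VM_PAGE_ALLOC_STATE) carry the target
page state; the clear and busy requests are independent bits. A decoding
sketch (illustrative only, not part of this header -- it mirrors what
vm_page_allocate_page() does in the vm_page.cpp hunks below):

	uint32 flags = PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR;
	uint32 pageState = flags & VM_PAGE_ALLOC_STATE;		// PAGE_STATE_WIRED
	bool toClear = (flags & VM_PAGE_ALLOC_CLEAR) != 0;	// true
	bool markBusy = (flags & VM_PAGE_ALLOC_BUSY) != 0;	// false
	bool enqueue = pageState < PAGE_STATE_FIRST_UNQUEUED;
		// false -- wired/unused pages stay off the page queues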

View File

@@ -539,9 +539,9 @@ Aperture::AllocateMemory(aperture_memory *memory, uint32 flags)
 uint32 count = size / B_PAGE_SIZE;
 if ((flags & B_APERTURE_NEED_PHYSICAL) != 0) {
-memory->page = vm_page_allocate_page_run(PAGE_STATE_CLEAR, 0, count,
+memory->page = vm_page_allocate_page_run(
+PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR, 0, count,
 VM_PRIORITY_SYSTEM);
-// TODO: Mark pages unbusy!
 if (memory->page == NULL)
 return B_NO_MEMORY;
 } else {
@@ -552,8 +552,8 @@ Aperture::AllocateMemory(aperture_memory *memory, uint32 flags)
 vm_page_reserve_pages(count, VM_PRIORITY_SYSTEM);
 for (uint32 i = 0; i < count; i++) {
-memory->pages[i] = vm_page_allocate_page(PAGE_STATE_CLEAR);
-memory->pages[i]->busy = false;
+memory->pages[i] = vm_page_allocate_page(
+PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
 }
 vm_page_unreserve_pages(count);
 }

View File

@@ -543,11 +543,7 @@ map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
 unsigned int i;
 // we need to allocate a pgtable
-page = vm_page_allocate_page(PAGE_STATE_CLEAR);
-// mark the page WIRED
-vm_page_set_state(page, PAGE_STATE_WIRED);
-page->busy = false;
+page = vm_page_allocate_page(PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
 DEBUG_PAGE_ACCESS_END(page);
@@ -592,11 +588,7 @@ map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
 unsigned int i;
 // we need to allocate a pgtable
-page = vm_page_allocate_page(PAGE_STATE_CLEAR);
-// mark the page WIRED
-vm_page_set_state(page, PAGE_STATE_WIRED);
-page->busy = false;
+page = vm_page_allocate_page(PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
 DEBUG_PAGE_ACCESS_END(page);

View File

@@ -422,11 +422,7 @@ X86VMTranslationMap::Map(addr_t va, addr_t pa, uint32 attributes)
 vm_page *page;
 // we need to allocate a pgtable
-page = vm_page_allocate_page(PAGE_STATE_CLEAR);
-// mark the page WIRED
-vm_page_set_state(page, PAGE_STATE_WIRED);
-page->busy = false;
+page = vm_page_allocate_page(PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
 DEBUG_PAGE_ACCESS_END(page);

View File

@@ -157,7 +157,8 @@ PrecacheIO::Prepare()
 // allocate pages for the cache and mark them busy
 uint32 i = 0;
 for (size_t pos = 0; pos < fSize; pos += B_PAGE_SIZE) {
-vm_page* page = vm_page_allocate_page(PAGE_STATE_FREE);
+vm_page* page = vm_page_allocate_page(
+PAGE_STATE_ACTIVE | VM_PAGE_ALLOC_BUSY);
 fCache->InsertPage(page, fOffset + pos);
@@ -385,7 +386,7 @@ read_into_cache(file_cache_ref* ref, void* cookie, off_t offset,
 // allocate pages for the cache and mark them busy
 for (size_t pos = 0; pos < numBytes; pos += B_PAGE_SIZE) {
 vm_page* page = pages[pageIndex++] = vm_page_allocate_page(
-PAGE_STATE_FREE);
+PAGE_STATE_ACTIVE | VM_PAGE_ALLOC_BUSY);
 cache->InsertPage(page, offset + pos);
@@ -508,7 +509,7 @@ write_to_cache(file_cache_ref* ref, void* cookie, off_t offset,
 // TODO: the pages we allocate here should have been reserved upfront
 // in cache_io()
 vm_page* page = pages[pageIndex++] = vm_page_allocate_page(
-PAGE_STATE_FREE);
+PAGE_STATE_ACTIVE | VM_PAGE_ALLOC_BUSY);
 ref->cache->InsertPage(page, offset + pos);
@@ -1074,7 +1075,8 @@ file_cache_init(void)
 {
 // allocate a clean page we can use for writing zeroes
 vm_page_reserve_pages(1, VM_PRIORITY_SYSTEM);
-vm_page* page = vm_page_allocate_page(PAGE_STATE_CLEAR);
+vm_page* page = vm_page_allocate_page(
+PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
 vm_page_unreserve_pages(1);
 sZeroPage = (addr_t)page->physical_page_number * B_PAGE_SIZE;

View File

@@ -1349,10 +1349,8 @@ MemoryManager::_MapChunk(VMArea* vmArea, addr_t address, size_t size,
 addr_t endAreaOffset = areaOffset + size;
 for (size_t offset = areaOffset; offset < endAreaOffset;
 offset += B_PAGE_SIZE) {
-vm_page* page = vm_page_allocate_page(PAGE_STATE_FREE);
+vm_page* page = vm_page_allocate_page(PAGE_STATE_WIRED);
 cache->InsertPage(page, offset);
-vm_page_set_state(page, PAGE_STATE_WIRED);
-page->busy = false;
 page->wired_count++;
 atomic_add(&gMappedPagesCount, 1);

View File

@@ -920,8 +920,8 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
 bool isStack = (protection & B_STACK_AREA) != 0;
 page_num_t guardPages;
 bool canOvercommit = false;
-uint32 newPageState = (flags & CREATE_AREA_DONT_CLEAR) != 0
-? PAGE_STATE_FREE : PAGE_STATE_CLEAR;
+uint32 pageAllocFlags = (flags & CREATE_AREA_DONT_CLEAR) == 0
+? VM_PAGE_ALLOC_CLEAR : 0;
 TRACE(("create_anonymous_area [%ld] %s: size 0x%lx\n", team, name, size));
@@ -1049,8 +1049,8 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
 if (wiring == B_CONTIGUOUS) {
 // we try to allocate the page run here upfront as this may easily
 // fail for obvious reasons
-page = vm_page_allocate_page_run(newPageState, physicalAddress,
-size / B_PAGE_SIZE, priority);
+page = vm_page_allocate_page_run(PAGE_STATE_WIRED | pageAllocFlags,
+physicalAddress, size / B_PAGE_SIZE, priority);
 if (page == NULL) {
 status = B_NO_MEMORY;
 goto err0;
@@ -1122,10 +1122,9 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
 # endif
 continue;
 #endif
-vm_page* page = vm_page_allocate_page(newPageState);
-cache->InsertPage(page, offset);
-map_page(area, page, address, protection);
-// vm_page_set_state(page, PAGE_STATE_WIRED);
+vm_page* page = vm_page_allocate_page(
+PAGE_STATE_ACTIVE | pageAllocFlags);
+// PAGE_STATE_WIRED | pageAllocFlags);
 // TODO: The pages should be PAGE_STATE_WIRED, since there's
 // no need for the page daemon to play with them (the same
 // should be considered in vm_soft_fault()). ATM doing that
@@ -1134,7 +1133,8 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
 // will age pages way too fast (since it just skips
 // PAGE_STATE_WIRED pages, while it processes
 // PAGE_STATE_ACTIVE with wired_count > 0).
-page->busy = false;
+cache->InsertPage(page, offset);
+map_page(area, page, address, protection);
 DEBUG_PAGE_ACCESS_END(page);
@@ -1217,8 +1217,6 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
 increment_page_wired_count(page);
 cache->InsertPage(page, offset);
-vm_page_set_state(page, PAGE_STATE_WIRED);
-page->busy = false;
 DEBUG_PAGE_ACCESS_END(page);
 }
@@ -3720,7 +3718,8 @@ fault_get_page(PageFaultContext& context)
 // see if the backing store has it
 if (cache->HasPage(context.cacheOffset)) {
 // insert a fresh page and mark it busy -- we're going to read it in
-page = vm_page_allocate_page(PAGE_STATE_FREE);
+page = vm_page_allocate_page(
+PAGE_STATE_ACTIVE | VM_PAGE_ALLOC_BUSY);
 cache->InsertPage(page, context.cacheOffset);
 // We need to unlock all caches and the address space while reading
@@ -3774,8 +3773,7 @@ fault_get_page(PageFaultContext& context)
 cache = context.isWrite ? context.topCache : lastCache;
 // allocate a clean page
-page = vm_page_allocate_page(PAGE_STATE_CLEAR);
-page->busy = false;
+page = vm_page_allocate_page(PAGE_STATE_ACTIVE | VM_PAGE_ALLOC_CLEAR);
 FTRACE(("vm_soft_fault: just allocated page 0x%lx\n",
 page->physical_page_number));
@@ -3789,8 +3787,7 @@ fault_get_page(PageFaultContext& context)
 // TODO: If memory is low, it might be a good idea to steal the page
 // from our source cache -- if possible, that is.
 FTRACE(("get new page, copy it, and put it into the topmost cache\n"));
-page = vm_page_allocate_page(PAGE_STATE_FREE);
-page->busy = false;
+page = vm_page_allocate_page(PAGE_STATE_ACTIVE);
 // To not needlessly kill concurrency we unlock all caches but the top
 // one while copying the page. Lacking another mechanism to ensure that

View File

@@ -73,11 +73,14 @@ static const uint32 kMinimumSystemReserve = VM_PAGE_RESERVE_USER;
 int32 gMappedPagesCount;
-static VMPageQueue sFreePageQueue;
-static VMPageQueue sClearPageQueue;
-static VMPageQueue sModifiedPageQueue;
-static VMPageQueue sInactivePageQueue;
-static VMPageQueue sActivePageQueue;
+static VMPageQueue sPageQueues[PAGE_STATE_COUNT];
+static VMPageQueue& sFreePageQueue = sPageQueues[PAGE_STATE_FREE];
+static VMPageQueue& sClearPageQueue = sPageQueues[PAGE_STATE_CLEAR];
+static VMPageQueue& sModifiedPageQueue = sPageQueues[PAGE_STATE_MODIFIED];
+static VMPageQueue& sInactivePageQueue = sPageQueues[PAGE_STATE_INACTIVE];
+static VMPageQueue& sActivePageQueue = sPageQueues[PAGE_STATE_ACTIVE];
+static VMPageQueue& sCachedPageQueue = sPageQueues[PAGE_STATE_CACHED];
 static vm_page *sPages;
 static addr_t sPhysicalPageOffset;
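
Keeping the old queue names as references into the new sPageQueues array
lets the existing code in this file (debugger commands, page daemon, etc.)
keep compiling unchanged, while new code can pick a queue directly by page
state. A sketch of the resulting invariant (illustrative, not part of the
patch):

	// the free queue is simply the array slot for PAGE_STATE_FREE ...
	ASSERT(&sFreePageQueue == &sPageQueues[PAGE_STATE_FREE]);
	// ... which is what later allows
	//	sPageQueues[pageState].AppendUnlocked(page);
	// instead of hard-coding sActivePageQueue.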
@@ -295,6 +298,7 @@ find_page(int argc, char **argv)
 { "modified", &sModifiedPageQueue },
 { "active", &sActivePageQueue },
 { "inactive", &sInactivePageQueue },
+{ "cached", &sCachedPageQueue },
 { NULL, NULL }
 };
@@ -336,6 +340,8 @@ page_state_to_string(int state)
 return "inactive";
 case PAGE_STATE_MODIFIED:
 return "modified";
+case PAGE_STATE_CACHED:
+return "cached";
 case PAGE_STATE_FREE:
 return "free";
 case PAGE_STATE_CLEAR:
@@ -494,6 +500,8 @@ dump_page_queue(int argc, char **argv)
 queue = &sActivePageQueue;
 else if (!strcmp(argv[1], "inactive"))
 queue = &sInactivePageQueue;
+else if (!strcmp(argv[1], "cached"))
+queue = &sCachedPageQueue;
 else {
 kprintf("page_queue: unknown queue \"%s\".\n", argv[1]);
 return 0;
@@ -573,6 +581,8 @@ dump_page_stats(int argc, char **argv)
 counter[PAGE_STATE_ACTIVE], busyCounter[PAGE_STATE_ACTIVE]);
 kprintf("inactive: %" B_PRIuSIZE " (busy: %" B_PRIuSIZE ")\n",
 counter[PAGE_STATE_INACTIVE], busyCounter[PAGE_STATE_INACTIVE]);
+kprintf("cached: %" B_PRIuSIZE " (busy: %" B_PRIuSIZE ")\n",
+counter[PAGE_STATE_CACHED], busyCounter[PAGE_STATE_CACHED]);
 kprintf("unused: %" B_PRIuSIZE " (busy: %" B_PRIuSIZE ")\n",
 counter[PAGE_STATE_UNUSED], busyCounter[PAGE_STATE_UNUSED]);
 kprintf("wired: %" B_PRIuSIZE " (busy: %" B_PRIuSIZE ")\n",
@@ -598,6 +608,8 @@ dump_page_stats(int argc, char **argv)
 sActivePageQueue.Count());
 kprintf("inactive queue: %p, count = %ld\n", &sInactivePageQueue,
 sInactivePageQueue.Count());
+kprintf("cached queue: %p, count = %ld\n", &sCachedPageQueue,
+sCachedPageQueue.Count());
 return 0;
 }
@@ -645,6 +657,9 @@ free_page(vm_page* page, bool clear)
 case PAGE_STATE_MODIFIED:
 fromQueue = &sModifiedPageQueue;
 break;
+case PAGE_STATE_CACHED:
+fromQueue = &sCachedPageQueue;
+break;
 case PAGE_STATE_FREE:
 case PAGE_STATE_CLEAR:
 panic("free_page(): page %p already free", page);
@@ -717,6 +732,9 @@ set_page_state(vm_page *page, int pageState)
 case PAGE_STATE_MODIFIED:
 fromQueue = &sModifiedPageQueue;
 break;
+case PAGE_STATE_CACHED:
+fromQueue = &sCachedPageQueue;
+break;
 case PAGE_STATE_FREE:
 case PAGE_STATE_CLEAR:
 panic("set_page_state(): page %p is free/clear", page);
@@ -743,6 +761,9 @@ set_page_state(vm_page *page, int pageState)
 case PAGE_STATE_MODIFIED:
 toQueue = &sModifiedPageQueue;
 break;
+case PAGE_STATE_CACHED:
+toQueue = &sCachedPageQueue;
+break;
 case PAGE_STATE_FREE:
 case PAGE_STATE_CLEAR:
 panic("set_page_state(): target state is free/clear");
@@ -809,6 +830,7 @@ move_page_to_active_or_inactive_queue(vm_page *page, bool dequeued)
 state = PAGE_STATE_ACTIVE;
 else
 state = PAGE_STATE_INACTIVE;
+// TODO: Cached queue!
 if (dequeued) {
 page->state = state;
@@ -928,6 +950,9 @@ remove_page_marker(struct vm_page &marker)
 case PAGE_STATE_MODIFIED:
 queue = &sModifiedPageQueue;
 break;
+case PAGE_STATE_CACHED:
+queue = &sCachedPageQueue;
+break;
 default:
 return;
@@ -1919,6 +1944,7 @@ vm_page_init(kernel_args *args)
 sModifiedPageQueue.Init("modified pages queue");
 sInactivePageQueue.Init("inactive pages queue");
 sActivePageQueue.Init("active pages queue");
+sCachedPageQueue.Init("cached pages queue");
 sFreePageQueue.Init("free pages queue");
 sClearPageQueue.Init("clear pages queue");
@@ -2077,6 +2103,7 @@ vm_mark_page_range_inuse(addr_t startPage, addr_t length)
 case PAGE_STATE_ACTIVE:
 case PAGE_STATE_INACTIVE:
 case PAGE_STATE_MODIFIED:
+case PAGE_STATE_CACHED:
 case PAGE_STATE_UNUSED:
 default:
 // uh
@@ -2279,22 +2306,21 @@ vm_page_try_reserve_pages(uint32 count, int priority)
 vm_page *
-vm_page_allocate_page(int pageState)
+vm_page_allocate_page(uint32 flags)
 {
+uint32 pageState = flags & VM_PAGE_ALLOC_STATE;
+ASSERT(pageState != PAGE_STATE_FREE);
+ASSERT(pageState != PAGE_STATE_CLEAR);
 VMPageQueue* queue;
 VMPageQueue* otherQueue;
-switch (pageState) {
-case PAGE_STATE_FREE:
-queue = &sFreePageQueue;
-otherQueue = &sClearPageQueue;
-break;
-case PAGE_STATE_CLEAR:
-queue = &sClearPageQueue;
-otherQueue = &sFreePageQueue;
-break;
-default:
-return NULL; // invalid
+if ((flags & VM_PAGE_ALLOC_CLEAR) != 0) {
+queue = &sClearPageQueue;
+otherQueue = &sFreePageQueue;
+} else {
+queue = &sFreePageQueue;
+otherQueue = &sClearPageQueue;
 }
 atomic_add(&sUnreservedFreePages, -1);
@@ -2336,19 +2362,20 @@ vm_page_allocate_page(int pageState)
 DEBUG_PAGE_ACCESS_START(page);
 int oldPageState = page->state;
-page->state = PAGE_STATE_ACTIVE;
-page->busy = true;
+page->state = pageState;
+page->busy = (flags & VM_PAGE_ALLOC_BUSY) != 0;
 page->usage_count = 2;
 page->accessed = false;
 page->modified = false;
 locker.Unlock();
-sActivePageQueue.AppendUnlocked(page);
+if (pageState < PAGE_STATE_FIRST_UNQUEUED)
+sPageQueues[pageState].AppendUnlocked(page);
 // clear the page, if we had to take it from the free queue and a clear
 // page was requested
-if (pageState == PAGE_STATE_CLEAR && oldPageState != PAGE_STATE_CLEAR)
+if ((flags & VM_PAGE_ALLOC_CLEAR) != 0 && oldPageState != PAGE_STATE_CLEAR)
 clear_page(page);
 return page;
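
Reading the two hunks above together, the allocation path now goes roughly
like this (a condensed orientation sketch, not the literal committed code;
the part that actually pulls a page off "queue"/"otherQueue" is unchanged by
this commit and elided here):

	vm_page*
	vm_page_allocate_page(uint32 flags)
	{
		uint32 pageState = flags & VM_PAGE_ALLOC_STATE;
		// prefer the clear queue only when a cleared page was requested
		VMPageQueue* queue = (flags & VM_PAGE_ALLOC_CLEAR) != 0
			? &sClearPageQueue : &sFreePageQueue;
		// ... take "page" from queue (falling back to the other queue),
		// remembering oldPageState = page->state, as before ...
		page->state = pageState;
		page->busy = (flags & VM_PAGE_ALLOC_BUSY) != 0;
		if (pageState < PAGE_STATE_FIRST_UNQUEUED)
			sPageQueues[pageState].AppendUnlocked(page);
		if ((flags & VM_PAGE_ALLOC_CLEAR) != 0
			&& oldPageState != PAGE_STATE_CLEAR)
			clear_page(page);
		return page;
	}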
@@ -2356,9 +2383,13 @@ vm_page_allocate_page(int pageState)
 static vm_page*
-allocate_page_run(page_num_t start, page_num_t length, int pageState,
+allocate_page_run(page_num_t start, page_num_t length, uint32 flags,
 WriteLocker& freeClearQueueLocker)
 {
+uint32 pageState = flags & VM_PAGE_ALLOC_STATE;
+ASSERT(pageState != PAGE_STATE_FREE);
+ASSERT(pageState != PAGE_STATE_CLEAR);
 T(AllocatePageRun(length));
 // pull the pages out of the appropriate queues
@@ -2375,8 +2406,8 @@ allocate_page_run(page_num_t start, page_num_t length, int pageState,
 freePages.Add(&page);
 }
-page.state = PAGE_STATE_ACTIVE;
-page.busy = true;
+page.state = flags & VM_PAGE_ALLOC_STATE;
+page.busy = flags & VM_PAGE_ALLOC_BUSY;
 page.usage_count = 1;
 page.accessed = false;
 page.modified = false;
@@ -2385,16 +2416,18 @@ allocate_page_run(page_num_t start, page_num_t length, int pageState,
 freeClearQueueLocker.Unlock();
 // clear pages, if requested
-if (pageState == PAGE_STATE_CLEAR) {
+if ((flags & VM_PAGE_ALLOC_CLEAR) != 0) {
 for (VMPageQueue::PageList::Iterator it = freePages.GetIterator();
 vm_page* page = it.Next();) {
 clear_page(page);
 }
 }
-// add pages to active queue
-freePages.MoveFrom(&clearPages);
-sActivePageQueue.AppendUnlocked(freePages, length);
+// add pages to target queue
+if (pageState < PAGE_STATE_FIRST_UNQUEUED) {
+freePages.MoveFrom(&clearPages);
+sPageQueues[pageState].AppendUnlocked(freePages, length);
+}
 // Note: We don't unreserve the pages since we pulled them out of the
 // free/clear queues without adjusting sUnreservedFreePages.
@@ -2404,7 +2437,7 @@ allocate_page_run(page_num_t start, page_num_t length, int pageState,
 vm_page *
-vm_page_allocate_page_run(int pageState, addr_t base, addr_t length,
+vm_page_allocate_page_run(uint32 flags, addr_t base, addr_t length,
 int priority)
 {
 uint32 start = base >> PAGE_SHIFT;
@@ -2435,7 +2468,7 @@ vm_page_allocate_page_run(int pageState, addr_t base, addr_t length,
 }
 if (foundRun)
-return allocate_page_run(start, length, pageState,
+return allocate_page_run(start, length, flags,
 freeClearQueueLocker);
 start += i;
@@ -2444,21 +2477,16 @@ vm_page_allocate_page_run(int pageState, addr_t base, addr_t length,
 vm_page *
-vm_page_allocate_page_run_no_base(int pageState, addr_t count, int priority)
+vm_page_allocate_page_run_no_base(uint32 flags, addr_t count, int priority)
 {
 VMPageQueue* queue;
 VMPageQueue* otherQueue;
-switch (pageState) {
-case PAGE_STATE_FREE:
-queue = &sFreePageQueue;
-otherQueue = &sClearPageQueue;
-break;
-case PAGE_STATE_CLEAR:
-queue = &sClearPageQueue;
-otherQueue = &sFreePageQueue;
-break;
-default:
-return NULL; // invalid
+if ((flags & VM_PAGE_ALLOC_CLEAR) != 0) {
+queue = &sClearPageQueue;
+otherQueue = &sFreePageQueue;
+} else {
+queue = &sFreePageQueue;
+otherQueue = &sClearPageQueue;
 }
 if (!vm_page_try_reserve_pages(count, priority))
@@ -2485,7 +2513,7 @@ vm_page_allocate_page_run_no_base(int pageState, addr_t count, int priority)
 }
 if (foundRun) {
-return allocate_page_run(page - sPages, count, pageState,
+return allocate_page_run(page - sPages, count, flags,
 freeClearQueueLocker);
 }
 }
@@ -2578,6 +2606,9 @@ vm_page_requeue(struct vm_page *page, bool tail)
 case PAGE_STATE_MODIFIED:
 queue = &sModifiedPageQueue;
 break;
+case PAGE_STATE_CACHED:
+queue = &sCachedPageQueue;
+break;
 case PAGE_STATE_FREE:
 case PAGE_STATE_CLEAR:
 panic("vm_page_requeue() called for free/clear page %p", page);