* Added new debug feature (DEBUG_PAGE_ACCESS) to detect invalid concurrent
  access to a vm_page. It is basically an atomically accessed thread ID field
  in the vm_page structure, which is explicitly set by macros marking the
  critical sections. As a first positive effect I had to review quite a bit of
  code and found several issues. (An illustrative call-site sketch follows the
  commit header below.)
* Added several TODOs and comments. Some harmless ones, but also a few
  troublesome ones in vm.cpp regarding page unmapping.
* file_cache: PrecacheIO::Prepare()/read_into_cache(): Removed superfluous
  vm_page_allocate_page() return value checks. It cannot fail anymore.
* Removed the heavily contended "pages" lock. We use different policies now
  (a rough sketch of the resulting set_page_state() appears at the end of this
  page, after the suppressed diff):
  - sModifiedTemporaryPages is accessed atomically.
  - sPageDeficitLock and sFreePageCondition are protected by a new mutex.
  - The page queues have individual locks (mutexes).
  - Renamed set_page_state_nolock() to set_page_state(). Unless the caller
    says otherwise, it now locks the affected page queues itself. Also changed
    the return value to void -- we panic() anyway.
* set_page_state(): Add free/clear pages to the beginning of their respective
  queues, as this is more cache-friendly.
* Pages with the states PAGE_STATE_WIRED or PAGE_STATE_UNUSED are no longer in
  any queue. They were in the "active" queue, but there's no good reason to
  have them there. In case we decide to let the page daemon work the queues
  (like FreeBSD), they would just be in the way.
* Pulled the common part of vm_page_allocate_page_run[_no_base]() into a
  helper function. Also fixed a bug I introduced previously: The functions
  must not vm_page_unreserve_pages() on success, since they remove the pages
  from the free/clear queue without decrementing sUnreservedFreePages.
* vm_page_set_state(): Changed return type to void. The function cannot really
  fail and no one was checking it anyway.
* vm_page_free(), vm_page_set_state(): Added assertion: The page must not be
  free/clear before. This is implied by the policy that no one is allowed to
  access free/clear pages without holding the respective queue's lock, which
  is not the case at this point. This found the bug fixed in r34912.
* vm_page_requeue(): Added general assertions. panic() when requeuing of
  free/clear pages is requested. Same reason as above.
* vm_clone_area(), B_FULL_LOCK case: Don't map busy pages. The implementation
  is still not correct, though.

My usual -j8 Haiku build test runs another 10% faster now. The total kernel
time drops about 18%. As hoped, the new locks have only a fraction of the old
"pages" lock contention. Other locks lead the "most wanted list" now.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@34933 a95241bf-73f2-0310-859d-f6bbb57e9c96
parent c1f316db61
commit 3cd2094396
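For orientation before the diff: the commit message's first bullet describes macros that mark vm_page critical sections. A typical call-site pattern, modeled on the cache_io() and vm_daemon changes further down, looks roughly like this. The surrounding helper is hypothetical; only the macros, vm_page_set_state(), and PAGE_STATE_MODIFIED are from this commit.

// Illustrative sketch only: bracket a page state change with the new debug
// macros so that concurrent access by another thread panics in
// DEBUG_PAGE_ACCESS builds and compiles away otherwise. The caller is
// assumed to hold the page's cache lock and the page must not be busy.
static void
mark_page_modified(vm_page* page)	// hypothetical helper
{
	DEBUG_PAGE_ACCESS_START(page);
		// atomically records the current thread in page->accessing_thread;
		// panics if another thread already holds the access claim
	vm_page_set_state(page, PAGE_STATE_MODIFIED);
		// with this commit the function locks the affected queues itself
		// and returns void
	DEBUG_PAGE_ACCESS_END(page);
		// releases the claim (accessing_thread is reset to -1)
}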
@@ -80,10 +80,14 @@
 // VM

-// Enables the vm_page::queue, i.e. it is tracked which queue the page should
-// be in.
+// Enables the vm_page::queue field, i.e. it is tracked which queue the page
+// should be in.
 #define DEBUG_PAGE_QUEUE 0

+// Enables the vm_page::access_count field, which is used to detect invalid
+// concurrent access to the page.
+#define DEBUG_PAGE_ACCESS 1
+
 // Enables a global list of all vm_cache structures.
 #define DEBUG_CACHE_LIST KDEBUG_LEVEL_1
@@ -28,7 +28,7 @@ status_t vm_page_init_post_thread(struct kernel_args *args);
 status_t vm_mark_page_inuse(addr_t page);
 status_t vm_mark_page_range_inuse(addr_t startPage, addr_t length);
 void vm_page_free(struct VMCache *cache, struct vm_page *page);
-status_t vm_page_set_state(struct vm_page *page, int state);
+void vm_page_set_state(struct vm_page *page, int state);
 void vm_page_requeue(struct vm_page *page, bool tail);

 // get some data about the number of pages in the system
@@ -1,5 +1,5 @@
 /*
- * Copyright 2009, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
  * Distributed under the terms of the MIT License.
  *
@@ -90,6 +90,10 @@ struct vm_page {
 	void*	queue;
 #endif

+#if DEBUG_PAGE_ACCESS
+	vint32	accessing_thread;
+#endif
+
 	uint8	type : 2;
 	uint8	state : 3;
@@ -121,4 +125,70 @@ enum {
 };


+#if DEBUG_PAGE_ACCESS
+#	include <thread.h>
+
+static inline void
+vm_page_debug_access_start(vm_page* page)
+{
+	thread_id threadID = thread_get_current_thread_id();
+	thread_id previousThread = atomic_test_and_set(&page->accessing_thread,
+		threadID, -1);
+	if (previousThread != -1) {
+		panic("Invalid concurrent access to page %p (start), currently "
+			"accessed by: %" B_PRId32, page, previousThread);
+	}
+}
+
+
+static inline void
+vm_page_debug_access_end(vm_page* page)
+{
+	thread_id threadID = thread_get_current_thread_id();
+	thread_id previousThread = atomic_test_and_set(&page->accessing_thread, -1,
+		threadID);
+	if (previousThread != threadID) {
+		panic("Invalid concurrent access to page %p (end) by current thread, "
+			"current accessor is: %" B_PRId32, page, previousThread);
+	}
+}
+
+
+static inline void
+vm_page_debug_access_check(vm_page* page)
+{
+	thread_id thread = page->accessing_thread;
+	if (thread != thread_get_current_thread_id()) {
+		panic("Invalid concurrent access to page %p (check), currently "
+			"accessed by: %" B_PRId32, page, thread);
+	}
+}
+
+
+static inline void
+vm_page_debug_access_transfer(vm_page* page, thread_id expectedPreviousThread)
+{
+	thread_id threadID = thread_get_current_thread_id();
+	thread_id previousThread = atomic_test_and_set(&page->accessing_thread,
+		threadID, expectedPreviousThread);
+	if (previousThread != expectedPreviousThread) {
+		panic("Invalid access transfer for page %p, currently accessed by: "
+			"%" B_PRId32 ", expected: %" B_PRId32, page, previousThread,
+			expectedPreviousThread);
+	}
+}
+
+#	define DEBUG_PAGE_ACCESS_START(page)	vm_page_debug_access_start(page)
+#	define DEBUG_PAGE_ACCESS_END(page)		vm_page_debug_access_end(page)
+#	define DEBUG_PAGE_ACCESS_CHECK(page)	vm_page_debug_access_check(page)
+#	define DEBUG_PAGE_ACCESS_TRANSFER(page, thread)	\
+	vm_page_debug_access_transfer(page, thread)
+#else
+#	define DEBUG_PAGE_ACCESS_START(page)	do {} while (false)
+#	define DEBUG_PAGE_ACCESS_END(page)		do {} while (false)
+#	define DEBUG_PAGE_ACCESS_CHECK(page)	do {} while (false)
+#	define DEBUG_PAGE_ACCESS_TRANSFER(page, thread)	do {} while (false)
+#endif
+
+
 #endif	// _KERNEL_VM_VM_TYPES_H
@@ -83,6 +83,9 @@ struct aperture_memory {
 		vm_page **pages;
 		vm_page *page;
 	};
+#ifdef DEBUG_PAGE_ACCESS
+	thread_id allocating_thread;
+#endif
 #else
 	area_id area;
 #endif
@@ -550,6 +553,11 @@ Aperture::AllocateMemory(aperture_memory *memory, uint32 flags)
 			memory->pages[i] = vm_page_allocate_page(PAGE_STATE_CLEAR);
 		vm_page_unreserve_pages(count);
 	}
+
+#ifdef DEBUG_PAGE_ACCESS
+	memory->allocating_thread = find_thread(NULL);
+#endif
+
 #else
 	void *address;
 	memory->area = create_area("GART memory", &address, B_ANY_KERNEL_ADDRESS,
@@ -682,12 +690,15 @@ Aperture::_Free(aperture_memory *memory)
 	if ((memory->flags & B_APERTURE_NEED_PHYSICAL) != 0) {
 		vm_page *page = memory->page;
 		for (uint32 i = 0; i < count; i++, page++) {
+			DEBUG_PAGE_ACCESS_TRANSFER(page, memory->allocating_thread);
 			vm_page_set_state(page, PAGE_STATE_FREE);
 		}

 		memory->page = NULL;
 	} else {
 		for (uint32 i = 0; i < count; i++) {
+			DEBUG_PAGE_ACCESS_TRANSFER(memory->pages[i],
+				memory->allocating_thread);
 			vm_page_set_state(memory->pages[i], PAGE_STATE_FREE);
 		}
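The DEBUG_PAGE_ACCESS_TRANSFER() calls above handle pages that were allocated by one thread and freed by another: the allocator remembers its thread ID and the claim is handed over instead of panicking. A minimal sketch of that hand-off pattern follows; the struct and functions are hypothetical, while the macros, find_thread(), vm_page_allocate_page(), and vm_page_set_state() are taken from the code above.

// Illustration only: the allocating thread records itself, a different
// thread later takes over the access claim before freeing the page.
struct my_buffer {				// hypothetical
	vm_page*	page;
#if DEBUG_PAGE_ACCESS
	thread_id	allocating_thread;
#endif
};

static void
my_buffer_allocate(my_buffer* buffer)	// hypothetical; runs in thread A
{
	buffer->page = vm_page_allocate_page(PAGE_STATE_CLEAR);
		// the allocating thread now holds the page's access claim
#if DEBUG_PAGE_ACCESS
	buffer->allocating_thread = find_thread(NULL);
#endif
}

static void
my_buffer_free(my_buffer* buffer)	// hypothetical; may run in thread B
{
	DEBUG_PAGE_ACCESS_TRANSFER(buffer->page, buffer->allocating_thread);
		// move the claim from the allocating thread to this thread;
		// expands to a no-op when DEBUG_PAGE_ACCESS is disabled
	vm_page_set_state(buffer->page, PAGE_STATE_FREE);
}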
@@ -376,10 +376,13 @@ destroy_tmap(vm_translation_map *map)
 					panic("destroy_tmap: didn't find pgtable page\n");
 					return;
 				}
+				DEBUG_PAGE_ACCESS_START(page);
 				vm_page_set_state(page, PAGE_STATE_FREE);
 			}
-			if (((i+1)%NUM_DIRTBL_PER_PAGE) == 0)
+			if (((i + 1) % NUM_DIRTBL_PER_PAGE) == 0) {
+				DEBUG_PAGE_ACCESS_END(dirpage);
 				vm_page_set_state(dirpage, PAGE_STATE_FREE);
+			}
 		}
 		free(map->arch_data->rtdir_virt);
 	}
@@ -545,6 +548,8 @@ map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
 		// mark the page WIRED
 		vm_page_set_state(page, PAGE_STATE_WIRED);

+		DEBUG_PAGE_ACCESS_END(page);
+
 		pgdir = page->physical_page_number * B_PAGE_SIZE;

 		TRACE(("map_tmap: asked for free page for pgdir. 0x%lx\n", pgdir));
@@ -591,6 +596,8 @@ map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
 		// mark the page WIRED
 		vm_page_set_state(page, PAGE_STATE_WIRED);

+		DEBUG_PAGE_ACCESS_END(page);
+
 		pgtable = page->physical_page_number * B_PAGE_SIZE;

 		TRACE(("map_tmap: asked for free page for pgtable. 0x%lx\n", pgtable));
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
  * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
  * Distributed under the terms of the MIT License.
  *
@@ -274,6 +274,7 @@ destroy_tmap(vm_translation_map *map)
 			page = vm_lookup_page(pgtable_addr);
 			if (!page)
 				panic("destroy_tmap: didn't find pgtable page\n");
+			DEBUG_PAGE_ACCESS_START(page);
 			vm_page_set_state(page, PAGE_STATE_FREE);
 		}
 	}
@@ -369,6 +370,8 @@ map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
 		// mark the page WIRED
 		vm_page_set_state(page, PAGE_STATE_WIRED);

+		DEBUG_PAGE_ACCESS_END(page);
+
 		pgtable = page->physical_page_number * B_PAGE_SIZE;

 		TRACE(("map_tmap: asked for free page for pgtable. 0x%lx\n", pgtable));
src/system/kernel/cache/file_cache.cpp
@@ -92,6 +92,9 @@ private:
 			off_t			fOffset;
 			uint32			fVecCount;
 			size_t			fSize;
+#if DEBUG_PAGE_ACCESS
+			thread_id		fAllocatingThread;
+#endif
 };

 typedef status_t (*cache_func)(file_cache_ref* ref, void* cookie, off_t offset,
@@ -150,8 +153,6 @@ PrecacheIO::Prepare()
 	uint32 i = 0;
 	for (size_t pos = 0; pos < fSize; pos += B_PAGE_SIZE) {
 		vm_page* page = vm_page_allocate_page(PAGE_STATE_FREE);
-		if (page == NULL)
-			break;

 		fCache->InsertPage(page, fOffset + pos);

@@ -160,15 +161,9 @@ PrecacheIO::Prepare()
 		fPages[i++] = page;
 	}

-	if (i != fPageCount) {
-		// allocating pages failed
-		while (i-- > 0) {
-			fCache->NotifyPageEvents(fPages[i], PAGE_EVENT_NOT_BUSY);
-			fCache->RemovePage(fPages[i]);
-			vm_page_set_state(fPages[i], PAGE_STATE_FREE);
-		}
-		return B_NO_MEMORY;
-	}
+#if DEBUG_PAGE_ACCESS
+	fAllocatingThread = find_thread(NULL);
+#endif

 	return B_OK;
 }
@@ -207,12 +202,17 @@ PrecacheIO::IOFinished(status_t status, bool partialTransfer,
 				+ bytesTouched, 0, B_PAGE_SIZE - bytesTouched);
 		}

+		DEBUG_PAGE_ACCESS_TRANSFER(fPages[i], fAllocatingThread);
+
 		fPages[i]->state = PAGE_STATE_ACTIVE;
 		fCache->NotifyPageEvents(fPages[i], PAGE_EVENT_NOT_BUSY);
+
+		DEBUG_PAGE_ACCESS_END(fPages[i]);
 	}

 	// Free pages after failed I/O
 	for (uint32 i = pagesTransferred; i < fPageCount; i++) {
+		DEBUG_PAGE_ACCESS_TRANSFER(fPages[i], fAllocatingThread);
 		fCache->NotifyPageEvents(fPages[i], PAGE_EVENT_NOT_BUSY);
 		fCache->RemovePage(fPages[i]);
 		vm_page_set_state(fPages[i], PAGE_STATE_FREE);
@@ -305,6 +305,7 @@ reserve_pages(file_cache_ref* ref, size_t reservePages, bool isWrite)
 				(page = it.Next()) != NULL && left > 0;) {
 			if (page->state != PAGE_STATE_MODIFIED
 				&& page->state != PAGE_STATE_BUSY) {
+				DEBUG_PAGE_ACCESS_START(page);
 				cache->RemovePage(page);
 				vm_page_set_state(page, PAGE_STATE_FREE);
 				left--;
@@ -382,8 +383,6 @@ read_into_cache(file_cache_ref* ref, void* cookie, off_t offset,
 	for (size_t pos = 0; pos < numBytes; pos += B_PAGE_SIZE) {
 		vm_page* page = pages[pageIndex++] = vm_page_allocate_page(
 			PAGE_STATE_FREE);
-		if (page == NULL)
-			panic("no more pages!");

 		cache->InsertPage(page, offset + pos);

@@ -436,6 +435,8 @@ read_into_cache(file_cache_ref* ref, void* cookie, off_t offset,

 	// make the pages accessible in the cache
 	for (int32 i = pageIndex; i-- > 0;) {
+		DEBUG_PAGE_ACCESS_END(pages[i]);
+
 		pages[i]->state = PAGE_STATE_ACTIVE;

 		cache->NotifyPageEvents(pages[i], PAGE_EVENT_NOT_BUSY);
@@ -610,6 +611,8 @@ write_to_cache(file_cache_ref* ref, void* cookie, off_t offset,
 			pages[i]->state = PAGE_STATE_ACTIVE;
 		else
 			vm_page_set_state(pages[i], PAGE_STATE_MODIFIED);
+
+		DEBUG_PAGE_ACCESS_END(pages[i]);
 	}

 	return status;
@@ -798,6 +801,9 @@ cache_io(void* _cacheRef, void* cookie, off_t offset, addr_t buffer,
 			// Since we don't actually map pages as part of an area, we have
 			// to manually maintain their usage_count
 			page->usage_count = 2;
+				// TODO: Just because this request comes from the FS API, it
+				// doesn't mean the page is not mapped. We might actually
+				// decrease the usage count of a hot page here.

 			if (doWrite || useBuffer) {
 				// Since the following user_mem{cpy,set}() might cause a page
@@ -827,8 +833,11 @@ cache_io(void* _cacheRef, void* cookie, off_t offset, addr_t buffer,
 				locker.Lock();

 				page->state = oldPageState;
-				if (doWrite && page->state != PAGE_STATE_MODIFIED)
+				if (doWrite && page->state != PAGE_STATE_MODIFIED) {
+					DEBUG_PAGE_ACCESS_START(page);
 					vm_page_set_state(page, PAGE_STATE_MODIFIED);
+					DEBUG_PAGE_ACCESS_END(page);
+				}

 				cache->NotifyPageEvents(page, PAGE_EVENT_NOT_BUSY);
 			}
@@ -1001,6 +1010,8 @@ cache_prefetch_vnode(struct vnode* vnode, off_t offset, size_t size)

 		cache->ReleaseRefAndUnlock();
 		vm_page_unreserve_pages(reservePages);
+			// TODO: We should periodically unreserve as we go, so we don't
+			// unnecessarily put pressure on the free page pool.
 	}
@@ -18,6 +18,7 @@ KernelMergeObject kernel_vm.o :
 	VMKernelAddressSpace.cpp
 	VMKernelArea.cpp
 	VMNullCache.cpp
+	VMPageQueue.cpp
 	VMUserAddressSpace.cpp
 	VMUserArea.cpp

@@ -1,6 +1,6 @@
 /*
  * Copyright 2008, Zhao Shuai, upczhsh@163.com.
- * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
  * Distributed under the terms of the MIT License.
  *
@@ -952,6 +952,7 @@ VMAnonymousCache::_MergePagesSmallerConsumer(VMAnonymousCache* source)
 		vm_page* sourcePage = source->LookupPage(
 			(off_t)page->cache_offset << PAGE_SHIFT);
 		if (sourcePage != NULL) {
+			DEBUG_PAGE_ACCESS_START(sourcePage);
 			source->RemovePage(sourcePage);
 			vm_page_free(source, sourcePage);
 		}
@@ -1000,6 +1001,7 @@ VMAnonymousCache::_MergeSwapPages(VMAnonymousCache* source)
 		if (swapBlock->swap_slots[i] != SWAP_SLOT_NONE) {
 			vm_page* page = source->LookupPage(
 				(off_t)(swapBlockPageIndex + i) << PAGE_SHIFT);
+			DEBUG_PAGE_ACCESS_START(page);
 			source->RemovePage(page);
 			vm_page_free(source, page);
 		}
@@ -612,6 +612,7 @@ VMCache::Delete()

 		TRACE(("vm_cache_release_ref: freeing page 0x%lx\n",
 			oldPage->physical_page_number));
+		DEBUG_PAGE_ACCESS_START(page);
 		vm_page_free(this, page);
 	}

@@ -1032,6 +1033,7 @@ VMCache::Resize(off_t newSize)
 			}

 			// remove the page and put it into the free queue
+			DEBUG_PAGE_ACCESS_START(page);
 			vm_remove_all_page_mappings(page, NULL);
 			ASSERT(page->wired_count == 0);
 				// TODO: Find a real solution! Unmapping is probably fine, but
@@ -1081,6 +1083,7 @@ VMCache::FlushAndRemoveAllPages()
 		if (page->wired_count > 0 || !page->mappings.IsEmpty())
 			return B_BUSY;

+		DEBUG_PAGE_ACCESS_START(page);
 		RemovePage(page);
 		vm_page_free(this, page);
 			// Note: When iterating through a IteratableSplayTree
src/system/kernel/vm/VMPageQueue.cpp (new file)
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Distributed under the terms of the MIT License.
+ */
+
+
+#include "VMPageQueue.h"
+
+
+void
+VMPageQueue::Init(const char* name, int lockingOrder)
+{
+	fName = name;
+	fLockingOrder = lockingOrder;
+	fCount = 0;
+	mutex_init(&fLock, fName);
+}
@@ -1,5 +1,5 @@
 /*
- * Copyright 2009, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
  * Distributed under the terms of the MIT License.
  *
@@ -10,6 +10,13 @@
 #define VM_PAGE_QUEUE_H


 #include <util/DoublyLinkedList.h>

+#include <lock.h>
 #include <vm/vm_types.h>


 struct VMPageQueue {
 public:
 	typedef DoublyLinkedList<vm_page, DoublyLinkedListMemberGetLink<vm_page,
@@ -17,7 +24,15 @@ public:
 	typedef PageList::ConstIterator Iterator;

 public:
-			inline				VMPageQueue();
+			void				Init(const char* name, int lockingOrder);
+
+			const char*			Name() const	{ return fName; }
+			int					LockingOrder() const	{ return fLockingOrder; }
+
+			inline	bool		Lock();
+			inline	void		Unlock();
+
+			inline	void		LockMultiple(VMPageQueue* other);

 			inline	void		Append(vm_page* page);
 			inline	void		Prepend(vm_page* page);
@@ -31,22 +46,43 @@ public:
 			inline	vm_page*	Previous(vm_page* page) const;
 			inline	vm_page*	Next(vm_page* page) const;

-			inline	void		MoveFrom(VMPageQueue* from, vm_page* page);
-
 			inline	uint32		Count() const	{ return fCount; }

 			inline	Iterator	GetIterator() const;

 private:
-			PageList			fPages;
+			const char*			fName;
+			int					fLockingOrder;
+			mutex				fLock;
 			uint32				fCount;
+			PageList			fPages;
 };


-VMPageQueue::VMPageQueue()
-	:
-	fCount(0)
+bool
+VMPageQueue::Lock()
+{
+	return mutex_lock(&fLock) == B_OK;
+}
+
+
+void
+VMPageQueue::Unlock()
+{
+	mutex_unlock(&fLock);
+}
+
+
+void
+VMPageQueue::LockMultiple(VMPageQueue* other)
+{
+	if (fLockingOrder < other->fLockingOrder) {
+		Lock();
+		other->Lock();
+	} else {
+		other->Lock();
+		Lock();
+	}
+}
@@ -147,18 +183,6 @@ VMPageQueue::RemoveHead()
 }


-/*!	Moves a page to the tail of this queue, but only does so if
-	the page is currently in another queue.
-*/
-void
-VMPageQueue::MoveFrom(VMPageQueue* from, vm_page* page)
-{
-	if (from != this) {
-		from->Remove(page);
-		Append(page);
-	}
-}
-
-
 vm_page*
 VMPageQueue::Head() const
 {
@@ -194,4 +218,87 @@ VMPageQueue::GetIterator() const
 }


+// #pragma mark - VMPageQueuePairLocker
+
+
+struct VMPageQueuePairLocker {
+	VMPageQueuePairLocker()
+		:
+		fQueue1(NULL),
+		fQueue2(NULL)
+	{
+	}
+
+	VMPageQueuePairLocker(VMPageQueue& queue1, VMPageQueue& queue2)
+		:
+		fQueue1(&queue1),
+		fQueue2(&queue2)
+	{
+		_Lock();
+	}
+
+	~VMPageQueuePairLocker()
+	{
+		_Unlock();
+	}
+
+	void SetTo(VMPageQueue* queue1, VMPageQueue* queue2)
+	{
+		_Unlock();
+		fQueue1 = queue1;
+		fQueue2 = queue2;
+		_Lock();
+	}
+
+	void Unlock()
+	{
+		if (fQueue1 != NULL) {
+			fQueue1->Unlock();
+			fQueue1 = NULL;
+		}
+
+		if (fQueue2 != NULL) {
+			fQueue2->Unlock();
+			fQueue2 = NULL;
+		}
+	}
+
+private:
+	void _Lock()
+	{
+		if (fQueue1 == fQueue2) {
+			if (fQueue1 == NULL)
+				return;
+			fQueue1->Lock();
+			fQueue2 = NULL;
+		} else {
+			if (fQueue1 == NULL) {
+				fQueue2->Lock();
+			} else if (fQueue2 == NULL) {
+				fQueue1->Lock();
+			} else if (fQueue1->LockingOrder() < fQueue2->LockingOrder()) {
+				fQueue1->Lock();
+				fQueue2->Lock();
+			} else {
+				fQueue2->Lock();
+				fQueue1->Lock();
+			}
+		}
+	}
+
+	void _Unlock()
+	{
+		if (fQueue1 != NULL)
+			fQueue1->Unlock();
+
+		if (fQueue2 != NULL)
+			fQueue2->Unlock();
+	}
+
+private:
+	VMPageQueue*	fQueue1;
+	VMPageQueue*	fQueue2;
+};
+
+
 #endif	// VM_PAGE_QUEUE_H
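The LockMultiple()/VMPageQueuePairLocker code above always acquires the two queue mutexes in fLockingOrder order. This is classic deadlock avoidance: if two threads each need the same pair of queues, a fixed global order means neither can end up holding one lock while waiting for the other. A minimal usage sketch follows; the helper function and queue arguments are hypothetical, while the class and its methods are the ones defined above.

// Illustration only: move a page between two queues while holding both
// locks, acquired in a deadlock-free order by VMPageQueuePairLocker.
static void
move_page_between_queues(VMPageQueue& fromQueue, VMPageQueue& toQueue,
	vm_page* page)	// hypothetical helper
{
	VMPageQueuePairLocker locker(fromQueue, toQueue);
		// locks both queues in ascending locking order (or only one,
		// if fromQueue and toQueue are the same object)
	fromQueue.Remove(page);
	toQueue.Append(page);
}	// ~VMPageQueuePairLocker() unlocks both queues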
@@ -1,5 +1,5 @@
 /*
- * Copyright 2009, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
  * Distributed under the terms of the MIT License.
  *
@@ -344,6 +344,9 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,

 		// unmap pages
 		vm_unmap_pages(area, address, oldSize - newSize, false);
+			// TODO: preserveModified = false is wrong, since this could be a
+			// cloned area or a write-mmap()ed file, in which case we'd lose
+			// information.

 		// If no one else uses the area's cache, we can resize it, too.
 		if (cache->areas == area && area->cache_next == NULL
@@ -366,6 +369,7 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,

 		// unmap pages
 		vm_unmap_pages(area, oldBase, newBase - oldBase, false);
+			// TODO: See the vm_unmap_pages() above.

 		// resize the area
 		status_t error = addressSpace->ShrinkAreaHead(area, newSize);
@@ -389,6 +393,7 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,

 	// unmap pages
 	vm_unmap_pages(area, address, area->Size() - firstNewSize, false);
+		// TODO: See the vm_unmap_pages() above.

 	// resize the area
 	addr_t oldSize = area->Size();
@@ -874,6 +879,10 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
 				vm_page* page = vm_page_allocate_page(newPageState);
 				cache->InsertPage(page, offset);
 				vm_map_page(area, page, address, protection);
+					// TODO: This sets the page state to "active", but it would
+					// make more sense to set it to "wired".
+
+				DEBUG_PAGE_ACCESS_END(page);

 				// Periodically unreserve pages we've already allocated, so that
 				// we don't unnecessarily increase the pressure on the VM.
@@ -917,9 +926,13 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
 						physicalAddress);
 			}

+			DEBUG_PAGE_ACCESS_START(page);
+
 			increment_page_wired_count(page);
-			vm_page_set_state(page, PAGE_STATE_WIRED);
 			cache->InsertPage(page, offset);
+			vm_page_set_state(page, PAGE_STATE_WIRED);
+
+			DEBUG_PAGE_ACCESS_END(page);
 		}

 		map->ops->unlock(map);
@@ -950,8 +963,10 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
 				panic("couldn't map physical page in page run\n");

 			increment_page_wired_count(page);
-			vm_page_set_state(page, PAGE_STATE_WIRED);
 			cache->InsertPage(page, offset);
+			vm_page_set_state(page, PAGE_STATE_WIRED);
+
+			DEBUG_PAGE_ACCESS_END(page);
 		}

 		map->ops->unlock(map);
@@ -1246,9 +1261,11 @@ pre_map_area_pages(VMArea* area, VMCache* cache)
 		if (page->state == PAGE_STATE_BUSY || page->usage_count <= 0)
 			continue;

+		DEBUG_PAGE_ACCESS_START(page);
 		vm_map_page(area, page,
 			baseAddress + (page->cache_offset * B_PAGE_SIZE - cacheOffset),
 			B_READ_AREA | B_KERNEL_READ_AREA);
+		DEBUG_PAGE_ACCESS_END(page);
 	}
 }

@@ -1543,10 +1560,17 @@ vm_clone_area(team_id team, const char* name, void** address,
 			// map in all pages from source
 			for (VMCachePagesTree::Iterator it = cache->pages.GetIterator();
 					vm_page* page = it.Next();) {
-				vm_map_page(newArea, page, newArea->Base()
-					+ ((page->cache_offset << PAGE_SHIFT)
-					- newArea->cache_offset), protection);
+				if (page->state != PAGE_STATE_BUSY) {
+					DEBUG_PAGE_ACCESS_START(page);
+					vm_map_page(newArea, page,
+						newArea->Base() + ((page->cache_offset << PAGE_SHIFT)
+							- newArea->cache_offset),
+						protection);
+					DEBUG_PAGE_ACCESS_END(page);
+				}
 			}
+			// TODO: B_FULL_LOCK means that all pages are locked. We are not
+			// ensuring that!

 			vm_page_unreserve_pages(reservePages);
 		}
@@ -1573,6 +1597,9 @@ delete_area(VMAddressSpace* addressSpace, VMArea* area)

 	// Unmap the virtual address space the area occupied
 	vm_unmap_pages(area, area->Base(), area->Size(), !area->cache->temporary);
+		// TODO: Even if the cache is temporary we might need to preserve the
+		// modified flag, since the area could be a clone and backed by swap.
+		// We would lose information in this case.

 	if (!area->cache->temporary)
 		area->cache->WriteModified();
@@ -1995,6 +2022,9 @@ vm_remove_all_page_mappings(vm_page* page, uint32* _flags)
 }


+/*!	If \a preserveModified is \c true, the caller must hold the lock of the
+	page's cache and the page must not be busy.
+*/
 bool
 vm_unmap_page(VMArea* area, addr_t virtualAddress, bool preserveModified)
 {
@@ -2068,6 +2098,11 @@ vm_unmap_page(VMArea* area, addr_t virtualAddress, bool preserveModified)
 }


+/*!	If \a preserveModified is \c true, the caller must hold the lock of all
+	mapped pages' caches and none of the pages must be busy.
+	TODO: Particularly the latter is very inconvenient. See the TODOs below for
+	reasons for this requirement.
+*/
 status_t
 vm_unmap_pages(VMArea* area, addr_t base, size_t size, bool preserveModified)
 {
@@ -2116,9 +2151,18 @@ vm_unmap_pages(VMArea* area, addr_t base, size_t size, bool preserveModified)
 						physicalAddress);
 			}

+			DEBUG_PAGE_ACCESS_START(page);
+				// TODO: No guarantee for that. See below.
+
 			if ((flags & PAGE_MODIFIED) != 0
-				&& page->state != PAGE_STATE_MODIFIED)
+				&& page->state != PAGE_STATE_MODIFIED) {
 				vm_page_set_state(page, PAGE_STATE_MODIFIED);
+					// TODO: We are only allowed to do this, if (a) we have also
+					// locked the cache and (b) the page is not busy! Not doing
+					// it is problematic, too, since we'd lose information.
+			}
+
+			DEBUG_PAGE_ACCESS_END(page);
 		}
 	}
 	map->ops->unlock(map);
@@ -2170,6 +2214,8 @@ vm_map_page(VMArea* area, vm_page* page, addr_t address, uint32 protection)
 	vm_translation_map* map = &area->address_space->TranslationMap();
 	vm_page_mapping* mapping = NULL;

+	DEBUG_PAGE_ACCESS_CHECK(page);
+
 	if (area->wiring == B_NO_LOCK) {
 		mapping = (vm_page_mapping*)malloc_nogrow(sizeof(vm_page_mapping));
 		if (mapping == NULL)
@@ -2883,8 +2929,10 @@ unmap_and_free_physical_pages(vm_translation_map* map, addr_t start, addr_t end)
 		if (map->ops->query(map, current, &physicalAddress, &flags) == B_OK
 			&& (flags & PAGE_PRESENT) != 0) {
 			vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
-			if (page != NULL)
+			if (page != NULL) {
+				DEBUG_PAGE_ACCESS_START(page);
 				vm_page_set_state(page, PAGE_STATE_FREE);
+			}
 		}
 	}

@@ -3708,6 +3756,8 @@ fault_get_page(PageFaultContext& context)
 			page->state = PAGE_STATE_ACTIVE;
 			cache->NotifyPageEvents(page, PAGE_EVENT_NOT_BUSY);

+			DEBUG_PAGE_ACCESS_END(page);
+
 			// Since we needed to unlock everything temporarily, the area
 			// situation might have changed. So we need to restart the whole
 			// process.
@@ -3749,7 +3799,8 @@ fault_get_page(PageFaultContext& context)

 			// insert the new page into our cache
 			context.topCache->InsertPage(page, context.cacheOffset);
 		}
 	} else
+		DEBUG_PAGE_ACCESS_START(page);

 	context.page = page;
 	return B_OK;
@@ -3870,7 +3921,7 @@ vm_soft_fault(VMAddressSpace* addressSpace, addr_t originalAddress,

 		addr_t physicalAddress;
 		uint32 flags;
-		vm_page* mappedPage;
+		vm_page* mappedPage = NULL;
 		if (context.map->ops->query(context.map, address, &physicalAddress,
 				&flags) == B_OK
 			&& (flags & PAGE_PRESENT) != 0
@@ -3889,12 +3940,26 @@ vm_soft_fault(VMAddressSpace* addressSpace, addr_t originalAddress,

 		context.map->ops->unlock(context.map);

-		if (unmapPage)
+		if (unmapPage) {
+			// Note: The mapped page is a page of a lower cache. We are
+			// guaranteed to have that cached locked, our new page is a copy of
+			// that page, and the page is not busy. The logic for that guarantee
+			// is as follows: Since the page is mapped, it must live in the top
+			// cache (ruled out above) or any of its lower caches, and there is
+			// (was before the new page was inserted) no other page in any
+			// cache between the top cache and the page's cache (otherwise that
+			// would be mapped instead). That in turn means that our algorithm
+			// must have found it and therefore it cannot be busy either.
+			DEBUG_PAGE_ACCESS_START(mappedPage);
 			vm_unmap_page(area, address, true);
+			DEBUG_PAGE_ACCESS_END(mappedPage);
+		}

 		if (mapPage)
 			vm_map_page(area, context.page, address, newProtection);

+		DEBUG_PAGE_ACCESS_END(context.page);
+
 		break;
 	}

@@ -5241,6 +5306,8 @@ _user_set_memory_protection(void* _address, size_t size, int protection)

 			if (unmapPage)
 				vm_unmap_page(area, pageAddress, true);
+					// TODO: We need to lock the page's cache for that, since
+					// it potentially changes the page's state.
 		}
 	}
@@ -271,6 +271,8 @@ check_page_activation(int32 index)
 	if (!locker.IsLocked())
 		return false;

+	DEBUG_PAGE_ACCESS_START(page);
+
 	bool modified;
 	int32 activation = vm_test_map_activation(page, &modified);
 	if (modified && page->state != PAGE_STATE_MODIFIED) {
@@ -293,6 +295,7 @@ check_page_activation(int32 index)
 		track_page_usage(page);
 #endif

+		DEBUG_PAGE_ACCESS_END(page);
 		return false;
 	}

@@ -310,8 +313,10 @@ check_page_activation(int32 index)
 	// recheck eventual last minute changes
 	if ((flags & PAGE_MODIFIED) != 0 && page->state != PAGE_STATE_MODIFIED)
 		vm_page_set_state(page, PAGE_STATE_MODIFIED);
-	if ((flags & PAGE_ACCESSED) != 0 && ++page->usage_count >= 0)
+	if ((flags & PAGE_ACCESSED) != 0 && ++page->usage_count >= 0) {
+		DEBUG_PAGE_ACCESS_END(page);
 		return false;
+	}

 	if (page->state == PAGE_STATE_MODIFIED)
 		vm_page_schedule_write_page(page);
@@ -321,6 +326,7 @@ check_page_activation(int32 index)
 		T(DeactivatePage(page));
 	}

+	DEBUG_PAGE_ACCESS_END(page);
 	return true;
 }

@@ -334,6 +340,8 @@ free_page_swap_space(int32 index)
 	if (!locker.IsLocked())
 		return false;

+	DEBUG_PAGE_ACCESS_START(page);
+
 	if (page->cache->temporary && page->wired_count == 0
 		&& page->cache->HasPage(page->cache_offset << PAGE_SHIFT)
 		&& page->usage_count > 0) {
@@ -343,9 +351,11 @@ free_page_swap_space(int32 index)
 			// stolen and we'd lose its data.
 			vm_page_set_state(page, PAGE_STATE_MODIFIED);
 			T(FreedPageSwap(page));
+			DEBUG_PAGE_ACCESS_END(page);
 			return true;
 		}
 	}
+	DEBUG_PAGE_ACCESS_END(page);
 	return false;
 }
 #endif
File diff suppressed because it is too large
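The core page allocator changes described in the commit message (the new per-queue locking, the renamed set_page_state(), and the atomic sModifiedTemporaryPages counter) are not visible in the hunks above; they are in the suppressed diff. The following is only a rough sketch reconstructed from the commit message, not the code from this commit: the helper queue_for_page_state(), the counter bookkeeping, and the exact ordering are assumptions made for illustration. Only VMPageQueue, VMPageQueuePairLocker, atomic_add(), the page state constants, and the counter's name come from the material above.

// Rough sketch only (the real implementation is in the suppressed diff):
// illustrates the policies described in the commit message -- the function
// locks the affected queues itself, wired/unused pages live in no queue,
// free/clear pages are prepended (more cache-friendly), and the
// modified-temporary-page counter is maintained atomically.
static vint32 sModifiedTemporaryPages;
	// assumed declaration; the real one is not shown above

static VMPageQueue* queue_for_page_state(int state);
	// hypothetical helper: maps a page state to its queue, returning NULL
	// for PAGE_STATE_WIRED and PAGE_STATE_UNUSED

static void
set_page_state(vm_page* page, int state)	// was set_page_state_nolock()
{
	VMPageQueue* fromQueue = queue_for_page_state(page->state);
	VMPageQueue* toQueue = queue_for_page_state(state);

	// lock both queues in a deadlock-free order; callers no longer hold a
	// global "pages" lock
	VMPageQueuePairLocker locker;
	locker.SetTo(fromQueue, toQueue);

	// keep the modified-temporary-page counter without any lock (assumed
	// placement; pages in the modified state always have a cache)
	if (page->state == PAGE_STATE_MODIFIED && page->cache->temporary)
		atomic_add(&sModifiedTemporaryPages, -1);
	if (state == PAGE_STATE_MODIFIED && page->cache->temporary)
		atomic_add(&sModifiedTemporaryPages, 1);

	if (fromQueue != NULL)
		fromQueue->Remove(page);

	page->state = state;

	if (toQueue != NULL) {
		if (state == PAGE_STATE_FREE || state == PAGE_STATE_CLEAR)
			toQueue->Prepend(page);	// head of the queue: more cache-friendly
		else
			toQueue->Append(page);
	}
}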