vm_page no longer points directly to its containing cache, but rather to a VMCacheRef object which points to the cache. This makes it possible to optimize VMCache::MoveAllPages(), since it no longer needs to iterate over all pages to adjust their cache pointer; it can simply swap the cache refs of the two caches instead. Reduces the total -j8 Haiku image build time only marginally; the kernel time drops by almost 10%, though.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@35155 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
parent
2416d6ae42
commit
6379e53e2d
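The gist of the change, shown below as a minimal, self-contained C++ sketch rather than the actual kernel sources: a page stores a pointer to a VMCacheRef, which in turn points to the owning cache, so VMCache::MoveAllPages() only has to swap the two refs instead of visiting every page. Locking (sCacheListLock), reference counting, error handling, and the page splay tree are all omitted here; std::vector and plain new stand in for the kernel's data structures.

// Simplified illustration of the VMCacheRef indirection; not the Haiku sources.
#include <cstdio>
#include <utility>
#include <vector>

struct VMCache;

struct VMCacheRef {
	VMCache*	cache;
	explicit VMCacheRef(VMCache* cache) : cache(cache) {}
};

struct vm_page {
	VMCacheRef*	cache_ref = nullptr;

	VMCache* Cache() const
		{ return cache_ref != nullptr ? cache_ref->cache : nullptr; }
	void SetCacheRef(VMCacheRef* cacheRef) { cache_ref = cacheRef; }
};

struct VMCache {
	VMCacheRef*				fCacheRef;
	std::vector<vm_page*>	pages;

	VMCache() : fCacheRef(new VMCacheRef(this)) {}
	~VMCache() { delete fCacheRef; }

	void InsertPage(vm_page* page)
	{
		pages.push_back(page);
		page->SetCacheRef(fCacheRef);	// the page points at the ref, not at the cache
	}

	// Assumes this cache is currently empty, as in the commit's use of
	// MoveAllPages(): adopt all of fromCache's pages and swap the two
	// VMCacheRefs. Every page keeps pointing at the same VMCacheRef object,
	// which now points here, so no per-page work is needed.
	void MoveAllPages(VMCache* fromCache)
	{
		pages = std::move(fromCache->pages);
		fromCache->pages.clear();
		std::swap(fCacheRef, fromCache->fCacheRef);
		fCacheRef->cache = this;
		fromCache->fCacheRef->cache = fromCache;
	}
};

int main()
{
	VMCache source, target;
	vm_page page1, page2;
	source.InsertPage(&page1);
	source.InsertPage(&page2);

	target.MoveAllPages(&source);

	// Both pages now report the target cache without having been touched.
	std::printf("page1 -> target: %d, page2 -> target: %d, source empty: %d\n",
		page1.Cache() == &target, page2.Cache() == &target,
		(int)source.pages.empty());
	return 0;
}

With the indirection in place, only the tracing loop guarded by VM_CACHE_TRACING >= 2 still iterates over the pages, as the VMCache::MoveAllPages() hunk in the diff below shows.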
@@ -61,6 +61,7 @@ struct VMCachePagesTreeDefinition {
 typedef IteratableSplayTree<VMCachePagesTreeDefinition> VMCachePagesTree;


 struct VMCache {
 public:
     VMCache();
@@ -171,6 +172,7 @@ public:
 private:
     struct PageEventWaiter;
+    friend struct VMCacheRef;

 private:
     void _NotifyPageEvents(vm_page* page, uint32 events);
@@ -185,6 +187,7 @@ private:
     mutex fLock;
     PageEventWaiter* fPageEventWaiters;
     void* fUserData;
+    VMCacheRef* fCacheRef;
 };
@@ -26,6 +26,7 @@
 class AsyncIOCallback;
 struct vm_page_mapping;
 struct VMCache;
+struct VMCacheRef;
 typedef DoublyLinkedListLink<vm_page_mapping> vm_page_mapping_link;


@@ -71,12 +72,23 @@ typedef class DoublyLinkedQueue<vm_page_mapping, DoublyLinkedAreaLink>
 typedef uint32 page_num_t;


+struct VMCacheRef {
+    VMCache* cache;
+    int32 ref_count;
+
+    VMCacheRef(VMCache* cache);
+};
+
+
 struct vm_page {
     DoublyLinkedListLink<vm_page> queue_link;

     addr_t physical_page_number;

-    VMCache* cache;
+private:
+    VMCacheRef* cache_ref;
+public:
     page_num_t cache_offset;
         // in page size units

@@ -103,6 +115,13 @@ struct vm_page {
     int8 usage_count;
     uint16 wired_count;

+    VMCacheRef* CacheRef() const { return cache_ref; }
+    void SetCacheRef(VMCacheRef* cacheRef) { this->cache_ref = cacheRef; }
+
+    VMCache* Cache() const
+        { return cache_ref != NULL ? cache_ref->cache : NULL; }
 };
@@ -768,7 +768,7 @@ X86VMTranslationMap::UnmapArea(VMArea* area, bool deletingAddressSpace,
         if (page->wired_count == 0 && page->mappings.IsEmpty())
             atomic_add(&gMappedPagesCount, -1);

-        if (unmapPages || page->cache != area->cache) {
+        if (unmapPages || page->Cache() != area->cache) {
             addr_t address = area->Base()
                 + ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);

@@ -1286,7 +1286,7 @@ swap_init_post_modules()
 bool
 swap_free_page_swap_space(vm_page* page)
 {
-    VMAnonymousCache* cache = dynamic_cast<VMAnonymousCache*>(page->cache);
+    VMAnonymousCache* cache = dynamic_cast<VMAnonymousCache*>(page->Cache());
     if (cache == NULL)
         return false;

@@ -491,15 +491,21 @@ vm_cache_acquire_locked_page_cache(vm_page* page, bool dontWait)
     mutex_lock(&sCacheListLock);

     while (dontWait) {
-        VMCache* cache = page->cache;
-        if (cache == NULL || !cache->TryLock()) {
+        VMCacheRef* cacheRef = page->CacheRef();
+        if (cacheRef == NULL) {
             mutex_unlock(&sCacheListLock);
             return NULL;
         }

-        if (cache == page->cache) {
-            cache->AcquireRefLocked();
+        VMCache* cache = cacheRef->cache;
+        if (!cache->TryLock()) {
+            mutex_unlock(&sCacheListLock);
+            return NULL;
+        }
+
+        if (cacheRef == page->CacheRef()) {
             mutex_unlock(&sCacheListLock);
+            cache->AcquireRefLocked();
             return cache;
         }

@@ -508,26 +514,28 @@ vm_cache_acquire_locked_page_cache(vm_page* page, bool dontWait)
     }

     while (true) {
-        VMCache* cache = page->cache;
-        if (cache == NULL) {
+        VMCacheRef* cacheRef = page->CacheRef();
+        if (cacheRef == NULL) {
             mutex_unlock(&sCacheListLock);
             return NULL;
         }

+        VMCache* cache = cacheRef->cache;
         if (!cache->SwitchLock(&sCacheListLock)) {
             // cache has been deleted
             mutex_lock(&sCacheListLock);
             continue;
         }

-        if (cache == page->cache) {
+        mutex_lock(&sCacheListLock);
+        if (cache == page->Cache()) {
+            mutex_unlock(&sCacheListLock);
             cache->AcquireRefLocked();
             return cache;
         }

         // the cache changed in the meantime
         cache->Unlock();
-        mutex_lock(&sCacheListLock);
     }
 }
@@ -535,6 +543,17 @@ vm_cache_acquire_locked_page_cache(vm_page* page, bool dontWait)
 // #pragma mark - VMCache


+VMCacheRef::VMCacheRef(VMCache* cache)
+    :
+    cache(cache),
+    ref_count(1)
+{
+}
+
+
+// #pragma mark - VMCache
+
+
 bool
 VMCache::_IsMergeable() const
 {
@@ -545,12 +564,15 @@ VMCache::_IsMergeable() const


 VMCache::VMCache()
+    :
+    fCacheRef(NULL)
 {
 }


 VMCache::~VMCache()
 {
+    delete fCacheRef;
 }


@@ -572,6 +594,10 @@ VMCache::Init(uint32 cacheType)
     type = cacheType;
     fPageEventWaiters = NULL;

+    fCacheRef = new(nogrow) VMCacheRef(this);
+    if (fCacheRef == NULL)
+        return B_NO_MEMORY;
+
 #if DEBUG_CACHE_LIST
     mutex_lock(&sCacheListLock);

@@ -607,8 +633,7 @@ VMCache::Delete()

         // remove it
         pages.Remove(page);
-        page->cache = NULL;
-            // TODO: we also need to remove all of the page's mappings!
+        page->SetCacheRef(NULL);

         TRACE(("vm_cache_release_ref: freeing page 0x%lx\n",
             oldPage->physical_page_number));
@@ -689,7 +714,7 @@ VMCache::LookupPage(off_t offset)
     vm_page* page = pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));

 #if KDEBUG
-    if (page != NULL && page->cache != this)
+    if (page != NULL && page->Cache() != this)
         panic("page %p not in cache %p\n", page, this);
 #endif

@@ -704,9 +729,9 @@ VMCache::InsertPage(vm_page* page, off_t offset)
         this, page, offset));
     AssertLocked();

-    if (page->cache != NULL) {
+    if (page->CacheRef() != NULL) {
         panic("insert page %p into cache %p: page cache is set to %p\n",
-            page, this, page->cache);
+            page, this, page->Cache());
     }

     T2(InsertPage(this, page, offset));
@@ -714,7 +739,7 @@ VMCache::InsertPage(vm_page* page, off_t offset)
     page->cache_offset = (page_num_t)(offset >> PAGE_SHIFT);
     page_count++;
     page->usage_count = 2;
-    page->cache = this;
+    page->SetCacheRef(fCacheRef);

 #if KDEBUG
     vm_page* otherPage = pages.Lookup(page->cache_offset);
@@ -739,16 +764,16 @@ VMCache::RemovePage(vm_page* page)
     TRACE(("VMCache::RemovePage(): cache %p, page %p\n", this, page));
     AssertLocked();

-    if (page->cache != this) {
+    if (page->Cache() != this) {
         panic("remove page %p from cache %p: page cache is set to %p\n", page,
-            this, page->cache);
+            this, page->Cache());
     }

     T2(RemovePage(this, page));

     pages.Remove(page);
-    page->cache = NULL;
     page_count--;
+    page->SetCacheRef(NULL);
 }


@@ -758,7 +783,7 @@ VMCache::RemovePage(vm_page* page)
 void
 VMCache::MovePage(vm_page* page)
 {
-    VMCache* oldCache = page->cache;
+    VMCache* oldCache = page->Cache();

     AssertLocked();
     oldCache->AssertLocked();
@@ -771,7 +796,7 @@ VMCache::MovePage(vm_page* page)
     // insert here
     pages.Insert(page);
     page_count++;
-    page->cache = this;
+    page->SetCacheRef(fCacheRef);

     T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
 }
@@ -790,12 +815,20 @@ VMCache::MoveAllPages(VMCache* fromCache)
     page_count = fromCache->page_count;
     fromCache->page_count = 0;

+    // swap the VMCacheRefs
+    mutex_lock(&sCacheListLock);
+    std::swap(fCacheRef, fromCache->fCacheRef);
+    fCacheRef->cache = this;
+    fromCache->fCacheRef->cache = fromCache;
+    mutex_unlock(&sCacheListLock);
+
+#if VM_CACHE_TRACING >= 2
     for (VMCachePagesTree::Iterator it = pages.GetIterator();
             vm_page* page = it.Next();) {
-        page->cache = this;
         T2(RemovePage(fromCache, page));
         T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
     }
+#endif
 }

@@ -3686,7 +3686,7 @@ fault_get_page(PageFaultContext& context)
         // insert the new page into our cache
         cache->InsertPage(page, context.cacheOffset);

-    } else if (page->cache != context.topCache && context.isWrite) {
+    } else if (page->Cache() != context.topCache && context.isWrite) {
         // We have a page that has the data we want, but in the wrong cache
         // object so we need to copy it and stick it into the top cache.
         vm_page* sourcePage = page;
@@ -3709,7 +3709,7 @@ fault_get_page(PageFaultContext& context)

         context.cacheChainLocker.RelockCaches(true);
         sourcePage->state = sourcePageState;
-        sourcePage->cache->NotifyPageEvents(sourcePage, PAGE_EVENT_NOT_BUSY);
+        sourcePage->Cache()->NotifyPageEvents(sourcePage, PAGE_EVENT_NOT_BUSY);

         // insert the new page into our cache
         context.topCache->InsertPage(page, context.cacheOffset);
@@ -3824,7 +3824,7 @@ vm_soft_fault(VMAddressSpace* addressSpace, addr_t originalAddress,
     // it's mapped in read-only, so that we cannot overwrite someone else's
     // data (copy-on-write)
     uint32 newProtection = protection;
-    if (context.page->cache != context.topCache && !isWrite)
+    if (context.page->Cache() != context.topCache && !isWrite)
         newProtection &= ~(B_WRITE_AREA | B_KERNEL_WRITE_AREA);

     bool unmapPage = false;
@@ -5210,7 +5210,7 @@ _user_set_memory_protection(void* _address, size_t size, int protection)
             // If the page is not in the topmost cache and write access is
             // requested, we have to unmap it. Otherwise we can re-map it with
             // the new protection.
-            bool unmapPage = page->cache != topCache
+            bool unmapPage = page->Cache() != topCache
                 && (protection & B_WRITE_AREA) != 0;

             if (!unmapPage) {
@@ -67,8 +67,7 @@ PageCacheLocker::_IgnorePage(vm_page* page)
 {
     if (page->state == PAGE_STATE_WIRED || page->state == PAGE_STATE_BUSY
         || page->state == PAGE_STATE_FREE || page->state == PAGE_STATE_CLEAR
-        || page->state == PAGE_STATE_UNUSED || page->wired_count > 0
-        || page->cache == NULL)
+        || page->state == PAGE_STATE_UNUSED || page->wired_count > 0)
         return true;

     return false;
@@ -102,7 +101,7 @@ PageCacheLocker::Unlock()
     if (fPage == NULL)
         return;

-    fPage->cache->ReleaseRefAndUnlock();
+    fPage->Cache()->ReleaseRefAndUnlock();

     fPage = NULL;
 }
@@ -342,8 +341,9 @@ free_page_swap_space(int32 index)

     DEBUG_PAGE_ACCESS_START(page);

-    if (page->cache->temporary && page->wired_count == 0
-        && page->cache->HasPage(page->cache_offset << PAGE_SHIFT)
+    VMCache* cache = page->Cache();
+    if (cache->temporary && page->wired_count == 0
+        && cache->HasPage(page->cache_offset << PAGE_SHIFT)
         && page->usage_count > 0) {
         // TODO: how to judge a page is highly active?
         if (swap_free_page_swap_space(page)) {
@@ -240,7 +240,7 @@ class WritePage : public AbstractTraceEntry {
     public:
         WritePage(vm_page* page)
             :
-            fCache(page->cache),
+            fCache(page->Cache()),
             fPage(page)
         {
             Initialized();
@@ -404,7 +404,7 @@ dump_page(int argc, char **argv)
     kprintf("queue_next,prev: %p, %p\n", page->queue_link.next,
         page->queue_link.previous);
     kprintf("physical_number: %#lx\n", page->physical_page_number);
-    kprintf("cache: %p\n", page->cache);
+    kprintf("cache: %p\n", page->Cache());
     kprintf("cache_offset: %ld\n", page->cache_offset);
     kprintf("cache_next: %p\n", page->cache_next);
     kprintf("is dummy: %d\n", page->is_dummy);
@@ -496,8 +496,8 @@ dump_page_queue(int argc, char **argv)
         const char *type = "none";
         int i;

-        if (page->cache != NULL) {
-            switch (page->cache->type) {
+        if (page->Cache() != NULL) {
+            switch (page->Cache()->type) {
                 case CACHE_TYPE_RAM:
                     type = "RAM";
                     break;
@@ -518,7 +518,7 @@ dump_page_queue(int argc, char **argv)

     kprintf("page cache type state wired usage\n");
     for (i = 0; page; i++, page = queue->Next(page)) {
-        kprintf("%p %p %-7s %8s %5d %5d\n", page, page->cache,
+        kprintf("%p %p %-7s %8s %5d %5d\n", page, page->Cache(),
             type, page_state_to_string(page->state),
             page->wired_count, page->usage_count);
     }
@@ -543,8 +543,8 @@ dump_page_stats(int argc, char **argv)

         counter[sPages[i].state]++;

-        if (sPages[i].state == PAGE_STATE_MODIFIED && sPages[i].cache != NULL
-            && sPages[i].cache->temporary && sPages[i].wired_count == 0) {
+        if (sPages[i].state == PAGE_STATE_MODIFIED && sPages[i].Cache() != NULL
+            && sPages[i].Cache()->temporary && sPages[i].wired_count == 0) {
             swappableModified++;
             if (sPages[i].usage_count < 0)
                 swappableModifiedInactive++;
@@ -610,7 +610,7 @@ free_page(vm_page* page, bool clear)
         return;
     }

-    if (page->cache != NULL)
+    if (page->CacheRef() != NULL)
         panic("to be freed page %p has cache", page);
     if (!page->mappings.IsEmpty() || page->wired_count > 0)
         panic("to be freed page %p has mappings", page);
@@ -715,7 +715,8 @@ set_page_state(vm_page *page, int pageState)
             sFreePageCondition.NotifyOne();
     }

-    if (page->cache != NULL && page->cache->temporary) {
+    VMCache* cache = page->Cache();
+    if (cache != NULL && cache->temporary) {
         if (pageState == PAGE_STATE_MODIFIED)
             atomic_add(&sModifiedTemporaryPages, 1);
         else if (page->state == PAGE_STATE_MODIFIED)
@@ -730,8 +731,8 @@ set_page_state(vm_page *page, int pageState)
         // page states and active pages have a cache that must be locked at
         // this point. So we rely on the fact that everyone must lock the cache
         // before trying to change/interpret the page state.
-        ASSERT(page->cache != NULL);
-        page->cache->AssertLocked();
+        ASSERT(cache != NULL);
+        cache->AssertLocked();
         page->state = pageState;
     } else {
         if (fromQueue != NULL)
@@ -767,7 +768,7 @@ move_page_to_active_or_inactive_queue(vm_page *page, bool dequeued)
         VMPageQueue& queue = state == PAGE_STATE_ACTIVE
             ? sActivePageQueue : sInactivePageQueue;
         queue.AppendUnlocked(page);
-        if (page->cache->temporary)
+        if (page->Cache()->temporary)
             atomic_add(&sModifiedTemporaryPages, -1);
     } else
         set_page_state(page, state);
@@ -1026,7 +1027,7 @@ PageWriteWrapper::SetTo(vm_page* page, bool dequeuedPage)
         panic("re-setting page write wrapper that isn't completed");

     fPage = page;
-    fCache = page->cache;
+    fCache = page->Cache();
     fDequeuedPage = dequeuedPage;
     fIsActive = true;

@@ -1125,7 +1126,7 @@ void
 PageWriteTransfer::SetTo(PageWriterRun* run, vm_page* page, int32 maxPages)
 {
     fRun = run;
-    fCache = page->cache;
+    fCache = page->Cache();
     fOffset = page->cache_offset;
     fPageCount = 1;
     fMaxPages = maxPages;
@@ -1142,7 +1143,7 @@ PageWriteTransfer::SetTo(PageWriterRun* run, vm_page* page, int32 maxPages)
 bool
 PageWriteTransfer::AddPage(vm_page* page)
 {
-    if (page->cache != fCache
+    if (page->Cache() != fCache
         || (fMaxPages >= 0 && fPageCount >= (uint32)fMaxPages))
         return false;

@@ -1271,7 +1272,7 @@ PageWriterRun::AddPage(vm_page* page)

     if (fTransferCount == 0 || !fTransfers[fTransferCount - 1].AddPage(page)) {
         fTransfers[fTransferCount++].SetTo(this, page,
-            page->cache->MaxPagesPerAsyncWrite());
+            page->Cache()->MaxPagesPerAsyncWrite());
     }
 }

@@ -1360,7 +1361,7 @@ page_writer(void* /*unused*/)

     vm_page marker;
     marker.is_dummy = true;
-    marker.cache = NULL;
+    marker.SetCacheRef(NULL);
     marker.state = PAGE_STATE_UNUSED;
 #if DEBUG_PAGE_QUEUE
     marker.queue = NULL;
@@ -1425,7 +1426,7 @@ page_writer(void* /*unused*/)

         DEBUG_PAGE_ACCESS_START(page);

-        VMCache *cache = page->cache;
+        VMCache *cache = page->Cache();

         // Don't write back wired (locked) pages and don't write RAM pages
         // until we're low on pages. Also avoid writing temporary pages that
@@ -1544,9 +1545,10 @@ steal_page(vm_page *page)
     // try to lock the page's cache
     if (vm_cache_acquire_locked_page_cache(page, false) == NULL)
         return false;
+    VMCache* cache = page->Cache();

-    AutoLocker<VMCache> cacheLocker(page->cache, true);
-    MethodDeleter<VMCache> _2(page->cache, &VMCache::ReleaseRefLocked);
+    AutoLocker<VMCache> cacheLocker(cache, true);
+    MethodDeleter<VMCache> _2(cache, &VMCache::ReleaseRefLocked);

     // check again if that page is still a candidate
     if (page->state != PAGE_STATE_INACTIVE)
@@ -1574,7 +1576,7 @@ steal_page(vm_page *page)
     //dprintf("  steal page %p from cache %p%s\n", page, page->cache,
     //    page->state == PAGE_STATE_INACTIVE ? "" : " (ACTIVE)");

-    page->cache->RemovePage(page);
+    cache->RemovePage(page);

     sInactivePageQueue.RemoveUnlocked(page);
     return true;
@@ -1587,7 +1589,7 @@ steal_pages(vm_page **pages, size_t count)
     while (true) {
         vm_page marker;
         marker.is_dummy = true;
-        marker.cache = NULL;
+        marker.SetCacheRef(NULL);
         marker.state = PAGE_STATE_UNUSED;
 #if DEBUG_PAGE_QUEUE
         marker.queue = NULL;
@@ -1890,7 +1892,7 @@ vm_page_init(kernel_args *args)
         sPages[i].wired_count = 0;
         sPages[i].usage_count = 0;
         sPages[i].busy_writing = false;
-        sPages[i].cache = NULL;
+        sPages[i].SetCacheRef(NULL);
 #if DEBUG_PAGE_QUEUE
         sPages[i].queue = NULL;
 #endif
@@ -2160,7 +2162,7 @@ vm_page_allocate_page(int pageState)
         }
     }

-    if (page->cache != NULL)
+    if (page->CacheRef() != NULL)
         panic("supposed to be free page %p has cache\n", page);

     DEBUG_PAGE_ACCESS_START(page);
@@ -2383,8 +2385,7 @@ vm_page_set_state(vm_page *page, int pageState)
 void
 vm_page_requeue(struct vm_page *page, bool tail)
 {
-    ASSERT(page->cache != NULL);
-    page->cache->AssertLocked();
+    ASSERT(page->Cache() != NULL);
     DEBUG_PAGE_ACCESS_CHECK(page);

     VMPageQueue *queue = NULL;