diff --git a/build/config_headers/kernel_debug_config.h b/build/config_headers/kernel_debug_config.h index 6d4f085811..186fefb7e2 100644 --- a/build/config_headers/kernel_debug_config.h +++ b/build/config_headers/kernel_debug_config.h @@ -84,10 +84,6 @@ // be in. #define DEBUG_PAGE_QUEUE 0 -// Enables extra debug fields in the vm_page used to track page transitions -// between caches. -#define DEBUG_PAGE_CACHE_TRANSITIONS 0 - // Enables a global list of all vm_cache structures. #define DEBUG_CACHE_LIST KDEBUG_LEVEL_1 diff --git a/headers/private/kernel/vm/VMCache.h b/headers/private/kernel/vm/VMCache.h index 6f458e021c..6ea7de38c8 100644 --- a/headers/private/kernel/vm/VMCache.h +++ b/headers/private/kernel/vm/VMCache.h @@ -96,6 +96,8 @@ public: vm_page* LookupPage(off_t offset); void InsertPage(vm_page* page, off_t offset); void RemovePage(vm_page* page); + void MovePage(vm_page* page); + void MoveAllPages(VMCache* fromCache); void AddConsumer(VMCache* consumer); diff --git a/headers/private/kernel/vm/vm_types.h b/headers/private/kernel/vm/vm_types.h index 102d6bc5b0..50160b4685 100644 --- a/headers/private/kernel/vm/vm_types.h +++ b/headers/private/kernel/vm/vm_types.h @@ -90,18 +90,13 @@ struct vm_page { void* queue; #endif -#if DEBUG_PAGE_CACHE_TRANSITIONS - uint32 debug_flags; - struct vm_page* collided_page; -#endif - uint8 type : 2; uint8 state : 3; uint8 is_cleared : 1; // is currently only used in vm_page_allocate_page_run() uint8 busy_writing : 1; - uint8 merge_swap : 1; + uint8 unused : 1; // used in VMAnonymousCache::Merge() int8 usage_count; diff --git a/src/system/kernel/vm/VMAnonymousCache.cpp b/src/system/kernel/vm/VMAnonymousCache.cpp index 0257fb87b2..d56514b9b9 100644 --- a/src/system/kernel/vm/VMAnonymousCache.cpp +++ b/src/system/kernel/vm/VMAnonymousCache.cpp @@ -751,11 +751,15 @@ VMAnonymousCache::Merge(VMCache* _source) if (committed_size > actualSize) _Commit(actualSize); - // Move all not shadowed pages from the source to the consumer 
cache. - _MergePagesSmallerSource(source); - // Move all not shadowed swap pages from the source to the consumer cache. + // Also remove all source pages that are shadowed by consumer swap pages. _MergeSwapPages(source); + + // Move all not shadowed pages from the source to the consumer cache. + if (source->page_count < page_count) + _MergePagesSmallerSource(source); + else + _MergePagesSmallerConsumer(source); } @@ -916,44 +920,59 @@ VMAnonymousCache::_Commit(off_t size) void VMAnonymousCache::_MergePagesSmallerSource(VMAnonymousCache* source) { + // The source cache has fewer pages than the consumer (this cache), so we + // iterate through the source's pages and move the ones that are not + // shadowed up to the consumer. + for (VMCachePagesTree::Iterator it = source->pages.GetIterator(); vm_page* page = it.Next();) { // Note: Removing the current node while iterating through a // IteratableSplayTree is safe. vm_page* consumerPage = LookupPage( (off_t)page->cache_offset << PAGE_SHIFT); - swap_addr_t consumerSwapSlot = _SwapBlockGetAddress(page->cache_offset); - if (consumerPage == NULL && consumerSwapSlot == SWAP_SLOT_NONE) { + if (consumerPage == NULL) { // the page is not yet in the consumer cache - move it upwards source->RemovePage(page); InsertPage(page, (off_t)page->cache_offset << PAGE_SHIFT); - - // If the moved-up page has a swap page associated, we mark it, so - // that the swap page is moved upwards, too. We would lose if the - // page was modified and written to swap, and is now not marked - // modified. 
- if (source->_SwapBlockGetAddress(page->cache_offset) - != SWAP_SLOT_NONE) { - page->merge_swap = true; - } -#if DEBUG_PAGE_CACHE_TRANSITIONS - } else { - page->debug_flags = 0; - if (consumerPage->state == PAGE_STATE_BUSY) - page->debug_flags |= 0x1; - if (consumerPage->type == PAGE_TYPE_DUMMY) - page->debug_flags |= 0x2; - page->collided_page = consumerPage; - consumerPage->collided_page = page; -#endif // DEBUG_PAGE_CACHE_TRANSITIONS } } } +void +VMAnonymousCache::_MergePagesSmallerConsumer(VMAnonymousCache* source) +{ + // The consumer (this cache) has fewer pages than the source, so we move the + // consumer's pages to the source (freeing shadowed ones) and finally just + // move all pages of the source back to the consumer. + + for (VMCachePagesTree::Iterator it = pages.GetIterator(); + vm_page* page = it.Next();) { + // If a source page is in the way, remove and free it. + vm_page* sourcePage = source->LookupPage( + (off_t)page->cache_offset << PAGE_SHIFT); + if (sourcePage != NULL) { + source->RemovePage(sourcePage); + vm_page_free(source, sourcePage); + } + + // Note: Removing the current node while iterating through a + // IteratableSplayTree is safe. + source->MovePage(page); + } + + MoveAllPages(source); +} + + void VMAnonymousCache::_MergeSwapPages(VMAnonymousCache* source) { + // If neither source nor consumer has swap pages, we don't have to do + // anything. 
+ if (source->fAllocatedSwapSize == 0 && fAllocatedSwapSize == 0) + return; + for (off_t offset = source->virtual_base & ~(off_t)(B_PAGE_SIZE * SWAP_BLOCK_PAGES - 1); offset < source->virtual_end; @@ -965,48 +984,45 @@ VMAnonymousCache::_MergeSwapPages(VMAnonymousCache* source) swap_hash_key key = { source, swapBlockPageIndex }; swap_block* sourceSwapBlock = sSwapHashTable.Lookup(key); - if (sourceSwapBlock == NULL) - continue; - // remove the source swap block -- we will either take over the swap // space (and the block) or free it - sSwapHashTable.RemoveUnchecked(sourceSwapBlock); + if (sourceSwapBlock != NULL) + sSwapHashTable.RemoveUnchecked(sourceSwapBlock); key.cache = this; swap_block* swapBlock = sSwapHashTable.Lookup(key); locker.Unlock(); + // remove all source pages that are shadowed by consumer swap pages + if (swapBlock != NULL) { + for (uint32 i = 0; i < SWAP_BLOCK_PAGES; i++) { + if (swapBlock->swap_slots[i] != SWAP_SLOT_NONE) { + vm_page* page = source->LookupPage( + (off_t)(swapBlockPageIndex + i) << PAGE_SHIFT); + if (page != NULL) { + source->RemovePage(page); + vm_page_free(source, page); + } + } + } + } + + if (sourceSwapBlock == NULL) + continue; + for (uint32 i = 0; i < SWAP_BLOCK_PAGES; i++) { off_t pageIndex = swapBlockPageIndex + i; swap_addr_t sourceSlotIndex = sourceSwapBlock->swap_slots[i]; if (sourceSlotIndex == SWAP_SLOT_NONE) - // this page is not swapped out continue; - vm_page* page = LookupPage((off_t)pageIndex << PAGE_SHIFT); - - bool keepSwapPage = true; - if (page != NULL && !page->merge_swap) { - // The consumer already has a page at this index and it wasn't - // one taken over from the source. So we can simply free the - // swap space. - keepSwapPage = false; - } else { - if (page != NULL) { - // The page was taken over from the source cache. Clear the - // indicator flag. We'll take over the swap page too. 
- page->merge_swap = false; - } else if (swapBlock != NULL - && swapBlock->swap_slots[i] != SWAP_SLOT_NONE) { - // There's no page in the consumer cache, but a swap page. - // Free the source swap page. - keepSwapPage = false; - } - } - - if (!keepSwapPage) { + if ((swapBlock != NULL + && swapBlock->swap_slots[i] != SWAP_SLOT_NONE) + || LookupPage((off_t)pageIndex << PAGE_SHIFT) != NULL) { + // The consumer already has a page or a swapped out page + // at this index. So we can free the source swap space. swap_slot_dealloc(sourceSlotIndex, 1); sourceSwapBlock->swap_slots[i] = SWAP_SLOT_NONE; sourceSwapBlock->used--; @@ -1036,7 +1050,7 @@ VMAnonymousCache::_MergeSwapPages(VMAnonymousCache* source) locker.Unlock(); } else { // We need to take over some of the source's swap pages and there's - // already swap block in the consumer cache. Copy the respective + // already a swap block in the consumer cache. Copy the respective // swap addresses and discard the source swap block. for (uint32 i = 0; i < SWAP_BLOCK_PAGES; i++) { if (sourceSwapBlock->swap_slots[i] != SWAP_SLOT_NONE) diff --git a/src/system/kernel/vm/VMCache.cpp b/src/system/kernel/vm/VMCache.cpp index 48b3a0eaea..462704b350 100644 --- a/src/system/kernel/vm/VMCache.cpp +++ b/src/system/kernel/vm/VMCache.cpp @@ -13,6 +13,8 @@ #include #include +#include <algorithm> + #include #include #include @@ -789,6 +791,53 @@ VMCache::RemovePage(vm_page* page) } +/*! Moves the given page from its current cache and inserts it into this cache. + Both caches must be locked. +*/ +void +VMCache::MovePage(vm_page* page) +{ + VMCache* oldCache = page->cache; + + AssertLocked(); + oldCache->AssertLocked(); + + // remove from old cache + oldCache->pages.Remove(page); + oldCache->page_count--; + T2(RemovePage(oldCache, page)); + + // insert here + pages.Insert(page); + page_count++; + page->cache = this; + T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT)); +} + + +/*! Moves all pages from the given cache to this one. 
+ Both caches must be locked. This cache must be empty. +*/ +void +VMCache::MoveAllPages(VMCache* fromCache) +{ + AssertLocked(); + fromCache->AssertLocked(); + ASSERT(page_count == 0); + + std::swap(fromCache->pages, pages); + page_count = fromCache->page_count; + fromCache->page_count = 0; + + for (VMCachePagesTree::Iterator it = pages.GetIterator(); + vm_page* page = it.Next();) { + page->cache = this; + T2(RemovePage(fromCache, page)); + T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT)); + } +} + + /*! Waits until one or more events happened for a given page which belongs to this cache. The cache must be locked. It will be unlocked by the method. \a relock @@ -1165,16 +1214,6 @@ VMCache::Merge(VMCache* source) // the page is not yet in the consumer cache - move it upwards source->RemovePage(page); InsertPage(page, (off_t)page->cache_offset << PAGE_SHIFT); -#if DEBUG_PAGE_CACHE_TRANSITIONS - } else { - page->debug_flags = 0; - if (consumerPage->state == PAGE_STATE_BUSY) - page->debug_flags |= 0x1; - if (consumerPage->type == PAGE_TYPE_DUMMY) - page->debug_flags |= 0x2; - page->collided_page = consumerPage; - consumerPage->collided_page = page; -#endif // DEBUG_PAGE_CACHE_TRANSITIONS } } } diff --git a/src/system/kernel/vm/vm_page.cpp b/src/system/kernel/vm/vm_page.cpp index 6cf9b81393..79a4af0019 100644 --- a/src/system/kernel/vm/vm_page.cpp +++ b/src/system/kernel/vm/vm_page.cpp @@ -564,10 +564,6 @@ dump_page(int argc, char **argv) #if DEBUG_PAGE_QUEUE kprintf("queue: %p\n", page->queue); #endif - #if DEBUG_PAGE_CACHE_TRANSITIONS - kprintf("debug_flags: 0x%lx\n", page->debug_flags); - kprintf("collided page: %p\n", page->collided_page); - #endif // DEBUG_PAGE_CACHE_TRANSITIONS kprintf("area mappings:\n"); vm_page_mappings::Iterator iterator = page->mappings.GetIterator(); @@ -1906,15 +1902,10 @@ vm_page_init(kernel_args *args) sPages[i].wired_count = 0; sPages[i].usage_count = 0; sPages[i].busy_writing = false; - sPages[i].merge_swap = false; 
sPages[i].cache = NULL; #if DEBUG_PAGE_QUEUE sPages[i].queue = NULL; #endif - #if DEBUG_PAGE_CACHE_TRANSITIONS - sPages[i].debug_flags = 0; - sPages[i].collided_page = NULL; - #endif // DEBUG_PAGE_CACHE_TRANSITIONS enqueue_page(&sFreePageQueue, &sPages[i]); }