diff --git a/headers/private/kernel/vm/VMCache.h b/headers/private/kernel/vm/VMCache.h
index e8b3cad7ac..d5185b93b7 100644
--- a/headers/private/kernel/vm/VMCache.h
+++ b/headers/private/kernel/vm/VMCache.h
@@ -113,8 +113,6 @@ public:
 			void				MovePage(vm_page* page, off_t offset);
 			void				MovePage(vm_page* page);
 			void				MoveAllPages(VMCache* fromCache);
-			void				MovePageRange(VMCache* source, off_t offset,
-									off_t size, off_t newOffset);
 
 	inline	page_num_t			WiredPagesCount() const;
 	inline	void				IncrementWiredPagesCount();
@@ -134,6 +132,8 @@ public:
 									int priority);
 	virtual	status_t			Resize(off_t newSize, int priority);
 	virtual	status_t			Rebase(off_t newBase, int priority);
+	virtual	status_t			Adopt(VMCache* source, off_t offset, off_t size,
+									off_t newOffset);
 
 			status_t			FlushAndRemoveAllPages();
 
diff --git a/src/system/kernel/vm/VMAnonymousCache.cpp b/src/system/kernel/vm/VMAnonymousCache.cpp
index a8a4413227..646be9da68 100644
--- a/src/system/kernel/vm/VMAnonymousCache.cpp
+++ b/src/system/kernel/vm/VMAnonymousCache.cpp
@@ -562,6 +562,108 @@ VMAnonymousCache::Rebase(off_t newBase, int priority)
 }
 
 
+/*! Moves the swap pages for the given range from the source cache into this
+	cache. Both caches must be locked.
+*/
+status_t
+VMAnonymousCache::Adopt(VMCache* _source, off_t offset, off_t size,
+	off_t newOffset)
+{
+	VMAnonymousCache* source = dynamic_cast<VMAnonymousCache*>(_source);
+	if (source == NULL) {
+		panic("VMAnonymousCache::Adopt(): adopt from incompatible cache %p "
+			"requested", _source);
+		return B_ERROR;
+	}
+
+	off_t pageIndex = newOffset >> PAGE_SHIFT;
+	off_t sourcePageIndex = offset >> PAGE_SHIFT;
+	off_t sourceEndPageIndex = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
+	swap_block* swapBlock = NULL;
+
+	WriteLocker locker(sSwapHashLock);
+
+	while (sourcePageIndex < sourceEndPageIndex
+			&& source->fAllocatedSwapSize > 0) {
+		swap_addr_t left
+			= SWAP_BLOCK_PAGES - (sourcePageIndex & SWAP_BLOCK_MASK);
+
+		swap_hash_key sourceKey = { source, sourcePageIndex };
+		swap_block* sourceSwapBlock = sSwapHashTable.Lookup(sourceKey);
+		if (sourceSwapBlock == NULL || sourceSwapBlock->used == 0) {
+			sourcePageIndex += left;
+			pageIndex += left;
+			swapBlock = NULL;
+			continue;
+		}
+
+		for (; left > 0 && sourceSwapBlock->used > 0;
+				left--, sourcePageIndex++, pageIndex++) {
+
+			swap_addr_t blockIndex = pageIndex & SWAP_BLOCK_MASK;
+			if (swapBlock == NULL || blockIndex == 0) {
+				swap_hash_key key = { this, pageIndex };
+				swapBlock = sSwapHashTable.Lookup(key);
+
+				if (swapBlock == NULL) {
+					swapBlock = (swap_block*)object_cache_alloc(sSwapBlockCache,
+						CACHE_DONT_WAIT_FOR_MEMORY
+							| CACHE_DONT_LOCK_KERNEL_SPACE);
+					if (swapBlock == NULL)
+						return B_NO_MEMORY;
+
+					swapBlock->key.cache = this;
+					swapBlock->key.page_index
+						= pageIndex & ~(off_t)SWAP_BLOCK_MASK;
+					swapBlock->used = 0;
+					for (uint32 i = 0; i < SWAP_BLOCK_PAGES; i++)
+						swapBlock->swap_slots[i] = SWAP_SLOT_NONE;
+
+					sSwapHashTable.InsertUnchecked(swapBlock);
+				}
+			}
+
+			swap_addr_t sourceBlockIndex = sourcePageIndex & SWAP_BLOCK_MASK;
+			swap_addr_t slotIndex
+				= sourceSwapBlock->swap_slots[sourceBlockIndex];
+			if (slotIndex == SWAP_SLOT_NONE)
+				continue;
+
+			ASSERT(swapBlock->swap_slots[blockIndex] == SWAP_SLOT_NONE);
+
+			swapBlock->swap_slots[blockIndex] = slotIndex;
+			swapBlock->used++;
+			fAllocatedSwapSize += B_PAGE_SIZE;
+
+			sourceSwapBlock->swap_slots[sourceBlockIndex] = SWAP_SLOT_NONE;
+			sourceSwapBlock->used--;
+			source->fAllocatedSwapSize -= B_PAGE_SIZE;
+
+			TRACE("adopted slot %#" B_PRIx32 " from %p at page %" B_PRIdOFF
+				" to %p at page %" B_PRIdOFF "\n", slotIndex, source,
+				sourcePageIndex, this, pageIndex);
+		}
+
+		if (left > 0) {
+			sourcePageIndex += left;
+			pageIndex += left;
+			swapBlock = NULL;
+		}
+
+		if (sourceSwapBlock->used == 0) {
+			// All swap pages have been adopted; we can discard the swap block.
+			sSwapHashTable.RemoveUnchecked(sourceSwapBlock);
+			object_cache_free(sSwapBlockCache, sourceSwapBlock,
+				CACHE_DONT_WAIT_FOR_MEMORY | CACHE_DONT_LOCK_KERNEL_SPACE);
+		}
+	}
+
+	locker.Unlock();
+
+	return VMCache::Adopt(source, offset, size, newOffset);
+}
+
+
 status_t
 VMAnonymousCache::Commit(off_t size, int priority)
 {
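The VMAnonymousCache override above migrates the swapped-out pages themselves: it walks the source cache in SWAP_BLOCK_PAGES-sized strides, lazily allocates destination swap blocks, copies each occupied slot across, and discards source blocks once their last slot has been adopted. What follows is a minimal user-space sketch of that bookkeeping, assuming 32-slot blocks; SwapBlock, SwapMap, kBlockPages, kBlockMask and kSlotNone are stand-ins for swap_block, sSwapHashTable, SWAP_BLOCK_PAGES, SWAP_BLOCK_MASK and SWAP_SLOT_NONE, and the locking, object cache and fAllocatedSwapSize accounting are omitted.

	#include <cassert>
	#include <cstdint>
	#include <map>

	constexpr uint32_t kBlockPages = 32;		// stands in for SWAP_BLOCK_PAGES
	constexpr uint32_t kBlockMask = kBlockPages - 1;
	constexpr uint32_t kSlotNone = UINT32_MAX;	// stands in for SWAP_SLOT_NONE

	struct SwapBlock {
		uint32_t used = 0;
		uint32_t slots[kBlockPages];

		SwapBlock()
		{
			for (uint32_t& slot : slots)
				slot = kSlotNone;
		}
	};

	// Keyed by block-aligned page index; stands in for sSwapHashTable.
	typedef std::map<int64_t, SwapBlock> SwapMap;

	// Move the swap slot for sourcePage in `from` to destPage in `to`,
	// mirroring the per-page body of VMAnonymousCache::Adopt().
	static void
	AdoptSlot(SwapMap& from, SwapMap& to, int64_t sourcePage, int64_t destPage)
	{
		SwapMap::iterator it = from.find(sourcePage & ~(int64_t)kBlockMask);
		if (it == from.end())
			return;		// nothing swapped out at this page

		uint32_t& sourceSlot = it->second.slots[sourcePage & kBlockMask];
		if (sourceSlot == kSlotNone)
			return;

		// Lazily creates the destination block, like the object_cache_alloc()
		// path above (which can fail with B_NO_MEMORY in the kernel).
		SwapBlock& destBlock = to[destPage & ~(int64_t)kBlockMask];
		assert(destBlock.slots[destPage & kBlockMask] == kSlotNone);

		destBlock.slots[destPage & kBlockMask] = sourceSlot;
		destBlock.used++;

		sourceSlot = kSlotNone;
		if (--it->second.used == 0)
			from.erase(it);		// block empty: discard it, as the kernel does
	}

Keying blocks by their block-aligned page index is what lets an entirely empty block be skipped in one stride, which the kernel loop exploits through its `left` counter.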
B_PRIdOFF "\n", slotIndex, source, + sourcePageIndex, this, pageIndex); + } + + if (left > 0) { + sourcePageIndex += left; + pageIndex += left; + swapBlock = NULL; + } + + if (sourceSwapBlock->used == 0) { + // All swap pages have been adopted, we can discard the swap block. + sSwapHashTable.RemoveUnchecked(sourceSwapBlock); + object_cache_free(sSwapBlockCache, sourceSwapBlock, + CACHE_DONT_WAIT_FOR_MEMORY | CACHE_DONT_LOCK_KERNEL_SPACE); + } + } + + locker.Unlock(); + + return VMCache::Adopt(source, offset, size, newOffset); +} + + status_t VMAnonymousCache::Commit(off_t size, int priority) { diff --git a/src/system/kernel/vm/VMAnonymousCache.h b/src/system/kernel/vm/VMAnonymousCache.h index f2df602f4f..b2c4f11c0e 100644 --- a/src/system/kernel/vm/VMAnonymousCache.h +++ b/src/system/kernel/vm/VMAnonymousCache.h @@ -41,6 +41,8 @@ public: virtual status_t Resize(off_t newSize, int priority); virtual status_t Rebase(off_t newBase, int priority); + virtual status_t Adopt(VMCache* source, off_t offset, + off_t size, off_t newOffset); virtual status_t Commit(off_t size, int priority); virtual bool HasPage(off_t offset); diff --git a/src/system/kernel/vm/VMCache.cpp b/src/system/kernel/vm/VMCache.cpp index 9cc13434d2..f0f626c50d 100644 --- a/src/system/kernel/vm/VMCache.cpp +++ b/src/system/kernel/vm/VMCache.cpp @@ -924,27 +924,6 @@ VMCache::MoveAllPages(VMCache* fromCache) } -/*! Moves the given pages from their current cache and inserts them into this - cache. Both caches must be locked. -*/ -void -VMCache::MovePageRange(VMCache* source, off_t offset, off_t size, - off_t newOffset) -{ - page_num_t startPage = offset >> PAGE_SHIFT; - page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT; - int32 offsetChange = (int32)(newOffset - offset); - - VMCachePagesTree::Iterator it = source->pages.GetIterator(startPage, true, - true); - for (vm_page* page = it.Next(); - page != NULL && page->cache_offset < endPage; - page = it.Next()) { - MovePage(page, (page->cache_offset << PAGE_SHIFT) + offsetChange); - } -} - - /*! Waits until one or more events happened for a given page which belongs to this cache. The cache must be locked. It will be unlocked by the method. \a relock @@ -1264,6 +1243,28 @@ VMCache::Rebase(off_t newBase, int priority) } +/*! Moves pages in the given range from the source cache into this cache. Both + caches must be locked. +*/ +status_t +VMCache::Adopt(VMCache* source, off_t offset, off_t size, off_t newOffset) +{ + page_num_t startPage = offset >> PAGE_SHIFT; + page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT; + off_t offsetChange = newOffset - offset; + + VMCachePagesTree::Iterator it = source->pages.GetIterator(startPage, true, + true); + for (vm_page* page = it.Next(); + page != NULL && page->cache_offset < endPage; + page = it.Next()) { + MovePage(page, (page->cache_offset << PAGE_SHIFT) + offsetChange); + } + + return B_OK; +} + + /*! You have to call this function with the VMCache lock held. */ status_t VMCache::FlushAndRemoveAllPages() diff --git a/src/system/kernel/vm/vm.cpp b/src/system/kernel/vm/vm.cpp index c0c24a6b08..ecf90c5bc8 100644 --- a/src/system/kernel/vm/vm.cpp +++ b/src/system/kernel/vm/vm.cpp @@ -735,31 +735,46 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address, secondCache->Lock(); secondCache->temporary = cache->temporary; - - // Transfer the concerned pages from the first cache. 
diff --git a/src/system/kernel/vm/vm.cpp b/src/system/kernel/vm/vm.cpp
index c0c24a6b08..ecf90c5bc8 100644
--- a/src/system/kernel/vm/vm.cpp
+++ b/src/system/kernel/vm/vm.cpp
@@ -735,31 +735,46 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
 		secondCache->Lock();
 		secondCache->temporary = cache->temporary;
-
-		// Transfer the concerned pages from the first cache.
-		secondCache->MovePageRange(cache, secondBase - area->Base()
-			+ area->cache_offset, secondSize, area->cache_offset);
 		secondCache->virtual_base = area->cache_offset;
 		secondCache->virtual_end = area->cache_offset + secondSize;
 
-		// Since VMCache::Resize() can temporarily drop the lock, we must
-		// unlock all lower caches to prevent locking order inversion.
-		cacheChainLocker.Unlock(cache);
-		cache->Resize(cache->virtual_base + firstNewSize, priority);
-			// Don't unlock the cache yet because we might have to resize it
-			// back.
+		// Transfer the concerned pages from the first cache.
+		off_t adoptOffset = area->cache_offset + secondBase - area->Base();
+		error = secondCache->Adopt(cache, adoptOffset, secondSize,
+			area->cache_offset);
+
+		if (error == B_OK) {
+			// Since VMCache::Resize() can temporarily drop the lock, we must
+			// unlock all lower caches to prevent locking order inversion.
+			cacheChainLocker.Unlock(cache);
+			cache->Resize(cache->virtual_base + firstNewSize, priority);
+				// Don't unlock the cache yet because we might have to resize it
+				// back.
+
+			// Map the second area.
+			error = map_backing_store(addressSpace, secondCache,
+				area->cache_offset, area->name, secondSize, area->wiring,
+				area->protection, REGION_NO_PRIVATE_MAP, 0,
+				&addressRestrictions, kernel, &secondArea, NULL);
+		}
 
-		// Map the second area.
-		error = map_backing_store(addressSpace, secondCache, area->cache_offset,
-			area->name, secondSize, area->wiring, area->protection,
-			REGION_NO_PRIVATE_MAP, 0, &addressRestrictions, kernel, &secondArea,
-			NULL);
 		if (error != B_OK) {
 			// Restore the original cache.
 			cache->Resize(cache->virtual_base + oldSize, priority);
+
 			// Move the pages back.
-			cache->MovePageRange(secondCache, area->cache_offset, secondSize,
-				secondBase - area->Base() + area->cache_offset);
+			status_t readoptStatus = cache->Adopt(secondCache,
+				area->cache_offset, secondSize, adoptOffset);
+			if (readoptStatus != B_OK) {
+				// Some (swap) pages have not been moved back and will be lost
+				// once the second cache is deleted.
+				panic("failed to restore cache range: %s",
+					strerror(readoptStatus));
+
+				// TODO: Handle out of memory cases by freeing memory and
+				// retrying.
+			}
+
 			cache->ReleaseRefAndUnlock();
 			secondCache->ReleaseRefAndUnlock();
 			addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags);
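The reworked cut_area() also reorders the operations: the tail pages are adopted before cache->Resize() shrinks the first cache, which would otherwise discard them, and because the swap-aware Adopt() can now fail with B_NO_MEMORY, the resize and mapping only proceed on success. The error path unwinds in reverse: regrow the first cache, then re-adopt the pages; if even that fails, swap pages would be lost with the doomed second cache, hence the panic(). A minimal sketch of that commit/rollback shape, where Cache, CutTail and mapSecondArea are hypothetical stand-ins rather than kernel API:

	#include <cstdint>
	#include <cstdio>

	// Stand-in for VMCache; Adopt() always succeeds in this model, while the
	// kernel version may return B_NO_MEMORY.
	struct Cache {
		int64_t end;

		bool Adopt(Cache& source, int64_t offset, int64_t size,
			int64_t newOffset)
		{
			(void)source; (void)offset; (void)size; (void)newOffset;
			return true;
		}

		void Resize(int64_t newEnd) { end = newEnd; }
	};

	// Returns true on success; on failure the first cache is restored.
	static bool
	CutTail(Cache& first, Cache& second, int64_t adoptOffset,
		int64_t secondSize, int64_t firstNewSize, int64_t oldSize,
		bool (*mapSecondArea)(Cache&))
	{
		// 1. Move the tail pages out *before* shrinking; Resize() would
		// otherwise discard them.
		if (!second.Adopt(first, adoptOffset, secondSize, 0))
			return false;	// nothing to undo yet

		// 2. Shrink the first cache, then 3. map the second area.
		first.Resize(firstNewSize);
		if (mapSecondArea(second))
			return true;

		// Failure: unwind in reverse order, regrow, then re-adopt the pages.
		first.Resize(oldSize);
		if (!first.Adopt(second, 0, secondSize, adoptOffset)) {
			// Mirrors the panic() above: swap pages that were not moved back
			// would be lost once the second cache is deleted.
			std::fprintf(stderr, "failed to restore cache range\n");
		}
		return false;
	}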