diff --git a/headers/private/kernel/vm/VMCache.h b/headers/private/kernel/vm/VMCache.h
index 65ef81460d..a8c0f04ead 100644
--- a/headers/private/kernel/vm/VMCache.h
+++ b/headers/private/kernel/vm/VMCache.h
@@ -110,7 +110,7 @@ public:
 			status_t			WriteModified();
 			status_t			SetMinimalCommitment(off_t commitment,
 									int priority);
-			status_t			Resize(off_t newSize, int priority);
+	virtual	status_t			Resize(off_t newSize, int priority);
 
 			status_t			FlushAndRemoveAllPages();
 
diff --git a/src/system/kernel/vm/VMAnonymousCache.cpp b/src/system/kernel/vm/VMAnonymousCache.cpp
index 66d34b37f2..f17b4a7042 100644
--- a/src/system/kernel/vm/VMAnonymousCache.cpp
+++ b/src/system/kernel/vm/VMAnonymousCache.cpp
@@ -463,6 +463,65 @@ VMAnonymousCache::Init(bool canOvercommit, int32 numPrecommittedPages,
 }
 
 
+status_t
+VMAnonymousCache::Resize(off_t newSize, int priority)
+{
+	// If the cache size shrinks, drop all swap pages beyond the new size.
+	if (fAllocatedSwapSize > 0) {
+		page_num_t oldPageCount = (virtual_end + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
+		swap_block* swapBlock = NULL;
+
+		for (page_num_t pageIndex = (newSize + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
+				pageIndex < oldPageCount && fAllocatedSwapSize > 0;
+				pageIndex++) {
+			WriteLocker locker(sSwapHashLock);
+
+			// Get the swap slot index for the page.
+			swap_addr_t blockIndex = pageIndex & SWAP_BLOCK_MASK;
+			if (swapBlock == NULL || blockIndex == 0) {
+				swap_hash_key key = { this, pageIndex };
+				swapBlock = sSwapHashTable.Lookup(key);
+
+				if (swapBlock == NULL) {
+					// No swap block for this range -- skip to the first page
+					// of the next block (the loop increment adds the 1).
+					pageIndex = ROUNDUP(pageIndex + 1, SWAP_BLOCK_PAGES) - 1;
+					continue;
+				}
+			}
+
+			swap_addr_t slotIndex = swapBlock->swap_slots[blockIndex];
+			vm_page* page;
+			if (slotIndex != SWAP_SLOT_NONE
+				&& ((page = LookupPage((off_t)pageIndex * B_PAGE_SIZE)) == NULL
+					|| !page->busy)) {
+				// TODO: We skip (i.e. leak) swap space of busy pages, since
+				// there could be I/O going on (paging in/out). Waiting is
+				// not an option as 1. unlocking the cache means that new
+				// swap pages could be added in a range we've already
+				// cleared (since the cache still has the old size) and 2.
+				// we'd risk a deadlock in case we come from the file cache
+				// and the FS holds the node's write-lock. We should mark
+				// the page invalid and let the one responsible clean up.
+				// There's just no such mechanism yet.
+				swap_slot_dealloc(slotIndex, 1);
+				fAllocatedSwapSize -= B_PAGE_SIZE;
+
+				swapBlock->swap_slots[blockIndex] = SWAP_SLOT_NONE;
+				if (--swapBlock->used == 0) {
+					// All swap pages have been freed -- we can discard the
+					// swap block.
+					sSwapHashTable.RemoveUnchecked(swapBlock);
+					object_cache_free(sSwapBlockCache, swapBlock,
+						CACHE_DONT_WAIT_FOR_MEMORY
+							| CACHE_DONT_LOCK_KERNEL_SPACE);
+				}
+			}
+		}
+	}
+
+	return VMCache::Resize(newSize, priority);
+}
+
+
 status_t
 VMAnonymousCache::Commit(off_t size, int priority)
 {
diff --git a/src/system/kernel/vm/VMAnonymousCache.h b/src/system/kernel/vm/VMAnonymousCache.h
index c4a4671f6f..6ef72c9db7 100644
--- a/src/system/kernel/vm/VMAnonymousCache.h
+++ b/src/system/kernel/vm/VMAnonymousCache.h
@@ -38,6 +38,8 @@ public:
 									int32 numGuardPages,
 									uint32 allocationFlags);
 
+	virtual	status_t			Resize(off_t newSize, int priority);
+
 	virtual	status_t			Commit(off_t size, int priority);
 	virtual	bool				HasPage(off_t offset);
 	virtual	bool				DebugHasPage(off_t offset);
diff --git a/src/system/kernel/vm/VMCache.cpp b/src/system/kernel/vm/VMCache.cpp
index 6c9de3a856..9bc7997ad9 100644
--- a/src/system/kernel/vm/VMCache.cpp
+++ b/src/system/kernel/vm/VMCache.cpp
@@ -1038,8 +1038,6 @@ VMCache::SetMinimalCommitment(off_t commitment, int priority)
 status_t
 VMCache::Resize(off_t newSize, int priority)
 {
-// TODO: This method must be virtual as VMAnonymousCache needs to free allocated
-// swap pages!
 	TRACE(("VMCache::Resize(cache %p, newSize %Ld) old size %Ld\n",
 		this, newSize, this->virtual_end));
 	this->AssertLocked();