Made VMCache::Resize() virtual and let VMAnonymousCache override it to free

swap space when the cache shrinks. Currently the implementation still leaks
swap space of busy pages.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@36373 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold 2010-04-20 14:04:18 +00:00
parent ba16e4ec61
commit efeca209a1
4 changed files with 62 additions and 3 deletions
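
In essence the change applies the usual "override, do the subclass-specific
work, then delegate to the base class" shape. A minimal standalone sketch of
that shape (the class and member names below are placeholders for
illustration, not the real Haiku types):

	#include <stdio.h>

	struct Cache {
		virtual ~Cache() {}

		// Making Resize() virtual lets subclasses hook into it.
		virtual int Resize(long long newSize)
		{
			printf("generic resize to %lld\n", newSize);
			return 0;
		}
	};

	struct AnonymousCache : Cache {
		virtual int Resize(long long newSize)
		{
			// Release subclass-specific resources first (in the real
			// code: swap space beyond newSize)...
			printf("freeing swap beyond %lld\n", newSize);
			// ...then let the base class do the common resize work.
			return Cache::Resize(newSize);
		}
	};

	int
	main()
	{
		AnonymousCache cache;
		cache.Resize(10000);
		return 0;
	}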


@@ -110,7 +110,7 @@ public:
			status_t			WriteModified();
			status_t			SetMinimalCommitment(off_t commitment,
									int priority);
-			status_t			Resize(off_t newSize, int priority);
+	virtual	status_t			Resize(off_t newSize, int priority);

			status_t			FlushAndRemoveAllPages();

@@ -463,6 +463,65 @@ VMAnonymousCache::Init(bool canOvercommit, int32 numPrecommittedPages,
}

+status_t
+VMAnonymousCache::Resize(off_t newSize, int priority)
+{
+	// If the cache size shrinks, drop all swap pages beyond the new size.
+	if (fAllocatedSwapSize > 0) {
+		page_num_t oldPageCount = (virtual_end + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
+		swap_block* swapBlock = NULL;
+
+		for (page_num_t pageIndex = (newSize + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
+				pageIndex < oldPageCount && fAllocatedSwapSize > 0;
+				pageIndex++) {
+			WriteLocker locker(sSwapHashLock);
+
+			// Get the swap slot index for the page.
+			swap_addr_t blockIndex = pageIndex & SWAP_BLOCK_MASK;
+			if (swapBlock == NULL || blockIndex == 0) {
+				swap_hash_key key = { this, pageIndex };
+				swapBlock = sSwapHashTable.Lookup(key);
+
+				if (swapBlock == NULL) {
+					pageIndex = ROUNDUP(pageIndex + 1, SWAP_BLOCK_PAGES);
+					continue;
+				}
+			}
+
+			swap_addr_t slotIndex = swapBlock->swap_slots[blockIndex];
+			vm_page* page;
+			if (slotIndex != SWAP_SLOT_NONE
+				&& ((page = LookupPage((off_t)pageIndex * B_PAGE_SIZE)) == NULL
+					|| !page->busy)) {
+				// TODO: We skip (i.e. leak) swap space of busy pages, since
+				// there could be I/O going on (paging in/out). Waiting is
+				// not an option as 1. unlocking the cache means that new
+				// swap pages could be added in a range we've already
+				// cleared (since the cache still has the old size) and 2.
+				// we'd risk a deadlock in case we come from the file cache
+				// and the FS holds the node's write-lock. We should mark
+				// the page invalid and let the one responsible clean up.
+				// There's just no such mechanism yet.
+				swap_slot_dealloc(slotIndex, 1);
+				fAllocatedSwapSize -= B_PAGE_SIZE;
+
+				swapBlock->swap_slots[blockIndex] = SWAP_SLOT_NONE;
+				if (--swapBlock->used == 0) {
+					// All swap pages have been freed -- we can discard the
+					// swap block.
+					sSwapHashTable.RemoveUnchecked(swapBlock);
+					object_cache_free(sSwapBlockCache, swapBlock,
+						CACHE_DONT_WAIT_FOR_MEMORY
+							| CACHE_DONT_LOCK_KERNEL_SPACE);
+				}
+			}
+		}
+	}
+
+	return VMCache::Resize(newSize, priority);
+}

status_t
VMAnonymousCache::Commit(off_t size, int priority)
{
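
To illustrate the loop bounds above: both the new size and the old
virtual_end are rounded up to whole pages, and the swap slots of every page
index in between are candidates for release. A standalone sketch with
made-up sizes and an assumed 4096-byte page (the kernel defines the real
B_PAGE_SIZE and PAGE_SHIFT):

	#include <stdio.h>

	int
	main()
	{
		const unsigned long long pageSize = 4096;	// assumed page size
		const int pageShift = 12;					// log2(pageSize)

		unsigned long long newSize = 10000;	// hypothetical new cache size
		unsigned long long oldEnd = 65536;	// hypothetical old virtual_end

		// Round both byte sizes up to whole pages, as Resize() above does;
		// pages in [firstFreed, oldPageCount) may lose their swap slots.
		unsigned long long firstFreed = (newSize + pageSize - 1) >> pageShift;
		unsigned long long oldPageCount = (oldEnd + pageSize - 1) >> pageShift;

		// Prints "freeing swap for pages 3 to 15".
		printf("freeing swap for pages %llu to %llu\n", firstFreed,
			oldPageCount - 1);
		return 0;
	}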


@@ -38,6 +38,8 @@ public:
									int32 numGuardPages,
									uint32 allocationFlags);
+	virtual	status_t			Resize(off_t newSize, int priority);
+
	virtual	status_t			Commit(off_t size, int priority);
	virtual	bool				HasPage(off_t offset);
	virtual	bool				DebugHasPage(off_t offset);


@@ -1038,8 +1038,6 @@ VMCache::SetMinimalCommitment(off_t commitment, int priority)
status_t
VMCache::Resize(off_t newSize, int priority)
{
-	// TODO: This method must be virtual as VMAnonymousCache needs to free
-	// allocated swap pages!
	TRACE(("VMCache::Resize(cache %p, newSize %Ld) old size %Ld\n",
		this, newSize, this->virtual_end));
	this->AssertLocked();