kernel/vm: Implement swap adoption for cut_area middle case.

Rename MovePageRange to Adopt and group it with Resize/Rebase as it
covers the third, middle cut case.

Implement VMAnonymousCache::Adopt() to actually adopt swap pages. It has to
recreate swap blocks instead of taking them over from the source cache, as the
cut offset or the base offset between the caches may not be swap-block
aligned. This means that adoption may fail due to a memory shortage when
allocating the new swap blocks.
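
As a minimal illustration (assuming the 32-page swap block grouping used by
VMAnonymousCache, i.e. SWAP_BLOCK_PAGES == 32 and SWAP_BLOCK_MASK == 31):

    off_t sourcePageIndex = 37;
    // source block: 37 >> 5 == 1, source slot: 37 & 31 == 5
    off_t pageIndex = sourcePageIndex - 10;
    // destination block: 27 >> 5 == 0, destination slot: 27 & 31 == 27

The slot lands in a different swap block at a different index on the
destination side, so each slot has to be re-inserted per page into a
destination block that may first have to be allocated.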

For the middle cut case it is therefore now possible for the adoption to
fail, in which case the previous cache restore logic is applied. Since
readopting the pages from the second cache can fail for the same reason,
there is a slight chance that the restore fails as well and pages are lost.
For now, just panic in such a case and add a TODO to free memory and retry.

Change-Id: I9a661f00c8f03bbbea2fe6dee90371c68d7951e6
Reviewed-on: https://review.haiku-os.org/c/haiku/+/2588
Reviewed-by: waddlesplash <waddlesplash@gmail.com>
Author: Michael Lotz, 2020-05-05 02:07:05 +02:00 (committed by waddlesplash)
parent 6dc4e9d72e
commit 4e2b49bc0c
5 changed files with 160 additions and 40 deletions

View File

@@ -113,8 +113,6 @@ public:
void MovePage(vm_page* page, off_t offset);
void MovePage(vm_page* page);
void MoveAllPages(VMCache* fromCache);
void MovePageRange(VMCache* source, off_t offset,
off_t size, off_t newOffset);
inline page_num_t WiredPagesCount() const;
inline void IncrementWiredPagesCount();
@@ -134,6 +132,8 @@ public:
int priority);
virtual status_t Resize(off_t newSize, int priority);
virtual status_t Rebase(off_t newBase, int priority);
virtual status_t Adopt(VMCache* source, off_t offset, off_t size,
off_t newOffset);
status_t FlushAndRemoveAllPages();

View File

@@ -562,6 +562,108 @@ VMAnonymousCache::Rebase(off_t newBase, int priority)
}
/*! Moves the swap pages for the given range from the source cache into this
cache. Both caches must be locked.
*/
status_t
VMAnonymousCache::Adopt(VMCache* _source, off_t offset, off_t size,
off_t newOffset)
{
VMAnonymousCache* source = dynamic_cast<VMAnonymousCache*>(_source);
if (source == NULL) {
panic("VMAnonymousCache::Adopt(): adopt from incompatible cache %p "
"requested", _source);
return B_ERROR;
}
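// Convert the byte offsets into page indices; the end index is rounded up
// so that a partial last page is covered.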
off_t pageIndex = newOffset >> PAGE_SHIFT;
off_t sourcePageIndex = offset >> PAGE_SHIFT;
off_t sourceEndPageIndex = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
swap_block* swapBlock = NULL;
WriteLocker locker(sSwapHashLock);
while (sourcePageIndex < sourceEndPageIndex
&& source->fAllocatedSwapSize > 0) {
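// Number of pages from sourcePageIndex to the end of its swap block.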
swap_addr_t left
= SWAP_BLOCK_PAGES - (sourcePageIndex & SWAP_BLOCK_MASK);
swap_hash_key sourceKey = { source, sourcePageIndex };
swap_block* sourceSwapBlock = sSwapHashTable.Lookup(sourceKey);
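// Nothing is swapped out in this source block; skip ahead to the next
// swap block boundary.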
if (sourceSwapBlock == NULL || sourceSwapBlock->used == 0) {
sourcePageIndex += left;
pageIndex += left;
swapBlock = NULL;
continue;
}
for (; left > 0 && sourceSwapBlock->used > 0;
left--, sourcePageIndex++, pageIndex++) {
swap_addr_t blockIndex = pageIndex & SWAP_BLOCK_MASK;
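// Look up the destination swap block for this page, allocating it on
// demand; an allocation failure aborts the adoption with B_NO_MEMORY.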
if (swapBlock == NULL || blockIndex == 0) {
swap_hash_key key = { this, pageIndex };
swapBlock = sSwapHashTable.Lookup(key);
if (swapBlock == NULL) {
swapBlock = (swap_block*)object_cache_alloc(sSwapBlockCache,
CACHE_DONT_WAIT_FOR_MEMORY
| CACHE_DONT_LOCK_KERNEL_SPACE);
if (swapBlock == NULL)
return B_NO_MEMORY;
swapBlock->key.cache = this;
swapBlock->key.page_index
= pageIndex & ~(off_t)SWAP_BLOCK_MASK;
swapBlock->used = 0;
for (uint32 i = 0; i < SWAP_BLOCK_PAGES; i++)
swapBlock->swap_slots[i] = SWAP_SLOT_NONE;
sSwapHashTable.InsertUnchecked(swapBlock);
}
}
swap_addr_t sourceBlockIndex = sourcePageIndex & SWAP_BLOCK_MASK;
swap_addr_t slotIndex
= sourceSwapBlock->swap_slots[sourceBlockIndex];
if (slotIndex == SWAP_SLOT_NONE)
continue;
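// Move the swap slot into the destination block and clear it from the
// source block.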
ASSERT(swapBlock->swap_slots[blockIndex] == SWAP_SLOT_NONE);
swapBlock->swap_slots[blockIndex] = slotIndex;
swapBlock->used++;
fAllocatedSwapSize += B_PAGE_SIZE;
sourceSwapBlock->swap_slots[sourceBlockIndex] = SWAP_SLOT_NONE;
sourceSwapBlock->used--;
source->fAllocatedSwapSize -= B_PAGE_SIZE;
TRACE("adopted slot %#" B_PRIx32 " from %p at page %" B_PRIdOFF
" to %p at page %" B_PRIdOFF "\n", slotIndex, source,
sourcePageIndex, this, pageIndex);
}
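// The source block ran out of used slots before its end; skip the
// remaining pages of this block and force a fresh destination lookup.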
if (left > 0) {
sourcePageIndex += left;
pageIndex += left;
swapBlock = NULL;
}
if (sourceSwapBlock->used == 0) {
// All swap pages have been adopted, we can discard the swap block.
sSwapHashTable.RemoveUnchecked(sourceSwapBlock);
object_cache_free(sSwapBlockCache, sourceSwapBlock,
CACHE_DONT_WAIT_FOR_MEMORY | CACHE_DONT_LOCK_KERNEL_SPACE);
}
}
locker.Unlock();
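// The swap slots have been transferred; let the base class move the
// resident pages of the range.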
return VMCache::Adopt(source, offset, size, newOffset);
}
status_t
VMAnonymousCache::Commit(off_t size, int priority)
{

View File

@@ -41,6 +41,8 @@ public:
virtual status_t Resize(off_t newSize, int priority);
virtual status_t Rebase(off_t newBase, int priority);
virtual status_t Adopt(VMCache* source, off_t offset,
off_t size, off_t newOffset);
virtual status_t Commit(off_t size, int priority);
virtual bool HasPage(off_t offset);

View File

@@ -924,27 +924,6 @@ VMCache::MoveAllPages(VMCache* fromCache)
}
/*! Moves the given pages from their current cache and inserts them into this
cache. Both caches must be locked.
*/
void
VMCache::MovePageRange(VMCache* source, off_t offset, off_t size,
off_t newOffset)
{
page_num_t startPage = offset >> PAGE_SHIFT;
page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
int32 offsetChange = (int32)(newOffset - offset);
VMCachePagesTree::Iterator it = source->pages.GetIterator(startPage, true,
true);
for (vm_page* page = it.Next();
page != NULL && page->cache_offset < endPage;
page = it.Next()) {
MovePage(page, (page->cache_offset << PAGE_SHIFT) + offsetChange);
}
}
/*! Waits until one or more events happened for a given page which belongs to
this cache.
The cache must be locked. It will be unlocked by the method. \a relock
@@ -1264,6 +1243,28 @@ VMCache::Rebase(off_t newBase, int priority)
}
/*! Moves pages in the given range from the source cache into this cache. Both
caches must be locked.
*/
status_t
VMCache::Adopt(VMCache* source, off_t offset, off_t size, off_t newOffset)
{
page_num_t startPage = offset >> PAGE_SHIFT;
page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
off_t offsetChange = newOffset - offset;
VMCachePagesTree::Iterator it = source->pages.GetIterator(startPage, true,
true);
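// Reinsert every page in [startPage, endPage) at its shifted offset.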
for (vm_page* page = it.Next();
page != NULL && page->cache_offset < endPage;
page = it.Next()) {
MovePage(page, (page->cache_offset << PAGE_SHIFT) + offsetChange);
}
return B_OK;
}
/*! You have to call this function with the VMCache lock held. */
status_t
VMCache::FlushAndRemoveAllPages()

View File

@@ -735,31 +735,46 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
secondCache->Lock();
secondCache->temporary = cache->temporary;
// Transfer the concerned pages from the first cache.
secondCache->MovePageRange(cache, secondBase - area->Base()
+ area->cache_offset, secondSize, area->cache_offset);
secondCache->virtual_base = area->cache_offset;
secondCache->virtual_end = area->cache_offset + secondSize;
// Since VMCache::Resize() can temporarily drop the lock, we must
// unlock all lower caches to prevent locking order inversion.
cacheChainLocker.Unlock(cache);
cache->Resize(cache->virtual_base + firstNewSize, priority);
// Don't unlock the cache yet because we might have to resize it
// back.
// Transfer the concerned pages from the first cache.
off_t adoptOffset = area->cache_offset + secondBase - area->Base();
error = secondCache->Adopt(cache, adoptOffset, secondSize,
area->cache_offset);
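// Adoption can fail for swap-backed caches when the needed swap blocks
// cannot be allocated.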
if (error == B_OK) {
// Since VMCache::Resize() can temporarily drop the lock, we must
// unlock all lower caches to prevent locking order inversion.
cacheChainLocker.Unlock(cache);
cache->Resize(cache->virtual_base + firstNewSize, priority);
// Don't unlock the cache yet because we might have to resize it
// back.
// Map the second area.
error = map_backing_store(addressSpace, secondCache,
area->cache_offset, area->name, secondSize, area->wiring,
area->protection, REGION_NO_PRIVATE_MAP, 0,
&addressRestrictions, kernel, &secondArea, NULL);
}
// Map the second area.
error = map_backing_store(addressSpace, secondCache, area->cache_offset,
area->name, secondSize, area->wiring, area->protection,
REGION_NO_PRIVATE_MAP, 0, &addressRestrictions, kernel, &secondArea,
NULL);
if (error != B_OK) {
// Restore the original cache.
cache->Resize(cache->virtual_base + oldSize, priority);
// Move the pages back.
cache->MovePageRange(secondCache, area->cache_offset, secondSize,
secondBase - area->Base() + area->cache_offset);
status_t readoptStatus = cache->Adopt(secondCache,
area->cache_offset, secondSize, adoptOffset);
if (readoptStatus != B_OK) {
// Some (swap) pages have not been moved back and will be lost
// once the second cache is deleted.
panic("failed to restore cache range: %s",
strerror(readoptStatus));
// TODO: Handle out of memory cases by freeing memory and
// retrying.
}
cache->ReleaseRefAndUnlock();
secondCache->ReleaseRefAndUnlock();
addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags);