* vm_set_area_protection() would remap the whole area instead of just the page
  it intended to. That resulted in writable pages where you wouldn't want them
  (i.e. it allowed the area to change pages in lower caches).
* We were losing modified pages: vm_unmap_pages() sometimes has to preserve
  the modified flag (e.g. when called from a page fault); the new call sites
  are summarized right after this list.
* Both of these bugs caused stealing active pages to crash applications - and,
  although less likely, the same could have happened when stealing inactive
  pages. Therefore, I've enabled stealing active pages again.
* The page writer now pushes the pages of busy vnodes to the end of the queue,
  so that it won't pick them up again too soon (the vnode destruction would
  be in the process of writing those pages back, anyway).
* The page thief now triggers the page writer to run once it has to steal
  active pages. This might be a bit too aggressive, though.
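
  In short (summarized from the hunks below, not extra code in this commit):
  vm_unmap_pages() gains a preserveModified argument, and each call site now
  states whether modified pages must survive the unmap:

    status_t vm_unmap_pages(struct vm_area *area, addr_t base, size_t length,
        bool preserveModified);

    // delete_area(): only non-temporary (i.e. typically vnode-backed) caches
    // still need their modified pages written back, so preserve them there only
    vm_unmap_pages(area, area->base, area->size, !area->cache->temporary);

    // vm_soft_fault(), copy-on-write unmap: the page stays in its cache and
    // may be dirty, so always preserve the modified flag
    vm_unmap_pages(area, address, B_PAGE_SIZE, true);

    // resize_area(): pages beyond the new size are discarded, so there is
    // presumably nothing left worth preserving
    vm_unmap_pages(current, current->base + newSize, oldSize - newSize, false);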


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22495 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler 2007-10-09 11:05:50 +00:00
parent 0d871d3c5f
commit 477c9d1dc0
3 changed files with 45 additions and 12 deletions


@@ -131,7 +131,8 @@ bool vm_test_map_modification(struct vm_page *page);
 int32 vm_test_map_activation(struct vm_page *page, bool *_modified);
 void vm_clear_map_flags(struct vm_page *page, uint32 flags);
 void vm_remove_all_page_mappings(struct vm_page *page, uint32 *_flags);
-status_t vm_unmap_pages(struct vm_area *area, addr_t base, size_t length);
+status_t vm_unmap_pages(struct vm_area *area, addr_t base, size_t length,
+    bool preserveModified);
 status_t vm_map_page(struct vm_area *area, struct vm_page *page, addr_t address,
     uint32 protection);
 status_t vm_get_physical_page(addr_t paddr, addr_t *vaddr, uint32 flags);


@@ -2061,10 +2061,10 @@ delete_area(vm_address_space *addressSpace, vm_area *area)
     // still exists in the area list.
     // Unmap the virtual address space the area occupied
-    vm_unmap_pages(area, area->base, area->size);
+    vm_unmap_pages(area, area->base, area->size, !area->cache->temporary);
-    // TODO: do that only for vnode stores
-    vm_cache_write_modified(area->cache, false);
+    if (!area->cache->temporary)
+        vm_cache_write_modified(area->cache, false);
     arch_vm_unset_memory_type(area);
     remove_area_from_address_space(addressSpace, area);
@@ -2333,7 +2333,7 @@ vm_set_area_protection(team_id team, area_id areaID, uint32 newProtection)
         while (page) {
             addr_t address = area->base
                 + (page->cache_offset << PAGE_SHIFT);
-            map->ops->protect(map, area->base, area->base + area->size,
+            map->ops->protect(map, address, address - 1 + B_PAGE_SIZE,
                 newProtection);
             page = page->cache_next;
         }
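
For scale, a small worked example of the corrected protect() range above
(assuming 4 KB pages, i.e. B_PAGE_SIZE == 4096 and PAGE_SHIFT == 12; the
concrete base address and cache offset are made up for illustration): the call
now covers exactly the one page found in the cache, not the whole area.

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
    const uintptr_t kPageSize = 4096;   // B_PAGE_SIZE on x86
    const uint32_t kPageShift = 12;     // PAGE_SHIFT for 4 KB pages
    uintptr_t areaBase = 0x80000000;    // hypothetical area base
    uint32_t cacheOffset = 3;           // page index within the cache

    // same arithmetic as the new code in the hunk above
    uintptr_t address = areaBase + ((uintptr_t)cacheOffset << kPageShift);
    printf("protect 0x%lx - 0x%lx\n", (unsigned long)address,
        (unsigned long)(address - 1 + kPageSize));
    // prints: protect 0x80003000 - 0x80003fff (one page), whereas the old
    // code re-protected everything from area->base to area->base + area->size
    return 0;
}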
@@ -2505,7 +2505,7 @@ vm_remove_all_page_mappings(vm_page *page, uint32 *_flags)
 status_t
-vm_unmap_pages(vm_area *area, addr_t base, size_t size)
+vm_unmap_pages(vm_area *area, addr_t base, size_t size, bool preserveModified)
 {
     vm_translation_map *map = &area->address_space->translation_map;
     addr_t end = base + (size - 1);
@@ -2535,6 +2535,29 @@ vm_unmap_pages(vm_area *area, addr_t base, size_t size)
     }
     map->ops->unmap(map, base, end);
+    if (preserveModified) {
+        map->ops->flush(map);
+        for (addr_t virtualAddress = base; virtualAddress < end;
+                virtualAddress += B_PAGE_SIZE) {
+            addr_t physicalAddress;
+            uint32 flags;
+            status_t status = map->ops->query(map, virtualAddress,
+                &physicalAddress, &flags);
+            if (status < B_OK || (flags & PAGE_PRESENT) == 0)
+                continue;
+            vm_page *page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
+            if (page == NULL) {
+                panic("area %p looking up page failed for pa 0x%lx\n", area,
+                    physicalAddress);
+            }
+            if ((flags & PAGE_MODIFIED) != 0
+                && page->state != PAGE_STATE_MODIFIED)
+                vm_page_set_state(page, PAGE_STATE_MODIFIED);
+        }
+    }
     map->ops->unlock(map);
     if (area->wiring == B_NO_LOCK) {
@@ -4251,7 +4274,7 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
     // In case this is a copy-on-write page, we need to unmap it from the area now
     if (isWrite && page->cache == topCache)
-        vm_unmap_pages(area, address, B_PAGE_SIZE);
+        vm_unmap_pages(area, address, B_PAGE_SIZE, true);
     // TODO: there is currently no mechanism to prevent a page being mapped
     //  more than once in case of a second page fault!
@@ -4907,8 +4930,10 @@ resize_area(area_id areaID, size_t newSize)
         current->size = newSize;
         // we also need to unmap all pages beyond the new size, if the area has shrinked
-        if (newSize < oldSize)
-            vm_unmap_pages(current, current->base + newSize, oldSize - newSize);
+        if (newSize < oldSize) {
+            vm_unmap_pages(current, current->base + newSize, oldSize - newSize,
+                false);
+        }
     }
     if (status == B_OK)


@@ -688,8 +688,14 @@ page_writer(void* /*unused*/)
             // we need our own reference to the store, as it might
             // currently be destructed
             if (cache->store->ops->acquire_unreferenced_ref(cache->store)
-                    != B_OK)
+                    != B_OK) {
+                // put it to the tail of the queue, then, so that we
+                // won't touch it too soon again
+                vm_page_requeue(page, true);
+                cacheLocker.Unlock();
+                thread_yield();
                 continue;
+            }
         }
         locker.Lock();
@@ -821,12 +827,13 @@ page_thief(void* /*unused*/)
                 score = 127;
                 desperate = true;
             } else {
-                // TODO: for now, we never steal active pages
-                break;
+                stealActive = true;
+                score = 5;
+                steal = 5;
             }
             // let the page writer clear some pages for reuse
             release_sem_etc(sWriterWaitSem, 1, B_DO_NOT_RESCHEDULE);
             continue;
         }
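
The release_sem_etc() call in the last hunk is what "triggers the page writer":
presumably the page writer blocks in acquire_sem_etc() on sWriterWaitSem with a
relative timeout, so releasing the semaphore only makes it run earlier than its
next scheduled pass. A minimal user-land sketch of that wake-up pattern, using
the public semaphore/thread API from <OS.h> (the writer's acquire side and all
timings here are assumptions, not code from this commit):

#include <OS.h>
#include <stdio.h>

static sem_id sWriterWaitSem;

static int32
page_writer_sketch(void* /*unused*/)
{
    for (int32 i = 0; i < 5; i++) {
        // run once a second, or immediately when somebody kicks the semaphore
        status_t status = acquire_sem_etc(sWriterWaitSem, 1,
            B_RELATIVE_TIMEOUT, 1000000);
        printf("writer pass (%s)\n",
            status == B_TIMED_OUT ? "timeout" : "kicked");
    }
    return 0;
}

int
main(void)
{
    sWriterWaitSem = create_sem(0, "page writer");
    thread_id writer = spawn_thread(page_writer_sketch, "page writer",
        B_NORMAL_PRIORITY, NULL);
    resume_thread(writer);

    snooze(100000);
    // what the page thief now does when it has to steal active pages
    release_sem_etc(sWriterWaitSem, 1, B_DO_NOT_RESCHEDULE);

    status_t result;
    wait_for_thread(writer, &result);
    return 0;
}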