* vm_remove_all_page_mappings() now returns an accumulation of the flags of
  the unmapped page.
* This is needed by everyone who calls this to make sure modifications to a
  page aren't ignored. Namely, the page scanner and the page thief were
  affected.
* Cleaned up locking the page's cache a bit in page_thief(); there is now a
  helper class that takes care of everything.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22438 a95241bf-73f2-0310-859d-f6bbb57e9c96
commit 9d8c209014 (parent 06cd40f399)
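For illustration, here is a minimal stand-alone model (not part of the commit) of the new calling convention: the out-parameter may be NULL, and per-mapping flags are accumulated with bitwise OR so that a modification recorded in any single mapping survives the unmap. All types and names below are hypothetical stand-ins; only the PAGE_MODIFIED/PAGE_ACCESSED recheck and the NULL-tolerant out-parameter mirror the diff that follows.

	// model_remove_mappings.cpp -- hypothetical model, not kernel code
	#include <cstdint>
	#include <cstdio>
	#include <vector>

	static const uint32_t PAGE_MODIFIED = 0x04;	// stand-in flag values
	static const uint32_t PAGE_ACCESSED = 0x08;

	struct Mapping {
		uint32_t flags;	// per-mapping accessed/modified bits
	};

	// Mirrors the new convention: remove every mapping, OR all of their
	// flags together, and report the result through an optional
	// out-parameter.
	static void
	remove_all_mappings(std::vector<Mapping>& mappings, uint32_t* _flags)
	{
		uint32_t accumulatedFlags = 0;

		for (const Mapping& mapping : mappings)
			accumulatedFlags |= mapping.flags;
		mappings.clear();

		if (_flags != NULL)
			*_flags = accumulatedFlags;
	}

	int
	main()
	{
		std::vector<Mapping> mappings
			= { { 0 }, { PAGE_ACCESSED }, { PAGE_MODIFIED | PAGE_ACCESSED } };

		uint32_t flags;
		remove_all_mappings(mappings, &flags);

		// the caller rechecks eventual last minute changes, as the page
		// scanner and page thief now do
		if ((flags & PAGE_MODIFIED) != 0)
			printf("page was modified, don't steal it\n");
		else if ((flags & PAGE_ACCESSED) != 0)
			printf("page is in active use, don't steal it\n");
		else
			printf("page can be stolen\n");
		return 0;
	}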
@@ -130,7 +130,7 @@ status_t vm_get_page_mapping(team_id team, addr_t vaddr, addr_t *paddr);
 bool vm_test_map_modification(struct vm_page *page);
 int32 vm_test_map_activation(struct vm_page *page, bool *_modified);
 void vm_clear_map_flags(struct vm_page *page, uint32 flags);
-void vm_remove_all_page_mappings(struct vm_page *page);
+void vm_remove_all_page_mappings(struct vm_page *page, uint32 *_flags);
 status_t vm_unmap_pages(struct vm_area *area, addr_t base, size_t length);
 status_t vm_map_page(struct vm_area *area, struct vm_page *page, addr_t address,
 	uint32 protection);
@@ -2433,9 +2433,14 @@ vm_clear_map_flags(vm_page *page, uint32 flags)
 }
 
 
+/*!	Removes all mappings from a page.
+	After you've called this function, the page is unmapped from memory.
+	The accumulated page flags of all mappings can be found in \a _flags.
+*/
 void
-vm_remove_all_page_mappings(vm_page *page)
+vm_remove_all_page_mappings(vm_page *page, uint32 *_flags)
 {
+	uint32 accumulatedFlags = 0;
 	MutexLocker locker(sMappingLock);
 
 	vm_page_mappings queue;
@@ -2446,13 +2451,19 @@ vm_remove_all_page_mappings(vm_page *page)
 	while ((mapping = iterator.Next()) != NULL) {
 		vm_area *area = mapping->area;
 		vm_translation_map *map = &area->address_space->translation_map;
+		addr_t physicalAddress;
+		uint32 flags;
+
 		map->ops->lock(map);
 		addr_t base = area->base + (page->cache_offset << PAGE_SHIFT);
 		map->ops->unmap(map, base, base + (B_PAGE_SIZE - 1));
 		map->ops->flush(map);
+		map->ops->query(map, base, &physicalAddress, &flags);
 		map->ops->unlock(map);
 
 		area->mappings.Remove(mapping);
+
+		accumulatedFlags |= flags;
 	}
 
 	locker.Unlock();
@@ -2462,6 +2473,9 @@ vm_remove_all_page_mappings(vm_page *page)
 	while ((mapping = queue.RemoveHead()) != NULL) {
 		free(mapping);
 	}
+
+	if (_flags != NULL)
+		*_flags = accumulatedFlags;
 }
 
 
@@ -139,7 +139,15 @@ check_page_activation(int32 index)
 	page->usage_count--;
 
 	if (page->usage_count < 0) {
-		vm_remove_all_page_mappings(page);
+		uint32 flags;
+		vm_remove_all_page_mappings(page, &flags);
+
+		// recheck eventual last minute changes
+		if ((flags & PAGE_MODIFIED) != 0 && page->state != PAGE_STATE_MODIFIED)
+			vm_page_set_state(page, PAGE_STATE_MODIFIED);
+		if ((flags & PAGE_ACCESSED) != 0 && ++page->usage_count >= 0)
+			return false;
+
 		if (page->state == PAGE_STATE_MODIFIED)
 			vm_page_schedule_write_page(page);
 		else
@@ -842,34 +842,67 @@ page_thief(void* /*unused*/)
 
 		// try to lock the page's cache
 
-		vm_cache* cache = vm_cache_acquire_page_cache_ref(page);
-		if (cache == NULL)
+		class PageCacheTryLocker {
+		public:
+			PageCacheTryLocker(vm_page *page)
+				: fIsLocked(false)
+			{
+				fCache = vm_cache_acquire_page_cache_ref(page);
+				if (fCache != NULL
+					&& mutex_trylock(&fCache->lock) == B_OK) {
+					if (fCache == page->cache)
+						fIsLocked = true;
+					else
+						mutex_unlock(&fCache->lock);
+				}
+			}
+
+			~PageCacheTryLocker()
+			{
+				if (fIsLocked)
+					mutex_unlock(&fCache->lock);
+
+				vm_cache_release_ref(fCache);
+			}
+
+			bool IsLocked() { return fIsLocked; }
+
+		private:
+			vm_cache *fCache;
+			bool fIsLocked;
+		} cacheLocker(page);
+
+		if (!cacheLocker.IsLocked())
 			continue;
 
-		if (mutex_trylock(&cache->lock) != B_OK) {
-			vm_cache_release_ref(cache);
-			continue;
-		}
 		if (page->state != PAGE_STATE_INACTIVE) {
 			// TODO: if this is an active page (as in stealActive), we need
 			// to unmap it and check if it's modified in an atomic operation.
 			// For now, we'll just ignore it, even though this might let
 			// vm_page_allocate_page() wait forever...
-			mutex_unlock(&cache->lock);
-			vm_cache_release_ref(cache);
 			continue;
 		}
 
+		// recheck eventual last minute changes
+		uint32 flags;
+		vm_remove_all_page_mappings(page, &flags);
+		if ((flags & PAGE_MODIFIED) != 0) {
+			// page was modified, don't steal it
+			vm_page_set_state(page, PAGE_STATE_MODIFIED);
+			continue;
+		} else if ((flags & PAGE_ACCESSED) != 0) {
+			// page is in active use, don't steal it
+			vm_page_set_state(page, PAGE_STATE_ACTIVE);
+			continue;
+		}
+
 		// we can now steal this page
 
 		//dprintf("  steal page %p from cache %p\n", page, cache);
-		vm_remove_all_page_mappings(page);
-		vm_cache_remove_page(cache, page);
+
+		vm_cache_remove_page(page->cache, page);
 		vm_page_set_state(page, PAGE_STATE_FREE);
 		steal--;
-
-		mutex_unlock(&cache->lock);
-		vm_cache_release_ref(cache);
 	}
 }
 