* Mapping a page might actually need memory - since we usually hold locks that
  interfere with the page thief, we always need to reserve the required pages
  upfront. I introduced a function to the vm_translation_map layer that
  estimates how many pages a mapping might need at maximum. All functions that
  map a page now call this and reserve the needed pages upfront (see the
  sketch below this list). It might not be a nice solution, but it works.
* The page thief could run into a panic when trying to call vm_cache_release_ref()
  on a non-existing (NULL) cache.
* Also, it will now ignore wired active pages.
* There is still a race condition between the page writer and the vnode
  destruction - writing a page back needs a valid vnode, but that might just
  have been deleted.
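
To make the first point concrete, here is a minimal sketch of the reserve-then-map
pattern the callers now follow (compare the vm_map_physical_memory and vm_soft_fault
hunks below). The ops table, vm_page_reserve_pages(), and vm_page_unreserve_pages()
are the ones visible in the diff; map_one_page() itself and its parameters are made
up purely for illustration:

static status_t
map_one_page(vm_translation_map *map, addr_t virtualAddress,
	addr_t physicalAddress, uint32 protection)
{
	// ask the architecture specific code how many pages this mapping
	// might need at maximum (e.g. for page tables it has to allocate),
	// and reserve them before any lock is held
	size_t reservePages = map->ops->map_max_pages_need(map, virtualAddress,
		virtualAddress);
	vm_page_reserve_pages(reservePages);

	map->ops->lock(map);
	status_t status = map->ops->map(map, virtualAddress, physicalAddress,
		protection);
	map->ops->unlock(map);

	// hand back whatever was reserved but not actually consumed
	vm_page_unreserve_pages(reservePages);
	return status;
}

The reservation has to happen before any cache or translation map lock is taken,
so that the page daemon and the page thief can still make progress while the
mapping is in flight.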


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22455 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler 2007-10-06 11:18:21 +00:00
parent aa4d8ee6ee
commit 0e18334057
5 changed files with 59 additions and 11 deletions

View File

@@ -30,6 +30,7 @@ typedef struct vm_translation_map_ops {
void (*destroy)(vm_translation_map *map);
status_t (*lock)(vm_translation_map *map);
status_t (*unlock)(vm_translation_map *map);
+size_t (*map_max_pages_need)(vm_translation_map *map, addr_t start, addr_t end);
status_t (*map)(vm_translation_map *map, addr_t va, addr_t pa,
uint32 attributes);
status_t (*unmap)(vm_translation_map *map, addr_t start, addr_t end);

View File

@@ -1,5 +1,5 @@
/*
-* Copyright 2003-2006, Axel Dörfler, axeld@pinc-software.de.
+* Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001, Travis Geiselbrecht. All rights reserved.
@@ -205,6 +205,13 @@ fill_page_table_entry(page_table_entry *entry, uint32 virtualSegmentID,
}
+static size_t
+map_max_pages_need(vm_translation_map *map, addr_t start, addr_t end)
+{
+return 0;
+}
static status_t
map_tmap(vm_translation_map *map, addr_t virtualAddress, addr_t physicalAddress, uint32 attributes)
{
@@ -454,6 +461,7 @@ static vm_translation_map_ops tmap_ops = {
destroy_tmap,
lock_tmap,
unlock_tmap,
+map_max_pages_need,
map_tmap,
unmap_tmap,
query_tmap,

View File

@@ -325,6 +325,13 @@ put_page_table_entry_in_pgtable(page_table_entry *entry,
}
+static size_t
+map_max_pages_need(vm_translation_map */*map*/, addr_t start, addr_t end)
+{
+return VADDR_TO_PDENT(end) + 1 - VADDR_TO_PDENT(start);
+}
static status_t
map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
{
@@ -352,7 +359,7 @@ map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
vm_page *page;
// we need to allocate a pgtable
-page = vm_page_allocate_page(PAGE_STATE_CLEAR, false);
+page = vm_page_allocate_page(PAGE_STATE_CLEAR, true);
// mark the page WIRED
vm_page_set_state(page, PAGE_STATE_WIRED);
@@ -737,6 +744,7 @@ static vm_translation_map_ops tmap_ops = {
destroy_tmap,
lock_tmap,
unlock_tmap,
+map_max_pages_need,
map_tmap,
unmap_tmap,
query_tmap,

View File

@@ -1483,6 +1483,11 @@ vm_create_anonymous_area(team_id team, const char *name, void **address,
case B_FULL_LOCK:
{
+vm_translation_map *map = &addressSpace->translation_map;
+size_t reservePages = map->ops->map_max_pages_need(map,
+area->base, area->base + (area->size - 1));
+vm_page_reserve_pages(reservePages);
// Allocate and map all pages for this area
mutex_lock(&cache->lock);
@@ -1510,6 +1515,7 @@ vm_create_anonymous_area(team_id team, const char *name, void **address,
}
mutex_unlock(&cache->lock);
+vm_page_unreserve_pages(reservePages);
break;
}
@@ -1561,9 +1567,12 @@ vm_create_anonymous_area(team_id team, const char *name, void **address,
// map them in the address space
vm_translation_map *map = &addressSpace->translation_map;
addr_t physicalAddress = page->physical_page_number * B_PAGE_SIZE;
-addr_t virtualAddress;
+addr_t virtualAddress = area->base;
+size_t reservePages = map->ops->map_max_pages_need(map,
+virtualAddress, virtualAddress + (area->size - 1));
off_t offset = 0;
+vm_page_reserve_pages(reservePages);
mutex_lock(&cache->lock);
map->ops->lock(map);
@@ -1587,6 +1596,7 @@ vm_create_anonymous_area(team_id team, const char *name, void **address,
map->ops->unlock(map);
mutex_unlock(&cache->lock);
+vm_page_unreserve_pages(reservePages);
break;
}
@@ -1687,6 +1697,10 @@ vm_map_physical_memory(team_id team, const char *name, void **_address,
// make sure our area is mapped in completely
vm_translation_map *map = &locker.AddressSpace()->translation_map;
+size_t reservePages = map->ops->map_max_pages_need(map, area->base,
+area->base + (size - 1));
+vm_page_reserve_pages(reservePages);
map->ops->lock(map);
for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
@@ -1695,6 +1709,7 @@ vm_map_physical_memory(team_id team, const char *name, void **_address,
}
map->ops->unlock(map);
+vm_page_unreserve_pages(reservePages);
}
if (status < B_OK)
@@ -1966,6 +1981,10 @@ vm_clone_area(team_id team, const char *name, void **address,
map->ops->unlock(map);
map = &targetAddressSpace->translation_map;
+size_t reservePages = map->ops->map_max_pages_need(map,
+newArea->base, newArea->base + (newArea->size - 1));
+vm_page_reserve_pages(reservePages);
map->ops->lock(map);
for (addr_t offset = 0; offset < newArea->size;
@@ -1975,7 +1994,13 @@ vm_clone_area(team_id team, const char *name, void **address,
}
map->ops->unlock(map);
+vm_page_unreserve_pages(reservePages);
} else {
+vm_translation_map *map = &targetAddressSpace->translation_map;
+size_t reservePages = map->ops->map_max_pages_need(map,
+newArea->base, newArea->base + (newArea->size - 1));
+vm_page_reserve_pages(reservePages);
// map in all pages from source
for (vm_page *page = cache->page_list; page != NULL;
page = page->cache_next) {
@@ -1983,6 +2008,8 @@ vm_clone_area(team_id team, const char *name, void **address,
+ ((page->cache_offset << PAGE_SHIFT) - newArea->cache_offset),
protection);
}
+vm_page_unreserve_pages(reservePages);
}
}
if (status == B_OK)
@@ -2549,6 +2576,7 @@ vm_unmap_pages(vm_area *area, addr_t base, size_t size)
}
+/*! When calling this function, you need to have pages reserved! */
status_t
vm_map_page(vm_area *area, vm_page *page, addr_t address, uint32 protection)
{
@@ -4193,12 +4221,14 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
// The top most cache has no fault handler, so let's see if the cache or its sources
// already have the page we're searching for (we're going from top to bottom)
-vm_page_reserve_pages(2);
+vm_translation_map *map = &addressSpace->translation_map;
+size_t reservePages = 2 + map->ops->map_max_pages_need(map,
+originalAddress, originalAddress);
+vm_page_reserve_pages(reservePages);
// we may need up to 2 pages - reserving them upfront makes sure
// we don't have any cache locked, so that the page daemon/thief
// can do their job without problems
-vm_translation_map *map = &addressSpace->translation_map;
vm_dummy_page dummyPage;
dummyPage.cache = NULL;
dummyPage.state = PAGE_STATE_INACTIVE;
@@ -4252,7 +4282,7 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
}
vm_cache_release_ref(topCache);
-vm_page_unreserve_pages(2);
+vm_page_unreserve_pages(reservePages);
return status;
}

View File

@@ -776,7 +776,8 @@ page_thief(void* /*unused*/)
enqueue_page(&sActivePageQueue, page);
if ((page->state == PAGE_STATE_INACTIVE
-|| stealActive && page->state == PAGE_STATE_ACTIVE)
+|| (stealActive && page->state == PAGE_STATE_ACTIVE
+&& page->wired_count == 0))
&& page->usage_count <= score)
break;
}
@@ -826,7 +827,7 @@
{
if (fIsLocked)
mutex_unlock(&fCache->lock);
+if (fCache != NULL)
vm_cache_release_ref(fCache);
}
@@ -863,7 +864,7 @@ page_thief(void* /*unused*/)
// we can now steal this page
//dprintf(" steal page %p from cache %p\n", page, cache);
//dprintf(" steal page %p from cache %p\n", page, page->cache);
vm_cache_remove_page(page->cache, page);
vm_page_set_state(page, PAGE_STATE_FREE);