Got rid of two ToDo items: instead of clearing the PAGE_MODIFIED flag after having
written the page, we now do it before, so that it cannot lose any changed data
anymore; it doesn't matter if the page is written to while writing it back, the
worst thing that can happen is that we write the same page twice. Also, we no longer
rely on the PAGE_MODIFIED bit; we now check all mappings of that page
to find all modified pages, no matter how far the (currently disabled) page
daemon has gotten.
Also, destroying an area will now result in writing back changed pages - this
is only really important for memory mapped files, though, and should probably
be avoided for other vm_store types.
Minor cleanup.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@15597 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Axel Dörfler 2005-12-19 23:01:11 +00:00
parent 8d7b8e8ce7
commit 9d845483cf
3 changed files with 48 additions and 29 deletions

View File

@ -91,11 +91,11 @@ typedef struct vm_area {
off_t cache_offset;
struct vm_address_space *aspace;
struct vm_area *aspace_next;
struct vm_area *aspace_next;
struct vm_virtual_map *map;
struct vm_area *cache_next;
struct vm_area *cache_prev;
struct vm_area *hash_next;
struct vm_area *cache_next;
struct vm_area *cache_prev;
struct vm_area *hash_next;
} vm_area;
// virtual map (1 per address space)

View File

@ -832,7 +832,8 @@ vm_create_anonymous_area(aspace_id aid, const char *name, void **address,
switch (wiring) {
case B_NO_LOCK:
case B_LAZY_LOCK:
break; // do nothing
// do nothing - the pages are mapped in as needed
break;
case B_FULL_LOCK:
{
@ -893,6 +894,8 @@ vm_create_anonymous_area(aspace_id aid, const char *name, void **address,
case B_CONTIGUOUS:
{
// We have already allocated our contiguous pages run, so we can now just
// map them in the address space
addr_t physicalAddress = page->ppn * B_PAGE_SIZE;
addr_t virtualAddress;
off_t offset = 0;
@ -1338,6 +1341,8 @@ _vm_put_area(vm_area *area, bool aspaceLocked)
aspace = area->aspace;
vm_cache_write_modified(area->cache_ref);
arch_vm_unset_memory_type(area);
remove_area_from_virtual_map(aspace, area, aspaceLocked);

View File

@ -163,52 +163,63 @@ vm_page_write_modified(vm_cache *cache)
// ToDo: join adjacent pages into one vec list
for (; page; page = page->cache_next) {
status_t status;
bool gotPage = false;
status_t status;
vm_area *area;
cpu_status state = disable_interrupts();
acquire_spinlock(&page_lock);
// ToDo: if this is a memory mapped page, the PAGE_MODIFIED bit might
// not yet have been propagated to the page state!
if (page->state == PAGE_STATE_MODIFIED) {
remove_page_from_queue(&page_modified_queue, page);
page->state = PAGE_STATE_BUSY;
gotPage = true;
// ToDo: just setting PAGE_STATE_BUSY is not enough, we would also
// need to remove all mappings of this page - else, you could still
// write to this page.
}
release_spinlock(&page_lock);
restore_interrupts(state);
// We may have a modified page - however, while we're writing it back, the page
// is still mapped. In order not to lose any changes to the page, we mark it clean
// before actually writing it back; if writing the page fails for some reason, we
// just keep it in the modified page list, but that should happen only rarely.
// If the page is changed after we cleared the dirty flag, but before we had
// the chance to write it back, then we'll write it again later - that will
// probably not happen that often, though.
for (area = page->cache->ref->areas; area; area = area->cache_next) {
if (page->offset >= area->cache_offset
&& page->offset < area->cache_offset + area->size) {
vm_translation_map *map = &area->aspace->translation_map;
map->ops->lock(map);
if (!gotPage) {
// Check if the PAGE_MODIFIED bit hasn't been propagated yet
addr_t physicalAddress;
uint32 flags;
map->ops->query(map, page->offset - area->cache_offset + area->base,
&physicalAddress, &flags);
if (flags & PAGE_MODIFIED)
gotPage = true;
}
if (gotPage) {
// clear the modified flag
map->ops->clear_flags(map, page->offset - area->cache_offset
+ area->base, PAGE_MODIFIED);
}
map->ops->unlock(map);
}
}
if (!gotPage)
continue;
// got modified page, let's write it back
mutex_unlock(&cache->ref->lock);
status = write_page(page);
mutex_lock(&cache->ref->lock);
if (status == B_OK) {
vm_area *area;
// It's written back now, so we can clear the modified flag in all mappings
for (area = page->cache->ref->areas; area; area = area->cache_next) {
if (page->offset >= area->cache_offset
&& page->offset < area->cache_offset + area->size) {
vm_translation_map *map = &area->aspace->translation_map;
map->ops->lock(map);
map->ops->clear_flags(map, page->offset - area->cache_offset + area->base,
PAGE_MODIFIED);
map->ops->unlock(map);
}
}
// put it into the active queue
state = disable_interrupts();
@ -223,6 +234,9 @@ vm_page_write_modified(vm_cache *cache)
release_spinlock(&page_lock);
restore_interrupts(state);
} else {
// We don't have to put the PAGE_MODIFIED bit back, as it's still
// in the modified pages list.
}
}