* Fixed (or rather, worked around) a deadlock in the VM: when a file was
  resized but still had dirty pages to be written back, vm_cache_resize()
  (which is called with the inode lock held) deadlocked with the page
  writer.
* Therefore, I reintroduced busy_writing: it is set by everything that
  writes back pages (vm_page_write_modified_pages() and the page writer)
  and is checked in vm_cache_resize() - other functions are not affected
  for now, AFAICT.
* vm_cache_resize() clears that flag instead of waiting. The writer checks
  it again after it has written back the page (which will fail when the
  page is outside the file bounds): if the flag has been cleared, it gets
  rid of the page; if the file has been resized again in the meantime, the
  write will succeed and we keep the page around (see the sketch below).


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@23334 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler 2008-01-09 22:25:21 +00:00
parent 2c5faa49db
commit 3e5b9076f9
3 changed files with 48 additions and 13 deletions
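
To make the handshake easier to follow, here is a minimal user-space sketch
of the protocol the commit message describes. It is not Haiku code: Page,
Cache, writer_pick_up(), cache_resize(), write_back() and writer_finish()
are invented stand-ins, std::mutex replaces the kernel cache lock, and the
condition-variable wait for pages that are busy for other reasons is left
out.

#include <cstdio>
#include <list>
#include <mutex>

struct Page {
	int offset;          // page index within the file
	bool busy;           // someone is operating on this page
	bool busy_writing;   // the page writer is currently writing it back
};

struct Cache {
	std::mutex lock;
	std::list<Page> pages;
	int pageCount;       // current file size in pages
};

// Writer side, step 1: pick a modified page and mark it busy for writing.
static Page*
writer_pick_up(Cache& cache)
{
	std::lock_guard<std::mutex> locker(cache.lock);
	Page& page = cache.pages.back();
	page.busy = true;
	page.busy_writing = true;
	return &page;
}

// Resize side: called with the caller's inode lock held, so it must not
// wait for pages that are being written back. It clears busy_writing
// instead, which tells the writer to drop the page afterwards.
static void
cache_resize(Cache& cache, int newPageCount)
{
	std::lock_guard<std::mutex> locker(cache.lock);
	cache.pageCount = newPageCount;

	for (auto it = cache.pages.begin(); it != cache.pages.end();) {
		if (it->offset < newPageCount) {
			++it;                       // page is still within the file
		} else if (it->busy_writing) {
			it->busy_writing = false;   // notify the writer to free the page
			++it;
		} else if (!it->busy) {
			it = cache.pages.erase(it); // plain out-of-range page: drop it now
		} else {
			// A page that is busy for another reason would be waited for
			// here (condition variable); that path is left out of the sketch.
			++it;
		}
	}
}

// The write-back itself fails when the page lies outside the file bounds.
// (A real writer does the I/O without holding the cache lock.)
static bool
write_back(const Cache& cache, const Page& page)
{
	return page.offset < cache.pageCount;
}

// Writer side, step 2: after the write, re-check busy_writing under the lock.
static void
writer_finish(Cache& cache, Page* page, bool written)
{
	std::lock_guard<std::mutex> locker(cache.lock);
	if (!written && !page->busy_writing) {
		// cache_resize() cleared the flag: the page has gone invalid
		std::printf("page %d discarded after resize\n", page->offset);
		cache.pages.remove_if([page](const Page& p) { return &p == page; });
	} else {
		// the write succeeded (the file may have grown again in the
		// meantime), so the page stays in the cache
		page->busy_writing = false;
		page->busy = false;
	}
}

int
main()
{
	Cache cache;
	cache.pages = { { 0, false, false }, { 7, false, false } };
	cache.pageCount = 8;

	Page* page = writer_pick_up(cache);        // the writer grabs page 7
	cache_resize(cache, 4);                    // the file shrinks meanwhile
	bool written = write_back(cache, *page);   // fails: page 7 is out of bounds
	writer_finish(cache, page, written);       // page 7 is discarded
	return 0;
}

Run single-threaded, the calls in main() replay the interesting interleaving:
the writer picks up a page, the file shrinks underneath it, the write fails,
and the cleared flag tells the writer to drop the page instead of keeping it.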


@@ -100,6 +100,7 @@ struct vm_page {
 	uint8 is_cleared : 1;
 		// is currently only used in vm_page_allocate_page_run()
+	uint8 busy_writing : 1;
 	uint16 wired_count;
 	int8 usage_count;


@@ -500,15 +500,23 @@ vm_cache_resize(vm_cache *cache, off_t newSize)
 		if (page->cache_offset >= newPageCount) {
 			if (page->state == PAGE_STATE_BUSY) {
-				// wait for page to become unbusy
-				ConditionVariableEntry<vm_page> entry;
-				entry.Add(page);
-				mutex_unlock(&cache->lock);
-				entry.Wait();
-				mutex_lock(&cache->lock);
+				if (page->busy_writing) {
+					// We cannot wait for the page to become available
+					// as we might cause a deadlock this way
+					page->busy_writing = false;
+						// this will notify the writer to free the page
+					page = next;
+				} else {
+					// wait for page to become unbusy
+					ConditionVariableEntry<vm_page> entry;
+					entry.Add(page);
+					mutex_unlock(&cache->lock);
+					entry.Wait();
+					mutex_lock(&cache->lock);
 
-				// restart from the start of the list
-				page = cache->page_list;
+					// restart from the start of the list
+					page = cache->page_list;
+				}
 				continue;
 			}


@@ -835,6 +835,9 @@ write_page(vm_page *page, bool fsReenter)
 			page, page->cache_offset, status);
 	}
 #endif
+	if (status == B_OK && length == 0)
+		status = B_ERROR;
+
 	return status;
 }
@@ -960,6 +963,7 @@ page_writer(void* /*unused*/)
 		InterruptsSpinLocker locker(sPageLock);
 		remove_page_from_queue(&sModifiedPageQueue, page);
 		page->state = PAGE_STATE_BUSY;
+		page->busy_writing = true;
 
 		busyConditions[numPages].Publish(page, "page");
@@ -992,12 +996,21 @@ page_writer(void* /*unused*/)
 			// put it into the active queue
 			InterruptsSpinLocker locker(sPageLock);
 			move_page_to_active_or_inactive_queue(pages[i], true);
+			pages[i]->busy_writing = false;
 		} else {
 			// We don't have to put the PAGE_MODIFIED bit back, as it's
 			// still in the modified pages list.
-			InterruptsSpinLocker locker(sPageLock);
-			pages[i]->state = PAGE_STATE_MODIFIED;
-			enqueue_page(&sModifiedPageQueue, pages[i]);
+			{
+				InterruptsSpinLocker locker(sPageLock);
+				pages[i]->state = PAGE_STATE_MODIFIED;
+				enqueue_page(&sModifiedPageQueue, pages[i]);
+			}
+			if (!pages[i]->busy_writing) {
+				// someone has cleared the busy_writing flag which tells
+				// us our page has gone invalid
+				vm_cache_remove_page(cache, pages[i]);
+			} else
+				pages[i]->busy_writing = false;
 		}
 
 		busyConditions[i].Unpublish();
@@ -1246,6 +1259,7 @@ vm_page_write_modified_pages(vm_cache *cache, bool fsReenter)
 		}
 		page->state = PAGE_STATE_BUSY;
+		page->busy_writing = true;
 
 		ConditionVariable<vm_page> busyCondition;
 		busyCondition.Publish(page, "page");
@@ -1272,14 +1286,25 @@ vm_page_write_modified_pages(vm_cache *cache, bool fsReenter)
 		if (status == B_OK) {
 			// put it into the active/inactive queue
 			move_page_to_active_or_inactive_queue(page, dequeuedPage);
+			page->busy_writing = false;
 		} else {
 			// We don't have to put the PAGE_MODIFIED bit back, as it's still
 			// in the modified pages list.
 			if (dequeuedPage) {
 				page->state = PAGE_STATE_MODIFIED;
 				enqueue_page(&sModifiedPageQueue, page);
-			} else
-				set_page_state_nolock(page, PAGE_STATE_MODIFIED);
+			}
+
+			if (!page->busy_writing) {
+				// someone has cleared the busy_writing flag which tells
+				// us our page has gone invalid
+				vm_cache_remove_page(cache, page);
+			} else {
+				if (!dequeuedPage)
+					set_page_state_nolock(page, PAGE_STATE_MODIFIED);
+
+				page->busy_writing = false;
+			}
 		}
 
 		busyCondition.Unpublish();
@@ -1345,6 +1370,7 @@ vm_page_init(kernel_args *args)
 		new(&sPages[i].mappings) vm_page_mappings();
 		sPages[i].wired_count = 0;
 		sPages[i].usage_count = 0;
+		sPages[i].busy_writing = false;
 		sPages[i].cache = NULL;
 #ifdef DEBUG_PAGE_QUEUE
 		sPages[i].queue = NULL;