* cache_io() could insert a second page at the same position in the vm_cache,
  since it unlocked the cache while waiting on a busy page. Now we satisfy
  the pending request before unlocking the cache.
* Fixed the deadlock I mentioned in the last commit: if a page fault happens
  at the same time we're trying to read/write from/to a page, we no longer
  fight for the BFS inode lock, but may end up doing the job twice if needed
  (a sketch of this hand-over follows below, before the diff).
  Will need to go over the "write modified" functions to make sure they
  behave correctly as well.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@17091 a95241bf-73f2-0310-859d-f6bbb57e9c96
Author: Axel Dörfler
Date:   2006-04-11 18:48:04 +00:00
Parent: be891a5e5d
Commit: 8645479b36
2 changed files with 122 additions and 23 deletions
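
Before the diff, a short model of the second fix may help. This is a minimal,
self-contained sketch of the hand-over protocol that the file cache and
vm_soft_fault() hunks below implement together; every type and function in it
(vm_page, read_around_dummy, finish_fault) is a simplified stand-in invented
for illustration, not the real Haiku kernel API. Only the dummy page, its
queue_next hand-over slot, and the "replaced page" check mirror the actual
commit; locking, offsets, and error handling are left out. The first fix needs
no model: it is directly visible as the new satisfy_cache_io() in the first
hunk.

/* Illustrative stand-ins only - not the kernel API. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum page_state { PAGE_STATE_FREE, PAGE_STATE_BUSY, PAGE_STATE_ACTIVE };
enum page_type  { PAGE_TYPE_PHYSICAL, PAGE_TYPE_DUMMY };

typedef struct vm_page {
    enum page_state state;
    enum page_type  type;
    struct vm_page  *queue_next;  /* hand-over slot while this is a dummy */
    char            data[16];     /* stands in for the page's contents */
} vm_page;

/* cache_io()'s side of the protocol: it found a busy dummy page, so instead
 * of blocking on whoever inserted it (who may in turn be blocked on a lock
 * we hold), it does the read into a page of its own and leaves that page in
 * the dummy's hand-over slot. */
static vm_page *
read_around_dummy(vm_page *dummy)
{
    vm_page *page = calloc(1, sizeof(vm_page));
    strcpy(page->data, "from disk");  /* stands in for pages_io() */
    dummy->queue_next = page;         /* let the other party adopt it */
    return page;
}

/* vm_soft_fault()'s side: when its own I/O completes, it inserts whatever
 * now sits in the slot - its own page, or a replacement - and discards its
 * page if it was superseded. */
static vm_page *
finish_fault(vm_page *dummy, vm_page *ownPage)
{
    vm_page *result = dummy->queue_next;
    if (result != ownPage)
        free(ownPage);                /* the job was done twice; drop ours */
    result->state = PAGE_STATE_ACTIVE;
    return result;                    /* this is what gets inserted */
}

int
main(void)
{
    /* vm_soft_fault(): insert a dummy marked busy and start reading */
    vm_page dummy = { PAGE_STATE_BUSY, PAGE_TYPE_DUMMY, NULL, "" };
    vm_page *faultPage = calloc(1, sizeof(vm_page));
    dummy.queue_next = faultPage;     /* our own page owns the slot */

    /* meanwhile, cache_io() finds the busy dummy and reads around it */
    read_around_dummy(&dummy);

    /* the fault finishes and inserts whatever won the race */
    vm_page *winner = finish_fault(&dummy, faultPage);
    printf("inserted page contains: %s\n", winner->data);
    free(winner);
    return 0;
}

Run as an ordinary userland program, this prints "inserted page contains:
from disk": the fault's own page was superseded while it was off doing I/O,
so it frees that page and inserts the one cache_io() produced - the
dummy_page.queue_next != page case in the last hunk below.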


@@ -725,6 +725,19 @@ write_to_cache(file_cache_ref *ref, off_t offset, size_t size, addr_t buffer, si
 }

+
+static status_t
+satisfy_cache_io(file_cache_ref *ref, off_t offset, addr_t buffer, addr_t lastBuffer,
+    bool doWrite)
+{
+    size_t requestSize = buffer - lastBuffer;
+
+    if (doWrite)
+        return write_to_cache(ref, offset, requestSize, lastBuffer, requestSize);
+
+    return read_into_cache(ref, offset, requestSize, lastBuffer, requestSize);
+}
+
 static status_t
 cache_io(void *_cacheRef, off_t offset, addr_t buffer, size_t *_size, bool doWrite)
 {
@@ -767,38 +780,74 @@ cache_io(void *_cacheRef, off_t offset, addr_t buffer, size_t *_size, bool doWri
     for (; bytesLeft > 0; offset += B_PAGE_SIZE) {
         // check if this page is already in memory
-        addr_t virtualAddress;
     restart:
         vm_page *page = vm_cache_lookup_page(cache, offset);
-        if (page != NULL && page->state == PAGE_STATE_BUSY) {
-            // ToDo: don't wait forever!
-            mutex_unlock(&cache->lock);
-            snooze(20000);
-            mutex_lock(&cache->lock);
-            goto restart;
+        vm_page *dummyPage = NULL;
+        if (page != NULL) {
+            // The page is busy - since we need to unlock the cache sometime
+            // in the near future, we need to satisfy the request of the pages
+            // we didn't get yet (to make sure no one else interferes in the
+            // mean time).
+            status_t status = B_OK;
+
+            if (lastBuffer != buffer) {
+                status = satisfy_cache_io(ref, lastOffset + lastPageOffset,
+                    buffer, lastBuffer, doWrite);
+                if (status == B_OK) {
+                    lastBuffer = buffer;
+                    lastLeft = bytesLeft;
+                    lastOffset = offset;
+                    lastPageOffset = 0;
+                    pageOffset = 0;
+                }
+            }
+
+            if (status != B_OK) {
+                mutex_unlock(&cache->lock);
+                return status;
+            }
+
+            if (page->state == PAGE_STATE_BUSY) {
+                if (page->type == PAGE_TYPE_DUMMY) {
+                    dummyPage = page;
+                    page = vm_page_allocate_page(PAGE_STATE_FREE);
+                    if (page == NULL) {
+                        mutex_unlock(&cache->lock);
+                        return B_NO_MEMORY;
+                    }
+                } else {
+                    mutex_unlock(&cache->lock);
+                    // ToDo: don't wait forever!
+                    snooze(20000);
+                    mutex_lock(&cache->lock);
+                    goto restart;
+                }
+            }
         }

         size_t bytesInPage = min_c(size_t(B_PAGE_SIZE - pageOffset), bytesLeft);
+        addr_t virtualAddress;

         TRACE(("lookup page from offset %Ld: %p, size = %lu, pageOffset = %lu\n", offset, page, bytesLeft, pageOffset));

-        if (page != NULL
-            && vm_get_physical_page(page->physical_page_number * B_PAGE_SIZE,
-                    &virtualAddress, PHYSICAL_PAGE_CAN_WAIT) == B_OK) {
-            // it is, so let's satisfy the first part of the request, if we have to
-            if (lastBuffer != buffer) {
-                size_t requestSize = buffer - lastBuffer;
-                status_t status;
-                if (doWrite) {
-                    status = write_to_cache(ref, lastOffset + lastPageOffset,
-                        requestSize, lastBuffer, requestSize);
-                } else {
-                    status = read_into_cache(ref, lastOffset + lastPageOffset,
-                        requestSize, lastBuffer, requestSize);
-                }
+        if (page != NULL) {
+            vm_get_physical_page(page->physical_page_number * B_PAGE_SIZE,
+                &virtualAddress, PHYSICAL_PAGE_CAN_WAIT);
+
+            if (dummyPage != NULL && (!doWrite || bytesInPage != B_PAGE_SIZE)) {
+                // This page is currently in use by someone else - since we cannot
+                // know if this someone does what we want, and if it even can do
+                // what we want (we may own a lock that blocks the other request),
+                // we need to handle this case specifically
+                iovec vec;
+                vec.iov_base = (void *)virtualAddress;
+                vec.iov_len = B_PAGE_SIZE;
+
+                size_t size = B_PAGE_SIZE;
+                status_t status = pages_io(ref, offset, &vec, 1, &size, false);
                 if (status != B_OK) {
                     vm_put_physical_page(virtualAddress);
                     mutex_unlock(&cache->lock);
-                    return B_IO_ERROR;
+                    return status;
                 }
             }
@@ -814,6 +863,41 @@ cache_io(void *_cacheRef, off_t offset, addr_t buffer, size_t *_size, bool doWri
             vm_put_physical_page(virtualAddress);

+            if (dummyPage != NULL) {
+                // check if the dummy page is still in place
+            restart_dummy_lookup:
+                vm_page *currentPage = vm_cache_lookup_page(cache, offset);
+                if (currentPage->state == PAGE_STATE_BUSY) {
+                    if (currentPage->type == PAGE_TYPE_DUMMY) {
+                        // we let the other party add our page
+                        currentPage->queue_next = page;
+                    } else {
+                        mutex_unlock(&cache->lock);
+                        // ToDo: don't wait forever!
+                        snooze(20000);
+                        mutex_lock(&cache->lock);
+                        goto restart_dummy_lookup;
+                    }
+                } else if (currentPage != NULL) {
+                    // we need to copy our new page into the old one
+                    addr_t destinationAddress;
+                    vm_get_physical_page(page->physical_page_number * B_PAGE_SIZE,
+                        &virtualAddress, PHYSICAL_PAGE_CAN_WAIT);
+                    vm_get_physical_page(currentPage->physical_page_number * B_PAGE_SIZE,
+                        &destinationAddress, PHYSICAL_PAGE_CAN_WAIT);
+
+                    memcpy((void *)destinationAddress, (void *)virtualAddress, B_PAGE_SIZE);
+
+                    vm_put_physical_page(destinationAddress);
+                    vm_put_physical_page(virtualAddress);
+
+                    vm_page_set_state(page, PAGE_STATE_FREE);
+                } else {
+                    // there is no page in place anymore, we'll put ours into it
+                    vm_cache_insert_page(cache, page, offset);
+                }
+            }
+
             if (bytesLeft <= bytesInPage) {
                 // we've read the last page, so we're done!
                 mutex_unlock(&cache->lock);


@@ -2622,6 +2622,12 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
             mutex_unlock(&cache_ref->lock);
             page = vm_page_allocate_page(PAGE_STATE_FREE);
+
+            dummy_page.queue_next = page;
+            dummy_page.busy_reading = true;
+                // we mark that page busy reading, so that the file cache can ignore
+                // us in case it works on the very same page
+
             addressSpace->translation_map.ops->get_physical_page(page->physical_page_number * B_PAGE_SIZE, (addr_t *)&vec.iov_base, PHYSICAL_PAGE_CAN_WAIT);
                 // ToDo: handle errors here
             err = cache_ref->cache->store->ops->read(cache_ref->cache->store, cacheOffset, &vec, 1, &bytesRead);
@@ -2633,7 +2639,16 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
                 vm_cache_remove_page(cache_ref, &dummy_page);
                 dummy_page.state = PAGE_STATE_INACTIVE;
             }
-            vm_cache_insert_page(cache_ref, page, cacheOffset);
+
+            // We insert the queue_next here, because someone else could have
+            // replaced our page
+            vm_cache_insert_page(cache_ref, dummy_page.queue_next, cacheOffset);
+
+            if (dummy_page.queue_next != page) {
+                // Indeed, the page got replaced by someone else - we can safely
+                // throw our page away now
+                vm_page_set_state(page, PAGE_STATE_FREE);
+            }
             mutex_unlock(&cache_ref->lock);
             break;
         }