* Added some more debug output.

* Cleanup.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22375 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler 2007-09-29 15:48:11 +00:00
parent b062823d81
commit 3392e9bcd6
2 changed files with 23 additions and 42 deletions
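
A note on the "debug output" half of this change: the vnode dump below uses plain kprintf() in the kernel debugger, while the vm_cache.cpp additions use the TRACE(()) convention, which compiles to dprintf() output only when tracing is enabled for the file. A minimal sketch of that double-parenthesis pattern (the guard name TRACE_VM_CACHE is an assumption here, chosen for illustration):

	// Sketch of the compile-time trace convention used in the diff below.
	// The guard name TRACE_VM_CACHE is assumed, not taken from this commit.
	//#define TRACE_VM_CACHE
	#ifdef TRACE_VM_CACHE
	#	define TRACE(x) dprintf x
	#else
	#	define TRACE(x) ;
	#endif

	// The double parentheses make the whole argument list a single macro
	// argument, so a call like
	//     TRACE(("vm_cache_resize(cache %p, newSize %Ld)\n", cache, newSize));
	// expands to a dprintf() when tracing is on and to a bare ";" otherwise.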


@@ -2280,7 +2280,7 @@ _dump_vnode(struct vnode *vnode)
 	kprintf(" private_node: %p\n", vnode->private_node);
 	kprintf(" mount: %p\n", vnode->mount);
 	kprintf(" covered_by: %p\n", vnode->covered_by);
-	kprintf(" cache_ref: %p\n", vnode->cache);
+	kprintf(" cache: %p\n", vnode->cache);
 	kprintf(" flags: %s%s%s\n", vnode->remove ? "r" : "-",
 		vnode->busy ? "b" : "-", vnode->unpublished ? "u" : "-");
 	kprintf(" advisory_lock: %p\n", vnode->advisory_locking);


@@ -317,19 +317,16 @@ vm_cache_acquire_page_cache_ref(vm_page* page)
 vm_page *
 vm_cache_lookup_page(vm_cache *cache, off_t offset)
 {
-	struct page_lookup_key key;
-	cpu_status state;
-	vm_page *page;
 	ASSERT_LOCKED_MUTEX(&cache->lock);
+	struct page_lookup_key key;
 	key.offset = (uint32)(offset >> PAGE_SHIFT);
 	key.cache = cache;
-	state = disable_interrupts();
+	cpu_status state = disable_interrupts();
 	acquire_spinlock(&sPageCacheTableLock);
-	page = (vm_page *)hash_lookup(sPageCacheTable, &key);
+	vm_page *page = (vm_page *)hash_lookup(sPageCacheTable, &key);
 	release_spinlock(&sPageCacheTableLock);
 	restore_interrupts(state);
@@ -344,8 +341,6 @@ vm_cache_lookup_page(vm_cache *cache, off_t offset)
 void
 vm_cache_insert_page(vm_cache *cache, vm_page *page, off_t offset)
 {
-	cpu_status state;
 	TRACE(("vm_cache_insert_page: cache %p, page %p, offset %Ld\n",
 		cache, page, offset));
 	ASSERT_LOCKED_MUTEX(&cache->lock);
@@ -367,8 +362,7 @@ vm_cache_insert_page(vm_cache *cache, vm_page *page, off_t offset)
 	page->usage_count = 2;
-	state = disable_interrupts();
-	acquire_spinlock(&sPageCacheTableLock);
+	InterruptsSpinLocker locker(sPageCacheTableLock);
 	page->cache = cache;
@@ -385,9 +379,6 @@ vm_cache_insert_page(vm_cache *cache, vm_page *page, off_t offset)
 #endif	// KDEBUG
 	hash_insert(sPageCacheTable, page);
-	release_spinlock(&sPageCacheTableLock);
-	restore_interrupts(state);
 }
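
The InterruptsSpinLocker in the hunk above replaces the manual disable_interrupts()/acquire_spinlock() pair with an RAII object whose destructor releases the lock and restores the interrupt state on every exit path, which is why the explicit release/restore calls at the end of the function could be dropped. A minimal sketch of the behavior relied on here (Haiku's real class is built on its AutoLocker templates; this is an illustration, not that implementation):

	// Sketch of an RAII interrupts-plus-spinlock guard matching the usage
	// in the diff; named ...Sketch to make clear it is not the real class.
	class InterruptsSpinLockerSketch {
	public:
		InterruptsSpinLockerSketch(spinlock &lock)
			:
			fLock(lock)
		{
			fState = disable_interrupts();
			acquire_spinlock(&fLock);
		}

		~InterruptsSpinLockerSketch()
		{
			// runs on every return path, so the explicit
			// release_spinlock()/restore_interrupts() calls removed
			// above are no longer needed
			release_spinlock(&fLock);
			restore_interrupts(fState);
		}

	private:
		spinlock	&fLock;
		cpu_status	fState;
	};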
@@ -399,8 +390,6 @@ vm_cache_insert_page(vm_cache *cache, vm_page *page, off_t offset)
 void
 vm_cache_remove_page(vm_cache *cache, vm_page *page)
 {
-	cpu_status state;
 	TRACE(("vm_cache_remove_page: cache %p, page %p\n", cache, page));
 	ASSERT_LOCKED_MUTEX(&cache->lock);
@@ -409,7 +398,7 @@ vm_cache_remove_page(vm_cache *cache, vm_page *page)
 			cache, page->cache);
 	}
-	state = disable_interrupts();
+	cpu_status state = disable_interrupts();
 	acquire_spinlock(&sPageCacheTableLock);
 	hash_remove(sPageCacheTable, page);
@@ -435,15 +424,13 @@ vm_cache_remove_page(vm_cache *cache, vm_page *page)
 status_t
 vm_cache_write_modified(vm_cache *cache, bool fsReenter)
 {
-	status_t status;
 	TRACE(("vm_cache_write_modified(cache = %p)\n", cache));
 	if (cache->temporary)
 		return B_OK;
 	mutex_lock(&cache->lock);
-	status = vm_page_write_modified_pages(cache, fsReenter);
+	status_t status = vm_page_write_modified_pages(cache, fsReenter);
 	mutex_unlock(&cache->lock);
 	return status;
@@ -458,11 +445,13 @@ vm_cache_write_modified(vm_cache *cache, bool fsReenter)
 status_t
 vm_cache_set_minimal_commitment_locked(vm_cache *cache, off_t commitment)
 {
-	status_t status = B_OK;
-	vm_store *store = cache->store;
+	TRACE(("vm_cache_set_minimal_commitment_locked(cache %p, commitment %Ld)\n",
+		cache, commitment));
 	ASSERT_LOCKED_MUTEX(&cache->lock);
+	vm_store *store = cache->store;
+	status_t status = B_OK;
 	// If we don't have enough committed space to cover through to the new end of region...
 	if (store->committed_size < commitment) {
 		// ToDo: should we check if the cache's virtual size is large
@@ -489,17 +478,17 @@ vm_cache_set_minimal_commitment_locked(vm_cache *cache, off_t commitment)
 status_t
 vm_cache_resize(vm_cache *cache, off_t newSize)
 {
-	uint32 oldPageCount, newPageCount;
-	status_t status;
+	TRACE(("vm_cache_resize(cache %p, newSize %Ld) old size %Ld\n",
+		cache, newSize, cache->virtual_size));
 	ASSERT_LOCKED_MUTEX(&cache->lock);
-	status = cache->store->ops->commit(cache->store, newSize);
+	status_t status = cache->store->ops->commit(cache->store, newSize);
 	if (status != B_OK)
 		return status;
-	oldPageCount = (uint32)((cache->virtual_size + B_PAGE_SIZE - 1) >> PAGE_SHIFT);
-	newPageCount = (uint32)((newSize + B_PAGE_SIZE - 1) >> PAGE_SHIFT);
+	uint32 oldPageCount = (uint32)((cache->virtual_size + B_PAGE_SIZE - 1)
+		>> PAGE_SHIFT);
+	uint32 newPageCount = (uint32)((newSize + B_PAGE_SIZE - 1) >> PAGE_SHIFT);
 	if (newPageCount < oldPageCount) {
 		// we need to remove all pages in the cache outside of the new virtual
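
The page-count expression above rounds a byte size up to the next page boundary before shifting, so a partially used final page still counts as a page. A worked example, assuming the common values B_PAGE_SIZE == 4096 and PAGE_SHIFT == 12 (not stated in this commit):

	// Round-up page count as computed in vm_cache_resize():
	//   newSize = 5000:  (5000 + 4095) >> 12 == 9095 >> 12 == 2 pages
	//   newSize = 4096:  (4096 + 4095) >> 12 == 8191 >> 12 == 1 page
	//   newSize = 0:     (0 + 4095)    >> 12 ==    0 pages
	uint32 pageCount = (uint32)((newSize + B_PAGE_SIZE - 1) >> PAGE_SHIFT);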
@@ -615,11 +604,6 @@ vm_cache_remove_consumer(vm_cache *cache, vm_cache *consumer)
 		if (consumerPage == NULL) {
 			// the page already is not yet in the consumer cache - move
 			// it upwards
-#if 0
-if (consumer->virtual_base == 0x11000)
-	dprintf("%ld: move page %p offset %ld from cache %p to cache %p\n",
-		find_thread(NULL), page, page->cache_offset, cache, consumer);
-#endif
 			vm_cache_remove_page(cache, page);
 			vm_cache_insert_page(consumer, page,
 				(off_t)page->cache_offset << PAGE_SHIFT);
@@ -628,7 +612,7 @@ if (consumer->virtual_base == 0x11000)
 			// the page is currently busy taking a read fault - IOW,
 			// vm_soft_fault() has mapped our page so we can just
 			// move it up
 			//dprintf("%ld: merged busy page %p, cache %p, offset %ld\n", find_thread(NULL), page, cacheRef->cache, page->cache_offset);
 			vm_cache_remove_page(consumer, consumerPage);
 			consumerPage->state = PAGE_STATE_INACTIVE;
 			((vm_dummy_page*)consumerPage)->busy_condition.Unpublish();
@@ -647,11 +631,6 @@ if (consumer->virtual_base == 0x11000)
 			consumerPage->collided_page = page;
 #endif	// DEBUG_PAGE_CACHE_TRANSITIONS
 		}
-#if 0
-	else if (consumer->virtual_base == 0x11000)
-		dprintf("%ld: did not move page %p offset %ld from cache %p to cache %p because there is page %p\n",
-			find_thread(NULL), page, page->cache_offset, cache, consumer, consumerPage);
-#endif
 	}
 	newSource = cache->source;
@@ -717,6 +696,7 @@ vm_cache_add_consumer_locked(vm_cache *cache, vm_cache *consumer)
 status_t
 vm_cache_insert_area_locked(vm_cache *cache, vm_area *area)
 {
+	TRACE(("vm_cache_insert_area_locked(cache %p, area %p)\n", cache, area));
 	ASSERT_LOCKED_MUTEX(&cache->lock);
 	area->cache_next = cache->areas;
@@ -735,7 +715,9 @@ vm_cache_insert_area_locked(vm_cache *cache, vm_area *area)
 status_t
 vm_cache_remove_area(vm_cache *cache, vm_area *area)
 {
-	mutex_lock(&cache->lock);
+	TRACE(("vm_cache_remove_area(cache %p, area %p)\n", cache, area));
+	MutexLocker locker(cache->lock);
 	if (area->cache_prev)
 		area->cache_prev->cache_next = area->cache_next;
@@ -747,6 +729,5 @@ vm_cache_remove_area(vm_cache *cache, vm_area *area)
 	if (cache->store->ops->release_ref)
 		cache->store->ops->release_ref(cache->store);
-	mutex_unlock(&cache->lock);
 	return B_OK;
 }
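
Same idea as the InterruptsSpinLocker change earlier: MutexLocker takes cache->lock in its constructor and releases it in its destructor, which is why the trailing mutex_unlock() before "return B_OK" could be dropped, and why any future early return from vm_cache_remove_area would still unlock correctly. A minimal sketch of the behavior (again an illustration, not Haiku's actual AutoLocker-based implementation):

	// Sketch of an RAII mutex guard matching the usage in the last hunk;
	// named ...Sketch to make clear it is not the real class.
	class MutexLockerSketch {
	public:
		MutexLockerSketch(mutex &lock)
			:
			fLock(lock)
		{
			mutex_lock(&fLock);
		}

		~MutexLockerSketch()
		{
			// releases the mutex on every exit path, replacing the
			// explicit mutex_unlock() removed above
			mutex_unlock(&fLock);
		}

	private:
		mutex	&fLock;
	};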