* Reserved areas never put down their reference to their address space when
  they were removed, in several cases.
* vfs_get_vnode_cache() now always gives out a reference to the cache it
  returns; if it needs to allocate a new one, the vnode owns one reference,
  and the caller another.
* Therefore, file_cache_create() now owns a reference to its vm_cache_ref and
  frees it in file_cache_delete() (see the sketch below).


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@21533 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler 2007-06-30 15:36:06 +00:00
parent eef8417e27
commit 25f46ea449
3 changed files with 11 additions and 7 deletions
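
The practical effect of the second and third commit-message points is a uniform
contract: every successful vfs_get_vnode_cache() call now hands the caller a
cache reference that the caller must put down itself. A minimal sketch of that
contract follows, with stand-in declarations; the real types and functions live
in the Haiku kernel headers, and use_vnode_cache() is a hypothetical consumer,
not code from this commit.

// A minimal sketch, not kernel code: stand-in declarations for the
// APIs named in this commit.
typedef int status_t;          // stand-in for Haiku's typedef
const status_t B_OK = 0;

struct vm_cache_ref;           // opaque here

status_t vfs_get_vnode_cache(void *vnode, vm_cache_ref **_cache,
	bool allocate);
void vm_cache_release_ref(vm_cache_ref *cache);

status_t
use_vnode_cache(void *vnode)
{
	vm_cache_ref *cache;
	status_t status = vfs_get_vnode_cache(vnode, &cache, true);
	if (status != B_OK)
		return status;

	// ... work with the cache ...

	// On B_OK the caller owns exactly one reference and must put it
	// down itself, whether the cache was freshly allocated (the vnode
	// keeps its own, separate reference) or already existed.
	vm_cache_release_ref(cache);
	return B_OK;
}

file_cache_create() follows the same pattern: it keeps the reference it
receives for the lifetime of its file_cache_ref and puts it down in
file_cache_delete(), which is exactly what the first diff below adds.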

File 1 of 3:

@@ -1292,6 +1292,7 @@ file_cache_delete(void *_cacheRef)
 	TRACE(("file_cache_delete(ref = %p)\n", ref));
 
+	vm_cache_release_ref(ref->cache);
 	vfs_put_vnode(ref->device);
 	delete ref;
 }

File 2 of 3:

@@ -3135,11 +3135,12 @@ vfs_get_vnode_cache(void *_vnode, vm_cache_ref **_cache, bool allocate)
 			vnode->busy = wasBusy;
 		} else
 			status = B_BAD_VALUE;
-	} else
-		vm_cache_acquire_ref(vnode->cache);
+	}
 
-	if (status == B_OK)
+	if (status == B_OK) {
+		vm_cache_acquire_ref(vnode->cache);
 		*_cache = vnode->cache;
+	}
 
 	mutex_unlock(&sVnodeMutex);
 	return status;

File 3 of 3:

@@ -213,6 +213,7 @@ find_reserved_area(vm_address_space *addressSpace, addr_t start,
 		if (size == next->size) {
 			// the new area fully covers the reversed range
 			area->address_space_next = next->address_space_next;
+			vm_put_address_space(addressSpace);
 			free(next);
 		} else {
 			// resize the reserved range behind the area
@@ -543,6 +544,8 @@ map_backing_store(vm_address_space *addressSpace, vm_cache_ref *cacheRef,
 		newCache->type = CACHE_TYPE_RAM;
 		newCache->temporary = 1;
 		newCache->scan_skip = cache->scan_skip;
+		newCache->virtual_base = offset;
+		newCache->virtual_size = offset + size;
 
 		vm_cache_add_consumer_locked(cacheRef, newCache);
@@ -550,10 +553,8 @@ map_backing_store(vm_address_space *addressSpace, vm_cache_ref *cacheRef,
 		mutex_lock(&newCacheRef->lock);
 
 		cache = newCache;
-		cacheRef = newCache->ref;
+		cacheRef = newCacheRef;
 		store = newStore;
-		cache->virtual_base = offset;
-		cache->virtual_size = offset + size;
 	}
 
 	status = vm_cache_set_minimal_commitment_locked(cacheRef, offset + size);
@@ -649,6 +650,7 @@ vm_unreserve_address_range(team_id team, void *address, addr_t size)
 			addressSpace->areas = reserved->address_space_next;
 			area = reserved->address_space_next;
+			vm_put_address_space(addressSpace);
 			free(reserved);
 			continue;
 		}
@@ -3738,7 +3740,7 @@ fill_area_info(struct vm_area *area, area_info *info, size_t size)
 	needs any kind of locking, and actually exists.
 	Used by both lock_memory() and unlock_memory().
 */
-status_t
+static status_t
 test_lock_memory(vm_address_space *addressSpace, addr_t address,
 	bool &needsLocking)
 {
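
For context on the first commit-message point: each reserved range holds one
reference on its address space, and before this change the removal paths in
find_reserved_area() and vm_unreserve_address_range() freed the range without
putting that reference down. A minimal sketch of the invariant follows,
assuming an illustrative reserved_range record; only vm_put_address_space() is
the kernel's real function.

#include <cstdlib>

struct vm_address_space;

// Real function from the diff, declared as a stand-in here.
void vm_put_address_space(vm_address_space *addressSpace);

// Illustrative record: a reserved range pins its address space with
// one reference for as long as the range exists.
struct reserved_range {
	vm_address_space *addressSpace;
	reserved_range *address_space_next;
};

// The fixed pattern: every path that frees a reserved range also puts
// down its address-space reference; otherwise each removed reservation
// leaks one reference and the address space can never be destroyed.
void
remove_reserved_range(reserved_range *range)
{
	vm_put_address_space(range->addressSpace);
	free(range);	// ranges are malloc()'d in the kernel code, hence free()
}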