* Fixed the strange vm_cache_ref reference count mechanism: now, a fresh
  vm_cache_ref starts with a reference count of 1. When acquiring a vm_cache,
  you no longer need to worry about whether that should go through the
  vm_store or not, as it now always does.
* map_backing_store() no longer needs to play with the vm_cache_ref
  references, which simplified some code.
* vfs_get_vnode_cache() now grabs a reference to the cache, if successful.
* Better balanced vnode ownership on vnode_store creation (vnode_store
  released the vnode before if its creation failed).

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@15641 a95241bf-73f2-0310-859d-f6bbb57e9c96
parent d5abe15e84
commit ea2cd27e57
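Below is a minimal sketch of the reference-counting contract this commit establishes. It is illustrative only and not part of the commit; the calls are the ones touched by the diff, while the surrounding setup is an assumption:

    // A fresh vm_cache_ref now starts life with ref_count == 1, owned by its creator.
    vm_store *store = vm_store_create_anonymous_noswap(false, 0);
    vm_cache *cache = vm_cache_create(store);
    vm_cache_ref *ref = vm_cache_ref_create(cache);    // ref_count == 1

    // Further users just acquire/release; acquiring now always ripples down
    // to the vm_store as well (no more acquire_store_ref parameter).
    vm_cache_acquire_ref(ref);     // ref_count == 2
    vm_cache_release_ref(ref);     // back to 1, store reference dropped too

    // Dropping the initial reference tears the cache down and frees its pages.
    vm_cache_release_ref(ref);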
@@ -22,7 +22,7 @@ extern "C" {
 status_t vm_cache_init(struct kernel_args *args);
 vm_cache *vm_cache_create(vm_store *store);
 vm_cache_ref *vm_cache_ref_create(vm_cache *cache);
-void vm_cache_acquire_ref(vm_cache_ref *cache_ref, bool acquire_store_ref);
+void vm_cache_acquire_ref(vm_cache_ref *cache_ref);
 void vm_cache_release_ref(vm_cache_ref *cache_ref);
 vm_page *vm_cache_lookup_page(vm_cache_ref *cacheRef, off_t page);
 void vm_cache_insert_page(vm_cache_ref *cacheRef, vm_page *page, off_t offset);
src/system/kernel/cache/file_cache.cpp | 1
@@ -947,6 +947,7 @@ cache_prefetch_vnode(void *vnode, off_t offset, size_t size)
 
 out:
     mutex_unlock(&cache->lock);
+    vm_cache_release_ref(cache);
 }
 
 
src/system/kernel/cache/vnode_store.cpp | 6
@@ -1,5 +1,5 @@
 /*
- * Copyright 2004, Axel Dörfler, axeld@pinc-software.de.
+ * Copyright 2004-2005, Axel Dörfler, axeld@pinc-software.de.
  * Distributed under the terms of the MIT License.
  */
 
@@ -112,10 +112,8 @@ extern "C" vm_store *
 vm_create_vnode_store(void *vnode)
 {
     vnode_store *store = (vnode_store *)malloc(sizeof(struct vnode_store));
-    if (store == NULL) {
-        vfs_put_vnode(vnode);
+    if (store == NULL)
         return NULL;
-    }
 
     store->vm.ops = &sStoreOps;
     store->vm.cache = NULL;
@@ -2805,12 +2805,19 @@ vfs_write_pages(void *_vnode, void *cookie, off_t pos, const iovec *vecs, size_t
 }
 
 
+/** Gets the vnode's vm_cache object. If it didn't have one, it will be
+ *  created if \a allocate is \c true.
+ *  In case it's successful, it will also grab a reference to the cache
+ *  it returns.
+ */
+
 extern "C" status_t
 vfs_get_vnode_cache(void *_vnode, vm_cache_ref **_cache, bool allocate)
 {
     struct vnode *vnode = (struct vnode *)_vnode;
 
     if (vnode->cache != NULL) {
+        vm_cache_acquire_ref(vnode->cache);
         *_cache = vnode->cache;
         return B_OK;
     }
@@ -2825,7 +2832,8 @@ vfs_get_vnode_cache(void *_vnode, vm_cache_ref **_cache, bool allocate)
             status = vm_create_vnode_cache(vnode, &vnode->cache);
         else
             status = B_BAD_VALUE;
-    }
+    } else
+        vm_cache_acquire_ref(vnode->cache);
 
     if (status == B_OK)
         *_cache = vnode->cache;
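For context, a hedged sketch of the caller-side pattern this change implies, mirroring the cache_prefetch_vnode() hunk above; the surrounding code is hypothetical:

    vm_cache_ref *cache;
    status_t status = vfs_get_vnode_cache(vnode, &cache, false);
    if (status == B_OK) {
        mutex_lock(&cache->lock);
        // ... work on the cache's pages ...
        mutex_unlock(&cache->lock);

        // balance the reference that vfs_get_vnode_cache() now grabs
        vm_cache_release_ref(cache);
    }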
@@ -499,7 +499,6 @@ insert_area(vm_address_space *addressSpace, void **_address,
 }
 
 
-// a ref to the cache holding this store must be held before entering here
 static status_t
 map_backing_store(vm_address_space *addressSpace, vm_store *store, void **_virtualAddress,
     off_t offset, addr_t size, uint32 addressSpec, int wiring, int protection,
@@ -508,11 +507,8 @@ map_backing_store(vm_address_space *addressSpace, vm_store *store, void **_virtu
     vm_cache *cache;
     vm_cache_ref *cache_ref;
     vm_area *area;
-    vm_cache *nu_cache;
-    vm_cache_ref *nu_cache_ref = NULL;
-    vm_store *nu_store;
 
-    int err;
+    status_t err;
 
     TRACE(("map_backing_store: aspace %p, store %p, *vaddr %p, offset 0x%Lx, size %lu, addressSpec %ld, wiring %d, protection %d, _area %p, area_name '%s'\n",
         addressSpace, store, *_virtualAddress, offset, size, addressSpec,
@@ -528,6 +524,11 @@ map_backing_store(vm_address_space *addressSpace, vm_store *store, void **_virtu
     // if this is a private map, we need to create a new cache & store object
     // pair to handle the private copies of pages as they are written to
     if (mapping == REGION_PRIVATE_MAP) {
+        vm_cache *nu_cache;
+        vm_cache_ref *nu_cache_ref = NULL;
+        vm_store *nu_store;
+
+        // ToDo: panic???
         // create an anonymous store object
         nu_store = vm_store_create_anonymous_noswap((protection & B_STACK_AREA) != 0, USER_STACK_GUARD_PAGES);
         if (nu_store == NULL)
@@ -543,9 +544,6 @@ map_backing_store(vm_address_space *addressSpace, vm_store *store, void **_virtu
 
         nu_cache->source = cache;
 
-        // grab a ref to the cache object we're now linked to as a source
-        vm_cache_acquire_ref(cache_ref, true);
-
         cache = nu_cache;
         cache_ref = cache->ref;
         store = nu_store;
@@ -554,9 +552,7 @@ map_backing_store(vm_address_space *addressSpace, vm_store *store, void **_virtu
 
     err = vm_cache_set_minimal_commitment(cache_ref, offset + size);
     if (err != B_OK)
-        goto err1a;
-
-    vm_cache_acquire_ref(cache_ref, true);
+        goto err1;
 
     acquire_sem_etc(addressSpace->sem, WRITE_COUNT, 0, 0);
 
@@ -565,12 +561,12 @@ map_backing_store(vm_address_space *addressSpace, vm_store *store, void **_virtu
         // okay, someone is trying to delete this address space now, so we can't
         // insert the area, so back out
         err = B_BAD_TEAM_ID;
-        goto err1b;
+        goto err2;
     }
 
     err = insert_area(addressSpace, _virtualAddress, addressSpec, size, area);
     if (err < B_OK)
-        goto err1b;
+        goto err2;
 
     // attach the cache to the area
     area->cache_ref = cache_ref;
@@ -591,18 +587,14 @@ map_backing_store(vm_address_space *addressSpace, vm_store *store, void **_virtu
     *_area = area;
     return B_OK;
 
-err1b:
+err2:
     release_sem_etc(addressSpace->sem, WRITE_COUNT, 0);
-    vm_cache_release_ref(cache_ref);
-    goto err;
-err1a:
-    if (nu_cache_ref) {
-        // had never acquired it's initial ref, so acquire and then release it
-        // this should clean up all the objects it references
-        vm_cache_acquire_ref(cache_ref, true);
+err1:
+    if (mapping == REGION_PRIVATE_MAP) {
+        // we created this cache, so we must delete it again
         vm_cache_release_ref(cache_ref);
     }
-err:
+
     free(area->name);
     free(area);
     return err;
@@ -724,7 +716,7 @@ vm_create_anonymous_area(team_id aid, const char *name, void **address,
     vm_page *page = NULL;
     bool isStack = (protection & B_STACK_AREA) != 0;
     bool canOvercommit = false;
-    status_t err;
+    status_t status;
 
     TRACE(("create_anonymous_area %s: size 0x%lx\n", name, size));
 
@@ -783,6 +775,7 @@ vm_create_anonymous_area(team_id aid, const char *name, void **address,
         }
     }
 
+    // ToDo: panic???
     // create an anonymous store object
     store = vm_store_create_anonymous_noswap(canOvercommit, isStack ?
         ((protection & B_USER_PROTECTION) != 0 ?
@@ -810,11 +803,10 @@ vm_create_anonymous_area(team_id aid, const char *name, void **address,
             break;
     }
 
-    vm_cache_acquire_ref(cache_ref, true);
-    err = map_backing_store(addressSpace, store, address, 0, size, addressSpec, wiring,
+    status = map_backing_store(addressSpace, store, address, 0, size, addressSpec, wiring,
         protection, REGION_NO_PRIVATE_MAP, &area, name);
-    vm_cache_release_ref(cache_ref);
-    if (err < 0) {
+    if (status < B_OK) {
+        vm_cache_release_ref(cache_ref);
         vm_put_address_space(addressSpace);
 
         if (wiring == B_CONTIGUOUS) {
@@ -829,7 +821,7 @@ vm_create_anonymous_area(team_id aid, const char *name, void **address,
                 vm_page_set_state(page, PAGE_STATE_FREE);
             }
         }
-        return err;
+        return status;
     }
 
     cache_ref = store->cache->ref;
@@ -917,8 +909,8 @@ vm_create_anonymous_area(team_id aid, const char *name, void **address,
                 panic("couldn't lookup physical page just allocated\n");
 
             atomic_add(&page->ref_count, 1);
-            err = (*map->ops->map)(map, virtualAddress, physicalAddress, protection);
-            if (err < 0)
+            status = (*map->ops->map)(map, virtualAddress, physicalAddress, protection);
+            if (status < 0)
                 panic("couldn't map physical page in page run\n");
 
             vm_page_set_state(page, PAGE_STATE_WIRED);
@@ -937,9 +929,6 @@ vm_create_anonymous_area(team_id aid, const char *name, void **address,
 
     TRACE(("vm_create_anonymous_area: done\n"));
 
-    if (area == NULL)
-        return B_NO_MEMORY;
-
     return area->id;
 }
 
@@ -991,10 +980,10 @@ vm_map_physical_memory(team_id areaID, const char *name, void **_address,
     // tell the page scanner to skip over this area, it's pages are special
     cache->scan_skip = 1;
 
-    vm_cache_acquire_ref(cacheRef, true);
     status = map_backing_store(addressSpace, store, _address, 0, size,
         addressSpec & ~B_MTR_MASK, 0, protection, REGION_NO_PRIVATE_MAP, &area, name);
-    vm_cache_release_ref(cacheRef);
+    if (status < B_OK)
+        vm_cache_release_ref(cacheRef);
 
     if (status >= B_OK && (addressSpec & B_MTR_MASK) != 0) {
         // set requested memory type
@@ -1041,6 +1030,7 @@ vm_create_null_area(team_id aid, const char *name, void **address, uint32 addres
     size = PAGE_ALIGN(size);
 
     // create an null store object
+    // TODO: panic???
     store = vm_store_create_null();
     if (store == NULL)
         panic("vm_map_physical_memory: vm_store_create_null returned NULL");
@@ -1053,12 +1043,13 @@ vm_create_null_area(team_id aid, const char *name, void **address, uint32 addres
     // tell the page scanner to skip over this area, no pages will be mapped here
     cache->scan_skip = 1;
 
-    vm_cache_acquire_ref(cache_ref, true);
     err = map_backing_store(addressSpace, store, address, 0, size, addressSpec, 0, B_KERNEL_READ_AREA, REGION_NO_PRIVATE_MAP, &area, name);
-    vm_cache_release_ref(cache_ref);
     vm_put_address_space(addressSpace);
-    if (err < 0)
+
+    if (err < B_OK) {
+        vm_cache_release_ref(cache_ref);
         return err;
+    }
 
     return area->id;
 }
@@ -1090,10 +1081,6 @@ vm_create_vnode_cache(void *vnode, struct vm_cache_ref **_cacheRef)
         return B_NO_MEMORY;
     }
 
-    // acquire the cache ref once to represent the ref that the vnode will have
-    // this is one of the only places where we dont want to ref to ripple down to the store
-    vm_cache_acquire_ref(cacheRef, false);
-
     *_cacheRef = cacheRef;
     return B_OK;
 }
@@ -1124,7 +1111,8 @@ _vm_map_file(team_id aid, const char *name, void **_address, uint32 addressSpec,
     if (addressSpace == NULL)
         return B_BAD_TEAM_ID;
 
-    TRACE(("_vm_map_file(\"%s\", offset = %Ld, size = %lu, mapping %ld)\n", path, offset, size, mapping));
+    TRACE(("_vm_map_file(\"%s\", offset = %Ld, size = %lu, mapping %ld)\n",
+        path, offset, size, mapping));
 
     offset = ROUNDOWN(offset, B_PAGE_SIZE);
     size = PAGE_ALIGN(size);
@@ -1134,26 +1122,26 @@ _vm_map_file(team_id aid, const char *name, void **_address, uint32 addressSpec,
     if (status < B_OK)
         goto err1;
 
     // ToDo: this only works for file systems that use the file cache
     status = vfs_get_vnode_cache(vnode, &cacheRef, false);
+
+    vfs_put_vnode(vnode);
+        // we don't need this vnode anymore - if the above call was
+        // successful, the store already has a ref to it
+
+    if (status < B_OK)
+        goto err1;
+
+    status = map_backing_store(addressSpace, cacheRef->cache->store, _address,
+        offset, size, addressSpec, 0, protection, mapping, &area, name);
     if (status < B_OK)
         goto err2;
 
-    // acquire a ref to the cache before we do work on it. Dont ripple the ref acquision to the vnode
-    // below because we'll have to release it later anyway, since we grabbed a ref to the vnode at
-    // vfs_get_vnode_from_path(). This puts the ref counts in sync.
-    vm_cache_acquire_ref(cacheRef, false);
-    status = map_backing_store(addressSpace, cacheRef->cache->store, _address, offset, size,
-        addressSpec, 0, protection, mapping, &area, name);
-    vm_cache_release_ref(cacheRef);
     vm_put_address_space(addressSpace);
-
-    if (status < B_OK)
-        return status;
 
     return area->id;
 
 err2:
-    vfs_put_vnode(vnode);
+    vm_cache_release_ref(cacheRef);
 err1:
     vm_put_address_space(addressSpace);
     return status;
@@ -1167,7 +1155,8 @@ vm_map_file(team_id aid, const char *name, void **address, uint32 addressSpec,
     if (!arch_vm_supports_protection(protection))
         return B_NOT_SUPPORTED;
 
-    return _vm_map_file(aid, name, address, addressSpec, size, protection, mapping, path, offset, true);
+    return _vm_map_file(aid, name, address, addressSpec, size, protection,
+        mapping, path, offset, true);
 }
 
 
@@ -1234,11 +1223,9 @@ vm_clone_area(team_id team, const char *name, void **address, uint32 addressSpec
     } else
 #endif
     {
-        vm_cache_acquire_ref(sourceArea->cache_ref, true);
-        status = map_backing_store(addressSpace, sourceArea->cache_ref->cache->store, address,
-            sourceArea->cache_offset, sourceArea->size, addressSpec, sourceArea->wiring,
-            protection, mapping, &newArea, name);
-        vm_cache_release_ref(sourceArea->cache_ref);
+        status = map_backing_store(addressSpace, sourceArea->cache_ref->cache->store,
+            address, sourceArea->cache_offset, sourceArea->size, addressSpec,
+            sourceArea->wiring, protection, mapping, &newArea, name);
     }
 
     vm_put_area(sourceArea);
@@ -1439,7 +1426,7 @@ vm_copy_on_write_area(vm_area *area)
     upperCacheRef->ref_count = 1;
 
     // grab a ref to the cache object we're now linked to as a source
-    vm_cache_acquire_ref(lowerCacheRef, true);
+    vm_cache_acquire_ref(lowerCacheRef);
 
     // We now need to remap all pages from the area read-only, so that
     // a copy will be created on next write access
@@ -1507,6 +1494,8 @@ vm_copy_area(team_id addressSpaceID, const char *name, void **_address, uint32 a
     if (status < B_OK)
         goto err;
 
+    vm_cache_acquire_ref(cacheRef);
+
     // If the source area is writable, we need to move it one layer up as well
 
     if ((source->protection & (B_KERNEL_WRITE_AREA | B_WRITE_AREA)) != 0) {
@@ -1929,7 +1918,7 @@ vm_delete_areas(struct vm_address_space *addressSpace)
     vm_area *area;
     vm_area *next, *last = NULL;
 
-    TRACE(("vm_delete_areas: called on aspace 0x%lx\n", addressSpace->id));
+    TRACE(("vm_delete_areas: called on address space 0x%lx\n", addressSpace->id));
 
     acquire_sem_etc(addressSpace->sem, WRITE_COUNT, 0, 0);
 
@@ -2485,7 +2474,7 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
 
     top_cache_ref = area->cache_ref;
     cacheOffset = address - area->base + area->cache_offset;
-    vm_cache_acquire_ref(top_cache_ref, true);
+    vm_cache_acquire_ref(top_cache_ref);
     change_count = addressSpace->change_count;
     release_sem_etc(addressSpace->sem, READ_COUNT, 0);
 
@@ -122,7 +122,7 @@ vm_cache_ref_create(vm_cache *cache)
     ref->cache = cache;
     mutex_init(&ref->lock, "cache_ref_mutex");
     ref->areas = NULL;
-    ref->ref_count = 0;
+    ref->ref_count = 1;
     cache->ref = ref;
 
     return ref;
@@ -130,14 +130,15 @@ vm_cache_ref_create(vm_cache *cache)
 
 
 void
-vm_cache_acquire_ref(vm_cache_ref *cache_ref, bool acquire_store_ref)
+vm_cache_acquire_ref(vm_cache_ref *cache_ref)
 {
-    // dprintf("vm_cache_acquire_ref: cache_ref 0x%x, ref will be %d\n", cache_ref, cache_ref->ref_count+1);
+    TRACE(("vm_cache_acquire_ref: cache_ref %p, ref will be %ld\n",
+        cache_ref, cache_ref->ref_count + 1));
 
     if (cache_ref == NULL)
         panic("vm_cache_acquire_ref: passed NULL\n");
 
-    if (acquire_store_ref && cache_ref->cache->store->ops->acquire_ref)
+    if (cache_ref->cache->store->ops->acquire_ref != NULL)
         cache_ref->cache->store->ops->acquire_ref(cache_ref->cache->store);
 
     atomic_add(&cache_ref->ref_count, 1);
@@ -149,12 +150,15 @@ vm_cache_release_ref(vm_cache_ref *cache_ref)
 {
     vm_page *page;
 
-    TRACE(("vm_cache_release_ref: cache_ref %p, ref will be %ld\n", cache_ref, cache_ref->ref_count - 1));
+    TRACE(("vm_cache_release_ref: cache_ref %p, ref will be %ld\n",
+        cache_ref, cache_ref->ref_count - 1));
 
     if (cache_ref == NULL)
         panic("vm_cache_release_ref: passed NULL\n");
 
     if (atomic_add(&cache_ref->ref_count, -1) != 1) {
+        // the store ref is only released on the "working" refs, not
+        // on the initial one (this is vnode specific)
         if (cache_ref->cache->store->ops->release_ref)
             cache_ref->cache->store->ops->release_ref(cache_ref->cache->store);
 
@@ -184,7 +188,8 @@ vm_cache_release_ref(vm_cache_ref *cache_ref)
         release_spinlock(&page_cache_table_lock);
         restore_interrupts(state);
 
-        TRACE(("vm_cache_release_ref: freeing page 0x%lx\n", oldPage->ppn));
+        TRACE(("vm_cache_release_ref: freeing page 0x%lx\n",
+            oldPage->physical_page_number));
         vm_page_set_state(oldPage, PAGE_STATE_FREE);
     }
 
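A rough sketch of the release-side asymmetry the comment above describes. This is a simplified, hypothetical stand-in (the real vm_cache_release_ref() also unlinks and frees the cache's pages once the count reaches zero):

    static void
    release_ref_sketch(vm_cache_ref *cache_ref)
    {
        if (atomic_add(&cache_ref->ref_count, -1) != 1) {
            // a "working" reference was dropped: forward the release to the
            // store (vnode specific), but keep the cache itself alive
            if (cache_ref->cache->store->ops->release_ref)
                cache_ref->cache->store->ops->release_ref(cache_ref->cache->store);
            return;
        }

        // the initial reference was dropped: the cache, its store, and all
        // of its remaining pages are torn down here
    }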