* vm_copy_on_write_area() did not always correctly divide the ref_count of the
  two cache_refs - it needs to count the consumers of the lower cache to find
  its actual number of references; the upper cache could still be in use by
  someone else (a counting sketch follows this message).
* There were several locking bugs in the VM code; since cache_ref::cache can
  change, we must not access it without having the cache_ref locked.
* As a result, map_backing_store() now requires you to have the lock of the
  store's cache_ref held (see the locking sketch below).
* And therefore, some functions in vm_cache.c must no longer lock the
  cache_ref on their own, but require the caller to have it locked already.
* Added the -s option to the cache/cache_ref KDL commands: it will only print
  the requested structure, and not its counterpart (useful if accessing one
  structure results in a page fault, as was possible previously).

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@19796 a95241bf-73f2-0310-859d-f6bbb57e9c96
Parent: f445b04baf · Commit: 647b1f70a5
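
The ref_count fix in the first bullet boils down to counting the lower cache's
consumers. Below is a minimal, standalone sketch of that rule, not the kernel
code itself: the structs are reduced stand-ins for vm_cache/vm_cache_ref (the
real ones use the kernel list API and atomic_add()), and divide_ref_count() is
a hypothetical helper name invented for illustration.

#include <stdint.h>
#include <stdio.h>

typedef struct vm_cache {
	struct vm_cache *next_consumer;	/* simplified consumer list */
} vm_cache;

typedef struct {
	int32_t ref_count;
	vm_cache *consumers;		/* head of the consumer list */
} vm_cache_ref;

/* The lower cache_ref is referenced exactly once per consumer, so count
 * the consumers instead of assuming "upper minus one" - the upper
 * cache_ref may still be referenced by others. */
static void
divide_ref_count(vm_cache_ref *upperRef, vm_cache_ref *lowerRef)
{
	int32_t count = 0;
	vm_cache *consumer;

	for (consumer = lowerRef->consumers; consumer != NULL;
			consumer = consumer->next_consumer)
		count++;

	lowerRef->ref_count = count;
	upperRef->ref_count -= count;	/* the kernel uses atomic_add() here */
}

int
main(void)
{
	vm_cache second = { NULL };
	vm_cache first = { &second };		/* two consumers of the lower cache */
	vm_cache_ref lower = { 0, &first };
	vm_cache_ref upper = { 5, NULL };	/* five refs total before the split */

	divide_ref_count(&upper, &lower);
	printf("lower: %d, upper: %d\n", lower.ref_count, upper.ref_count);
	/* prints "lower: 2, upper: 3" */
	return 0;
}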
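
The locking contract from the second and third bullets, as a pattern sketch
rather than compilable code: create_some_area() is a hypothetical caller
invented for illustration, the surrounding kernel context (types and B_*
constants) is assumed, and the shape mirrors the real call sites patched
below (vm_create_anonymous_area() and friends).

/* hypothetical caller - the real ones are vm_create_anonymous_area(),
 * vm_map_physical_memory(), vm_create_null_area(), _vm_map_file(),
 * vm_clone_area(), and vm_copy_area() in the diff below */
static status_t
create_some_area(vm_address_space *addressSpace, vm_store *store,
	vm_cache *cache, void **address, addr_t size, vm_area **_area)
{
	status_t status;

	/* cache_ref::cache can be swapped out from under us (e.g. by
	 * vm_copy_on_write_area()), so the ref stays locked across the call */
	mutex_lock(&cache->ref->lock);

	status = map_backing_store(addressSpace, store, address, 0, size,
		B_ANY_ADDRESS, 0, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
		REGION_NO_PRIVATE_MAP, _area, "some area");

	mutex_unlock(&cache->ref->lock);

	if (status < B_OK)
		vm_cache_release_ref(cache->ref);

	return status;
}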
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2006, Axel Dörfler, axeld@pinc-software.de.
+ * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
  * Distributed under the terms of the MIT License.
  *
  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
@@ -28,11 +28,11 @@ vm_page *vm_cache_lookup_page(vm_cache_ref *cacheRef, off_t page);
 void vm_cache_insert_page(vm_cache_ref *cacheRef, vm_page *page, off_t offset);
 void vm_cache_remove_page(vm_cache_ref *cacheRef, vm_page *page);
 void vm_cache_remove_consumer(vm_cache_ref *cacheRef, vm_cache *consumer);
-void vm_cache_add_consumer(vm_cache_ref *cacheRef, vm_cache *consumer);
+void vm_cache_add_consumer_locked(vm_cache_ref *cacheRef, vm_cache *consumer);
 status_t vm_cache_write_modified(vm_cache_ref *ref, bool fsReenter);
-status_t vm_cache_set_minimal_commitment(vm_cache_ref *ref, off_t commitment);
+status_t vm_cache_set_minimal_commitment_locked(vm_cache_ref *ref, off_t commitment);
 status_t vm_cache_resize(vm_cache_ref *cacheRef, off_t newSize);
-status_t vm_cache_insert_area(vm_cache_ref *cacheRef, vm_area *area);
+status_t vm_cache_insert_area_locked(vm_cache_ref *cacheRef, vm_area *area);
 status_t vm_cache_remove_area(vm_cache_ref *cacheRef, vm_area *area);
 
 #ifdef __cplusplus
@@ -501,6 +501,9 @@ insert_area(vm_address_space *addressSpace, void **_address,
 }
 
 
+/*!
+	The \a store's owner must be locked when calling this function.
+*/
 static status_t
 map_backing_store(vm_address_space *addressSpace, vm_store *store, void **_virtualAddress,
 	off_t offset, addr_t size, uint32 addressSpec, int wiring, int protection,
@@ -550,7 +553,7 @@ map_backing_store(vm_address_space *addressSpace, vm_store *store, void **_virtu
 		newCache->temporary = 1;
 		newCache->scan_skip = cache->scan_skip;
 
-		vm_cache_add_consumer(cacheRef, newCache);
+		vm_cache_add_consumer_locked(cacheRef, newCache);
 
 		cache = newCache;
 		cacheRef = newCache->ref;
@@ -559,7 +562,7 @@ map_backing_store(vm_address_space *addressSpace, vm_store *store, void **_virtu
 		cache->virtual_size = offset + size;
 	}
 
-	status = vm_cache_set_minimal_commitment(cacheRef, offset + size);
+	status = vm_cache_set_minimal_commitment_locked(cacheRef, offset + size);
 	if (status != B_OK)
 		goto err2;
 
@@ -581,7 +584,7 @@ map_backing_store(vm_address_space *addressSpace, vm_store *store, void **_virtu
 	area->cache_ref = cacheRef;
 	area->cache_offset = offset;
 		// point the cache back to the area
-	vm_cache_insert_area(cacheRef, area);
+	vm_cache_insert_area_locked(cacheRef, area);
 
 	// insert the area in the global area hash table
 	acquire_sem_etc(sAreaHashLock, WRITE_COUNT, 0 ,0);
@@ -817,8 +820,13 @@ vm_create_anonymous_area(team_id aid, const char *name, void **address,
 			break;
 	}
 
+	mutex_lock(&cache->ref->lock);
+
 	status = map_backing_store(addressSpace, store, address, 0, size, addressSpec, wiring,
 		protection, REGION_NO_PRIVATE_MAP, &area, name);
+
+	mutex_unlock(&cache->ref->lock);
+
 	if (status < B_OK) {
 		vm_cache_release_ref(cache->ref);
 		goto err1;
@@ -1006,8 +1014,13 @@ vm_map_physical_memory(team_id areaID, const char *name, void **_address,
 	cache->scan_skip = 1;
 	cache->virtual_size = size;
 
+	mutex_lock(&cache->ref->lock);
+
 	status = map_backing_store(addressSpace, store, _address, 0, size,
 		addressSpec & ~B_MTR_MASK, 0, protection, REGION_NO_PRIVATE_MAP, &area, name);
+
+	mutex_unlock(&cache->ref->lock);
+
 	if (status < B_OK)
 		vm_cache_release_ref(cache->ref);
 
@@ -1083,8 +1096,12 @@ vm_create_null_area(team_id aid, const char *name, void **address,
 	cache->scan_skip = 1;
 	cache->virtual_size = size;
 
+	mutex_lock(&cache->ref->lock);
+
 	status = map_backing_store(addressSpace, store, address, 0, size, addressSpec, 0,
 		B_KERNEL_READ_AREA, REGION_NO_PRIVATE_MAP, &area, name);
+
+	mutex_unlock(&cache->ref->lock);
 	vm_put_address_space(addressSpace);
 
 	if (status < B_OK) {
@@ -1188,9 +1205,13 @@ _vm_map_file(team_id aid, const char *name, void **_address, uint32 addressSpec,
 	if (status < B_OK)
 		goto err1;
 
+	mutex_lock(&cacheRef->lock);
+
 	status = map_backing_store(addressSpace, cacheRef->cache->store, _address,
 		offset, size, addressSpec, 0, protection, mapping, &area, name);
+
+	mutex_unlock(&cacheRef->lock);
 
 	if (status < B_OK || mapping == REGION_PRIVATE_MAP) {
 		// map_backing_store() cannot know we no longer need the ref
 		vm_cache_release_ref(cacheRef);
@@ -1282,9 +1303,13 @@ vm_clone_area(team_id team, const char *name, void **address, uint32 addressSpec
 	} else
 #endif
 	{
+		mutex_lock(&sourceArea->cache_ref->lock);
+
 		status = map_backing_store(addressSpace, sourceArea->cache_ref->cache->store,
 			address, sourceArea->cache_offset, sourceArea->size, addressSpec,
 			sourceArea->wiring, protection, mapping, &newArea, name);
+
+		mutex_unlock(&sourceArea->cache_ref->lock);
 	}
 	if (status == B_OK && mapping != REGION_PRIVATE_MAP) {
 		// If the mapping is REGION_PRIVATE_MAP, map_backing_store() needed
@@ -1463,22 +1488,28 @@ vm_copy_on_write_area(vm_area *area)
 	// So the old cache gets a new cache_ref and the area a new cache.
 
 	upperCacheRef = area->cache_ref;
+
+	// we will exchange the cache_ref's cache, so we better hold its lock
+	mutex_lock(&upperCacheRef->lock);
+
 	lowerCache = upperCacheRef->cache;
 
 	// create an anonymous store object
 	store = vm_store_create_anonymous_noswap(false, 0, 0);
-	if (store == NULL)
-		return B_NO_MEMORY;
-
-	upperCache = vm_cache_create(store);
-	if (upperCache == NULL) {
+	if (store == NULL) {
 		status = B_NO_MEMORY;
 		goto err1;
 	}
 
+	upperCache = vm_cache_create(store);
+	if (upperCache == NULL) {
+		status = B_NO_MEMORY;
+		goto err2;
+	}
+
 	status = vm_cache_ref_create(lowerCache);
 	if (status < B_OK)
-		goto err2;
+		goto err3;
 
 	lowerCacheRef = lowerCache->ref;
 
@@ -1488,24 +1519,30 @@ vm_copy_on_write_area(vm_area *area)
 	protection |= B_READ_AREA;
 
 	// we need to hold the cache_ref lock when we want to switch its cache
-	mutex_lock(&upperCacheRef->lock);
 	mutex_lock(&lowerCacheRef->lock);
 
 	upperCache->temporary = 1;
 	upperCache->scan_skip = lowerCache->scan_skip;
 	upperCache->virtual_base = lowerCache->virtual_base;
 	upperCache->virtual_size = lowerCache->virtual_size;
-	upperCache->source = lowerCache;
-	list_add_item(&lowerCache->consumers, upperCache);
 	upperCache->ref = upperCacheRef;
 	upperCacheRef->cache = upperCache;
 
 	// we need to manually alter the ref_count (divide it between the two)
-	lowerCacheRef->ref_count = upperCacheRef->ref_count - 1;
-	upperCacheRef->ref_count = 1;
+	// the lower cache_ref has only known refs, so compute them
+	{
+		int32 count = 0;
+		vm_cache *consumer = NULL;
+		while ((consumer = (vm_cache *)list_get_next_item(
+				&lowerCache->consumers, consumer)) != NULL) {
+			count++;
+		}
+		lowerCacheRef->ref_count = count;
+		atomic_add(&upperCacheRef->ref_count, -count);
+	}
 
-	// grab a ref to the cache object we're now linked to as a source
-	vm_cache_acquire_ref(lowerCacheRef);
+	vm_cache_add_consumer_locked(lowerCacheRef, upperCache);
 
 	// We now need to remap all pages from the area read-only, so that
 	// a copy will be created on next write access
@@ -1527,10 +1564,12 @@ vm_copy_on_write_area(vm_area *area)
 
 	return B_OK;
 
-err2:
+err3:
 	free(upperCache);
-err1:
+err2:
 	store->ops->destroy(store);
+err1:
+	mutex_unlock(&upperCacheRef->lock);
 	return status;
 }
 
@@ -1565,11 +1604,15 @@ vm_copy_area(team_id addressSpaceID, const char *name, void **_address, uint32 a
 
 	// First, create a cache on top of the source area
 
+	mutex_lock(&cacheRef->lock);
+
 	status = map_backing_store(addressSpace, cacheRef->cache->store, _address,
 		source->cache_offset, source->size, addressSpec, source->wiring, protection,
 		writableCopy ? REGION_PRIVATE_MAP : REGION_NO_PRIVATE_MAP,
 		&target, name);
+
+	mutex_unlock(&cacheRef->lock);
 
 	if (status < B_OK)
 		goto err;
 
@@ -1640,10 +1683,10 @@ vm_set_area_protection(team_id aspaceID, area_id areaID, uint32 newProtection)
 	}
 
 	cacheRef = area->cache_ref;
-	cache = cacheRef->cache;
 
 	mutex_lock(&cacheRef->lock);
 
+	cache = cacheRef->cache;
+
 	if ((area->protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0
 		&& (newProtection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) == 0) {
 		// change from read/write to read-only
@@ -1860,25 +1903,39 @@ dump_cache(int argc, char **argv)
 	vm_cache *cache;
 	vm_cache_ref *cacheRef;
 	bool showPages = false;
-	int addressIndex = 1;
+	bool showCache = true;
+	bool showCacheRef = true;
+	int i = 1;
 
 	if (argc < 2) {
-		kprintf("usage: %s [-p] <address>\n"
-			" if -p is specified, all pages are shown.", argv[0]);
+		kprintf("usage: %s [-ps] <address>\n"
+			" if -p is specified, all pages are shown, if -s is used\n"
+			" only the cache/cache_ref info is shown respectively.\n", argv[0]);
 		return 0;
 	}
-	if (!strcmp(argv[1], "-p")) {
-		showPages = true;
-		addressIndex++;
+	while (argv[i][0] == '-') {
+		char *arg = argv[i] + 1;
+		while (arg[0]) {
+			if (arg[0] == 'p')
+				showPages = true;
+			else if (arg[0] == 's') {
+				if (!strcmp(argv[0], "cache"))
+					showCacheRef = false;
+				else
+					showCache = false;
+			}
+			arg++;
+		}
+		i++;
 	}
-	if (strlen(argv[addressIndex]) < 2
-		|| argv[addressIndex][0] != '0'
-		|| argv[addressIndex][1] != 'x') {
+	if (argv[i] == NULL || strlen(argv[i]) < 2
+		|| argv[i][0] != '0'
+		|| argv[i][1] != 'x') {
 		kprintf("%s: invalid argument, pass address\n", argv[0]);
 		return 0;
 	}
 
-	addr_t address = strtoul(argv[addressIndex], NULL, 0);
+	addr_t address = strtoul(argv[i], NULL, 0);
 	if (address == NULL)
 		return 0;
 
@@ -1890,54 +1947,62 @@ dump_cache(int argc, char **argv)
 		cache = cacheRef->cache;
 	}
 
-	kprintf("cache_ref at %p:\n", cacheRef);
-	kprintf(" ref_count: %ld\n", cacheRef->ref_count);
-	kprintf(" lock.holder: %ld\n", cacheRef->lock.holder);
-	kprintf(" lock.sem: 0x%lx\n", cacheRef->lock.sem);
-	kprintf(" areas:\n");
+	if (showCacheRef) {
+		kprintf("cache_ref at %p:\n", cacheRef);
+		if (!showCache)
+			kprintf(" cache: %p\n", cacheRef->cache);
+		kprintf(" ref_count: %ld\n", cacheRef->ref_count);
+		kprintf(" lock.holder: %ld\n", cacheRef->lock.holder);
+		kprintf(" lock.sem: 0x%lx\n", cacheRef->lock.sem);
+		kprintf(" areas:\n");
 
-	for (vm_area *area = cacheRef->areas; area != NULL; area = area->cache_next) {
-		kprintf(" area 0x%lx, %s\n", area->id, area->name);
-		kprintf("\tbase_addr: 0x%lx, size: 0x%lx\n", area->base, area->size);
-		kprintf("\tprotection: 0x%lx\n", area->protection);
-		kprintf("\towner: 0x%lx ", area->address_space->id);
+		for (vm_area *area = cacheRef->areas; area != NULL; area = area->cache_next) {
+			kprintf(" area 0x%lx, %s\n", area->id, area->name);
+			kprintf("\tbase_addr: 0x%lx, size: 0x%lx\n", area->base, area->size);
+			kprintf("\tprotection: 0x%lx\n", area->protection);
+			kprintf("\towner: 0x%lx\n", area->address_space->id);
+		}
 	}
 
-	kprintf("cache at %p:\n", cache);
-	kprintf(" source: %p\n", cache->source);
-	kprintf(" store: %p\n", cache->store);
-	kprintf(" virtual_base: 0x%Lx\n", cache->virtual_base);
-	kprintf(" virtual_size: 0x%Lx\n", cache->virtual_size);
-	kprintf(" temporary: %ld\n", cache->temporary);
-	kprintf(" scan_skip: %ld\n", cache->scan_skip);
+	if (showCache) {
+		kprintf("cache at %p:\n", cache);
+		if (!showCacheRef)
+			kprintf(" cache_ref: %p\n", cache->ref);
+		kprintf(" source: %p\n", cache->source);
+		kprintf(" store: %p\n", cache->store);
+		kprintf(" virtual_base: 0x%Lx\n", cache->virtual_base);
+		kprintf(" virtual_size: 0x%Lx\n", cache->virtual_size);
+		kprintf(" temporary: %ld\n", cache->temporary);
+		kprintf(" scan_skip: %ld\n", cache->scan_skip);
 
-	kprintf(" consumers:\n");
-	vm_cache *consumer = NULL;
-	while ((consumer = (vm_cache *)list_get_next_item(&cache->consumers, consumer)) != NULL) {
-		kprintf("\t%p\n", consumer);
-	}
+		kprintf(" consumers:\n");
+		vm_cache *consumer = NULL;
+		while ((consumer = (vm_cache *)list_get_next_item(&cache->consumers, consumer)) != NULL) {
+			kprintf("\t%p\n", consumer);
+		}
 
-	kprintf(" pages:\n");
-	int32 count = 0;
-	for (vm_page *page = cache->page_list; page != NULL; page = page->cache_next) {
-		count++;
-		if (!showPages)
-			continue;
+		kprintf(" pages:\n");
+		int32 count = 0;
+		for (vm_page *page = cache->page_list; page != NULL; page = page->cache_next) {
+			count++;
+			if (!showPages)
+				continue;
 
-		if (page->type == PAGE_TYPE_PHYSICAL) {
-			kprintf("\t%p ppn 0x%lx offset 0x%lx type %ld state %ld (%s) ref_count %ld\n",
-				page, page->physical_page_number, page->cache_offset, page->type, page->state,
-				page_state_to_text(page->state), page->ref_count);
-		} else if(page->type == PAGE_TYPE_DUMMY) {
-			kprintf("\t%p DUMMY PAGE state %ld (%s)\n",
-				page, page->state, page_state_to_text(page->state));
-		} else
-			kprintf("\t%p UNKNOWN PAGE type %ld\n", page, page->type);
-	}
+			if (page->type == PAGE_TYPE_PHYSICAL) {
+				kprintf("\t%p ppn 0x%lx offset 0x%lx type %ld state %ld (%s) ref_count %ld\n",
+					page, page->physical_page_number, page->cache_offset, page->type, page->state,
+					page_state_to_text(page->state), page->ref_count);
+			} else if(page->type == PAGE_TYPE_DUMMY) {
+				kprintf("\t%p DUMMY PAGE state %ld (%s)\n",
+					page, page->state, page_state_to_text(page->state));
+			} else
+				kprintf("\t%p UNKNOWN PAGE type %ld\n", page, page->type);
+		}
 
-	if (!showPages)
-		kprintf("\t%ld in cache\n", count);
+		if (!showPages)
+			kprintf("\t%ld in cache\n", count);
+	}
 
 	return 0;
 }
 
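
For orientation, a hypothetical KDL session using the new flag; the address
and every field value below are invented, and the output shape simply follows
the kprintf() calls in the hunk above (with -s, the cache_ref command
suppresses the cache half and prints the extra cache: pointer line instead):

kdebug> cache_ref -s 0x801a3c40
cache_ref at 0x801a3c40:
 cache: 0x801b7000
 ref_count: 2
 lock.holder: -1
 lock.sem: 0x12f
 areas:
 area 0x1c2f, kernel_heap
	base_addr: 0x80400000, size: 0x10000
	protection: 0x30
	owner: 0x1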
@@ -2624,6 +2689,8 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
 	changeCount = addressSpace->change_count;
 	release_sem_etc(addressSpace->sem, READ_COUNT, 0);
 
+	mutex_lock(&topCacheRef->lock);
+
 	// See if this cache has a fault handler - this will do all the work for us
 	if (topCacheRef->cache->store->ops->fault != NULL) {
 		// Note, since the page fault is resolved with interrupts enabled, the
@@ -2631,12 +2698,15 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
 		// the store must take this into account
 		status_t status = (*topCacheRef->cache->store->ops->fault)(topCacheRef->cache->store, addressSpace, cacheOffset);
 		if (status != B_BAD_HANDLER) {
+			mutex_unlock(&topCacheRef->lock);
 			vm_cache_release_ref(topCacheRef);
 			vm_put_address_space(addressSpace);
 			return status;
 		}
 	}
 
+	mutex_unlock(&topCacheRef->lock);
+
 	// The top most cache has no fault handler, so let's see if the cache or its sources
 	// already have the page we're searching for (we're going from top to bottom)
 
@@ -2833,10 +2903,10 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
 
 		if (dummyPage.state == PAGE_STATE_BUSY) {
 			// we had inserted the dummy cache in another cache, so let's remove it from there
-			vm_cache_ref *temp_cache = dummyPage.cache->ref;
-			mutex_lock(&temp_cache->lock);
-			vm_cache_remove_page(temp_cache, &dummyPage);
-			mutex_unlock(&temp_cache->lock);
+			vm_cache_ref *tempRef = dummyPage.cache->ref;
+			mutex_lock(&tempRef->lock);
+			vm_cache_remove_page(tempRef, &dummyPage);
+			mutex_unlock(&tempRef->lock);
 			dummyPage.state = PAGE_STATE_INACTIVE;
 		}
 	}
@@ -2875,10 +2945,10 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
 	if (dummyPage.state == PAGE_STATE_BUSY) {
 		// We still have the dummy page in the cache - that happens if we didn't need
 		// to allocate a new page before, but could use one in another cache
-		vm_cache_ref *temp_cache = dummyPage.cache->ref;
-		mutex_lock(&temp_cache->lock);
-		vm_cache_remove_page(temp_cache, &dummyPage);
-		mutex_unlock(&temp_cache->lock);
+		vm_cache_ref *tempRef = dummyPage.cache->ref;
+		mutex_lock(&tempRef->lock);
+		vm_cache_remove_page(tempRef, &dummyPage);
+		mutex_unlock(&tempRef->lock);
 		dummyPage.state = PAGE_STATE_INACTIVE;
 	}
 
@@ -3339,7 +3409,7 @@ set_area_protection(area_id area, uint32 newProtection)
 status_t
 resize_area(area_id areaID, size_t newSize)
 {
-	vm_cache_ref *cache;
+	vm_cache_ref *cacheRef;
 	vm_area *area, *current;
 	status_t status = B_OK;
 	size_t oldSize;
@@ -3352,25 +3422,25 @@ resize_area(area_id areaID, size_t newSize)
 	if (area == NULL)
 		return B_BAD_VALUE;
 
+	cacheRef = area->cache_ref;
+	mutex_lock(&cacheRef->lock);
+
 	// Resize all areas of this area's cache
 
-	cache = area->cache_ref;
 	oldSize = area->size;
 
 	// ToDo: we should only allow to resize anonymous memory areas!
-	if (!cache->cache->temporary) {
+	if (!cacheRef->cache->temporary) {
 		status = B_NOT_ALLOWED;
-		goto err1;
+		goto out;
 	}
 
 	// ToDo: we must lock all address spaces here!
 
-	mutex_lock(&cache->lock);
-
 	if (oldSize < newSize) {
 		// We need to check if all areas of this cache can be resized
 
-		for (current = cache->areas; current; current = current->cache_next) {
+		for (current = cacheRef->areas; current; current = current->cache_next) {
 			if (current->address_space_next && current->address_space_next->base <= (current->base + newSize)) {
 				// if the area was created inside a reserved area, it can also be
 				// resized in that area
@@ -3381,14 +3451,14 @@ resize_area(area_id areaID, size_t newSize)
 					continue;
 
 				status = B_ERROR;
-				goto err2;
+				goto out;
 			}
 		}
 	}
 
 	// Okay, looks good so far, so let's do it
 
-	for (current = cache->areas; current; current = current->cache_next) {
+	for (current = cacheRef->areas; current; current = current->cache_next) {
 		if (current->address_space_next && current->address_space_next->base <= (current->base + newSize)) {
 			vm_area *next = current->address_space_next;
 			if (next->id == RESERVED_AREA_ID && next->cache_offset <= current->base
@@ -3421,17 +3491,16 @@ resize_area(area_id areaID, size_t newSize)
 	}
 
 	if (status == B_OK)
-		status = vm_cache_resize(cache, newSize);
+		status = vm_cache_resize(cacheRef, newSize);
 
 	if (status < B_OK) {
 		// This shouldn't really be possible, but hey, who knows
-		for (current = cache->areas; current; current = current->cache_next)
+		for (current = cacheRef->areas; current; current = current->cache_next)
 			current->size = oldSize;
 	}
 
-err2:
-	mutex_unlock(&cache->lock);
-err1:
+out:
+	mutex_unlock(&cacheRef->lock);
 	vm_put_area(area);
 
 	// ToDo: we must honour the lock restrictions of this area
@@ -185,7 +185,7 @@ vm_cache_release_ref(vm_cache_ref *cacheRef)
 		// delete this cache
 
 		if (cacheRef->areas != NULL)
-			panic("cache to be deleted still has areas");
+			panic("cache_ref %p to be deleted still has areas", cacheRef);
 		if (!list_is_empty(&cacheRef->cache->consumers))
 			panic("cache %p to be deleted still has consumers", cacheRef->cache);
 
@@ -330,14 +330,18 @@ vm_cache_write_modified(vm_cache_ref *ref, bool fsReenter)
 }
 
 
+/*!
+	Commits the memory to the store if the \a commitment is larger than
+	what's committed already.
+	Assumes you have the \a ref's lock held.
+*/
 status_t
-vm_cache_set_minimal_commitment(vm_cache_ref *ref, off_t commitment)
+vm_cache_set_minimal_commitment_locked(vm_cache_ref *ref, off_t commitment)
 {
 	status_t status = B_OK;
-	vm_store *store;
+	vm_store *store = ref->cache->store;
 
-	mutex_lock(&ref->lock);
-	store = ref->cache->store;
+	ASSERT_LOCKED_MUTEX(&ref->lock);
 
 	// If we don't have enough committed space to cover through to the new end of region...
 	if (store->committed_size < commitment) {
@@ -348,7 +352,6 @@ vm_cache_set_minimal_commitment(vm_cache_ref *ref, off_t commitment)
 		status = store->ops->commit(store, commitment);
 	}
 
-	mutex_unlock(&ref->lock);
 	return status;
 }
 
@@ -399,6 +402,7 @@ vm_cache_resize(vm_cache_ref *cacheRef, off_t newSize)
 /*!
 	Removes the \a consumer from the \a cacheRef's cache.
 	It will also release the reference to the cacheRef owned by the consumer.
+	Assumes you have the consumer's cache_ref lock held.
 */
 void
 vm_cache_remove_consumer(vm_cache_ref *cacheRef, vm_cache *consumer)
@@ -407,9 +411,9 @@ vm_cache_remove_consumer(vm_cache_ref *cacheRef, vm_cache *consumer)
 	vm_cache *newSource = NULL;
 
 	TRACE(("remove consumer vm cache %p from cache %p\n", consumer, cache));
+	ASSERT_LOCKED_MUTEX(&consumer->ref->lock);
 
 	mutex_lock(&cacheRef->lock);
 
 	list_remove_item(&cache->consumers, consumer);
 	consumer->source = NULL;
 
@@ -453,19 +457,21 @@ vm_cache_remove_consumer(vm_cache_ref *cacheRef, vm_cache *consumer)
 
 	if (newSource != NULL) {
 		// The remaining consumer has gotten a new source
+		mutex_lock(&newSource->ref->lock);
+
 		list_remove_item(&newSource->consumers, cache);
 		list_add_item(&newSource->consumers, consumer);
 		consumer->source = newSource;
 		cache->source = NULL;
 
-		mutex_unlock(&cacheRef->lock);
+		mutex_unlock(&newSource->ref->lock);
 
 		// Release the other reference to the cache - we take over
 		// its reference of its source cache
 		vm_cache_release_ref(cacheRef);
-	} else
-		mutex_unlock(&cacheRef->lock);
+	}
 
+	mutex_unlock(&cacheRef->lock);
 	vm_cache_release_ref(cacheRef);
 }
 
@@ -474,26 +480,30 @@ vm_cache_remove_consumer(vm_cache_ref *cacheRef, vm_cache *consumer)
 	Marks the \a cacheRef's cache as source of the \a consumer cache,
 	and adds the \a consumer to its list.
 	This also grabs a reference to the source cache.
+	Assumes you have the cache_ref and the consumer's lock held.
 */
 void
-vm_cache_add_consumer(vm_cache_ref *cacheRef, vm_cache *consumer)
+vm_cache_add_consumer_locked(vm_cache_ref *cacheRef, vm_cache *consumer)
 {
 	TRACE(("add consumer vm cache %p to cache %p\n", consumer, cacheRef->cache));
-	mutex_lock(&cacheRef->lock);
+	ASSERT_LOCKED_MUTEX(&cacheRef->lock);
+	ASSERT_LOCKED_MUTEX(&consumer->ref->lock);
 
 	consumer->source = cacheRef->cache;
 	list_add_item(&cacheRef->cache->consumers, consumer);
 
 	vm_cache_acquire_ref(cacheRef);
-
-	mutex_unlock(&cacheRef->lock);
 }
 
 
+/*!
+	Adds the \a area to the \a cacheRef.
+	Assumes you have the locked the cache_ref.
+*/
 status_t
-vm_cache_insert_area(vm_cache_ref *cacheRef, vm_area *area)
+vm_cache_insert_area_locked(vm_cache_ref *cacheRef, vm_area *area)
 {
-	mutex_lock(&cacheRef->lock);
+	ASSERT_LOCKED_MUTEX(&cacheRef->lock);
 
 	area->cache_next = cacheRef->areas;
 	if (area->cache_next)
@@ -501,7 +511,6 @@ vm_cache_insert_area(vm_cache_ref *cacheRef, vm_area *area)
 	area->cache_prev = NULL;
 	cacheRef->areas = area;
 
-	mutex_unlock(&cacheRef->lock);
 	return B_OK;
 }
 
Reference in New Issue
Block a user