Replaced the vm_cache mutex with a cutex. This should save quite a few semaphores.
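
For context: the diff itself shows that a kernel mutex at this time was backed by a kernel semaphore (vm_cache_create() had to handle B_NO_MORE_SEMS during early boot, and vm_init_post_sem() re-initialized cache locks once semaphores existed), so every vm_cache paid for one semaphore just to hold its lock. A cutex only needs kernel help under contention. The following is a minimal user-space sketch of that idea, using C++11 primitives as stand-ins for the kernel's wait-queue machinery; it illustrates the technique and is not Haiku's actual cutex implementation.

```cpp
// Illustrative sketch only -- NOT Haiku's cutex. The uncontended path
// takes the lock with a single atomic operation; only contended
// acquisitions ever touch a blocking primitive.
#include <atomic>
#include <condition_variable>
#include <mutex>

typedef int status_t;
const status_t B_OK = 0;
const status_t B_WOULD_BLOCK = -1;      // stand-in error value (assumption)

struct cutex {
    std::atomic<int>        value{0};   // 0 = free, 1 = held, 2 = held + maybe waiters
    const char*             name = nullptr;
    std::mutex              waitLock;   // stand-in for the kernel wait queue
    std::condition_variable waitCond;
};

inline void cutex_init(cutex* lock, const char* name)
{
    lock->value.store(0);
    lock->name = name;
    // Nothing here can fail for lack of semaphores -- which is why the
    // early-boot error handling in vm_cache_create() disappears below.
}

inline status_t cutex_trylock(cutex* lock)
{
    int expected = 0;
    return lock->value.compare_exchange_strong(expected, 1)
        ? B_OK : B_WOULD_BLOCK;
}

inline void cutex_lock(cutex* lock)
{
    if (cutex_trylock(lock) == B_OK)
        return;                         // fast path: no blocking at all
    std::unique_lock<std::mutex> guard(lock->waitLock);
    // Mark the lock contended and sleep until an unlock hands it over.
    while (lock->value.exchange(2) != 0)
        lock->waitCond.wait(guard);
}

inline void cutex_unlock(cutex* lock)
{
    if (lock->value.exchange(0) == 2) {
        // There may be waiters: wake one of them.
        std::lock_guard<std::mutex> guard(lock->waitLock);
        lock->waitCond.notify_one();
    }
}
```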


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@25277 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold 2008-05-01 01:59:09 +00:00
parent 8562499f44
commit 184de764fe
6 changed files with 112 additions and 128 deletions


@ -135,7 +135,7 @@ struct vm_dummy_page : vm_page {
};
struct vm_cache {
mutex lock;
cutex lock;
struct vm_area *areas;
vint32 ref_count;
struct list_link consumer_link;


@ -119,7 +119,7 @@ reserve_pages(file_cache_ref *ref, size_t reservePages, bool isWrite)
{
if (vm_low_memory_state() != B_NO_LOW_MEMORY) {
vm_cache *cache = ref->cache;
mutex_lock(&cache->lock);
cutex_lock(&cache->lock);
if (list_is_empty(&cache->consumers) && cache->areas == NULL
&& access_is_sequential(ref)) {
@ -153,7 +153,7 @@ reserve_pages(file_cache_ref *ref, size_t reservePages, bool isWrite)
}
}
}
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
}
vm_page_reserve_pages(reservePages);
@ -208,7 +208,7 @@ read_into_cache(file_cache_ref *ref, void *cookie, off_t offset,
}
push_access(ref, offset, bufferSize, false);
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
vm_page_unreserve_pages(lastReservedPages);
// read file into reserved pages
@ -229,7 +229,7 @@ read_into_cache(file_cache_ref *ref, void *cookie, off_t offset,
}
}
mutex_lock(&cache->lock);
cutex_lock(&cache->lock);
for (int32 i = 0; i < pageIndex; i++) {
busyConditions[i].Unpublish();
@ -263,7 +263,7 @@ read_into_cache(file_cache_ref *ref, void *cookie, off_t offset,
}
reserve_pages(ref, reservePages, false);
mutex_lock(&cache->lock);
cutex_lock(&cache->lock);
// make the pages accessible in the cache
for (int32 i = pageIndex; i-- > 0;) {
@ -292,7 +292,7 @@ read_from_file(file_cache_ref *ref, void *cookie, off_t offset,
vec.iov_len = bufferSize;
push_access(ref, offset, bufferSize, false);
mutex_unlock(&ref->cache->lock);
cutex_unlock(&ref->cache->lock);
vm_page_unreserve_pages(lastReservedPages);
status_t status = vfs_read_pages(ref->vnode, cookie, offset + pageOffset,
@ -300,7 +300,7 @@ read_from_file(file_cache_ref *ref, void *cookie, off_t offset,
if (status == B_OK)
reserve_pages(ref, reservePages, false);
mutex_lock(&ref->cache->lock);
cutex_lock(&ref->cache->lock);
return status;
}
@ -351,7 +351,7 @@ write_to_cache(file_cache_ref *ref, void *cookie, off_t offset,
}
push_access(ref, offset, bufferSize, true);
mutex_unlock(&ref->cache->lock);
cutex_unlock(&ref->cache->lock);
vm_page_unreserve_pages(lastReservedPages);
// copy contents (and read in partially written pages first)
@ -433,7 +433,7 @@ write_to_cache(file_cache_ref *ref, void *cookie, off_t offset,
if (status == B_OK)
reserve_pages(ref, reservePages, true);
mutex_lock(&ref->cache->lock);
cutex_lock(&ref->cache->lock);
// unmap the pages again
@ -482,7 +482,7 @@ write_to_file(file_cache_ref *ref, void *cookie, off_t offset, int32 pageOffset,
vec.iov_len = bufferSize;
push_access(ref, offset, bufferSize, true);
mutex_unlock(&ref->cache->lock);
cutex_unlock(&ref->cache->lock);
vm_page_unreserve_pages(lastReservedPages);
status_t status = B_OK;
@ -508,7 +508,7 @@ write_to_file(file_cache_ref *ref, void *cookie, off_t offset, int32 pageOffset,
if (status == B_OK)
reserve_pages(ref, reservePages, true);
mutex_lock(&ref->cache->lock);
cutex_lock(&ref->cache->lock);
return status;
}
@ -604,7 +604,7 @@ cache_io(void *_cacheRef, void *cookie, off_t offset, addr_t buffer,
size_t reservePages = 0;
reserve_pages(ref, lastReservedPages, doWrite);
MutexLocker locker(cache->lock);
CutexLocker locker(cache->lock);
while (bytesLeft > 0) {
// check if this page is already in memory
@ -780,7 +780,7 @@ cache_prefetch_vnode(struct vnode *vnode, off_t offset, size_t size)
off_t lastOffset = offset;
size_t lastSize = 0;
mutex_lock(&cache->lock);
cutex_lock(&cache->lock);
for (; bytesLeft > 0; offset += B_PAGE_SIZE) {
// check if this page is already in memory
@ -792,9 +792,9 @@ cache_prefetch_vnode(struct vnode *vnode, off_t offset, size_t size)
// if busy retry again later
ConditionVariableEntry entry;
entry.Add(page);
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
entry.Wait();
mutex_lock(&cache->lock);
cutex_lock(&cache->lock);
goto restart;
}
@ -825,7 +825,7 @@ cache_prefetch_vnode(struct vnode *vnode, off_t offset, size_t size)
read_into_cache(ref, lastOffset, lastLeft, NULL, 0);
out:
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
vm_cache_release_ref(cache);
#endif
}
@ -985,7 +985,7 @@ file_cache_set_size(void *_cacheRef, off_t newSize)
if (ref == NULL)
return B_OK;
MutexLocker _(ref->cache->lock);
CutexLocker _(ref->cache->lock);
off_t offset = ref->cache->virtual_size;
off_t size = newSize;
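
One pattern worth noting in the file cache changes above: read_into_cache(), read_from_file(), write_to_cache(), and write_to_file() all drop the cache lock before any blocking VFS call and reacquire it afterwards, with busy pages and condition variables bridging the gap for other threads. A compressed sketch of that shape, using simplified stand-in declarations rather than the real kernel interfaces:

```cpp
// Shape of the locking discipline used above (stand-ins, not Haiku's API).
struct cutex;
typedef int status_t;
const status_t B_OK = 0;

void cutex_lock(cutex* lock);
void cutex_unlock(cutex* lock);
status_t vfs_do_io();                // hypothetical blocking VFS call

status_t
transfer_chunk(cutex* cacheLock)
{
    // Entered with cacheLock held: the affected pages have already been
    // marked busy, so concurrent lookups wait on them instead of racing.
    cutex_unlock(cacheLock);         // never hold the cache lock across I/O

    status_t status = vfs_do_io();   // may sleep arbitrarily long

    cutex_lock(cacheLock);           // reacquire to publish the result
    // ... clear busy markers, notify waiting threads ...
    return status;
}
```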


@ -1236,7 +1236,7 @@ map_backing_store(vm_address_space *addressSpace, vm_cache *cache,
TRACE(("map_backing_store: aspace %p, cache %p, *vaddr %p, offset 0x%Lx, size %lu, addressSpec %ld, wiring %d, protection %d, _area %p, area_name '%s'\n",
addressSpace, cache, *_virtualAddress, offset, size, addressSpec,
wiring, protection, _area, areaName));
ASSERT_LOCKED_MUTEX(&cache->lock);
ASSERT_LOCKED_CUTEX(&cache->lock);
vm_area *area = create_area_struct(addressSpace, areaName, wiring,
protection);
@ -1267,7 +1267,7 @@ map_backing_store(vm_address_space *addressSpace, vm_cache *cache,
goto err1;
}
mutex_lock(&newCache->lock);
cutex_lock(&newCache->lock);
newCache->type = CACHE_TYPE_RAM;
newCache->temporary = 1;
newCache->scan_skip = cache->scan_skip;
@ -1310,7 +1310,7 @@ map_backing_store(vm_address_space *addressSpace, vm_cache *cache,
// point the cache back to the area
vm_cache_insert_area_locked(cache, area);
if (mapping == REGION_PRIVATE_MAP)
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
// insert the area in the global area hash table
acquire_sem_etc(sAreaHashLock, WRITE_COUNT, 0 ,0);
@ -1328,10 +1328,10 @@ err2:
// We created this cache, so we must delete it again. Note, that we
// need to temporarily unlock the source cache or we'll otherwise
// deadlock, since vm_cache_remove_consumer will try to lock it too.
mutex_unlock(&cache->lock);
mutex_unlock(&sourceCache->lock);
cutex_unlock(&cache->lock);
cutex_unlock(&sourceCache->lock);
vm_cache_release_ref(cache);
mutex_lock(&sourceCache->lock);
cutex_lock(&sourceCache->lock);
}
err1:
free(area->name);
@ -1525,13 +1525,13 @@ vm_create_anonymous_area(team_id team, const char *name, void **address,
break;
}
mutex_lock(&cache->lock);
cutex_lock(&cache->lock);
status = map_backing_store(addressSpace, cache, address, 0, size,
addressSpec, wiring, protection, REGION_NO_PRIVATE_MAP, &area, name,
unmapAddressRange);
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
if (status < B_OK) {
vm_cache_release_ref(cache);
@ -1554,7 +1554,7 @@ vm_create_anonymous_area(team_id team, const char *name, void **address,
vm_page_reserve_pages(reservePages);
// Allocate and map all pages for this area
mutex_lock(&cache->lock);
cutex_lock(&cache->lock);
off_t offset = 0;
for (addr_t address = area->base; address < area->base + (area->size - 1);
@ -1579,7 +1579,7 @@ vm_create_anonymous_area(team_id team, const char *name, void **address,
vm_map_page(area, page, address, protection);
}
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
vm_page_unreserve_pages(reservePages);
break;
}
@ -1595,7 +1595,7 @@ vm_create_anonymous_area(team_id team, const char *name, void **address,
if (!kernel_startup)
panic("ALREADY_WIRED flag used outside kernel startup\n");
mutex_lock(&cache->lock);
cutex_lock(&cache->lock);
map->ops->lock(map);
for (addr_t virtualAddress = area->base; virtualAddress < area->base
@ -1622,7 +1622,7 @@ vm_create_anonymous_area(team_id team, const char *name, void **address,
}
map->ops->unlock(map);
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
break;
}
@ -1638,7 +1638,7 @@ vm_create_anonymous_area(team_id team, const char *name, void **address,
off_t offset = 0;
vm_page_reserve_pages(reservePages);
mutex_lock(&cache->lock);
cutex_lock(&cache->lock);
map->ops->lock(map);
for (virtualAddress = area->base; virtualAddress < area->base
@ -1660,7 +1660,7 @@ vm_create_anonymous_area(team_id team, const char *name, void **address,
}
map->ops->unlock(map);
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
vm_page_unreserve_pages(reservePages);
break;
}
@ -1739,13 +1739,13 @@ vm_map_physical_memory(team_id team, const char *name, void **_address,
cache->type = CACHE_TYPE_DEVICE;
cache->virtual_size = size;
mutex_lock(&cache->lock);
cutex_lock(&cache->lock);
status_t status = map_backing_store(locker.AddressSpace(), cache, _address,
0, size, addressSpec & ~B_MTR_MASK, B_FULL_LOCK, protection,
REGION_NO_PRIVATE_MAP, &area, name, false);
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
if (status < B_OK)
vm_cache_release_ref(cache);
@ -1821,13 +1821,13 @@ vm_create_null_area(team_id team, const char *name, void **address,
cache->type = CACHE_TYPE_NULL;
cache->virtual_size = size;
mutex_lock(&cache->lock);
cutex_lock(&cache->lock);
status = map_backing_store(locker.AddressSpace(), cache, address, 0, size,
addressSpec, 0, B_KERNEL_READ_AREA, REGION_NO_PRIVATE_MAP, &area, name,
false);
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
if (status < B_OK) {
vm_cache_release_ref(cache);
@ -1929,14 +1929,14 @@ _vm_map_file(team_id team, const char *name, void **_address, uint32 addressSpec
if (status < B_OK)
return status;
mutex_lock(&cache->lock);
cutex_lock(&cache->lock);
vm_area *area;
status = map_backing_store(locker.AddressSpace(), cache, _address,
offset, size, addressSpec, 0, protection, mapping, &area, name,
addressSpec == B_EXACT_ADDRESS);
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
if (status < B_OK || mapping == REGION_PRIVATE_MAP) {
// map_backing_store() cannot know we no longer need the ref
@ -1971,14 +1971,14 @@ vm_area_get_locked_cache(vm_area *area)
vm_cache_acquire_ref(cache);
locker.Unlock();
mutex_lock(&cache->lock);
cutex_lock(&cache->lock);
locker.Lock();
if (cache == area->cache)
return cache;
// the cache changed in the meantime
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
vm_cache_release_ref(cache);
}
}
@ -1987,7 +1987,7 @@ vm_area_get_locked_cache(vm_area *area)
void
vm_area_put_locked_cache(vm_cache *cache)
{
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
vm_cache_release_ref(cache);
}
@ -2207,7 +2207,7 @@ vm_copy_on_write_area(vm_cache* lowerCache)
return B_NO_MEMORY;
}
mutex_lock(&upperCache->lock);
cutex_lock(&upperCache->lock);
upperCache->type = CACHE_TYPE_RAM;
upperCache->temporary = 1;
@ -3040,8 +3040,10 @@ dump_cache(int argc, char **argv)
kprintf(" virtual_size: 0x%Lx\n", cache->virtual_size);
kprintf(" temporary: %ld\n", cache->temporary);
kprintf(" scan_skip: %ld\n", cache->scan_skip);
kprintf(" lock: %p\n", &cache->lock);
#ifdef KDEBUG
kprintf(" lock.holder: %ld\n", cache->lock.holder);
kprintf(" lock.sem: 0x%lx\n", cache->lock.sem);
#endif
kprintf(" areas:\n");
for (vm_area *area = cache->areas; area != NULL; area = area->cache_next) {
@ -3689,15 +3691,6 @@ vm_init_post_sem(kernel_args *args)
arch_vm_translation_map_init_post_sem(args);
vm_address_space_init_post_sem();
for (area = vm_kernel_address_space()->areas; area;
area = area->address_space_next) {
if (area->id == RESERVED_AREA_ID)
continue;
if (area->cache->lock.sem < 0)
mutex_init(&area->cache->lock, "vm_cache");
}
sAreaHashLock = create_sem(WRITE_COUNT, "area hash");
mutex_init(&sAreaCacheLock, "area->cache");
mutex_init(&sMappingLock, "page mappings");
@ -3859,10 +3852,10 @@ retry:
vm_cache_acquire_ref(source);
mutex_lock(&source->lock);
cutex_lock(&source->lock);
if (source->busy) {
mutex_unlock(&source->lock);
cutex_unlock(&source->lock);
vm_cache_release_ref(source);
goto retry;
}
@ -3896,7 +3889,7 @@ fault_remove_dummy_page(vm_dummy_page &dummyPage, bool isLocked)
{
vm_cache *cache = dummyPage.cache;
if (!isLocked)
mutex_lock(&cache->lock);
cutex_lock(&cache->lock);
if (dummyPage.state == PAGE_STATE_BUSY) {
vm_cache_remove_page(cache, &dummyPage);
@ -3905,7 +3898,7 @@ fault_remove_dummy_page(vm_dummy_page &dummyPage, bool isLocked)
}
if (!isLocked)
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
vm_cache_release_ref(cache);
}
@ -3930,7 +3923,7 @@ fault_find_page(vm_translation_map *map, vm_cache *topCache,
vm_page *page = NULL;
vm_cache_acquire_ref(cache);
mutex_lock(&cache->lock);
cutex_lock(&cache->lock);
// we release this later in the loop
while (cache != NULL) {
@ -3954,9 +3947,9 @@ fault_find_page(vm_translation_map *map, vm_cache *topCache,
{
ConditionVariableEntry entry;
entry.Add(page);
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
entry.Wait();
mutex_lock(&cache->lock);
cutex_lock(&cache->lock);
}
if (cache->busy) {
@ -3965,7 +3958,7 @@ fault_find_page(vm_translation_map *map, vm_cache *topCache,
// the top cache.
ConditionVariableEntry entry;
entry.Add(cache);
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
vm_cache_release_ref(cache);
entry.Wait();
*_restart = true;
@ -3989,7 +3982,7 @@ fault_find_page(vm_translation_map *map, vm_cache *topCache,
ConditionVariable busyCondition;
busyCondition.Publish(page, "page");
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
// get a virtual address for the page
iovec vec;
@ -4004,7 +3997,7 @@ fault_find_page(vm_translation_map *map, vm_cache *topCache,
map->ops->put_physical_page((addr_t)vec.iov_base);
mutex_lock(&cache->lock);
cutex_lock(&cache->lock);
if (status < B_OK) {
// on error remove and free the page
@ -4015,7 +4008,7 @@ fault_find_page(vm_translation_map *map, vm_cache *topCache,
vm_cache_remove_page(cache, page);
vm_page_set_state(page, PAGE_STATE_FREE);
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
vm_cache_release_ref(cache);
return status;
}
@ -4038,16 +4031,16 @@ fault_find_page(vm_translation_map *map, vm_cache *topCache,
// the source cache is currently in the process of being merged
// with his only consumer (cacheRef); since its pages are moved
// upwards, too, we try this cache again
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
thread_yield(true);
mutex_lock(&cache->lock);
cutex_lock(&cache->lock);
if (cache->busy) {
// The cache became busy, which means, it is about to be
// removed by vm_cache_remove_consumer(). We start again with
// the top cache.
ConditionVariableEntry entry;
entry.Add(cache);
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
vm_cache_release_ref(cache);
entry.Wait();
*_restart = true;
@ -4058,7 +4051,7 @@ fault_find_page(vm_translation_map *map, vm_cache *topCache,
} else if (status < B_OK)
nextCache = NULL;
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
// at this point, we still hold a ref to this cache (through lastCacheRef)
cache = nextCache;
@ -4073,7 +4066,7 @@ fault_find_page(vm_translation_map *map, vm_cache *topCache,
// Read-only pages come in the deepest cache - only the
// top most cache may have direct write access.
vm_cache_acquire_ref(cache);
mutex_lock(&cache->lock);
cutex_lock(&cache->lock);
if (cache->busy) {
// The cache became busy, which means, it is about to be
@ -4081,7 +4074,7 @@ fault_find_page(vm_translation_map *map, vm_cache *topCache,
// the top cache.
ConditionVariableEntry entry;
entry.Add(cache);
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
vm_cache_release_ref(cache);
entry.Wait();
*_restart = true;
@ -4092,7 +4085,7 @@ fault_find_page(vm_translation_map *map, vm_cache *topCache,
// for, but it could as well be a dummy page from someone
// else or an otherwise busy page. We can't really handle
// that here. Hence we completely restart this functions.
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
vm_cache_release_ref(cache);
*_restart = true;
}
@ -4135,7 +4128,7 @@ fault_get_page(vm_translation_map *map, vm_cache *topCache, off_t cacheOffset,
break;
// Remove the dummy page, if it has been inserted.
mutex_lock(&topCache->lock);
cutex_lock(&topCache->lock);
if (dummyPage.state == PAGE_STATE_BUSY) {
ASSERT_PRINT(dummyPage.cache == topCache, "dummy page: %p\n",
@ -4143,7 +4136,7 @@ fault_get_page(vm_translation_map *map, vm_cache *topCache, off_t cacheOffset,
fault_remove_dummy_page(dummyPage, true);
}
mutex_unlock(&topCache->lock);
cutex_unlock(&topCache->lock);
}
if (page == NULL) {
@ -4182,9 +4175,9 @@ fault_get_page(vm_translation_map *map, vm_cache *topCache, off_t cacheOffset,
// This is not the top cache into which we inserted the dummy page,
// let's remove it from there. We need to temporarily unlock our
// cache to comply with the cache locking policy.
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
fault_remove_dummy_page(dummyPage, false);
mutex_lock(&cache->lock);
cutex_lock(&cache->lock);
}
}
@ -4232,8 +4225,8 @@ if (cacheOffset == 0x12000)
if (sourcePage->state != PAGE_STATE_MODIFIED)
vm_page_set_state(sourcePage, PAGE_STATE_ACTIVE);
mutex_unlock(&cache->lock);
mutex_lock(&topCache->lock);
cutex_unlock(&cache->lock);
cutex_lock(&topCache->lock);
// Since the top cache has been unlocked for a while, someone else
// (vm_cache_remove_consumer()) might have replaced our dummy page.
@ -4251,9 +4244,9 @@ if (cacheOffset == 0x12000)
// The page is busy, wait till it becomes unbusy.
ConditionVariableEntry entry;
entry.Add(newPage);
mutex_unlock(&topCache->lock);
cutex_unlock(&topCache->lock);
entry.Wait();
mutex_lock(&topCache->lock);
cutex_lock(&topCache->lock);
}
if (newPage) {
@ -4365,7 +4358,7 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
}
}
mutex_unlock(&topCache->lock);
cutex_unlock(&topCache->lock);
// The top most cache has no fault handler, so let's see if the cache or its sources
// already have the page we're searching for (we're going from top to bottom)
@ -4415,7 +4408,7 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
vm_map_page(area, page, address, newProtection);
mutex_unlock(&pageSource->lock);
cutex_unlock(&pageSource->lock);
vm_cache_release_ref(pageSource);
}
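
The MutexLocker to CutexLocker switch in cache_io() and file_cache_set_size() above implies a drop-in RAII guard. A minimal sketch of what such a locker could look like, inferred from its usage here; this is an assumption, not the tree's actual AutoLocker-based classes:

```cpp
// Hypothetical CutexLocker sketch -- inferred from usage, not Haiku's code.
struct cutex;
void cutex_lock(cutex* lock);
void cutex_unlock(cutex* lock);

class CutexLocker {
public:
    explicit CutexLocker(cutex& lock)
        : fLock(&lock), fLocked(true)
    {
        cutex_lock(fLock);
    }

    ~CutexLocker()
    {
        if (fLocked)
            cutex_unlock(fLock);     // released on every exit path
    }

    void Unlock()
    {
        if (fLocked) {
            cutex_unlock(fLock);
            fLocked = false;
        }
    }

private:
    cutex*  fLock;
    bool    fLocked;
};
```

With this shape, `CutexLocker locker(cache->lock);` pins the lock for the enclosing scope, which matches how cache_io() uses it above.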


@ -159,7 +159,7 @@ delete_cache(vm_cache* cache)
if (cache->source)
vm_cache_remove_consumer(cache->source, cache);
mutex_destroy(&cache->lock);
cutex_destroy(&cache->lock);
free(cache);
}
@ -194,14 +194,7 @@ vm_cache_create(vm_store* store)
if (cache == NULL)
return NULL;
status_t status = mutex_init(&cache->lock, "vm_cache");
if (status < B_OK && (!kernel_startup || status != B_NO_MORE_SEMS)) {
// During early boot, we cannot create semaphores - they are
// created later in vm_init_post_sem()
free(cache);
return NULL;
}
cutex_init(&cache->lock, "vm_cache");
list_init_etc(&cache->consumers, offsetof(vm_cache, consumer_link));
cache->page_list = NULL;
cache->areas = NULL;
@ -272,7 +265,7 @@ vm_cache_release_ref(vm_cache* cache)
vm_cache* c;
bool locked = false;
if (cacheRef->lock.holder != find_thread(NULL)) {
mutex_lock(&cacheRef->lock);
cutex_lock(&cacheRef->lock);
locked = true;
}
for (a = cacheRef->areas; a != NULL; a = a->cache_next)
@ -285,7 +278,7 @@ vm_cache_release_ref(vm_cache* cache)
if (cacheRef->ref_count < min)
panic("cache_ref %p has too little ref_count!!!!", cacheRef);
if (locked)
mutex_unlock(&cacheRef->lock);
cutex_unlock(&cacheRef->lock);
}
#endif
return;
@ -317,7 +310,7 @@ vm_cache_acquire_page_cache_ref(vm_page* page)
vm_page*
vm_cache_lookup_page(vm_cache* cache, off_t offset)
{
ASSERT_LOCKED_MUTEX(&cache->lock);
ASSERT_LOCKED_CUTEX(&cache->lock);
struct page_lookup_key key;
key.offset = (uint32)(offset >> PAGE_SHIFT);
@ -343,7 +336,7 @@ vm_cache_insert_page(vm_cache* cache, vm_page* page, off_t offset)
{
TRACE(("vm_cache_insert_page: cache %p, page %p, offset %Ld\n",
cache, page, offset));
ASSERT_LOCKED_MUTEX(&cache->lock);
ASSERT_LOCKED_CUTEX(&cache->lock);
if (page->cache != NULL) {
panic("insert page %p into cache %p: page cache is set to %p\n",
@ -390,7 +383,7 @@ void
vm_cache_remove_page(vm_cache* cache, vm_page* page)
{
TRACE(("vm_cache_remove_page: cache %p, page %p\n", cache, page));
ASSERT_LOCKED_MUTEX(&cache->lock);
ASSERT_LOCKED_CUTEX(&cache->lock);
if (page->cache != cache) {
panic("remove page %p from cache %p: page cache is set to %p\n", page,
@ -428,9 +421,9 @@ vm_cache_write_modified(vm_cache* cache, bool fsReenter)
if (cache->temporary)
return B_OK;
mutex_lock(&cache->lock);
cutex_lock(&cache->lock);
status_t status = vm_page_write_modified_pages(cache, fsReenter);
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
return status;
}
@ -445,7 +438,7 @@ vm_cache_set_minimal_commitment_locked(vm_cache* cache, off_t commitment)
{
TRACE(("vm_cache_set_minimal_commitment_locked(cache %p, commitment %Ld)\n",
cache, commitment));
ASSERT_LOCKED_MUTEX(&cache->lock);
ASSERT_LOCKED_CUTEX(&cache->lock);
vm_store* store = cache->store;
status_t status = B_OK;
@ -478,7 +471,7 @@ vm_cache_resize(vm_cache* cache, off_t newSize)
{
TRACE(("vm_cache_resize(cache %p, newSize %Ld) old size %Ld\n",
cache, newSize, cache->virtual_size));
ASSERT_LOCKED_MUTEX(&cache->lock);
ASSERT_LOCKED_CUTEX(&cache->lock);
status_t status = cache->store->ops->commit(cache->store, newSize);
if (status != B_OK)
@ -509,9 +502,9 @@ vm_cache_resize(vm_cache* cache, off_t newSize)
// wait for page to become unbusy
ConditionVariableEntry entry;
entry.Add(page);
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
entry.Wait();
mutex_lock(&cache->lock);
cutex_lock(&cache->lock);
// restart from the start of the list
page = cache->page_list;
@ -541,7 +534,7 @@ void
vm_cache_remove_consumer(vm_cache* cache, vm_cache* consumer)
{
TRACE(("remove consumer vm cache %p from cache %p\n", consumer, cache));
ASSERT_LOCKED_MUTEX(&consumer->lock);
ASSERT_LOCKED_CUTEX(&consumer->lock);
// Remove the store ref before locking the cache. Otherwise we'd call into
// the VFS while holding the cache lock, which would reverse the usual
@ -550,7 +543,7 @@ vm_cache_remove_consumer(vm_cache* cache, vm_cache* consumer)
cache->store->ops->release_ref(cache->store);
// remove the consumer from the cache, but keep its reference until later
mutex_lock(&cache->lock);
cutex_lock(&cache->lock);
list_remove_item(&cache->consumers, consumer);
consumer->source = NULL;
@ -576,10 +569,10 @@ vm_cache_remove_consumer(vm_cache* cache, vm_cache* consumer)
// need to unlock our cache now
busyCondition.Publish(cache, "cache");
cache->busy = true;
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
mutex_lock(&consumer->lock);
mutex_lock(&cache->lock);
cutex_lock(&consumer->lock);
cutex_lock(&cache->lock);
if (cache->areas != NULL || cache->source == NULL
|| list_is_empty(&cache->consumers)
@ -590,7 +583,7 @@ vm_cache_remove_consumer(vm_cache* cache, vm_cache* consumer)
merge = false;
cache->busy = false;
busyCondition.Unpublish();
mutex_unlock(&consumer->lock);
cutex_unlock(&consumer->lock);
vm_cache_release_ref(consumer);
}
}
@ -644,14 +637,14 @@ vm_cache_remove_consumer(vm_cache* cache, vm_cache* consumer)
vm_cache* newSource = cache->source;
// The remaining consumer has gotten a new source
mutex_lock(&newSource->lock);
cutex_lock(&newSource->lock);
list_remove_item(&newSource->consumers, cache);
list_add_item(&newSource->consumers, consumer);
consumer->source = newSource;
cache->source = NULL;
mutex_unlock(&newSource->lock);
cutex_unlock(&newSource->lock);
// Release the other reference to the cache - we take over
// its reference of its source cache; we can do this here
@ -661,7 +654,7 @@ if (cache->ref_count < 2)
panic("cacheRef %p ref count too low!\n", cache);
vm_cache_release_ref(cache);
mutex_unlock(&consumer->lock);
cutex_unlock(&consumer->lock);
vm_cache_release_ref(consumer);
}
@ -669,7 +662,7 @@ panic("cacheRef %p ref count too low!\n", cache);
busyCondition.Unpublish();
}
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
vm_cache_release_ref(cache);
}
@ -683,8 +676,8 @@ void
vm_cache_add_consumer_locked(vm_cache* cache, vm_cache* consumer)
{
TRACE(("add consumer vm cache %p to cache %p\n", consumer, cache));
ASSERT_LOCKED_MUTEX(&cache->lock);
ASSERT_LOCKED_MUTEX(&consumer->lock);
ASSERT_LOCKED_CUTEX(&cache->lock);
ASSERT_LOCKED_CUTEX(&consumer->lock);
consumer->source = cache;
list_add_item(&cache->consumers, consumer);
@ -703,7 +696,7 @@ status_t
vm_cache_insert_area_locked(vm_cache* cache, vm_area* area)
{
TRACE(("vm_cache_insert_area_locked(cache %p, area %p)\n", cache, area));
ASSERT_LOCKED_MUTEX(&cache->lock);
ASSERT_LOCKED_CUTEX(&cache->lock);
area->cache_next = cache->areas;
if (area->cache_next)
@ -723,7 +716,7 @@ vm_cache_remove_area(vm_cache* cache, vm_area* area)
{
TRACE(("vm_cache_remove_area(cache %p, area %p)\n", cache, area));
MutexLocker locker(cache->lock);
CutexLocker locker(cache->lock);
if (area->cache_prev)
area->cache_prev->cache_next = area->cache_next;
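
vm_cache.cpp above leans on ASSERT_LOCKED_CUTEX() to document which functions must be entered with the cache lock held, and the dump_cache() hunk earlier prints a lock.holder field only under KDEBUG. Putting those together, one plausible shape for the macro is the sketch below; this is an inference, not the actual header definition:

```cpp
// Hypothetical sketch of ASSERT_LOCKED_CUTEX(), assuming the cutex
// records its holding thread only in KDEBUG builds (as suggested by the
// KDEBUG-guarded lock.holder output in dump_cache() above).
#ifdef KDEBUG
#	define ASSERT_LOCKED_CUTEX(lock) \
		ASSERT((lock)->holder == thread_get_current_thread_id())
#else
#	define ASSERT_LOCKED_CUTEX(lock) \
		do {} while (false)
#endif
```

A holder field that exists only in debug builds would also explain why steal_page() in the last file below drops its holder comparison in favor of a plain cutex_trylock().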


@ -66,15 +66,15 @@ PageCacheLocker::Lock(vm_page* page, bool dontWait)
return false;
if (dontWait) {
if (mutex_trylock(&cache->lock) != B_OK) {
if (cutex_trylock(&cache->lock) != B_OK) {
vm_cache_release_ref(cache);
return false;
}
} else
mutex_lock(&cache->lock);
cutex_lock(&cache->lock);
if (cache != page->cache || _IgnorePage(page)) {
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
vm_cache_release_ref(cache);
return false;
}
@ -91,7 +91,7 @@ PageCacheLocker::Unlock()
return;
vm_cache* cache = fPage->cache;
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
vm_cache_release_ref(cache);
fPage = NULL;


@ -1043,7 +1043,7 @@ page_writer(void* /*unused*/)
for (uint32 i = 0; i < numPages; i++) {
vm_cache *cache = u.pages[i]->cache;
mutex_lock(&cache->lock);
cutex_lock(&cache->lock);
if (writeStatus[i] == B_OK) {
// put it into the active queue
@ -1069,7 +1069,7 @@ page_writer(void* /*unused*/)
busyConditions[i].Unpublish();
u.caches[i] = cache;
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
}
for (uint32 i = 0; i < numPages; i++) {
@ -1156,12 +1156,10 @@ steal_page(vm_page *page, bool stealActive)
{
fCache = vm_cache_acquire_page_cache_ref(page);
if (fCache != NULL) {
if (fCache->lock.holder != thread_get_current_thread_id()) {
if (mutex_trylock(&fCache->lock) != B_OK)
return;
if (cutex_trylock(&fCache->lock) != B_OK)
return;
fOwnsLock = true;
}
fOwnsLock = true;
if (fCache == page->cache)
fIsLocked = true;
@ -1171,7 +1169,7 @@ steal_page(vm_page *page, bool stealActive)
~PageCacheTryLocker()
{
if (fOwnsLock)
mutex_unlock(&fCache->lock);
cutex_unlock(&fCache->lock);
if (fCache != NULL)
vm_cache_release_ref(fCache);
}
@ -1347,9 +1345,9 @@ vm_page_write_modified_pages(vm_cache *cache, bool fsReenter)
// clear the modified flag
vm_clear_map_flags(page, PAGE_MODIFIED);
mutex_unlock(&cache->lock);
cutex_unlock(&cache->lock);
status_t status = write_page(page, fsReenter);
mutex_lock(&cache->lock);
cutex_lock(&cache->lock);
InterruptsSpinLocker locker(&sPageLock);