Removed the condition that only caches with a source can be merged.

Instead we only allow temporary caches to be merged. This remedies the
problem that after fork() + join() there remains a superfluous cache
layer for all RAM areas.
I haven't tested it, but this might improve the jam situation
memory-wise (the huge heap is committed one less time), though it might
worsen it performance-wise (lots of heap pages are moved with every
merge).


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@25717 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Ingo Weinhold 2008-05-30 01:02:29 +00:00
parent ad66253c1a
commit e4d2f9ba4f

View File

@ -383,7 +383,7 @@ delete_cache(vm_cache* cache)
/*	Returns whether \a cache can be merged into its single consumer.
	A cache is mergeable when it is anonymous/temporary, no longer backs any
	area directly, and has exactly one consumer (the list's next and prev
	links point at the same node). Only temporary caches qualify — merging a
	non-temporary (e.g. vnode-backed) cache would lose its backing store
	association. This replaces the old "has a source" criterion, which left
	a superfluous cache layer on RAM areas after fork() + join().
	NOTE(review): assumes the caller holds the cache's lock — confirm at
	call sites; this predicate reads consumer-list state non-atomically. */
static bool
is_cache_mergeable(vm_cache* cache)
{
	return (cache->areas == NULL && cache->temporary
		&& !list_is_empty(&cache->consumers)
		&& cache->consumers.link.next == cache->consumers.link.prev);
}
@ -488,20 +488,23 @@ merge_cache_with_only_consumer(vm_cache* cache)
}
}
vm_cache* newSource = cache->source;
// The remaining consumer has got a new source.
if (cache->source != NULL) {
vm_cache* newSource = cache->source;
// The remaining consumer has got a new source
mutex_lock(&newSource->lock);
mutex_lock(&newSource->lock);
list_remove_item(&newSource->consumers, cache);
list_add_item(&newSource->consumers, consumer);
consumer->source = newSource;
cache->source = NULL;
list_remove_item(&newSource->consumers, cache);
list_add_item(&newSource->consumers, consumer);
consumer->source = newSource;
cache->source = NULL;
mutex_unlock(&newSource->lock);
mutex_unlock(&newSource->lock);
} else
consumer->source = NULL;
// Release the reference the cache's consumer owned. The consumer takes
// over the cache's ref to its source instead.
// over the cache's ref to its source (if any) instead.
if (cache->ref_count < 2)
panic("cacheRef %p ref count too low!\n", cache);
vm_cache_release_ref(cache);