/*
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <vm_cache.h>

#include <stddef.h>
#include <stdlib.h>

#include <arch/cpu.h>
#include <condition_variable.h>
#include <debug.h>
#include <int.h>
#include <kernel.h>
#include <lock.h>
#include <smp.h>
#include <util/khash.h>
#include <util/AutoLock.h>
#include <vm.h>
#include <vm_page.h>
#include <vm_priv.h>
#include <vm_types.h>


//#define TRACE_VM_CACHE
#ifdef TRACE_VM_CACHE
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


static hash_table *sPageCacheTable;
static spinlock sPageCacheTableLock;

#if DEBUG_CACHE_LIST
vm_cache* gDebugCacheList;
static spinlock sDebugCacheListLock;
#endif


struct page_lookup_key {
	uint32		offset;
	vm_cache	*cache;
};


static int
page_compare_func(void *_p, const void *_key)
{
	vm_page *page = (vm_page *)_p;
	const struct page_lookup_key *key = (page_lookup_key *)_key;

	TRACE(("page_compare_func: page %p, key %p\n", page, key));

	if (page->cache == key->cache && page->cache_offset == key->offset)
		return 0;

	return -1;
}


static uint32
page_hash_func(void *_p, const void *_key, uint32 range)
{
	vm_page *page = (vm_page *)_p;
	const struct page_lookup_key *key = (page_lookup_key *)_key;

#define HASH(offset, ref) ((offset) + ((uint32)(ref) >> 6) * 997)
	// sizeof(vm_cache) >= 64, hence (uint32)(ref) >> 6 is still unique

	if (page)
		return HASH(page->cache_offset, page->cache) % range;

	return HASH(key->offset, key->cache) % range;
}
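
/*	How pages are keyed in sPageCacheTable: both the hash and the comparison
	work on the (cache, cache_offset) pair, never on the page structure
	itself, so a lookup only needs a filled-in page_lookup_key. As an
	illustration (numbers made up): a page at cache_offset 5 of a cache
	allocated at 0x80001000 lands in bucket
	((5 + ((uint32)0x80001000 >> 6) * 997) % range), which is exactly what
	HASH() above computes.
*/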


/*!	Acquires a reference to a cache yet unreferenced by the caller. The
	caller must make sure that the cache is not deleted, e.g. by holding the
	cache's source cache lock or by holding the page cache table lock while
	the cache is still referred to by a page.
	Returns \c true if the reference could be acquired.
*/
static inline bool
acquire_unreferenced_cache_ref(vm_cache* cache)
{
	while (true) {
		int32 count = cache->ref_count;
		if (count == 0)
			return false;

		if (atomic_test_and_set(&cache->ref_count, count + 1, count) == count)
			return true;
	}
}
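
/*	Implementation note: the loop above is a lock-free reference grab.
	atomic_test_and_set() only stores count + 1 if ref_count still equals the
	value read before; if another thread changed it concurrently, the return
	value differs from count and we simply retry. A ref_count of 0 means the
	cache is already being deleted, so the reference must not be
	"resurrected".
*/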


static void
delete_cache(vm_cache *cache)
{
	if (cache->areas != NULL)
		panic("cache %p to be deleted still has areas", cache);
	if (!list_is_empty(&cache->consumers))
		panic("cache %p to be deleted still has consumers", cache);

#if DEBUG_CACHE_LIST
	int state = disable_interrupts();
	acquire_spinlock(&sDebugCacheListLock);

	if (cache->debug_previous)
		cache->debug_previous->debug_next = cache->debug_next;
	if (cache->debug_next)
		cache->debug_next->debug_previous = cache->debug_previous;
	if (cache == gDebugCacheList)
		gDebugCacheList = cache->debug_next;

	release_spinlock(&sDebugCacheListLock);
	restore_interrupts(state);
#endif

	// delete the cache's backing store
	cache->store->ops->destroy(cache->store);

	// free all of the pages in the cache
	vm_page *page = cache->page_list;
	while (page) {
		vm_page *oldPage = page;
		int state;

		page = page->cache_next;

		if (!oldPage->mappings.IsEmpty() || oldPage->wired_count != 0) {
			panic("remove page %p from cache %p: page still has mappings!\n",
				oldPage, cache);
		}

		// remove it from the hash table
		state = disable_interrupts();
		acquire_spinlock(&sPageCacheTableLock);

		hash_remove(sPageCacheTable, oldPage);
		oldPage->cache = NULL;
		// TODO: we also need to remove all of the page's mappings!

		release_spinlock(&sPageCacheTableLock);
		restore_interrupts(state);

		TRACE(("vm_cache_release_ref: freeing page 0x%lx\n",
			oldPage->physical_page_number));
		vm_page_free(cache, oldPage);
	}

	// remove the ref to the source
	if (cache->source)
		vm_cache_remove_consumer(cache->source, cache);

	mutex_destroy(&cache->lock);
	free(cache);
}
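
/*	delete_cache() is only invoked from vm_cache_release_ref() once the
	reference count has dropped to zero. At that point no areas and no
	consumer caches may still be attached - the panics at the top of the
	function guard that invariant.
*/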


// #pragma mark -


status_t
vm_cache_init(kernel_args *args)
{
	// TODO: The table should grow/shrink dynamically.
	sPageCacheTable = hash_init(vm_page_num_pages() / 2,
		offsetof(vm_page, hash_next), &page_compare_func, &page_hash_func);
	if (sPageCacheTable == NULL)
		panic("vm_cache_init: no memory\n");

	return B_OK;
}


vm_cache *
vm_cache_create(vm_store *store)
{
	vm_cache *cache;

	if (store == NULL) {
		panic("vm_cache created with NULL store!");
		return NULL;
	}

	cache = (vm_cache *)malloc(sizeof(vm_cache));
	if (cache == NULL)
		return NULL;

	status_t status = mutex_init(&cache->lock, "vm_cache");
	if (status < B_OK && (!kernel_startup || status != B_NO_MORE_SEMS)) {
		// During early boot, we cannot create semaphores - they are
		// created later in vm_init_post_sem()
		free(cache);
		return NULL;
	}

	list_init_etc(&cache->consumers, offsetof(vm_cache, consumer_link));
	cache->page_list = NULL;
	cache->areas = NULL;
	cache->ref_count = 1;
	cache->source = NULL;
	cache->virtual_base = 0;
	cache->virtual_size = 0;
	cache->temporary = 0;
	cache->scan_skip = 0;
	cache->page_count = 0;
	cache->busy = false;

#if DEBUG_CACHE_LIST
	int state = disable_interrupts();
	acquire_spinlock(&sDebugCacheListLock);

	if (gDebugCacheList)
		gDebugCacheList->debug_previous = cache;
	cache->debug_previous = NULL;
	cache->debug_next = gDebugCacheList;
	gDebugCacheList = cache;

	release_spinlock(&sDebugCacheListLock);
	restore_interrupts(state);
#endif

	// connect the store to its cache
	cache->store = store;
	store->cache = cache;

	return cache;
}
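
/*	Typical usage sketch (illustrative only - the concrete store constructor
	depends on the caller and is not part of this file):

		vm_store *store = <create an anonymous or vnode-backed store>;
		vm_cache *cache = vm_cache_create(store);
		if (cache == NULL)
			<clean up the store - vm_cache_create() does not destroy it>;

	The returned cache starts with a reference count of 1, which is owned by
	the caller and given up again via vm_cache_release_ref().
*/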


void
vm_cache_acquire_ref(vm_cache *cache)
{
	TRACE(("vm_cache_acquire_ref: cache %p, ref will be %ld\n",
		cache, cache->ref_count + 1));

	if (cache == NULL)
		panic("vm_cache_acquire_ref: passed NULL\n");

	atomic_add(&cache->ref_count, 1);
}


void
vm_cache_release_ref(vm_cache *cache)
{
	TRACE(("vm_cache_release_ref: cache %p, ref will be %ld\n",
		cache, cache->ref_count - 1));

	if (cache == NULL)
		panic("vm_cache_release_ref: passed NULL\n");

	if (atomic_add(&cache->ref_count, -1) != 1) {
#if 0
{
	// count min references to see if everything is okay
	struct stack_frame {
		struct stack_frame*	previous;
		void*				return_address;
	};
	int32 min = 0;
	vm_area *a;
	vm_cache *c;
	bool locked = false;
	if (cacheRef->lock.holder != find_thread(NULL)) {
		mutex_lock(&cacheRef->lock);
		locked = true;
	}
	for (a = cacheRef->areas; a != NULL; a = a->cache_next)
		min++;
	for (c = NULL; (c = list_get_next_item(&cacheRef->cache->consumers, c)) != NULL; )
		min++;
	dprintf("! %ld release cache_ref %p, ref_count is now %ld (min %ld, called from %p)\n",
		find_thread(NULL), cacheRef, cacheRef->ref_count,
		min, ((struct stack_frame *)x86_read_ebp())->return_address);
	if (cacheRef->ref_count < min)
		panic("cache_ref %p has too little ref_count!!!!", cacheRef);
	if (locked)
		mutex_unlock(&cacheRef->lock);
}
#endif
		return;
	}

	// delete this cache
	delete_cache(cache);
}


vm_cache*
vm_cache_acquire_page_cache_ref(vm_page* page)
{
	InterruptsSpinLocker locker(sPageCacheTableLock);

	vm_cache* cache = page->cache;
	if (cache == NULL)
		return NULL;

	// get a reference
	if (!acquire_unreferenced_cache_ref(cache))
		return NULL;

	return cache;
}
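
/*	The InterruptsSpinLocker above is what makes this unreferenced access
	safe: as long as page->cache is non-NULL, the cache cannot go away,
	because both vm_cache_remove_page() and delete_cache() clear page->cache
	while holding sPageCacheTableLock before the cache is freed.
*/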


vm_page *
vm_cache_lookup_page(vm_cache *cache, off_t offset)
{
	ASSERT_LOCKED_MUTEX(&cache->lock);

	struct page_lookup_key key;
	key.offset = (uint32)(offset >> PAGE_SHIFT);
	key.cache = cache;

	cpu_status state = disable_interrupts();
	acquire_spinlock(&sPageCacheTableLock);

	vm_page *page = (vm_page *)hash_lookup(sPageCacheTable, &key);

	release_spinlock(&sPageCacheTableLock);
	restore_interrupts(state);

	if (page != NULL && cache != page->cache)
		panic("page %p not in cache %p\n", page, cache);

	return page;
}


void
vm_cache_insert_page(vm_cache *cache, vm_page *page, off_t offset)
{
	TRACE(("vm_cache_insert_page: cache %p, page %p, offset %Ld\n",
		cache, page, offset));
	ASSERT_LOCKED_MUTEX(&cache->lock);

	if (page->cache != NULL) {
		panic("insert page %p into cache %p: page cache is set to %p\n",
			page, cache, page->cache);
	}

	page->cache_offset = (uint32)(offset >> PAGE_SHIFT);

	if (cache->page_list != NULL)
		cache->page_list->cache_prev = page;

	page->cache_next = cache->page_list;
	page->cache_prev = NULL;
	cache->page_list = page;
	cache->page_count++;

	page->usage_count = 2;

	InterruptsSpinLocker locker(sPageCacheTableLock);

	page->cache = cache;

#if KDEBUG
	struct page_lookup_key key;
	key.offset = (uint32)(offset >> PAGE_SHIFT);
	key.cache = cache;
	vm_page* otherPage = (vm_page *)hash_lookup(sPageCacheTable, &key);
	if (otherPage != NULL) {
		panic("vm_cache_insert_page(): there's already page %p with cache "
			"offset %lu in cache %p; inserting page %p", otherPage,
			page->cache_offset, cache, page);
	}
#endif	// KDEBUG

	hash_insert(sPageCacheTable, page);
}


/*!	Removes the vm_page from this cache. Of course, the page must
	really be in this cache or evil things will happen.
	The cache lock must be held.
*/
void
vm_cache_remove_page(vm_cache *cache, vm_page *page)
{
	TRACE(("vm_cache_remove_page: cache %p, page %p\n", cache, page));
	ASSERT_LOCKED_MUTEX(&cache->lock);

	if (page->cache != cache) {
		panic("remove page %p from cache %p: page cache is set to %p\n", page,
			cache, page->cache);
	}

	cpu_status state = disable_interrupts();
	acquire_spinlock(&sPageCacheTableLock);

	hash_remove(sPageCacheTable, page);
	page->cache = NULL;

	release_spinlock(&sPageCacheTableLock);
	restore_interrupts(state);

	if (cache->page_list == page) {
		if (page->cache_next != NULL)
			page->cache_next->cache_prev = NULL;
		cache->page_list = page->cache_next;
	} else {
		if (page->cache_prev != NULL)
			page->cache_prev->cache_next = page->cache_next;
		if (page->cache_next != NULL)
			page->cache_next->cache_prev = page->cache_prev;
	}
	cache->page_count--;
}


status_t
vm_cache_write_modified(vm_cache *cache, bool fsReenter)
{
	TRACE(("vm_cache_write_modified(cache = %p)\n", cache));

	if (cache->temporary)
		return B_OK;

	mutex_lock(&cache->lock);
	status_t status = vm_page_write_modified_pages(cache, fsReenter);
	mutex_unlock(&cache->lock);

	return status;
}


/*!	Commits the memory to the store if the \a commitment is larger than
	what's committed already.
	Assumes you have the \a cache's lock held.
*/
status_t
vm_cache_set_minimal_commitment_locked(vm_cache *cache, off_t commitment)
{
	TRACE(("vm_cache_set_minimal_commitment_locked(cache %p, commitment %Ld)\n",
		cache, commitment));
	ASSERT_LOCKED_MUTEX(&cache->lock);

	vm_store *store = cache->store;
	status_t status = B_OK;

	// If we don't have enough committed space to cover through to the new end
	// of the region...
	if (store->committed_size < commitment) {
		// ToDo: should we check if the cache's virtual size is large
		// enough for a commitment of that size?

		// try to commit more memory
		status = store->ops->commit(store, commitment);
	}

	return status;
}


/*!	This function updates the size field of the vm_cache structure.
	If needed, it will free up all pages that don't belong to the cache
	anymore. The cache lock must be held when you call it.
	Since removed pages don't belong to the cache any longer, they are not
	written back before they will be removed.

	Note that this function may temporarily release the cache lock in case
	it has to wait for busy pages.
*/
status_t
vm_cache_resize(vm_cache *cache, off_t newSize)
{
	TRACE(("vm_cache_resize(cache %p, newSize %Ld) old size %Ld\n",
		cache, newSize, cache->virtual_size));
	ASSERT_LOCKED_MUTEX(&cache->lock);

	status_t status = cache->store->ops->commit(cache->store, newSize);
	if (status != B_OK)
		return status;

	uint32 oldPageCount = (uint32)((cache->virtual_size + B_PAGE_SIZE - 1)
		>> PAGE_SHIFT);
	uint32 newPageCount = (uint32)((newSize + B_PAGE_SIZE - 1) >> PAGE_SHIFT);

	if (newPageCount < oldPageCount) {
		// we need to remove all pages in the cache outside of the new virtual
		// size
		vm_page *page = cache->page_list, *next;

		while (page != NULL) {
			next = page->cache_next;

			if (page->cache_offset >= newPageCount) {
				if (page->state == PAGE_STATE_BUSY) {
					if (page->busy_writing) {
						// We cannot wait for the page to become available
						// as we might cause a deadlock this way
						page->busy_writing = false;
							// this will notify the writer to free the page
						page = next;
					} else {
						// wait for page to become unbusy
						ConditionVariableEntry<vm_page> entry;
						entry.Add(page);
						mutex_unlock(&cache->lock);
						entry.Wait();
						mutex_lock(&cache->lock);

						// restart from the start of the list
						page = cache->page_list;
					}
					continue;
				}

				// remove the page and put it into the free queue
				vm_cache_remove_page(cache, page);
				vm_page_free(cache, page);
			}

			page = next;
		}
	}

	cache->virtual_size = newSize;
	return B_OK;
}
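
/*	Note that pages are only freed when the cache shrinks (newPageCount <
	oldPageCount); growing a cache merely commits more memory in the store.
	Whenever a busy page had to be waited for, the walk restarts from the head
	of the page list, since the list may have changed while the lock was
	dropped.
*/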


/*!	Removes the \a consumer from the \a cache.
	It will also release the reference to the cache owned by the consumer.
	Assumes you have the consumer's cache lock held.
*/
void
vm_cache_remove_consumer(vm_cache *cache, vm_cache *consumer)
{
	TRACE(("remove consumer vm cache %p from cache %p\n", consumer, cache));
	ASSERT_LOCKED_MUTEX(&consumer->lock);

	// Remove the store ref before locking the cache. Otherwise we'd call into
	// the VFS while holding the cache lock, which would reverse the usual
	// locking order.
	if (cache->store->ops->release_ref)
		cache->store->ops->release_ref(cache->store);

	// remove the consumer from the cache, but keep its reference until later
	mutex_lock(&cache->lock);
	list_remove_item(&cache->consumers, consumer);
	consumer->source = NULL;

	if (cache->areas == NULL && cache->source != NULL
		&& !list_is_empty(&cache->consumers)
		&& cache->consumers.link.next == cache->consumers.link.prev) {
		// The cache is not really needed anymore - it can be merged with its
		// only consumer left.

		consumer = (vm_cache *)list_get_first_item(&cache->consumers);

		bool merge = acquire_unreferenced_cache_ref(consumer);

		// In case we managed to grab a reference to the consumer,
		// this doesn't guarantee that we get the cache we wanted
		// to, so we need to check if this cache is really the last
		// consumer of the cache we want to merge it with.

		ConditionVariable<vm_cache> busyCondition;

		if (merge) {
			// But since we need to keep the locking order upper->lower cache,
			// we need to unlock our cache now
			busyCondition.Publish(cache, "cache");
			cache->busy = true;
			mutex_unlock(&cache->lock);

			mutex_lock(&consumer->lock);
			mutex_lock(&cache->lock);

			if (cache->areas != NULL || cache->source == NULL
				|| list_is_empty(&cache->consumers)
				|| cache->consumers.link.next != cache->consumers.link.prev
				|| consumer != list_get_first_item(&cache->consumers)) {
				dprintf("vm_cache_remove_consumer(): cache %p was modified; "
					"not merging it\n", cache);
				merge = false;
				cache->busy = false;
				busyCondition.Unpublish();
				mutex_unlock(&consumer->lock);
				vm_cache_release_ref(consumer);
			}
		}

		if (merge) {
			vm_page *page, *nextPage;
			vm_cache *newSource;

			consumer = (vm_cache *)list_remove_head_item(&cache->consumers);

			TRACE(("merge vm cache %p (ref == %ld) with vm cache %p\n",
				cache, cache->ref_count, consumer));

			for (page = cache->page_list; page != NULL; page = nextPage) {
				vm_page *consumerPage;
				nextPage = page->cache_next;

				consumerPage = vm_cache_lookup_page(consumer,
					(off_t)page->cache_offset << PAGE_SHIFT);
				if (consumerPage == NULL) {
					// the page is not yet in the consumer cache - move
					// it upwards
					vm_cache_remove_page(cache, page);
					vm_cache_insert_page(consumer, page,
						(off_t)page->cache_offset << PAGE_SHIFT);
				} else if (consumerPage->state == PAGE_STATE_BUSY
					&& consumerPage->type == PAGE_TYPE_DUMMY) {
					// the page is currently busy taking a read fault - IOW,
					// vm_soft_fault() has mapped our page so we can just
					// move it up
					//dprintf("%ld: merged busy page %p, cache %p, offset %ld\n", find_thread(NULL), page, cacheRef->cache, page->cache_offset);
					vm_cache_remove_page(consumer, consumerPage);
					consumerPage->state = PAGE_STATE_INACTIVE;
					((vm_dummy_page*)consumerPage)->busy_condition.Unpublish();

					vm_cache_remove_page(cache, page);
					vm_cache_insert_page(consumer, page,
						(off_t)page->cache_offset << PAGE_SHIFT);
#ifdef DEBUG_PAGE_CACHE_TRANSITIONS
				} else {
					page->debug_flags = 0;
					if (consumerPage->state == PAGE_STATE_BUSY)
						page->debug_flags |= 0x1;
					if (consumerPage->type == PAGE_TYPE_DUMMY)
						page->debug_flags |= 0x2;
					page->collided_page = consumerPage;
					consumerPage->collided_page = page;
#endif	// DEBUG_PAGE_CACHE_TRANSITIONS
				}
			}

			newSource = cache->source;

			// The remaining consumer has gotten a new source
			mutex_lock(&newSource->lock);

			list_remove_item(&newSource->consumers, cache);
			list_add_item(&newSource->consumers, consumer);
			consumer->source = newSource;
			cache->source = NULL;

			mutex_unlock(&newSource->lock);

			// Release the other reference to the cache - we take over
			// its reference of its source cache; we can do this here
			// (with the cache locked) since we own another reference
			// from the first consumer we removed
			if (cache->ref_count < 2)
				panic("cacheRef %p ref count too low!\n", cache);
			vm_cache_release_ref(cache);

			mutex_unlock(&consumer->lock);
			vm_cache_release_ref(consumer);
		}

		if (cache->busy)
			busyCondition.Unpublish();
	}

	mutex_unlock(&cache->lock);
	vm_cache_release_ref(cache);
}


/*!	Marks the \a cache as source of the \a consumer cache,
	and adds the \a consumer to its list.
	This also grabs a reference to the source cache.
	Assumes you have the cache and the consumer's lock held.
*/
void
vm_cache_add_consumer_locked(vm_cache *cache, vm_cache *consumer)
{
	TRACE(("add consumer vm cache %p to cache %p\n", consumer, cache));
	ASSERT_LOCKED_MUTEX(&cache->lock);
	ASSERT_LOCKED_MUTEX(&consumer->lock);

	consumer->source = cache;
	list_add_item(&cache->consumers, consumer);

	vm_cache_acquire_ref(cache);

	if (cache->store->ops->acquire_ref != NULL)
		cache->store->ops->acquire_ref(cache->store);
}
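
/*	vm_cache_add_consumer_locked() and vm_cache_remove_consumer() are
	symmetric with respect to references: adding a consumer acquires one
	reference to the source cache and one to its store; removing the consumer
	releases the store reference right away and the cache reference at the
	end of vm_cache_remove_consumer().
*/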


/*!	Adds the \a area to the \a cache.
	Assumes you have locked the cache.
*/
status_t
vm_cache_insert_area_locked(vm_cache *cache, vm_area *area)
{
	TRACE(("vm_cache_insert_area_locked(cache %p, area %p)\n", cache, area));
	ASSERT_LOCKED_MUTEX(&cache->lock);

	area->cache_next = cache->areas;
	if (area->cache_next)
		area->cache_next->cache_prev = area;
	area->cache_prev = NULL;
	cache->areas = area;

	if (cache->store->ops->acquire_ref != NULL)
		cache->store->ops->acquire_ref(cache->store);

	return B_OK;
}


status_t
vm_cache_remove_area(vm_cache *cache, vm_area *area)
{
	TRACE(("vm_cache_remove_area(cache %p, area %p)\n", cache, area));

	MutexLocker locker(cache->lock);

	if (area->cache_prev)
		area->cache_prev->cache_next = area->cache_next;
	if (area->cache_next)
		area->cache_next->cache_prev = area->cache_prev;
	if (cache->areas == area)
		cache->areas = area->cache_next;

	if (cache->store->ops->release_ref)
		cache->store->ops->release_ref(cache->store);

	return B_OK;
}