2002-07-09 16:24:59 +04:00
|
|
|
/*
|
2004-11-23 06:34:04 +03:00
|
|
|
* Copyright 2002-2004, Axel Dörfler, axeld@pinc-software.de.
|
|
|
|
* Distributed under the terms of the MIT License.
|
|
|
|
*
|
|
|
|
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
|
|
|
|
* Distributed under the terms of the NewOS License.
|
|
|
|
*/
|
2002-10-30 02:07:06 +03:00
|
|
|
|
2004-11-08 17:25:09 +03:00
|
|
|
|
2002-07-09 16:24:59 +04:00
|
|
|
#include <kernel.h>
|
|
|
|
#include <vm.h>
|
|
|
|
#include <vm_priv.h>
|
|
|
|
#include <vm_cache.h>
|
|
|
|
#include <vm_page.h>
|
2002-10-30 02:07:06 +03:00
|
|
|
#include <malloc.h>
|
2002-07-09 16:24:59 +04:00
|
|
|
#include <int.h>
|
|
|
|
#include <khash.h>
|
|
|
|
#include <lock.h>
|
|
|
|
#include <debug.h>
|
|
|
|
#include <lock.h>
|
|
|
|
#include <smp.h>
|
|
|
|
#include <arch/cpu.h>
|
|
|
|
#include <Errors.h>
|
|
|
|
|
2004-09-11 03:43:15 +04:00
|
|
|
//#define TRACE_VM_CACHE
|
|
|
|
#ifdef TRACE_VM_CACHE
|
|
|
|
# define TRACE(x) dprintf x
|
|
|
|
#else
|
|
|
|
# define TRACE(x) ;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
2002-07-09 16:24:59 +04:00
|
|
|
/* hash table of pages keyed by cache they're in and offset */
|
|
|
|
#define PAGE_TABLE_SIZE 1024 /* make this dynamic */
|
2004-09-11 03:43:15 +04:00
|
|
|
|
2002-07-09 16:24:59 +04:00
|
|
|
static void *page_cache_table;
|
2002-10-26 20:13:36 +04:00
|
|
|
static spinlock page_cache_table_lock;
|
2002-07-09 16:24:59 +04:00
|
|
|
|
|
|
|
/* Key used to look up a vm_page in page_cache_table:
 * a page is uniquely identified by the cache that owns it
 * and its offset within that cache (see page_compare_func
 * and page_hash_func below). */
struct page_lookup_key {
	off_t offset;		// byte offset of the page within the cache
	vm_cache *cache;	// cache the page belongs to
};
|
|
|
|
|
2004-09-11 03:43:15 +04:00
|
|
|
|
|
|
|
static int
|
|
|
|
page_compare_func(void *_p, const void *_key)
|
2002-07-09 16:24:59 +04:00
|
|
|
{
|
2004-10-09 02:56:51 +04:00
|
|
|
vm_page *page = _p;
|
2002-07-09 16:24:59 +04:00
|
|
|
const struct page_lookup_key *key = _key;
|
|
|
|
|
2004-11-23 06:34:04 +03:00
|
|
|
TRACE(("page_compare_func: page %p, key %p\n", page, key));
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2004-10-09 02:56:51 +04:00
|
|
|
if (page->cache == key->cache && page->offset == key->offset)
|
2002-07-09 16:24:59 +04:00
|
|
|
return 0;
|
2004-09-11 03:43:15 +04:00
|
|
|
|
|
|
|
return -1;
|
2002-07-09 16:24:59 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2002-11-29 11:38:52 +03:00
|
|
|
static uint32
|
|
|
|
page_hash_func(void *_p, const void *_key, uint32 range)
|
2002-07-09 16:24:59 +04:00
|
|
|
{
|
2004-10-09 02:56:51 +04:00
|
|
|
vm_page *page = _p;
|
2002-07-09 16:24:59 +04:00
|
|
|
const struct page_lookup_key *key = _key;
|
|
|
|
#if 0
|
|
|
|
if(p)
|
|
|
|
dprintf("page_hash_func: p 0x%x, key 0x%x, HASH = 0x%x\n", p, key, HASH(p->offset, p->cache_ref) % range);
|
|
|
|
else
|
|
|
|
dprintf("page_hash_func: p 0x%x, key 0x%x, HASH = 0x%x\n", p, key, HASH(key->offset, key->ref) % range);
|
|
|
|
#endif
|
2004-09-11 03:43:15 +04:00
|
|
|
#define HASH(offset, ref) ((unsigned int)(offset >> 12) ^ ((unsigned int)(ref)>>4))
|
|
|
|
|
2004-10-09 02:56:51 +04:00
|
|
|
if (page)
|
|
|
|
return HASH(page->offset, page->cache) % range;
|
2004-09-11 03:43:15 +04:00
|
|
|
|
2004-10-09 02:56:51 +04:00
|
|
|
return HASH(key->offset, key->cache) % range;
|
2002-07-09 16:24:59 +04:00
|
|
|
}
|
|
|
|
|
2004-09-11 03:43:15 +04:00
|
|
|
|
2004-10-08 19:10:50 +04:00
|
|
|
status_t
|
2004-09-11 03:43:15 +04:00
|
|
|
vm_cache_init(kernel_args *ka)
|
2002-07-09 16:24:59 +04:00
|
|
|
{
|
|
|
|
vm_page p;
|
|
|
|
|
2004-09-11 03:43:15 +04:00
|
|
|
page_cache_table = hash_init(PAGE_TABLE_SIZE, (int)&p.hash_next - (int)&p,
|
|
|
|
&page_compare_func, &page_hash_func);
|
|
|
|
if (!page_cache_table)
|
2002-07-09 16:24:59 +04:00
|
|
|
panic("vm_cache_init: cannot allocate memory for page cache hash table\n");
|
|
|
|
page_cache_table_lock = 0;
|
|
|
|
|
2004-10-08 19:10:50 +04:00
|
|
|
return B_OK;
|
2002-07-09 16:24:59 +04:00
|
|
|
}
|
|
|
|
|
2004-09-11 03:43:15 +04:00
|
|
|
|
|
|
|
vm_cache *
|
|
|
|
vm_cache_create(vm_store *store)
|
2002-07-09 16:24:59 +04:00
|
|
|
{
|
|
|
|
vm_cache *cache;
|
|
|
|
|
2002-10-30 02:07:06 +03:00
|
|
|
cache = malloc(sizeof(vm_cache));
|
2004-09-11 03:43:15 +04:00
|
|
|
if (cache == NULL)
|
2002-07-09 16:24:59 +04:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
cache->page_list = NULL;
|
|
|
|
cache->ref = NULL;
|
|
|
|
cache->source = NULL;
|
|
|
|
cache->store = store;
|
2004-09-11 03:43:15 +04:00
|
|
|
if (store != NULL)
|
2002-07-09 16:24:59 +04:00
|
|
|
store->cache = cache;
|
|
|
|
cache->virtual_size = 0;
|
|
|
|
cache->temporary = 0;
|
|
|
|
cache->scan_skip = 0;
|
|
|
|
|
|
|
|
return cache;
|
|
|
|
}
|
|
|
|
|
2004-09-11 03:43:15 +04:00
|
|
|
|
|
|
|
vm_cache_ref *
|
|
|
|
vm_cache_ref_create(vm_cache *cache)
|
2002-07-09 16:24:59 +04:00
|
|
|
{
|
|
|
|
vm_cache_ref *ref;
|
|
|
|
|
2002-10-30 02:07:06 +03:00
|
|
|
ref = malloc(sizeof(vm_cache_ref));
|
2004-09-11 03:43:15 +04:00
|
|
|
if (ref == NULL)
|
2002-07-09 16:24:59 +04:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
ref->cache = cache;
|
|
|
|
mutex_init(&ref->lock, "cache_ref_mutex");
|
2004-11-08 17:25:09 +03:00
|
|
|
ref->areas = NULL;
|
2002-07-09 16:24:59 +04:00
|
|
|
ref->ref_count = 0;
|
|
|
|
cache->ref = ref;
|
|
|
|
|
|
|
|
return ref;
|
|
|
|
}
|
|
|
|
|
2004-09-11 03:43:15 +04:00
|
|
|
|
|
|
|
void
|
|
|
|
vm_cache_acquire_ref(vm_cache_ref *cache_ref, bool acquire_store_ref)
|
2002-07-09 16:24:59 +04:00
|
|
|
{
|
|
|
|
// dprintf("vm_cache_acquire_ref: cache_ref 0x%x, ref will be %d\n", cache_ref, cache_ref->ref_count+1);
|
|
|
|
|
2004-09-11 03:43:15 +04:00
|
|
|
if (cache_ref == NULL)
|
2002-07-09 16:24:59 +04:00
|
|
|
panic("vm_cache_acquire_ref: passed NULL\n");
|
2004-09-11 03:43:15 +04:00
|
|
|
|
|
|
|
if (acquire_store_ref && cache_ref->cache->store->ops->acquire_ref)
|
2002-07-09 16:24:59 +04:00
|
|
|
cache_ref->cache->store->ops->acquire_ref(cache_ref->cache->store);
|
2004-09-11 03:43:15 +04:00
|
|
|
|
2002-07-09 16:24:59 +04:00
|
|
|
atomic_add(&cache_ref->ref_count, 1);
|
|
|
|
}
|
|
|
|
|
2004-09-11 03:43:15 +04:00
|
|
|
|
|
|
|
/** Releases a reference to \a cache_ref. When the last reference is
 *	released, the cache is torn down completely: the backing store is
 *	destroyed, all pages are removed from the global hash and freed,
 *	the reference to the source cache (if any) is dropped, and both the
 *	cache and the ref themselves are freed. Panics when passed NULL.
 */

void
vm_cache_release_ref(vm_cache_ref *cache_ref)
{
	vm_page *page;

	TRACE(("vm_cache_release_ref: cache_ref %p, ref will be %ld\n", cache_ref, cache_ref->ref_count - 1));

	if (cache_ref == NULL)
		panic("vm_cache_release_ref: passed NULL\n");

	// not the last reference: just forward the release to the store
	// NOTE(review): this path dereferences cache->store without a NULL
	// check, while the teardown path below guards against a NULL store —
	// confirm whether a store is in fact always present here
	if (atomic_add(&cache_ref->ref_count, -1) != 1) {
		if (cache_ref->cache->store->ops->release_ref)
			cache_ref->cache->store->ops->release_ref(cache_ref->cache->store);

		return;
	}

	// delete this cache

	// delete the cache's backing store, if it has one
	if (cache_ref->cache->store)
		(*cache_ref->cache->store->ops->destroy)(cache_ref->cache->store);

	// free all of the pages in the cache
	page = cache_ref->cache->page_list;
	while (page) {
		vm_page *oldPage = page;
		int state;

		// grab the next link before the page is handed back to the
		// free queue
		page = page->cache_next;

		// remove it from the hash table
		state = disable_interrupts();
		acquire_spinlock(&page_cache_table_lock);

		hash_remove(page_cache_table, oldPage);

		release_spinlock(&page_cache_table_lock);
		restore_interrupts(state);

		TRACE(("vm_cache_release_ref: freeing page 0x%lx\n", oldPage->ppn));
		vm_page_set_state(oldPage, PAGE_STATE_FREE);
	}

	// remove the ref to the source
	if (cache_ref->cache->source)
		vm_cache_release_ref(cache_ref->cache->source->ref);

	mutex_destroy(&cache_ref->lock);
	free(cache_ref->cache);
	free(cache_ref);
}
|
|
|
|
|
2004-09-11 03:43:15 +04:00
|
|
|
|
|
|
|
vm_page *
|
|
|
|
vm_cache_lookup_page(vm_cache_ref *cache_ref, off_t offset)
|
2002-07-09 16:24:59 +04:00
|
|
|
{
|
|
|
|
struct page_lookup_key key;
|
2004-12-14 01:05:47 +03:00
|
|
|
cpu_status state;
|
|
|
|
vm_page *page;
|
|
|
|
|
|
|
|
ASSERT_LOCKED_MUTEX(&cache_ref->lock);
|
2002-07-09 16:24:59 +04:00
|
|
|
|
|
|
|
key.offset = offset;
|
2004-10-09 02:56:51 +04:00
|
|
|
key.cache = cache_ref->cache;
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2002-07-25 05:05:51 +04:00
|
|
|
state = disable_interrupts();
|
2002-07-09 16:24:59 +04:00
|
|
|
acquire_spinlock(&page_cache_table_lock);
|
|
|
|
|
|
|
|
page = hash_lookup(page_cache_table, &key);
|
|
|
|
|
|
|
|
release_spinlock(&page_cache_table_lock);
|
2002-07-25 05:05:51 +04:00
|
|
|
restore_interrupts(state);
|
2002-07-09 16:24:59 +04:00
|
|
|
|
|
|
|
return page;
|
|
|
|
}
|
|
|
|
|
2004-09-11 03:43:15 +04:00
|
|
|
|
|
|
|
void
|
|
|
|
vm_cache_insert_page(vm_cache_ref *cache_ref, vm_page *page, off_t offset)
|
2002-07-09 16:24:59 +04:00
|
|
|
{
|
2004-12-14 01:05:47 +03:00
|
|
|
cpu_status state;
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2004-11-23 06:34:04 +03:00
|
|
|
TRACE(("vm_cache_insert_page: cache_ref %p, page %p, offset %Ld\n", cache_ref, page, offset));
|
2004-12-14 01:05:47 +03:00
|
|
|
ASSERT_LOCKED_MUTEX(&cache_ref->lock);
|
2002-07-09 16:24:59 +04:00
|
|
|
|
|
|
|
page->offset = offset;
|
|
|
|
|
2004-09-11 03:43:15 +04:00
|
|
|
if (cache_ref->cache->page_list != NULL)
|
2002-07-09 16:24:59 +04:00
|
|
|
cache_ref->cache->page_list->cache_prev = page;
|
2004-09-11 03:43:15 +04:00
|
|
|
|
2002-07-09 16:24:59 +04:00
|
|
|
page->cache_next = cache_ref->cache->page_list;
|
|
|
|
page->cache_prev = NULL;
|
|
|
|
cache_ref->cache->page_list = page;
|
|
|
|
|
2004-10-09 02:56:51 +04:00
|
|
|
page->cache = cache_ref->cache;
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2002-07-25 05:05:51 +04:00
|
|
|
state = disable_interrupts();
|
2002-07-09 16:24:59 +04:00
|
|
|
acquire_spinlock(&page_cache_table_lock);
|
|
|
|
|
|
|
|
hash_insert(page_cache_table, page);
|
|
|
|
|
|
|
|
release_spinlock(&page_cache_table_lock);
|
2002-07-25 05:05:51 +04:00
|
|
|
restore_interrupts(state);
|
2002-07-09 16:24:59 +04:00
|
|
|
|
|
|
|
}
|
|
|
|
|
2004-09-11 03:43:15 +04:00
|
|
|
|
2004-10-08 19:10:50 +04:00
|
|
|
/** Removes the vm_page from this cache. Of course, the page must
|
|
|
|
* really be in this cache or evil things will happen.
|
|
|
|
* The vm_cache_ref lock must be held.
|
|
|
|
*/
|
|
|
|
|
2004-09-11 03:43:15 +04:00
|
|
|
void
|
|
|
|
vm_cache_remove_page(vm_cache_ref *cache_ref, vm_page *page)
|
2002-07-09 16:24:59 +04:00
|
|
|
{
|
2004-12-14 01:05:47 +03:00
|
|
|
cpu_status state;
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2004-11-23 06:34:04 +03:00
|
|
|
TRACE(("vm_cache_remove_page: cache %p, page %p\n", cache_ref, page));
|
2004-10-08 19:10:50 +04:00
|
|
|
ASSERT_LOCKED_MUTEX(&cache_ref->lock);
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2002-07-25 05:05:51 +04:00
|
|
|
state = disable_interrupts();
|
2002-07-09 16:24:59 +04:00
|
|
|
acquire_spinlock(&page_cache_table_lock);
|
|
|
|
|
|
|
|
hash_remove(page_cache_table, page);
|
|
|
|
|
|
|
|
release_spinlock(&page_cache_table_lock);
|
2002-07-25 05:05:51 +04:00
|
|
|
restore_interrupts(state);
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2004-09-11 03:43:15 +04:00
|
|
|
if (cache_ref->cache->page_list == page) {
|
|
|
|
if (page->cache_next != NULL)
|
2002-07-09 16:24:59 +04:00
|
|
|
page->cache_next->cache_prev = NULL;
|
|
|
|
cache_ref->cache->page_list = page->cache_next;
|
|
|
|
} else {
|
2004-09-11 03:43:15 +04:00
|
|
|
if (page->cache_prev != NULL)
|
2002-07-09 16:24:59 +04:00
|
|
|
page->cache_prev->cache_next = page->cache_next;
|
2004-09-11 03:43:15 +04:00
|
|
|
if (page->cache_next != NULL)
|
2002-07-09 16:24:59 +04:00
|
|
|
page->cache_next->cache_prev = page->cache_prev;
|
|
|
|
}
|
2004-10-09 02:56:51 +04:00
|
|
|
page->cache = NULL;
|
2002-07-09 16:24:59 +04:00
|
|
|
}
|
|
|
|
|
2004-09-11 03:43:15 +04:00
|
|
|
|
2004-11-23 06:34:04 +03:00
|
|
|
status_t
|
|
|
|
vm_cache_write_modified(vm_cache_ref *ref)
|
|
|
|
{
|
|
|
|
status_t status;
|
|
|
|
|
|
|
|
TRACE(("vm_cache_write_modified(ref = %p)\n", ref));
|
|
|
|
|
|
|
|
mutex_lock(&ref->lock);
|
|
|
|
status = vm_page_write_modified(ref->cache);
|
|
|
|
mutex_unlock(&ref->lock);
|
|
|
|
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2004-11-03 20:24:41 +03:00
|
|
|
status_t
|
|
|
|
vm_cache_set_minimal_commitment(vm_cache_ref *ref, off_t commitment)
|
|
|
|
{
|
|
|
|
status_t status = B_OK;
|
|
|
|
vm_store *store;
|
|
|
|
|
|
|
|
mutex_lock(&ref->lock);
|
|
|
|
store = ref->cache->store;
|
|
|
|
|
|
|
|
// If we don't have enough committed space to cover through to the new end of region...
|
|
|
|
if (store->committed_size < commitment) {
|
|
|
|
// ToDo: should we check if the cache's virtual size is large
|
|
|
|
// enough for a commitment of that size?
|
|
|
|
|
|
|
|
// try to commit more memory
|
|
|
|
status = (store->ops->commit)(store, commitment);
|
|
|
|
}
|
|
|
|
|
|
|
|
mutex_unlock(&ref->lock);
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2004-10-09 03:11:50 +04:00
|
|
|
/** This function updates the size field of the vm_cache structure.
|
|
|
|
* If needed, it will free up all pages that don't belong to the cache anymore.
|
|
|
|
* The vm_cache_ref lock must be held when you call it.
|
|
|
|
*/
|
|
|
|
|
2004-10-08 19:10:50 +04:00
|
|
|
status_t
|
2004-11-23 06:34:04 +03:00
|
|
|
vm_cache_resize(vm_cache_ref *cacheRef, off_t newSize)
|
2004-10-08 19:10:50 +04:00
|
|
|
{
|
|
|
|
vm_cache *cache = cacheRef->cache;
|
2004-11-03 20:24:41 +03:00
|
|
|
status_t status;
|
2004-11-23 06:34:04 +03:00
|
|
|
off_t oldSize;
|
2004-10-08 19:10:50 +04:00
|
|
|
|
2004-11-03 20:24:41 +03:00
|
|
|
ASSERT_LOCKED_MUTEX(&cacheRef->lock);
|
|
|
|
|
|
|
|
status = cache->store->ops->commit(cache->store, newSize);
|
|
|
|
if (status != B_OK)
|
|
|
|
return status;
|
|
|
|
|
2004-10-08 19:10:50 +04:00
|
|
|
oldSize = cache->virtual_size;
|
|
|
|
if (newSize < oldSize) {
|
|
|
|
// we need to remove all pages in the cache outside of the new virtual size
|
|
|
|
vm_page *page, *next;
|
|
|
|
|
|
|
|
for (page = cache->page_list; page; page = next) {
|
|
|
|
next = page->cache_next;
|
|
|
|
|
|
|
|
if (page->offset >= newSize) {
|
|
|
|
// remove the page and put it into the free queue
|
|
|
|
vm_cache_remove_page(cacheRef, page);
|
|
|
|
vm_page_set_state(page, PAGE_STATE_FREE);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
cache->virtual_size = newSize;
|
|
|
|
return B_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
status_t
|
2004-11-08 17:25:09 +03:00
|
|
|
vm_cache_insert_area(vm_cache_ref *cache_ref, vm_area *area)
|
2002-07-09 16:24:59 +04:00
|
|
|
{
|
|
|
|
mutex_lock(&cache_ref->lock);
|
|
|
|
|
2004-11-08 17:25:09 +03:00
|
|
|
area->cache_next = cache_ref->areas;
|
|
|
|
if (area->cache_next)
|
|
|
|
area->cache_next->cache_prev = area;
|
|
|
|
area->cache_prev = NULL;
|
|
|
|
cache_ref->areas = area;
|
2002-07-09 16:24:59 +04:00
|
|
|
|
|
|
|
mutex_unlock(&cache_ref->lock);
|
2004-10-08 19:10:50 +04:00
|
|
|
return B_OK;
|
2002-07-09 16:24:59 +04:00
|
|
|
}
|
|
|
|
|
2004-09-11 03:43:15 +04:00
|
|
|
|
2004-10-08 19:10:50 +04:00
|
|
|
status_t
|
2004-11-08 17:25:09 +03:00
|
|
|
vm_cache_remove_area(vm_cache_ref *cache_ref, vm_area *area)
|
2002-07-09 16:24:59 +04:00
|
|
|
{
|
|
|
|
mutex_lock(&cache_ref->lock);
|
|
|
|
|
2004-11-08 17:25:09 +03:00
|
|
|
if (area->cache_prev)
|
|
|
|
area->cache_prev->cache_next = area->cache_next;
|
|
|
|
if (area->cache_next)
|
|
|
|
area->cache_next->cache_prev = area->cache_prev;
|
|
|
|
if (cache_ref->areas == area)
|
|
|
|
cache_ref->areas = area->cache_next;
|
2002-07-09 16:24:59 +04:00
|
|
|
|
|
|
|
mutex_unlock(&cache_ref->lock);
|
2004-10-08 19:10:50 +04:00
|
|
|
return B_OK;
|
2002-07-09 16:24:59 +04:00
|
|
|
}
|