* Merged vm_cache_ref and vm_cache to a single structure (Axel & Ingo).
* Renamed vm_cache.c to vm_cache.cpp
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@21642 a95241bf-73f2-0310-859d-f6bbb57e9c96
parent 1296365693
commit 58f6e8e5e4
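In short, the lock, area list, and reference count that previously lived in vm_cache_ref now live in vm_cache itself, and vm_cache_ref is gone; callers lock and reference the cache directly. A rough stand-alone sketch of the shape of the change (plain C11 with pthread/stdatomic standing in for the kernel's mutex and atomic_add; field names follow the vm_types.h hunk below, everything else is illustrative only):

    #include <pthread.h>
    #include <stdatomic.h>

    struct vm_area;                          /* placeholder forward declaration */

    /* Before: a separate wrapper carried the lock and the reference count. */
    struct old_vm_cache_ref {
        struct old_vm_cache *cache;
        pthread_mutex_t lock;                /* kernel: mutex */
        struct vm_area *areas;
        atomic_int ref_count;                /* kernel: vint32 + atomic_add() */
    };
    struct old_vm_cache { struct old_vm_cache_ref *ref; /* pages, store, ... */ };

    /* After: one structure owns its pages, its lock and its reference count. */
    struct merged_vm_cache {
        pthread_mutex_t lock;
        struct vm_area *areas;
        atomic_int ref_count;
        struct merged_vm_cache *source;      /* pages, consumers, store, ... */
    };

    static void cache_acquire_ref(struct merged_vm_cache *cache)
    {
        atomic_fetch_add(&cache->ref_count, 1);  /* was: cacheRef->ref_count */
    }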
@@ -37,9 +37,9 @@ struct cache_module_info {
 extern "C" {
 #endif
 
-extern void cache_node_opened(void *vnode, int32 fdType, vm_cache_ref *cache,
+extern void cache_node_opened(void *vnode, int32 fdType, vm_cache *cache,
     dev_t mountID, ino_t parentID, ino_t vnodeID, const char *name);
-extern void cache_node_closed(void *vnode, int32 fdType, vm_cache_ref *cache,
+extern void cache_node_closed(void *vnode, int32 fdType, vm_cache *cache,
     dev_t mountID, ino_t vnodeID);
 extern void cache_node_launched(size_t argCount, char * const *args);
 extern void cache_prefetch_vnode(void *vnode, off_t offset, size_t size);
@@ -27,7 +27,7 @@
 #define MAX_NODE_MONITORS 65536
 
 struct kernel_args;
-struct vm_cache_ref;
+struct vm_cache;
 struct file_descriptor;
 struct selectsync;
 struct pollfd;
@@ -91,7 +91,7 @@ status_t vfs_read_pages(void *vnode, void *cookie, off_t pos,
     const iovec *vecs, size_t count, size_t *_numBytes, bool fsReenter);
 status_t vfs_write_pages(void *vnode, void *cookie, off_t pos,
     const iovec *vecs, size_t count, size_t *_numBytes, bool fsReenter);
-status_t vfs_get_vnode_cache(void *vnode, struct vm_cache_ref **_cache, bool allocate);
+status_t vfs_get_vnode_cache(void *vnode, struct vm_cache **_cache, bool allocate);
 status_t vfs_get_file_map(void *_vnode, off_t offset, size_t size,
     struct file_io_vec *vecs, size_t *_count);
 status_t vfs_get_fs_node_from_path(dev_t mountID, const char *path,
@@ -55,6 +55,8 @@ area_id vm_map_physical_memory(team_id team, const char *name, void **address,
 area_id vm_map_file(team_id aid, const char *name, void **address,
     uint32 addressSpec, addr_t size, uint32 protection, uint32 mapping,
     const char *path, off_t offset);
+vm_cache *vm_area_get_locked_cache(vm_area *area);
+void vm_area_put_locked_cache(vm_cache *cache);
 area_id vm_create_null_area(team_id team, const char *name, void **address,
     uint32 addressSpec, addr_t size);
 area_id vm_copy_area(team_id team, const char *name, void **_address,
@@ -63,7 +65,7 @@ area_id vm_clone_area(team_id team, const char *name, void **address,
     uint32 addressSpec, uint32 protection, uint32 mapping,
     area_id sourceArea);
 status_t vm_delete_area(team_id aid, area_id id);
-status_t vm_create_vnode_cache(void *vnode, vm_cache_ref **_cacheRef);
+status_t vm_create_vnode_cache(void *vnode, vm_cache **_cache);
 vm_area *vm_area_lookup(vm_address_space *addressSpace, addr_t address);
 status_t vm_set_area_memory_type(area_id id, addr_t physicalBase, uint32 type);
 status_t vm_get_page_mapping(team_id team, addr_t vaddr, addr_t *paddr);
@@ -21,19 +21,18 @@ extern "C" {
 
 status_t vm_cache_init(struct kernel_args *args);
 vm_cache *vm_cache_create(vm_store *store);
-status_t vm_cache_ref_create(vm_cache *cache, bool acquireLock);
-void vm_cache_acquire_ref(vm_cache_ref *cache_ref);
-void vm_cache_release_ref(vm_cache_ref *cache_ref);
-vm_page *vm_cache_lookup_page(vm_cache_ref *cacheRef, off_t page);
-void vm_cache_insert_page(vm_cache_ref *cacheRef, vm_page *page, off_t offset);
-void vm_cache_remove_page(vm_cache_ref *cacheRef, vm_page *page);
-void vm_cache_remove_consumer(vm_cache_ref *cacheRef, vm_cache *consumer);
-void vm_cache_add_consumer_locked(vm_cache_ref *cacheRef, vm_cache *consumer);
-status_t vm_cache_write_modified(vm_cache_ref *ref, bool fsReenter);
-status_t vm_cache_set_minimal_commitment_locked(vm_cache_ref *ref, off_t commitment);
-status_t vm_cache_resize(vm_cache_ref *cacheRef, off_t newSize);
-status_t vm_cache_insert_area_locked(vm_cache_ref *cacheRef, vm_area *area);
-status_t vm_cache_remove_area(vm_cache_ref *cacheRef, vm_area *area);
+void vm_cache_acquire_ref(vm_cache *cache);
+void vm_cache_release_ref(vm_cache *cache);
+vm_page *vm_cache_lookup_page(vm_cache *cache, off_t page);
+void vm_cache_insert_page(vm_cache *cache, vm_page *page, off_t offset);
+void vm_cache_remove_page(vm_cache *cache, vm_page *page);
+void vm_cache_remove_consumer(vm_cache *cache, vm_cache *consumer);
+void vm_cache_add_consumer_locked(vm_cache *cache, vm_cache *consumer);
+status_t vm_cache_write_modified(vm_cache *cache, bool fsReenter);
+status_t vm_cache_set_minimal_commitment_locked(vm_cache *cache, off_t commitment);
+status_t vm_cache_resize(vm_cache *cache, off_t newSize);
+status_t vm_cache_insert_area_locked(vm_cache *cache, vm_area *area);
+status_t vm_cache_remove_area(vm_cache *cache, vm_area *area);
 
 #ifdef __cplusplus
 }
@@ -125,23 +125,15 @@ enum {
     CACHE_TYPE_NULL
 };
 
-// vm_cache_ref
-typedef struct vm_cache_ref {
-    struct vm_cache *cache;
-    mutex lock;
-
-    struct vm_area *areas;
-
-    vint32 ref_count;
-} vm_cache_ref;
-
 // vm_cache
 typedef struct vm_cache {
+    mutex lock;
+    struct vm_area *areas;
+    vint32 ref_count;
     struct list_link consumer_link;
     struct list consumers;
         // list of caches that use this cache as a source
     vm_page *page_list;
-    vm_cache_ref *ref;
     struct vm_cache *source;
     struct vm_store *store;
     off_t virtual_base;
@@ -165,7 +157,8 @@ typedef struct vm_area {
     uint16 memory_type;
     vint32 ref_count;
 
-    struct vm_cache_ref *cache_ref;
+    struct vm_cache *cache;
+    vint32 no_cache_change;
     off_t cache_offset;
     uint32 cache_type;
     vm_area_mappings mappings;
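For call sites the visible effect is one less level of indirection: what used to be reached as area->cache_ref->cache-><field> is now area->cache-><field>, and the lock travels with the cache. A small hypothetical helper, written against the headers as they read after this change (not part of the patch):

    static off_t
    area_cached_size(vm_area *area)
    {
        mutex_lock(&area->cache->lock);          /* was: &area->cache_ref->lock */
        off_t size = area->cache->virtual_size;  /* was: area->cache_ref->cache->virtual_size */
        mutex_unlock(&area->cache->lock);
        return size;
    }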
src/system/kernel/cache/file_cache.cpp
@@ -60,7 +60,7 @@ struct file_map {
 };
 
 struct file_cache_ref {
-    vm_cache_ref *cache;
+    vm_cache *cache;
     void *vnode;
     void *device;
     void *cookie;
@@ -520,7 +520,7 @@ read_chunk_into_cache(file_cache_ref *ref, off_t offset, size_t numBytes,
     TRACE(("read_chunk(offset = %Ld, size = %lu, pageOffset = %ld, buffer = %#lx, bufferSize = %lu\n",
         offset, size, pageOffset, buffer, bufferSize));
 
-    vm_cache_ref *cache = ref->cache;
+    vm_cache *cache = ref->cache;
 
     iovec vecs[MAX_IO_VECS];
     int32 vecCount = 0;
@@ -713,9 +713,10 @@ write_chunk_to_cache(file_cache_ref *ref, off_t offset, size_t numBytes,
     addr_t last = (addr_t)vecs[vecCount - 1].iov_base
         + vecs[vecCount - 1].iov_len - B_PAGE_SIZE;
 
-    if (offset + pageOffset + bufferSize == ref->cache->cache->virtual_size) {
+    if (offset + pageOffset + bufferSize == ref->cache->virtual_size) {
         // the space in the page after this write action needs to be cleaned
-        memset((void *)(last + lastPageOffset), 0, B_PAGE_SIZE - lastPageOffset);
+        memset((void *)(last + lastPageOffset), 0,
+            B_PAGE_SIZE - lastPageOffset);
     } else if (vecCount > 1) {
         // the end of this write does not happen on a page boundary, so we
         // need to fetch the last page before we can update it
@@ -842,8 +843,8 @@ cache_io(void *_cacheRef, off_t offset, addr_t buffer, size_t *_size, bool doWri
         panic("cache_io() called with NULL ref!\n");
 
     file_cache_ref *ref = (file_cache_ref *)_cacheRef;
-    vm_cache_ref *cache = ref->cache;
-    off_t fileSize = cache->cache->virtual_size;
+    vm_cache *cache = ref->cache;
+    off_t fileSize = cache->virtual_size;
 
     TRACE(("cache_io(ref = %p, offset = %Ld, buffer = %p, size = %lu, %s)\n",
         ref, offset, (void *)buffer, *_size, doWrite ? "write" : "read"));
@@ -1084,12 +1085,13 @@ file_cache_control(const char *subsystem, uint32 function, void *buffer, size_t
 extern "C" void
 cache_prefetch_vnode(void *vnode, off_t offset, size_t size)
 {
-    vm_cache_ref *cache;
+    vm_cache *cache;
     if (vfs_get_vnode_cache(vnode, &cache, false) != B_OK)
         return;
 
-    file_cache_ref *ref = (struct file_cache_ref *)((vnode_store *)cache->cache->store)->file_cache_ref;
-    off_t fileSize = cache->cache->virtual_size;
+    file_cache_ref *ref = (struct file_cache_ref *)
+        ((vnode_store *)cache->store)->file_cache_ref;
+    off_t fileSize = cache->virtual_size;
 
     if (size > fileSize)
         size = fileSize;
@@ -1160,7 +1162,7 @@ cache_prefetch(dev_t mountID, ino_t vnodeID, off_t offset, size_t size)
 
 
 extern "C" void
-cache_node_opened(void *vnode, int32 fdType, vm_cache_ref *cache, dev_t mountID,
+cache_node_opened(void *vnode, int32 fdType, vm_cache *cache, dev_t mountID,
     ino_t parentID, ino_t vnodeID, const char *name)
 {
     if (sCacheModule == NULL || sCacheModule->node_opened == NULL)
@@ -1168,9 +1170,10 @@ cache_node_opened(void *vnode, int32 fdType, vm_cache_ref *cache, dev_t mountID,
 
     off_t size = -1;
     if (cache != NULL) {
-        file_cache_ref *ref = (file_cache_ref *)((vnode_store *)cache->cache->store)->file_cache_ref;
+        file_cache_ref *ref = (file_cache_ref *)
+            ((vnode_store *)cache->store)->file_cache_ref;
         if (ref != NULL)
-            size = ref->cache->cache->virtual_size;
+            size = cache->virtual_size;
     }
 
     sCacheModule->node_opened(vnode, fdType, mountID, parentID, vnodeID, name, size);
@@ -1178,7 +1181,7 @@ cache_node_opened(void *vnode, int32 fdType, vm_cache_ref *cache, dev_t mountID,
 
 
 extern "C" void
-cache_node_closed(void *vnode, int32 fdType, vm_cache_ref *cache,
+cache_node_closed(void *vnode, int32 fdType, vm_cache *cache,
     dev_t mountID, ino_t vnodeID)
 {
     if (sCacheModule == NULL || sCacheModule->node_closed == NULL)
@@ -1237,7 +1240,7 @@ file_cache_create(dev_t mountID, ino_t vnodeID, off_t size, int fd)
     if (ref == NULL)
         return NULL;
 
-    // TODO: delay vm_cache/vm_cache_ref creation until data is
+    // TODO: delay vm_cache creation until data is
     // requested/written for the first time? Listing lots of
     // files in Tracker (and elsewhere) could be slowed down.
     // Since the file_cache_ref itself doesn't have a lock,
@@ -1255,7 +1258,8 @@ file_cache_create(dev_t mountID, ino_t vnodeID, off_t size, int fd)
     if (vfs_get_cookie_from_fd(fd, &ref->cookie) != B_OK)
         goto err2;
 
-    // Get the vnode for the object (note, this does not grab a reference to the node)
+    // Get the vnode for the object
+    // (note, this does not grab a reference to the node)
     if (vfs_lookup_vnode(mountID, vnodeID, &ref->vnode) != B_OK)
         goto err2;
 
@@ -1270,8 +1274,8 @@ file_cache_create(dev_t mountID, ino_t vnodeID, off_t size, int fd)
     // we don't grab an extra reference).
     vfs_put_vnode(ref->vnode);
 
-    ref->cache->cache->virtual_size = size;
-    ((vnode_store *)ref->cache->cache->store)->file_cache_ref = ref;
+    ref->cache->virtual_size = size;
+    ((vnode_store *)ref->cache->store)->file_cache_ref = ref;
     return ref;
 
 err2:
@@ -65,7 +65,7 @@ const static uint32 kMaxUnusedVnodes = 8192;
 
 struct vnode {
     struct vnode *next;
-    vm_cache_ref *cache;
+    vm_cache *cache;
     dev_t device;
     list_link mount_link;
     list_link unused_link;
@@ -2415,13 +2415,14 @@ dump_vnode_caches(int argc, char **argv)
 
         // count pages in cache
         size_t numPages = 0;
-        for (struct vm_page *page = vnode->cache->cache->page_list;
+        for (struct vm_page *page = vnode->cache->page_list;
                 page != NULL; page = page->cache_next) {
             numPages++;
         }
 
-        kprintf("%p%4ld%10Ld %p %8Ld%8ld\n", vnode, vnode->device, vnode->id, vnode->cache,
-            (vnode->cache->cache->virtual_size + B_PAGE_SIZE - 1) / B_PAGE_SIZE, numPages);
+        kprintf("%p%4ld%10Ld %p %8Ld%8ld\n", vnode, vnode->device, vnode->id,
+            vnode->cache, (vnode->cache->virtual_size + B_PAGE_SIZE - 1)
+                / B_PAGE_SIZE, numPages);
     }
 
     hash_close(sVnodeTable, &iterator, false);
@@ -3106,7 +3107,7 @@ vfs_write_pages(void *_vnode, void *cookie, off_t pos, const iovec *vecs, size_t
 */
 
 extern "C" status_t
-vfs_get_vnode_cache(void *_vnode, vm_cache_ref **_cache, bool allocate)
+vfs_get_vnode_cache(void *_vnode, vm_cache **_cache, bool allocate)
 {
     struct vnode *vnode = (struct vnode *)_vnode;
 
@@ -3,7 +3,7 @@ SubDir HAIKU_TOP src system kernel vm ;
 KernelMergeObject kernel_vm.o :
     vm.cpp
     vm_address_space.c
-    vm_cache.c
+    vm_cache.cpp
     vm_daemons.c
     vm_low_memory.cpp
     vm_page.c
(File diff suppressed because it is too large.)
@@ -34,7 +34,7 @@
 /* hash table of pages keyed by cache they're in and offset */
 #define PAGE_TABLE_SIZE 1024 /* TODO: make this dynamic */
 
-static void *sPageCacheTable;
+static hash_table *sPageCacheTable;
 static spinlock sPageCacheTableLock;
 
 struct page_lookup_key {
@@ -46,8 +46,8 @@ struct page_lookup_key {
 static int
 page_compare_func(void *_p, const void *_key)
 {
-    vm_page *page = _p;
-    const struct page_lookup_key *key = _key;
+    vm_page *page = (vm_page *)_p;
+    const struct page_lookup_key *key = (page_lookup_key *)_key;
 
     TRACE(("page_compare_func: page %p, key %p\n", page, key));
 
@@ -61,8 +61,8 @@ page_compare_func(void *_p, const void *_key)
 static uint32
 page_hash_func(void *_p, const void *_key, uint32 range)
 {
-    vm_page *page = _p;
-    const struct page_lookup_key *key = _key;
+    vm_page *page = (vm_page *)_p;
+    const struct page_lookup_key *key = (page_lookup_key *)_key;
 
 #define HASH(offset, ref) ((offset) ^ ((uint32)(ref) >> 4))
 
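sPageCacheTable maps (cache, page offset) pairs to vm_page entries; the HASH macro above mixes the page offset with the cache pointer. A stand-alone model of the key and hash, assuming the same mixing idea (uintptr_t is used here for portability, where the kernel casts to uint32 directly):

    #include <stdint.h>

    struct page_lookup_key {
        uint32_t offset;   /* offset >> PAGE_SHIFT, i.e. the page index */
        void *cache;       /* the owning cache */
    };

    static uint32_t
    page_hash(const struct page_lookup_key *key, uint32_t range)
    {
        /* same idea as HASH(offset, ref): xor the page index with the
           shifted cache pointer, then reduce into the table range */
        return (key->offset ^ (uint32_t)((uintptr_t)key->cache >> 4)) % range;
    }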
@@ -95,13 +95,22 @@ vm_cache_create(vm_store *store)
         return NULL;
     }
 
-    cache = malloc(sizeof(vm_cache));
+    cache = (vm_cache *)malloc(sizeof(vm_cache));
     if (cache == NULL)
         return NULL;
 
-    list_init(&cache->consumers);
+    status_t status = mutex_init(&cache->lock, "vm_cache");
+    if (status < B_OK && (!kernel_startup || status != B_NO_MORE_SEMS)) {
+        // During early boot, we cannot create semaphores - they are
+        // created later in vm_init_post_sem()
+        free(cache);
+        return NULL;
+    }
+
+    list_init_etc(&cache->consumers, offsetof(vm_cache, consumer_link));
     cache->page_list = NULL;
-    cache->ref = NULL;
+    cache->areas = NULL;
+    cache->ref_count = 1;
     cache->source = NULL;
     cache->virtual_base = 0;
     cache->virtual_size = 0;
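Two details of the new vm_cache_create() are worth noting: mutex_init() may legitimately fail during early boot (semaphores only exist after vm_init_post_sem()), and the consumers list is now set up with list_init_etc(), which records where consumer_link sits inside vm_cache so consumer caches can be chained in place without extra node allocations. A simplified stand-alone sketch of that offset-based intrusive-list idea (this is not the kernel's util/list implementation, just the concept):

    #include <stddef.h>

    struct list_link { struct list_link *next, *prev; };

    struct list {
        struct list_link link;   /* sentinel node */
        size_t offset;           /* offset of the link field inside each element */
    };

    struct cache {
        int id;
        struct list_link consumer_link;
    };

    static void
    list_init_with_offset(struct list *list, size_t offset)
    {
        list->link.next = list->link.prev = &list->link;
        list->offset = offset;
    }

    /* usage, mirroring the call above:
       list_init_with_offset(&consumers, offsetof(struct cache, consumer_link)); */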
@@ -118,70 +127,38 @@ vm_cache_create(vm_store *store)
 }
 
 
-status_t
-vm_cache_ref_create(vm_cache *cache, bool acquireLock)
-{
-    vm_cache_ref *ref;
-    status_t status;
-
-    ref = malloc(sizeof(vm_cache_ref));
-    if (ref == NULL)
-        return B_NO_MEMORY;
-
-    status = mutex_init(&ref->lock, "cache_ref_mutex");
-    if (status < B_OK && (!kernel_startup || status != B_NO_MORE_SEMS)) {
-        // During early boot, we cannot create semaphores - they are
-        // created later in vm_init_post_sem()
-        free(ref);
-        return status;
-    }
-
-    if (acquireLock)
-        mutex_lock(&ref->lock);
-
-    ref->areas = NULL;
-    ref->ref_count = 1;
-
-    // connect the cache to its ref
-    ref->cache = cache;
-    cache->ref = ref;
-
-    return B_OK;
-}
-
-
 void
-vm_cache_acquire_ref(vm_cache_ref *cacheRef)
+vm_cache_acquire_ref(vm_cache *cache)
 {
-    TRACE(("vm_cache_acquire_ref: cacheRef %p, ref will be %ld\n",
-        cacheRef, cacheRef->ref_count + 1));
+    TRACE(("vm_cache_acquire_ref: cache %p, ref will be %ld\n",
+        cache, cache->ref_count + 1));
 
-    if (cacheRef == NULL)
+    if (cache == NULL)
         panic("vm_cache_acquire_ref: passed NULL\n");
 
-    if (cacheRef->cache->store->ops->acquire_ref != NULL)
-        cacheRef->cache->store->ops->acquire_ref(cacheRef->cache->store);
+    if (cache->store->ops->acquire_ref != NULL)
+        cache->store->ops->acquire_ref(cache->store);
 
-    atomic_add(&cacheRef->ref_count, 1);
+    atomic_add(&cache->ref_count, 1);
 }
 
 
 void
-vm_cache_release_ref(vm_cache_ref *cacheRef)
+vm_cache_release_ref(vm_cache *cache)
 {
     vm_page *page;
 
     TRACE(("vm_cache_release_ref: cacheRef %p, ref will be %ld\n",
-        cacheRef, cacheRef->ref_count - 1));
+        cache, cache->ref_count - 1));
 
-    if (cacheRef == NULL)
+    if (cache == NULL)
         panic("vm_cache_release_ref: passed NULL\n");
 
-    if (atomic_add(&cacheRef->ref_count, -1) != 1) {
+    if (atomic_add(&cache->ref_count, -1) != 1) {
         // the store ref is only released on the "working" refs, not
         // on the initial one (this is vnode specific)
-        if (cacheRef->cache->store->ops->release_ref)
-            cacheRef->cache->store->ops->release_ref(cacheRef->cache->store);
+        if (cache->store->ops->release_ref)
+            cache->store->ops->release_ref(cache->store);
 #if 0
 {
     // count min references to see if everything is okay
@@ -215,25 +192,25 @@ vm_cache_release_ref(vm_cache_ref *cacheRef)
 
     // delete this cache
 
-    if (cacheRef->areas != NULL)
-        panic("cache_ref %p to be deleted still has areas", cacheRef);
-    if (!list_is_empty(&cacheRef->cache->consumers))
-        panic("cache %p to be deleted still has consumers", cacheRef->cache);
+    if (cache->areas != NULL)
+        panic("cache %p to be deleted still has areas", cache);
+    if (!list_is_empty(&cache->consumers))
+        panic("cache %p to be deleted still has consumers", cache);
 
     // delete the cache's backing store
-    cacheRef->cache->store->ops->destroy(cacheRef->cache->store);
+    cache->store->ops->destroy(cache->store);
 
     // free all of the pages in the cache
-    page = cacheRef->cache->page_list;
+    page = cache->page_list;
     while (page) {
         vm_page *oldPage = page;
         int state;
 
         page = page->cache_next;
 
-        if (oldPage->mappings != NULL || oldPage->wired_count != 0) {
+        if (!oldPage->mappings.IsEmpty() || oldPage->wired_count != 0) {
             panic("remove page %p from cache %p: page still has mappings!\n",
-                oldPage, cacheRef->cache);
+                oldPage, cache);
         }
 
         // remove it from the hash table
@@ -253,67 +230,66 @@ vm_cache_release_ref(vm_cache_ref *cacheRef)
     }
 
     // remove the ref to the source
-    if (cacheRef->cache->source)
-        vm_cache_remove_consumer(cacheRef->cache->source->ref, cacheRef->cache);
+    if (cache->source)
+        vm_cache_remove_consumer(cache->source, cache);
 
-    mutex_destroy(&cacheRef->lock);
-    free(cacheRef->cache);
-    free(cacheRef);
+    mutex_destroy(&cache->lock);
+    free(cache);
 }
 
 
 vm_page *
-vm_cache_lookup_page(vm_cache_ref *cacheRef, off_t offset)
+vm_cache_lookup_page(vm_cache *cache, off_t offset)
 {
     struct page_lookup_key key;
     cpu_status state;
     vm_page *page;
 
-    ASSERT_LOCKED_MUTEX(&cacheRef->lock);
+    ASSERT_LOCKED_MUTEX(&cache->lock);
 
     key.offset = (uint32)(offset >> PAGE_SHIFT);
-    key.cache = cacheRef->cache;
+    key.cache = cache;
 
     state = disable_interrupts();
     acquire_spinlock(&sPageCacheTableLock);
 
-    page = hash_lookup(sPageCacheTable, &key);
+    page = (vm_page *)hash_lookup(sPageCacheTable, &key);
 
     release_spinlock(&sPageCacheTableLock);
     restore_interrupts(state);
 
-    if (page != NULL && cacheRef->cache != page->cache)
-        panic("page %p not in cache %p\n", page, cacheRef->cache);
+    if (page != NULL && cache != page->cache)
+        panic("page %p not in cache %p\n", page, cache);
 
     return page;
 }
 
 
 void
-vm_cache_insert_page(vm_cache_ref *cacheRef, vm_page *page, off_t offset)
+vm_cache_insert_page(vm_cache *cache, vm_page *page, off_t offset)
 {
     cpu_status state;
 
-    TRACE(("vm_cache_insert_page: cacheRef %p, page %p, offset %Ld\n",
-        cacheRef, page, offset));
-    ASSERT_LOCKED_MUTEX(&cacheRef->lock);
+    TRACE(("vm_cache_insert_page: cache %p, page %p, offset %Ld\n",
+        cache, page, offset));
+    ASSERT_LOCKED_MUTEX(&cache->lock);
 
     if (page->cache != NULL) {
         panic("insert page %p into cache %p: page cache is set to %p\n",
-            page, cacheRef->cache, page->cache);
+            page, cache, page->cache);
     }
 
     page->cache_offset = (uint32)(offset >> PAGE_SHIFT);
 
-    if (cacheRef->cache->page_list != NULL)
-        cacheRef->cache->page_list->cache_prev = page;
+    if (cache->page_list != NULL)
+        cache->page_list->cache_prev = page;
 
-    page->cache_next = cacheRef->cache->page_list;
+    page->cache_next = cache->page_list;
     page->cache_prev = NULL;
-    cacheRef->cache->page_list = page;
-    cacheRef->cache->page_count++;
+    cache->page_list = page;
+    cache->page_count++;
 
-    page->cache = cacheRef->cache;
+    page->cache = cache;
 
     state = disable_interrupts();
     acquire_spinlock(&sPageCacheTableLock);
@@ -328,18 +304,18 @@ vm_cache_insert_page(vm_cache_ref *cacheRef, vm_page *page, off_t offset)
 /*!
     Removes the vm_page from this cache. Of course, the page must
     really be in this cache or evil things will happen.
-    The vm_cache_ref lock must be held.
+    The cache lock must be held.
 */
 void
-vm_cache_remove_page(vm_cache_ref *cacheRef, vm_page *page)
+vm_cache_remove_page(vm_cache *cache, vm_page *page)
 {
     cpu_status state;
 
-    TRACE(("vm_cache_remove_page: cache %p, page %p\n", cacheRef, page));
-    ASSERT_LOCKED_MUTEX(&cacheRef->lock);
+    TRACE(("vm_cache_remove_page: cache %p, page %p\n", cache, page));
+    ASSERT_LOCKED_MUTEX(&cache->lock);
 
-    if (page->cache != cacheRef->cache)
-        panic("remove page from %p: page cache is set to %p\n", cacheRef->cache, page->cache);
+    if (page->cache != cache)
+        panic("remove page from %p: page cache is set to %p\n", cache, page->cache);
 
     state = disable_interrupts();
     acquire_spinlock(&sPageCacheTableLock);
@@ -349,31 +325,31 @@ vm_cache_remove_page(vm_cache_ref *cacheRef, vm_page *page)
     release_spinlock(&sPageCacheTableLock);
     restore_interrupts(state);
 
-    if (cacheRef->cache->page_list == page) {
+    if (cache->page_list == page) {
         if (page->cache_next != NULL)
             page->cache_next->cache_prev = NULL;
-        cacheRef->cache->page_list = page->cache_next;
+        cache->page_list = page->cache_next;
     } else {
         if (page->cache_prev != NULL)
             page->cache_prev->cache_next = page->cache_next;
         if (page->cache_next != NULL)
             page->cache_next->cache_prev = page->cache_prev;
     }
-    cacheRef->cache->page_count--;
+    cache->page_count--;
     page->cache = NULL;
 }
 
 
 status_t
-vm_cache_write_modified(vm_cache_ref *ref, bool fsReenter)
+vm_cache_write_modified(vm_cache *cache, bool fsReenter)
 {
     status_t status;
 
-    TRACE(("vm_cache_write_modified(ref = %p)\n", ref));
+    TRACE(("vm_cache_write_modified(cache = %p)\n", cache));
 
-    mutex_lock(&ref->lock);
-    status = vm_page_write_modified(ref->cache, fsReenter);
-    mutex_unlock(&ref->lock);
+    mutex_lock(&cache->lock);
+    status = vm_page_write_modified(cache, fsReenter);
+    mutex_unlock(&cache->lock);
 
     return status;
 }
@@ -385,12 +361,12 @@ vm_cache_write_modified(vm_cache_ref *ref, bool fsReenter)
     Assumes you have the \a ref's lock held.
 */
 status_t
-vm_cache_set_minimal_commitment_locked(vm_cache_ref *ref, off_t commitment)
+vm_cache_set_minimal_commitment_locked(vm_cache *cache, off_t commitment)
 {
     status_t status = B_OK;
-    vm_store *store = ref->cache->store;
+    vm_store *store = cache->store;
 
-    ASSERT_LOCKED_MUTEX(&ref->lock);
+    ASSERT_LOCKED_MUTEX(&cache->lock);
 
     // If we don't have enough committed space to cover through to the new end of region...
     if (store->committed_size < commitment) {
@@ -408,18 +384,17 @@ vm_cache_set_minimal_commitment_locked(vm_cache_ref *ref, off_t commitment)
 /*!
     This function updates the size field of the vm_cache structure.
     If needed, it will free up all pages that don't belong to the cache anymore.
-    The vm_cache_ref lock must be held when you call it.
+    The cache lock must be held when you call it.
     Since removed pages don't belong to the cache any longer, they are not
     written back before they will be removed.
 */
 status_t
-vm_cache_resize(vm_cache_ref *cacheRef, off_t newSize)
+vm_cache_resize(vm_cache *cache, off_t newSize)
 {
-    vm_cache *cache = cacheRef->cache;
-    status_t status;
     uint32 oldPageCount, newPageCount;
+    status_t status;
 
-    ASSERT_LOCKED_MUTEX(&cacheRef->lock);
+    ASSERT_LOCKED_MUTEX(&cache->lock);
 
     status = cache->store->ops->commit(cache->store, newSize);
     if (status != B_OK)
@@ -437,7 +412,7 @@ vm_cache_resize(vm_cache_ref *cacheRef, off_t newSize)
 
         if (page->cache_offset >= newPageCount) {
             // remove the page and put it into the free queue
-            vm_cache_remove_page(cacheRef, page);
+            vm_cache_remove_page(cache, page);
             vm_page_set_state(page, PAGE_STATE_FREE);
         }
     }
@@ -449,45 +424,38 @@ vm_cache_resize(vm_cache_ref *cacheRef, off_t newSize)
 
 
 /*!
-    Removes the \a consumer from the \a cacheRef's cache.
+    Removes the \a consumer from the \a cache.
     It will also release the reference to the cacheRef owned by the consumer.
-    Assumes you have the consumer's cache_ref lock held.
+    Assumes you have the consumer's cache lock held.
 */
 void
-vm_cache_remove_consumer(vm_cache_ref *cacheRef, vm_cache *consumer)
+vm_cache_remove_consumer(vm_cache *cache, vm_cache *consumer)
 {
-    vm_cache *cache;
-
-    TRACE(("remove consumer vm cache %p from cache %p\n", consumer, cacheRef->cache));
-    ASSERT_LOCKED_MUTEX(&consumer->ref->lock);
+    TRACE(("remove consumer vm cache %p from cache %p\n", consumer, cache));
+    ASSERT_LOCKED_MUTEX(&consumer->lock);
 
     // remove the consumer from the cache, but keep its reference until later
-    mutex_lock(&cacheRef->lock);
-    cache = cacheRef->cache;
+    mutex_lock(&cache->lock);
     list_remove_item(&cache->consumers, consumer);
     consumer->source = NULL;
 
-    if (cacheRef->areas == NULL && cache->source != NULL
+    if (cache->areas == NULL && cache->source != NULL
         && !list_is_empty(&cache->consumers)
         && cache->consumers.link.next == cache->consumers.link.prev) {
         // The cache is not really needed anymore - it can be merged with its only
         // consumer left.
-        vm_cache_ref *consumerRef;
         bool merge = false;
 
-        consumer = list_get_first_item(&cache->consumers);
+        consumer = (vm_cache *)list_get_first_item(&cache->consumers);
 
         // Our cache doesn't have a ref to its consumer (only the other way around),
         // so we cannot just acquire it here; it might be deleted right now
         while (true) {
-            int32 count;
-            consumerRef = consumer->ref;
-
-            count = consumerRef->ref_count;
+            int32 count = consumer->ref_count;
             if (count == 0)
                 break;
 
-            if (atomic_test_and_set(&consumerRef->ref_count, count + 1, count) == count) {
+            if (atomic_test_and_set(&consumer->ref_count, count + 1, count) == count) {
                 // We managed to grab a reference to the consumerRef.
                 // Since this doesn't guarantee that we get the cache we wanted
                 // to, we need to check if this cache is really the last
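The while (true) loop above is the delicate part of the merge path: the cache holds no reference on its consumer, so a plain acquire could race with the consumer's destruction. Instead, the reference count is only bumped if it is still non-zero, using atomic_test_and_set() (a compare-and-swap). A stand-alone C11 sketch of that pattern (illustrative only; the kernel uses its own atomics):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Take a reference only if the object still has one, i.e. is not
       already on its way to being deleted. */
    static bool
    try_acquire_ref(atomic_int *ref_count)
    {
        int count = atomic_load(ref_count);
        while (count != 0) {
            /* on failure, count is reloaded with the current value and we retry */
            if (atomic_compare_exchange_weak(ref_count, &count, count + 1))
                return true;
        }
        return false;
    }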
@@ -501,23 +469,19 @@ vm_cache_remove_consumer(vm_cache_ref *cacheRef, vm_cache *consumer)
                 // But since we need to keep the locking order upper->lower cache, we
                 // need to unlock our cache now
                 cache->busy = true;
-                mutex_unlock(&cacheRef->lock);
+                mutex_unlock(&cache->lock);
 
-                mutex_lock(&consumerRef->lock);
-                mutex_lock(&cacheRef->lock);
+                mutex_lock(&consumer->lock);
+                mutex_lock(&cache->lock);
 
                 // the cache and the situation might have changed
-                cache = cacheRef->cache;
-                consumer = consumerRef->cache;
-
-                if (cacheRef->areas != NULL || cache->source == NULL
+                if (cache->areas != NULL || cache->source == NULL
                     || list_is_empty(&cache->consumers)
                     || cache->consumers.link.next != cache->consumers.link.prev
                     || consumer != list_get_first_item(&cache->consumers)) {
                     merge = false;
                     cache->busy = false;
-                    mutex_unlock(&consumerRef->lock);
-                    vm_cache_release_ref(consumerRef);
+                    mutex_unlock(&consumer->lock);
+                    vm_cache_release_ref(consumer);
                 }
             }
|
||||
@ -525,16 +489,16 @@ vm_cache_remove_consumer(vm_cache_ref *cacheRef, vm_cache *consumer)
|
||||
vm_page *page, *nextPage;
|
||||
vm_cache *newSource;
|
||||
|
||||
consumer = list_remove_head_item(&cache->consumers);
|
||||
consumer = (vm_cache *)list_remove_head_item(&cache->consumers);
|
||||
|
||||
TRACE(("merge vm cache %p (ref == %ld) with vm cache %p\n",
|
||||
cache, cacheRef->ref_count, consumer));
|
||||
cache, cache->ref_count, consumer));
|
||||
|
||||
for (page = cache->page_list; page != NULL; page = nextPage) {
|
||||
vm_page *consumerPage;
|
||||
nextPage = page->cache_next;
|
||||
|
||||
consumerPage = vm_cache_lookup_page(consumerRef,
|
||||
consumerPage = vm_cache_lookup_page(consumer,
|
||||
(off_t)page->cache_offset << PAGE_SHIFT);
|
||||
if (consumerPage == NULL) {
|
||||
// the page already is not yet in the consumer cache - move
|
||||
@ -544,8 +508,8 @@ if (consumer->virtual_base == 0x11000)
|
||||
dprintf("%ld: move page %p offset %ld from cache %p to cache %p\n",
|
||||
find_thread(NULL), page, page->cache_offset, cache, consumer);
|
||||
#endif
|
||||
vm_cache_remove_page(cacheRef, page);
|
||||
vm_cache_insert_page(consumerRef, page,
|
||||
vm_cache_remove_page(cache, page);
|
||||
vm_cache_insert_page(consumer, page,
|
||||
(off_t)page->cache_offset << PAGE_SHIFT);
|
||||
} else if (consumerPage->state == PAGE_STATE_BUSY
|
||||
&& consumerPage->type == PAGE_TYPE_DUMMY
|
||||
@ -554,11 +518,11 @@ if (consumer->virtual_base == 0x11000)
|
||||
// vm_soft_fault() has mapped our page so we can just
|
||||
// move it up
|
||||
//dprintf("%ld: merged busy page %p, cache %p, offset %ld\n", find_thread(NULL), page, cacheRef->cache, page->cache_offset);
|
||||
vm_cache_remove_page(cacheRef, consumerPage);
|
||||
vm_cache_remove_page(cache, consumerPage);
|
||||
consumerPage->state = PAGE_STATE_INACTIVE;
|
||||
|
||||
vm_cache_remove_page(cacheRef, page);
|
||||
vm_cache_insert_page(consumerRef, page,
|
||||
vm_cache_remove_page(cache, page);
|
||||
vm_cache_insert_page(consumer, page,
|
||||
(off_t)page->cache_offset << PAGE_SHIFT);
|
||||
}
|
||||
#if 0
|
||||
@ -571,84 +535,84 @@ else if (consumer->virtual_base == 0x11000)
|
||||
newSource = cache->source;
|
||||
|
||||
// The remaining consumer has gotten a new source
|
||||
mutex_lock(&newSource->ref->lock);
|
||||
mutex_lock(&newSource->lock);
|
||||
|
||||
list_remove_item(&newSource->consumers, cache);
|
||||
list_add_item(&newSource->consumers, consumer);
|
||||
consumer->source = newSource;
|
||||
cache->source = NULL;
|
||||
|
||||
mutex_unlock(&newSource->ref->lock);
|
||||
mutex_unlock(&newSource->lock);
|
||||
|
||||
// Release the other reference to the cache - we take over
|
||||
// its reference of its source cache; we can do this here
|
||||
// (with the cacheRef locked) since we own another reference
|
||||
// from the first consumer we removed
|
||||
if (cacheRef->ref_count < 2)
|
||||
panic("cacheRef %p ref count too low!\n", cacheRef);
|
||||
vm_cache_release_ref(cacheRef);
|
||||
if (cache->ref_count < 2)
|
||||
panic("cacheRef %p ref count too low!\n", cache);
|
||||
vm_cache_release_ref(cache);
|
||||
|
||||
mutex_unlock(&consumerRef->lock);
|
||||
vm_cache_release_ref(consumerRef);
|
||||
mutex_unlock(&consumer->lock);
|
||||
vm_cache_release_ref(consumer);
|
||||
}
|
||||
}
|
||||
|
||||
mutex_unlock(&cacheRef->lock);
|
||||
vm_cache_release_ref(cacheRef);
|
||||
mutex_unlock(&cache->lock);
|
||||
vm_cache_release_ref(cache);
|
||||
}
|
||||
|
||||
|
||||
/*!
|
||||
Marks the \a cacheRef's cache as source of the \a consumer cache,
|
||||
Marks the \a cache as source of the \a consumer cache,
|
||||
and adds the \a consumer to its list.
|
||||
This also grabs a reference to the source cache.
|
||||
Assumes you have the cache_ref and the consumer's lock held.
|
||||
Assumes you have the cache and the consumer's lock held.
|
||||
*/
|
||||
void
|
||||
vm_cache_add_consumer_locked(vm_cache_ref *cacheRef, vm_cache *consumer)
|
||||
vm_cache_add_consumer_locked(vm_cache *cache, vm_cache *consumer)
|
||||
{
|
||||
TRACE(("add consumer vm cache %p to cache %p\n", consumer, cacheRef->cache));
|
||||
ASSERT_LOCKED_MUTEX(&cacheRef->lock);
|
||||
ASSERT_LOCKED_MUTEX(&consumer->ref->lock);
|
||||
TRACE(("add consumer vm cache %p to cache %p\n", consumer, cache));
|
||||
ASSERT_LOCKED_MUTEX(&cache->lock);
|
||||
ASSERT_LOCKED_MUTEX(&consumer->lock);
|
||||
|
||||
consumer->source = cacheRef->cache;
|
||||
list_add_item(&cacheRef->cache->consumers, consumer);
|
||||
consumer->source = cache;
|
||||
list_add_item(&cache->consumers, consumer);
|
||||
|
||||
vm_cache_acquire_ref(cacheRef);
|
||||
vm_cache_acquire_ref(cache);
|
||||
}
|
||||
|
||||
|
||||
/*!
|
||||
Adds the \a area to the \a cacheRef.
|
||||
Assumes you have the locked the cache_ref.
|
||||
Adds the \a area to the \a cache.
|
||||
Assumes you have the locked the cache.
|
||||
*/
|
||||
status_t
|
||||
vm_cache_insert_area_locked(vm_cache_ref *cacheRef, vm_area *area)
|
||||
vm_cache_insert_area_locked(vm_cache *cache, vm_area *area)
|
||||
{
|
||||
ASSERT_LOCKED_MUTEX(&cacheRef->lock);
|
||||
ASSERT_LOCKED_MUTEX(&cache->lock);
|
||||
|
||||
area->cache_next = cacheRef->areas;
|
||||
area->cache_next = cache->areas;
|
||||
if (area->cache_next)
|
||||
area->cache_next->cache_prev = area;
|
||||
area->cache_prev = NULL;
|
||||
cacheRef->areas = area;
|
||||
cache->areas = area;
|
||||
|
||||
return B_OK;
|
||||
}
|
||||
|
||||
|
||||
status_t
|
||||
vm_cache_remove_area(vm_cache_ref *cacheRef, vm_area *area)
|
||||
vm_cache_remove_area(vm_cache *cache, vm_area *area)
|
||||
{
|
||||
mutex_lock(&cacheRef->lock);
|
||||
mutex_lock(&cache->lock);
|
||||
|
||||
if (area->cache_prev)
|
||||
area->cache_prev->cache_next = area->cache_next;
|
||||
if (area->cache_next)
|
||||
area->cache_next->cache_prev = area->cache_prev;
|
||||
if (cacheRef->areas == area)
|
||||
cacheRef->areas = area->cache_next;
|
||||
if (cache->areas == area)
|
||||
cache->areas = area->cache_next;
|
||||
|
||||
mutex_unlock(&cacheRef->lock);
|
||||
mutex_unlock(&cache->lock);
|
||||
return B_OK;
|
||||
}
|
@@ -663,7 +663,6 @@ status_t
 vm_page_write_modified(vm_cache *cache, bool fsReenter)
 {
     vm_page *page = cache->page_list;
-    vm_cache_ref *ref = cache->ref;
 
     // ToDo: join adjacent pages into one vec list
 
@@ -697,7 +696,7 @@ vm_page_write_modified(vm_cache *cache, bool fsReenter)
 
         pageOffset = (off_t)page->cache_offset << PAGE_SHIFT;
 
-        for (area = ref->areas; area; area = area->cache_next) {
+        for (area = cache->areas; area; area = area->cache_next) {
             if (pageOffset >= area->cache_offset
                 && pageOffset < area->cache_offset + area->size) {
                 vm_translation_map *map = &area->address_space->translation_map;
@@ -726,12 +725,11 @@ vm_page_write_modified(vm_cache *cache, bool fsReenter)
         if (!gotPage)
             continue;
 
-        mutex_unlock(&ref->lock);
+        mutex_unlock(&cache->lock);
 
         status = write_page(page, fsReenter);
 
-        mutex_lock(&ref->lock);
-        cache = ref->cache;
+        mutex_lock(&cache->lock);
 
         if (status == B_OK) {
             if (dequeuedPage) {