diff --git a/headers/private/kernel/vm_types.h b/headers/private/kernel/vm_types.h index 220fff929b..bcd055547b 100644 --- a/headers/private/kernel/vm_types.h +++ b/headers/private/kernel/vm_types.h @@ -71,6 +71,7 @@ typedef struct vm_cache { struct vm_cache *source; struct vm_store *store; off_t virtual_size; + uint32 page_count; uint32 temporary : 1; uint32 scan_skip : 1; } vm_cache; diff --git a/src/system/kernel/vm/vm.cpp b/src/system/kernel/vm/vm.cpp index ca45001871..38a21448de 100644 --- a/src/system/kernel/vm/vm.cpp +++ b/src/system/kernel/vm/vm.cpp @@ -3105,11 +3105,17 @@ fill_area_info(struct vm_area *area, area_info *info, size_t size) info->protection = area->protection & B_USER_PROTECTION; info->lock = B_FULL_LOCK; info->team = area->address_space->id; - info->ram_size = area->size; info->copy_count = 0; info->in_count = 0; info->out_count = 0; // ToDo: retrieve real values here! + + mutex_lock(&area->cache_ref->lock); + + // Note, this is a simplification; the cache could be larger than this area + info->ram_size = area->cache_ref->cache->page_count * B_PAGE_SIZE; + + mutex_unlock(&area->cache_ref->lock); } diff --git a/src/system/kernel/vm/vm_cache.c b/src/system/kernel/vm/vm_cache.c index 35f88871b0..b610831b9c 100644 --- a/src/system/kernel/vm/vm_cache.c +++ b/src/system/kernel/vm/vm_cache.c @@ -107,6 +107,7 @@ vm_cache_create(vm_store *store) cache->virtual_size = 0; cache->temporary = 0; cache->scan_skip = 0; + cache->page_count = 0; // connect the store to its cache cache->store = store; @@ -243,23 +244,24 @@ vm_cache_lookup_page(vm_cache_ref *cache_ref, off_t offset) void -vm_cache_insert_page(vm_cache_ref *cache_ref, vm_page *page, off_t offset) +vm_cache_insert_page(vm_cache_ref *cacheRef, vm_page *page, off_t offset) { cpu_status state; - TRACE(("vm_cache_insert_page: cache_ref %p, page %p, offset %Ld\n", cache_ref, page, offset)); - ASSERT_LOCKED_MUTEX(&cache_ref->lock); + TRACE(("vm_cache_insert_page: cache_ref %p, page %p, offset %Ld\n", 
+		cacheRef, page, offset));