We now track how many pages are in a vm_cache. Therefore, area_info.ram_size
now reflects the number of pages in the area's cache, instead of just the size
of the area.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@16834 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Axel Dörfler 2006-03-18 20:17:31 +00:00
parent bc9902cf98
commit 5f0bf2a3e1
3 changed files with 24 additions and 14 deletions

View File

@ -71,6 +71,7 @@ typedef struct vm_cache {
struct vm_cache *source;
struct vm_store *store;
off_t virtual_size;
uint32 page_count;
uint32 temporary : 1;
uint32 scan_skip : 1;
} vm_cache;

View File

@ -3105,11 +3105,17 @@ fill_area_info(struct vm_area *area, area_info *info, size_t size)
info->protection = area->protection & B_USER_PROTECTION;
info->lock = B_FULL_LOCK;
info->team = area->address_space->id;
info->ram_size = area->size;
info->copy_count = 0;
info->in_count = 0;
info->out_count = 0;
// ToDo: retrieve real values here!
mutex_lock(&area->cache_ref->lock);
// Note, this is a simplification; the cache could be larger than this area
info->ram_size = area->cache_ref->cache->page_count * B_PAGE_SIZE;
mutex_unlock(&area->cache_ref->lock);
}

View File

@ -107,6 +107,7 @@ vm_cache_create(vm_store *store)
cache->virtual_size = 0;
cache->temporary = 0;
cache->scan_skip = 0;
cache->page_count = 0;
// connect the store to its cache
cache->store = store;
@ -243,23 +244,24 @@ vm_cache_lookup_page(vm_cache_ref *cache_ref, off_t offset)
void
vm_cache_insert_page(vm_cache_ref *cache_ref, vm_page *page, off_t offset)
vm_cache_insert_page(vm_cache_ref *cacheRef, vm_page *page, off_t offset)
{
cpu_status state;
TRACE(("vm_cache_insert_page: cache_ref %p, page %p, offset %Ld\n", cache_ref, page, offset));
ASSERT_LOCKED_MUTEX(&cache_ref->lock);
TRACE(("vm_cache_insert_page: cache_ref %p, page %p, offset %Ld\n", cacheRef, page, offset));
ASSERT_LOCKED_MUTEX(&cacheRef->lock);
page->cache_offset = (uint32)(offset >> PAGE_SHIFT);
if (cache_ref->cache->page_list != NULL)
cache_ref->cache->page_list->cache_prev = page;
if (cacheRef->cache->page_list != NULL)
cacheRef->cache->page_list->cache_prev = page;
page->cache_next = cache_ref->cache->page_list;
page->cache_next = cacheRef->cache->page_list;
page->cache_prev = NULL;
cache_ref->cache->page_list = page;
cacheRef->cache->page_list = page;
cacheRef->cache->page_count++;
page->cache = cache_ref->cache;
page->cache = cacheRef->cache;
state = disable_interrupts();
acquire_spinlock(&page_cache_table_lock);
@ -278,12 +280,12 @@ vm_cache_insert_page(vm_cache_ref *cache_ref, vm_page *page, off_t offset)
*/
void
vm_cache_remove_page(vm_cache_ref *cache_ref, vm_page *page)
vm_cache_remove_page(vm_cache_ref *cacheRef, vm_page *page)
{
cpu_status state;
TRACE(("vm_cache_remove_page: cache %p, page %p\n", cache_ref, page));
ASSERT_LOCKED_MUTEX(&cache_ref->lock);
TRACE(("vm_cache_remove_page: cache %p, page %p\n", cacheRef, page));
ASSERT_LOCKED_MUTEX(&cacheRef->lock);
state = disable_interrupts();
acquire_spinlock(&page_cache_table_lock);
@ -293,16 +295,17 @@ vm_cache_remove_page(vm_cache_ref *cache_ref, vm_page *page)
release_spinlock(&page_cache_table_lock);
restore_interrupts(state);
if (cache_ref->cache->page_list == page) {
if (cacheRef->cache->page_list == page) {
if (page->cache_next != NULL)
page->cache_next->cache_prev = NULL;
cache_ref->cache->page_list = page->cache_next;
cacheRef->cache->page_list = page->cache_next;
} else {
if (page->cache_prev != NULL)
page->cache_prev->cache_next = page->cache_next;
if (page->cache_next != NULL)
page->cache_next->cache_prev = page->cache_prev;
}
cacheRef->cache->page_count--;
page->cache = NULL;
}