* Private and temporary vm_caches now maintain their new virtual_base field, which allows

them to commit substantially less memory (we were committing about 40 MB (!) too much
  after a complete system boot). This means it is now less likely that you'll run out of memory.
* fill_area_info() no longer filters out kernel protection flags - we may want to keep
  filtering them when called from userland, though; that is still undecided.
* Added new debugger command "avail" which shows how much memory has been committed, and
  how much is regarded as free space.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@19771 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Axel Dörfler 2007-01-12 15:07:18 +00:00
parent 7d90af85d3
commit d1b0be94b0
4 changed files with 56 additions and 34 deletions

View File

@ -1,5 +1,5 @@
/*
* Copyright 2002-2006, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
@ -75,7 +75,9 @@ typedef struct vm_cache {
vm_cache_ref *ref;
struct vm_cache *source;
struct vm_store *store;
off_t virtual_base;
off_t virtual_size;
// the size is absolute, and independent from virtual_base
uint32 page_count;
uint32 temporary : 1;
uint32 scan_skip : 1;

View File

@ -1,5 +1,5 @@
/*
* Copyright 2002-2006, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
@ -506,9 +506,8 @@ map_backing_store(vm_address_space *addressSpace, vm_store *store, void **_virtu
off_t offset, addr_t size, uint32 addressSpec, int wiring, int protection,
int mapping, vm_area **_area, const char *areaName)
{
vm_cache *cache;
vm_cache_ref *cacheRef;
vm_cache *cache;
status_t status;
TRACE(("map_backing_store: aspace %p, store %p, *vaddr %p, offset 0x%Lx, size %lu, addressSpec %ld, wiring %d, protection %d, _area %p, area_name '%s'\n",
@ -528,7 +527,6 @@ map_backing_store(vm_address_space *addressSpace, vm_store *store, void **_virtu
vm_cache *newCache;
vm_store *newStore;
// ToDo: panic???
// create an anonymous store object
newStore = vm_store_create_anonymous_noswap((protection & B_STACK_AREA) != 0,
USER_STACK_GUARD_PAGES);
@ -557,6 +555,7 @@ map_backing_store(vm_address_space *addressSpace, vm_store *store, void **_virtu
cache = newCache;
cacheRef = newCache->ref;
store = newStore;
cache->virtual_base = offset;
cache->virtual_size = offset + size;
}
@ -1658,7 +1657,8 @@ vm_set_area_protection(team_id aspaceID, area_id areaID, uint32 newProtection)
count++;
}
status = cache->store->ops->commit(cache->store, count * B_PAGE_SIZE);
status = cache->store->ops->commit(cache->store,
cache->virtual_base + count * B_PAGE_SIZE);
// ToDo: we may be able to join with our source cache, if count == 0
}
@ -1905,6 +1905,7 @@ dump_cache(int argc, char **argv)
kprintf(" %p\n", consumer);
}
kprintf("store: %p\n", cache->store);
kprintf("virtual_base: 0x%Lx\n", cache->virtual_base);
kprintf("virtual_size: 0x%Lx\n", cache->virtual_size);
kprintf("temporary: %ld\n", cache->temporary);
kprintf("scan_skip: %ld\n", cache->scan_skip);
@ -2008,6 +2009,15 @@ dump_area_list(int argc, char **argv)
}
static int
dump_available_memory(int argc, char **argv)
{
	// Debugger command "avail": reports how much memory has been
	// committed (sAvailableMemory) versus the total amount of
	// physical memory known to the page allocator.
	(void)argc;
	(void)argv;

	kprintf("Available memory: %Ld/%lu bytes\n",
		sAvailableMemory, vm_page_num_pages() * B_PAGE_SIZE);

	return 0;
}
status_t
vm_delete_areas(struct vm_address_space *addressSpace)
{
@ -2356,6 +2366,7 @@ vm_init(kernel_args *args)
add_debugger_command("area", &dump_area, "Dump info about a particular area");
add_debugger_command("cache_ref", &dump_cache_ref, "Dump cache_ref data structure");
add_debugger_command("cache", &dump_cache, "Dump cache_ref data structure");
add_debugger_command("avail", &dump_available_memory, "Dump available memory");
// add_debugger_command("dl", &display_mem, "dump memory long words (64-bit)");
add_debugger_command("dw", &display_mem, "dump memory words (32-bit)");
add_debugger_command("ds", &display_mem, "dump memory shorts (16-bit)");
@ -2971,6 +2982,30 @@ fix_protection(uint32 *protection)
}
static void
fill_area_info(struct vm_area *area, area_info *info, size_t size)
{
	// Fill the caller's area_info buffer from \a area.
	// Note: kernel protection flags are passed through unfiltered here.
	vm_cache_ref *cacheRef = area->cache_ref;

	info->area = area->id;
	info->team = area->address_space->id;
	strlcpy(info->name, area->name, B_OS_NAME_LENGTH);
	info->address = (void *)area->base;
	info->size = area->size;
	info->protection = area->protection;
	info->lock = B_FULL_LOCK;

	// ToDo: retrieve real values here!
	info->copy_count = 0;
	info->in_count = 0;
	info->out_count = 0;

	mutex_lock(&cacheRef->lock);

	// Note, this is a simplification; the cache could be larger than this area
	info->ram_size = cacheRef->cache->page_count * B_PAGE_SIZE;

	mutex_unlock(&cacheRef->lock);
}
// #pragma mark -
@ -3005,7 +3040,7 @@ user_memset(void *s, char c, size_t count)
}
// #pragma mark -
// #pragma mark - kernel public API
long
@ -3205,30 +3240,6 @@ find_area(const char *name)
}
static void
fill_area_info(struct vm_area *area, area_info *info, size_t size)
{
	// Fill the caller's area_info buffer from \a area, masking the
	// protection down to the userland-visible bits (B_USER_PROTECTION).
	strlcpy(info->name, area->name, B_OS_NAME_LENGTH);
	info->area = area->id;
	info->address = (void *)area->base;
	info->size = area->size;
	info->protection = area->protection & B_USER_PROTECTION;
	info->lock = B_FULL_LOCK;
	info->team = area->address_space->id;

	// ToDo: retrieve real values here!
	info->copy_count = 0;
	info->in_count = 0;
	info->out_count = 0;

	mutex_lock(&area->cache_ref->lock);

	// Note, this is a simplification; the cache could be larger than this area
	info->ram_size = area->cache_ref->cache->page_count * B_PAGE_SIZE;

	mutex_unlock(&area->cache_ref->lock);
}
status_t
_get_area_info(area_id id, area_info *info, size_t size)
{
@ -3580,7 +3591,7 @@ delete_area(area_id area)
}
// #pragma mark -
// #pragma mark - Userland syscalls
status_t
@ -3647,6 +3658,9 @@ _user_get_area_info(area_id area, area_info *userInfo)
if (status < B_OK)
return status;
// TODO: do we want to prevent userland from seeing kernel protections?
//info.protection &= B_USER_PROTECTION;
if (user_memcpy(userInfo, &info, sizeof(area_info)) < B_OK)
return B_BAD_ADDRESS;
@ -3670,6 +3684,8 @@ _user_get_next_area_info(team_id team, int32 *userCookie, area_info *userInfo)
if (status != B_OK)
return status;
//info.protection &= B_USER_PROTECTION;
if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
|| user_memcpy(userInfo, &info, sizeof(area_info)) < B_OK)
return B_BAD_ADDRESS;

View File

@ -1,5 +1,5 @@
/*
* Copyright 2002-2006, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
@ -103,6 +103,7 @@ vm_cache_create(vm_store *store)
cache->page_list = NULL;
cache->ref = NULL;
cache->source = NULL;
cache->virtual_base = 0;
cache->virtual_size = 0;
cache->temporary = 0;
cache->scan_skip = 0;

View File

@ -1,5 +1,5 @@
/*
* Copyright 2002-2006, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
@ -52,6 +52,9 @@ anonymous_commit(struct vm_store *_store, off_t size)
if (store->can_overcommit)
return B_OK;
size -= store->vm.cache->virtual_base;
// anonymous stores don't need to span over their whole source
// Check to see how much we could commit - we need real memory
if (size > store->vm.committed_size) {