The heap is now located at 384 MB and creates a reserved range that spans the following 1152 MB. That range only guarantees that the heap can grow this much before the application needs the memory for something else; even then, the reserved heap range is reused from top to bottom, allowing for maximum heap usage. Of course, if the memory after the heap range is not claimed yet, it can still be claimed by the heap, too.
Added a new syscall to create the reserved range.
Fixed a bug in vm_delete_areas(): when it removed reserved areas, the area list could get messed up.
Fixed a bug in resize_area(): resized areas could never be deleted (missing vm_put_area())! resize_area() now supports reserved regions (but not perfectly yet, see the ToDo items).

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@12692 a95241bf-73f2-0310-859d-f6bbb57e9c96
parent 4bdd131ac5
commit 4393552b4d
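The userland side of this scheme boils down to reserving the range first and only then creating the heap area inside it; the real change is in the __init_heap() hunk at the end of this diff. Below is a minimal sketch of that flow, not part of the commit: reserve_and_create_heap() is a made-up wrapper name, and the constants are the ones used in the diff.

#include <OS.h>
#include <syscalls.h>	/* private header declaring _kern_init_heap_address_range(), as in the diff */

/* Sketch only: 0x18000000 bytes = 384 MB (heap base),
 * 0x48000000 bytes = 1152 MB (reserved range for future heap growth). */
static void *sHeapBase = (void *)0x18000000;

static area_id
reserve_and_create_heap(size_t initialSize)
{
	// Ask the kernel to reserve the address range after the heap base.
	// Other areas will avoid it, so the heap can later grow into that space.
	status_t status = _kern_init_heap_address_range((addr_t)sHeapBase, 0x48000000);

	// If the reservation worked, the heap must sit exactly at the reserved
	// base; otherwise fall back to the old "anywhere above base" behaviour.
	return create_area("heap", (void **)&sHeapBase,
		status == B_OK ? B_EXACT_ADDRESS : B_BASE_ADDRESS,
		initialSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
}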
@@ -210,6 +210,7 @@ extern status_t _kern_transfer_area(area_id area, void **_address, uint32 addre
 extern status_t _kern_set_area_protection(area_id area, uint32 newProtection);
 extern area_id _kern_clone_area(const char *name, void **_address, uint32 addressSpec,
             uint32 protection, area_id sourceArea);
+extern status_t _kern_init_heap_address_range(addr_t base, addr_t size);
 
 area_id sys_vm_map_file(const char *name, void **address, int addr_type,
             addr_t size, int lock, int mapping, const char *path, off_t offset);
@@ -86,6 +86,7 @@ status_t _user_transfer_area(area_id area, void **_address, uint32 addressSpec,
 status_t _user_set_area_protection(area_id area, uint32 newProtection);
 area_id _user_clone_area(const char *name, void **_address, uint32 addressSpec,
             uint32 protection, area_id sourceArea);
+status_t _user_init_heap_address_range(addr_t base, addr_t size);
 
 // to protect code regions with interrupts turned on
 void permit_page_faults(void);
@@ -232,7 +232,7 @@ find_reserved_area(vm_virtual_map *map, addr_t start, addr_t size, vm_area *area
     } else {
         // the area splits the reserved range into two separate ones
         // we need a new reserved area to cover this space
-        vm_area *reserved = _vm_create_reserved_region_struct(map, 0);
+        vm_area *reserved = _vm_create_reserved_region_struct(map, next->protection);
         if (reserved == NULL)
             return B_NO_MEMORY;
 
@@ -244,6 +244,7 @@ find_reserved_area(vm_virtual_map *map, addr_t start, addr_t size, vm_area *area
         reserved->size = next->base + next->size - start - size;
         next->size = start - next->base;
         reserved->base = start + size;
+        reserved->cache_offset = next->cache_offset;
     }
 
     area->base = start;
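The split arithmetic above is easiest to see with concrete numbers: the original reserved area keeps the part below the new area, and a second reserved struct covers the part above it. A standalone sketch with made-up addresses, not part of the commit:

#include <stdio.h>

typedef unsigned long addr_t;

int
main(void)
{
	// Hypothetical reserved range: 64 MB starting at 0x18000000.
	addr_t reservedBase = 0x18000000, reservedSize = 0x04000000;
	// New area placed in the middle of it.
	addr_t start = 0x19000000, size = 0x00100000;

	// Same arithmetic as in find_reserved_area(): the original reserved
	// area keeps the part below the new area...
	addr_t lowerSize = start - reservedBase;
	// ...and a second reserved area covers the part above it.
	addr_t upperBase = start + size;
	addr_t upperSize = reservedBase + reservedSize - start - size;

	printf("lower: [%#lx, %#lx)\n", reservedBase, reservedBase + lowerSize);
	printf("area:  [%#lx, %#lx)\n", start, start + size);
	printf("upper: [%#lx, %#lx)\n", upperBase, upperBase + upperSize);
	return 0;
}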
@@ -336,6 +337,7 @@ second_chance:
     next = map->areas;
     last = NULL;
     for (last = NULL; next; next = next->aspace_next, last = next) {
+        // ToDo: take free space after the reserved area into account!
         if (next->size == size) {
             // the reserved area is entirely covered, and thus, removed
             if (last)
@@ -674,7 +676,7 @@ vm_reserve_address_range(aspace_id aid, void **_address, uint32 addressSpec, add
     // check to see if this aspace has entered DELETE state
     if (addressSpace->state == VM_ASPACE_STATE_DELETION) {
         // okay, someone is trying to delete this aspace now, so we can't
-        // insert the area, so back out
+        // insert the area, let's back out
         status = B_BAD_TEAM_ID;
         goto err2;
     }
@@ -685,6 +687,9 @@ vm_reserve_address_range(aspace_id aid, void **_address, uint32 addressSpec, add
 
     // the area is now reserved!
 
+    area->cache_offset = area->base;
+        // we cache the original base address here
+
     release_sem_etc(addressSpace->virtual_map.sem, WRITE_COUNT, 0);
     return B_OK;
 
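The cached base added above matters later in this diff: resize_area() compares an area's base against next->cache_offset rather than next->base, because the reserved remainder's base may already have been moved up by earlier resizes, while the cached value still marks where the reservation originally started. A compilable illustration against a pared-down stand-in struct, not the kernel code; vm_area itself is not shown in this diff and the RESERVED_AREA_ID value below is chosen only for the sketch:

#include <stddef.h>

/* Pared-down stand-in for vm_area, just for illustration. */
typedef struct {
	long id;
	unsigned long base;
	unsigned long size;
	unsigned long cache_offset;	/* for reserved areas: the original base */
} fake_area;

#define RESERVED_AREA_ID (-1)	/* placeholder value for the sketch */

/* Shape of the test resize_area() performs further down: the grown area
 * must stay within what was *originally* reserved, which is why the cached
 * base (cache_offset) is compared rather than the current base. */
static int
fits_into_reservation(const fake_area *current, const fake_area *next,
	size_t newSize)
{
	return next->id == RESERVED_AREA_ID
		&& next->cache_offset <= current->base
		&& next->base - 1 + next->size >= current->base - 1 + newSize;
}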
@@ -1330,6 +1335,8 @@ vm_copy_on_write_area(vm_area *area)
     uint32 protection;
     status_t status;
 
+    TRACE(("vm_copy_on_write_area(area = %p)\n", area));
+
     // We need to separate the vm_cache from its vm_cache_ref: the area
     // and its cache_ref goes into a new layer on top of the old one.
     // So the old cache gets a new cache_ref and the area a new cache.
@@ -1848,23 +1855,36 @@ status_t
 vm_delete_areas(struct vm_address_space *aspace)
 {
     vm_area *area;
-    vm_area *next;
+    vm_area *next, *last = NULL;
 
     TRACE(("vm_delete_areas: called on aspace 0x%lx\n", aspace->id));
 
     acquire_sem_etc(aspace->virtual_map.sem, WRITE_COUNT, 0, 0);
 
-    // delete all the areas in this aspace
-
+    // remove all reserved areas in this address space
 
     for (area = aspace->virtual_map.areas; area; area = next) {
         next = area->aspace_next;
 
         if (area->id == RESERVED_AREA_ID) {
             // just remove it
+            if (last)
+                last->aspace_next = area->aspace_next;
+            else
+                aspace->virtual_map.areas = area->aspace_next;
+
             free(area);
             continue;
         }
+
+        last = area;
+    }
+
+    // delete all the areas in this aspace
+
+    for (area = aspace->virtual_map.areas; area; area = next) {
+        next = area->aspace_next;
+
         // decrement the ref on this area, may actually push the ref < 0, if there
         // is a concurrent delete_area() on that specific area, but that's ok here
         _vm_put_area(area, true);
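The vm_delete_areas() fix above is the standard unlink-from-a-singly-linked-list pattern: when a node is removed, its predecessor (or the list head) must be rewired, which is why the loop now carries a last pointer. A generic, self-contained sketch of that pattern, not the kernel code itself:

#include <stdlib.h>

struct node {
	struct node *next;
	int is_reserved;	/* stands in for area->id == RESERVED_AREA_ID */
};

/* Remove every "reserved" node, keeping the rest of the list intact. */
static void
remove_reserved(struct node **head)
{
	struct node *last = NULL;
	struct node *node = *head;

	while (node != NULL) {
		struct node *next = node->next;

		if (node->is_reserved) {
			// rewire the predecessor (or the head) around the node;
			// skipping this step is what corrupted the area list
			if (last != NULL)
				last->next = next;
			else
				*head = next;
			free(node);
		} else
			last = node;

		node = next;
	}
}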
@@ -3041,8 +3061,12 @@ resize_area(area_id areaID, size_t newSize)
     oldSize = area->size;
 
     // ToDo: we should only allow to resize anonymous memory areas!
-    if (!cache->cache->temporary)
-        return B_NOT_ALLOWED;
+    if (!cache->cache->temporary) {
+        status = B_NOT_ALLOWED;
+        goto err1;
+    }
+
+    // ToDo: we must lock all address spaces here!
 
     mutex_lock(&cache->lock);
 
@@ -3051,8 +3075,16 @@ resize_area(area_id areaID, size_t newSize)
 
         for (current = cache->areas; current; current = current->cache_next) {
             if (current->aspace_next && current->aspace_next->base <= (current->base + newSize)) {
+                // if the area was created inside a reserved area, it can also be
+                // resized in that area
+                // ToDo: if there is free space after the reserved area, it could be used as well...
+                vm_area *next = current->aspace_next;
+                if (next->id == RESERVED_AREA_ID && next->cache_offset <= current->base
+                    && next->base - 1 + next->size >= current->base - 1 + newSize)
+                    continue;
+
                 status = B_ERROR;
-                goto out;
+                goto err2;
             }
         }
     }
@@ -3061,8 +3093,22 @@ resize_area(area_id areaID, size_t newSize)
 
     for (current = cache->areas; current; current = current->cache_next) {
         if (current->aspace_next && current->aspace_next->base <= (current->base + newSize)) {
-            status = B_ERROR;
-            break;
+            vm_area *next = current->aspace_next;
+            if (next->id == RESERVED_AREA_ID && next->cache_offset <= current->base
+                && next->base - 1 + next->size >= current->base - 1 + newSize) {
+                // resize reserved area
+                addr_t offset = current->base + newSize - next->base;
+                if (next->size <= offset) {
+                    current->aspace_next = next->aspace_next;
+                    free(next);
+                } else {
+                    next->size -= offset;
+                    next->base += offset;
+                }
+            } else {
+                status = B_ERROR;
+                break;
+            }
         }
 
         current->size = newSize;
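The reserved-range bookkeeping in the resize path above is easiest to follow with concrete numbers: offset is how far the grown area reaches into the reservation; if that consumes the whole reservation it is unlinked and freed, otherwise the reservation shrinks and its base moves up. A standalone sketch with made-up values, not part of the commit:

#include <stdio.h>

typedef unsigned long addr_t;

int
main(void)
{
	// Hypothetical: area at 0x18000000, growing to 4 MB, followed
	// directly by a reserved area of 16 MB.
	addr_t areaBase = 0x18000000, newSize = 0x00400000;
	addr_t reservedBase = 0x18100000, reservedSize = 0x01000000;

	// How far the grown area reaches into the reserved range.
	addr_t offset = areaBase + newSize - reservedBase;

	if (reservedSize <= offset) {
		// the reservation is entirely consumed: unlink and free it
		printf("reserved area fully consumed\n");
	} else {
		// otherwise the reservation shrinks and slides up by that amount
		reservedBase += offset;
		reservedSize -= offset;
		printf("reserved area now [%#lx, %#lx)\n",
			reservedBase, reservedBase + reservedSize);
	}
	return 0;
}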
@@ -3086,8 +3132,10 @@ resize_area(area_id areaID, size_t newSize)
             current->size = oldSize;
     }
 
-out:
+err2:
     mutex_unlock(&cache->lock);
+err1:
+    vm_put_area(area);
 
     // ToDo: we must honour the lock restrictions of this area
     return status;
@@ -3268,6 +3316,14 @@ delete_area(area_id area)
 // #pragma mark -
 
 
+status_t
+_user_init_heap_address_range(addr_t base, addr_t size)
+{
+    return vm_reserve_address_range(vm_get_current_user_aspace_id(), (void *)&base,
+        B_EXACT_ADDRESS, size, RESERVED_AVOID_BASE);
+}
+
+
 area_id
 _user_area_for(void *address)
 {
@@ -22,6 +22,7 @@
 
 #include <OS.h>
 #include <Debug.h>
+#include <syscalls.h>
 
 #include <stdlib.h>
 #include <unistd.h>
@@ -74,19 +75,20 @@ __init_heap(void)
 {
     hoardHeap::initNumProcs();
 
-    sHeapAreaSize = kInitialHeapSize;
-    // ToDo: add a VM call that instructs other areas to avoid the space after the heap when possible
-    // (and if not, create it at the end of that range, so that the heap can grow as much as possible)
-    // Then, move the heap back to 256 or 512 MB
-    sHeapBase = (void*)0x30000000;
-        // let the heap start at 3*256 MB for now
+    // This will locate the heap base at 384 MB and reserve the next 1152 MB
+    // for it. They may get reclaimed by other areas, though, but the maximum
+    // size of the heap is guaranteed until the space is really needed.
+    sHeapBase = (void *)0x18000000;
+    status_t status = _kern_init_heap_address_range((addr_t)sHeapBase, 0x48000000);
 
-    sHeapArea = create_area("heap", (void **)&sHeapBase, B_BASE_ADDRESS,
-        sHeapAreaSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
+    sHeapArea = create_area("heap", (void **)&sHeapBase,
+        status == B_OK ? B_EXACT_ADDRESS : B_BASE_ADDRESS,
+        kInitialHeapSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
     if (sHeapArea < B_OK)
         return sHeapArea;
 
     sFreeHeapBase = (addr_t)sHeapBase;
+    sHeapAreaSize = kInitialHeapSize;
 
     sHeapLock = create_sem(1, "heap");
     if (sHeapLock < B_OK)