When a B_CONTIGUOUS area is created, its pages are now reserved upfront, as this is the one step most likely to fail - and that failure is now handled gracefully instead of panicking.
Small cleanup; cleared some other ToDos: some user functions (_user_create_area(), _user_clone_area()) now delete the area again when they could not copy the target address back to userland.

git-svn-id: file:///srv/svn/repos/haiku/trunk/current@8911 a95241bf-73f2-0310-859d-f6bbb57e9c96
Commit eab2037630 (parent 76faade4e7)
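
For context, here is how the change looks from a (hypothetical) caller's point of view: with the page run reserved upfront, create_area() with B_CONTIGUOUS now returns B_NO_MEMORY when no suitable run is available, instead of the VM panicking later. A minimal sketch assuming the public create_area() API; the helper name and protection flags are illustrative, not part of this commit.

/* Hypothetical caller sketch - not part of this commit. */
#include <OS.h>

static void *
allocate_contiguous_buffer(const char *name, size_t size, area_id *_area)
{
    void *address = NULL;

    // B_CONTIGUOUS asks the VM for a physically contiguous page run; with
    // this change the run is reserved before the area is built, so an
    // unsatisfiable request fails cleanly instead of panicking.
    *_area = create_area(name, &address, B_ANY_KERNEL_ADDRESS, size,
        B_CONTIGUOUS, B_READ_AREA | B_WRITE_AREA);
    if (*_area < B_OK)
        return NULL;    // typically B_NO_MEMORY - the caller can fall back

    return address;
}
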
@@ -733,12 +733,13 @@ region_id
 vm_create_anonymous_region(aspace_id aid, const char *name, void **address,
     int addr_type, addr_t size, int wiring, int lock)
 {
-    int err;
     vm_region *region;
     vm_cache *cache;
     vm_store *store;
     vm_address_space *aspace;
     vm_cache_ref *cache_ref;
+    vm_page *page = NULL;
+    status_t err;
 
     TRACE(("create_anonymous_region: %s: size 0x%lx\n", name, size));
 
@@ -776,6 +777,15 @@ vm_create_anonymous_region(aspace_id aid, const char *name, void **address,
 
     size = PAGE_ALIGN(size);
 
+    if (wiring == B_CONTIGUOUS) {
+        // we try to allocate the page run here upfront as this may easily fail for obvious reasons
+        page = vm_page_allocate_page_run(PAGE_STATE_CLEAR, size / B_PAGE_SIZE);
+        if (page == NULL) {
+            vm_put_aspace(aspace);
+            return B_NO_MEMORY;
+        }
+    }
+
     // create an anonymous store object
     store = vm_store_create_anonymous_noswap();
     if (store == NULL)
@@ -808,6 +818,19 @@ vm_create_anonymous_region(aspace_id aid, const char *name, void **address,
     vm_cache_release_ref(cache_ref);
     if (err < 0) {
         vm_put_aspace(aspace);
+
+        if (wiring == B_CONTIGUOUS) {
+            // we had reserved the area space upfront...
+            addr_t pageNumber = page->ppn;
+            int32 i;
+            for (i = size / B_PAGE_SIZE; i-- > 0; pageNumber++) {
+                page = vm_lookup_page(pageNumber);
+                if (page == NULL)
+                    panic("couldn't lookup physical page just allocated\n");
+
+                vm_page_set_state(page, PAGE_STATE_FREE);
+            }
+        }
         return err;
     }
 
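
The error path added above undoes the upfront reservation by hand. Pulled out on its own, the unwind amounts to the following - a sketch only, built from vm_lookup_page() and vm_page_set_state() exactly as they are used in this hunk; the helper itself does not exist in the tree.

/* Sketch of the unwind performed above - hypothetical helper, not in the tree. */
static void
free_reserved_page_run(vm_page *firstPage, addr_t size)
{
    addr_t pageNumber = firstPage->ppn;
    int32 i;

    for (i = size / B_PAGE_SIZE; i-- > 0; pageNumber++) {
        vm_page *page = vm_lookup_page(pageNumber);
        if (page == NULL)
            panic("couldn't lookup physical page just allocated\n");

        // hand each page of the run back to the free list
        vm_page_set_state(page, PAGE_STATE_FREE);
    }
}
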
@@ -818,6 +841,7 @@ vm_create_anonymous_region(aspace_id aid, const char *name, void **address,
         case B_NO_LOCK:
         case B_LAZY_LOCK:
             break; // do nothing
+
         case B_FULL_LOCK:
         {
             // pages aren't mapped at this point, but we just simulate a fault on
@@ -830,6 +854,7 @@ vm_create_anonymous_region(aspace_id aid, const char *name, void **address,
             }
             break;
         }
+
         case B_ALREADY_WIRED:
         {
             // the pages should already be mapped. This is only really useful during
@@ -839,7 +864,6 @@ vm_create_anonymous_region(aspace_id aid, const char *name, void **address,
             addr_t pa;
             uint32 flags;
             int err;
-            vm_page *page;
             off_t offset = 0;
 
             mutex_lock(&cache_ref->lock);
@@ -863,51 +887,48 @@ vm_create_anonymous_region(aspace_id aid, const char *name, void **address,
             mutex_unlock(&cache_ref->lock);
             break;
         }
-        case B_CONTIGUOUS: {
-            addr_t va;
-            addr_t phys_addr;
-            int err;
-            vm_page *page;
-            off_t offset = 0;
-
-            page = vm_page_allocate_page_run(PAGE_STATE_CLEAR, ROUNDUP(region->size, PAGE_SIZE) / PAGE_SIZE);
-            if(page == NULL) {
-                // XXX back out of this
-                panic("couldn't allocate page run of size %ld\n", region->size);
-            }
-            phys_addr = page->ppn * PAGE_SIZE;
+        case B_CONTIGUOUS:
+        {
+            addr_t physicalAddress = page->ppn * B_PAGE_SIZE;
+            addr_t virtualAddress;
+            off_t offset = 0;
 
             mutex_lock(&cache_ref->lock);
             (*aspace->translation_map.ops->lock)(&aspace->translation_map);
-            for(va = region->base; va < region->base + region->size; va += PAGE_SIZE, offset += PAGE_SIZE, phys_addr += PAGE_SIZE) {
-                page = vm_lookup_page(phys_addr / PAGE_SIZE);
-                if(page == NULL) {
-                    panic("couldn't lookup physical page just allocated\n");
-                }
+
+            for (virtualAddress = region->base; virtualAddress < region->base + region->size;
+                    virtualAddress += B_PAGE_SIZE, offset += B_PAGE_SIZE, physicalAddress += B_PAGE_SIZE) {
+                page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
+                if (page == NULL)
+                    panic("couldn't lookup physical page just allocated\n");
+
                 atomic_add(&page->ref_count, 1);
-                err = (*aspace->translation_map.ops->map)(&aspace->translation_map, va, phys_addr, lock);
-                if(err < 0) {
+                err = (*aspace->translation_map.ops->map)(&aspace->translation_map,
+                    virtualAddress, physicalAddress, lock);
+                if (err < 0)
                     panic("couldn't map physical page in page run\n");
-                }
+
                 vm_page_set_state(page, PAGE_STATE_WIRED);
                 vm_cache_insert_page(cache_ref, page, offset);
             }
 
             (*aspace->translation_map.ops->unlock)(&aspace->translation_map);
             mutex_unlock(&cache_ref->lock);
 
             break;
         }
 
         default:
-            ;
+            break;
     }
     vm_put_aspace(aspace);
 
     // dprintf("create_anonymous_region: done\n");
 
-    if(region)
-        return region->id;
-    else
-        return ENOMEM;
+    if (region == NULL)
+        return B_NO_MEMORY;
+
+    return region->id;
 }
 
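
Because the run is already allocated by the time the wiring switch runs, the rewritten B_CONTIGUOUS case only has to walk the region one B_PAGE_SIZE step at a time and wire each page. The per-page work from the loop above, restated as a standalone step for readability - again just a sketch using the kernel-internal calls visible in this hunk:

/* Per-page step of the B_CONTIGUOUS loop above - sketch only. */
static void
wire_one_contiguous_page(vm_address_space *aspace, vm_cache_ref *cacheRef,
    addr_t virtualAddress, addr_t physicalAddress, off_t offset, int lock)
{
    vm_page *page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
    if (page == NULL)
        panic("couldn't lookup physical page just allocated\n");

    atomic_add(&page->ref_count, 1);
    if ((*aspace->translation_map.ops->map)(&aspace->translation_map,
            virtualAddress, physicalAddress, lock) < B_OK)
        panic("couldn't map physical page in page run\n");

    // the page stays pinned and is tracked by the region's cache
    vm_page_set_state(page, PAGE_STATE_WIRED);
    vm_cache_insert_page(cacheRef, page, offset);
}
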
@@ -1059,6 +1080,8 @@ _vm_map_file(aspace_id aid, char *name, void **_address, uint32 addressSpec,
     if (aspace == NULL)
         return ERR_VM_INVALID_ASPACE;
 
+    TRACE(("_vm_map_file(\"%s\", offset = %Ld, size = %lu, mapping %d)\n", path, offset, size, mapping));
+
     offset = ROUNDOWN(offset, B_PAGE_SIZE);
     size = PAGE_ALIGN(size);
 
@@ -1165,36 +1188,26 @@ vm_clone_region(aspace_id aid, char *name, void **address, int addr_type,
 }
 
 
-static int
-__vm_delete_region(vm_address_space *aspace, vm_region *region)
-{
-    // ToDo: I am really not sure if it's a good idea not to
-    // wait until the area has really been freed - code following
-    // might rely on the address space to available again, and
-    // there is no other way to wait for the completion of the
-    // deletion.
-    if (region->aspace == aspace)
-        vm_put_region(region);
-
-    return B_NO_ERROR;
-}
-
-
-static int
+static status_t
 _vm_delete_region(vm_address_space *aspace, region_id rid)
 {
+    status_t status = B_OK;
     vm_region *region;
 
     TRACE(("vm_delete_region: aspace id 0x%lx, region id 0x%lx\n", aspace->id, rid));
 
     region = vm_get_region_by_id(rid);
     if (region == NULL)
-        return ERR_VM_INVALID_REGION;
+        return B_BAD_VALUE;
 
+    if (region->aspace == aspace) {
+        vm_put_region(region);
+            // next put below will actually delete it
+    } else
+        status = B_NOT_ALLOWED;
 
-    __vm_delete_region(aspace, region);
     vm_put_region(region);
-
-    return 0;
+    return status;
 }
 
 
@@ -2302,6 +2315,7 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
         TRACEPFAULT;
 
         // page must be busy
+        // ToDo: don't wait forever!
         mutex_unlock(&cache_ref->lock);
         snooze(20000);
         mutex_lock(&cache_ref->lock);
@@ -2926,7 +2940,7 @@ _user_clone_area(const char *userName, void **userAddress, uint32 addressSpec,
         return clonedArea;
 
     if (user_memcpy(userAddress, &address, sizeof(address)) < B_OK) {
-        // ToDo: delete cloned area?
+        delete_area(clonedArea);
         return B_BAD_ADDRESS;
     }
 
@@ -2964,7 +2978,7 @@ _user_create_area(const char *userName, void **userAddress, uint32 addressSpec,
     area = create_area(name, &address, addressSpec, size, lock, protection);
 
     if (area >= B_OK && user_memcpy(userAddress, &address, sizeof(address)) < B_OK) {
-        // ToDo: shouldn't I delete the area here?
+        delete_area(area);
         return B_BAD_ADDRESS;
     }
 
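
Both syscall wrappers now follow the same pattern: create or clone the area first, copy the resulting address back to userland, and delete the area again if that copy fails, so a bad user pointer no longer leaks a freshly created area. The shared pattern, as a sketch with a made-up name, using only calls that appear in this diff:

/* Copy-out-or-clean-up pattern shared by the two wrappers above - sketch only. */
static area_id
copy_address_to_user_or_delete(area_id area, void **userAddress, void *address)
{
    if (area < B_OK)
        return area;    // creation itself failed, nothing to clean up

    if (user_memcpy(userAddress, &address, sizeof(address)) < B_OK) {
        // the area exists, but userland never learns its address:
        // delete it again instead of leaking it
        delete_area(area);
        return B_BAD_ADDRESS;
    }

    return area;
}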