* Many VM area creation functions just panicked when a vm_store, vm_cache,
  or vm_cache_ref couldn't be created, instead of cleaning up and returning
  an appropriate error.
* vm_cache_ref_create() now returns a status_t instead of the vm_cache_ref
  (as that's part of the vm_cache anyway).


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@16602 a95241bf-73f2-0310-859d-f6bbb57e9c96
commit b420ef6461
parent 8013da5719
Author: Axel Dörfler
Date:   2006-03-06 13:06:10 +00:00

3 changed files with 169 additions and 127 deletions
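
The pattern this commit introduces is the kernel's usual reverse-order unwinding: each successfully created object gets a cleanup label, and a failure jumps to the label that tears down everything created so far, returning a status_t instead of calling panic(). The following is a minimal, self-contained sketch of that pattern in plain C — the types and helpers (store_create(), cache_create(), cache_ref_create(), create_backing()) are simplified stand-ins for illustration, not the actual Haiku kernel API:

#include <stdio.h>
#include <stdlib.h>

typedef int status_t;
enum { B_OK = 0, B_NO_MEMORY = -1 };

/* simplified stand-ins for vm_store / vm_cache / vm_cache_ref */
typedef struct vm_store vm_store;
typedef struct vm_cache vm_cache;
typedef struct vm_cache_ref vm_cache_ref;

struct vm_store { void (*destroy)(vm_store *store); };
struct vm_cache { vm_store *store; vm_cache_ref *ref; };
struct vm_cache_ref { vm_cache *cache; };

static void
store_destroy(vm_store *store)
{
	free(store);
}

static vm_store *
store_create(void)
{
	vm_store *store = malloc(sizeof(vm_store));
	if (store != NULL)
		store->destroy = store_destroy;
	return store;
}

static vm_cache *
cache_create(vm_store *store)
{
	vm_cache *cache = malloc(sizeof(vm_cache));
	if (cache != NULL) {
		cache->store = store;
		cache->ref = NULL;
	}
	return cache;
}

// post-commit convention: failure is reported as a status_t; on success
// the new ref is reachable through cache->ref, so no pointer is returned
static status_t
cache_ref_create(vm_cache *cache)
{
	vm_cache_ref *ref = malloc(sizeof(vm_cache_ref));
	if (ref == NULL)
		return B_NO_MEMORY;

	ref->cache = cache;
	cache->ref = ref;
	return B_OK;
}

// one cleanup label per object, unwound in reverse order of creation
static status_t
create_backing(vm_cache **_cache)
{
	status_t status;

	vm_store *store = store_create();
	if (store == NULL)
		return B_NO_MEMORY;

	vm_cache *cache = cache_create(store);
	if (cache == NULL) {
		status = B_NO_MEMORY;
		goto err1;
	}

	status = cache_ref_create(cache);
	if (status < B_OK)
		goto err2;

	*_cache = cache;
	return B_OK;

err2:
	free(cache);
err1:
	store->destroy(store);
	return status;
}

int
main(void)
{
	vm_cache *cache = NULL;
	status_t status = create_backing(&cache);
	printf("create_backing: %s\n", status == B_OK ? "B_OK" : "failed");

	if (status == B_OK) {
		// tear down in reverse order, mirroring the error path
		free(cache->ref);
		cache->store->destroy(cache->store);
		free(cache);
	}
	return 0;
}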

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2004, Axel Dörfler, axeld@pinc-software.de.
+ * Copyright 2003-2006, Axel Dörfler, axeld@pinc-software.de.
  * Distributed under the terms of the MIT License.
  *
  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
@@ -21,7 +21,7 @@ extern "C" {
 status_t vm_cache_init(struct kernel_args *args);
 vm_cache *vm_cache_create(vm_store *store);
-vm_cache_ref *vm_cache_ref_create(vm_cache *cache);
+status_t vm_cache_ref_create(vm_cache *cache);
 void vm_cache_acquire_ref(vm_cache_ref *cache_ref);
 void vm_cache_release_ref(vm_cache_ref *cache_ref);
 vm_page *vm_cache_lookup_page(vm_cache_ref *cacheRef, off_t page);

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2005, Axel Dörfler, axeld@pinc-software.de.
+ * Copyright 2002-2006, Axel Dörfler, axeld@pinc-software.de.
  * Distributed under the terms of the MIT License.
  *
  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
@@ -507,54 +507,62 @@ map_backing_store(vm_address_space *addressSpace, vm_store *store, void **_virtu
 	int mapping, vm_area **_area, const char *areaName)
 {
 	vm_cache *cache;
-	vm_cache_ref *cache_ref;
-	vm_area *area;
-	status_t err;
+	vm_cache_ref *cacheRef;
+	status_t status;
 
 	TRACE(("map_backing_store: aspace %p, store %p, *vaddr %p, offset 0x%Lx, size %lu, addressSpec %ld, wiring %d, protection %d, _area %p, area_name '%s'\n",
 		addressSpace, store, *_virtualAddress, offset, size, addressSpec,
 		wiring, protection, _area, areaName));
 
-	area = _vm_create_area_struct(addressSpace, areaName, wiring, protection);
+	vm_area *area = _vm_create_area_struct(addressSpace, areaName, wiring, protection);
 	if (area == NULL)
 		return B_NO_MEMORY;
 
 	cache = store->cache;
-	cache_ref = cache->ref;
+	cacheRef = cache->ref;
 
 	// if this is a private map, we need to create a new cache & store object
 	// pair to handle the private copies of pages as they are written to
 	if (mapping == REGION_PRIVATE_MAP) {
-		vm_cache *nu_cache;
-		vm_cache_ref *nu_cache_ref = NULL;
-		vm_store *nu_store;
+		vm_cache *newCache;
+		vm_store *newStore;
 
-		// ToDo: panic???
 		// create an anonymous store object
-		nu_store = vm_store_create_anonymous_noswap((protection & B_STACK_AREA) != 0, USER_STACK_GUARD_PAGES);
-		if (nu_store == NULL)
-			panic("map_backing_store: vm_create_store_anonymous_noswap returned NULL");
-		nu_cache = vm_cache_create(nu_store);
-		if (nu_cache == NULL)
-			panic("map_backing_store: vm_cache_create returned NULL");
-		nu_cache_ref = vm_cache_ref_create(nu_cache);
-		if (nu_cache_ref == NULL)
-			panic("map_backing_store: vm_cache_ref_create returned NULL");
+		newStore = vm_store_create_anonymous_noswap((protection & B_STACK_AREA) != 0,
+			USER_STACK_GUARD_PAGES);
+		if (newStore == NULL) {
+			status = B_NO_MEMORY;
+			goto err1;
+		}
+		newCache = vm_cache_create(newStore);
+		if (newCache == NULL) {
+			status = B_NO_MEMORY;
+			newStore->ops->destroy(newStore);
+			goto err1;
+		}
+		status = vm_cache_ref_create(newCache);
+		if (status < B_OK) {
+			newStore->ops->destroy(newStore);
+			free(newCache);
+			goto err1;
+		}
 
-		nu_cache->temporary = 1;
-		nu_cache->scan_skip = cache->scan_skip;
-
-		nu_cache->source = cache;
+		newCache->temporary = 1;
+		newCache->scan_skip = cache->scan_skip;
+		newCache->source = cache;
 
-		cache = nu_cache;
-		cache_ref = cache->ref;
-		store = nu_store;
+		cache = newCache;
+		cacheRef = cache->ref;
+		store = newStore;
 
 		cache->virtual_size = offset + size;
 	}
 
-	err = vm_cache_set_minimal_commitment(cache_ref, offset + size);
-	if (err != B_OK)
-		goto err1;
+	status = vm_cache_set_minimal_commitment(cacheRef, offset + size);
+	if (status != B_OK)
+		goto err2;
 
 	acquire_sem_etc(addressSpace->sem, WRITE_COUNT, 0, 0);
@@ -562,19 +570,19 @@ map_backing_store(vm_address_space *addressSpace, vm_store *store, void **_virtu
 	if (addressSpace->state == VM_ASPACE_STATE_DELETION) {
 		// okay, someone is trying to delete this address space now, so we can't
 		// insert the area, so back out
-		err = B_BAD_TEAM_ID;
-		goto err2;
+		status = B_BAD_TEAM_ID;
+		goto err3;
 	}
 
-	err = insert_area(addressSpace, _virtualAddress, addressSpec, size, area);
-	if (err < B_OK)
-		goto err2;
+	status = insert_area(addressSpace, _virtualAddress, addressSpec, size, area);
+	if (status < B_OK)
+		goto err3;
 
 	// attach the cache to the area
-	area->cache_ref = cache_ref;
+	area->cache_ref = cacheRef;
 	area->cache_offset = offset;
 	// point the cache back to the area
-	vm_cache_insert_area(cache_ref, area);
+	vm_cache_insert_area(cacheRef, area);
 
 	// insert the area in the global area hash table
 	acquire_sem_etc(sAreaHashLock, WRITE_COUNT, 0 ,0);
@@ -589,17 +597,17 @@ map_backing_store(vm_address_space *addressSpace, vm_store *store, void **_virtu
 	*_area = area;
 	return B_OK;
 
-err2:
+err3:
 	release_sem_etc(addressSpace->sem, WRITE_COUNT, 0);
-err1:
+err2:
 	if (mapping == REGION_PRIVATE_MAP) {
 		// we created this cache, so we must delete it again
-		vm_cache_release_ref(cache_ref);
+		vm_cache_release_ref(cacheRef);
 	}
+err1:
 	free(area->name);
 	free(area);
-	return err;
+	return status;
 }
@@ -710,11 +718,10 @@ area_id
 vm_create_anonymous_area(team_id aid, const char *name, void **address,
 	uint32 addressSpec, addr_t size, uint32 wiring, uint32 protection)
 {
+	vm_cache_ref *cacheRef;
 	vm_area *area;
 	vm_cache *cache;
 	vm_store *store;
-	vm_address_space *addressSpace;
-	vm_cache_ref *cache_ref;
 	vm_page *page = NULL;
 	bool isStack = (protection & B_STACK_AREA) != 0;
 	bool canOvercommit = false;
@@ -761,7 +768,7 @@ vm_create_anonymous_area(team_id aid, const char *name, void **address,
 		return B_BAD_VALUE;
 	}
 
-	addressSpace = vm_get_address_space_by_id(aid);
+	vm_address_space *addressSpace = vm_get_address_space_by_id(aid);
 	if (addressSpace == NULL)
 		return B_BAD_TEAM_ID;
@@ -777,19 +784,23 @@ vm_create_anonymous_area(team_id aid, const char *name, void **address,
 		}
 	}
 
-	// ToDo: panic???
 	// create an anonymous store object
 	store = vm_store_create_anonymous_noswap(canOvercommit, isStack ?
 		((protection & B_USER_PROTECTION) != 0 ?
 			USER_STACK_GUARD_PAGES : KERNEL_STACK_GUARD_PAGES) : 0);
-	if (store == NULL)
-		panic("vm_create_anonymous_area: vm_create_store_anonymous_noswap returned NULL");
+	if (store == NULL) {
+		status = B_NO_MEMORY;
+		goto err1;
+	}
 	cache = vm_cache_create(store);
-	if (cache == NULL)
-		panic("vm_create_anonymous_area: vm_cache_create returned NULL");
-	cache_ref = vm_cache_ref_create(cache);
-	if (cache_ref == NULL)
-		panic("vm_create_anonymous_area: vm_cache_ref_create returned NULL");
+	if (cache == NULL) {
+		status = B_NO_MEMORY;
+		goto err2;
+	}
+	status = vm_cache_ref_create(cache);
+	if (status < B_OK)
+		goto err3;
 
 	cache->temporary = 1;
 
 	switch (wiring) {
@@ -808,25 +819,12 @@ vm_create_anonymous_area(team_id aid, const char *name, void **address,
 	status = map_backing_store(addressSpace, store, address, 0, size, addressSpec, wiring,
 		protection, REGION_NO_PRIVATE_MAP, &area, name);
 	if (status < B_OK) {
-		vm_cache_release_ref(cache_ref);
-		vm_put_address_space(addressSpace);
-
-		if (wiring == B_CONTIGUOUS) {
-			// we had reserved the area space upfront...
-			addr_t pageNumber = page->physical_page_number;
-			int32 i;
-			for (i = size / B_PAGE_SIZE; i-- > 0; pageNumber++) {
-				page = vm_lookup_page(pageNumber);
-				if (page == NULL)
-					panic("couldn't lookup physical page just allocated\n");
-				vm_page_set_state(page, PAGE_STATE_FREE);
-			}
-		}
-		return status;
+		vm_cache_release_ref(cache->ref);
+		goto err1;
 	}
 
-	cache_ref = store->cache->ref;
+	cacheRef = store->cache->ref;
 	switch (wiring) {
 		case B_NO_LOCK:
 		case B_LAZY_LOCK:
@@ -869,7 +867,7 @@ vm_create_anonymous_area(team_id aid, const char *name, void **address,
 			if (!kernel_startup)
 				panic("ALREADY_WIRED flag used outside kernel startup\n");
 
-			mutex_lock(&cache_ref->lock);
+			mutex_lock(&cacheRef->lock);
 			(*map->ops->lock)(map);
 			for (va = area->base; va < area->base + area->size; va += B_PAGE_SIZE, offset += B_PAGE_SIZE) {
 				err = (*map->ops->query)(map, va, &pa, &flags);
@@ -884,10 +882,10 @@ vm_create_anonymous_area(team_id aid, const char *name, void **address,
 				}
 				atomic_add(&page->ref_count, 1);
 				vm_page_set_state(page, PAGE_STATE_WIRED);
-				vm_cache_insert_page(cache_ref, page, offset);
+				vm_cache_insert_page(cacheRef, page, offset);
 			}
 			(*map->ops->unlock)(map);
-			mutex_unlock(&cache_ref->lock);
+			mutex_unlock(&cacheRef->lock);
 			break;
 		}
@@ -900,7 +898,7 @@ vm_create_anonymous_area(team_id aid, const char *name, void **address,
 			addr_t virtualAddress;
 			off_t offset = 0;
 
-			mutex_lock(&cache_ref->lock);
+			mutex_lock(&cacheRef->lock);
 			(*map->ops->lock)(map);
 			for (virtualAddress = area->base; virtualAddress < area->base + area->size;
@@ -916,11 +914,11 @@ vm_create_anonymous_area(team_id aid, const char *name, void **address,
 					panic("couldn't map physical page in page run\n");
 
 				vm_page_set_state(page, PAGE_STATE_WIRED);
-				vm_cache_insert_page(cache_ref, page, offset);
+				vm_cache_insert_page(cacheRef, page, offset);
 			}
 			(*map->ops->unlock)(map);
-			mutex_unlock(&cache_ref->lock);
+			mutex_unlock(&cacheRef->lock);
 			break;
 		}
@@ -932,6 +930,27 @@ vm_create_anonymous_area(team_id aid, const char *name, void **address,
 	TRACE(("vm_create_anonymous_area: done\n"));
 
 	return area->id;
+
+err3:
+	free(cache);
+err2:
+	store->ops->destroy(store);
+err1:
+	if (wiring == B_CONTIGUOUS) {
+		// we had reserved the area space upfront...
+		addr_t pageNumber = page->physical_page_number;
+		int32 i;
+
+		for (i = size / B_PAGE_SIZE; i-- > 0; pageNumber++) {
+			page = vm_lookup_page(pageNumber);
+			if (page == NULL)
+				panic("couldn't lookup physical page just allocated\n");
+
+			vm_page_set_state(page, PAGE_STATE_FREE);
+		}
+	}
+
+	vm_put_address_space(addressSpace);
+	return status;
 }
@@ -942,11 +961,9 @@ vm_map_physical_memory(team_id areaID, const char *name, void **_address,
 {
 	vm_area *area;
 	vm_cache *cache;
-	vm_cache_ref *cacheRef;
 	vm_store *store;
 	addr_t mapOffset;
 	status_t status;
-	vm_address_space *addressSpace = vm_get_address_space_by_id(areaID);
 
 	TRACE(("vm_map_physical_memory(aspace = %ld, \"%s\", virtual = %p, spec = %ld,"
 		" size = %lu, protection = %ld, phys = %p)\n",
@@ -956,6 +973,7 @@ vm_map_physical_memory(team_id areaID, const char *name, void **_address,
 	if (!arch_vm_supports_protection(protection))
 		return B_NOT_SUPPORTED;
 
+	vm_address_space *addressSpace = vm_get_address_space_by_id(areaID);
 	if (addressSpace == NULL)
 		return B_BAD_TEAM_ID;
@@ -968,16 +986,20 @@ vm_map_physical_memory(team_id areaID, const char *name, void **_address,
 	size = PAGE_ALIGN(size);
 
 	// create an device store object
-	// TODO: panic???
 	store = vm_store_create_device(physicalAddress);
-	if (store == NULL)
-		panic("vm_map_physical_memory: vm_store_create_device returned NULL");
+	if (store == NULL) {
+		status = B_NO_MEMORY;
+		goto err1;
+	}
 	cache = vm_cache_create(store);
-	if (cache == NULL)
-		panic("vm_map_physical_memory: vm_cache_create returned NULL");
-	cacheRef = vm_cache_ref_create(cache);
-	if (cacheRef == NULL)
-		panic("vm_map_physical_memory: vm_cache_ref_create returned NULL");
+	if (cache == NULL) {
+		status = B_NO_MEMORY;
+		goto err2;
+	}
+	status = vm_cache_ref_create(cache);
+	if (status < B_OK)
+		goto err3;
 
 	// tell the page scanner to skip over this area, it's pages are special
 	cache->scan_skip = 1;
@@ -985,7 +1007,7 @@ vm_map_physical_memory(team_id areaID, const char *name, void **_address,
 	status = map_backing_store(addressSpace, store, _address, 0, size,
 		addressSpec & ~B_MTR_MASK, 0, protection, REGION_NO_PRIVATE_MAP, &area, name);
 	if (status < B_OK)
-		vm_cache_release_ref(cacheRef);
+		vm_cache_release_ref(cache->ref);
 
 	if (status >= B_OK && (addressSpec & B_MTR_MASK) != 0) {
 		// set requested memory type
@@ -1012,6 +1034,14 @@ vm_map_physical_memory(team_id areaID, const char *name, void **_address,
 	*_address = (void *)((addr_t)*_address + mapOffset);
 	return area->id;
+
+err3:
+	free(cache);
+err2:
+	store->ops->destroy(store);
+err1:
+	vm_put_address_space(addressSpace);
+	return status;
 }
@@ -1022,8 +1052,7 @@ vm_create_null_area(team_id aid, const char *name, void **address, uint32 addres
 	vm_cache *cache;
 	vm_cache_ref *cache_ref;
 	vm_store *store;
-//	addr_t map_offset;
-	int err;
+	status_t status;
 
 	vm_address_space *addressSpace = vm_get_address_space_by_id(aid);
 	if (addressSpace == NULL)
@@ -1032,59 +1061,72 @@ vm_create_null_area(team_id aid, const char *name, void **address, uint32 addres
 	size = PAGE_ALIGN(size);
 
 	// create an null store object
-	// TODO: panic???
 	store = vm_store_create_null();
-	if (store == NULL)
-		panic("vm_map_physical_memory: vm_store_create_null returned NULL");
+	if (store == NULL) {
+		status = B_NO_MEMORY;
+		goto err1;
+	}
 	cache = vm_cache_create(store);
-	if (cache == NULL)
-		panic("vm_map_physical_memory: vm_cache_create returned NULL");
-	cache_ref = vm_cache_ref_create(cache);
-	if (cache_ref == NULL)
-		panic("vm_map_physical_memory: vm_cache_ref_create returned NULL");
+	if (cache == NULL) {
+		status = B_NO_MEMORY;
+		goto err2;
+	}
+	status = vm_cache_ref_create(cache);
+	if (status < B_OK)
+		goto err3;
 
 	// tell the page scanner to skip over this area, no pages will be mapped here
 	cache->scan_skip = 1;
 
-	err = map_backing_store(addressSpace, store, address, 0, size, addressSpec, 0, B_KERNEL_READ_AREA, REGION_NO_PRIVATE_MAP, &area, name);
+	status = map_backing_store(addressSpace, store, address, 0, size, addressSpec, 0,
+		B_KERNEL_READ_AREA, REGION_NO_PRIVATE_MAP, &area, name);
 	vm_put_address_space(addressSpace);
-	if (err < B_OK) {
-		vm_cache_release_ref(cache_ref);
-		return err;
+	if (status < B_OK) {
+		vm_cache_release_ref(cache->ref);
+		return status;
 	}
 
 	return area->id;
+
+err3:
+	free(cache);
+err2:
+	store->ops->destroy(store);
+err1:
+	vm_put_address_space(addressSpace);
+	return status;
 }
 
 
 status_t
 vm_create_vnode_cache(void *vnode, struct vm_cache_ref **_cacheRef)
 {
-	vm_cache_ref *cacheRef;
-	vm_cache *cache;
-	vm_store *store;
+	status_t status;
 
 	// create a vnode store object
-	store = vm_create_vnode_store(vnode);
-	if (store == NULL) {
-		dprintf("vm_create_vnode_cache: couldn't create vnode store\n");
+	vm_store *store = vm_create_vnode_store(vnode);
+	if (store == NULL)
 		return B_NO_MEMORY;
-	}
 
-	cache = vm_cache_create(store);
+	vm_cache *cache = vm_cache_create(store);
 	if (cache == NULL) {
-		dprintf("vm_create_vnode_cache: vm_cache_create returned NULL\n");
-		return B_NO_MEMORY;
+		status = B_NO_MEMORY;
+		goto err1;
 	}
 
-	cacheRef = vm_cache_ref_create(cache);
-	if (cacheRef == NULL) {
-		dprintf("vm_create_vnode_cache: vm_cache_ref_create returned NULL\n");
-		return B_NO_MEMORY;
-	}
+	status = vm_cache_ref_create(cache);
+	if (status < B_OK)
+		goto err2;
 
-	*_cacheRef = cacheRef;
+	*_cacheRef = cache->ref;
 	return B_OK;
+
+err2:
+	free(cache);
+err1:
+	store->ops->destroy(store);
+	return status;
 }
@@ -1400,11 +1442,11 @@ vm_copy_on_write_area(vm_area *area)
 		goto err1;
 	}
 
-	lowerCacheRef = vm_cache_ref_create(lowerCache);
-	if (lowerCacheRef == NULL) {
-		status = B_NO_MEMORY;
+	status = vm_cache_ref_create(lowerCache);
+	if (status < B_OK)
 		goto err2;
-	}
+
+	lowerCacheRef = lowerCache->ref;
 
 	// The area must be readable in the same way it was previously writable
 	protection = B_KERNEL_READ_AREA;
@@ -2378,7 +2420,7 @@ vm_page_fault(addr_t address, addr_t fault_address, bool is_write, bool is_user,
 		area ? area->name : "???", fault_address - (area ? area->base : 0x0));
 
 	// We can print a stack trace of the userland thread here.
-#if 0
+#if 1
 	if (area) {
 		struct stack_frame {
 #ifdef __INTEL__

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2005, Axel Dörfler, axeld@pinc-software.de.
+ * Copyright 2002-2006, Axel Dörfler, axeld@pinc-software.de.
  * Distributed under the terms of the MIT License.
  *
  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
@@ -116,7 +116,7 @@ vm_cache_create(vm_store *store)
 }
 
 
-vm_cache_ref *
+status_t
 vm_cache_ref_create(vm_cache *cache)
 {
 	vm_cache_ref *ref;
@@ -124,14 +124,14 @@ vm_cache_ref_create(vm_cache *cache)
 	ref = malloc(sizeof(vm_cache_ref));
 	if (ref == NULL)
-		return NULL;
+		return B_NO_MEMORY;
 
 	status = mutex_init(&ref->lock, "cache_ref_mutex");
 	if (status < B_OK && (!kernel_startup || status != B_NO_MORE_SEMS)) {
 		// During early boot, we cannot create semaphores - they are
 		// created later in vm_init_post_sem()
 		free(ref);
-		return NULL;
+		return status;
 	}
 
 	ref->areas = NULL;
@@ -141,7 +141,7 @@ vm_cache_ref_create(vm_cache *cache)
 	ref->cache = cache;
 	cache->ref = ref;
 
-	return ref;
+	return B_OK;
 }