* Changed recursive_lock to use a mutex instead of a semaphore.

* Adjusted code using recursive locks accordingly. The initialization
  cannot fail anymore, and it is possible to use recursive locks in the
  early boot process (even uninitialized, if in BSS), which simplifies
  things a little.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@25687 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Ingo Weinhold 2008-05-28 23:12:36 +00:00
parent 6d4f2038c3
commit b0f5179aa5
18 changed files with 85 additions and 182 deletions

View File

@ -9,31 +9,9 @@
#ifndef _KERNEL_LOCK_H
#define _KERNEL_LOCK_H
#include <OS.h>
#include <debug.h>
typedef struct recursive_lock {
sem_id sem;
thread_id holder;
int recursion;
} recursive_lock;
typedef struct benaphore {
sem_id sem;
int32 count;
} benaphore;
// Note: this is currently a trivial r/w lock implementation
// it will be replaced with something better later - this
// or a similar API will be made publicly available at this point.
typedef struct rw_lock {
sem_id sem;
int32 count;
benaphore writeLock;
} rw_lock;
#define RW_MAX_READERS 1000000
struct mutex_waiter;
@ -51,6 +29,31 @@ typedef struct mutex {
#define MUTEX_FLAG_CLONE_NAME 0x1
typedef struct recursive_lock {
mutex lock;
#ifndef KDEBUG
thread_id holder;
#endif
int recursion;
} recursive_lock;
typedef struct benaphore {
sem_id sem;
int32 count;
} benaphore;
// Note: this is currently a trivial r/w lock implementation
// it will be replaced with something better later - this
// or a similar API will be made publicly available at this point.
typedef struct rw_lock {
sem_id sem;
int32 count;
benaphore writeLock;
} rw_lock;
#define RW_MAX_READERS 1000000
#if 0 && KDEBUG // XXX disable this for now, it causes problems when including thread.h here
# include <thread.h>
#define ASSERT_LOCKED_RECURSIVE(r) { ASSERT(thread_get_current_thread_id() == (r)->holder); }
@ -65,7 +68,10 @@ typedef struct mutex {
extern "C" {
#endif
extern status_t recursive_lock_init(recursive_lock *lock, const char *name);
extern void recursive_lock_init(recursive_lock *lock, const char *name);
// name is *not* cloned nor freed in recursive_lock_destroy()
extern void recursive_lock_init_etc(recursive_lock *lock, const char *name,
uint32 flags);
extern void recursive_lock_destroy(recursive_lock *lock);
extern status_t recursive_lock_lock(recursive_lock *lock);
extern void recursive_lock_unlock(recursive_lock *lock);

View File

@ -49,7 +49,8 @@ struct RemoteDiskDevice : recursive_lock {
status_t Init()
{
return recursive_lock_init(this, "remote disk device");
recursive_lock_init(this, "remote disk device");
return B_OK;
}
void Uninit()

View File

@ -52,9 +52,7 @@ init_driver(void)
memset(gDeviceNames, 0, sizeof(gDeviceNames));
// create the request mutex
status_t error = recursive_lock_init(&gTTYRequestLock, "tty requests");
if (error != B_OK)
return error;
recursive_lock_init(&gTTYRequestLock, "tty requests");
// create the global mutex
mutex_init(&gGlobalTTYLock, "tty global");

View File

@ -852,10 +852,7 @@ init()
goto err1;
}
if (recursive_lock_init(&sLock, "launch speedup") < B_OK) {
status = sLock.sem;
goto err2;
}
recursive_lock_init(&sLock, "launch speedup");
// register kernel syscalls
if (register_generic_syscall(LAUNCH_SPEEDUP_SYSCALLS,
@ -877,7 +874,6 @@ init()
err3:
recursive_lock_destroy(&sLock);
err2:
hash_uninit(sPrefetchHash);
err1:
hash_uninit(sTeamHash);

View File

@ -753,23 +753,14 @@ init()
sRulesHash = hash_init(64, 0, &rules_compare, &rules_hash);
if (sRulesHash == NULL) {
status = B_NO_MEMORY;
goto err1;
hash_uninit(sTeamHash);
return B_NO_MEMORY;
}
if (recursive_lock_init(&sLock, "rule based prefetcher") < B_OK) {
status = sLock.sem;
goto err2;
}
recursive_lock_init(&sLock, "rule based prefetcher");
load_rules();
return B_OK;
err2:
hash_uninit(sRulesHash);
err1:
hash_uninit(sTeamHash);
return status;
}

View File

@ -747,10 +747,7 @@ dosfs_mount(fs_volume *_vol, const char *device, uint32 flags,
goto error;
}
sprintf(name, "fat lock %lx", vol->id);
if ((result = recursive_lock_init(&(vol->vlock), name)) != 0) {
dprintf("error creating lock (%s)\n", strerror(result));
goto error;
}
recursive_lock_init_etc(&(vol->vlock), name, MUTEX_FLAG_CLONE_NAME);
#if DEBUG
if (atomic_add(&instances, 1) == 0) {

View File

@ -155,14 +155,13 @@ fs_mount(nspace_id nsid, const char *device, ulong flags, void *parms, size_t le
sprintf(lockname, "ntfs_lock %lx", ns->id);
#ifdef __HAIKU__
if ((result = recursive_lock_init(&(ns->vlock), lockname)) != 0)
recursive_lock_init_etc(&(ns->vlock), lockname, MUTEX_FLAG_CLONE_NAME);
#else
if ((result = new_lock(&(ns->vlock), lockname)) != B_OK)
#endif
{
ERRPRINT("fs_mount - error creating lock (%s)\n", strerror(result));
goto exit;
if ((result = new_lock(&(ns->vlock), lockname)) != B_OK) {
ERRPRINT("fs_mount - error creating lock (%s)\n", strerror(result));
goto exit;
}
#endif
handle = load_driver_settings("ntfs");
ns->show_sys_files = ! (strcasecmp(get_driver_parameter(handle, "hide_sys_files", "true", "true"), "true") == 0);

View File

@ -118,8 +118,7 @@ allocate_device_interface(net_device *device, net_device_module_info *module)
if (interface == NULL)
goto error_0;
if (recursive_lock_init(&interface->rx_lock, "rx lock") < B_OK)
goto error_1;
recursive_lock_init(&interface->rx_lock, "rx lock");
char name[128];
snprintf(name, sizeof(name), "%s receive queue", device->name);
@ -154,8 +153,6 @@ error_3:
error_2:
recursive_lock_destroy(&interface->rx_lock);
error_1:
delete interface;
error_0:

View File

@ -22,8 +22,7 @@ mtx_init(struct mtx *m, const char *name, const char *type, int opts)
if (opts == MTX_DEF) {
mutex_init_etc(&m->u.mutex, name, MUTEX_FLAG_CLONE_NAME);
} else if (opts == MTX_RECURSE) {
if (recursive_lock_init(&m->u.recursive, name) < B_OK)
panic("Hell just froze as someone was trying to init a recursive mutex.");
recursive_lock_init_etc(&m->u.recursive, name, MUTEX_FLAG_CLONE_NAME);
} else
panic("Uh-oh, someone is pressing the wrong buttons");

View File

@ -1099,15 +1099,7 @@ arch_vm_translation_map_init_map(vm_translation_map *map, bool kernel)
map->ops = &tmap_ops;
map->map_count = 0;
if (!kernel) {
// During the boot process, there are no semaphores available at this
// point, so we only try to create the translation map lock if we're
// initializing a user translation map.
// vm_translation_map_init_kernel_map_post_sem() is used to complete
// the kernel translation map.
if (recursive_lock_init(&map->lock, "translation map") < B_OK)
return map->lock.sem;
}
recursive_lock_init(&map->lock, "translation map");
map->arch_data = (vm_translation_map_arch_info *)malloc(sizeof(vm_translation_map_arch_info));
if (map == NULL) {
@ -1164,9 +1156,6 @@ arch_vm_translation_map_init_map(vm_translation_map *map, bool kernel)
static status_t
arch_vm_translation_map_init_kernel_map_post_sem(vm_translation_map *map)
{
if (recursive_lock_init(&map->lock, "translation map") < B_OK)
return map->lock.sem;
return B_OK;
}

View File

@ -486,15 +486,7 @@ arch_vm_translation_map_init_map(vm_translation_map *map, bool kernel)
map->ops = &tmap_ops;
map->map_count = 0;
if (!kernel) {
// During the boot process, there are no semaphores available at this
// point, so we only try to create the translation map lock if we're
// initializing a user translation map.
// vm_translation_map_init_kernel_map_post_sem() is used to complete
// the kernel translation map.
if (recursive_lock_init(&map->lock, "translation map") < B_OK)
return map->lock.sem;
}
recursive_lock_init(&map->lock, "translation map");
map->arch_data = (vm_translation_map_arch_info *)malloc(sizeof(vm_translation_map_arch_info));
if (map->arch_data == NULL) {
@ -545,9 +537,6 @@ arch_vm_translation_map_init_map(vm_translation_map *map, bool kernel)
status_t
arch_vm_translation_map_init_kernel_map_post_sem(vm_translation_map *map)
{
if (recursive_lock_init(&map->lock, "translation map") < B_OK)
return map->lock.sem;
return B_OK;
}

View File

@ -774,15 +774,7 @@ arch_vm_translation_map_init_map(vm_translation_map *map, bool kernel)
map->ops = &tmap_ops;
map->map_count = 0;
if (!kernel) {
// During the boot process, there are no semaphores available at this
// point, so we only try to create the translation map lock if we're
// initializing a user translation map.
// vm_translation_map_init_kernel_map_post_sem() is used to complete
// the kernel translation map.
if (recursive_lock_init(&map->lock, "translation map") < B_OK)
return map->lock.sem;
}
recursive_lock_init(&map->lock, "translation map");
map->arch_data = (vm_translation_map_arch_info *)malloc(sizeof(vm_translation_map_arch_info));
if (map == NULL) {
@ -839,9 +831,6 @@ arch_vm_translation_map_init_map(vm_translation_map *map, bool kernel)
status_t
arch_vm_translation_map_init_kernel_map_post_sem(vm_translation_map *map)
{
if (recursive_lock_init(&map->lock, "translation map") < B_OK)
return map->lock.sem;
return B_OK;
}

View File

@ -873,9 +873,7 @@ devfs_mount(fs_volume *volume, const char *devfs, uint32 flags,
fs->id = volume->id;
fs->next_vnode_id = 0;
err = recursive_lock_init(&fs->lock, "devfs lock");
if (err < B_OK)
goto err1;
recursive_lock_init(&fs->lock, "devfs lock");
fs->vnode_hash = hash_init(DEVFS_HASH_SIZE, offsetof(devfs_vnode, all_next),
//(addr_t)&vnode->all_next - (addr_t)vnode,
@ -912,7 +910,6 @@ err3:
hash_uninit(fs->vnode_hash);
err2:
recursive_lock_destroy(&fs->lock);
err1:
free(fs);
err:
return err;

View File

@ -239,7 +239,7 @@ NodeMonitorService::~NodeMonitorService()
status_t
NodeMonitorService::InitCheck()
{
return fRecursiveLock.sem >= B_OK ? B_OK : fRecursiveLock.sem;
return B_OK;
}

View File

@ -2626,7 +2626,7 @@ _dump_mount(struct fs_mount *mount)
kprintf(" root_vnode: %p\n", mount->root_vnode);
kprintf(" covers_vnode: %p\n", mount->covers_vnode);
kprintf(" partition: %p\n", mount->partition);
kprintf(" lock: %ld\n", mount->rlock.sem);
kprintf(" lock: %p\n", &mount->rlock);
kprintf(" flags: %s%s\n", mount->unmounting ? " unmounting" : "",
mount->owns_file_device ? " owns_file_device" : "");
@ -4416,9 +4416,7 @@ vfs_init(kernel_args *args)
mutex_init(&sFileSystemsMutex, "vfs_lock");
if (recursive_lock_init(&sMountOpLock, "vfs_mount_op_lock") < 0)
panic("vfs_init: error allocating mount op lock\n");
recursive_lock_init(&sMountOpLock, "vfs_mount_op_lock");
mutex_init(&sMountMutex, "vfs_mount_lock");
mutex_init(&sVnodeCoveredByMutex, "vfs_vnode_covered_by_lock");
mutex_init(&sVnodeMutex, "vfs_vnode_lock");
@ -6388,9 +6386,7 @@ fs_mount(char *path, const char *device, const char *fsName, uint32 flags,
goto err3;
}
status = recursive_lock_init(&mount->rlock, "mount rlock");
if (status < B_OK)
goto err4;
recursive_lock_init(&mount->rlock, "mount rlock");
// initialize structure
mount->id = sNextMountID++;
@ -6511,7 +6507,6 @@ err5:
mutex_unlock(&sMountMutex);
recursive_lock_destroy(&mount->rlock);
err4:
put_file_system(mount->fs);
free(mount->device_name);
err3:

View File

@ -33,33 +33,38 @@ struct mutex_waiter {
#define MUTEX_FLAG_RELEASED 0x2
#ifdef KDEBUG
# define RECURSIVE_LOCK_HOLDER(lock) ((lock)->lock.holder)
#else
# define RECURSIVE_LOCK_HOLDER(lock) ((lock)->holder)
#endif
int32
recursive_lock_get_recursion(recursive_lock *lock)
{
if (lock->holder == thread_get_current_thread_id())
if (RECURSIVE_LOCK_HOLDER(lock) == thread_get_current_thread_id())
return lock->recursion;
return -1;
}
status_t
void
recursive_lock_init(recursive_lock *lock, const char *name)
{
if (lock == NULL)
return B_BAD_VALUE;
if (name == NULL)
name = "recursive lock";
lock->holder = -1;
mutex_init(&lock->lock, name != NULL ? name : "recursive lock");
RECURSIVE_LOCK_HOLDER(lock) = -1;
lock->recursion = 0;
lock->sem = create_sem(1, name);
}
if (lock->sem >= B_OK)
return B_OK;
return lock->sem;
void
recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
{
mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags);
RECURSIVE_LOCK_HOLDER(lock) = -1;
lock->recursion = 0;
}
@ -69,8 +74,7 @@ recursive_lock_destroy(recursive_lock *lock)
if (lock == NULL)
return;
delete_sem(lock->sem);
lock->sem = -1;
mutex_destroy(&lock->lock);
}
@ -80,15 +84,16 @@ recursive_lock_lock(recursive_lock *lock)
thread_id thread = thread_get_current_thread_id();
if (!kernel_startup && !are_interrupts_enabled())
panic("recursive_lock_lock: called with interrupts disabled for lock %p, sem %#lx\n", lock, lock->sem);
if (thread != lock->holder) {
status_t status = acquire_sem(lock->sem);
if (status < B_OK)
return status;
panic("recursive_lock_lock: called with interrupts disabled for lock "
"%p (\"%s\")\n", lock, lock->lock.name);
if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
mutex_lock(&lock->lock);
#ifndef KDEBUG
lock->holder = thread;
#endif
}
lock->recursion++;
return B_OK;
}
@ -97,12 +102,14 @@ recursive_lock_lock(recursive_lock *lock)
void
recursive_lock_unlock(recursive_lock *lock)
{
if (thread_get_current_thread_id() != lock->holder)
if (thread_get_current_thread_id() != RECURSIVE_LOCK_HOLDER(lock))
panic("recursive_lock %p unlocked by non-holder thread!\n", lock);
if (--lock->recursion == 0) {
#ifndef KDEBUG
lock->holder = -1;
release_sem_etc(lock->sem, 1, 0/*B_DO_NOT_RESCHEDULE*/);
#endif
mutex_unlock(&lock->lock);
}
}

View File

@ -1094,8 +1094,7 @@ module_init(kernel_args *args)
{
struct preloaded_image *image;
if (recursive_lock_init(&sModulesLock, "modules rlock") < B_OK)
return B_ERROR;
recursive_lock_init(&sModulesLock, "modules rlock");
sModulesHash = hash_init(MODULE_HASH_SIZE, 0, module_compare, module_hash);
if (sModulesHash == NULL)

View File

@ -172,7 +172,6 @@ static kernel_args *sKernelArgs;
static status_t object_cache_reserve_internal(object_cache *cache,
size_t object_count, uint32 flags);
static status_t object_depot_init_locks(object_depot *depot);
static depot_magazine *alloc_magazine();
static void free_magazine(depot_magazine *magazine);
@ -405,20 +404,6 @@ internal_free(void *_buffer)
}
static status_t
recursive_lock_boot_init(recursive_lock *lock, const char *name, uint32 flags)
{
if (flags & CACHE_DURING_BOOT) {
lock->sem = -1;
lock->holder = 1;
lock->recursion = 0;
return B_OK;
}
return recursive_lock_init(lock, name);
}
static status_t
area_allocate_pages(object_cache *cache, void **pages, uint32 flags)
{
@ -618,16 +603,6 @@ object_cache_init(object_cache *cache, const char *name, size_t objectSize,
}
static status_t
object_cache_init_locks(object_cache *cache)
{
if (cache->flags & CACHE_NO_DEPOT)
return B_OK;
return object_depot_init_locks(&cache->depot);
}
static void
object_cache_commit_slab(object_cache *cache, slab *slab)
{
@ -1280,9 +1255,7 @@ object_depot_init(object_depot *depot, uint32 flags,
depot->empty = NULL;
depot->full_count = depot->empty_count = 0;
status_t status = recursive_lock_boot_init(&depot->lock, "depot", flags);
if (status < B_OK)
return status;
recursive_lock_init(&depot->lock, "depot");
depot->stores = (depot_cpu_store *)internal_alloc(sizeof(depot_cpu_store)
* smp_get_num_cpus(), flags);
@ -1292,7 +1265,7 @@ object_depot_init(object_depot *depot, uint32 flags,
}
for (int i = 0; i < smp_get_num_cpus(); i++) {
recursive_lock_boot_init(&depot->stores[i].lock, "cpu store", flags);
recursive_lock_init(&depot->stores[i].lock, "cpu store");
depot->stores[i].loaded = depot->stores[i].previous = NULL;
}
@ -1302,23 +1275,6 @@ object_depot_init(object_depot *depot, uint32 flags,
}
status_t
object_depot_init_locks(object_depot *depot)
{
status_t status = recursive_lock_init(&depot->lock, "depot");
if (status < B_OK)
return status;
for (int i = 0; i < smp_get_num_cpus(); i++) {
status = recursive_lock_init(&depot->stores[i].lock, "cpu store");
if (status < B_OK)
return status;
}
return B_OK;
}
void
object_depot_destroy(object_depot *depot)
{
@ -1504,8 +1460,6 @@ slab_init_post_sem()
while (it.HasNext()) {
object_cache *cache = it.Next();
if (object_cache_init_locks(cache) < B_OK)
panic("slab_init: failed to create sems");
if (cache->allocate_pages == early_allocate_pages)
object_cache_commit_pre_pages(cache);
}