/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_LOCK_H
#define _KERNEL_LOCK_H


#include <OS.h>

#include <arch/atomic.h>
#include <debug.h>


struct mutex_waiter;

typedef struct mutex {
	const char*				name;
	struct mutex_waiter*	waiters;
	spinlock				lock;
#if KDEBUG
	thread_id				holder;
	uint16					_unused;
#else
	int32					count;
	uint16					ignore_unlock_count;
#endif
	uint8					flags;
} mutex;

#define MUTEX_FLAG_CLONE_NAME	0x1
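
// Note on the non-KDEBUG layout: the inline fast paths below encode the lock
// state in "count". 0 means unlocked, the owner's atomic decrement takes it
// to -1, and anything below -1 means other threads are waiting and must be
// woken on unlock.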


typedef struct recursive_lock {
	mutex		lock;
#if !KDEBUG
	thread_id	holder;
#else
	int32		_unused;
#endif
	int			recursion;
} recursive_lock;


struct rw_lock_waiter;

typedef struct rw_lock {
	const char*				name;
	struct rw_lock_waiter*	waiters;
	spinlock				lock;
	thread_id				holder;
	int32					count;
	int32					owner_count;
	int16					active_readers;
								// Only > 0 while a writer is waiting: number
								// of active readers when the first waiting
								// writer started waiting.
	int16					pending_readers;
								// Number of readers that have already
								// incremented "count", but have not yet started
								// to wait at the time the last writer unlocked.
	uint32					flags;
} rw_lock;

#define RW_LOCK_WRITER_COUNT_BASE	0x10000
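
// "count" tracks readers and writers at once: each reader adds 1, a writer
// raises it by RW_LOCK_WRITER_COUNT_BASE. The read fast paths below thus need
// only a single atomic add and take the slow path exactly when the value has
// reached RW_LOCK_WRITER_COUNT_BASE, i.e. when a writer is active or waiting.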

#define RW_LOCK_FLAG_CLONE_NAME	0x1


#if KDEBUG
#	define KDEBUG_RW_LOCK_DEBUG 0
		// Define to 1 if you want to use ASSERT_READ_LOCKED_RW_LOCK().
		// The rw_lock will just behave like a recursive locker then.
#	define ASSERT_LOCKED_RECURSIVE(r) \
		{ ASSERT(find_thread(NULL) == (r)->lock.holder); }
#	define ASSERT_LOCKED_MUTEX(m) { ASSERT(find_thread(NULL) == (m)->holder); }
#	define ASSERT_WRITE_LOCKED_RW_LOCK(l) \
		{ ASSERT(find_thread(NULL) == (l)->holder); }
#	if KDEBUG_RW_LOCK_DEBUG
#		define ASSERT_READ_LOCKED_RW_LOCK(l) \
			{ ASSERT(find_thread(NULL) == (l)->holder); }
#	else
#		define ASSERT_READ_LOCKED_RW_LOCK(l) do {} while (false)
#	endif
#else
#	define ASSERT_LOCKED_RECURSIVE(r)		do {} while (false)
#	define ASSERT_LOCKED_MUTEX(m)			do {} while (false)
#	define ASSERT_WRITE_LOCKED_RW_LOCK(m)	do {} while (false)
#	define ASSERT_READ_LOCKED_RW_LOCK(l)	do {} while (false)
#endif


// static initializers
#if KDEBUG
#	define MUTEX_INITIALIZER(name) \
		{ name, NULL, B_SPINLOCK_INITIALIZER, -1, 0 }
#	define RECURSIVE_LOCK_INITIALIZER(name)	{ MUTEX_INITIALIZER(name), 0 }
#else
#	define MUTEX_INITIALIZER(name) \
		{ name, NULL, B_SPINLOCK_INITIALIZER, 0, 0, 0 }
#	define RECURSIVE_LOCK_INITIALIZER(name)	{ MUTEX_INITIALIZER(name), -1, 0 }
#endif

#define RW_LOCK_INITIALIZER(name) \
	{ name, NULL, B_SPINLOCK_INITIALIZER, -1, 0, 0, 0, 0, 0 }
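
// Example (hypothetical lock names): locks with static lifetime can be
// defined without an explicit *_init() call:
//
//	static mutex sListLock = MUTEX_INITIALIZER("list");
//	static rw_lock sCacheLock = RW_LOCK_INITIALIZER("cache");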


#if KDEBUG
#	define RECURSIVE_LOCK_HOLDER(recursiveLock)	((recursiveLock)->lock.holder)
#else
#	define RECURSIVE_LOCK_HOLDER(recursiveLock)	((recursiveLock)->holder)
#endif


#ifdef __cplusplus
extern "C" {
#endif

extern void recursive_lock_init(recursive_lock *lock, const char *name);
	// name is *not* cloned nor freed in recursive_lock_destroy()
extern void recursive_lock_init_etc(recursive_lock *lock, const char *name,
	uint32 flags);
extern void recursive_lock_destroy(recursive_lock *lock);
extern status_t recursive_lock_lock(recursive_lock *lock);
extern status_t recursive_lock_trylock(recursive_lock *lock);
extern void recursive_lock_unlock(recursive_lock *lock);
extern int32 recursive_lock_get_recursion(recursive_lock *lock);
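	// Usage sketch (illustrative, hypothetical lock name): the holding
	// thread may re-acquire the lock; it is released to other threads only
	// with the outermost unlock.
	//
	//	recursive_lock_lock(&sScannerLock);
	//	recursive_lock_lock(&sScannerLock);		// same thread: no deadlock
	//	recursive_lock_unlock(&sScannerLock);
	//	recursive_lock_unlock(&sScannerLock);	// actually releases the lock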

extern void rw_lock_init(rw_lock* lock, const char* name);
	// name is *not* cloned nor freed in rw_lock_destroy()
extern void rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags);
extern void rw_lock_destroy(rw_lock* lock);
extern status_t rw_lock_write_lock(rw_lock* lock);
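	// Usage sketch (hypothetical lock): any number of readers may hold the
	// lock at the same time, a writer holds it exclusively.
	//
	//	rw_lock_read_lock(&sCacheLock);		// shared access
	//	rw_lock_read_unlock(&sCacheLock);
	//	rw_lock_write_lock(&sCacheLock);	// exclusive access
	//	rw_lock_write_unlock(&sCacheLock);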

extern void mutex_init(mutex* lock, const char* name);
	// name is *not* cloned nor freed in mutex_destroy()
extern void mutex_init_etc(mutex* lock, const char* name, uint32 flags);
extern void mutex_destroy(mutex* lock);
extern void mutex_transfer_lock(mutex* lock, thread_id thread);
extern status_t mutex_switch_lock(mutex* from, mutex* to);
	// Unlocks "from" and locks "to" such that unlocking and starting to wait
	// for the lock happen atomically. I.e. if "from" guards the object "to"
	// belongs to, the operation is safe as long as "from" is held while
	// destroying "to".
extern status_t mutex_switch_from_read_lock(rw_lock* from, mutex* to);
	// Like mutex_switch_lock(), just for switching from a read-locked
	// rw_lock.
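	// Illustrative pattern (all names hypothetical): trade the lock guarding
	// a container for the lock of one of its objects without a window in
	// which the object could be destroyed.
	//
	//	mutex_lock(&sObjectListLock);
	//	Object* object = lookup_object(id);
	//	status_t status = mutex_switch_lock(&sObjectListLock, &object->lock);
	//		// the list lock is released; on B_OK we hold object->lock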


// implementation private:

extern status_t _rw_lock_read_lock(rw_lock* lock);
extern status_t _rw_lock_read_lock_with_timeout(rw_lock* lock,
	uint32 timeoutFlags, bigtime_t timeout);
extern void _rw_lock_read_unlock(rw_lock* lock);
extern void _rw_lock_write_unlock(rw_lock* lock);

extern status_t _mutex_lock(mutex* lock, void* locker);
extern void _mutex_unlock(mutex* lock);
extern status_t _mutex_trylock(mutex* lock);
extern status_t _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags,
	bigtime_t timeout);


static inline status_t
rw_lock_read_lock(rw_lock* lock)
{
#if KDEBUG_RW_LOCK_DEBUG
	return rw_lock_write_lock(lock);
#else
	int32 oldCount = atomic_add(&lock->count, 1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		return _rw_lock_read_lock(lock);
	return B_OK;
#endif
}


static inline status_t
rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
	bigtime_t timeout)
{
#if KDEBUG_RW_LOCK_DEBUG
	// This debug mode degrades read locks to exclusive write locks; there is
	// no timed write lock, so the timeout cannot be honored here.
	return rw_lock_write_lock(lock);
#else
	int32 oldCount = atomic_add(&lock->count, 1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		return _rw_lock_read_lock_with_timeout(lock, timeoutFlags, timeout);
	return B_OK;
#endif
}


static inline void
rw_lock_read_unlock(rw_lock* lock)
{
#if KDEBUG_RW_LOCK_DEBUG
	// Call the slow path directly; rw_lock_write_unlock() is only defined
	// further below.
	_rw_lock_write_unlock(lock);
#else
	int32 oldCount = atomic_add(&lock->count, -1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		_rw_lock_read_unlock(lock);
#endif
}


static inline void
rw_lock_write_unlock(rw_lock* lock)
{
	_rw_lock_write_unlock(lock);
}


static inline status_t
mutex_lock(mutex* lock)
{
#if KDEBUG
	return _mutex_lock(lock, NULL);
#else
	if (atomic_add(&lock->count, -1) < 0)
		return _mutex_lock(lock, NULL);
	return B_OK;
#endif
}
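
// Note: without KDEBUG, the uncontended mutex_lock() is a single atomic
// decrement; _mutex_lock() is entered only when the mutex is already held.
// With KDEBUG every lock goes through _mutex_lock(), which also records the
// holder thread for the ASSERT_LOCKED_MUTEX() check.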


static inline status_t
mutex_trylock(mutex* lock)
{
#if KDEBUG
	return _mutex_trylock(lock);
#else
	if (atomic_test_and_set(&lock->count, -1, 0) != 0)
		return B_WOULD_BLOCK;
	return B_OK;
#endif
}


static inline status_t
mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
{
#if KDEBUG
	return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
#else
	if (atomic_add(&lock->count, -1) < 0)
		return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
	return B_OK;
#endif
}


static inline void
mutex_unlock(mutex* lock)
{
#if !KDEBUG
	if (atomic_add(&lock->count, 1) < -1)
#endif
		_mutex_unlock(lock);
}


static inline void
recursive_lock_transfer_lock(recursive_lock* lock, thread_id thread)
{
	if (lock->recursion != 1)
		panic("invalid recursion level for lock transfer!");

#if KDEBUG
	mutex_transfer_lock(&lock->lock, thread);
#else
	lock->holder = thread;
#endif
}


extern void lock_debug_init();


#ifdef __cplusplus
}
#endif


#endif /* _KERNEL_LOCK_H */