Added new mutex_switch_from_read_lock() for unlocking a read lock and
starting to lock a mutex in an atomic operation.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@34935 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
parent 8ab820f076
commit c4f9831292
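The new function closes a race that exists when a thread holds only a read lock on the rw_lock guarding an object and then needs that object's own mutex: with separate unlock and lock calls, the object could be destroyed between the two. A minimal usage sketch, assuming a hypothetical object table guarded by an rw_lock (Object, sCacheLock, and lookup_object() are illustrative names, not part of this commit):

struct Object {
	mutex	lock;			// per-object mutex
	// ... payload ...
};

static rw_lock sCacheLock;	// hypothetical: guards the object table

status_t
lock_object(int32 id, Object** _object)
{
	rw_lock_read_lock(&sCacheLock);

	Object* object = lookup_object(id);	// hypothetical lookup
	if (object == NULL) {
		rw_lock_read_unlock(&sCacheLock);
		return B_ENTRY_NOT_FOUND;
	}

	// Drop the read lock on sCacheLock and start locking object->lock
	// atomically -- the object cannot be destroyed in between.
	status_t error = mutex_switch_from_read_lock(&sCacheLock, &object->lock);
	if (error != B_OK)
		return error;

	*_object = object;
	return B_OK;
}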
@@ -124,7 +124,6 @@ extern void rw_lock_init(rw_lock* lock, const char* name);
 extern void rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags);
 extern void rw_lock_destroy(rw_lock* lock);
 extern status_t rw_lock_write_lock(rw_lock* lock);
-extern void rw_lock_write_unlock(rw_lock* lock);
 
 extern void mutex_init(mutex* lock, const char* name);
 	// name is *not* cloned nor freed in mutex_destroy()
@@ -135,12 +134,16 @@ extern status_t mutex_switch_lock(mutex* from, mutex* to);
 	// for the lock is atomically. I.e. if "from" guards the object "to" belongs
 	// to, the operation is safe as long as "from" is held while destroying
 	// "to".
+extern status_t mutex_switch_from_read_lock(rw_lock* from, mutex* to);
+	// Like mutex_switch_lock(), just for a switching from a read-locked
+	// rw_lock.
 
 
 // implementation private:
 
 extern status_t _rw_lock_read_lock(rw_lock* lock);
-extern void _rw_lock_read_unlock(rw_lock* lock);
+extern void _rw_lock_read_unlock(rw_lock* lock, bool threadsLocked);
+extern void _rw_lock_write_unlock(rw_lock* lock, bool threadsLocked);
 
 extern status_t _mutex_lock(mutex* lock, bool threadsLocked);
 extern void _mutex_unlock(mutex* lock, bool threadsLocked);
@@ -171,11 +174,18 @@ rw_lock_read_unlock(rw_lock* lock)
 #else
 	int32 oldCount = atomic_add(&lock->count, -1);
 	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
-		_rw_lock_read_unlock(lock);
+		_rw_lock_read_unlock(lock, false);
 #endif
 }
 
 
+static inline void
+rw_lock_write_unlock(rw_lock* lock)
+{
+	_rw_lock_write_unlock(lock, false);
+}
+
+
 static inline status_t
 mutex_lock(mutex* lock)
 {
@@ -323,9 +323,9 @@ _rw_lock_read_lock(rw_lock* lock)
 
 
 void
-_rw_lock_read_unlock(rw_lock* lock)
+_rw_lock_read_unlock(rw_lock* lock, bool threadsLocked)
 {
-	InterruptsSpinLocker locker(gThreadSpinlock);
+	InterruptsSpinLocker locker(gThreadSpinlock, false, !threadsLocked);
 
 	// If we're still holding the write lock or if there are other readers,
 	// no-one can be woken up.
@@ -389,9 +389,9 @@ rw_lock_write_lock(rw_lock* lock)
 
 
 void
-rw_lock_write_unlock(rw_lock* lock)
+_rw_lock_write_unlock(rw_lock* lock, bool threadsLocked)
 {
-	InterruptsSpinLocker locker(gThreadSpinlock);
+	InterruptsSpinLocker locker(gThreadSpinlock, false, !threadsLocked);
 
 	if (thread_get_current_thread_id() != lock->holder) {
 		panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",
@@ -558,6 +558,23 @@ mutex_switch_lock(mutex* from, mutex* to)
 }
 
 
+status_t
+mutex_switch_from_read_lock(rw_lock* from, mutex* to)
+{
+	InterruptsSpinLocker locker(gThreadSpinlock);
+
+#if KDEBUG_RW_LOCK_DEBUG
+	_rw_lock_write_unlock(from, true);
+#else
+	int32 oldCount = atomic_add(&from->count, -1);
+	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
+		_rw_lock_read_unlock(from, true);
+#endif
+
+	return mutex_lock_threads_locked(to);
+}
+
+
 status_t
 _mutex_lock(mutex* lock, bool threadsLocked)
 {
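The hunks below apply the same change to a second copy of the locking code, one that uses AutoLocker&lt;ThreadSpinlock&gt; on sThreadSpinlock in place of the kernel's InterruptsSpinLocker on gThreadSpinlock, and find_thread(NULL) in place of thread_get_current_thread_id(). In both copies the new threadsLocked parameter is passed through to the locker (as !threadsLocked), so a caller that already holds the thread spinlock, such as mutex_switch_from_read_lock() itself, can invoke the unlock routines without the spinlock being acquired a second time.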
@@ -360,9 +360,9 @@ _rw_lock_read_lock(rw_lock* lock)
 
 
 void
-_rw_lock_read_unlock(rw_lock* lock)
+_rw_lock_read_unlock(rw_lock* lock, bool threadsLocked)
 {
-	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
+	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock, false, !threadsLocked);
 
 	// If we're still holding the write lock or if there are other readers,
 	// no-one can be woken up.
@@ -425,9 +425,9 @@ rw_lock_write_lock(rw_lock* lock)
 
 
 void
-rw_lock_write_unlock(rw_lock* lock)
+_rw_lock_write_unlock(rw_lock* lock, bool threadsLocked)
 {
-	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
+	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock, false, !threadsLocked);
 
 	if (find_thread(NULL) != lock->holder) {
 		panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",
@@ -548,6 +548,23 @@ mutex_switch_lock(mutex* from, mutex* to)
 }
 
 
+status_t
+mutex_switch_from_read_lock(rw_lock* from, mutex* to)
+{
+	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
+
+#if KDEBUG_RW_LOCK_DEBUG
+	_rw_lock_write_unlock(from, true);
+#else
+	int32 oldCount = atomic_add(&from->count, -1);
+	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
+		_rw_lock_read_unlock(from, true);
+#endif
+
+	return mutex_lock_threads_locked(to);
+}
+
+
 status_t
 _mutex_lock(mutex* lock, bool threadsLocked)
 {
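For contrast, a sketch of the naive, non-atomic sequence that the new function replaces (same hypothetical names as in the sketch above); between the two calls, another thread can write-lock the rw_lock and destroy the object:

	rw_lock_read_unlock(&sCacheLock);
	// window: another thread may write-lock sCacheLock and delete the object
	mutex_lock(&object->lock);	// potential use-after-free

mutex_switch_from_read_lock() removes this window by performing the read unlock and beginning the mutex acquisition while holding the same thread spinlock.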