Since there were no further complaints: Added mutex_lock_with_timeout().

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@34403 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Ingo Weinhold 2009-12-01 09:38:34 +00:00
parent f8ec4c04a0
commit e182b46db6
2 changed files with 115 additions and 1 deletions

View File

@ -22,6 +22,7 @@ typedef struct mutex {
thread_id holder; thread_id holder;
#else #else
int32 count; int32 count;
uint16 ignore_unlock_count;
#endif #endif
uint8 flags; uint8 flags;
} mutex; } mutex;
@ -81,7 +82,7 @@ typedef struct rw_lock {
# define MUTEX_INITIALIZER(name) { name, NULL, -1, 0 } # define MUTEX_INITIALIZER(name) { name, NULL, -1, 0 }
# define RECURSIVE_LOCK_INITIALIZER(name) { MUTEX_INITIALIZER(name), 0 } # define RECURSIVE_LOCK_INITIALIZER(name) { MUTEX_INITIALIZER(name), 0 }
#else #else
# define MUTEX_INITIALIZER(name) { name, NULL, 0, 0 } # define MUTEX_INITIALIZER(name) { name, NULL, 0, 0, 0 }
# define RECURSIVE_LOCK_INITIALIZER(name) { MUTEX_INITIALIZER(name), -1, 0 } # define RECURSIVE_LOCK_INITIALIZER(name) { MUTEX_INITIALIZER(name), -1, 0 }
#endif #endif
@ -132,6 +133,8 @@ extern status_t mutex_switch_lock(mutex* from, mutex* to);
extern status_t _mutex_lock(mutex* lock, bool threadsLocked); extern status_t _mutex_lock(mutex* lock, bool threadsLocked);
extern void _mutex_unlock(mutex* lock, bool threadsLocked); extern void _mutex_unlock(mutex* lock, bool threadsLocked);
extern status_t _mutex_trylock(mutex* lock); extern status_t _mutex_trylock(mutex* lock);
extern status_t _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags,
bigtime_t timeout);
static inline status_t static inline status_t
@ -173,6 +176,19 @@ mutex_trylock(mutex* lock)
} }
/*!	Acquires \a lock, waiting at most until the given timeout expires.
	\param lock The mutex to acquire.
	\param timeoutFlags Timeout mode flags (e.g. absolute vs. relative);
		passed through to the kernel's timed-blocking machinery.
	\param timeout The timeout value, interpreted according to
		\a timeoutFlags.
	\return \c B_OK if the lock was acquired, another error code (e.g. a
		timeout error from the slow path) otherwise.
*/
static inline status_t
mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
{
#if KDEBUG
	// Debug builds always take the slow path, which tracks the holder
	// and checks for double locking.
	return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
#else
	// Fast path: decrement the count; a previous value >= 1 means the
	// lock was free and we own it now. Otherwise fall back to the slow
	// path, which enqueues us as a waiter (or times out).
	if (atomic_add(&lock->count, -1) < 0)
		return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
	return B_OK;
#endif
}
static inline void static inline void
mutex_unlock(mutex* lock) mutex_unlock(mutex* lock)
{ {

View File

@ -431,6 +431,7 @@ mutex_init(mutex* lock, const char *name)
lock->holder = -1; lock->holder = -1;
#else #else
lock->count = 0; lock->count = 0;
lock->ignore_unlock_count = 0;
#endif #endif
lock->flags = 0; lock->flags = 0;
@ -448,6 +449,7 @@ mutex_init_etc(mutex* lock, const char *name, uint32 flags)
lock->holder = -1; lock->holder = -1;
#else #else
lock->count = 0; lock->count = 0;
lock->ignore_unlock_count = 0;
#endif #endif
lock->flags = flags & MUTEX_FLAG_CLONE_NAME; lock->flags = flags & MUTEX_FLAG_CLONE_NAME;
@ -574,6 +576,11 @@ _mutex_unlock(mutex* lock, bool threadsLocked)
lock, lock->holder); lock, lock->holder);
return; return;
} }
#else
if (lock->ignore_unlock_count > 0) {
lock->ignore_unlock_count--;
return;
}
#endif #endif
mutex_waiter* waiter = lock->waiters; mutex_waiter* waiter = lock->waiters;
@ -620,6 +627,97 @@ _mutex_trylock(mutex* lock)
} }
/*!	Slow path of mutex_lock_with_timeout(): blocks on \a lock until it is
	acquired or the given timeout expires.
	Invoked after the inline fast path failed (non-KDEBUG: the atomic count
	was already decremented by the caller).
	\param lock The mutex to acquire.
	\param timeoutFlags Timeout mode flags, passed to
		thread_block_with_timeout_locked().
	\param timeout The timeout, interpreted according to \a timeoutFlags.
	\return \c B_OK if the lock was acquired, otherwise the error returned
		by thread_block_with_timeout_locked() (e.g. a timeout error).
*/
status_t
_mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("_mutex_lock_with_timeout(): called with interrupts disabled "
			"for lock %p", lock);
	}
#endif

	InterruptsSpinLocker locker(gThreadSpinlock);

	// Might have been released after we decremented the count, but before
	// we acquired the spinlock.
#if KDEBUG
	if (lock->holder < 0) {
		// lock is free -- take ownership
		lock->holder = thread_get_current_thread_id();
		return B_OK;
	} else if (lock->holder == thread_get_current_thread_id()) {
		panic("_mutex_lock_with_timeout(): double lock of %p by thread %ld",
			lock, lock->holder);
	} else if (lock->holder == 0)
		panic("_mutex_lock_with_timeout(): using uninitialized lock %p", lock);
#else
	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		// the releasing thread already handed the lock to us
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		return B_OK;
	}
#endif

	// enqueue in waiter list
	mutex_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;

	if (lock->waiters != NULL) {
		lock->waiters->last->next = &waiter;
	} else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
	status_t error = thread_block_with_timeout_locked(timeoutFlags, timeout);

	if (error == B_OK) {
#if KDEBUG
		lock->holder = waiter.thread->id;
#endif
	} else {
		// If the timeout occurred, we must remove our waiter structure from
		// the queue.
		mutex_waiter* previousWaiter = NULL;
		mutex_waiter* otherWaiter = lock->waiters;
		while (otherWaiter != NULL && otherWaiter != &waiter) {
			previousWaiter = otherWaiter;
			otherWaiter = otherWaiter->next;
		}

		if (otherWaiter == &waiter) {
			// the structure is still in the list -- dequeue
			if (&waiter == lock->waiters) {
				if (waiter.next != NULL)
					waiter.next->last = waiter.last;
				lock->waiters = waiter.next;
			} else {
				if (waiter.next == NULL)
					lock->waiters->last = previousWaiter;
				previousWaiter->next = waiter.next;
			}

#if !KDEBUG
			// we need to fix the lock count
			if (atomic_add(&lock->count, 1) == -1) {
				// This means we were the only thread waiting for the lock and
				// the lock owner has already called atomic_add() in
				// mutex_unlock(). That is we probably would get the lock very
				// soon (if the lock holder has a low priority, that might
				// actually take rather long, though), but the timeout already
				// occurred, so we don't try to wait. Just increment the ignore
				// unlock count.
				lock->ignore_unlock_count++;
			}
#endif
		}
	}

	return error;
}
static int static int
dump_mutex_info(int argc, char** argv) dump_mutex_info(int argc, char** argv)
{ {