Added rw_lock_read_lock_with_timeout().

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@37680 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold 2010-07-22 11:10:48 +00:00
parent b349aae941
commit 4e08fb8589
3 changed files with 196 additions and 3 deletions
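
A sketch of how a caller might use the new function (hypothetical example, not part of this commit; B_RELATIVE_TIMEOUT and B_TIMED_OUT are the standard Haiku timeout flag and error code):

	status_t
	read_with_deadline(rw_lock* lock)
	{
		// Wait at most 100 ms for read access (bigtime_t timeouts
		// are in microseconds).
		status_t error = rw_lock_read_lock_with_timeout(lock,
			B_RELATIVE_TIMEOUT, 100000);
		if (error != B_OK)
			return error;
				// typically B_TIMED_OUT if a writer held on too long

		// ... read the data the lock protects ...

		rw_lock_read_unlock(lock);
		return B_OK;
	}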

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
  * Distributed under the terms of the MIT License.
  *
@@ -142,6 +142,8 @@ extern status_t mutex_switch_from_read_lock(rw_lock* from, mutex* to);
 // implementation private:
 extern status_t _rw_lock_read_lock(rw_lock* lock);
+extern status_t _rw_lock_read_lock_with_timeout(rw_lock* lock,
+	uint32 timeoutFlags, bigtime_t timeout);
 extern void _rw_lock_read_unlock(rw_lock* lock, bool threadsLocked);
 extern void _rw_lock_write_unlock(rw_lock* lock, bool threadsLocked);
@@ -166,6 +168,21 @@ rw_lock_read_lock(rw_lock* lock)
 }
 
 
+static inline status_t
+rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
+	bigtime_t timeout)
+{
+#if KDEBUG_RW_LOCK_DEBUG
+	return mutex_lock_with_timeout(lock, timeoutFlags, timeout);
+#else
+	int32 oldCount = atomic_add(&lock->count, 1);
+	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
+		return _rw_lock_read_lock_with_timeout(lock, timeoutFlags, timeout);
+	return B_OK;
+#endif
+}
+
+
 static inline void
 rw_lock_read_unlock(rw_lock* lock)
 {
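
A note on the non-KDEBUG fast path above: lock->count packs readers and writers into one atomically updated value -- each reader adds 1, each writer adds RW_LOCK_WRITER_COUNT_BASE (a large power-of-two constant defined earlier in this header) -- so oldCount >= RW_LOCK_WRITER_COUNT_BASE holds exactly when a writer was active or queued at the moment the reader registered, and only then does the reader fall back to the blocking slow path. A rough sketch of the encoding (illustration only, not part of the commit):

	// count == 0                              -> lock is free
	// count == 3                              -> three readers, no writer
	// count == RW_LOCK_WRITER_COUNT_BASE + 2  -> a writer holds or waits,
	//     and two registered readers must block in the slow path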

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
  * Distributed under the terms of the MIT License.
  *
@@ -193,6 +193,7 @@ rw_lock_unblock(rw_lock* lock)
 
 			// unblock thread
 			thread_unblock_locked(waiter->thread, B_OK);
+			waiter->thread = NULL;
 			return RW_LOCK_WRITER_COUNT_BASE;
 		}
@@ -208,6 +209,7 @@ rw_lock_unblock(rw_lock* lock)
 
 		// unblock thread
 		thread_unblock_locked(waiter->thread, B_OK);
+		waiter->thread = NULL;
 	} while ((waiter = lock->waiters) != NULL && !waiter->writer);
 
 	if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
@@ -322,6 +324,88 @@ _rw_lock_read_lock(rw_lock* lock)
 }
 
 
+status_t
+_rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
+	bigtime_t timeout)
+{
+	InterruptsSpinLocker locker(gThreadSpinlock);
+
+	// We might be the writer ourselves.
+	if (lock->holder == thread_get_current_thread_id()) {
+		lock->owner_count++;
+		return B_OK;
+	}
+
+	// The writer that originally had the lock when we called atomic_add() might
+	// already have gone and another writer could have overtaken us. In this
+	// case the original writer set pending_readers, so we know that we don't
+	// have to wait.
+	if (lock->pending_readers > 0) {
+		lock->pending_readers--;
+
+		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
+			lock->active_readers++;
+
+		return B_OK;
+	}
+
+	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);
+
+	// we need to wait
+
+	// enqueue in waiter list
+	rw_lock_waiter waiter;
+	waiter.thread = thread_get_current_thread();
+	waiter.next = NULL;
+	waiter.writer = false;
+
+	if (lock->waiters != NULL)
+		lock->waiters->last->next = &waiter;
+	else
+		lock->waiters = &waiter;
+
+	lock->waiters->last = &waiter;
+
+	// block
+	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock);
+	status_t error = thread_block_with_timeout_locked(timeoutFlags, timeout);
+
+	if (error == B_OK || waiter.thread == NULL) {
+		// We were unblocked successfully -- potentially our unblocker overtook
+		// us after we already failed. In either case, we've got the lock, now.
+		return B_OK;
+	}
+
+	// We failed to get the lock -- dequeue from waiter list.
+	rw_lock_waiter* previous = NULL;
+	rw_lock_waiter* other = lock->waiters;
+	while (other != &waiter) {
+		previous = other;
+		other = other->next;
+	}
+
+	if (previous == NULL) {
+		// we are the first in line
+		lock->waiters = waiter.next;
+		if (lock->waiters != NULL)
+			lock->waiters->last = waiter.last;
+	} else {
+		// one or more other waiters are before us in the queue
+		previous->next = waiter.next;
+		if (lock->waiters->last == &waiter)
+			lock->waiters->last = previous;
+	}
+
+	// Decrement the count. ATM this is all we have to do. There's at least
+	// one writer ahead of us -- otherwise the last writer would have unblocked
+	// us (writers only manipulate the lock data with thread spinlock being
+	// held) -- so our leaving doesn't make a difference to the ones behind us
+	// in the queue.
+	atomic_add(&lock->count, -1);
+	return error;
+}
+
+
 void
 _rw_lock_read_unlock(rw_lock* lock, bool threadsLocked)
 {
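
Why the error == B_OK || waiter.thread == NULL test in the slow path is sound (added commentary, my reading of the diff): the rw_lock_unblock() hunks above now clear waiter->thread right after thread_unblock_locked(), and the waiter list is only manipulated with the thread spinlock held. So a reader whose timeout fired, but who then finds waiter.thread == NULL after reacquiring the spinlock, was already granted the lock by its unblocker and must report B_OK rather than B_TIMED_OUT. Schematically (not code from the commit):

	// timed-out reader                     releasing writer
	// ----------------                     ----------------
	// thread_block_with_timeout_locked()
	//     returns B_TIMED_OUT
	//                                      rw_lock_unblock():
	//                                          thread_unblock_locked(..., B_OK);
	//                                          waiter->thread = NULL;  // granted
	// reacquires gThreadSpinlock
	// waiter.thread == NULL -> we own the lock after all; return B_OK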

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2009, Haiku Inc. All Rights Reserved.
+ * Copyright 2002-2010, Haiku Inc. All Rights Reserved.
  * Distributed under the terms of the MIT license.
  *
  * Authors:
* Authors:
@@ -238,6 +238,7 @@ rw_lock_unblock(rw_lock* lock)
 
 			// unblock thread
 			_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);
+			waiter->thread = NULL;
 			return RW_LOCK_WRITER_COUNT_BASE;
 		}
@@ -253,6 +254,7 @@ rw_lock_unblock(rw_lock* lock)
 
 		// unblock thread
 		_kern_unblock_thread(get_thread_id(waiter->thread), B_OK);
+		waiter->thread = NULL;
 	} while ((waiter = lock->waiters) != NULL && !waiter->writer);
 
 	if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
@@ -359,6 +361,96 @@ _rw_lock_read_lock(rw_lock* lock)
 }
 
 
+status_t
+_rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
+	bigtime_t timeout)
+{
+	AutoLocker<ThreadSpinlock> locker(sThreadSpinlock);
+
+	// We might be the writer ourselves.
+	if (lock->holder == find_thread(NULL)) {
+		lock->owner_count++;
+		return B_OK;
+	}
+
+	// The writer that originally had the lock when we called atomic_add() might
+	// already have gone and another writer could have overtaken us. In this
+	// case the original writer set pending_readers, so we know that we don't
+	// have to wait.
+	if (lock->pending_readers > 0) {
+		lock->pending_readers--;
+
+		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
+			lock->active_readers++;
+
+		return B_OK;
+	}
+
+	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);
+
+	// we need to wait
+
+	// enqueue in waiter list
+	rw_lock_waiter waiter;
+	waiter.thread = get_current_thread();
+	waiter.next = NULL;
+	waiter.writer = false;
+
+	if (lock->waiters != NULL)
+		lock->waiters->last->next = &waiter;
+	else
+		lock->waiters = &waiter;
+
+	lock->waiters->last = &waiter;
+
+	// block
+	get_user_thread()->wait_status = 1;
+	sThreadSpinlock.Unlock();
+
+	status_t error;
+	while ((error = _kern_block_thread(timeoutFlags, timeout))
+			== B_INTERRUPTED) {
+	}
+
+	sThreadSpinlock.Lock();
+
+	if (error == B_OK || waiter.thread == NULL) {
+		// We were unblocked successfully -- potentially our unblocker overtook
+		// us after we already failed. In either case, we've got the lock, now.
+		return B_OK;
+	}
+
+	// We failed to get the lock -- dequeue from waiter list.
+	rw_lock_waiter* previous = NULL;
+	rw_lock_waiter* other = lock->waiters;
+	while (other != &waiter) {
+		previous = other;
+		other = other->next;
+	}
+
+	if (previous == NULL) {
+		// we are the first in line
+		lock->waiters = waiter.next;
+		if (lock->waiters != NULL)
+			lock->waiters->last = waiter.last;
+	} else {
+		// one or more other waiters are before us in the queue
+		previous->next = waiter.next;
+		if (lock->waiters->last == &waiter)
+			lock->waiters->last = previous;
+	}
+
+	// Decrement the count. ATM this is all we have to do. There's at least
+	// one writer ahead of us -- otherwise the last writer would have unblocked
+	// us (writers only manipulate the lock data with thread spinlock being
+	// held) -- so our leaving doesn't make a difference to the ones behind us
+	// in the queue.
+	atomic_add(&lock->count, -1);
+	return error;
+}
+
+
 void
 _rw_lock_read_unlock(rw_lock* lock, bool threadsLocked)
 {
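
Two details of the userland build variant worth noting. First, _kern_block_thread() is simply restarted on B_INTERRUPTED, so signals never surface as spurious locking failures; only the final status is evaluated. Second, shared with the kernel version: the holder check at the top lets the timed read lock nest under a write lock held by the same thread, returning immediately after bumping owner_count instead of blocking on its own write lock. A hypothetical usage sketch (sLock is a made-up rw_lock; not code from the commit):

	rw_lock_write_lock(&sLock);

	// Succeeds at once: we are the writer ourselves, so only
	// owner_count is incremented.
	status_t error = rw_lock_read_lock_with_timeout(&sLock,
		B_RELATIVE_TIMEOUT, 1000);
	if (error == B_OK)
		rw_lock_read_unlock(&sLock);

	rw_lock_write_unlock(&sLock);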