kernel/locks: Remove ignore_unlock_count and fix races in lock timeout.

As far as I can tell, there is no reason to ignore unlocks, ever;
if no threads are waiting, then mutex_unlock() will act appropriately.
So all we need to do here is increment the lock's count, since
we are relinquishing our request to acquire it.
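
For illustration, the non-KDEBUG fast path works roughly like this
(a minimal sketch, not the actual Haiku code; it assumes atomic_add()
returns the previous value, as Haiku's does):

    // Sketch only: count == 0 means free; each locker decrements it,
    // so a previous value below 0 means the lock was already held.
    static inline void sketch_mutex_lock(mutex* lock)
    {
        if (atomic_add(&lock->count, -1) < 0) {
            // contended: enqueue a waiter and block (elided)
        }
    }

    static inline void sketch_mutex_unlock(mutex* lock)
    {
        if (atomic_add(&lock->count, 1) < -1) {
            // waiters exist: dequeue and unblock one (elided)
        }
    }

Under this model, a thread that gives up waiting only needs to undo
its own decrement; no "ignore the next unlock" bookkeeping is needed.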

On the other hand, if we did not find our waiter structure in the
lock's list, that means we now own the lock; returning an error from
here without changing the count would result in a deadlock, as the
lock would be ours despite the error code implying otherwise.
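
The timeout path thus has to distinguish two outcomes. The sketch
below mirrors the structure of _mutex_lock_with_timeout(), with
waiter_in_list() and remove_waiter() as hypothetical helpers standing
in for the real list manipulation:

    // Sketch only: cleanup after waking with a timeout error.
    if (waiter_in_list(lock, &waiter)) {
        // We really timed out: dequeue ourselves and give back our
        // count decrement, then report the timeout.
        remove_waiter(lock, &waiter);
        atomic_add(&lock->count, 1);
        return error;
    }
    // Our waiter was already dequeued: an unlock handed us the lock
    // between the timeout firing and us running. Returning the error
    // now would leave the mutex locked forever, so report success.
    return B_OK;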

Additionally, take care of part of the case where we are woken up
by mutex_destroy(), by setting the waiter's thread to NULL and
checking for that after waking. There is still a race here, however.
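
The hand-shake between the two sides looks roughly like this (a
sketch of the intent, not the literal diff below):

    // Destroyer side (in mutex_destroy(), for each waiter):
    Thread* thread = waiter->thread;
    waiter->thread = NULL;            // mark "this lock is going away"
    thread_unblock(thread, B_ERROR);

    // Waiter side, after waking with an error:
    if (waiter.thread == NULL)
        return B_ERROR;               // destroyed; don't touch the lock
    // Remaining race: a timeout wake-up can run before the destroyer
    // sets the pointer to NULL, so we may still touch a dying lock.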

May fix #16044, as it appears there is a case where ACPICA
calls this with a timeout of 0 (we should make this a
mutex_trylock anyway).
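
A zero timeout is semantically a try-lock; a hypothetical wrapper
(name invented here, assuming the existing mutex_lock_with_timeout()
API) would be:

    // Sketch only: try to acquire without blocking.
    static inline status_t acpica_mutex_trylock(mutex* lock)
    {
        return mutex_lock_with_timeout(lock, B_RELATIVE_TIMEOUT, 0);
    }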

Change-Id: I98215df218514c70ac1922bc3a6f10e01087e44b
Reviewed-on: https://review.haiku-os.org/c/haiku/+/2716
Reviewed-by: waddlesplash <waddlesplash@gmail.com>
Author: Augustin Cavalier, 2020-05-16 17:18:08 -04:00 (committed by waddlesplash)
Commit: fd161d7bf2 (parent: 785518beef)
2 changed files with 19 additions and 20 deletions

headers/private/kernel/lock.h

@@ -24,10 +24,8 @@ typedef struct mutex {
 	spinlock			lock;
 #if KDEBUG
 	thread_id			holder;
-	uint16				_unused;
 #else
 	int32				count;
-	uint16				ignore_unlock_count;
 #endif
 	uint8				flags;
 } mutex;

src/system/kernel/locks/lock.cpp

@@ -609,7 +609,6 @@ mutex_init_etc(mutex* lock, const char *name, uint32 flags)
 	lock->holder = -1;
 #else
 	lock->count = 0;
-	lock->ignore_unlock_count = 0;
 #endif
 	lock->flags = flags & MUTEX_FLAG_CLONE_NAME;
@@ -642,7 +641,9 @@ mutex_destroy(mutex* lock)
 		lock->waiters = waiter->next;
 		// unblock thread
-		thread_unblock(waiter->thread, B_ERROR);
+		Thread* thread = waiter->thread;
+		waiter->thread = NULL;
+		thread_unblock(thread, B_ERROR);
 	}
 	lock->name = NULL;
@@ -801,11 +802,6 @@ _mutex_unlock(mutex* lock)
 			thread_get_current_thread_id(), lock, lock->holder);
 		return;
 	}
-#else
-	if (lock->ignore_unlock_count > 0) {
-		lock->ignore_unlock_count--;
-		return;
-	}
 #endif
 	mutex_waiter* waiter = lock->waiters;
@@ -826,7 +822,7 @@ _mutex_unlock(mutex* lock)
 		// unblock thread
 		thread_unblock(waiter->thread, B_OK);
 	} else {
-		// Nobody is waiting to acquire this lock. Just mark it as released.
+		// There are no waiters, so mark the lock as released.
 #if KDEBUG
 		lock->holder = -1;
 #else
@@ -907,6 +903,13 @@ _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
 		ASSERT(lock->holder == waiter.thread->id);
 #endif
 	} else {
+		// If the lock was destroyed, our "thread" entry will be NULL.
+		if (waiter.thread == NULL)
+			return B_ERROR;
+
+		// TODO: There is still a race condition during mutex destruction,
+		// if we resume due to a timeout before our thread is set to NULL.
+
 		locker.Lock();
 		// If the timeout occurred, we must remove our waiter structure from
@@ -931,17 +934,15 @@ _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
 #if !KDEBUG
 			// we need to fix the lock count
-			if (atomic_add(&lock->count, 1) == -1) {
-				// This means we were the only thread waiting for the lock and
-				// the lock owner has already called atomic_add() in
-				// mutex_unlock(). That is we probably would get the lock very
-				// soon (if the lock holder has a low priority, that might
-				// actually take rather long, though), but the timeout already
-				// occurred, so we don't try to wait. Just increment the ignore
-				// unlock count.
-				lock->ignore_unlock_count++;
-			}
+			atomic_add(&lock->count, 1);
 #endif
 		} else {
 			// the structure is not in the list -- even though the timeout
 			// occurred, this means we own the lock now
 #if KDEBUG
 			ASSERT(lock->holder == waiter.thread->id);
 #endif
 			return B_OK;
 		}
 	}