kernel/condition_variable: Unblock earlier and simplify the code more.
Unblocking after unsetting fVariable just causes too many headaches and corner cases to deal with; the code as-is did not actually handle all of them, as it missed the case where the entry thread had called thread_prepare_to_block but had not yet actually blocked. Hopefully the last fix for #17444.
commit 7ce3c9283e
parent acdd232ad5
@@ -400,23 +400,15 @@ ConditionVariable::_NotifyLocked(bool all, status_t result)
 		} else {
 			const status_t waitStatus = atomic_get_and_set(&entry->fWaitStatus, result);
 
-			// Prevent the thread from changing status after we unset its fVariable,
-			// as otherwise it could become unblocked (or may already be so) and then
-			// re-block itself on something else before we call thread_unblock.
-			SpinLocker threadLocker(thread->scheduler_lock);
+			// We need not waste time trying to unblock the thread if it already unblocked.
+			if (waitStatus == STATUS_WAITING && thread_is_blocked(thread))
+				thread_unblock(thread, result);
 
 			// No matter what the thread is doing, as we were the ones to clear its
 			// fThread, so we are the ones responsible for decrementing fEntriesCount.
 			// (We may not validly access the entry once we unset its fVariable.)
 			atomic_pointer_set(&entry->fVariable, (ConditionVariable*)NULL);
 			atomic_add(&fEntriesCount, -1);
-
-			// Do this after unsetting fVariable, as in case the entry wakes up
-			// and tries to remove itself, it need not not have to wait for us.
-			// (We check thread->state here as it cannot change while we hold
-			// the scheduler lock, unlike thread_is_blocked, which can!)
-			if (waitStatus == STATUS_WAITING && thread->state == B_THREAD_WAITING)
-				thread_unblock_locked(thread, result);
 		}
 
 		if (!all)