/*
 * Copyright 2007-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2019, Haiku, Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 */
#ifndef _KERNEL_CONDITION_VARIABLE_H
#define _KERNEL_CONDITION_VARIABLE_H


#include <OS.h>

#include <debug.h>

#ifdef __cplusplus

#include <util/DoublyLinkedList.h>
#include <util/OpenHashTable.h>


struct ConditionVariable;


struct ConditionVariableEntry
	: DoublyLinkedListLinkImpl<ConditionVariableEntry> {
public:
	ConditionVariableEntry();
	~ConditionVariableEntry();

	bool Add(const void* object);
	status_t Wait(uint32 flags = 0, bigtime_t timeout = 0);
	status_t Wait(const void* object, uint32 flags = 0,
		bigtime_t timeout = 0);

	inline status_t WaitStatus() const { return fWaitStatus; }

	inline ConditionVariable* Variable() const { return fVariable; }

private:
	inline void _AddToLockedVariable(ConditionVariable* variable);
	void _RemoveFromVariable();

private:
	spinlock fLock;
	ConditionVariable* fVariable;
	Thread* fThread;
	status_t fWaitStatus;

	friend struct ConditionVariable;
};
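
// Illustrative usage sketch, not part of the original header: the usual wait
// pattern with a ConditionVariableEntry. Registering the entry via Add()
// while the lock protecting the waited-for state is still held is what avoids
// lost wakeups; "sLock", "sBusy" and "object" are hypothetical names, and
// mutex_lock()/mutex_unlock() stand in for whatever lock guards the state.
//
//	ConditionVariableEntry entry;
//	mutex_lock(&sLock);
//	while (sBusy) {
//		entry.Add(object);
//			// attach to the variable published for "object"
//		mutex_unlock(&sLock);
//		entry.Wait();
//		mutex_lock(&sLock);
//	}
//	mutex_unlock(&sLock);
//
// When no lock juggling is needed, the Wait(object, flags, timeout) overload
// combines the Add() and Wait() steps into one call.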


struct ConditionVariable {
public:
	void Init(const void* object, const char* objectType);
		// for anonymous (unpublished) cvars

	void Publish(const void* object, const char* objectType);
	void Unpublish();

	inline void NotifyOne(status_t result = B_OK);
	inline void NotifyAll(status_t result = B_OK);

	static void NotifyOne(const void* object, status_t result);
	static void NotifyAll(const void* object, status_t result);
		// (both methods) the caller must ensure that the variable
		// is not unpublished concurrently

	void Add(ConditionVariableEntry* entry);

	status_t Wait(uint32 flags = 0, bigtime_t timeout = 0);
		// all-in-one, i.e. doesn't need a ConditionVariableEntry

	const void* Object() const { return fObject; }
	const char* ObjectType() const { return fObjectType; }

	static void ListAll();
	void Dump() const;

private:
	void _Notify(bool all, status_t result);
	void _NotifyLocked(bool all, status_t result);

protected:
	typedef DoublyLinkedList<ConditionVariableEntry> EntryList;

	const void* fObject;
	const char* fObjectType;

	spinlock fLock;
	EntryList fEntries;
	ConditionVariable* fNext;

	friend struct ConditionVariableEntry;
	friend struct ConditionVariableHashDefinition;
};
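
// Illustrative usage sketch, not part of the original header: the notifier
// side for a published variable. The object pointer, the "busy resource"
// type string and the surrounding code are hypothetical.
//
//	ConditionVariable condition;
//	condition.Publish(object, "busy resource");
//	...
//	condition.NotifyAll();
//		// wakes all current waiters with B_OK
//	...
//	condition.Unpublish();
//		// removes it from the published set again
//
// The static NotifyOne(object, result)/NotifyAll(object, result) variants
// look the published variable up by object pointer instead; as noted above,
// the caller must make sure the variable is not unpublished concurrently.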


inline void
ConditionVariable::NotifyOne(status_t result)
{
	_Notify(false, result);
}


inline void
ConditionVariable::NotifyAll(status_t result)
{
	_Notify(true, result);
}


extern "C" {
#endif	// __cplusplus

extern void condition_variable_init();

#ifdef __cplusplus
}	// extern "C"
#endif

#endif	/* _KERNEL_CONDITION_VARIABLE_H */