user_mutex: Per-team contexts.

This requires introducing a new flag, B_USER_MUTEX_SHARED, and then
actually checking the SHARED flags in the pthread structures to determine
when it should be passed through.

This commit still uses wired memory even for per-team contexts.
That will change in the next commit.

GLTeapot FPS seems about the same.
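
For context, this is the sort of userland pattern that now takes the
shared path. A minimal sketch, not part of this commit; the mmap/fork
setup is plain POSIX and the strings are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int
main(void)
{
	/* Place the mutex in memory visible to both parent and child. */
	pthread_mutex_t* mutex = mmap(NULL, sizeof(pthread_mutex_t),
		PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (mutex == MAP_FAILED)
		return 1;

	pthread_mutexattr_t attr;
	pthread_mutexattr_init(&attr);
	/* PROCESS_SHARED sets MUTEX_FLAG_SHARED, so libroot passes
	 * B_USER_MUTEX_SHARED to the kernel and the mutex is tracked in the
	 * global context; all other mutexes now get a per-team context. */
	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
	pthread_mutex_init(mutex, &attr);

	if (fork() == 0) {
		pthread_mutex_lock(mutex);
		puts("child holds the shared mutex");
		pthread_mutex_unlock(mutex);
		return 0;
	}

	pthread_mutex_lock(mutex);
	puts("parent holds the shared mutex");
	pthread_mutex_unlock(mutex);
	wait(NULL);
	return 0;
}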

Change-Id: I749a00dcea1531e113a65299b6d6610f57511fcc
Reviewed-on: https://review.haiku-os.org/c/haiku/+/6602
Reviewed-by: waddlesplash <waddlesplash@gmail.com>
Tested-by: Commit checker robot <no-reply+buildbot@haiku-os.org>
Augustin Cavalier, 2023-06-13 12:43:00 -04:00 (committed by waddlesplash)
parent 0ab9f280ec
commit 93d7d1c52c
10 changed files with 193 additions and 71 deletions

headers/private/kernel/thread_types.h

@@ -65,6 +65,7 @@ struct realtime_sem_context; // defined in realtime_sem.cpp
struct select_info;
struct user_thread; // defined in libroot/user_thread.h
struct VMAddressSpace;
struct user_mutex_context; // defined in user_mutex.cpp
struct xsi_sem_context; // defined in xsi_semaphore.cpp
namespace Scheduler {
@@ -240,6 +241,7 @@ struct Team : TeamThreadIteratorEntry<team_id>, KernelReferenceable,
int state; // current team state, see above
int32 flags;
struct io_context *io_context;
struct user_mutex_context *user_mutex_context;
struct realtime_sem_context *realtime_sem_context;
struct xsi_sem_context *xsi_sem_context;
struct team_death_entry *death_entry; // protected by fLock

headers/private/kernel/user_mutex.h

@@ -13,13 +13,16 @@
extern "C" {
#endif
struct user_mutex_context;
void user_mutex_init();
void delete_user_mutex_context(struct user_mutex_context* context);
status_t _user_mutex_lock(int32* mutex, const char* name, uint32 flags,
bigtime_t timeout);
status_t _user_mutex_unblock(int32* mutex, uint32 flags);
status_t _user_mutex_switch_lock(int32* fromMutex, int32* toMutex,
const char* name, uint32 flags, bigtime_t timeout);
status_t _user_mutex_switch_lock(int32* fromMutex, uint32 fromFlags,
int32* toMutex, const char* name, uint32 toFlags, bigtime_t timeout);
status_t _user_mutex_sem_acquire(int32* sem, const char* name, uint32 flags,
bigtime_t timeout);
status_t _user_mutex_sem_release(int32* sem);
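
Note the new shape of _user_mutex_switch_lock: the mutex being released
and the one being waited on each carry their own flags, because only one
of the two may live in shared memory. A minimal sketch of a caller on the
userland side (hypothetical helper, assuming the private <syscalls.h> and
<user_mutex_defs.h>; the real pattern is in pthread_cond.cpp below):

static status_t
switch_wait(int32* from, bool fromShared, int32* to, bool toShared,
	bigtime_t timeout)
{
	/* Atomically unlock `from` and block on `to`; each word carries its
	 * own SHARED flag so the kernel looks it up in the right context. */
	const uint32 fromFlags = fromShared ? B_USER_MUTEX_SHARED : 0;
	const uint32 toFlags = toShared ? B_USER_MUTEX_SHARED : 0;
	return _kern_mutex_switch_lock(from, fromFlags, to, "switch wait",
		toFlags, timeout);
}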

headers/private/libroot/pthread_private.h

@@ -19,6 +19,9 @@
#define THREAD_CANCEL_ENABLED 0x08
#define THREAD_CANCEL_ASYNCHRONOUS 0x10
// _pthread_mutex::flags values
#define MUTEX_FLAG_SHARED 0x80000000
struct thread_creation_attributes;

headers/private/system/syscalls.h

@@ -78,8 +78,9 @@ extern ssize_t _kern_wait_for_objects(object_wait_info* infos, int numInfos,
extern status_t _kern_mutex_lock(int32* mutex, const char* name,
uint32 flags, bigtime_t timeout);
extern status_t _kern_mutex_unblock(int32* mutex, uint32 flags);
extern status_t _kern_mutex_switch_lock(int32* fromMutex, int32* toMutex,
const char* name, uint32 flags, bigtime_t timeout);
extern status_t _kern_mutex_switch_lock(int32* fromMutex, uint32 fromFlags,
int32* toMutex, const char* name, uint32 toFlags,
bigtime_t timeout);
extern status_t _kern_mutex_sem_acquire(int32* sem, const char* name,
uint32 flags, bigtime_t timeout);
extern status_t _kern_mutex_sem_release(int32* sem);

headers/private/system/user_mutex_defs.h

@@ -6,7 +6,9 @@
#define _SYSTEM_USER_MUTEX_DEFS_H
// user mutex specific flags passed to _kern_mutex_unblock()
// flags passed to _kern_mutex_{un}block
#define B_USER_MUTEX_SHARED 0x40000000
// Mutex is in shared memory.
#define B_USER_MUTEX_UNBLOCK_ALL 0x80000000
// All threads currently waiting on the mutex will be unblocked. The mutex
// state will be locked.
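
Both values are plain bit flags, OR'd into the same word as the kernel's
wait flags (such as B_CAN_INTERRUPT in the syscall wrappers elsewhere in
this commit). A trivial illustration, with hypothetical parameters:

static uint32
compose_mutex_flags(bool shared, bool wakeAll)
{
	uint32 flags = 0;
	if (shared)
		flags |= B_USER_MUTEX_SHARED;		/* global shared context */
	if (wakeAll)
		flags |= B_USER_MUTEX_UNBLOCK_ALL;	/* wake every waiter */
	return flags;
}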

src/system/kernel/locks/user_mutex.cpp

@@ -16,6 +16,7 @@
#include <smp.h>
#include <syscall_restart.h>
#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>
#include <util/OpenHashTable.h>
#include <vm/vm.h>
#include <vm/VMArea.h>
@@ -29,7 +30,7 @@
* during unblock, and they can thus safely (without races) unset WAITING.
*/
struct UserMutexEntry {
phys_addr_t address;
generic_addr_t address;
UserMutexEntry* hash_next;
int32 ref_count;
@@ -38,10 +39,10 @@ struct UserMutexEntry {
};
struct UserMutexHashDefinition {
typedef phys_addr_t KeyType;
typedef generic_addr_t KeyType;
typedef UserMutexEntry ValueType;
size_t HashKey(phys_addr_t key) const
size_t HashKey(generic_addr_t key) const
{
return key >> 2;
}
@@ -51,7 +52,7 @@ struct UserMutexHashDefinition {
return HashKey(value->address);
}
bool Compare(phys_addr_t key, const UserMutexEntry* value) const
bool Compare(generic_addr_t key, const UserMutexEntry* value) const
{
return value->address == key;
}
@@ -65,8 +66,11 @@ struct UserMutexHashDefinition {
typedef BOpenHashTable<UserMutexHashDefinition> UserMutexTable;
static UserMutexTable sUserMutexTable;
static rw_lock sUserMutexTableLock = RW_LOCK_INITIALIZER("user mutex table");
struct user_mutex_context {
UserMutexTable table;
rw_lock lock;
};
static user_mutex_context sSharedUserMutexContext;
// #pragma mark - user atomics
@@ -112,17 +116,67 @@ user_atomic_test_and_set(int32* value, int32 newValue, int32 testAgainst)
}
// #pragma mark - user mutex entries
// #pragma mark - user mutex context
void
user_mutex_init()
{
sSharedUserMutexContext.lock = RW_LOCK_INITIALIZER("shared user mutex table");
if (sSharedUserMutexContext.table.Init() != B_OK)
panic("user_mutex_init(): Failed to init table!");
}
struct user_mutex_context*
get_team_user_mutex_context()
{
struct user_mutex_context* context =
thread_get_current_thread()->team->user_mutex_context;
if (context != NULL)
return context;
Team* team = thread_get_current_thread()->team;
TeamLocker teamLocker(team);
if (team->user_mutex_context != NULL)
return team->user_mutex_context;
context = new(std::nothrow) user_mutex_context;
if (context == NULL)
return NULL;
context->lock = RW_LOCK_INITIALIZER("user mutex table");
if (context->table.Init() != B_OK) {
delete context;
return NULL;
}
team->user_mutex_context = context;
return context;
}
void
delete_user_mutex_context(struct user_mutex_context* context)
{
if (context == NULL)
return;
// This should be empty at this point in team destruction.
ASSERT(context->table.IsEmpty());
delete context;
}
static UserMutexEntry*
get_user_mutex_entry(phys_addr_t address, bool noInsert = false, bool alreadyLocked = false)
get_user_mutex_entry(struct user_mutex_context* context,
generic_addr_t address, bool noInsert = false, bool alreadyLocked = false)
{
ReadLocker tableReadLocker;
if (!alreadyLocked)
tableReadLocker.SetTo(sUserMutexTableLock, false);
tableReadLocker.SetTo(context->lock, false);
UserMutexEntry* entry = sUserMutexTable.Lookup(address);
UserMutexEntry* entry = context->table.Lookup(address);
if (entry != NULL) {
atomic_add(&entry->ref_count, 1);
return entry;
@@ -130,9 +184,9 @@ get_user_mutex_entry(phys_addr_t address, bool noInsert = false, bool alreadyLoc
return entry;
tableReadLocker.Unlock();
WriteLocker tableWriteLocker(sUserMutexTableLock);
WriteLocker tableWriteLocker(context->lock);
entry = sUserMutexTable.Lookup(address);
entry = context->table.Lookup(address);
if (entry != NULL) {
atomic_add(&entry->ref_count, 1);
return entry;
@@ -147,32 +201,32 @@ get_user_mutex_entry(phys_addr_t address, bool noInsert = false, bool alreadyLoc
rw_lock_init(&entry->lock, "UserMutexEntry lock");
entry->condition.Init(entry, "UserMutexEntry");
sUserMutexTable.Insert(entry);
context->table.Insert(entry);
return entry;
}
static void
put_user_mutex_entry(UserMutexEntry* entry)
put_user_mutex_entry(struct user_mutex_context* context, UserMutexEntry* entry)
{
if (entry == NULL)
return;
const phys_addr_t address = entry->address;
const generic_addr_t address = entry->address;
if (atomic_add(&entry->ref_count, -1) != 1)
return;
WriteLocker tableWriteLocker(sUserMutexTableLock);
WriteLocker tableWriteLocker(context->lock);
// Was it removed & deleted while we were waiting for the lock?
if (sUserMutexTable.Lookup(address) != entry)
if (context->table.Lookup(address) != entry)
return;
// Or did someone else acquire a reference to it?
if (atomic_get(&entry->ref_count) > 0)
return;
sUserMutexTable.Remove(entry);
context->table.Remove(entry);
tableWriteLocker.Unlock();
rw_lock_destroy(&entry->lock);
@@ -314,6 +368,16 @@ user_mutex_sem_release(UserMutexEntry* entry, int32* sem)
static status_t
user_mutex_lock(int32* mutex, const char* name, uint32 flags, bigtime_t timeout)
{
struct user_mutex_context* context;
if ((flags & B_USER_MUTEX_SHARED) == 0) {
context = get_team_user_mutex_context();
if (context == NULL)
return B_NO_MEMORY;
} else {
context = &sSharedUserMutexContext;
}
// wire the page and get the physical address
VMPageWiringInfo wiringInfo;
status_t error = vm_wire_page(B_CURRENT_TEAM, (addr_t)mutex, true,
@@ -322,7 +386,7 @@ user_mutex_lock(int32* mutex, const char* name, uint32 flags, bigtime_t timeout)
return error;
// get the lock
UserMutexEntry* entry = get_user_mutex_entry(wiringInfo.physicalAddress);
UserMutexEntry* entry = get_user_mutex_entry(context, wiringInfo.physicalAddress);
if (entry == NULL)
return B_NO_MEMORY;
{
@@ -330,7 +394,7 @@ user_mutex_lock(int32* mutex, const char* name, uint32 flags, bigtime_t timeout)
error = user_mutex_lock_locked(entry, mutex,
flags, timeout, entryLocker);
}
put_user_mutex_entry(entry);
put_user_mutex_entry(context, entry);
// unwire the page
vm_unwire_page(&wiringInfo);
@@ -340,9 +404,27 @@ user_mutex_lock(int32* mutex, const char* name, uint32 flags, bigtime_t timeout)
static status_t
user_mutex_switch_lock(int32* fromMutex, int32* toMutex, const char* name,
uint32 flags, bigtime_t timeout)
user_mutex_switch_lock(int32* fromMutex, uint32 fromFlags,
int32* toMutex, const char* name, uint32 toFlags, bigtime_t timeout)
{
struct user_mutex_context* fromContext, *toContext;
if ((fromFlags & B_USER_MUTEX_SHARED) == 0) {
fromContext = get_team_user_mutex_context();
if (fromContext == NULL)
return B_NO_MEMORY;
} else {
fromContext = &sSharedUserMutexContext;
}
if ((toFlags & B_USER_MUTEX_SHARED) == 0) {
toContext = get_team_user_mutex_context();
if (toContext == NULL)
return B_NO_MEMORY;
} else {
toContext = &sSharedUserMutexContext;
}
// wire the pages and get the physical addresses
VMPageWiringInfo fromWiringInfo;
status_t error = vm_wire_page(B_CURRENT_TEAM, (addr_t)fromMutex, true,
@@ -359,7 +441,7 @@ user_mutex_switch_lock(int32* fromMutex, int32* toMutex, const char* name,
// unlock the first mutex and lock the second one
UserMutexEntry* fromEntry = NULL,
*toEntry = get_user_mutex_entry(toWiringInfo.physicalAddress);
*toEntry = get_user_mutex_entry(toContext, toWiringInfo.physicalAddress);
if (toEntry == NULL)
return B_NO_MEMORY;
{
@@ -375,15 +457,15 @@ user_mutex_switch_lock(int32* fromMutex, int32* toMutex, const char* name,
const int32 oldValue = user_atomic_and(fromMutex, ~(int32)B_USER_MUTEX_LOCKED);
if ((oldValue & B_USER_MUTEX_WAITING) != 0)
fromEntry = get_user_mutex_entry(fromWiringInfo.physicalAddress, true);
fromEntry = get_user_mutex_entry(fromContext, fromWiringInfo.physicalAddress, true);
if (fromEntry != NULL)
user_mutex_unblock(fromEntry, fromMutex, flags);
user_mutex_unblock(fromEntry, fromMutex, fromFlags);
if (!alreadyLocked)
error = waiter.Wait(flags, timeout);
error = waiter.Wait(toFlags, timeout);
}
put_user_mutex_entry(fromEntry);
put_user_mutex_entry(toEntry);
put_user_mutex_entry(fromContext, fromEntry);
put_user_mutex_entry(toContext, toEntry);
// unwire the pages
vm_unwire_page(&toWiringInfo);
@@ -393,17 +475,6 @@ user_mutex_switch_lock(int32* fromMutex, int32* toMutex, const char* name,
}
// #pragma mark - kernel private
void
user_mutex_init()
{
if (sUserMutexTable.Init() != B_OK)
panic("user_mutex_init(): Failed to init table!");
}
// #pragma mark - syscalls
@@ -429,6 +500,16 @@ _user_mutex_unblock(int32* mutex, uint32 flags)
if (mutex == NULL || !IS_USER_ADDRESS(mutex) || (addr_t)mutex % 4 != 0)
return B_BAD_ADDRESS;
struct user_mutex_context* context;
if ((flags & B_USER_MUTEX_SHARED) == 0) {
context = get_team_user_mutex_context();
if (context == NULL)
return B_NO_MEMORY;
} else {
context = &sSharedUserMutexContext;
}
// wire the page and get the physical address
VMPageWiringInfo wiringInfo;
status_t error = vm_wire_page(B_CURRENT_TEAM, (addr_t)mutex, true,
@@ -438,8 +519,8 @@ _user_mutex_unblock(int32* mutex, uint32 flags)
// In the case where there is no entry, we must hold the read lock until we
// unset WAITING, because otherwise some other thread could initiate a wait.
ReadLocker tableReadLocker(sUserMutexTableLock);
UserMutexEntry* entry = get_user_mutex_entry(wiringInfo.physicalAddress, true, true);
ReadLocker tableReadLocker(context->lock);
UserMutexEntry* entry = get_user_mutex_entry(context, wiringInfo.physicalAddress, true, true);
if (entry == NULL) {
user_atomic_and(mutex, ~(int32)B_USER_MUTEX_WAITING);
tableReadLocker.Unlock();
@@ -447,7 +528,7 @@ _user_mutex_unblock(int32* mutex, uint32 flags)
tableReadLocker.Unlock();
user_mutex_unblock(entry, mutex, flags);
}
put_user_mutex_entry(entry);
put_user_mutex_entry(context, entry);
vm_unwire_page(&wiringInfo);
@@ -456,8 +537,8 @@ _user_mutex_unblock(int32* mutex, uint32 flags)
status_t
_user_mutex_switch_lock(int32* fromMutex, int32* toMutex, const char* name,
uint32 flags, bigtime_t timeout)
_user_mutex_switch_lock(int32* fromMutex, uint32 fromFlags,
int32* toMutex, const char* name, uint32 toFlags, bigtime_t timeout)
{
if (fromMutex == NULL || !IS_USER_ADDRESS(fromMutex)
|| (addr_t)fromMutex % 4 != 0 || toMutex == NULL
@@ -465,8 +546,8 @@ _user_mutex_switch_lock(int32* fromMutex, int32* toMutex, const char* name,
return B_BAD_ADDRESS;
}
return user_mutex_switch_lock(fromMutex, toMutex, name,
flags | B_CAN_INTERRUPT, timeout);
return user_mutex_switch_lock(fromMutex, fromFlags, toMutex, name,
toFlags | B_CAN_INTERRUPT, timeout);
}
@@ -479,6 +560,11 @@ _user_mutex_sem_acquire(int32* sem, const char* name, uint32 flags,
syscall_restart_handle_timeout_pre(flags, timeout);
struct user_mutex_context* context;
// TODO: use the per-team context when possible
context = &sSharedUserMutexContext;
// wire the page and get the physical address
VMPageWiringInfo wiringInfo;
status_t error = vm_wire_page(B_CURRENT_TEAM, (addr_t)sem, true,
@@ -486,7 +572,7 @@ _user_mutex_sem_acquire(int32* sem, const char* name, uint32 flags,
if (error != B_OK)
return error;
UserMutexEntry* entry = get_user_mutex_entry(wiringInfo.physicalAddress);
UserMutexEntry* entry = get_user_mutex_entry(context, wiringInfo.physicalAddress);
if (entry == NULL)
return B_NO_MEMORY;
{
@@ -494,7 +580,7 @@ _user_mutex_sem_acquire(int32* sem, const char* name, uint32 flags,
error = user_mutex_sem_acquire_locked(entry, sem,
flags | B_CAN_INTERRUPT, timeout, entryLocker);
}
put_user_mutex_entry(entry);
put_user_mutex_entry(context, entry);
vm_unwire_page(&wiringInfo);
return syscall_restart_handle_timeout_post(error, timeout);
@@ -507,6 +593,11 @@ _user_mutex_sem_release(int32* sem)
if (sem == NULL || !IS_USER_ADDRESS(sem) || (addr_t)sem % 4 != 0)
return B_BAD_ADDRESS;
struct user_mutex_context* context;
// TODO: use the per-team context when possible
context = &sSharedUserMutexContext;
// wire the page and get the physical address
VMPageWiringInfo wiringInfo;
status_t error = vm_wire_page(B_CURRENT_TEAM, (addr_t)sem, true,
@@ -514,11 +605,12 @@ _user_mutex_sem_release(int32* sem)
if (error != B_OK)
return error;
UserMutexEntry* entry = get_user_mutex_entry(wiringInfo.physicalAddress, true);
UserMutexEntry* entry = get_user_mutex_entry(context,
wiringInfo.physicalAddress, true);
{
user_mutex_sem_release(entry, sem);
}
put_user_mutex_entry(entry);
put_user_mutex_entry(context, entry);
vm_unwire_page(&wiringInfo);
return B_OK;
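
The shared-versus-per-team selection above is repeated verbatim in each
syscall; a possible factoring within user_mutex.cpp (a sketch only, not
something this commit does):

static struct user_mutex_context*
get_context_for(uint32 flags)
{
	// Shared mutexes live in the global context; everything else uses
	// the team's context, created on first use. Returns NULL on
	// allocation failure, which callers translate to B_NO_MEMORY.
	if ((flags & B_USER_MUTEX_SHARED) != 0)
		return &sSharedUserMutexContext;
	return get_team_user_mutex_context();
}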

src/system/kernel/team.cpp

@@ -51,6 +51,7 @@
#include <syscalls.h>
#include <tls.h>
#include <tracing.h>
#include <user_mutex.h>
#include <user_runtime.h>
#include <user_thread.h>
#include <usergroup.h>
@@ -440,6 +441,7 @@ Team::Team(team_id id, bool kernel)
state = TEAM_STATE_BIRTH;
flags = 0;
io_context = NULL;
user_mutex_context = NULL;
realtime_sem_context = NULL;
xsi_sem_context = NULL;
death_entry = NULL;
@@ -2026,6 +2028,8 @@ exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
sem_delete_owned_sems(team);
remove_images(team);
vfs_exec_io_context(team->io_context);
delete_user_mutex_context(team->user_mutex_context);
team->user_mutex_context = NULL;
delete_realtime_sem_context(team->realtime_sem_context);
team->realtime_sem_context = NULL;
@@ -3343,6 +3347,7 @@ team_delete_team(Team* team, port_id debuggerPort)
// free team resources
delete_user_mutex_context(team->user_mutex_context);
delete_realtime_sem_context(team->realtime_sem_context);
xsi_sem_undo(team);
remove_images(team);

src/system/libroot/posix/pthread/pthread_barrier.cpp

@@ -46,13 +46,13 @@ pthread_barrier_init(pthread_barrier_t* barrier,
static status_t
barrier_lock(__haiku_std_int32* mutex)
barrier_lock(__haiku_std_int32* mutex, uint32 flags)
{
const int32 oldValue = atomic_test_and_set((int32*)mutex, B_USER_MUTEX_LOCKED, 0);
if (oldValue != 0) {
status_t error;
do {
error = _kern_mutex_lock((int32*)mutex, NULL, 0, 0);
error = _kern_mutex_lock((int32*)mutex, NULL, flags, 0);
} while (error == B_INTERRUPTED);
if (error != B_OK)
@@ -63,26 +63,28 @@ barrier_lock(__haiku_std_int32* mutex)
static void
barrier_unlock(__haiku_std_int32* mutex)
barrier_unlock(__haiku_std_int32* mutex, uint32 flags)
{
int32 oldValue = atomic_and((int32*)mutex,
~(int32)B_USER_MUTEX_LOCKED);
if ((oldValue & B_USER_MUTEX_WAITING) != 0)
_kern_mutex_unblock((int32*)mutex, 0);
_kern_mutex_unblock((int32*)mutex, flags);
}
static void
barrier_ensure_idle(pthread_barrier_t* barrier)
{
const uint32 flags = (barrier->flags & BARRIER_FLAG_SHARED) ? B_USER_MUTEX_SHARED : 0;
// waiter_count < 0 means other threads are still exiting.
// Loop (usually only one iteration needed) until this is no longer the case.
while (atomic_get((int32*)&barrier->waiter_count) < 0) {
status_t status = barrier_lock(&barrier->mutex);
status_t status = barrier_lock(&barrier->mutex, flags);
if (status != B_OK)
return;
barrier_unlock(&barrier->mutex);
barrier_unlock(&barrier->mutex, flags);
}
}
@@ -96,15 +98,16 @@ pthread_barrier_wait(pthread_barrier_t* barrier)
if (barrier->waiter_max == 1)
return PTHREAD_BARRIER_SERIAL_THREAD;
const uint32 mutexFlags = (barrier->flags & BARRIER_FLAG_SHARED) ? B_USER_MUTEX_SHARED : 0;
barrier_ensure_idle(barrier);
if (atomic_add((int32*)&barrier->waiter_count, 1) == (barrier->waiter_max - 1)) {
// We are the last one in. Lock the barrier mutex.
barrier_lock(&barrier->mutex);
barrier_lock(&barrier->mutex, mutexFlags);
// Wake everyone else up.
barrier->waiter_count = (-barrier->waiter_max) + 1;
_kern_mutex_unblock((int32*)&barrier->lock, B_USER_MUTEX_UNBLOCK_ALL);
_kern_mutex_unblock((int32*)&barrier->lock, mutexFlags | B_USER_MUTEX_UNBLOCK_ALL);
// Return with the barrier mutex still locked, as waiter_count < 0.
// The last thread out will take care of unlocking it and resetting state.
@@ -113,16 +116,16 @@ pthread_barrier_wait(pthread_barrier_t* barrier)
// We aren't the last one in. Wait until we are woken up.
do {
_kern_mutex_lock((int32*)&barrier->lock, "barrier wait", 0, 0);
_kern_mutex_lock((int32*)&barrier->lock, "barrier wait", mutexFlags, 0);
} while (barrier->waiter_count > 0);
// Release the barrier, so that any later threads trying to acquire it wake up.
barrier_unlock(&barrier->lock);
barrier_unlock(&barrier->lock, mutexFlags);
if (atomic_add((int32*)&barrier->waiter_count, 1) == -1) {
// We are the last one out. Reset state and unlock.
barrier->lock = B_USER_MUTEX_LOCKED;
barrier_unlock(&barrier->mutex);
barrier_unlock(&barrier->mutex, mutexFlags);
}
return 0;
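
With the flag threaded through every kernel call, a process-shared
barrier now works across teams. A minimal usage sketch (standard POSIX
API; the PROCESS_SHARED attribute is what sets BARRIER_FLAG_SHARED):

#include <pthread.h>

static int
init_shared_barrier(pthread_barrier_t* barrier)
{
	/* The barrier itself must live in shared memory (see the mutex
	 * example in the commit message above). */
	pthread_barrierattr_t attr;
	pthread_barrierattr_init(&attr);
	pthread_barrierattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
	int result = pthread_barrier_init(barrier, &attr, 2);
	pthread_barrierattr_destroy(&attr);
	return result;
}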

src/system/libroot/posix/pthread/pthread_cond.cpp

@@ -80,7 +80,10 @@ cond_wait(pthread_cond_t* cond, pthread_mutex_t* mutex, uint32 flags,
mutex->owner = -1;
mutex->owner_count = 0;
if ((cond->flags & COND_FLAG_SHARED) != 0)
flags |= B_USER_MUTEX_SHARED;
status_t status = _kern_mutex_switch_lock((int32*)&mutex->lock,
((mutex->flags & MUTEX_FLAG_SHARED) ? B_USER_MUTEX_SHARED : 0),
(int32*)&cond->lock, "pthread condition", flags, timeout);
if (status == B_INTERRUPTED) {
@@ -107,10 +110,15 @@ cond_signal(pthread_cond_t* cond, bool broadcast)
if (cond->waiter_count == 0)
return;
uint32 flags = 0;
if (broadcast)
flags |= B_USER_MUTEX_UNBLOCK_ALL;
if ((cond->flags & COND_FLAG_SHARED) != 0)
flags |= B_USER_MUTEX_SHARED;
// release the condition lock
atomic_and((int32*)&cond->lock, ~(int32)B_USER_MUTEX_LOCKED);
_kern_mutex_unblock((int32*)&cond->lock,
broadcast ? B_USER_MUTEX_UNBLOCK_ALL : 0);
_kern_mutex_unblock((int32*)&cond->lock, flags);
}
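
The same applies to condition variables: COND_FLAG_SHARED now routes
both the wait and the wakeup through the shared context. A short usage
sketch (standard POSIX API):

#include <pthread.h>

static int
init_shared_cond(pthread_cond_t* cond)
{
	/* PROCESS_SHARED sets COND_FLAG_SHARED, so cond_wait()/cond_signal()
	 * above pass B_USER_MUTEX_SHARED to the kernel. */
	pthread_condattr_t attr;
	pthread_condattr_init(&attr);
	pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
	int result = pthread_cond_init(cond, &attr);
	pthread_condattr_destroy(&attr);
	return result;
}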

src/system/libroot/posix/pthread/pthread_mutex.cpp

@@ -17,7 +17,6 @@
#include <user_mutex_defs.h>
#define MUTEX_FLAG_SHARED 0x80000000
#define MUTEX_TYPE_BITS 0x0000000f
#define MUTEX_TYPE(mutex) ((mutex)->flags & MUTEX_TYPE_BITS)
@@ -79,6 +78,8 @@ __pthread_mutex_lock(pthread_mutex_t* mutex, uint32 flags, bigtime_t timeout)
// someone else has the lock or is at least waiting for it
if (timeout < 0)
return EBUSY;
if ((mutex->flags & MUTEX_FLAG_SHARED) != 0)
flags |= B_USER_MUTEX_SHARED;
// we have to call the kernel
status_t error;
@@ -174,8 +175,10 @@ pthread_mutex_unlock(pthread_mutex_t* mutex)
// clear the locked flag
int32 oldValue = atomic_and((int32*)&mutex->lock,
~(int32)B_USER_MUTEX_LOCKED);
if ((oldValue & B_USER_MUTEX_WAITING) != 0)
_kern_mutex_unblock((int32*)&mutex->lock, 0);
if ((oldValue & B_USER_MUTEX_WAITING) != 0) {
_kern_mutex_unblock((int32*)&mutex->lock,
(mutex->flags & MUTEX_FLAG_SHARED) ? B_USER_MUTEX_SHARED : 0);
}
if (MUTEX_TYPE(mutex) == PTHREAD_MUTEX_ERRORCHECK
|| MUTEX_TYPE(mutex) == PTHREAD_MUTEX_DEFAULT) {