* Got rid of the duplicated functionality provided by RWLocker.cpp and

Locker.cpp.
* The services are now using recursive_locks, and rw_locks instead.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@33548 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Axel Dörfler 2009-10-12 14:29:05 +00:00
parent 4baa865937
commit b74a098352
12 changed files with 53 additions and 1125 deletions

View File

@ -5,10 +5,12 @@
#ifndef _K_DISK_DEVICE_H
#define _K_DISK_DEVICE_H
#include <OS.h>
#include <lock.h>
#include "KPartition.h"
#include "RWLocker.h"
namespace BPrivate {
@ -36,10 +38,8 @@ public:
// manager lock owners can be sure, that it won't change.
bool ReadLock();
void ReadUnlock();
bool IsReadLocked(bool orWriteLocked = true);
bool WriteLock();
void WriteUnlock();
bool IsWriteLocked();
virtual void SetID(partition_id id);
@ -92,7 +92,7 @@ private:
void _UpdateDeviceFlags();
disk_device_data fDeviceData;
RWLocker fLocker;
rw_lock fLocker;
int fFD;
status_t fMediaStatus;
};

View File

@ -133,7 +133,7 @@ private:
void _NotifyDeviceEvent(KDiskDevice* device, int32 event, uint32 mask);
BLocker fLock;
recursive_lock fLock;
DeviceMap *fDevices;
PartitionMap *fPartitions;
DiskSystemMap *fDiskSystems;

View File

@ -1,131 +0,0 @@
// RWLocker.h
//
// This class provides a reader/writer locking mechanism:
// * A writer needs an exclusive lock.
// * For a reader a non-exclusive lock to be shared with other readers is
// sufficient.
// * The ownership of a lock is bound to the thread that requested the lock;
// the same thread has to call Unlock() later.
// * Nested locking is supported: a number of XXXLock() calls needs to be
// bracketed by the same number of XXXUnlock() calls.
// * The lock acquisition strategy is fair: a lock applicant needs to wait
// only for those threads that already own a lock or requested one before
// the current thread. No one can overtake. E.g. if a thread owns a read
// lock, another one is waiting for a write lock, then a third one
// requesting a read lock has to wait until the write locker is done.
// This does not hold for threads that already own a lock (nested locking).
// A read lock owner is immediately granted another read lock and a write
// lock owner another write or a read lock.
// * A write lock owner is allowed to request a read lock and a read lock
// owner a write lock. While the first case is not problematic, the
// second one needs some further explanation: A read lock owner requesting
// a write lock temporarily loses its read lock(s) until the write lock
// is granted. Otherwise two read lock owning threads trying to get
// write locks at the same time would dead lock each other. The only
// problem with this solution is, that the write lock acquisition must
// not fail, because in that case the thread could not be given back
// its read lock(s), since another thread may have been given a write lock
// in the mean time. Fortunately locking can fail only either, if the
// locker has been deleted, or, if a timeout occurred. Therefore
// WriteLockWithTimeout() immediately returns with a B_WOULD_BLOCK error
// code, if the caller already owns a read lock (but no write lock) and
// another thread already owns or has requested a read or write lock.
// * Calls to read and write locking methods may interleave arbitrarily,
// e.g.: ReadLock(); WriteLock(); ReadUnlock(); WriteUnlock();
//
// Important note: Read/WriteLock() can fail only, if the locker has been
// deleted. However, it is NOT safe to invoke any method on a deleted
// locker object.
//
// Implementation details:
// A locker needs three semaphores (a BLocker and two semaphores): one
// to protect the lockers data, one as a reader/writer mutex (to be
// acquired by each writer and the first reader) and one for queueing
// waiting readers and writers. The simplified locking/unlocking
// algorithm is the following:
//
// writer reader
// queue.acquire() queue.acquire()
// mutex.acquire() if (first reader) mutex.acquire()
// queue.release() queue.release()
// ... ...
// mutex.release() if (last reader) mutex.release()
//
// One thread at maximum waits at the mutex, the others at the queueing
// semaphore. Unfortunately features as nested locking and timeouts make
// things more difficult. Therefore readers as well as writers need to check
// whether they already own a lock before acquiring the queueing semaphore.
// The data for the readers are stored in a list of ReadLockInfo structures;
// the writer data are stored in some special fields. /fReaderCount/ and
// /fWriterCount/ contain the total count of unbalanced Read/WriteLock()
// calls, /fWriterReaderCount/ and /fWriterWriterCount/ only from those of
// the current write lock owner (/fWriter/). To be a bit more precise:
// /fWriterReaderCount/ is not contained in /fReaderCount/, but
// /fWriterWriterCount/ is contained in /fWriterCount/. Therefore
// /fReaderCount/ can be considered to be the count of true reader's read
// locks.
#ifndef RW_LOCKER_H
#define RW_LOCKER_H
#include <OS.h>
#include <Vector.h>
class RWLocker {
public:
RWLocker();
RWLocker(const char* name);
virtual ~RWLocker();
status_t InitCheck() const;
bool ReadLock();
status_t ReadLockWithTimeout(bigtime_t timeout);
void ReadUnlock();
bool IsReadLocked(bool orWriteLock = true) const;
bool WriteLock();
status_t WriteLockWithTimeout(bigtime_t timeout);
void WriteUnlock();
bool IsWriteLocked() const;
private:
struct ReadLockInfo;
struct Benaphore {
sem_id semaphore;
int32 counter;
};
private:
status_t _Init(const char* name);
status_t _ReadLock(bigtime_t timeout);
status_t _WriteLock(bigtime_t timeout);
int32 _AddReadLockInfo(ReadLockInfo* info);
int32 _NewReadLockInfo(thread_id thread,
int32 count = 1);
void _DeleteReadLockInfo(int32 index);
ReadLockInfo* _ReadLockInfoAt(int32 index) const;
int32 _IndexOf(thread_id thread) const;
static status_t _AcquireBenaphore(Benaphore& benaphore,
bigtime_t timeout = B_INFINITE_TIMEOUT);
static void _ReleaseBenaphore(Benaphore& benaphore);
private:
mutable Benaphore fLock; // data lock
Benaphore fMutex; // critical code mutex
Benaphore fQueue; // queueing semaphore
int32 fReaderCount; // total count...
int32 fWriterCount; // total count...
Vector<ReadLockInfo*> fReadLockInfos;
thread_id fWriter; // current write lock
// owner
int32 fWriterWriterCount; // write lock owner
// count
int32 fWriterReaderCount; // writer read lock
// owner count
};
#endif // RW_LOCKER_H

View File

@ -27,10 +27,6 @@ KernelMergeObject kernel_disk_device_manager.o :
KPartitionVisitor.cpp
UserDataWriter.cpp
# utilities
Locker.cpp
RWLocker.cpp
# disk device types
DiskDeviceTypes.cpp

View File

@ -23,26 +23,27 @@
#define DBG(x) x
#define OUT dprintf
// constructor
KDiskDevice::KDiskDevice(partition_id id)
: KPartition(id),
fDeviceData(),
fLocker("diskdevice"),
fFD(-1),
fMediaStatus(B_ERROR)
:
KPartition(id),
fDeviceData(),
fLocker(RW_LOCK_INITIALIZER("disk device")),
fFD(-1),
fMediaStatus(B_ERROR)
{
Unset();
fDevice = this;
fPublishedName = (char*)"raw";
}
// destructor
KDiskDevice::~KDiskDevice()
{
Unset();
}
// SetTo
status_t
KDiskDevice::SetTo(const char *path)
{
@ -84,7 +85,7 @@ KDiskDevice::SetTo(const char *path)
return B_OK;
}
// Unset
void
KDiskDevice::Unset()
{
@ -102,57 +103,42 @@ KDiskDevice::Unset()
_ResetGeometry();
}
// InitCheck
status_t
KDiskDevice::InitCheck() const
{
return fLocker.InitCheck();
return B_OK;
}
// ReadLock
bool
KDiskDevice::ReadLock()
{
return fLocker.ReadLock();
return rw_lock_read_lock(&fLocker) == B_OK;
}
// ReadUnlock
void
KDiskDevice::ReadUnlock()
{
fLocker.ReadUnlock();
rw_lock_read_unlock(&fLocker);
}
// IsReadLocked
bool
KDiskDevice::IsReadLocked(bool orWriteLocked)
{
return fLocker.IsReadLocked(orWriteLocked);
}
// WriteLock
bool
KDiskDevice::WriteLock()
{
return fLocker.WriteLock();
return rw_lock_write_lock(&fLocker) == B_OK;
}
// WriteUnlock
void
KDiskDevice::WriteUnlock()
{
fLocker.WriteUnlock();
}
// IsWriteLocked
bool
KDiskDevice::IsWriteLocked()
{
return fLocker.IsWriteLocked();
rw_lock_write_unlock(&fLocker);
}
// SetID
void
KDiskDevice::SetID(partition_id id)
{
@ -160,7 +146,7 @@ KDiskDevice::SetID(partition_id id)
fDeviceData.id = id;
}
// PublishDevice
status_t
KDiskDevice::PublishDevice()
{

View File

@ -239,7 +239,7 @@ public:
KDiskDeviceManager::KDiskDeviceManager()
:
fLock("disk device manager"),
fLock(RECURSIVE_LOCK_INITIALIZER("disk device manager")),
fDevices(new(nothrow) DeviceMap),
fPartitions(new(nothrow) PartitionMap),
fDiskSystems(new(nothrow) DiskSystemMap),
@ -324,7 +324,7 @@ KDiskDeviceManager::InitCheck() const
|| fObsoletePartitions == NULL || fNotifications == NULL)
return B_NO_MEMORY;
return fLock.Sem() >= 0 ? B_OK : fLock.Sem();
return B_OK;
}
@ -366,14 +366,14 @@ KDiskDeviceManager::Default()
bool
KDiskDeviceManager::Lock()
{
return fLock.Lock();
return recursive_lock_lock(&fLock) == B_OK;
}
void
KDiskDeviceManager::Unlock()
{
fLock.Unlock();
recursive_lock_unlock(&fLock);
}

View File

@ -1,305 +0,0 @@
//
// $Id: Locker.cpp,v 1.2 2003/10/25 13:24:59 wkornew Exp $
//
// This file contains the OpenBeOS implementation of BLocker.
//
#include "Locker.h"
#include <OS.h>
#include <SupportDefs.h>
#ifdef _KERNEL_MODE
#include <kernel_cpp.h>
#endif
#ifdef USE_OPENBEOS_NAMESPACE
namespace OpenBeOS {
#endif
//
// Data Member Documentation:
//
// The "fBenaphoreCount" member is set to 1 if the BLocker style is
// semaphore. If the style is benaphore, it is initialized to 0 and
// is incremented atomically when it is acquired, decremented when it
// is released. By setting the benaphore count to 1 when the style is
// semaphore, the benaphore effectively becomes a semaphore. I was able
// to determine this is what Be's implementation does by testing the
// result of the CountLockRequests() member.
//
// The "fSemaphoreID" member holds the sem_id returned from create_sem()
// when the BLocker is constructed. It is used to acquire and release
// the lock regardless of the lock style (semaphore or benaphore).
//
// The "fLockOwner" member holds the thread_id of the thread which
// currently holds the lock. If no thread holds the lock, it is set to
// B_ERROR.
//
// The "fRecursiveCount" member holds a count of the number of times the
// thread holding the lock has acquired the lock without a matching unlock.
// It is basically the number of times the thread must call Unlock() before
// the lock can be acquired by a different thread.
//
//
// Constructors:
//
// All constructors just pass their arguments to InitLocker(). Note that
// the default for "name" is "some BLocker" and "benaphore_style" is true.
//
BLocker::BLocker()
{
InitLocker("some BLocker", true);
}
BLocker::BLocker(const char *name)
{
InitLocker(name, true);
}
BLocker::BLocker(bool benaphore_style)
{
InitLocker("some BLocker", benaphore_style);
}
BLocker::BLocker(const char *name,
bool benaphore_style)
{
InitLocker(name, benaphore_style);
}
//
// This constructor is not documented. The final argument is ignored for
// now. In Be's headers, its called "for_IPC". DO NOT USE THIS
// CONSTRUCTOR!
//
BLocker::BLocker(const char *name,
bool benaphore_style,
bool)
{
InitLocker(name, benaphore_style);
}
//
// The destructor just deletes the semaphore. By deleting the semaphore,
// any threads waiting to acquire the BLocker will be unblocked.
//
BLocker::~BLocker()
{
delete_sem(fSemaphoreID);
}
bool
BLocker::Lock(void)
{
status_t result;
return (AcquireLock(B_INFINITE_TIMEOUT, &result));
}
status_t
BLocker::LockWithTimeout(bigtime_t timeout)
{
status_t result;
AcquireLock(timeout, &result);
return result;
}
void
BLocker::Unlock(void)
{
// If the thread currently holds the lockdecrement
if (IsLocked()) {
// Decrement the number of outstanding locks this thread holds
// on this BLocker.
fRecursiveCount--;
// If the recursive count is now at 0, that means the BLocker has
// been released by the thread.
if (fRecursiveCount == 0) {
// The BLocker is no longer owned by any thread.
fLockOwner = B_ERROR;
// Decrement the benaphore count and store the undecremented
// value in oldBenaphoreCount.
int32 oldBenaphoreCount = atomic_add(&fBenaphoreCount, -1);
// If the oldBenaphoreCount is greater than 1, then there is
// at lease one thread waiting for the lock in the case of a
// benaphore.
if (oldBenaphoreCount > 1) {
// Since there are threads waiting for the lock, it must
// be released. Note, the old benaphore count will always be
// greater than 1 for a semaphore so the release is always done.
release_sem(fSemaphoreID);
}
}
}
}
thread_id
BLocker::LockingThread(void) const
{
return fLockOwner;
}
bool
BLocker::IsLocked(void) const
{
// This member returns true if the calling thread holds the lock.
// The easiest way to determine this is to compare the result of
// find_thread() to the fLockOwner.
return (find_thread(NULL) == fLockOwner);
}
int32
BLocker::CountLocks(void) const
{
return fRecursiveCount;
}
int32
BLocker::CountLockRequests(void) const
{
return fBenaphoreCount;
}
sem_id
BLocker::Sem(void) const
{
return fSemaphoreID;
}
void
BLocker::InitLocker(const char *name,
bool benaphore)
{
if (benaphore) {
// Because this is a benaphore, initialize the benaphore count and
// create the semaphore. Because this is a benaphore, the semaphore
// count starts at 0 (ie acquired).
fBenaphoreCount = 0;
fSemaphoreID = create_sem(0, name);
} else {
// Because this is a semaphore, initialize the benaphore count to -1
// and create the semaphore. Because this is semaphore style, the
// semaphore count starts at 1 so that one thread can acquire it and
// the next thread to acquire it will block.
fBenaphoreCount = 1;
fSemaphoreID = create_sem(1, name);
}
#ifdef _KERNEL_MODE
set_sem_owner(fSemaphoreID, B_SYSTEM_TEAM);
#endif
// The lock is currently not acquired so there is no owner.
fLockOwner = B_ERROR;
// The lock is currently not acquired so the recursive count is zero.
fRecursiveCount = 0;
}
bool
BLocker::AcquireLock(bigtime_t timeout,
status_t *error)
{
// By default, return no error.
*error = B_NO_ERROR;
// Only try to acquire the lock if the thread doesn't already own it.
if (!IsLocked()) {
// Increment the benaphore count and test to see if it was already greater
// than 0. If it is greater than 0, then some thread already has the
// benaphore or the style is a semaphore. Either way, we need to acquire
// the semaphore in this case.
int32 oldBenaphoreCount = atomic_add(&fBenaphoreCount, 1);
if (oldBenaphoreCount > 0) {
*error = acquire_sem_etc(fSemaphoreID, 1, B_RELATIVE_TIMEOUT,
timeout);
// Note, if the lock here does time out, the benaphore count
// is not decremented. By doing this, the benaphore count will
// never go back to zero. This means that the locking essentially
// changes to semaphore style if this was a benaphore.
//
// Doing the decrement of the benaphore count when the acquisition
// fails is a risky thing to do. If you decrement the counter at
// the same time the thread which holds the benaphore does an
// Unlock(), there is serious risk of a race condition.
//
// If the Unlock() sees a positive count and releases the semaphore
// and then the timed out thread decrements the count to 0, there
// is no one to take the semaphore. The next two threads will be
// able to acquire the benaphore at the same time! The first will
// increment the counter and acquire the lock. The second will
// acquire the semaphore and therefore the lock. Not good.
//
// This has been discussed on the becodetalk mailing list and
// Trey from Be had this to say:
//
// I looked at the LockWithTimeout() code, and it does not have
// _this_ (ie the race condition) problem. It circumvents it by
// NOT doing the atomic_add(&count, -1) if the semaphore
// acquisition fails. This means that if a
// BLocker::LockWithTimeout() times out, all other Lock*() attempts
// turn into guaranteed semaphore grabs, _with_ the overhead of a
// (now) useless atomic_add().
//
// Given Trey's comments, it looks like Be took the same approach
// I did. The output of CountLockRequests() of Be's implementation
// confirms Trey's comments also.
//
// Finally some thoughts for the future with this code:
// - If 2^31 timeouts occur on a 32-bit machine (ie today),
// the benaphore count will wrap to a negative number. This
// would have unknown consequences on the ability of the BLocker
// to continue to function.
//
}
}
// If the lock has successfully been acquired.
if (*error == B_NO_ERROR) {
// Set the lock owner to this thread and increment the recursive count
// by one. The recursive count is incremented because one more Unlock()
// is now required to release the lock (ie, 0 => 1, 1 => 2 etc).
fLockOwner = find_thread(NULL);
fRecursiveCount++;
}
// Return true if the lock has been acquired.
return (*error == B_NO_ERROR);
}
#ifdef USE_OPENBEOS_NAMESPACE
}
#endif

View File

@ -1,63 +0,0 @@
//
// $Id: Locker.h,v 1.1 2003/06/10 22:29:52 bonefish Exp $
//
// This is the BLocker interface for OpenBeOS. It has been created to
// be source and binary compatible with the BeOS version of BLocker.
//
// bonefish: Removed virtual from destructor.
#ifndef _OPENBEOS_LOCKER_H
#define _OPENBEOS_LOCKER_H
#include <OS.h>
#include <SupportDefs.h>
#ifdef USE_OPENBEOS_NAMESPACE
namespace OpenBeOS {
#endif
class BLocker {
public:
BLocker();
BLocker(const char *name);
BLocker(bool benaphore_style);
BLocker(const char *name, bool benaphore_style);
// The following constructor is not documented in the BeBook
// and is only listed here to ensure binary compatibility.
// DO NOT USE THIS CONSTRUCTOR!
BLocker(const char *name, bool benaphore_style, bool);
~BLocker();
bool Lock(void);
status_t LockWithTimeout(bigtime_t timeout);
void Unlock(void);
thread_id LockingThread(void) const;
bool IsLocked(void) const;
int32 CountLocks(void) const;
int32 CountLockRequests(void) const;
sem_id Sem(void) const;
private:
void InitLocker(const char *name, bool benaphore_style);
bool AcquireLock(bigtime_t timeout, status_t *error);
int32 fBenaphoreCount;
sem_id fSemaphoreID;
thread_id fLockOwner;
int32 fRecursiveCount;
// Reserved space for future changes to BLocker
int32 fReservedSpace[4];
};
#ifdef USE_OPENBEOS_NAMESPACE
}
#endif
#endif // _OPENBEOS_LOCKER_H

View File

@ -1,525 +0,0 @@
// RWLocker.cpp
#include <util/kernel_cpp.h>
#include "RWLocker.h"
using namespace std;
// info about a read lock owner
struct RWLocker::ReadLockInfo {
thread_id reader;
int32 count;
};
// constructor
RWLocker::RWLocker()
:
fReaderCount(0),
fWriterCount(0),
fReadLockInfos(8),
fWriter(B_ERROR),
fWriterWriterCount(0),
fWriterReaderCount(0)
{
_Init(NULL);
}
// constructor
RWLocker::RWLocker(const char* name)
:
fReaderCount(0),
fWriterCount(0),
fReadLockInfos(8),
fWriter(B_ERROR),
fWriterWriterCount(0),
fWriterReaderCount(0)
{
_Init(name);
}
// destructor
RWLocker::~RWLocker()
{
_AcquireBenaphore(fLock);
delete_sem(fMutex.semaphore);
delete_sem(fQueue.semaphore);
for (int32 i = 0; ReadLockInfo* info = _ReadLockInfoAt(i); i++) {
delete info;
}
delete_sem(fLock.semaphore);
}
// InitCheck
status_t
RWLocker::InitCheck() const
{
if (fLock.semaphore < 0)
return fLock.semaphore;
if (fMutex.semaphore < 0)
return fMutex.semaphore;
if (fQueue.semaphore < 0)
return fQueue.semaphore;
return B_OK;
}
// ReadLock
bool
RWLocker::ReadLock()
{
status_t error = _ReadLock(B_INFINITE_TIMEOUT);
return (error == B_OK);
}
// ReadLockWithTimeout
status_t
RWLocker::ReadLockWithTimeout(bigtime_t timeout)
{
bigtime_t absoluteTimeout = system_time() + timeout;
// take care of overflow
if (timeout > 0 && absoluteTimeout < 0)
absoluteTimeout = B_INFINITE_TIMEOUT;
return _ReadLock(absoluteTimeout);
}
// ReadUnlock
void
RWLocker::ReadUnlock()
{
if (_AcquireBenaphore(fLock) == B_OK) {
thread_id thread = find_thread(NULL);
if (thread == fWriter) {
// We (also) have a write lock.
if (fWriterReaderCount > 0)
fWriterReaderCount--;
// else: error: unmatched ReadUnlock()
} else {
int32 index = _IndexOf(thread);
if (ReadLockInfo* info = _ReadLockInfoAt(index)) {
fReaderCount--;
if (--info->count == 0) {
// The outer read lock bracket for the thread has been
// reached. Dispose the info.
_DeleteReadLockInfo(index);
}
if (fReaderCount == 0) {
// The last reader needs to unlock the mutex.
_ReleaseBenaphore(fMutex);
}
} // else: error: caller has no read lock
}
_ReleaseBenaphore(fLock);
} // else: we are probably going to be destroyed
}
/** Returns whether or not the calling thread owns a read lock or, if
* orWriteLock is true, at least a write lock.
*/
bool
RWLocker::IsReadLocked(bool orWriteLock) const
{
bool result = false;
if (_AcquireBenaphore(fLock) == B_OK) {
thread_id thread = find_thread(NULL);
result = ((orWriteLock && thread == fWriter) || _IndexOf(thread) >= 0);
_ReleaseBenaphore(fLock);
}
return result;
}
// WriteLock
bool
RWLocker::WriteLock()
{
status_t error = _WriteLock(B_INFINITE_TIMEOUT);
return (error == B_OK);
}
// WriteLockWithTimeout
status_t
RWLocker::WriteLockWithTimeout(bigtime_t timeout)
{
bigtime_t absoluteTimeout = system_time() + timeout;
// take care of overflow
if (timeout > 0 && absoluteTimeout < 0)
absoluteTimeout = B_INFINITE_TIMEOUT;
return _WriteLock(absoluteTimeout);
}
// WriteUnlock
void
RWLocker::WriteUnlock()
{
if (_AcquireBenaphore(fLock) == B_OK) {
thread_id thread = find_thread(NULL);
if (thread == fWriter) {
fWriterCount--;
if (--fWriterWriterCount == 0) {
// The outer write lock bracket for the thread has been
// reached.
fWriter = B_ERROR;
if (fWriterReaderCount > 0) {
// We still own read locks.
_NewReadLockInfo(thread, fWriterReaderCount);
// TODO: if the creation fails, there is nothing we can do about it!
// A reader that expects to be the first reader may wait
// at the mutex semaphore. We need to wake it up.
if (fReaderCount > 0)
_ReleaseBenaphore(fMutex);
fReaderCount += fWriterReaderCount;
fWriterReaderCount = 0;
} else {
// We don't own any read locks. So we have to release the
// mutex benaphore.
_ReleaseBenaphore(fMutex);
}
}
} // else: error: unmatched WriteUnlock()
_ReleaseBenaphore(fLock);
} // else: We're probably going to die.
}
/** Returns whether or not the calling thread owns a write lock. */
bool
RWLocker::IsWriteLocked() const
{
return (fWriter == find_thread(NULL));
}
// make_sem_name
static void
make_sem_name(char *buffer, const char *name, const char *suffix)
{
if (!name)
name = "unnamed_rwlocker";
int32 nameLen = strlen(name);
int32 suffixLen = strlen(suffix);
if (suffixLen >= B_OS_NAME_LENGTH)
suffixLen = B_OS_NAME_LENGTH - 1;
if (nameLen + suffixLen >= B_OS_NAME_LENGTH)
nameLen = B_OS_NAME_LENGTH - suffixLen - 1;
memcpy(buffer, name, nameLen);
memcpy(buffer + nameLen, suffix, suffixLen);
buffer[nameLen + suffixLen] = '\0';
}
// _Init
status_t
RWLocker::_Init(const char* name)
{
// init the data lock benaphore
fLock.semaphore = create_sem(0, name);
fLock.counter = 0;
// init the mutex benaphore
char semName[B_OS_NAME_LENGTH];
make_sem_name(semName, name, "_mutex");
fMutex.semaphore = create_sem(0, semName);
fMutex.counter = 0;
// init the queueing benaphore
make_sem_name(semName, name, "_queue");
fQueue.semaphore = create_sem(0, semName);
fQueue.counter = 0;
return InitCheck();
}
// _ReadLock
//
// /timeout/ -- absolute timeout
status_t
RWLocker::_ReadLock(bigtime_t timeout)
{
status_t error = B_OK;
thread_id thread = find_thread(NULL);
bool locked = false;
if (_AcquireBenaphore(fLock) == B_OK) {
// Check, if we already own a read (or write) lock. In this case we
// can skip the usual locking procedure.
if (thread == fWriter) {
// We already own a write lock.
fWriterReaderCount++;
locked = true;
} else if (ReadLockInfo* info = _ReadLockInfoAt(_IndexOf(thread))) {
// We already own a read lock.
info->count++;
fReaderCount++;
locked = true;
}
_ReleaseBenaphore(fLock);
} else {
// failed to lock the data
error = B_ERROR;
}
// Usual locking, i.e. we do not already own a read or write lock.
if (error == B_OK && !locked) {
error = _AcquireBenaphore(fQueue, timeout);
if (error == B_OK) {
if (_AcquireBenaphore(fLock) == B_OK) {
bool firstReader = false;
if (++fReaderCount == 1) {
// We are the first reader.
firstReader = true;
}
int32 index = _NewReadLockInfo(thread);
_ReleaseBenaphore(fLock);
if (index < 0) {
// creating a ReadLockInfo object failed
_ReleaseBenaphore(fQueue);
return B_NO_MEMORY;
}
// The first reader needs to lock the mutex.
if (firstReader) {
error = _AcquireBenaphore(fMutex, timeout);
switch (error) {
case B_OK:
// fine
break;
case B_TIMED_OUT: {
// clean up
if (_AcquireBenaphore(fLock) == B_OK) {
_DeleteReadLockInfo(_IndexOf(thread));
fReaderCount--;
_ReleaseBenaphore(fLock);
}
break;
}
default:
// Probably we are going to be destroyed.
break;
}
}
// Let the next candidate enter the game.
_ReleaseBenaphore(fQueue);
} else {
// We couldn't lock the data, which can only happen, if
// we're going to be destroyed.
error = B_ERROR;
}
}
}
return error;
}
// _WriteLock
//
// /timeout/ -- absolute timeout
status_t
RWLocker::_WriteLock(bigtime_t timeout)
{
status_t error = B_ERROR;
if (_AcquireBenaphore(fLock) == B_OK) {
bool infiniteTimeout = (timeout == B_INFINITE_TIMEOUT);
bool locked = false;
int32 readerCount = 0;
thread_id thread = find_thread(NULL);
int32 index = _IndexOf(thread);
if (ReadLockInfo* info = _ReadLockInfoAt(index)) {
// We already own a read lock.
if (fWriterCount > 0) {
// There are writers before us.
if (infiniteTimeout) {
// Timeout is infinite and there are writers before us.
// Unregister the read locks and lock as usual.
readerCount = info->count;
fWriterCount++;
fReaderCount -= readerCount;
_DeleteReadLockInfo(index);
error = B_OK;
} else {
// The timeout is finite and there are readers before us:
// let the write lock request fail.
error = B_WOULD_BLOCK;
}
} else if (info->count == fReaderCount) {
// No writers before us.
// We are the only read lock owners. Just move the read lock
// info data to the special writer fields and then we are done.
// Note: At this point we may overtake readers that already
// have acquired the queueing benaphore, but have not yet
// locked the data. But that doesn't harm.
fWriter = thread;
fWriterCount++;
fWriterWriterCount = 1;
fWriterReaderCount = info->count;
fReaderCount -= fWriterReaderCount;
_DeleteReadLockInfo(index);
locked = true;
error = B_OK;
} else {
// No writers before us, but other readers.
// Note, we're quite restrictive here. If there are only
// readers before us, we could reinstall our readers, if
// our request times out. Unfortunately it is not easy
// to ensure, that no writer overtakes us between unlocking
// the data and acquiring the queuing benaphore.
if (infiniteTimeout) {
// Unregister the readers and lock as usual.
readerCount = info->count;
fWriterCount++;
fReaderCount -= readerCount;
_DeleteReadLockInfo(index);
error = B_OK;
} else
error = B_WOULD_BLOCK;
}
} else {
// We don't own a read lock.
if (fWriter == thread) {
// ... but a write lock.
fWriterCount++;
fWriterWriterCount++;
locked = true;
error = B_OK;
} else {
// We own neither read nor write locks.
// Lock as usual.
fWriterCount++;
error = B_OK;
}
}
_ReleaseBenaphore(fLock);
// Usual locking...
// First step: acquire the queueing benaphore.
if (!locked && error == B_OK) {
error = _AcquireBenaphore(fQueue, timeout);
switch (error) {
case B_OK:
break;
case B_TIMED_OUT: {
// clean up
if (_AcquireBenaphore(fLock) == B_OK) {
fWriterCount--;
_ReleaseBenaphore(fLock);
} // else: failed to lock the data: we're probably going
// to die.
break;
}
default:
// Probably we're going to die.
break;
}
}
// Second step: acquire the mutex benaphore.
if (!locked && error == B_OK) {
error = _AcquireBenaphore(fMutex, timeout);
switch (error) {
case B_OK: {
// Yeah, we made it. Set the special writer fields.
fWriter = thread;
fWriterWriterCount = 1;
fWriterReaderCount = readerCount;
break;
}
case B_TIMED_OUT: {
// clean up
if (_AcquireBenaphore(fLock) == B_OK) {
fWriterCount--;
_ReleaseBenaphore(fLock);
} // else: failed to lock the data: we're probably going
// to die.
break;
}
default:
// Probably we're going to die.
break;
}
// Whatever happened, we have to release the queueing benaphore.
_ReleaseBenaphore(fQueue);
}
} else // failed to lock the data
error = B_ERROR;
return error;
}
// _AddReadLockInfo
int32
RWLocker::_AddReadLockInfo(ReadLockInfo* info)
{
int32 index = fReadLockInfos.Count();
fReadLockInfos.Insert(info, index);
return index;
}
/** Create a new read lock info for the supplied thread and add it to
* the list. Returns the index of the info, or -1 to indicate an out
* of memory situation.
*/
int32
RWLocker::_NewReadLockInfo(thread_id thread, int32 count)
{
ReadLockInfo* info = new(nothrow) ReadLockInfo;
if (info == NULL)
return -1;
info->reader = thread;
info->count = count;
return _AddReadLockInfo(info);
}
// _DeleteReadLockInfo
void
RWLocker::_DeleteReadLockInfo(int32 index)
{
if (index >= 0 && index < fReadLockInfos.Count()) {
ReadLockInfo* info = fReadLockInfos.ElementAt(index);
fReadLockInfos.Erase(index);
delete info;
}
}
// _ReadLockInfoAt
RWLocker::ReadLockInfo*
RWLocker::_ReadLockInfoAt(int32 index) const
{
if (index >= 0 && index < fReadLockInfos.Count())
return fReadLockInfos.ElementAt(index);
return NULL;
}
// _IndexOf
int32
RWLocker::_IndexOf(thread_id thread) const
{
int32 count = fReadLockInfos.Count();
for (int32 i = 0; i < count; i++) {
if (_ReadLockInfoAt(i)->reader == thread)
return i;
}
return -1;
}
// _AcquireBenaphore
status_t
RWLocker::_AcquireBenaphore(Benaphore& benaphore, bigtime_t timeout)
{
status_t error = B_OK;
if (atomic_add(&benaphore.counter, 1) > 0) {
error = acquire_sem_etc(benaphore.semaphore, 1, B_ABSOLUTE_TIMEOUT,
timeout);
}
return error;
}
// _ReleaseBenaphore
void
RWLocker::_ReleaseBenaphore(Benaphore& benaphore)
{
if (atomic_add(&benaphore.counter, -1) > 1)
release_sem(benaphore.semaphore);
}

View File

@ -39,11 +39,9 @@ write_unlock_disk_device(partition_id partitionID)
{
KDiskDeviceManager* manager = KDiskDeviceManager::Default();
if (KDiskDevice* device = manager->RegisterDevice(partitionID, false)) {
bool isLocked = device->IsWriteLocked();
if (isLocked) {
device->WriteUnlock();
device->Unregister();
}
device->WriteUnlock();
device->Unregister();
device->Unregister();
}
}
@ -71,11 +69,9 @@ read_unlock_disk_device(partition_id partitionID)
{
KDiskDeviceManager* manager = KDiskDeviceManager::Default();
if (KDiskDevice* device = manager->RegisterDevice(partitionID, false)) {
bool isLocked = device->IsReadLocked(false);
if (isLocked) {
device->ReadUnlock();
device->Unregister();
}
device->ReadUnlock();
device->Unregister();
device->Unregister();
}
}

View File

@ -1,9 +1,11 @@
/*
/*
* Copyright 2005-2008, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
// kernel-side implementation of the messaging service
//! kernel-side implementation of the messaging service
#include <new>
@ -33,13 +35,11 @@ static const int32 kMessagingAreaSize = B_PAGE_SIZE * 4;
// #pragma mark - MessagingArea
// constructor
MessagingArea::MessagingArea()
{
}
// destructor
MessagingArea::~MessagingArea()
{
if (fID >= 0)
@ -47,7 +47,6 @@ MessagingArea::~MessagingArea()
}
// Create
MessagingArea *
MessagingArea::Create(sem_id lockSem, sem_id counterSem)
{
@ -76,7 +75,6 @@ MessagingArea::Create(sem_id lockSem, sem_id counterSem)
}
// InitHeader
void
MessagingArea::InitHeader()
{
@ -90,7 +88,6 @@ MessagingArea::InitHeader()
}
// CheckCommandSize
bool
MessagingArea::CheckCommandSize(int32 dataSize)
{
@ -101,7 +98,6 @@ MessagingArea::CheckCommandSize(int32 dataSize)
}
// Lock
bool
MessagingArea::Lock()
{
@ -113,7 +109,6 @@ MessagingArea::Lock()
}
// Unlock
void
MessagingArea::Unlock()
{
@ -122,7 +117,6 @@ MessagingArea::Unlock()
}
// ID
area_id
MessagingArea::ID() const
{
@ -130,7 +124,6 @@ MessagingArea::ID() const
}
// Size
int32
MessagingArea::Size() const
{
@ -138,7 +131,6 @@ MessagingArea::Size() const
}
// Empty
bool
MessagingArea::IsEmpty() const
{
@ -146,7 +138,6 @@ MessagingArea::IsEmpty() const
}
// AllocateCommand
void *
MessagingArea::AllocateCommand(uint32 commandWhat, int32 dataSize,
bool &wasEmpty)
@ -218,7 +209,6 @@ MessagingArea::AllocateCommand(uint32 commandWhat, int32 dataSize,
}
// CommitCommand
void
MessagingArea::CommitCommand()
{
@ -227,7 +217,6 @@ MessagingArea::CommitCommand()
}
// SetNextArea
void
MessagingArea::SetNextArea(MessagingArea *area)
{
@ -236,7 +225,6 @@ MessagingArea::SetNextArea(MessagingArea *area)
}
// NextArea
MessagingArea *
MessagingArea::NextArea() const
{
@ -244,7 +232,6 @@ MessagingArea::NextArea() const
}
// _CheckCommand
messaging_command *
MessagingArea::_CheckCommand(int32 offset, int32 &size)
{
@ -271,16 +258,15 @@ MessagingArea::_CheckCommand(int32 offset, int32 &size)
// #pragma mark - MessagingService
// constructor
MessagingService::MessagingService()
: fLock("messaging service"),
fFirstArea(NULL),
fLastArea(NULL)
:
fLock(RECURSIVE_LOCK_INITIALIZER("messaging service")),
fFirstArea(NULL),
fLastArea(NULL)
{
}
// destructor
MessagingService::~MessagingService()
{
// Should actually never be called. Once created the service stays till the
@ -288,33 +274,27 @@ MessagingService::~MessagingService()
}
// InitCheck
status_t
MessagingService::InitCheck() const
{
if (fLock.Sem() < 0)
return fLock.Sem();
return B_OK;
}
// Lock
bool
MessagingService::Lock()
{
return fLock.Lock();
return recursive_lock_lock(&fLock) == B_OK;
}
// Unlock
void
MessagingService::Unlock()
{
fLock.Unlock();
recursive_lock_unlock(&fLock);
}
// RegisterService
status_t
MessagingService::RegisterService(sem_id lockSem, sem_id counterSem,
area_id &areaID)
@ -362,7 +342,6 @@ MessagingService::RegisterService(sem_id lockSem, sem_id counterSem,
}
// UnregisterService
status_t
MessagingService::UnregisterService()
{
@ -392,7 +371,6 @@ MessagingService::UnregisterService()
}
// SendMessage
status_t
MessagingService::SendMessage(const void *message, int32 messageSize,
const messaging_target *targets, int32 targetCount)
@ -436,7 +414,6 @@ PRINT((" Allocated space for send message command: area: %p, data: %p, "
}
// _AllocateCommand
status_t
MessagingService::_AllocateCommand(int32 commandWhat, int32 size,
MessagingArea *&area, void *&data, bool &wasEmpty)
@ -513,7 +490,6 @@ MessagingService::_AllocateCommand(int32 commandWhat, int32 size,
// #pragma mark - kernel private
// send_message
status_t
send_message(const void *message, int32 messageSize,
const messaging_target *targets, int32 targetCount)
@ -534,7 +510,6 @@ send_message(const void *message, int32 messageSize,
}
// send_message
status_t
send_message(const KMessage *message, const messaging_target *targets,
int32 targetCount)
@ -547,7 +522,6 @@ send_message(const KMessage *message, const messaging_target *targets,
}
// init_messaging_service
status_t
init_messaging_service()
{
@ -573,7 +547,6 @@ init_messaging_service()
// #pragma mark - syscalls
// _user_register_messaging_service
/** \brief Called by the userland server to register itself as a messaging
service for the kernel.
\param lockingSem A semaphore used for locking the shared data. Semaphore
@ -605,7 +578,6 @@ _user_register_messaging_service(sem_id lockSem, sem_id counterSem)
}
// _user_unregister_messaging_service
status_t
_user_unregister_messaging_service()
{

View File

@ -1,16 +1,18 @@
/*
/*
* Copyright 2005, Ingo Weinhold, bonefish@users.sf.net. All rights reserved.
* Distributed under the terms of the MIT License.
*/
// kernel-side implementation-private definitions for the messaging service
#ifndef MESSAGING_SERVICE_H
#define MESSAGING_SERVICE_H
//! kernel-side implementation-private definitions for the messaging service
#include <MessagingServiceDefs.h>
#include "Locker.h"
#include <lock.h>
namespace BPrivate {
@ -73,7 +75,7 @@ private:
status_t _AllocateCommand(int32 commandWhat, int32 size,
MessagingArea *&area, void *&data, bool &wasEmpty);
BLocker fLock;
recursive_lock fLock;
team_id fServerTeam;
sem_id fLockSem;
sem_id fCounterSem;