Replaced benaphores with mutexes.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@25378 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler 2008-05-08 15:52:27 +00:00
parent 3bdd7f38f1
commit adf376c941
3 changed files with 59 additions and 70 deletions
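The change is mechanical throughout: every benaphore field becomes a mutex, benaphore_init()/benaphore_destroy() become mutex_init()/mutex_destroy(), benaphore_lock()/benaphore_unlock() become mutex_lock()/mutex_unlock(), and the scoped BenaphoreLocker is replaced by MutexLocker. The one semantic difference the diff relies on is that the mutex needs no separate failure check after initialization, so the old "did the semaphore get created?" tests can be dropped. A rough before/after sketch of the idiom (the cache type and function names here are illustrative, not from the commit):

// Before: a benaphore wraps a semaphore, so creating it can fail and the
// resulting sem id has to be checked (e.g. later in an InitCheck()).
struct old_cache {
	benaphore lock;
};

status_t
old_cache_init(old_cache *cache)
{
	if (benaphore_init(&cache->lock, "example cache") < B_OK)
		return B_ERROR;
	return B_OK;
}

// After: mutex_init() is not checked for failure anywhere in this commit,
// and scoped locking uses MutexLocker instead of BenaphoreLocker.
struct new_cache {
	mutex lock;
};

void
new_cache_init(new_cache *cache)
{
	mutex_init(&cache->lock, "example cache");
}

void
new_cache_use(new_cache *cache)
{
	MutexLocker locker(&cache->lock);
	// ... work on the cache; the mutex is released automatically when
	// locker goes out of scope
}

void
new_cache_uninit(new_cache *cache)
{
	mutex_destroy(&cache->lock);
}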

View File

@@ -99,7 +99,7 @@ typedef DoublyLinkedList<cache_notification> NotificationList;
 struct block_cache : DoublyLinkedListLinkImpl<block_cache> {
 	hash_table *hash;
-	benaphore lock;
+	mutex lock;
 	int fd;
 	off_t max_blocks;
 	size_t block_size;
@@ -657,9 +657,7 @@ block_cache::block_cache(int _fd, off_t numBlocks, size_t blockSize,
 	if (transaction_hash == NULL)
 		return;
-	if (benaphore_init(&lock, "block cache") < B_OK)
-		return;
+	mutex_init(&lock, "block cache");
 	register_low_memory_handler(&block_cache::LowMemoryHandler, this, 0);
 }
@@ -671,7 +669,7 @@ block_cache::~block_cache()
 	unregister_low_memory_handler(&block_cache::LowMemoryHandler, this);
-	benaphore_destroy(&lock);
+	mutex_destroy(&lock);
 	condition_variable.Unpublish();
@@ -689,9 +687,6 @@ block_cache::~block_cache()
 status_t
 block_cache::InitCheck()
 {
-	if (lock.sem < B_OK)
-		return lock.sem;
 	if (buffer_cache == NULL || hash == NULL || transaction_hash == NULL)
 		return B_NO_MEMORY;
@@ -834,7 +829,7 @@ void
 block_cache::LowMemoryHandler(void *data, int32 level)
 {
 	block_cache *cache = (block_cache *)data;
-	BenaphoreLocker locker(&cache->lock);
+	MutexLocker locker(&cache->lock);
 	if (!locker.IsLocked()) {
 		// If our block_cache were deleted, it could be that we had
@@ -1457,7 +1452,7 @@ get_next_locked_block_cache(block_cache *last)
 	block_cache *cache;
 	if (last != NULL) {
-		benaphore_unlock(&last->lock);
+		mutex_unlock(&last->lock);
 		cache = sCaches.GetNext((block_cache *)&sMarkCache);
 		sCaches.Remove((block_cache *)&sMarkCache);
@@ -1471,14 +1466,14 @@ get_next_locked_block_cache(block_cache *last)
 		if (cache == NULL)
 			break;
-		status_t status = benaphore_lock(&cache->lock);
+		status_t status = mutex_lock(&cache->lock);
 		if (status != B_OK) {
 			// can only happen if the cache is being deleted right now
 			continue;
 		}
 		if (cache->deleting) {
-			benaphore_unlock(&cache->lock);
+			mutex_unlock(&cache->lock);
 			continue;
 		}
@@ -1667,7 +1662,7 @@ extern "C" int32
 cache_start_transaction(void *_cache)
 {
 	block_cache *cache = (block_cache *)_cache;
-	BenaphoreLocker locker(&cache->lock);
+	MutexLocker locker(&cache->lock);
 	if (cache->last_transaction && cache->last_transaction->open) {
 		panic("last transaction (%ld) still open!\n",
@@ -1694,7 +1689,7 @@ extern "C" status_t
 cache_sync_transaction(void *_cache, int32 id)
 {
 	block_cache *cache = (block_cache *)_cache;
-	BenaphoreLocker locker(&cache->lock);
+	MutexLocker locker(&cache->lock);
 	status_t status = B_ENTRY_NOT_FOUND;
 	TRACE(("cache_sync_transaction(id %ld)\n", id));
@@ -1762,7 +1757,7 @@ cache_end_transaction(void *_cache, int32 id,
 	transaction_notification_hook hook, void *data)
 {
 	block_cache *cache = (block_cache *)_cache;
-	BenaphoreLocker locker(&cache->lock);
+	MutexLocker locker(&cache->lock);
 	TRACE(("cache_end_transaction(id = %ld)\n", id));
@@ -1819,7 +1814,7 @@ extern "C" status_t
 cache_abort_transaction(void *_cache, int32 id)
 {
 	block_cache *cache = (block_cache *)_cache;
-	BenaphoreLocker locker(&cache->lock);
+	MutexLocker locker(&cache->lock);
 	TRACE(("cache_abort_transaction(id = %ld)\n", id));
@@ -1870,7 +1865,7 @@ cache_detach_sub_transaction(void *_cache, int32 id,
 	transaction_notification_hook hook, void *data)
 {
 	block_cache *cache = (block_cache *)_cache;
-	BenaphoreLocker locker(&cache->lock);
+	MutexLocker locker(&cache->lock);
 	TRACE(("cache_detach_sub_transaction(id = %ld)\n", id));
@@ -1958,7 +1953,7 @@ extern "C" status_t
 cache_abort_sub_transaction(void *_cache, int32 id)
 {
 	block_cache *cache = (block_cache *)_cache;
-	BenaphoreLocker locker(&cache->lock);
+	MutexLocker locker(&cache->lock);
 	TRACE(("cache_abort_sub_transaction(id = %ld)\n", id));
@@ -2009,7 +2004,7 @@ extern "C" status_t
 cache_start_sub_transaction(void *_cache, int32 id)
 {
 	block_cache *cache = (block_cache *)_cache;
-	BenaphoreLocker locker(&cache->lock);
+	MutexLocker locker(&cache->lock);
 	TRACE(("cache_start_sub_transaction(id = %ld)\n", id));
@@ -2060,7 +2055,7 @@ cache_add_transaction_listener(void *_cache, int32 id, int32 events,
 {
 	block_cache *cache = (block_cache *)_cache;
-	BenaphoreLocker locker(&cache->lock);
+	MutexLocker locker(&cache->lock);
 	cache_transaction *transaction = lookup_transaction(cache, id);
 	if (transaction == NULL)
@@ -2076,7 +2071,7 @@ cache_remove_transaction_listener(void *_cache, int32 id,
 {
 	block_cache *cache = (block_cache *)_cache;
-	BenaphoreLocker locker(&cache->lock);
+	MutexLocker locker(&cache->lock);
 	cache_transaction *transaction = lookup_transaction(cache, id);
 	if (transaction == NULL)
@@ -2109,7 +2104,7 @@ cache_next_block_in_transaction(void *_cache, int32 id, bool mainOnly,
 	cached_block *block = (cached_block *)*_cookie;
 	block_cache *cache = (block_cache *)_cache;
-	BenaphoreLocker locker(&cache->lock);
+	MutexLocker locker(&cache->lock);
 	cache_transaction *transaction = lookup_transaction(cache, id);
 	if (transaction == NULL || !transaction->open)
@@ -2145,7 +2140,7 @@ extern "C" int32
 cache_blocks_in_transaction(void *_cache, int32 id)
 {
 	block_cache *cache = (block_cache *)_cache;
-	BenaphoreLocker locker(&cache->lock);
+	MutexLocker locker(&cache->lock);
 	cache_transaction *transaction = lookup_transaction(cache, id);
 	if (transaction == NULL)
@@ -2159,7 +2154,7 @@ extern "C" int32
 cache_blocks_in_main_transaction(void *_cache, int32 id)
 {
 	block_cache *cache = (block_cache *)_cache;
-	BenaphoreLocker locker(&cache->lock);
+	MutexLocker locker(&cache->lock);
 	cache_transaction *transaction = lookup_transaction(cache, id);
 	if (transaction == NULL)
@@ -2173,7 +2168,7 @@ extern "C" int32
 cache_blocks_in_sub_transaction(void *_cache, int32 id)
 {
 	block_cache *cache = (block_cache *)_cache;
-	BenaphoreLocker locker(&cache->lock);
+	MutexLocker locker(&cache->lock);
 	cache_transaction *transaction = lookup_transaction(cache, id);
 	if (transaction == NULL)
@@ -2194,7 +2189,7 @@ block_cache_delete(void *_cache, bool allowWrites)
 	if (allowWrites)
 		block_cache_sync(cache);
-	BenaphoreLocker locker(&cache->lock);
+	MutexLocker locker(&cache->lock);
 	// free all blocks
@@ -2243,7 +2238,7 @@ block_cache_sync(void *_cache)
 	// we will sync all dirty blocks to disk that have a completed
 	// transaction or no transaction only
-	BenaphoreLocker locker(&cache->lock);
+	MutexLocker locker(&cache->lock);
 	hash_iterator iterator;
 	hash_open(cache->hash, &iterator);
@@ -2281,7 +2276,7 @@ block_cache_sync_etc(void *_cache, off_t blockNumber, size_t numBlocks)
 		return B_BAD_VALUE;
 	}
-	BenaphoreLocker locker(&cache->lock);
+	MutexLocker locker(&cache->lock);
 	for (; numBlocks > 0; numBlocks--, blockNumber++) {
 		cached_block *block = (cached_block *)hash_lookup(cache->hash,
@@ -2312,7 +2307,7 @@ extern "C" status_t
 block_cache_make_writable(void *_cache, off_t blockNumber, int32 transaction)
 {
 	block_cache *cache = (block_cache *)_cache;
-	BenaphoreLocker locker(&cache->lock);
+	MutexLocker locker(&cache->lock);
 	if (cache->read_only)
 		panic("tried to make block writable on a read-only cache!");
@@ -2334,7 +2329,7 @@ block_cache_get_writable_etc(void *_cache, off_t blockNumber, off_t base,
 	off_t length, int32 transaction)
 {
 	block_cache *cache = (block_cache *)_cache;
-	BenaphoreLocker locker(&cache->lock);
+	MutexLocker locker(&cache->lock);
 	TRACE(("block_cache_get_writable_etc(block = %Ld, transaction = %ld)\n",
 		blockNumber, transaction));
@@ -2358,7 +2353,7 @@ extern "C" void *
 block_cache_get_empty(void *_cache, off_t blockNumber, int32 transaction)
 {
 	block_cache *cache = (block_cache *)_cache;
-	BenaphoreLocker locker(&cache->lock);
+	MutexLocker locker(&cache->lock);
 	TRACE(("block_cache_get_empty(block = %Ld, transaction = %ld)\n",
 		blockNumber, transaction));
@@ -2374,7 +2369,7 @@ extern "C" const void *
 block_cache_get_etc(void *_cache, off_t blockNumber, off_t base, off_t length)
 {
 	block_cache *cache = (block_cache *)_cache;
-	BenaphoreLocker locker(&cache->lock);
+	MutexLocker locker(&cache->lock);
 	bool allocated;
 	cached_block *block = get_cached_block(cache, blockNumber, &allocated);
@@ -2410,7 +2405,7 @@ block_cache_set_dirty(void *_cache, off_t blockNumber, bool dirty,
 	int32 transaction)
 {
 	block_cache *cache = (block_cache *)_cache;
-	BenaphoreLocker locker(&cache->lock);
+	MutexLocker locker(&cache->lock);
 	cached_block *block = (cached_block *)hash_lookup(cache->hash,
 		&blockNumber);
@@ -2433,7 +2428,7 @@ extern "C" void
 block_cache_put(void *_cache, off_t blockNumber)
 {
 	block_cache *cache = (block_cache *)_cache;
-	BenaphoreLocker locker(&cache->lock);
+	MutexLocker locker(&cache->lock);
 	put_cached_block(cache, blockNumber);
 }
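One knock-on effect visible in the block cache hunks: block_cache::InitCheck() loses its lock check, because the old benaphore exposed a sem field that had to be validated while the mutex does not. Roughly, the method reduces to the remaining allocation checks (a sketch based on the lines above; the trailing return B_OK is assumed, not shown in the hunk):

status_t
block_cache::InitCheck()
{
	// Only the allocations can fail now; the mutex itself needs no check.
	if (buffer_cache == NULL || hash == NULL || transaction_hash == NULL)
		return B_NO_MEMORY;

	return B_OK;
}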

View File

@@ -133,7 +133,7 @@ class Inode {
 		void SetModificationTime(time_t modificationTime)
 			{ fModificationTime = modificationTime; }
-		benaphore *RequestLock() { return &fRequestLock; }
+		mutex *RequestLock() { return &fRequestLock; }
 		status_t WriteDataToBuffer(const void *data, size_t *_length,
 			bool nonBlocking);
@@ -168,7 +168,7 @@ class Inode {
 		ReadRequestList fReadRequests;
 		WriteRequestList fWriteRequests;
-		benaphore fRequestLock;
+		mutex fRequestLock;
 		ConditionVariable fWriteCondition;
@@ -306,7 +306,7 @@ Inode::Inode()
 	fWriteSelectSyncPool(NULL)
 {
 	fWriteCondition.Publish(this, "pipe");
-	benaphore_init(&fRequestLock, "pipe request");
+	mutex_init(&fRequestLock, "pipe request");
 	fCreationTime = fModificationTime = time(NULL);
 }
@@ -315,22 +315,18 @@ Inode::Inode()
 Inode::~Inode()
 {
 	fWriteCondition.Unpublish();
-	benaphore_destroy(&fRequestLock);
+	mutex_destroy(&fRequestLock);
 }
 status_t
 Inode::InitCheck()
 {
-	if (fRequestLock.sem < B_OK)
-		return B_ERROR;
 	return B_OK;
 }
-/*!
-	Writes the specified data bytes to the inode's ring buffer. The
+/*!	Writes the specified data bytes to the inode's ring buffer. The
 	request lock must be held when calling this method.
 	Notifies readers if necessary, so that blocking readers will get started.
 	Returns B_OK for success, B_BAD_ADDRESS if copying from the buffer failed,
@@ -367,9 +363,9 @@ Inode::WriteDataToBuffer(const void *_data, size_t *_length, bool nonBlocking)
 		WriteRequest request(minToWrite);
 		fWriteRequests.Add(&request);
-		benaphore_unlock(&fRequestLock);
+		mutex_unlock(&fRequestLock);
 		status_t status = entry.Wait();
-		benaphore_lock(&fRequestLock);
+		mutex_lock(&fRequestLock);
 		fWriteRequests.Remove(&request);
@@ -475,9 +471,9 @@ Inode::WaitForReadRequest(ReadRequest &request)
 		THREAD_BLOCK_TYPE_OTHER, "fifo read request");
 	// wait
-	benaphore_unlock(&fRequestLock);
+	mutex_unlock(&fRequestLock);
 	status_t status = thread_block();
-	benaphore_lock(&fRequestLock);
+	mutex_lock(&fRequestLock);
 	return status;
 }
@@ -567,7 +563,7 @@ Inode::NotifyEndClosed(bool writer)
 void
 Inode::Open(int openMode)
 {
-	BenaphoreLocker locker(RequestLock());
+	MutexLocker locker(RequestLock());
 	if ((openMode & O_ACCMODE) == O_WRONLY)
 		fWriterCount++;
@@ -592,7 +588,7 @@ Inode::Close(int openMode)
 {
 	TRACE(("Inode::Close(openMode = %d)\n", openMode));
-	BenaphoreLocker locker(RequestLock());
+	MutexLocker locker(RequestLock());
 	if ((openMode & O_ACCMODE) == O_WRONLY && --fWriterCount == 0)
 		NotifyEndClosed(true);
@@ -761,7 +757,7 @@ fifo_read(fs_volume *_volume, fs_vnode *_node, void *_cookie,
 	if ((cookie->open_mode & O_RWMASK) != O_RDONLY)
 		return B_NOT_ALLOWED;
-	BenaphoreLocker locker(inode->RequestLock());
+	MutexLocker locker(inode->RequestLock());
 	if (inode->IsActive() && inode->WriterCount() == 0) {
 		// as long there is no writer, and the pipe is empty,
@@ -805,7 +801,7 @@ fifo_write(fs_volume *_volume, fs_vnode *_node, void *_cookie,
 	if ((cookie->open_mode & O_RWMASK) != O_WRONLY)
 		return B_NOT_ALLOWED;
-	BenaphoreLocker locker(inode->RequestLock());
+	MutexLocker locker(inode->RequestLock());
 	size_t length = *_length;
 	if (length == 0)
@@ -837,7 +833,7 @@ fifo_read_stat(fs_volume *volume, fs_vnode *vnode, struct ::stat *st)
 		return error;
-	BenaphoreLocker locker(fifo->RequestLock());
+	MutexLocker locker(fifo->RequestLock());
 	st->st_size = fifo->BytesAvailable();
@@ -909,7 +905,7 @@ fifo_select(fs_volume *_volume, fs_vnode *_node, void *_cookie,
 	if (!inode)
 		return B_ERROR;
-	BenaphoreLocker locker(inode->RequestLock());
+	MutexLocker locker(inode->RequestLock());
 	return inode->Select(event, sync, cookie->open_mode);
 }
@@ -925,7 +921,7 @@ fifo_deselect(fs_volume *_volume, fs_vnode *_node, void *_cookie,
 	if (!inode)
 		return B_ERROR;
-	BenaphoreLocker locker(inode->RequestLock());
+	MutexLocker locker(inode->RequestLock());
 	return inode->Deselect(event, sync, cookie->open_mode);
 }
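The FIFO hunks also show the one spot where the scoped MutexLocker is not used: when a reader has to block, the request lock is released by hand around the wait and re-acquired afterwards, just as the benaphore was before. A condensed sketch of that pattern from Inode::WaitForReadRequest() (the preceding blocking setup call, whose THREAD_BLOCK_TYPE_OTHER arguments appear above, is omitted here):

status_t
Inode::WaitForReadRequest(ReadRequest &request)
{
	// fRequestLock is held on entry. It has to be dropped while this
	// thread blocks, otherwise no writer could ever make progress and
	// wake the reader up again.
	mutex_unlock(&fRequestLock);

	status_t status = thread_block();

	// Re-acquire the lock before the caller touches the request lists again.
	mutex_lock(&fRequestLock);
	return status;
}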

View File

@@ -207,7 +207,7 @@ static mutex sVnodeMutex;
 	The only operation allowed while holding this lock besides getting or
 	setting the field is inc_vnode_ref_count() on io_context::root.
 */
-static benaphore sIOContextRootLock;
+static mutex sIOContextRootLock;
 #define VNODE_HASH_TABLE_SIZE 1024
 static hash_table *sVnodeTable;
@@ -1468,7 +1468,7 @@ replace_vnode_if_disconnected(struct fs_mount* mount,
 	struct vnode* fallBack, bool lockRootLock)
 {
 	if (lockRootLock)
-		benaphore_lock(&sIOContextRootLock);
+		mutex_lock(&sIOContextRootLock);
 	struct vnode* obsoleteVnode = NULL;
@@ -1487,7 +1487,7 @@ replace_vnode_if_disconnected(struct fs_mount* mount,
 	}
 	if (lockRootLock)
-		benaphore_unlock(&sIOContextRootLock);
+		mutex_unlock(&sIOContextRootLock);
 	if (obsoleteVnode != NULL)
 		put_vnode(obsoleteVnode);
@@ -1601,13 +1601,13 @@ get_root_vnode(bool kernel)
 	// Get current working directory from io context
 	struct io_context* context = get_current_io_context(kernel);
-	benaphore_lock(&sIOContextRootLock);
+	mutex_lock(&sIOContextRootLock);
 	struct vnode* root = context->root;
 	if (root != NULL)
 		inc_vnode_ref_count(root);
-	benaphore_unlock(&sIOContextRootLock);
+	mutex_unlock(&sIOContextRootLock);
 	if (root != NULL)
 		return root;
@@ -1971,10 +1971,10 @@ vnode_path_to_vnode(struct vnode *vnode, char *path, bool traverseLeafLink,
 		while (*++path == '/')
 			;
-		benaphore_lock(&sIOContextRootLock);
+		mutex_lock(&sIOContextRootLock);
 		vnode = ioContext->root;
 		inc_vnode_ref_count(vnode);
-		benaphore_unlock(&sIOContextRootLock);
+		mutex_unlock(&sIOContextRootLock);
 		absoluteSymlink = true;
 	}
@@ -4156,11 +4156,11 @@ vfs_new_io_context(void *_parentContext)
 	mutex_lock(&parentContext->io_mutex);
-	benaphore_lock(&sIOContextRootLock);
+	mutex_lock(&sIOContextRootLock);
 	context->root = parentContext->root;
 	if (context->root)
 		inc_vnode_ref_count(context->root);
-	benaphore_unlock(&sIOContextRootLock);
+	mutex_unlock(&sIOContextRootLock);
 	context->cwd = parentContext->cwd;
 	if (context->cwd)
@@ -4413,9 +4413,7 @@ vfs_init(kernel_args *args)
 	mutex_init(&sMountMutex, "vfs_mount_lock");
 	mutex_init(&sVnodeCoveredByMutex, "vfs_vnode_covered_by_lock");
 	mutex_init(&sVnodeMutex, "vfs_vnode_lock");
-	if (benaphore_init(&sIOContextRootLock, "io_context::root lock") < 0)
-		panic("vfs_init: error allocating io_context::root lock\n");
+	mutex_init(&sIOContextRootLock, "io_context::root lock");
 	if (block_cache_init() != B_OK)
 		return B_ERROR;
@@ -6470,9 +6468,9 @@ fs_mount(char *path, const char *device, const char *fsName, uint32 flags,
 	if (!sRoot) {
 		sRoot = mount->root_vnode;
-		benaphore_lock(&sIOContextRootLock);
+		mutex_lock(&sIOContextRootLock);
 		get_current_io_context(true)->root = sRoot;
-		benaphore_unlock(&sIOContextRootLock);
+		mutex_unlock(&sIOContextRootLock);
 		inc_vnode_ref_count(sRoot);
 	}
@@ -8705,10 +8703,10 @@ _user_change_root(const char *userPath)
 	// set the new root
 	struct io_context* context = get_current_io_context(false);
-	benaphore_lock(&sIOContextRootLock);
+	mutex_lock(&sIOContextRootLock);
 	struct vnode* oldRoot = context->root;
 	context->root = vnode;
-	benaphore_unlock(&sIOContextRootLock);
+	mutex_unlock(&sIOContextRootLock);
 	put_vnode(oldRoot);
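The vfs.cpp hunks all keep the same discipline with the new lock type: io_context::root is only read or replaced while sIOContextRootLock is held, and vnode reference counts are adjusted inside the critical section. A minimal sketch of that access pattern, mirroring the get_root_vnode() hunk above (the helper name is made up for illustration):

static struct vnode*
get_io_context_root(struct io_context* context)
{
	// Take a reference to context->root under sIOContextRootLock, so the
	// vnode cannot be swapped out (e.g. by _user_change_root()) while we
	// are still using it.
	mutex_lock(&sIOContextRootLock);

	struct vnode* root = context->root;
	if (root != NULL)
		inc_vnode_ref_count(root);

	mutex_unlock(&sIOContextRootLock);
	return root;
}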