* Moved the vnode structure (to be a Vnode class at some time in the future)
  into its own header/source files.
* Replaced vnode's bit fields with flags in a single, atomically changeable
  int32. Added respective accessor methods.
* Added a per-vnode mutex-like lock, implemented with 2 bits of the new flags
  field plus 32 global "buckets", each holding a mutex and a waiter list for
  the vnode locks.
* Reorganized the VFS locking a bit:
  Renamed sVnodeMutex to sVnodeLock and made it an r/w lock. In most
  situations it is now only read-locked to reduce contention (see the sketch
  after this list). The per-vnode locks guard the fields of the vnode
  structure, and the newly introduced sUnusedVnodesLock has taken over
  guarding the unused vnodes list.
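
Since the vfs.cpp diff is suppressed below, here is a minimal sketch of the
intended usage pattern under the new locking scheme. It is an illustration
only, not code from this commit; the function name and the exact sequence are
assumptions:

// Hypothetical caller in vfs.cpp: sVnodeLock is only read-locked, so threads
// working on different vnodes don't contend on it; the per-vnode lock
// serializes access to this vnode's mutable fields.
static void
example_mark_vnode_removed(struct vnode* vnode)
{
	ReadLocker vnodesLocker(sVnodeLock);

	vnode->Lock();
	if (!vnode->IsBusy())
		vnode->SetRemoved(true);
	vnode->Unlock();
}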

The main intent of the changes was to reduce contention on sVnodeMutex, which
was partially successful. In my standard -j8 Haiku image build test, the new
sUnusedVnodesLock took over about a fourth of the former sVnodeMutex
contention, while sVnodeLock and the per-vnode locks now have virtually no
contention to speak of. A lot of contention migrated to the unrelated "pages"
mutex (another bottleneck). The overall build time dropped by about 10%.



git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@34865 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold 2010-01-02 23:27:41 +00:00
parent 82cbfd3573
commit 8ccbb7815c
4 changed files with 659 additions and 252 deletions

src/system/kernel/fs/Jamfile

@@ -14,6 +14,7 @@ KernelMergeObject kernel_fs.o :
 	node_monitor.cpp
 	rootfs.cpp
 	socket.cpp
+	Vnode.cpp
 	vfs.cpp
 	vfs_boot.cpp
 	vfs_net_boot.cpp

src/system/kernel/fs/Vnode.cpp (new file)

@@ -0,0 +1,93 @@
/*
* Copyright 2009, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#include "Vnode.h"
#include <util/AutoLock.h>
vnode::Bucket vnode::sBuckets[kBucketCount];
vnode::Bucket::Bucket()
{
mutex_init(&lock, "vnode bucket");
}
/*static*/ void
vnode::StaticInit()
{
for (uint32 i = 0; i < kBucketCount; i++)
new(&sBuckets[i]) Bucket;
}


void
vnode::_WaitForLock()
{
	LockWaiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.vnode = this;

	Bucket& bucket = _Bucket();
	MutexLocker bucketLocker(bucket.lock);

	if ((atomic_or(&fFlags, kFlagsWaitingLocker)
			& (kFlagsLocked | kFlagsWaitingLocker)) == 0) {
		// The lock holder dropped it in the meantime and no-one else was
		// faster than us, so it's ours now. Just mark the node locked and
		// clear the waiting flag again.
		atomic_or(&fFlags, kFlagsLocked);
		atomic_and(&fFlags, ~kFlagsWaitingLocker);
		return;
	}

	// prepare for waiting
	bucket.waiters.Add(&waiter);
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_OTHER,
		"vnode lock");

	// start waiting
	bucketLocker.Unlock();
	thread_block();
}


void
vnode::_WakeUpLocker()
{
	Bucket& bucket = _Bucket();
	MutexLocker bucketLocker(bucket.lock);

	// mark the node locked again
	atomic_or(&fFlags, kFlagsLocked);

	// get the first waiter from the list
	LockWaiter* waiter = NULL;
	bool onlyWaiter = true;
	for (LockWaiterList::Iterator it = bucket.waiters.GetIterator();
			LockWaiter* someWaiter = it.Next();) {
		if (someWaiter->vnode == this) {
			if (waiter != NULL) {
				onlyWaiter = false;
				break;
			}
			waiter = someWaiter;
			it.Remove();
		}
	}

	ASSERT(waiter != NULL);

	// if that's the only waiter, clear the flag
	if (onlyWaiter)
		atomic_and(&fFlags, ~kFlagsWaitingLocker);

	// and wake it up
	InterruptsSpinLocker threadLocker(gThreadSpinlock);
	thread_unblock_locked(waiter->thread, B_OK);
}
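
The locking protocol deserves a note: Unlock() never clears
kFlagsWaitingLocker itself, and _WakeUpLocker() re-sets kFlagsLocked on
behalf of the waiter it wakes, so the lock is handed over directly, without a
window in which a third thread could barge in. The following is a rough
userland model of the same two-bit protocol, using std::atomic and a
condition variable in place of the kernel's atomics and per-bucket waiter
lists (the kernel wakes one specific waiter; this model lets any waiter
consume the handoff). It is an illustration, not Haiku code:

#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <mutex>

// Userland model of the two-bit vnode lock protocol; illustration only.
struct ModelLock {
	static constexpr uint32_t kLocked = 0x1;
	static constexpr uint32_t kWaiting = 0x2;

	std::atomic<uint32_t> flags{0};
	std::mutex bucketLock;				// stands in for Bucket::lock
	std::condition_variable waiters;	// stands in for Bucket::waiters
	int waiterCount = 0;	// queued waiters (guarded by bucketLock)
	int handoffs = 0;		// handed-over locks not yet consumed

	void Lock()
	{
		// fast path: if neither flag was set, the lock is now ours
		if ((flags.fetch_or(kLocked) & (kLocked | kWaiting)) == 0)
			return;

		std::unique_lock<std::mutex> locker(bucketLock);

		// as in _WaitForLock(): the holder may have dropped the lock in the
		// meantime -- if both flags were clear, grab it directly
		if ((flags.fetch_or(kWaiting) & (kLocked | kWaiting)) == 0) {
			flags.fetch_or(kLocked);
			flags.fetch_and(~kWaiting);
			return;
		}

		// wait until an unlocker hands the lock over; it re-sets kLocked
		// on our behalf, as _WakeUpLocker() does
		waiterCount++;
		waiters.wait(locker, [this] { return handoffs > 0; });
		handoffs--;
		waiterCount--;
		if (waiterCount == 0)
			flags.fetch_and(~kWaiting);
	}

	void Unlock()
	{
		// fast path: nobody is waiting, just clear the locked flag
		if ((flags.fetch_and(~kLocked) & kWaiting) == 0)
			return;

		// slow path, as in _WakeUpLocker(): mark the lock held again on
		// behalf of one waiter, then wake it
		std::unique_lock<std::mutex> locker(bucketLock);
		if (waiterCount > handoffs) {
			flags.fetch_or(kLocked);
			handoffs++;
			waiters.notify_one();
		}
	}
};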

src/system/kernel/fs/Vnode.h (new file)

@@ -0,0 +1,203 @@
/*
 * Copyright 2009, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */
#ifndef VNODE_H
#define VNODE_H


#include <fs_interface.h>

#include <util/DoublyLinkedList.h>
#include <util/list.h>

#include <lock.h>
#include <thread.h>


struct advisory_locking;
struct file_descriptor;
struct fs_mount;
struct VMCache;


typedef struct vnode Vnode;


struct vnode : fs_vnode, DoublyLinkedListLinkImpl<vnode> {
	struct vnode*		next;
	VMCache*			cache;
	struct fs_mount*	mount;
	struct vnode*		covered_by;
	struct advisory_locking* advisory_locking;
	struct file_descriptor* mandatory_locked_by;
	list_link			unused_link;
	ino_t				id;
	dev_t				device;
	int32				ref_count;

public:
	inline	bool		IsBusy() const;
	inline	void		SetBusy(bool busy);

	inline	bool		IsRemoved() const;
	inline	void		SetRemoved(bool removed);

	inline	bool		IsUnpublished() const;
	inline	void		SetUnpublished(bool unpublished);

	inline	uint32		Type() const;
	inline	void		SetType(uint32 type);

	inline	bool		Lock();
	inline	void		Unlock();

	static	void		StaticInit();

private:
	static const uint32 kFlagsLocked		= 0x00000001;
	static const uint32 kFlagsWaitingLocker	= 0x00000002;
	static const uint32 kFlagsBusy			= 0x00000004;
	static const uint32 kFlagsRemoved		= 0x00000008;
	static const uint32 kFlagsUnpublished	= 0x00000010;
	static const uint32 kFlagsType			= 0xfffff000;

	static const uint32 kBucketCount = 32;

	struct LockWaiter : DoublyLinkedListLinkImpl<LockWaiter> {
		LockWaiter*		next;
		struct thread*	thread;
		struct vnode*	vnode;
	};

	typedef DoublyLinkedList<LockWaiter> LockWaiterList;

	struct Bucket {
		mutex			lock;
		LockWaiterList	waiters;

		Bucket();
	};

private:
	inline	Bucket&		_Bucket() const;

			void		_WaitForLock();
			void		_WakeUpLocker();

private:
	vint32				fFlags;

	static	Bucket		sBuckets[kBucketCount];
};


bool
vnode::IsBusy() const
{
	return (fFlags & kFlagsBusy) != 0;
}


void
vnode::SetBusy(bool busy)
{
	if (busy)
		atomic_or(&fFlags, kFlagsBusy);
	else
		atomic_and(&fFlags, ~kFlagsBusy);
}


bool
vnode::IsRemoved() const
{
	return (fFlags & kFlagsRemoved) != 0;
}


void
vnode::SetRemoved(bool removed)
{
	if (removed)
		atomic_or(&fFlags, kFlagsRemoved);
	else
		atomic_and(&fFlags, ~kFlagsRemoved);
}


bool
vnode::IsUnpublished() const
{
	return (fFlags & kFlagsUnpublished) != 0;
}


void
vnode::SetUnpublished(bool unpublished)
{
	if (unpublished)
		atomic_or(&fFlags, kFlagsUnpublished);
	else
		atomic_and(&fFlags, ~kFlagsUnpublished);
}


uint32
vnode::Type() const
{
	return (uint32)fFlags & kFlagsType;
}


void
vnode::SetType(uint32 type)
{
	atomic_and(&fFlags, ~kFlagsType);
	atomic_or(&fFlags, type & kFlagsType);
}


/*!	Locks the vnode.
	The caller must hold sVnodeLock (at least read locked) and must continue
	to hold it until calling Unlock(). After acquiring the lock the caller is
	allowed write access to the vnode's mutable fields, unless the vnode has
	been marked busy by someone else.
	Since every locker is required to hold sVnodeLock at least read locked,
	write locking sVnodeLock grants the same write access to *any* vnode
	without acquiring the individual vnode locks.
	The vnode's lock should be held only for a short time. It can be held
	while acquiring sUnusedVnodesLock.
	\return Always \c true.
*/
bool
vnode::Lock()
{
	// fast path: set the locked flag; if neither the locked nor the waiting
	// flag was set before, the lock is ours
	if ((atomic_or(&fFlags, kFlagsLocked)
			& (kFlagsLocked | kFlagsWaitingLocker)) != 0) {
		_WaitForLock();
	}

	return true;
}


void
vnode::Unlock()
{
	// clear the locked flag; if someone is waiting, hand the lock over
	if ((atomic_and(&fFlags, ~kFlagsLocked) & kFlagsWaitingLocker) != 0)
		_WakeUpLocker();
}


vnode::Bucket&
vnode::_Bucket() const
{
	return sBuckets[((addr_t)this / 64) % kBucketCount];
		// The vnode structure is somewhat larger than 64 bytes (on 32 bit
		// archs), so subsequently allocated vnodes fall into different
		// buckets. How exactly the vnodes are distributed depends on the
		// allocator -- a dedicated slab would be perfect.
}


#endif	// VNODE_H
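
The divisor 64 in _Bucket() is what spreads neighboring vnodes across the 32
buckets. A quick worked example, using made-up addresses and assuming a
72-byte vnode allocated back to back:

	0x1000 / 64 = 64, 64 % 32 = 0  -> bucket 0
	0x1048 / 64 = 65, 65 % 32 = 1  -> bucket 1
	0x1090 / 64 = 66, 66 % 32 = 2  -> bucket 2

Any structure size above 64 bytes ensures that adjacently placed vnodes land
in different buckets, which keeps the waiter lists short and the bucket
mutexes largely uncontended.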

src/system/kernel/fs/vfs.cpp: file diff suppressed because it is too large.