* Removed the old mutex implementation and renamed cutex to mutex.
* Trivial adjustments of code using mutexes, mostly removing the mutex_init() return value check.
* Added mutex_lock_threads_locked(), which is called with the threads spinlock held. The spinlock is released while waiting, of course. This function is useful in cases where the existence of the mutex object is ensured by holding the threads spinlock.
* Changed the two instances in the VFS code where the IO context of another team needs to be locked to use mutex_lock_threads_locked(). Previously this required a semaphore-based mutex implementation.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@25283 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
parent 0ddd7ea66e
commit 0c615a01ae
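Before the diff itself, here is a minimal usage sketch of the reworked locking API this commit introduces. It is not part of the commit: only the mutex type and the mutex_init()/mutex_lock()/mutex_unlock()/mutex_lock_threads_locked() calls come from the patch below; the surrounding example code, the sExampleLock name, and the plain <lock.h> include are hypothetical.

```c
#include <lock.h>

static mutex sExampleLock;

static void
example_init(void)
{
	// The new mutex_init() returns void -- it cannot fail, which is why the
	// patch below removes all the old return-value checks and panics.
	mutex_init(&sExampleLock, "example lock");
}

static void
example_work(void)
{
	mutex_lock(&sExampleLock);
	// ... critical section ...
	mutex_unlock(&sExampleLock);
}

static status_t
example_lock_with_threads_lock_held(mutex* lock)
{
	// Caller holds the threads spinlock, which guarantees that *lock still
	// exists. The spinlock is released while we block and the thread lock is
	// held again when the call returns.
	return mutex_lock_threads_locked(lock);
}
```

The design point of mutex_lock_threads_locked() is visible in the VFS hunks below: holding the threads spinlock pins the io_context of another team, so the mutex inside it can be locked safely without the old semaphore acrobatics.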
@@ -1,4 +1,5 @@
 /*
+ * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
  * Distributed under the terms of the MIT License.
  *
@@ -18,11 +19,6 @@ typedef struct recursive_lock {
     int recursion;
 } recursive_lock;
 
-typedef struct mutex {
-    sem_id sem;
-    thread_id holder;
-} mutex;
-
 typedef struct benaphore {
     sem_id sem;
     int32 count;
@@ -39,31 +35,29 @@ typedef struct rw_lock {
 
 #define RW_MAX_READERS 1000000
 
-struct cutex_waiter;
+struct mutex_waiter;
 
-typedef struct cutex {
+typedef struct mutex {
     const char* name;
-    struct cutex_waiter* waiters;
+    struct mutex_waiter* waiters;
 #ifdef KDEBUG
     thread_id holder;
 #else
     int32 count;
 #endif
     uint8 flags;
-} cutex;
+} mutex;
 
-#define CUTEX_FLAG_CLONE_NAME 0x1
+#define MUTEX_FLAG_CLONE_NAME 0x1
 
 
 #if 0 && KDEBUG // XXX disable this for now, it causes problems when including thread.h here
 #   include <thread.h>
 #define ASSERT_LOCKED_RECURSIVE(r) { ASSERT(thread_get_current_thread_id() == (r)->holder); }
 #define ASSERT_LOCKED_MUTEX(m) { ASSERT(thread_get_current_thread_id() == (m)->holder); }
-#define ASSERT_LOCKED_CUTEX(m) { ASSERT(thread_get_current_thread_id() == (m)->holder); }
 #else
 #define ASSERT_LOCKED_RECURSIVE(r)
 #define ASSERT_LOCKED_MUTEX(m)
-#define ASSERT_LOCKED_CUTEX(m)
 #endif
 
 
@@ -77,12 +71,6 @@ extern status_t recursive_lock_lock(recursive_lock *lock);
 extern void recursive_lock_unlock(recursive_lock *lock);
 extern int32 recursive_lock_get_recursion(recursive_lock *lock);
 
-extern status_t mutex_init(mutex *m, const char *name);
-extern void mutex_destroy(mutex *m);
-extern status_t mutex_trylock(mutex *mutex);
-extern status_t mutex_lock(mutex *m);
-extern void mutex_unlock(mutex *m);
-
 extern status_t benaphore_init(benaphore *ben, const char *name);
 extern void benaphore_destroy(benaphore *ben);
 
@@ -121,35 +109,48 @@ extern status_t rw_lock_read_unlock(rw_lock *lock);
 extern status_t rw_lock_write_lock(rw_lock *lock);
 extern status_t rw_lock_write_unlock(rw_lock *lock);
 
-extern void cutex_init(cutex* lock, const char *name);
-    // name is *not* cloned nor freed in cutex_destroy()
-extern void cutex_init_etc(cutex* lock, const char *name, uint32 flags);
-extern void cutex_destroy(cutex* lock);
+extern void mutex_init(mutex* lock, const char *name);
+    // name is *not* cloned nor freed in mutex_destroy()
+extern void mutex_init_etc(mutex* lock, const char *name, uint32 flags);
+extern void mutex_destroy(mutex* lock);
 
 // implementation private:
-extern status_t _cutex_lock(cutex* lock);
-extern void _cutex_unlock(cutex* lock);
-extern status_t _cutex_trylock(cutex* lock);
+extern status_t _mutex_lock(mutex* lock, bool threadsLocked);
+extern void _mutex_unlock(mutex* lock);
+extern status_t _mutex_trylock(mutex* lock);
 
 
 static inline status_t
-cutex_lock(cutex* lock)
+mutex_lock(mutex* lock)
 {
 #ifdef KDEBUG
-    return _cutex_lock(lock);
+    return _mutex_lock(lock, false);
 #else
     if (atomic_add(&lock->count, -1) < 0)
-        return _cutex_lock(lock);
+        return _mutex_lock(lock, false);
     return B_OK;
 #endif
 }
 
 
 static inline status_t
-cutex_trylock(cutex* lock)
+mutex_lock_threads_locked(mutex* lock)
 {
 #ifdef KDEBUG
-    return _cutex_trylock(lock);
+    return _mutex_lock(lock, true);
 #else
+    if (atomic_add(&lock->count, -1) < 0)
+        return _mutex_lock(lock, true);
+    return B_OK;
+#endif
+}
+
+
+static inline status_t
+mutex_trylock(mutex* lock)
+{
+#ifdef KDEBUG
+    return _mutex_trylock(lock);
+#else
     if (atomic_test_and_set(&lock->count, -1, 0) != 0)
         return B_WOULD_BLOCK;
@@ -159,13 +160,13 @@ cutex_trylock(cutex* lock)
 
 
 static inline void
-cutex_unlock(cutex* lock)
+mutex_unlock(mutex* lock)
 {
 #ifdef KDEBUG
-    _cutex_unlock(lock);
+    _mutex_unlock(lock);
 #else
     if (atomic_add(&lock->count, 1) < -1)
-        _cutex_unlock(lock);
+        _mutex_unlock(lock);
 #endif
 }

@@ -62,7 +62,7 @@ enum {
     THREAD_BLOCK_TYPE_CONDITION_VARIABLE = 1,
     THREAD_BLOCK_TYPE_SNOOZE = 2,
     THREAD_BLOCK_TYPE_SIGNAL = 3,
-    THREAD_BLOCK_TYPE_CUTEX = 4,
+    THREAD_BLOCK_TYPE_MUTEX = 4,
 
     THREAD_BLOCK_TYPE_OTHER = 9999,
     THREAD_BLOCK_TYPE_USER_BASE = 10000

@@ -67,24 +67,6 @@ public:
 // BenaphoreLocker
 typedef AutoLocker<benaphore, BenaphoreLocking> BenaphoreLocker;
 
-// CutexLocking
-class CutexLocking {
-public:
-    inline bool Lock(cutex *lockable)
-    {
-        cutex_lock(lockable);
-        return true;
-    }
-
-    inline void Unlock(cutex *lockable)
-    {
-        cutex_unlock(lockable);
-    }
-};
-
-// CutexLocker
-typedef AutoLocker<cutex, CutexLocking> CutexLocker;
-
 // InterruptsLocking
 class InterruptsLocking {
 public:
@@ -171,7 +153,6 @@ using BPrivate::AutoLocker;
 using BPrivate::MutexLocker;
 using BPrivate::RecursiveLocker;
 using BPrivate::BenaphoreLocker;
-using BPrivate::CutexLocker;
 using BPrivate::InterruptsLocker;
 using BPrivate::SpinLocker;
 using BPrivate::InterruptsSpinLocker;

@@ -135,7 +135,7 @@ struct vm_dummy_page : vm_page {
 };
 
 struct vm_cache {
-    cutex lock;
+    mutex lock;
     struct vm_area *areas;
     vint32 ref_count;
     struct list_link consumer_link;

@@ -265,8 +265,7 @@ keyboard_open(const char *name, uint32 flags, void **cookie)
     if (keyboard_sem < 0)
         panic("could not create keyboard sem!\n");
 
-    if (mutex_init(&keyboard_read_mutex, "keyboard_read_mutex") < 0)
-        panic("could not create keyboard read mutex!\n");
+    mutex_init(&keyboard_read_mutex, "keyboard_read_mutex");
 
     shift = false;
     sControl = false;

@@ -51,25 +51,16 @@ init_driver(void)
 
     memset(gDeviceNames, 0, sizeof(gDeviceNames));
 
-    // create the global mutex
-    status_t error = mutex_init(&gGlobalTTYLock, "tty global");
+    // create the request mutex
+    status_t error = recursive_lock_init(&gTTYRequestLock, "tty requests");
     if (error != B_OK)
         return error;
 
-    // create the cookie mutex
-    error = mutex_init(&gTTYCookieLock, "tty cookies");
-    if (error != B_OK) {
-        mutex_destroy(&gGlobalTTYLock);
-        return error;
-    }
+    // create the global mutex
+    mutex_init(&gGlobalTTYLock, "tty global");
 
-    // create the request mutex
-    error = recursive_lock_init(&gTTYRequestLock, "tty requests");
-    if (error != B_OK) {
-        mutex_destroy(&gTTYCookieLock);
-        mutex_destroy(&gGlobalTTYLock);
-        return error;
-    }
+    // create the cookie mutex
+    mutex_init(&gTTYCookieLock, "tty cookies");
 
     // create driver name array and initialize basic TTY structures

@@ -42,13 +42,9 @@ create_master_cookie(master_cookie *&cookie, struct tty *master,
     if (cookie == NULL)
         return B_NO_MEMORY;
 
-    status_t error = mutex_init(&cookie->lock, "tty lock");
-    if (error != B_OK) {
-        free(cookie);
-        return error;
-    }
+    mutex_init(&cookie->lock, "tty lock");
 
-    error = init_tty_cookie(cookie, master, slave, openMode);
+    status_t error = init_tty_cookie(cookie, master, slave, openMode);
     if (error != B_OK) {
         mutex_destroy(&cookie->lock);
         free(cookie);

@@ -1866,7 +1866,7 @@ dump_tty_struct(struct tty& tty)
     kprintf(" open_count: %ld\n", tty.open_count);
     kprintf(" select_pool: %p\n", tty.select_pool);
     kprintf(" pending_eof: %lu\n", tty.pending_eof);
-    kprintf(" lock.sem: %ld\n", tty.lock->sem);
+    kprintf(" lock: %ld\n", &tty.lock);
 
     kprintf(" input_buffer:\n");
     kprintf(" first: %ld\n", tty.input_buffer.first);

@@ -386,14 +386,12 @@ init_log(void)
 
     sLogEntrySem = create_sem(kNumLogEntries, "cache log entries");
     if (sLogEntrySem >= B_OK) {
-        if (mutex_init(&sLock, "log cache module") >= B_OK) {
-            register_kernel_daemon(log_writer_daemon, NULL, kLogWriterFrequency);
-            register_generic_syscall(CACHE_LOG_SYSCALLS, log_control, 1, 0);
+        mutex_init(&sLock, "log cache module");
+        register_kernel_daemon(log_writer_daemon, NULL, kLogWriterFrequency);
+        register_generic_syscall(CACHE_LOG_SYSCALLS, log_control, 1, 0);
 
-            TRACE(("** - log init\n"));
-            return B_OK;
-        }
-        delete_sem(sLogEntrySem);
+        TRACE(("** - log init\n"));
+        return B_OK;
     }
 
     close(sLogFile);

@@ -429,9 +429,6 @@ TCPEndpoint::~TCPEndpoint()
 status_t
 TCPEndpoint::InitCheck() const
 {
-    if (fLock.sem < B_OK)
-        return fLock.sem;
-
     if (fReceiveList.InitCheck() < B_OK)
         return fReceiveList.InitCheck();
 
@@ -2175,7 +2172,9 @@ TCPEndpoint::Dump() const
     kprintf("TCP endpoint %p\n", this);
     kprintf(" state: %s\n", name_for_state(fState));
     kprintf(" flags: 0x%lx\n", fFlags);
-    kprintf(" lock: { sem: %ld, holder: %ld }\n", fLock.sem, fLock.holder);
+#ifdef KDEBUG
+    kprintf(" lock: { %p, holder: %ld }\n", &fLock, fLock.holder);
+#endif
     kprintf(" accept sem: %ld\n", fAcceptSemaphore);
     kprintf(" options: 0x%lx\n", (uint32)fOptions);
     kprintf(" send\n");

@@ -721,13 +721,10 @@ tcp_error_reply(net_protocol* protocol, net_buffer* causedError, uint32 code,
 static status_t
 tcp_init()
 {
-    status_t status = mutex_init(&sEndpointManagersLock,
-        "endpoint managers lock");
+    mutex_init(&sEndpointManagersLock, "endpoint managers lock");
 
-    if (status < B_OK)
-        return status;
-
-    status = gStackModule->register_domain_protocols(AF_INET, SOCK_STREAM, 0,
+    status_t status = gStackModule->register_domain_protocols(AF_INET,
+        SOCK_STREAM, 0,
         "network/protocols/tcp/v1",
         "network/protocols/ipv4/v1",
         NULL);

@@ -9,7 +9,7 @@
 #include <Drivers.h>
 #include <KernelExport.h>
 
-#include <lock.h>
+#include <kernel/lock.h>
 #include <net_stack.h>
 
 #undef ASSERT

@@ -20,8 +20,7 @@ void
 mtx_init(struct mtx *m, const char *name, const char *type, int opts)
 {
     if (opts == MTX_DEF) {
-        if (mutex_init(&m->u.mutex, name) < B_OK)
-            panic("Panic! Dance like it's 1979, we ran out of semaphores");
+        mutex_init_etc(&m->u.mutex, name, MUTEX_FLAG_CLONE_NAME);
     } else if (opts == MTX_RECURSE) {
         if (recursive_lock_init(&m->u.recursive, name) < B_OK)
             panic("Hell just froze as someone was trying to init a recursive mutex.");

@@ -45,10 +45,7 @@ _taskqueue_create(const char *name, int mflags, int fast,
     if (fast) {
         tq->tq_spinlock = 0;
     } else {
-        if (mutex_init(&tq->tq_mutex, name) < B_OK) {
-            free(tq);
-            return NULL;
-        }
+        mutex_init_etc(&tq->tq_mutex, name, MUTEX_FLAG_CLONE_NAME);
     }
 
     strlcpy(tq->tq_name, name, sizeof(tq->tq_name));

@@ -154,9 +154,7 @@ NotificationManager::~NotificationManager()
 status_t
 NotificationManager::_Init()
 {
-    status_t status = mutex_init(&fLock, "notification manager");
-    if (status < B_OK)
-        return status;
+    mutex_init(&fLock, "notification manager");
 
     return fServiceHash.InitCheck();
 }

@@ -282,8 +282,7 @@ generic_vm_physical_page_mapper_init(kernel_args *args,
     memset(virtual_pmappings, 0, sizeof(paddr_chunk_desc *) * num_virtual_chunks);
     first_free_vmapping = 0;
     queue_init(&mapped_paddr_lru);
-    sMutex.sem = -1;
-    sMutex.holder = -1;
+    mutex_init(&sMutex, "iospace_mutex");
     sChunkAvailableSem = -1;
 
     TRACE(("generic_vm_physical_page_mapper_init: done\n"));
@@ -332,7 +331,6 @@ generic_vm_physical_page_mapper_init_post_area(kernel_args *args)
 status_t
 generic_vm_physical_page_mapper_init_post_sem(kernel_args *args)
 {
-    mutex_init(&sMutex, "iospace_mutex");
     sChunkAvailableSem = create_sem(1, "iospace chunk available");
 
     return sChunkAvailableSem >= B_OK ? B_OK : sChunkAvailableSem;

src/system/kernel/cache/file_cache.cpp (34 changed lines)
@@ -119,7 +119,7 @@ reserve_pages(file_cache_ref *ref, size_t reservePages, bool isWrite)
 {
     if (vm_low_memory_state() != B_NO_LOW_MEMORY) {
         vm_cache *cache = ref->cache;
-        cutex_lock(&cache->lock);
+        mutex_lock(&cache->lock);
 
         if (list_is_empty(&cache->consumers) && cache->areas == NULL
             && access_is_sequential(ref)) {
@@ -153,7 +153,7 @@ reserve_pages(file_cache_ref *ref, size_t reservePages, bool isWrite)
             }
         }
         }
-        cutex_unlock(&cache->lock);
+        mutex_unlock(&cache->lock);
     }
 
     vm_page_reserve_pages(reservePages);
@@ -208,7 +208,7 @@ read_into_cache(file_cache_ref *ref, void *cookie, off_t offset,
     }
 
     push_access(ref, offset, bufferSize, false);
-    cutex_unlock(&cache->lock);
+    mutex_unlock(&cache->lock);
     vm_page_unreserve_pages(lastReservedPages);
 
     // read file into reserved pages
@@ -229,7 +229,7 @@ read_into_cache(file_cache_ref *ref, void *cookie, off_t offset,
         }
     }
 
-    cutex_lock(&cache->lock);
+    mutex_lock(&cache->lock);
 
     for (int32 i = 0; i < pageIndex; i++) {
         busyConditions[i].Unpublish();
@@ -263,7 +263,7 @@ read_into_cache(file_cache_ref *ref, void *cookie, off_t offset,
     }
 
     reserve_pages(ref, reservePages, false);
-    cutex_lock(&cache->lock);
+    mutex_lock(&cache->lock);
 
     // make the pages accessible in the cache
     for (int32 i = pageIndex; i-- > 0;) {
@@ -292,7 +292,7 @@ read_from_file(file_cache_ref *ref, void *cookie, off_t offset,
     vec.iov_len = bufferSize;
 
     push_access(ref, offset, bufferSize, false);
-    cutex_unlock(&ref->cache->lock);
+    mutex_unlock(&ref->cache->lock);
     vm_page_unreserve_pages(lastReservedPages);
 
     status_t status = vfs_read_pages(ref->vnode, cookie, offset + pageOffset,
@@ -300,7 +300,7 @@ read_from_file(file_cache_ref *ref, void *cookie, off_t offset,
     if (status == B_OK)
         reserve_pages(ref, reservePages, false);
 
-    cutex_lock(&ref->cache->lock);
+    mutex_lock(&ref->cache->lock);
 
     return status;
 }
@@ -351,7 +351,7 @@ write_to_cache(file_cache_ref *ref, void *cookie, off_t offset,
     }
 
     push_access(ref, offset, bufferSize, true);
-    cutex_unlock(&ref->cache->lock);
+    mutex_unlock(&ref->cache->lock);
     vm_page_unreserve_pages(lastReservedPages);
 
     // copy contents (and read in partially written pages first)
@@ -433,7 +433,7 @@ write_to_cache(file_cache_ref *ref, void *cookie, off_t offset,
     if (status == B_OK)
         reserve_pages(ref, reservePages, true);
 
-    cutex_lock(&ref->cache->lock);
+    mutex_lock(&ref->cache->lock);
 
     // unmap the pages again
 
@@ -482,7 +482,7 @@ write_to_file(file_cache_ref *ref, void *cookie, off_t offset, int32 pageOffset,
     vec.iov_len = bufferSize;
 
     push_access(ref, offset, bufferSize, true);
-    cutex_unlock(&ref->cache->lock);
+    mutex_unlock(&ref->cache->lock);
     vm_page_unreserve_pages(lastReservedPages);
 
     status_t status = B_OK;
@@ -508,7 +508,7 @@ write_to_file(file_cache_ref *ref, void *cookie, off_t offset, int32 pageOffset,
     if (status == B_OK)
         reserve_pages(ref, reservePages, true);
 
-    cutex_lock(&ref->cache->lock);
+    mutex_lock(&ref->cache->lock);
 
     return status;
 }
@@ -604,7 +604,7 @@ cache_io(void *_cacheRef, void *cookie, off_t offset, addr_t buffer,
     size_t reservePages = 0;
 
     reserve_pages(ref, lastReservedPages, doWrite);
-    CutexLocker locker(cache->lock);
+    MutexLocker locker(cache->lock);
 
     while (bytesLeft > 0) {
         // check if this page is already in memory
@@ -780,7 +780,7 @@ cache_prefetch_vnode(struct vnode *vnode, off_t offset, size_t size)
     off_t lastOffset = offset;
     size_t lastSize = 0;
 
-    cutex_lock(&cache->lock);
+    mutex_lock(&cache->lock);
 
     for (; bytesLeft > 0; offset += B_PAGE_SIZE) {
         // check if this page is already in memory
@@ -792,9 +792,9 @@ cache_prefetch_vnode(struct vnode *vnode, off_t offset, size_t size)
             // if busy retry again later
             ConditionVariableEntry entry;
             entry.Add(page);
-            cutex_unlock(&cache->lock);
+            mutex_unlock(&cache->lock);
             entry.Wait();
-            cutex_lock(&cache->lock);
+            mutex_lock(&cache->lock);
 
             goto restart;
         }
@@ -825,7 +825,7 @@ cache_prefetch_vnode(struct vnode *vnode, off_t offset, size_t size)
         read_into_cache(ref, lastOffset, lastLeft, NULL, 0);
 
 out:
-    cutex_unlock(&cache->lock);
+    mutex_unlock(&cache->lock);
     vm_cache_release_ref(cache);
 #endif
 }
@@ -985,7 +985,7 @@ file_cache_set_size(void *_cacheRef, off_t newSize)
     if (ref == NULL)
         return B_OK;
 
-    CutexLocker _(ref->cache->lock);
+    MutexLocker _(ref->cache->lock);
 
     off_t offset = ref->cache->virtual_size;
     off_t size = newSize;

@@ -66,9 +66,6 @@ IOScheduler::~IOScheduler()
 status_t
 IOScheduler::InitCheck() const
 {
-    if (fLock.sem < B_OK)
-        return fLock.sem;
-
     if (fThread < B_OK)
         return fThread;
 

@@ -357,9 +357,7 @@ rootfs_mount(fs_volume *volume, const char *device, uint32 flags,
     fs->id = volume->id;
     fs->next_vnode_id = 1;
 
-    err = mutex_init(&fs->lock, "rootfs_mutex");
-    if (err < B_OK)
-        goto err1;
+    mutex_init(&fs->lock, "rootfs_mutex");
 
     fs->vnode_list_hash = hash_init(ROOTFS_HASH_SIZE, (addr_t)&vnode->all_next - (addr_t)vnode,
         &rootfs_vnode_compare_func, &rootfs_vnode_hash_func);
@@ -388,7 +386,6 @@ err3:
     hash_uninit(fs->vnode_list_hash);
 err2:
     mutex_destroy(&fs->lock);
-err1:
     free(fs);
 
     return err;

@@ -1514,12 +1514,12 @@ disconnect_mount_or_vnode_fds(struct fs_mount *mount,
 
     while (true) {
         struct io_context *context = NULL;
-        sem_id contextMutex = -1;
+        bool contextLocked = false;
         struct team *team = NULL;
         team_id lastTeamID;
 
         cpu_status state = disable_interrupts();
-        GRAB_TEAM_LOCK();
+        SpinLocker teamsLock(team_spinlock);
 
         lastTeamID = peek_next_thread_id();
         if (nextTeamID < lastTeamID) {
@@ -1531,12 +1531,20 @@ disconnect_mount_or_vnode_fds(struct fs_mount *mount,
 
             if (team) {
                 context = (io_context *)team->io_context;
-                contextMutex = context->io_mutex.sem;
+
+                // Some acrobatics to lock the context in a safe way
+                // (cf. _kern_get_next_fd_info() for details).
+                GRAB_THREAD_LOCK();
+                teamsLock.Unlock();
+                contextLocked = mutex_lock_threads_locked(&context->io_mutex)
+                    == B_OK;
+                RELEASE_THREAD_LOCK();
+
                 nextTeamID++;
             }
         }
 
-        RELEASE_TEAM_LOCK();
+        teamsLock.Unlock();
         restore_interrupts(state);
 
         if (context == NULL)
@@ -1546,7 +1554,7 @@ disconnect_mount_or_vnode_fds(struct fs_mount *mount,
         // safe access to the team structure, we now need to lock the mutex
         // manually
 
-        if (acquire_sem(contextMutex) != B_OK) {
+        if (!contextLocked) {
             // team seems to be gone, go over to the next team
             continue;
         }
@@ -1554,8 +1562,6 @@ disconnect_mount_or_vnode_fds(struct fs_mount *mount,
         // the team cannot be deleted completely while we're owning its
         // io_context mutex, so we can safely play with it now
 
-        context->io_mutex.holder = thread_get_current_thread_id();
-
         replace_vnode_if_disconnected(mount, vnodeToDisconnect, context->root,
             sRoot, true);
         replace_vnode_if_disconnected(mount, vnodeToDisconnect, context->cwd,
@@ -4141,11 +4147,7 @@ vfs_new_io_context(void *_parentContext)
         + sizeof(struct select_sync*) * tableSize
         + (tableSize + 7) / 8);
 
-    if (mutex_init(&context->io_mutex, "I/O context") < 0) {
-        free(context->fds);
-        free(context);
-        return NULL;
-    }
+    mutex_init(&context->io_mutex, "I/O context");
 
     // Copy all parent file descriptors
 
@@ -4403,20 +4405,14 @@ vfs_init(kernel_args *args)
 
     sRoot = NULL;
 
-    if (mutex_init(&sFileSystemsMutex, "vfs_lock") < 0)
-        panic("vfs_init: error allocating file systems lock\n");
+    mutex_init(&sFileSystemsMutex, "vfs_lock");
 
     if (recursive_lock_init(&sMountOpLock, "vfs_mount_op_lock") < 0)
         panic("vfs_init: error allocating mount op lock\n");
 
-    if (mutex_init(&sMountMutex, "vfs_mount_lock") < 0)
-        panic("vfs_init: error allocating mount lock\n");
-
-    if (mutex_init(&sVnodeCoveredByMutex, "vfs_vnode_covered_by_lock") < 0)
-        panic("vfs_init: error allocating vnode::covered_by lock\n");
-
-    if (mutex_init(&sVnodeMutex, "vfs_vnode_lock") < 0)
-        panic("vfs_init: error allocating vnode lock\n");
+    mutex_init(&sMountMutex, "vfs_mount_lock");
+    mutex_init(&sVnodeCoveredByMutex, "vfs_vnode_covered_by_lock");
+    mutex_init(&sVnodeMutex, "vfs_vnode_lock");
 
     if (benaphore_init(&sIOContextRootLock, "io_context::root lock") < 0)
         panic("vfs_init: error allocating io_context::root lock\n");
@@ -7000,26 +6996,31 @@ _kern_get_next_fd_info(team_id teamID, uint32 *_cookie, fd_info *info,
         return B_BAD_VALUE;
 
     struct io_context *context = NULL;
-    sem_id contextMutex = -1;
     struct team *team = NULL;
 
     cpu_status state = disable_interrupts();
     GRAB_TEAM_LOCK();
 
+    bool contextLocked = false;
     team = team_get_team_struct_locked(teamID);
     if (team) {
+        // We cannot lock the IO context while holding the team lock, nor can
+        // we just drop the team lock, since it might be deleted in the
+        // meantime. team_remove_team() acquires the thread lock when removing
+        // the team from the team hash table, though. Hence we switch to the
+        // thread lock and use mutex_lock_threads_locked().
         context = (io_context *)team->io_context;
-        contextMutex = context->io_mutex.sem;
-    }
-
-    RELEASE_TEAM_LOCK();
+        GRAB_THREAD_LOCK();
+        RELEASE_TEAM_LOCK();
+        contextLocked = mutex_lock_threads_locked(&context->io_mutex) == B_OK;
+        RELEASE_THREAD_LOCK();
+    } else
+        RELEASE_TEAM_LOCK();
+
     restore_interrupts(state);
 
-    // we now have a context - since we couldn't lock it while having
-    // safe access to the team structure, we now need to lock the mutex
-    // manually
-
-    if (context == NULL || acquire_sem(contextMutex) != B_OK) {
+    if (!contextLocked) {
         // team doesn't exit or seems to be gone
         return B_BAD_TEAM_ID;
     }
@@ -7027,8 +7028,6 @@ _kern_get_next_fd_info(team_id teamID, uint32 *_cookie, fd_info *info,
     // the team cannot be deleted completely while we're owning its
     // io_context mutex, so we can safely play with it now
 
-    context->io_mutex.holder = thread_get_current_thread_id();
-
     uint32 slot = *_cookie;
 
     struct file_descriptor *descriptor;

@@ -469,8 +469,8 @@ heap_validate_heap(heap_allocator *heap)
 // #pragma mark - Heap functions
 
 
-heap_allocator *
-heap_attach(addr_t base, size_t size, bool postSem)
+static heap_allocator *
+heap_attach(addr_t base, size_t size)
 {
     heap_allocator *heap = (heap_allocator *)base;
     base += sizeof(heap_allocator);
@@ -517,16 +517,7 @@ heap_attach(addr_t base, size_t size)
     heap->free_pages = &heap->page_table[0];
     heap->page_table[0].prev = NULL;
 
-    if (postSem) {
-        if (mutex_init(&heap->lock, "heap_mutex") < 0) {
-            panic("heap_attach(): error creating heap mutex\n");
-            return NULL;
-        }
-    } else {
-        // pre-init the mutex to at least fall through any semaphore calls
-        heap->lock.sem = -1;
-        heap->lock.holder = -1;
-    }
+    mutex_init(&heap->lock, "heap_mutex");
 
     heap->next = NULL;
     dprintf("heap_attach: attached to %p - usable range 0x%08lx - 0x%08lx\n",
@@ -1014,7 +1005,7 @@ heap_grow_thread(void *)
     }
 
     heap_allocator *newHeap = heap_attach((addr_t)heapAddress,
-        HEAP_GROW_SIZE, true);
+        HEAP_GROW_SIZE);
     if (newHeap == NULL) {
         panic("heap_grower: could not attach additional heap!\n");
         delete_area(heapArea);
@@ -1038,7 +1029,7 @@ heap_grow_thread(void *)
 status_t
 heap_init(addr_t base, size_t size)
 {
-    sHeapList = heap_attach(base, size, false);
+    sHeapList = heap_attach(base, size);
 
     // set up some debug commands
     add_debugger_command_etc("heap", &dump_heap_list,
@@ -1063,12 +1054,6 @@ heap_init(addr_t base, size_t size)
 status_t
 heap_init_post_sem()
 {
-    // create the lock for the initial heap
-    if (mutex_init(&sHeapList->lock, "heap_mutex") < B_OK) {
-        panic("heap_init_post_sem(): error creating heap mutex\n");
-        return B_ERROR;
-    }
-
     sHeapGrowSem = create_sem(0, "heap_grow_sem");
     if (sHeapGrowSem < 0) {
         panic("heap_init_post_sem(): failed to create heap grow sem\n");
@@ -1097,7 +1082,7 @@ heap_init_post_thread()
         return area;
     }
 
-    sGrowHeap = heap_attach((addr_t)dedicated, HEAP_DEDICATED_GROW_SIZE, true);
+    sGrowHeap = heap_attach((addr_t)dedicated, HEAP_DEDICATED_GROW_SIZE);
     if (sGrowHeap == NULL) {
         panic("heap_init_post_thread(): failed to attach dedicated grow heap\n");
         delete_area(area);

@@ -274,7 +274,8 @@ image_init(void)
     add_debugger_command("team_images", &dump_images_list, "Dump all registered images from the current team");
 #endif
 
-    return mutex_init(&sImageMutex, "image");
+    mutex_init(&sImageMutex, "image");
+    return B_OK;
 }
 
 

@@ -127,8 +127,7 @@ kernel_daemon_init(void)
 {
     thread_id thread;
 
-    if (mutex_init(&sDaemonMutex, "kernel daemon") < B_OK)
-        return B_ERROR;
+    mutex_init(&sDaemonMutex, "kernel daemon");
 
     list_init(&sDaemons);
 

@@ -23,14 +23,14 @@
 #include <util/AutoLock.h>
 
 
-struct cutex_waiter {
+struct mutex_waiter {
     struct thread* thread;
-    cutex_waiter* next; // next in queue
-    cutex_waiter* last; // last in queue (valid for the first in queue)
+    mutex_waiter* next; // next in queue
+    mutex_waiter* last; // last in queue (valid for the first in queue)
 };
 
-#define CUTEX_FLAG_OWNS_NAME CUTEX_FLAG_CLONE_NAME
-#define CUTEX_FLAG_RELEASED 0x2
+#define MUTEX_FLAG_OWNS_NAME MUTEX_FLAG_CLONE_NAME
+#define MUTEX_FLAG_RELEASED 0x2
 
 
 int32
@@ -110,106 +110,6 @@ recursive_lock_unlock(recursive_lock *lock)
 // #pragma mark -
 
 
-status_t
-mutex_init(mutex *m, const char *name)
-{
-    if (m == NULL)
-        return EINVAL;
-
-    if (name == NULL)
-        name = "mutex_sem";
-
-    m->holder = -1;
-
-    m->sem = create_sem(1, name);
-    if (m->sem >= B_OK)
-        return B_OK;
-
-    return m->sem;
-}
-
-
-void
-mutex_destroy(mutex *mutex)
-{
-    if (mutex == NULL)
-        return;
-
-    if (mutex->sem >= 0) {
-        delete_sem(mutex->sem);
-        mutex->sem = -1;
-    }
-    mutex->holder = -1;
-}
-
-
-status_t
-mutex_trylock(mutex *mutex)
-{
-    thread_id me = thread_get_current_thread_id();
-    status_t status;
-
-    if (kernel_startup)
-        return B_OK;
-
-    status = acquire_sem_etc(mutex->sem, 1, B_RELATIVE_TIMEOUT, 0);
-    if (status < B_OK)
-        return status;
-
-    if (me == mutex->holder) {
-        panic("mutex_trylock failure: mutex %p (sem = 0x%lx) acquired twice by"
-            " thread 0x%lx\n", mutex, mutex->sem, me);
-    }
-
-    mutex->holder = me;
-    return B_OK;
-}
-
-
-status_t
-mutex_lock(mutex *mutex)
-{
-    thread_id me = thread_get_current_thread_id();
-    status_t status;
-
-    if (kernel_startup)
-        return B_OK;
-
-    status = acquire_sem(mutex->sem);
-    if (status < B_OK)
-        return status;
-
-    if (me == mutex->holder) {
-        panic("mutex_lock failure: mutex %p (sem = 0x%lx) acquired twice by"
-            " thread 0x%lx\n", mutex, mutex->sem, me);
-    }
-
-    mutex->holder = me;
-    return B_OK;
-}
-
-
-void
-mutex_unlock(mutex *mutex)
-{
-    thread_id me = thread_get_current_thread_id();
-
-    if (kernel_startup)
-        return;
-
-    if (me != mutex->holder) {
-        panic("mutex_unlock failure: thread 0x%lx is trying to release mutex %p"
-            " (current holder 0x%lx)\n", me, mutex, mutex->holder);
-    }
-
-    mutex->holder = -1;
-    release_sem_etc(mutex->sem, 1, 0/*B_DO_NOT_RESCHEDULE*/);
-}
-
-
 // #pragma mark -
 
 
 status_t
 benaphore_init(benaphore *ben, const char *name)
 {
@@ -299,7 +199,7 @@ rw_lock_write_unlock(rw_lock *lock)
 
 
 void
-cutex_init(cutex* lock, const char *name)
+mutex_init(mutex* lock, const char *name)
 {
     lock->name = name;
     lock->waiters = NULL;
@@ -313,23 +213,23 @@ cutex_init(cutex* lock, const char *name)
 
 
 void
-cutex_init_etc(cutex* lock, const char *name, uint32 flags)
+mutex_init_etc(mutex* lock, const char *name, uint32 flags)
 {
-    lock->name = (flags & CUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
+    lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
     lock->waiters = NULL;
 #ifdef KDEBUG
     lock->holder = -1;
 #else
     lock->count = 0;
 #endif
-    lock->flags = flags & CUTEX_FLAG_CLONE_NAME;
+    lock->flags = flags & MUTEX_FLAG_CLONE_NAME;
 }
 
 
 void
-cutex_destroy(cutex* lock)
+mutex_destroy(mutex* lock)
 {
-    char* name = (lock->flags & CUTEX_FLAG_CLONE_NAME) != 0
+    char* name = (lock->flags & MUTEX_FLAG_CLONE_NAME) != 0
         ? (char*)lock->name : NULL;
 
     // unblock all waiters
@@ -338,16 +238,14 @@ cutex_destroy(cutex* lock)
 #ifdef KDEBUG
     if (lock->waiters != NULL && thread_get_current_thread_id()
             != lock->holder) {
-        panic("cutex_destroy(): there are blocking threads, but caller doesn't "
+        panic("mutex_destroy(): there are blocking threads, but caller doesn't "
             "hold the lock (%p)", lock);
-        locker.Unlock();
-        if (_cutex_lock(lock) != B_OK)
+        if (_mutex_lock(lock, true) != B_OK)
             return;
-        locker.Lock();
     }
 #endif
 
-    while (cutex_waiter* waiter = lock->waiters) {
+    while (mutex_waiter* waiter = lock->waiters) {
         // dequeue
         lock->waiters = waiter->next;
 
@@ -364,16 +262,17 @@ cutex_destroy(cutex* lock)
 
 
 status_t
-_cutex_lock(cutex* lock)
+_mutex_lock(mutex* lock, bool threadsLocked)
 {
 #ifdef KDEBUG
-    if (!kernel_startup && !are_interrupts_enabled()) {
-        panic("_cutex_unlock(): called with interrupts disabled for lock %p",
+    if (!kernel_startup && !threadsLocked && !are_interrupts_enabled()) {
+        panic("_mutex_unlock(): called with interrupts disabled for lock %p",
             lock);
     }
 #endif
 
-    InterruptsSpinLocker _(thread_spinlock);
+    // lock only, if !threadsLocked
+    InterruptsSpinLocker locker(thread_spinlock, false, !threadsLocked);
 
     // Might have been released after we decremented the count, but before
     // we acquired the spinlock.
@@ -383,14 +282,14 @@ _cutex_lock(cutex* lock)
         return B_OK;
     }
 #else
-    if ((lock->flags & CUTEX_FLAG_RELEASED) != 0) {
-        lock->flags &= ~CUTEX_FLAG_RELEASED;
+    if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
+        lock->flags &= ~MUTEX_FLAG_RELEASED;
         return B_OK;
     }
 #endif
 
     // enqueue in waiter list
-    cutex_waiter waiter;
+    mutex_waiter waiter;
     waiter.thread = thread_get_current_thread();
     waiter.next = NULL;
 
@@ -402,7 +301,7 @@ _cutex_lock(cutex* lock)
         lock->waiters->last = &waiter;
 
     // block
-    thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_CUTEX, lock);
+    thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
     status_t error = thread_block_locked(waiter.thread);
 
 #ifdef KDEBUG
@@ -415,20 +314,20 @@ _cutex_lock(cutex* lock)
 
 
 void
-_cutex_unlock(cutex* lock)
+_mutex_unlock(mutex* lock)
 {
     InterruptsSpinLocker _(thread_spinlock);
 
 #ifdef KDEBUG
     if (thread_get_current_thread_id() != lock->holder) {
-        panic("_cutex_unlock() failure: thread %ld is trying to release "
-            "cutex %p (current holder %ld)\n", thread_get_current_thread_id(),
+        panic("_mutex_unlock() failure: thread %ld is trying to release "
+            "mutex %p (current holder %ld)\n", thread_get_current_thread_id(),
             lock, lock->holder);
         return;
     }
 #endif
 
-    cutex_waiter* waiter = lock->waiters;
+    mutex_waiter* waiter = lock->waiters;
     if (waiter != NULL) {
         // dequeue the first waiter
         lock->waiters = waiter->next;
@@ -451,14 +350,14 @@ _cutex_unlock(cutex* lock)
 #ifdef KDEBUG
         lock->holder = -1;
 #else
-        lock->flags |= CUTEX_FLAG_RELEASED;
+        lock->flags |= MUTEX_FLAG_RELEASED;
 #endif
     }
 }
 
 
 status_t
-_cutex_trylock(cutex* lock)
+_mutex_trylock(mutex* lock)
 {
 #ifdef KDEBUG
     InterruptsSpinLocker _(thread_spinlock);
@@ -473,21 +372,21 @@ _cutex_trylock(cutex* lock)
 
 
 static int
-dump_cutex_info(int argc, char** argv)
+dump_mutex_info(int argc, char** argv)
 {
     if (argc < 2) {
         print_debugger_command_usage(argv[0]);
         return 0;
     }
 
-    cutex* lock = (cutex*)strtoul(argv[1], NULL, 0);
+    mutex* lock = (mutex*)strtoul(argv[1], NULL, 0);
 
     if (!IS_KERNEL_ADDRESS(lock)) {
         kprintf("invalid address: %p\n", lock);
         return 0;
     }
 
-    kprintf("cutex %p:\n", lock);
+    kprintf("mutex %p:\n", lock);
     kprintf(" name: %s\n", lock->name);
     kprintf(" flags: 0x%x\n", lock->flags);
 #ifdef KDEBUG
@@ -497,7 +396,7 @@ dump_cutex_info(int argc, char** argv)
 #endif
 
     kprintf(" waiting threads:");
-    cutex_waiter* waiter = lock->waiters;
+    mutex_waiter* waiter = lock->waiters;
     while (waiter != NULL) {
         kprintf(" %ld", waiter->thread->id);
         waiter = waiter->next;
@@ -514,9 +413,9 @@ dump_cutex_info(int argc, char** argv)
 void
 lock_debug_init()
 {
-    add_debugger_command_etc("cutex", &dump_cutex_info,
-        "Dump info about a cutex",
-        "<cutex>\n"
-        "Prints info about the specified cutex.\n"
-        " <cutex> - pointer to the cutex to print the info for.\n", 0);
+    add_debugger_command_etc("mutex", &dump_mutex_info,
+        "Dump info about a mutex",
+        "<mutex>\n"
+        "Prints info about the specified mutex.\n"
+        " <mutex> - pointer to the mutex to print the info for.\n", 0);
 }

@@ -211,10 +211,7 @@ status_t
 generic_syscall_init(void)
 {
     list_init(&sGenericSyscalls);
-    if (mutex_init(&sGenericSyscallLock, "generic syscall") != B_OK) {
-        panic("generic_syscall_init(): mutex init failed");
-        return B_ERROR;
-    }
+    mutex_init(&sGenericSyscallLock, "generic syscall");
 
 #if SYSCALL_TRACING
     add_debugger_command_etc("straced", &dump_syscall_tracing,

@@ -1145,8 +1145,8 @@ _dump_thread_info(struct thread *thread)
             kprintf("signal\n");
             break;
 
-        case THREAD_BLOCK_TYPE_CUTEX:
-            kprintf("cutex %p\n", thread->wait.object);
+        case THREAD_BLOCK_TYPE_MUTEX:
+            kprintf("mutex %p\n", thread->wait.object);
             break;
 
         case THREAD_BLOCK_TYPE_OTHER:
@@ -1320,8 +1320,8 @@ dump_thread_list(int argc, char **argv)
                     kprintf("signal ");
                     break;
 
-                case THREAD_BLOCK_TYPE_CUTEX:
-                    kprintf("cutex %p ", thread->wait.object);
+                case THREAD_BLOCK_TYPE_MUTEX:
+                    kprintf("mutex %p ", thread->wait.object);
                     break;
 
                 case THREAD_BLOCK_TYPE_OTHER:

@@ -950,10 +950,7 @@ cbuf_init(void)
     // add the debug command
     add_debugger_command("cbuf_freelist", &dbg_dump_cbuf_freelists, "Dumps the cbuf free lists");
 
-    if (mutex_init(&sFreeBufferListMutex, "cbuf_free_list") < B_OK) {
-        panic("cbuf_init: error creating cbuf_free_list mutex\n");
-        return B_NO_MEMORY;
-    }
+    mutex_init(&sFreeBufferListMutex, "cbuf_free_list");
 
     // errors are fatal, that's why we don't clean up here
 

@@ -1236,7 +1236,7 @@ map_backing_store(vm_address_space *addressSpace, vm_cache *cache,
     TRACE(("map_backing_store: aspace %p, cache %p, *vaddr %p, offset 0x%Lx, size %lu, addressSpec %ld, wiring %d, protection %d, _area %p, area_name '%s'\n",
         addressSpace, cache, *_virtualAddress, offset, size, addressSpec,
         wiring, protection, _area, areaName));
-    ASSERT_LOCKED_CUTEX(&cache->lock);
+    ASSERT_LOCKED_MUTEX(&cache->lock);
 
     vm_area *area = create_area_struct(addressSpace, areaName, wiring,
         protection);
@@ -1267,7 +1267,7 @@ map_backing_store(vm_address_space *addressSpace, vm_cache *cache,
             goto err1;
         }
 
-        cutex_lock(&newCache->lock);
+        mutex_lock(&newCache->lock);
         newCache->type = CACHE_TYPE_RAM;
         newCache->temporary = 1;
         newCache->scan_skip = cache->scan_skip;
@@ -1310,7 +1310,7 @@ map_backing_store(vm_address_space *addressSpace, vm_cache *cache,
     // point the cache back to the area
     vm_cache_insert_area_locked(cache, area);
     if (mapping == REGION_PRIVATE_MAP)
-        cutex_unlock(&cache->lock);
+        mutex_unlock(&cache->lock);
 
     // insert the area in the global area hash table
     acquire_sem_etc(sAreaHashLock, WRITE_COUNT, 0 ,0);
@@ -1328,10 +1328,10 @@ err2:
         // We created this cache, so we must delete it again. Note, that we
         // need to temporarily unlock the source cache or we'll otherwise
         // deadlock, since vm_cache_remove_consumer will try to lock it too.
-        cutex_unlock(&cache->lock);
-        cutex_unlock(&sourceCache->lock);
+        mutex_unlock(&cache->lock);
+        mutex_unlock(&sourceCache->lock);
         vm_cache_release_ref(cache);
-        cutex_lock(&sourceCache->lock);
+        mutex_lock(&sourceCache->lock);
     }
 err1:
     free(area->name);
@@ -1525,13 +1525,13 @@ vm_create_anonymous_area(team_id team, const char *name, void **address,
             break;
     }
 
-    cutex_lock(&cache->lock);
+    mutex_lock(&cache->lock);
 
     status = map_backing_store(addressSpace, cache, address, 0, size,
         addressSpec, wiring, protection, REGION_NO_PRIVATE_MAP, &area, name,
         unmapAddressRange);
 
-    cutex_unlock(&cache->lock);
+    mutex_unlock(&cache->lock);
 
     if (status < B_OK) {
         vm_cache_release_ref(cache);
@@ -1554,7 +1554,7 @@ vm_create_anonymous_area(team_id team, const char *name, void **address,
             vm_page_reserve_pages(reservePages);
 
             // Allocate and map all pages for this area
-            cutex_lock(&cache->lock);
+            mutex_lock(&cache->lock);
 
             off_t offset = 0;
             for (addr_t address = area->base; address < area->base + (area->size - 1);
@@ -1579,7 +1579,7 @@ vm_create_anonymous_area(team_id team, const char *name, void **address,
                 vm_map_page(area, page, address, protection);
             }
 
-            cutex_unlock(&cache->lock);
+            mutex_unlock(&cache->lock);
             vm_page_unreserve_pages(reservePages);
             break;
         }
@@ -1595,7 +1595,7 @@ vm_create_anonymous_area(team_id team, const char *name, void **address,
             if (!kernel_startup)
                 panic("ALREADY_WIRED flag used outside kernel startup\n");
 
-            cutex_lock(&cache->lock);
+            mutex_lock(&cache->lock);
             map->ops->lock(map);
 
             for (addr_t virtualAddress = area->base; virtualAddress < area->base
@@ -1622,7 +1622,7 @@ vm_create_anonymous_area(team_id team, const char *name, void **address,
             }
 
             map->ops->unlock(map);
-            cutex_unlock(&cache->lock);
+            mutex_unlock(&cache->lock);
             break;
         }
 
@@ -1638,7 +1638,7 @@ vm_create_anonymous_area(team_id team, const char *name, void **address,
             off_t offset = 0;
 
             vm_page_reserve_pages(reservePages);
-            cutex_lock(&cache->lock);
+            mutex_lock(&cache->lock);
             map->ops->lock(map);
 
             for (virtualAddress = area->base; virtualAddress < area->base
@@ -1660,7 +1660,7 @@ vm_create_anonymous_area(team_id team, const char *name, void **address,
             }
 
             map->ops->unlock(map);
-            cutex_unlock(&cache->lock);
+            mutex_unlock(&cache->lock);
             vm_page_unreserve_pages(reservePages);
             break;
         }
@@ -1739,13 +1739,13 @@ vm_map_physical_memory(team_id team, const char *name, void **_address,
     cache->type = CACHE_TYPE_DEVICE;
     cache->virtual_size = size;
 
-    cutex_lock(&cache->lock);
+    mutex_lock(&cache->lock);
 
     status_t status = map_backing_store(locker.AddressSpace(), cache, _address,
         0, size, addressSpec & ~B_MTR_MASK, B_FULL_LOCK, protection,
         REGION_NO_PRIVATE_MAP, &area, name, false);
 
-    cutex_unlock(&cache->lock);
+    mutex_unlock(&cache->lock);
 
     if (status < B_OK)
         vm_cache_release_ref(cache);
@@ -1821,13 +1821,13 @@ vm_create_null_area(team_id team, const char *name, void **address,
     cache->type = CACHE_TYPE_NULL;
     cache->virtual_size = size;
 
-    cutex_lock(&cache->lock);
+    mutex_lock(&cache->lock);
 
     status = map_backing_store(locker.AddressSpace(), cache, address, 0, size,
         addressSpec, 0, B_KERNEL_READ_AREA, REGION_NO_PRIVATE_MAP, &area, name,
        false);
 
-    cutex_unlock(&cache->lock);
+    mutex_unlock(&cache->lock);
 
     if (status < B_OK) {
         vm_cache_release_ref(cache);
@@ -1929,14 +1929,14 @@ _vm_map_file(team_id team, const char *name, void **_address, uint32 addressSpec
     if (status < B_OK)
         return status;
 
-    cutex_lock(&cache->lock);
+    mutex_lock(&cache->lock);
 
     vm_area *area;
     status = map_backing_store(locker.AddressSpace(), cache, _address,
         offset, size, addressSpec, 0, protection, mapping, &area, name,
         addressSpec == B_EXACT_ADDRESS);
 
-    cutex_unlock(&cache->lock);
+    mutex_unlock(&cache->lock);
 
     if (status < B_OK || mapping == REGION_PRIVATE_MAP) {
         // map_backing_store() cannot know we no longer need the ref
@@ -1971,14 +1971,14 @@ vm_area_get_locked_cache(vm_area *area)
         vm_cache_acquire_ref(cache);
         locker.Unlock();
 
-        cutex_lock(&cache->lock);
+        mutex_lock(&cache->lock);
 
         locker.Lock();
         if (cache == area->cache)
             return cache;
 
         // the cache changed in the meantime
-        cutex_unlock(&cache->lock);
+        mutex_unlock(&cache->lock);
         vm_cache_release_ref(cache);
     }
 }
@@ -1987,7 +1987,7 @@ vm_area_get_locked_cache(vm_area *area)
 void
 vm_area_put_locked_cache(vm_cache *cache)
 {
-    cutex_unlock(&cache->lock);
+    mutex_unlock(&cache->lock);
     vm_cache_release_ref(cache);
 }
 
@@ -2207,7 +2207,7 @@ vm_copy_on_write_area(vm_cache* lowerCache)
         return B_NO_MEMORY;
     }
 
-    cutex_lock(&upperCache->lock);
+    mutex_lock(&upperCache->lock);
 
     upperCache->type = CACHE_TYPE_RAM;
     upperCache->temporary = 1;
@@ -3852,10 +3852,10 @@ retry:
 
     vm_cache_acquire_ref(source);
 
-    cutex_lock(&source->lock);
+    mutex_lock(&source->lock);
 
     if (source->busy) {
-        cutex_unlock(&source->lock);
+        mutex_unlock(&source->lock);
         vm_cache_release_ref(source);
         goto retry;
     }
@@ -3889,7 +3889,7 @@ fault_remove_dummy_page(vm_dummy_page &dummyPage, bool isLocked)
 {
     vm_cache *cache = dummyPage.cache;
     if (!isLocked)
-        cutex_lock(&cache->lock);
+        mutex_lock(&cache->lock);
 
     if (dummyPage.state == PAGE_STATE_BUSY) {
         vm_cache_remove_page(cache, &dummyPage);
@@ -3898,7 +3898,7 @@ fault_remove_dummy_page(vm_dummy_page &dummyPage, bool isLocked)
     }
 
     if (!isLocked)
-        cutex_unlock(&cache->lock);
+        mutex_unlock(&cache->lock);
 
     vm_cache_release_ref(cache);
 }
@@ -3923,7 +3923,7 @@ fault_find_page(vm_translation_map *map, vm_cache *topCache,
     vm_page *page = NULL;
 
     vm_cache_acquire_ref(cache);
-    cutex_lock(&cache->lock);
+    mutex_lock(&cache->lock);
         // we release this later in the loop
 
     while (cache != NULL) {
@@ -3947,9 +3947,9 @@ fault_find_page(vm_translation_map *map, vm_cache *topCache,
             {
                 ConditionVariableEntry entry;
                 entry.Add(page);
-                cutex_unlock(&cache->lock);
+                mutex_unlock(&cache->lock);
                 entry.Wait();
-                cutex_lock(&cache->lock);
+                mutex_lock(&cache->lock);
             }
 
             if (cache->busy) {
@@ -3958,7 +3958,7 @@ fault_find_page(vm_translation_map *map, vm_cache *topCache,
                 // the top cache.
                 ConditionVariableEntry entry;
                 entry.Add(cache);
-                cutex_unlock(&cache->lock);
+                mutex_unlock(&cache->lock);
                 vm_cache_release_ref(cache);
                 entry.Wait();
                 *_restart = true;
@@ -3982,7 +3982,7 @@ fault_find_page(vm_translation_map *map, vm_cache *topCache,
             ConditionVariable busyCondition;
             busyCondition.Publish(page, "page");
 
-            cutex_unlock(&cache->lock);
+            mutex_unlock(&cache->lock);
 
             // get a virtual address for the page
             iovec vec;
@@ -3997,7 +3997,7 @@ fault_find_page(vm_translation_map *map, vm_cache *topCache,
 
             map->ops->put_physical_page((addr_t)vec.iov_base);
 
-            cutex_lock(&cache->lock);
+            mutex_lock(&cache->lock);
 
             if (status < B_OK) {
                 // on error remove and free the page
@@ -4008,7 +4008,7 @@ fault_find_page(vm_translation_map *map, vm_cache *topCache,
                 vm_cache_remove_page(cache, page);
                 vm_page_set_state(page, PAGE_STATE_FREE);
 
-                cutex_unlock(&cache->lock);
+                mutex_unlock(&cache->lock);
                 vm_cache_release_ref(cache);
                 return status;
             }
@@ -4031,16 +4031,16 @@ fault_find_page(vm_translation_map *map, vm_cache *topCache,
             // the source cache is currently in the process of being merged
            // with his only consumer (cacheRef); since its pages are moved
             // upwards, too, we try this cache again
-            cutex_unlock(&cache->lock);
+            mutex_unlock(&cache->lock);
             thread_yield(true);
-            cutex_lock(&cache->lock);
+            mutex_lock(&cache->lock);
             if (cache->busy) {
                 // The cache became busy, which means, it is about to be
                 // removed by vm_cache_remove_consumer(). We start again with
                 // the top cache.
                 ConditionVariableEntry entry;
                 entry.Add(cache);
-                cutex_unlock(&cache->lock);
+                mutex_unlock(&cache->lock);
                 vm_cache_release_ref(cache);
                 entry.Wait();
                 *_restart = true;
@@ -4051,7 +4051,7 @@ fault_find_page(vm_translation_map *map, vm_cache *topCache,
         } else if (status < B_OK)
             nextCache = NULL;
 
-        cutex_unlock(&cache->lock);
+        mutex_unlock(&cache->lock);
             // at this point, we still hold a ref to this cache (through lastCacheRef)
 
         cache = nextCache;
@@ -4066,7 +4066,7 @@ fault_find_page(vm_translation_map *map, vm_cache *topCache,
         // Read-only pages come in the deepest cache - only the
         // top most cache may have direct write access.
         vm_cache_acquire_ref(cache);
-        cutex_lock(&cache->lock);
+        mutex_lock(&cache->lock);
 
         if (cache->busy) {
             // The cache became busy, which means, it is about to be
@@ -4074,7 +4074,7 @@ fault_find_page(vm_translation_map *map, vm_cache *topCache,
             // the top cache.
             ConditionVariableEntry entry;
             entry.Add(cache);
-            cutex_unlock(&cache->lock);
+            mutex_unlock(&cache->lock);
             vm_cache_release_ref(cache);
             entry.Wait();
             *_restart = true;
@@ -4085,7 +4085,7 @@ fault_find_page(vm_translation_map *map, vm_cache *topCache,
             // for, but it could as well be a dummy page from someone
             // else or an otherwise busy page. We can't really handle
             // that here. Hence we completely restart this functions.
-            cutex_unlock(&cache->lock);
+            mutex_unlock(&cache->lock);
             vm_cache_release_ref(cache);
             *_restart = true;
         }
@@ -4128,7 +4128,7 @@ fault_get_page(vm_translation_map *map, vm_cache *topCache, off_t cacheOffset,
             break;
 
         // Remove the dummy page, if it has been inserted.
-        cutex_lock(&topCache->lock);
+        mutex_lock(&topCache->lock);
 
         if (dummyPage.state == PAGE_STATE_BUSY) {
             ASSERT_PRINT(dummyPage.cache == topCache, "dummy page: %p\n",
@@ -4136,7 +4136,7 @@ fault_get_page(vm_translation_map *map, vm_cache *topCache, off_t cacheOffset,
             fault_remove_dummy_page(dummyPage, true);
         }
 
-        cutex_unlock(&topCache->lock);
+        mutex_unlock(&topCache->lock);
     }
 
     if (page == NULL) {
@@ -4175,9 +4175,9 @@ fault_get_page(vm_translation_map *map, vm_cache *topCache, off_t cacheOffset,
             // This is not the top cache into which we inserted the dummy page,
             // let's remove it from there. We need to temporarily unlock our
             // cache to comply with the cache locking policy.
-            cutex_unlock(&cache->lock);
+            mutex_unlock(&cache->lock);
             fault_remove_dummy_page(dummyPage, false);
-            cutex_lock(&cache->lock);
+            mutex_lock(&cache->lock);
         }
     }
 
@@ -4225,8 +4225,8 @@ if (cacheOffset == 0x12000)
         if (sourcePage->state != PAGE_STATE_MODIFIED)
             vm_page_set_state(sourcePage, PAGE_STATE_ACTIVE);
 
-        cutex_unlock(&cache->lock);
-        cutex_lock(&topCache->lock);
+        mutex_unlock(&cache->lock);
+        mutex_lock(&topCache->lock);
 
         // Since the top cache has been unlocked for a while, someone else
         // (vm_cache_remove_consumer()) might have replaced our dummy page.
@@ -4244,9 +4244,9 @@ if (cacheOffset == 0x12000)
                 // The page is busy, wait till it becomes unbusy.
                 ConditionVariableEntry entry;
                 entry.Add(newPage);
-                cutex_unlock(&topCache->lock);
+                mutex_unlock(&topCache->lock);
                 entry.Wait();
-                cutex_lock(&topCache->lock);
+                mutex_lock(&topCache->lock);
             }
 
             if (newPage) {
@@ -4358,7 +4358,7 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
         }
     }
 
-    cutex_unlock(&topCache->lock);
+    mutex_unlock(&topCache->lock);
 
     // The top most cache has no fault handler, so let's see if the cache or its sources
     // already have the page we're searching for (we're going from top to bottom)
@@ -4408,7 +4408,7 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
 
         vm_map_page(area, page, address, newProtection);
 
-        cutex_unlock(&pageSource->lock);
+        mutex_unlock(&pageSource->lock);
         vm_cache_release_ref(pageSource);
     }
 

@@ -159,7 +159,7 @@ delete_cache(vm_cache* cache)
 	if (cache->source)
 		vm_cache_remove_consumer(cache->source, cache);

-	cutex_destroy(&cache->lock);
+	mutex_destroy(&cache->lock);
 	free(cache);
 }

@@ -194,7 +194,7 @@ vm_cache_create(vm_store* store)
 	if (cache == NULL)
 		return NULL;

-	cutex_init(&cache->lock, "vm_cache");
+	mutex_init(&cache->lock, "vm_cache");
 	list_init_etc(&cache->consumers, offsetof(vm_cache, consumer_link));
 	cache->page_list = NULL;
 	cache->areas = NULL;
@@ -265,7 +265,7 @@ vm_cache_release_ref(vm_cache* cache)
 	vm_cache* c;
 	bool locked = false;
 	if (cacheRef->lock.holder != find_thread(NULL)) {
-		cutex_lock(&cacheRef->lock);
+		mutex_lock(&cacheRef->lock);
 		locked = true;
 	}
 	for (a = cacheRef->areas; a != NULL; a = a->cache_next)
@@ -278,7 +278,7 @@ vm_cache_release_ref(vm_cache* cache)
 	if (cacheRef->ref_count < min)
 		panic("cache_ref %p has too little ref_count!!!!", cacheRef);
 	if (locked)
-		cutex_unlock(&cacheRef->lock);
+		mutex_unlock(&cacheRef->lock);
 	}
 #endif
 	return;
@@ -310,7 +310,7 @@ vm_cache_acquire_page_cache_ref(vm_page* page)
 vm_page*
 vm_cache_lookup_page(vm_cache* cache, off_t offset)
 {
-	ASSERT_LOCKED_CUTEX(&cache->lock);
+	ASSERT_LOCKED_MUTEX(&cache->lock);

 	struct page_lookup_key key;
 	key.offset = (uint32)(offset >> PAGE_SHIFT);
@@ -336,7 +336,7 @@ vm_cache_insert_page(vm_cache* cache, vm_page* page, off_t offset)
 {
 	TRACE(("vm_cache_insert_page: cache %p, page %p, offset %Ld\n",
 		cache, page, offset));
-	ASSERT_LOCKED_CUTEX(&cache->lock);
+	ASSERT_LOCKED_MUTEX(&cache->lock);

 	if (page->cache != NULL) {
 		panic("insert page %p into cache %p: page cache is set to %p\n",
@@ -383,7 +383,7 @@ void
 vm_cache_remove_page(vm_cache* cache, vm_page* page)
 {
 	TRACE(("vm_cache_remove_page: cache %p, page %p\n", cache, page));
-	ASSERT_LOCKED_CUTEX(&cache->lock);
+	ASSERT_LOCKED_MUTEX(&cache->lock);

 	if (page->cache != cache) {
 		panic("remove page %p from cache %p: page cache is set to %p\n", page,
@@ -421,9 +421,9 @@ vm_cache_write_modified(vm_cache* cache, bool fsReenter)
 	if (cache->temporary)
 		return B_OK;

-	cutex_lock(&cache->lock);
+	mutex_lock(&cache->lock);
 	status_t status = vm_page_write_modified_pages(cache, fsReenter);
-	cutex_unlock(&cache->lock);
+	mutex_unlock(&cache->lock);

 	return status;
 }
@@ -438,7 +438,7 @@ vm_cache_set_minimal_commitment_locked(vm_cache* cache, off_t commitment)
 {
 	TRACE(("vm_cache_set_minimal_commitment_locked(cache %p, commitment %Ld)\n",
 		cache, commitment));
-	ASSERT_LOCKED_CUTEX(&cache->lock);
+	ASSERT_LOCKED_MUTEX(&cache->lock);

 	vm_store* store = cache->store;
 	status_t status = B_OK;
@@ -471,7 +471,7 @@ vm_cache_resize(vm_cache* cache, off_t newSize)
 {
 	TRACE(("vm_cache_resize(cache %p, newSize %Ld) old size %Ld\n",
 		cache, newSize, cache->virtual_size));
-	ASSERT_LOCKED_CUTEX(&cache->lock);
+	ASSERT_LOCKED_MUTEX(&cache->lock);

 	status_t status = cache->store->ops->commit(cache->store, newSize);
 	if (status != B_OK)
@@ -502,9 +502,9 @@ vm_cache_resize(vm_cache* cache, off_t newSize)
 			// wait for page to become unbusy
 			ConditionVariableEntry entry;
 			entry.Add(page);
-			cutex_unlock(&cache->lock);
+			mutex_unlock(&cache->lock);
 			entry.Wait();
-			cutex_lock(&cache->lock);
+			mutex_lock(&cache->lock);

 			// restart from the start of the list
 			page = cache->page_list;
@@ -534,7 +534,7 @@ void
 vm_cache_remove_consumer(vm_cache* cache, vm_cache* consumer)
 {
 	TRACE(("remove consumer vm cache %p from cache %p\n", consumer, cache));
-	ASSERT_LOCKED_CUTEX(&consumer->lock);
+	ASSERT_LOCKED_MUTEX(&consumer->lock);

 	// Remove the store ref before locking the cache. Otherwise we'd call into
 	// the VFS while holding the cache lock, which would reverse the usual
@@ -543,7 +543,7 @@ vm_cache_remove_consumer(vm_cache* cache, vm_cache* consumer)
 	cache->store->ops->release_ref(cache->store);

 	// remove the consumer from the cache, but keep its reference until later
-	cutex_lock(&cache->lock);
+	mutex_lock(&cache->lock);
 	list_remove_item(&cache->consumers, consumer);
 	consumer->source = NULL;

@@ -569,10 +569,10 @@ vm_cache_remove_consumer(vm_cache* cache, vm_cache* consumer)
 		// need to unlock our cache now
 		busyCondition.Publish(cache, "cache");
 		cache->busy = true;
-		cutex_unlock(&cache->lock);
+		mutex_unlock(&cache->lock);

-		cutex_lock(&consumer->lock);
-		cutex_lock(&cache->lock);
+		mutex_lock(&consumer->lock);
+		mutex_lock(&cache->lock);

 		if (cache->areas != NULL || cache->source == NULL
 			|| list_is_empty(&cache->consumers)
@@ -583,7 +583,7 @@ vm_cache_remove_consumer(vm_cache* cache, vm_cache* consumer)
 			merge = false;
 			cache->busy = false;
 			busyCondition.Unpublish();
-			cutex_unlock(&consumer->lock);
+			mutex_unlock(&consumer->lock);
 			vm_cache_release_ref(consumer);
 		}
 	}
@@ -637,14 +637,14 @@ vm_cache_remove_consumer(vm_cache* cache, vm_cache* consumer)
 		vm_cache* newSource = cache->source;

 		// The remaining consumer has gotten a new source
-		cutex_lock(&newSource->lock);
+		mutex_lock(&newSource->lock);

 		list_remove_item(&newSource->consumers, cache);
 		list_add_item(&newSource->consumers, consumer);
 		consumer->source = newSource;
 		cache->source = NULL;

-		cutex_unlock(&newSource->lock);
+		mutex_unlock(&newSource->lock);

 		// Release the other reference to the cache - we take over
 		// its reference of its source cache; we can do this here
@@ -654,7 +654,7 @@ if (cache->ref_count < 2)
 panic("cacheRef %p ref count too low!\n", cache);
 		vm_cache_release_ref(cache);

-		cutex_unlock(&consumer->lock);
+		mutex_unlock(&consumer->lock);
 		vm_cache_release_ref(consumer);
 	}

@@ -662,7 +662,7 @@ panic("cacheRef %p ref count too low!\n", cache);
 		busyCondition.Unpublish();
 	}

-	cutex_unlock(&cache->lock);
+	mutex_unlock(&cache->lock);
 	vm_cache_release_ref(cache);
 }

@@ -676,8 +676,8 @@ void
 vm_cache_add_consumer_locked(vm_cache* cache, vm_cache* consumer)
 {
 	TRACE(("add consumer vm cache %p to cache %p\n", consumer, cache));
-	ASSERT_LOCKED_CUTEX(&cache->lock);
-	ASSERT_LOCKED_CUTEX(&consumer->lock);
+	ASSERT_LOCKED_MUTEX(&cache->lock);
+	ASSERT_LOCKED_MUTEX(&consumer->lock);

 	consumer->source = cache;
 	list_add_item(&cache->consumers, consumer);
@@ -696,7 +696,7 @@ status_t
 vm_cache_insert_area_locked(vm_cache* cache, vm_area* area)
 {
 	TRACE(("vm_cache_insert_area_locked(cache %p, area %p)\n", cache, area));
-	ASSERT_LOCKED_CUTEX(&cache->lock);
+	ASSERT_LOCKED_MUTEX(&cache->lock);

 	area->cache_next = cache->areas;
 	if (area->cache_next)
@@ -716,7 +716,7 @@ vm_cache_remove_area(vm_cache* cache, vm_area* area)
 {
 	TRACE(("vm_cache_remove_area(cache %p, area %p)\n", cache, area));

-	CutexLocker locker(cache->lock);
+	MutexLocker locker(cache->lock);

 	if (area->cache_prev)
 		area->cache_prev->cache_next = area->cache_next;

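vm_cache_remove_consumer() above shows the rule these files follow for lock ordering: to take a lock that ranks before one you already hold, drop yours, acquire both in canonical order, then re-validate everything the dropped lock guarded. A condensed sketch with illustrative names (lock_in_order is not a function from this commit):

	// Sketch only: acquire `first` (which ranks before `second` in the
	// locking order) while `second` is already held. Everything guarded
	// by `second` must be re-checked afterwards, since it was briefly
	// unlocked and other threads may have run.
	static void
	lock_in_order(mutex* first, mutex* second)
	{
		mutex_unlock(second);
		mutex_lock(first);		// canonical order: first, ...
		mutex_lock(second);		// ... then second
	}

In the real code the cache is additionally marked busy and a condition variable is published before unlocking, so concurrent users wait instead of operating on a half-merged cache.
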
@@ -66,15 +66,15 @@ PageCacheLocker::Lock(vm_page* page, bool dontWait)
 		return false;

 	if (dontWait) {
-		if (cutex_trylock(&cache->lock) != B_OK) {
+		if (mutex_trylock(&cache->lock) != B_OK) {
 			vm_cache_release_ref(cache);
 			return false;
 		}
 	} else
-		cutex_lock(&cache->lock);
+		mutex_lock(&cache->lock);

 	if (cache != page->cache || _IgnorePage(page)) {
-		cutex_unlock(&cache->lock);
+		mutex_unlock(&cache->lock);
 		vm_cache_release_ref(cache);
 		return false;
 	}
@@ -91,7 +91,7 @@ PageCacheLocker::Unlock()
 		return;

 	vm_cache* cache = fPage->cache;
-	cutex_unlock(&cache->lock);
+	mutex_unlock(&cache->lock);
 	vm_cache_release_ref(cache);

 	fPage = NULL;

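PageCacheLocker, like the MutexLocker that replaces CutexLocker in vm_cache_remove_area(), relies on scope-bound unlocking, so every early return releases the cache lock exactly once. A minimal sketch of such a guard; ScopedMutexLocker is an illustrative stand-in, not the kernel's actual AutoLocker-based MutexLocker:

	// Sketch of the RAII idiom: lock on construction, unlock on
	// destruction, so no return path can leak the lock.
	class ScopedMutexLocker {
	public:
		ScopedMutexLocker(mutex& lock)
			:
			fLock(&lock)
		{
			mutex_lock(fLock);
		}

		~ScopedMutexLocker()
		{
			if (fLock != NULL)
				mutex_unlock(fLock);
		}

		// Hand responsibility for unlocking back to the caller.
		void Detach()
		{
			fLock = NULL;
		}

	private:
		mutex*	fLock;
	};
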
@@ -184,8 +184,7 @@ vm_low_memory_init(void)
 status_t
 vm_low_memory_init_post_thread(void)
 {
-	if (mutex_init(&sLowMemoryMutex, "low memory") < B_OK)
-		return B_ERROR;
+	mutex_init(&sLowMemoryMutex, "low memory");

 	sLowMemoryWaitSem = create_sem(0, "low memory wait");
 	if (sLowMemoryWaitSem < B_OK)

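This hunk, like the driver_settings one further down, is the mechanical consequence of the new API: mutex_init() returns void and cannot fail, so the old `< B_OK` checks are dead code. A hedged before/after sketch (sExampleLock and the function name are illustrative):

	static mutex sExampleLock;

	static status_t
	example_init_post_sem(void)
	{
		// Old semaphore-based mutex: initialization could fail.
		//	if (mutex_init(&sExampleLock, "example lock") < B_OK)
		//		return B_ERROR;

		// New mutex: initialized in place, nothing to check.
		mutex_init(&sExampleLock, "example lock");
		return B_OK;
	}
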
@@ -1043,7 +1043,7 @@ page_writer(void* /*unused*/)

 	for (uint32 i = 0; i < numPages; i++) {
 		vm_cache *cache = u.pages[i]->cache;
-		cutex_lock(&cache->lock);
+		mutex_lock(&cache->lock);

 		if (writeStatus[i] == B_OK) {
 			// put it into the active queue
@@ -1069,7 +1069,7 @@ page_writer(void* /*unused*/)
 		busyConditions[i].Unpublish();

 		u.caches[i] = cache;
-		cutex_unlock(&cache->lock);
+		mutex_unlock(&cache->lock);
 	}

 	for (uint32 i = 0; i < numPages; i++) {
@@ -1156,7 +1156,7 @@ steal_page(vm_page *page, bool stealActive)
 {
 	fCache = vm_cache_acquire_page_cache_ref(page);
 	if (fCache != NULL) {
-		if (cutex_trylock(&fCache->lock) != B_OK)
+		if (mutex_trylock(&fCache->lock) != B_OK)
 			return;

 		fOwnsLock = true;
@@ -1169,7 +1169,7 @@ steal_page(vm_page *page, bool stealActive)
 	~PageCacheTryLocker()
 	{
 		if (fOwnsLock)
-			cutex_unlock(&fCache->lock);
+			mutex_unlock(&fCache->lock);
 		if (fCache != NULL)
 			vm_cache_release_ref(fCache);
 	}
@@ -1345,9 +1345,9 @@ vm_page_write_modified_pages(vm_cache *cache, bool fsReenter)
 		// clear the modified flag
 		vm_clear_map_flags(page, PAGE_MODIFIED);

-		cutex_unlock(&cache->lock);
+		mutex_unlock(&cache->lock);
 		status_t status = write_page(page, fsReenter);
-		cutex_lock(&cache->lock);
+		mutex_lock(&cache->lock);

 		InterruptsSpinLocker locker(&sPageLock);

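The page-daemon paths above (PageCacheLocker::Lock() with dontWait, PageCacheTryLocker in steal_page()) use the one non-blocking entry point, mutex_trylock(), which returns B_OK on success and fails immediately otherwise; a contended cache is simply skipped. A small sketch of that back-off, with try_process_page as an assumed helper name:

	// Sketch only: inspect a page's cache opportunistically; never block.
	static bool
	try_process_page(vm_page* page)
	{
		vm_cache* cache = vm_cache_acquire_page_cache_ref(page);
		if (cache == NULL)
			return false;

		if (mutex_trylock(&cache->lock) != B_OK) {
			// Contended: leave the page for the next pass.
			vm_cache_release_ref(cache);
			return false;
		}

		// ... inspect or steal the page under the lock ...

		mutex_unlock(&cache->lock);
		vm_cache_release_ref(cache);
		return true;
	}
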
@@ -657,7 +657,8 @@ driver_settings_init(kernel_args *args)
 status_t
 driver_settings_init_post_sem(kernel_args *args)
 {
-	return mutex_init(&sLock, "driver settings");
+	mutex_init(&sLock, "driver settings");
+	return B_OK;
 }
 #endif