* mutex_lock() and recursive_lock_lock() now return a status_t and report failure.

* recursive_lock_unlock() now returns void to mirror its counterpart better;
  use recursive_lock_get_recursion() if you're interested in the lock depth.
* switch_sem() and release_sem() now don't do anything anymore in kernel startup
  mode.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@20099 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Axel Dörfler 2007-02-07 14:07:31 +00:00
parent d5d570384c
commit aa547f5fbb
5 changed files with 70 additions and 64 deletions

View File

@ -1,5 +1,5 @@
/*
* Copyright 2002-2004, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
@ -52,13 +52,13 @@ extern "C" {
extern status_t recursive_lock_init(recursive_lock *lock, const char *name);
extern void recursive_lock_destroy(recursive_lock *lock);
extern bool recursive_lock_lock(recursive_lock *lock);
extern bool recursive_lock_unlock(recursive_lock *lock);
extern int recursive_lock_get_recursion(recursive_lock *lock);
extern status_t recursive_lock_lock(recursive_lock *lock);
extern void recursive_lock_unlock(recursive_lock *lock);
extern int32 recursive_lock_get_recursion(recursive_lock *lock);
extern status_t mutex_init(mutex *m, const char *name);
extern void mutex_destroy(mutex *m);
extern void mutex_lock(mutex *m);
extern status_t mutex_lock(mutex *m);
extern void mutex_unlock(mutex *m);
extern status_t benaphore_init(benaphore *ben, const char *name);

View File

@ -1,13 +1,14 @@
/*
* Copyright 2005, Ingo Weinhold, bonefish@users.sf.net. All rights reserved.
* Copyright 2005-2007, Ingo Weinhold, bonefish@users.sf.net. All rights reserved.
* Distributed under the terms of the MIT License.
*/
#ifndef KERNEL_UTIL_AUTO_LOCKER_H
#define KERNEL_UTIL_AUTO_LOCKER_H
#include <lock.h>
namespace BPrivate {
// AutoLockerStandardLocking
@ -165,8 +166,7 @@ class MutexLocking {
public:
inline bool Lock(mutex *lockable)
{
mutex_lock(lockable);
return true;
return mutex_lock(lockable) == B_OK;
}
inline void Unlock(mutex *lockable)
@ -183,8 +183,7 @@ class RecursiveLockLocking {
public:
inline bool Lock(recursive_lock *lockable)
{
recursive_lock_lock(lockable);
return true;
return recursive_lock_lock(lockable) == B_OK;
}
inline void Unlock(recursive_lock *lockable)

View File

@ -1,5 +1,5 @@
/*
* Copyright 2002-2006, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
@ -188,13 +188,14 @@ lock_tmap(vm_translation_map *map)
{
TRACE(("lock_tmap: map %p\n", map));
if (recursive_lock_lock(&map->lock) == true) {
recursive_lock_lock(&map->lock);
if (recursive_lock_get_recursion(&map->lock) == 1) {
// we were the first one to grab the lock
TRACE(("clearing invalidated page count\n"));
map->arch_data->num_invalidate_pages = 0;
}
return 0;
return B_OK;
}
@ -209,7 +210,7 @@ unlock_tmap(vm_translation_map *map)
}
recursive_lock_unlock(&map->lock);
return 0;
return B_OK;
}
@ -393,8 +394,8 @@ unmap_tmap(vm_translation_map *map, addr_t start, addr_t end)
{
page_table_entry *pt;
page_directory_entry *pd = map->arch_data->pgdir_virt;
status_t status;
int index;
int err;
start = ROUNDOWN(start, B_PAGE_SIZE);
end = ROUNDUP(end, B_PAGE_SIZE);
@ -403,7 +404,7 @@ unmap_tmap(vm_translation_map *map, addr_t start, addr_t end)
restart:
if (start >= end)
return 0;
return B_OK;
index = VADDR_TO_PDENT(start);
if (pd[index].present == 0) {
@ -413,10 +414,12 @@ restart:
}
do {
err = get_physical_page_tmap(ADDR_REVERSE_SHIFT(pd[index].addr), (addr_t *)&pt, PHYSICAL_PAGE_NO_WAIT);
} while (err < 0);
status = get_physical_page_tmap(ADDR_REVERSE_SHIFT(pd[index].addr),
(addr_t *)&pt, PHYSICAL_PAGE_NO_WAIT);
} while (status < B_OK);
for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end); index++, start += B_PAGE_SIZE) {
for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
index++, start += B_PAGE_SIZE) {
if (pt[index].present == 0) {
// page mapping not valid
continue;
@ -474,7 +477,7 @@ query_tmap_interrupt(vm_translation_map *map, addr_t va, addr_t *_physical)
index = VADDR_TO_PTENT(va);
*_physical = ADDR_REVERSE_SHIFT(pt[index].addr);
return 0;
return B_OK;
}
@ -517,7 +520,7 @@ query_tmap(vm_translation_map *map, addr_t va, addr_t *_physical, uint32 *_flags
TRACE(("query_tmap: returning pa 0x%lx for va 0x%lx\n", *_physical, va));
return 0;
return B_OK;
}
@ -533,8 +536,8 @@ protect_tmap(vm_translation_map *map, addr_t start, addr_t end, uint32 attribute
{
page_table_entry *pt;
page_directory_entry *pd = map->arch_data->pgdir_virt;
status_t status;
int index;
int err;
start = ROUNDOWN(start, B_PAGE_SIZE);
end = ROUNDUP(end, B_PAGE_SIZE);
@ -543,7 +546,7 @@ protect_tmap(vm_translation_map *map, addr_t start, addr_t end, uint32 attribute
restart:
if (start >= end)
return 0;
return B_OK;
index = VADDR_TO_PDENT(start);
if (pd[index].present == 0) {
@ -553,9 +556,9 @@ restart:
}
do {
err = get_physical_page_tmap(ADDR_REVERSE_SHIFT(pd[index].addr),
status = get_physical_page_tmap(ADDR_REVERSE_SHIFT(pd[index].addr),
(addr_t *)&pt, PHYSICAL_PAGE_NO_WAIT);
} while (err < 0);
} while (status < B_OK);
for (index = VADDR_TO_PTENT(start); index < 1024 && start < end; index++, start += B_PAGE_SIZE) {
if (pt[index].present == 0) {
@ -588,19 +591,20 @@ clear_flags_tmap(vm_translation_map *map, addr_t va, uint32 flags)
{
page_table_entry *pt;
page_directory_entry *pd = map->arch_data->pgdir_virt;
status_t status;
int index;
int err;
int tlb_flush = false;
index = VADDR_TO_PDENT(va);
if (pd[index].present == 0) {
// no pagetable here
return B_NO_ERROR;
return B_OK;
}
do {
err = get_physical_page_tmap(ADDR_REVERSE_SHIFT(pd[index].addr), (addr_t *)&pt, PHYSICAL_PAGE_NO_WAIT);
} while (err < 0);
status = get_physical_page_tmap(ADDR_REVERSE_SHIFT(pd[index].addr),
(addr_t *)&pt, PHYSICAL_PAGE_NO_WAIT);
} while (status < B_OK);
index = VADDR_TO_PTENT(va);
// clear out the flags we've been requested to clear

View File

@ -1,5 +1,5 @@
/*
* Copyright 2002-2005, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
@ -18,12 +18,10 @@
#include <thread.h>
int
int32
recursive_lock_get_recursion(recursive_lock *lock)
{
thread_id thid = thread_get_current_thread_id();
if (lock->holder == thid)
if (lock->holder == thread_get_current_thread_id())
return lock->recursion;
return -1;
@ -61,41 +59,36 @@ recursive_lock_destroy(recursive_lock *lock)
}
bool
status_t
recursive_lock_lock(recursive_lock *lock)
{
thread_id thid = thread_get_current_thread_id();
bool retval = false;
thread_id thread = thread_get_current_thread_id();
if (!kernel_startup && !are_interrupts_enabled())
panic("recursive_lock_lock: called with interrupts disabled for lock %p, sem %#lx\n", lock, lock->sem);
if (thid != lock->holder) {
acquire_sem(lock->sem);
if (thread != lock->holder) {
status_t status = acquire_sem(lock->sem);
if (status < B_OK)
return status;
lock->holder = thid;
retval = true;
lock->holder = thread;
}
lock->recursion++;
return retval;
return B_OK;
}
bool
void
recursive_lock_unlock(recursive_lock *lock)
{
thread_id thid = thread_get_current_thread_id();
bool retval = false;
if (thid != lock->holder)
if (thread_get_current_thread_id() != lock->holder)
panic("recursive_lock %p unlocked by non-holder thread!\n", lock);
if (--lock->recursion == 0) {
lock->holder = -1;
release_sem(lock->sem);
retval = true;
}
return retval;
}
@ -135,22 +128,24 @@ mutex_destroy(mutex *mutex)
}
void
status_t
mutex_lock(mutex *mutex)
{
thread_id me = thread_get_current_thread_id();
status_t status;
if (!kernel_startup && !are_interrupts_enabled())
panic("mutex_lock: called with interrupts disabled for mutex %p, sem %#lx\n", mutex, mutex->sem);
if (kernel_startup)
return B_OK;
status = acquire_sem(mutex->sem);
if (status < B_OK)
return status;
// ToDo: if acquire_sem() fails, we shouldn't panic - but we should definitely
// change the mutex API to actually return the status code
if (acquire_sem(mutex->sem) == B_OK) {
if (me == mutex->holder)
panic("mutex_lock failure: mutex %p (sem = 0x%lx) acquired twice by thread 0x%lx\n", mutex, mutex->sem, me);
}
mutex->holder = me;
return B_OK;
}
@ -159,9 +154,13 @@ mutex_unlock(mutex *mutex)
{
thread_id me = thread_get_current_thread_id();
if (me != mutex->holder)
if (kernel_startup)
return;
if (me != mutex->holder) {
panic("mutex_unlock failure: thread 0x%lx is trying to release mutex %p (current holder 0x%lx)\n",
me, mutex, mutex->holder);
}
mutex->holder = -1;
release_sem(mutex->sem);

View File

@ -1,5 +1,5 @@
/*
* Copyright 2002-2006, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Copyright 2001, Travis Geiselbrecht. All rights reserved.
@ -521,10 +521,12 @@ switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
int state;
status_t status = B_OK;
if (kernel_startup)
return B_OK;
if (sSemsActive == false)
return B_NO_MORE_SEMS;
if (!kernel_startup && !are_interrupts_enabled())
if (!are_interrupts_enabled())
panic("acquire_sem_etc: called with interrupts disabled for sem %#lx\n", id);
if (id < 0)
@ -691,6 +693,8 @@ release_sem_etc(sem_id id, int32 count, uint32 flags)
cpu_status state;
status_t status = B_OK;
if (kernel_startup)
return B_OK;
if (sSemsActive == false)
return B_NO_MORE_SEMS;
if (id < 0)