* Initialize all static mutexes in the kernel through MUTEX_INITIALIZER()
  and remove the now-unneeded mutex_init() calls for them.
* Remove the workaround that allowed uninitialized mutexes during kernel
  startup. Since all static mutexes are now initialized through
  MUTEX_INITIALIZER(), it is no longer needed.
* Using an uninitialized mutex now causes a panic, to flush out any
  remaining cases.
* Remove the now-unnecessary driver_settings_init_post_sem() function.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@25812 a95241bf-73f2-0310-859d-f6bbb57e9c96
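For context, a minimal sketch of the pattern this commit applies across the kernel. The names sExampleLock, sStaticExampleLock, and example_init() are hypothetical, and <lock.h> is assumed to be the private kernel header providing mutex, mutex_init(), and MUTEX_INITIALIZER():

```c
#include <lock.h>	/* assumed: Haiku's private kernel locking API */

/* Before: a zero-filled BSS mutex, only usable after some boot hook ran. */
static mutex sExampleLock;

static void
example_init(void)	/* hypothetical boot-time init hook */
{
	mutex_init(&sExampleLock, "example lock");
}

/* After: initialized at compile time and usable from the very start of
 * kernel startup, so the boot hook (and its ordering constraint) go away. */
static mutex sStaticExampleLock = MUTEX_INITIALIZER("example lock");
```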
commit f5b3a6a796 (parent 2b31f85c9e)
@@ -16,7 +16,6 @@ extern "C" {
 #endif
 
 status_t driver_settings_init(struct kernel_args *args);
-status_t driver_settings_init_post_sem(struct kernel_args *args);
 
 #ifdef __cplusplus
 }
@@ -43,7 +43,7 @@ static paddr_chunk_desc **virtual_pmappings; // will be one ptr per virtual chunk
 static int first_free_vmapping;
 static int num_virtual_chunks;
 static queue mapped_paddr_lru;
-static mutex sMutex;
+static mutex sMutex = MUTEX_INITIALIZER("iospace_mutex");
 static sem_id sChunkAvailableSem;
 static int32 sChunkAvailableWaitingCounter;
 
@@ -282,7 +282,6 @@ generic_vm_physical_page_mapper_init(kernel_args *args,
 	memset(virtual_pmappings, 0, sizeof(paddr_chunk_desc *) * num_virtual_chunks);
 	first_free_vmapping = 0;
 	queue_init(&mapped_paddr_lru);
-	mutex_init(&sMutex, "iospace_mutex");
 	sChunkAvailableSem = -1;
 
 	TRACE(("generic_vm_physical_page_mapper_init: done\n"));
@@ -396,6 +396,8 @@ frame_buffer_console_init(kernel_args *args)
 	if (!args->frame_buffer.enabled)
 		return B_OK;
 
+	mutex_init(&sConsole.lock, "console_lock");
+
 	void *frameBuffer;
 	sConsole.area = map_physical_memory("vesa_fb",
 		(void *)args->frame_buffer.physical_buffer.start,
@@ -443,8 +445,6 @@ frame_buffer_console_init(kernel_args *args)
 status_t
 frame_buffer_console_init_post_modules(kernel_args *args)
 {
-	mutex_init(&sConsole.lock, "console_lock");
-
 	if (sConsole.frame_buffer == 0)
 		return B_OK;
 
@@ -67,7 +67,7 @@ typedef DoublyLinkedList<id_generator> GeneratorList;
 
 
 GeneratorList sGenerators;
-static mutex sLock;
+static mutex sLock = MUTEX_INITIALIZER("id generator");
 
 
 /*! Create new generator.
@@ -169,7 +169,6 @@ void
 dm_init_id_generator(void)
 {
 	new(&sGenerators) GeneratorList;
-	mutex_init(&sLock, "id generator");
 }
 
 
@@ -59,9 +59,11 @@ typedef struct elf_linked_image {
 static hash_table *sImagesHash;
 
 static struct elf_image_info *sKernelImage = NULL;
-static mutex sImageMutex; // guards sImagesHash
-static mutex sImageLoadMutex; // serializes loading/unloading add-ons
-	// locking order sImageLoadMutex -> sImageMutex
+static mutex sImageMutex = MUTEX_INITIALIZER("kimages_lock");
+	// guards sImagesHash
+static mutex sImageLoadMutex = MUTEX_INITIALIZER("kimages_load_lock");
+	// serializes loading/unloading add-ons; locking order:
+	// sImageLoadMutex -> sImageMutex
 static bool sInitialized = false;
 
 
@@ -1785,9 +1787,6 @@ elf_init(kernel_args *args)
 
 	image_init();
 
-	mutex_init(&sImageMutex, "kimages_lock");
-	mutex_init(&sImageLoadMutex, "kimages_load_lock");
-
 	sImagesHash = hash_init(IMAGE_HASH_SIZE, 0, image_compare, image_hash);
 	if (sImagesHash == NULL)
 		return B_NO_MEMORY;
@@ -169,7 +169,7 @@ struct advisory_locking {
 	}
 };
 
-static mutex sFileSystemsMutex;
+static mutex sFileSystemsMutex = MUTEX_INITIALIZER("vfs_lock");
 
 /*! \brief Guards sMountsTable.
 
@@ -177,7 +177,7 @@ static mutex sFileSystemsMutex;
 	Manipulation of the fs_mount structures themselves
 	(and their destruction) requires different locks though.
 */
-static mutex sMountMutex;
+static mutex sMountMutex = MUTEX_INITIALIZER("vfs_mount_lock");
 
 /*! \brief Guards mount/unmount operations.
 
@@ -201,7 +201,8 @@ static recursive_lock sMountOpLock;
 
 	The thread trying to lock the must not hold sVnodeMutex.
 */
-static mutex sVnodeCoveredByMutex;
+static mutex sVnodeCoveredByMutex
+	= MUTEX_INITIALIZER("vfs_vnode_covered_by_lock");
 
 /*! \brief Guards sVnodeTable.
 
@@ -215,7 +216,7 @@ static mutex sVnodeCoveredByMutex;
 	You must not have this mutex held when calling create_sem(), as this
 	might call vfs_free_unused_vnodes().
 */
-static mutex sVnodeMutex;
+static mutex sVnodeMutex = MUTEX_INITIALIZER("vfs_vnode_lock");
 
 /*! \brief Guards io_context::root.
 
@@ -223,7 +224,7 @@ static mutex sVnodeMutex;
 	The only operation allowed while holding this lock besides getting or
 	setting the field is inc_vnode_ref_count() on io_context::root.
 */
-static mutex sIOContextRootLock;
+static mutex sIOContextRootLock = MUTEX_INITIALIZER("io_context::root lock");
 
 #define VNODE_HASH_TABLE_SIZE 1024
 static hash_table *sVnodeTable;
@@ -4414,13 +4415,7 @@ vfs_init(kernel_args *args)
 
 	sRoot = NULL;
 
-	mutex_init(&sFileSystemsMutex, "vfs_lock");
-
 	recursive_lock_init(&sMountOpLock, "vfs_mount_op_lock");
-	mutex_init(&sMountMutex, "vfs_mount_lock");
-	mutex_init(&sVnodeCoveredByMutex, "vfs_vnode_covered_by_lock");
-	mutex_init(&sVnodeMutex, "vfs_vnode_lock");
-	mutex_init(&sIOContextRootLock, "io_context::root lock");
 
 	if (block_cache_init() != B_OK)
 		return B_ERROR;
@@ -38,7 +38,7 @@ struct image {
 
 
 static image_id sNextImageID = 1;
-static mutex sImageMutex;
+static mutex sImageMutex = MUTEX_INITIALIZER("image");
 
 
 /*! Registers an image with the specified team.
@@ -274,7 +274,6 @@ image_init(void)
 	add_debugger_command("team_images", &dump_images_list, "Dump all registered images from the current team");
 #endif
 
-	mutex_init(&sImageMutex, "image");
 	return B_OK;
 }
 
@@ -33,7 +33,7 @@ struct daemon : DoublyLinkedListLinkImpl<struct daemon> {
 
 typedef DoublyLinkedList<struct daemon> DaemonList;
 
-static mutex sDaemonMutex;
+static mutex sDaemonMutex = MUTEX_INITIALIZER("kernel daemon");
 static DaemonList sDaemons;
 
 
@@ -130,12 +130,9 @@ register_kernel_daemon(daemon_hook function, void* arg, int frequency)
 extern "C" status_t
 kernel_daemon_init(void)
 {
-	thread_id thread;
-
-	mutex_init(&sDaemonMutex, "kernel daemon");
 	new(&sDaemons) DaemonList;
 
-	thread = spawn_kernel_thread(&kernel_daemon, "kernel daemon",
+	thread_id thread = spawn_kernel_thread(&kernel_daemon, "kernel daemon",
 		B_LOW_PRIORITY, NULL);
 	send_signal_etc(thread, SIGCONT, B_DO_NOT_RESCHEDULE);
 
@@ -400,15 +400,14 @@ _mutex_lock(mutex* lock, bool threadsLocked)
 	// Might have been released after we decremented the count, but before
 	// we acquired the spinlock.
 #ifdef KDEBUG
-	if (!kernel_startup && lock->holder == thread_get_current_thread_id()) {
-		panic("_mutex_lock(): double lock of %p by thread %ld", lock,
-			lock->holder);
-	}
-
-	if (lock->holder <= 0) {
+	if (lock->holder < 0) {
 		lock->holder = thread_get_current_thread_id();
 		return B_OK;
-	}
+	} else if (lock->holder == thread_get_current_thread_id()) {
+		panic("_mutex_lock(): double lock of %p by thread %ld", lock,
+			lock->holder);
+	} else if (lock->holder == 0)
+		panic("_mutex_lock(): using uninitialized lock %p", lock);
 #else
 	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
 		lock->flags &= ~MUTEX_FLAG_RELEASED;
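For readers following the KDEBUG branch above: the rewritten check folds the old double-lock test into a three-way switch on mutex::holder. A negative holder means the mutex is initialized but unlocked, a positive holder is the owning thread, and a zero holder can now only mean a mutex that was never initialized (a zero-filled BSS object), which is exactly the case the commit wants to panic on. A minimal sketch of an initializer consistent with these checks; the actual macro lives in Haiku's private lock.h and its real expansion may differ:

```c
/* Hypothetical expansion, assuming a KDEBUG mutex laid out roughly as
 * { name, waiter list, holder }; -1 marks "initialized, unlocked". */
#define MUTEX_INITIALIZER(name)	{ name, NULL, -1 }
```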
@@ -156,8 +156,6 @@ _start(kernel_args *bootKernelArgs, int currentCPU)
 	// now we can create and use semaphores
 	TRACE("init VM semaphores\n");
 	vm_init_post_sem(&sKernelArgs);
-	TRACE("init driver_settings\n");
-	driver_settings_init_post_sem(&sKernelArgs);
 	TRACE("init generic syscall\n");
 	generic_syscall_init();
 	smp_init_post_generic_syscalls();
@@ -164,7 +164,7 @@ struct depot_cpu_store {
 
 
 static ObjectCacheList sObjectCaches;
-static mutex sObjectCacheListLock;
+static mutex sObjectCacheListLock = MUTEX_INITIALIZER("object cache list");
 
 static uint8 *sInitialBegin, *sInitialLimit, *sInitialPointer;
 static kernel_args *sKernelArgs;
@@ -1454,8 +1454,6 @@ slab_init(kernel_args *args, addr_t initialBase, size_t initialSize)
 void
 slab_init_post_sem()
 {
-	mutex_init(&sObjectCacheListLock, "object cache list");
-
 	ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
 
 	while (it.HasNext()) {
@@ -55,7 +55,7 @@ struct generic_syscall {
 	generic_syscall *previous;
 };
 
-static struct mutex sGenericSyscallLock;
+static mutex sGenericSyscallLock = MUTEX_INITIALIZER("generic syscall");
 static struct list sGenericSyscalls;
 
 
@@ -212,7 +212,6 @@ status_t
 generic_syscall_init(void)
 {
 	list_init(&sGenericSyscalls);
-	mutex_init(&sGenericSyscallLock, "generic syscall");
 
 #if SYSCALL_TRACING
 	add_debugger_command_etc("straced", &dump_syscall_tracing,
@@ -44,7 +44,7 @@ struct cbuf {
 #define CBUF_BITMAP_SIZE (CBUF_REGION_SIZE / CBUF_LENGTH)
 
 static cbuf *sFreeBufferList;
-static mutex sFreeBufferListMutex;
+static mutex sFreeBufferListMutex = MUTEX_INITIALIZER("cbuf_free_list");
 static cbuf *sFreeBufferNoBlockList;
 static spinlock sNoBlockSpinlock;
 
@@ -950,8 +950,6 @@ cbuf_init(void)
 	// add the debug command
 	add_debugger_command("cbuf_freelist", &dbg_dump_cbuf_freelists, "Dumps the cbuf free lists");
 
-	mutex_init(&sFreeBufferListMutex, "cbuf_free_list");
-
 	// errors are fatal, that's why we don't clean up here
 
 	sBufferArea = create_area("cbuf region", (void **)&sBuffer, B_ANY_KERNEL_ADDRESS,
@@ -194,11 +194,11 @@ public:
 static area_id sNextAreaID;
 static hash_table *sAreaHash;
 static sem_id sAreaHashLock;
-static mutex sMappingLock;
-static mutex sAreaCacheLock;
+static mutex sMappingLock = MUTEX_INITIALIZER("page mappings");
+static mutex sAreaCacheLock = MUTEX_INITIALIZER("area->cache");
 
 static off_t sAvailableMemory;
-static mutex sAvailableMemoryLock;
+static mutex sAvailableMemoryLock = MUTEX_INITIALIZER("available memory lock");
 
 #if DEBUG_CACHE_LIST
 
@@ -3955,13 +3955,10 @@ vm_init_post_sem(kernel_args *args)
 	// since we're still single threaded and only the kernel address space exists,
 	// it isn't that hard to find all of the ones we need to create
 
-	mutex_init(&sAvailableMemoryLock, "available memory lock");
 	arch_vm_translation_map_init_post_sem(args);
 	vm_address_space_init_post_sem();
 
 	sAreaHashLock = create_sem(WRITE_COUNT, "area hash");
-	mutex_init(&sAreaCacheLock, "area->cache");
-	mutex_init(&sMappingLock, "page mappings");
 
 	slab_init_post_sem();
 	return heap_init_post_sem();
@@ -50,7 +50,7 @@ static const size_t kCriticalLimit = 32;
 static int32 sLowMemoryState = B_NO_LOW_MEMORY;
 static bigtime_t sLastMeasurement;
 
-static mutex sLowMemoryMutex;
+static mutex sLowMemoryMutex = MUTEX_INITIALIZER("low memory");
 static sem_id sLowMemoryWaitSem;
 static HandlerList sLowMemoryHandlers;
 
@@ -184,8 +184,6 @@ vm_low_memory_init(void)
 status_t
 vm_low_memory_init_post_thread(void)
 {
-	mutex_init(&sLowMemoryMutex, "low memory");
-
 	sLowMemoryWaitSem = create_sem(0, "low memory wait");
 	if (sLowMemoryWaitSem < B_OK)
 		return sLowMemoryWaitSem;
@@ -88,7 +88,7 @@ enum assignment_mode {
 
 #ifdef _KERNEL_MODE
 static struct list sHandles;
-static mutex sLock;
+static mutex sLock = MUTEX_INITIALIZER("driver settings");
 #endif
 
 
@@ -652,14 +652,6 @@ driver_settings_init(kernel_args *args)
 
 	return B_OK;
 }
-
-
-status_t
-driver_settings_init_post_sem(kernel_args *args)
-{
-	mutex_init(&sLock, "driver settings");
-	return B_OK;
-}
 #endif
 
 