* Renamed kernel_startup to gKernelStartup.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@26731 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler 2008-08-02 15:03:03 +00:00
parent 15374c5dbd
commit 272457553e
7 changed files with 26 additions and 26 deletions

View File

@@ -47,7 +47,7 @@
#define CLEAR_BIT(a, b) ((a) & (~(1 << (b))))
/* during kernel startup, interrupts are disabled (among other things) */
-extern bool kernel_startup;
+extern bool gKernelStartup;
#ifdef __cplusplus

View File

@@ -260,7 +260,7 @@ class Free : public AbstractTraceEntry {
} // namespace KernelHeapTracing
-# define T(x) if (!kernel_startup) new(std::nothrow) KernelHeapTracing::x;
+# define T(x) if (!gKernelStartup) new(std::nothrow) KernelHeapTracing::x;
#else
# define T(x) ;
#endif
@@ -1211,8 +1211,8 @@ heap_raw_alloc(heap_allocator *heap, size_t size, uint32 binIndex)
heap_leak_check_info *info = (heap_leak_check_info *)((addr_t)address
+ bin->element_size - sizeof(heap_leak_check_info));
info->size = size - sizeof(heap_leak_check_info);
-info->thread = (kernel_startup ? 0 : thread_get_current_thread_id());
-info->team = (kernel_startup ? 0 : team_get_current_team_id());
+info->thread = (gKernelStartup ? 0 : thread_get_current_thread_id());
+info->team = (gKernelStartup ? 0 : team_get_current_team_id());
info->caller = get_caller();
#endif
return address;
@@ -1231,8 +1231,8 @@ heap_raw_alloc(heap_allocator *heap, size_t size, uint32 binIndex)
+ (firstPage->index + pageCount) * heap->page_size
- sizeof(heap_leak_check_info));
info->size = size - sizeof(heap_leak_check_info);
-info->thread = (kernel_startup ? 0 : thread_get_current_thread_id());
-info->team = (kernel_startup ? 0 : team_get_current_team_id());
+info->thread = (gKernelStartup ? 0 : thread_get_current_thread_id());
+info->team = (gKernelStartup ? 0 : team_get_current_team_id());
info->caller = get_caller();
#endif
return (void *)(firstPage->area->base + firstPage->index * heap->page_size);
@@ -1759,12 +1759,12 @@ heap_init_post_thread()
void *
memalign(size_t alignment, size_t size)
{
-if (!kernel_startup && !are_interrupts_enabled()) {
+if (!gKernelStartup && !are_interrupts_enabled()) {
panic("memalign(): called with interrupts disabled\n");
return NULL;
}
-if (!kernel_startup && size > HEAP_AREA_USE_THRESHOLD) {
+if (!gKernelStartup && size > HEAP_AREA_USE_THRESHOLD) {
// don't even attempt such a huge allocation - use areas instead
size_t areaSize = size + sizeof(area_allocation_info);
if (alignment != 0)
@@ -1875,7 +1875,7 @@ malloc(size_t size)
void
free(void *address)
{
-if (!kernel_startup && !are_interrupts_enabled()) {
+if (!gKernelStartup && !are_interrupts_enabled()) {
panic("free(): called with interrupts disabled\n");
return;
}
@@ -1917,7 +1917,7 @@ free(void *address)
void *
realloc(void *address, size_t newSize)
{
-if (!kernel_startup && !are_interrupts_enabled()) {
+if (!gKernelStartup && !are_interrupts_enabled()) {
panic("realloc(): called with interrupts disabled\n");
return NULL;
}

View File

@@ -92,7 +92,7 @@ recursive_lock_lock(recursive_lock *lock)
{
thread_id thread = thread_get_current_thread_id();
-if (!kernel_startup && !are_interrupts_enabled()) {
+if (!gKernelStartup && !are_interrupts_enabled()) {
panic("recursive_lock_lock: called with interrupts disabled for lock "
"%p (\"%s\")\n", lock, lock->lock.name);
}
@@ -114,7 +114,7 @@ recursive_lock_trylock(recursive_lock *lock)
{
thread_id thread = thread_get_current_thread_id();
-if (!kernel_startup && !are_interrupts_enabled())
+if (!gKernelStartup && !are_interrupts_enabled())
panic("recursive_lock_lock: called with interrupts disabled for lock "
"%p (\"%s\")\n", lock, lock->lock.name);
@@ -500,7 +500,7 @@ status_t
_mutex_lock(mutex* lock, bool threadsLocked)
{
#ifdef KDEBUG
-if (!kernel_startup && !threadsLocked && !are_interrupts_enabled()) {
+if (!gKernelStartup && !threadsLocked && !are_interrupts_enabled()) {
panic("_mutex_lock(): called with interrupts disabled for lock %p",
lock);
}

View File

@@ -61,7 +61,7 @@
# define TRACE(x...) ;
#endif
-bool kernel_startup = true;
+bool gKernelStartup = true;
static kernel_args sKernelArgs;
static uint32 sCpuRendezvous;
@@ -191,7 +191,7 @@ _start(kernel_args *bootKernelArgs, int currentCPU)
// exit the kernel startup phase (mutexes, etc work from now on out)
TRACE("exiting kernel startup\n");
-kernel_startup = false;
+gKernelStartup = false;
smp_cpu_rendezvous(&sCpuRendezvous2, 0);
// release the AP cpus to go enter the scheduler

View File

@@ -711,7 +711,7 @@ switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
int state;
status_t status = B_OK;
-if (kernel_startup)
+if (gKernelStartup)
return B_OK;
if (sSemsActive == false)
return B_NO_MORE_SEMS;
@@ -869,7 +869,7 @@ release_sem_etc(sem_id id, int32 count, uint32 flags)
{
int32 slot = id % sMaxSems;
-if (kernel_startup)
+if (gKernelStartup)
return B_OK;
if (sSemsActive == false)
return B_NO_MORE_SEMS;

View File

@@ -1744,7 +1744,7 @@ vm_create_anonymous_area(team_id team, const char *name, void **address,
vm_translation_map *map = &addressSpace->translation_map;
off_t offset = 0;
-if (!kernel_startup)
+if (!gKernelStartup)
panic("ALREADY_WIRED flag used outside kernel startup\n");
cache->Lock();
@@ -2918,16 +2918,16 @@ display_mem(int argc, char **argv)
address = ROUNDOWN(address, B_PAGE_SIZE);
-kernel_startup = true;
+gKernelStartup = true;
// vm_get_physical_page() needs to lock...
if (vm_get_physical_page(address, &copyAddress, PHYSICAL_PAGE_NO_WAIT) != B_OK) {
kprintf("getting the hardware page failed.");
-kernel_startup = false;
+gKernelStartup = false;
return 0;
}
-kernel_startup = false;
+gKernelStartup = false;
address += offset;
copyAddress += offset;
} else
@@ -3015,9 +3015,9 @@ display_mem(int argc, char **argv)
if (physical) {
copyAddress = ROUNDOWN(copyAddress, B_PAGE_SIZE);
-kernel_startup = true;
+gKernelStartup = true;
vm_put_physical_page(copyAddress);
-kernel_startup = false;
+gKernelStartup = false;
}
return 0;
}

View File

@@ -715,7 +715,7 @@ load_driver_settings(const char *driverName)
}
// we are allowed to call the driver settings pretty early in the boot process
-if (kernel_startup) {
+if (gKernelStartup) {
mutex_unlock(&sLock);
return NULL;
}