haiku/src/system/kernel/cpu.c
Travis Geiselbrecht badc7b674e Yet another fix for #1018, which has at this point blossomed into a reorg of how AP CPUs are initialized.
The new cpuid code was apparently exacerbating an existing problem where various bits of low-level CPU code (specifically get_current_cpu) weren't really initialized before being used. Changed the order to set up a fake set of threads for each CPU to point at really early in boot, so that at all points in the code we can get the current 'thread' and thus the current CPU.
A probably better solution would be to have dr3 point to the current CPU, which would then point to the current thread, but that has a race condition that would require disabling interrupts, etc.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@20160 a95241bf-73f2-0310-859d-f6bbb57e9c96
2007-02-19 00:11:24 +00:00
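To make the reorg described above more concrete, here is a minimal standalone sketch of the idea. This is not code from the Haiku tree: every name in it (boot_thread, sBootThreads, sCurrentThread, preboot_init_percpu) is hypothetical, and it keeps the "current thread" pointer in a single global for brevity, whereas the real kernel keeps it in per-CPU, arch-specific state. The point it illustrates is the one the commit message makes: once each CPU is pointed at a placeholder thread very early in boot, get_current_cpu() can always be answered by walking from that thread back to its CPU structure.

/* Illustrative sketch only -- hypothetical names throughout. */
#include <stdio.h>

#define MAX_BOOT_CPUS	8

typedef struct cpu_ent {
	int	cpu_num;
} cpu_ent;

/* Placeholder thread whose only job this early in boot is to carry
   a valid pointer back to its CPU structure. */
typedef struct boot_thread {
	cpu_ent	*cpu;
} boot_thread;

static cpu_ent		sCPU[MAX_BOOT_CPUS];
static boot_thread	sBootThreads[MAX_BOOT_CPUS];

/* Stand-in for the arch-specific "current thread" slot; on real
   hardware this lives in per-CPU state (a register, or a fixed
   location on the kernel stack), not in one global. */
static boot_thread	*sCurrentThread;

static void
preboot_init_percpu(int cpuNum)
{
	sCPU[cpuNum].cpu_num = cpuNum;
	sBootThreads[cpuNum].cpu = &sCPU[cpuNum];
	sCurrentThread = &sBootThreads[cpuNum];
}

static int
get_current_cpu(void)
{
	/* valid at any point after the fake thread has been attached */
	return sCurrentThread->cpu->cpu_num;
}

int
main(void)
{
	preboot_init_percpu(0);
	printf("current cpu: %d\n", get_current_cpu());
	return 0;
}

The cpu_num assignment here mirrors what cpu_preboot_init_percpu() in the file below does for the real gCPU array.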


/*
 * Copyright 2002-2006, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

/* This file contains the cpu functions (init, etc). */


#include <cpu.h>
#include <thread_types.h>
#include <arch/cpu.h>
#include <boot/kernel_args.h>
#include <smp.h>

#include <string.h>


/* global per-cpu structure */
cpu_ent gCPU[MAX_BOOT_CPUS];

/* protects the enabled/disabled state changed by _user_set_cpu_enabled() */
static spinlock sSetCpuLock;
status_t
cpu_init(kernel_args *args)
{
	return arch_cpu_init(args);
}


status_t
cpu_init_percpu(kernel_args *args, int curr_cpu)
{
	return arch_cpu_init_percpu(args, curr_cpu);
}


status_t
cpu_init_post_vm(kernel_args *args)
{
	return arch_cpu_init_post_vm(args);
}


status_t
cpu_init_post_modules(kernel_args *args)
{
	return arch_cpu_init_post_modules(args);
}


status_t
cpu_preboot_init_percpu(kernel_args *args, int curr_cpu)
{
	// set the cpu number in the local cpu structure so that
	// we can use it for get_current_cpu
	memset(&gCPU[curr_cpu], 0, sizeof(gCPU[curr_cpu]));
	gCPU[curr_cpu].cpu_num = curr_cpu;

	return arch_cpu_preboot_init_percpu(args, curr_cpu);
}
bigtime_t
cpu_get_active_time(int32 cpu)
{
	bigtime_t activeTime;
	cpu_status state;

	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return 0;

	// We need to grab the thread lock here, because the thread activity
	// time is not maintained atomically (because there is no need to)
	state = disable_interrupts();
	GRAB_THREAD_LOCK();

	activeTime = gCPU[cpu].active_time;

	RELEASE_THREAD_LOCK();
	restore_interrupts(state);

	return activeTime;
}
void
clear_caches(void *address, size_t length, uint32 flags)
{
	// ToDo: implement me!
}


//	#pragma mark -


void
_user_clear_caches(void *address, size_t length, uint32 flags)
{
	clear_caches(address, length, flags);
}


bool
_user_cpu_enabled(int32 cpu)
{
	// this function returns bool, so an invalid CPU index must map to
	// false rather than an error code
	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return false;

	return !gCPU[cpu].disabled;
}
status_t
_user_set_cpu_enabled(int32 cpu, bool enabled)
{
	status_t status = B_OK;
	cpu_status state;
	int32 i, count;

	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return B_BAD_VALUE;

	// We need to lock here to make sure that no one can disable
	// the last CPU
	state = disable_interrupts();
	acquire_spinlock(&sSetCpuLock);

	if (!enabled) {
		// check if this is the last CPU to be disabled
		for (i = 0, count = 0; i < smp_get_num_cpus(); i++) {
			if (!gCPU[i].disabled)
				count++;
		}

		if (count == 1)
			status = B_NOT_ALLOWED;
	}

	if (status == B_OK)
		gCPU[cpu].disabled = !enabled;

	release_spinlock(&sSetCpuLock);
	restore_interrupts(state);

	return status;
}
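
As a usage note, here is a minimal sketch, not part of this file, of how a caller inside the kernel might turn cpu_get_active_time() into a rough utilization figure over a sampling interval. The helper name cpu_usage_percent is hypothetical; the sketch assumes the kernel's system_time() microsecond clock and snooze() are available to the caller.

/* Hypothetical helper: estimate how busy one CPU was over `interval'
   microseconds by sampling its active time twice. */
static int32
cpu_usage_percent(int32 cpu, bigtime_t interval)
{
	bigtime_t activeBefore, activeDelta;
	bigtime_t timeBefore, timeDelta;

	activeBefore = cpu_get_active_time(cpu);
	timeBefore = system_time();

	snooze(interval);

	activeDelta = cpu_get_active_time(cpu) - activeBefore;
	timeDelta = system_time() - timeBefore;

	if (timeDelta <= 0)
		return 0;

	return (int32)(activeDelta * 100 / timeDelta);
}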