Changed the boot procedure a bit.

Extracted scheduler_init() from start_scheduler() (which is now called scheduler_start()). Moved the scheduler-related function prototypes from thread.h to the new kscheduler.h. Cleanup.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@14518 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
parent 82f3114509
commit 6cd505cee7
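For orientation before the hunks: the boot CPU now initializes the scheduler explicitly and only starts it once interrupts are enabled. A minimal sketch of the resulting call order, assembled from the main.c hunks below (all other init calls elided; not standalone code):

    /* Boot-CPU tail of _start(), per this commit */
    scheduler_init();			// registers the "run_queue" debugger command
    thread = spawn_kernel_thread(&main2, "main2", B_NORMAL_PRIORITY, NULL);
    smp_wake_up_non_boot_cpus();	// also enables ICIs and broadcasts a reschedule
    kernel_startup = false;
    enable_interrupts();
    scheduler_start();			// first reschedule, in the initial idle thread
    resume_thread(thread);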
@@ -11,8 +11,7 @@
 #include <smp.h>
 #include <timer.h>
 
-struct kernel_args;
-
+#include <boot/kernel_args.h>
 
 
 /* CPU local data structure */
headers/private/kernel/kscheduler.h (new file, 27 lines)
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2005, Axel Dörfler, axeld@pinc-software.de.
+ * Distributed under the terms of the MIT License.
+ */
+#ifndef KERNEL_SCHEDULER_H
+#define KERNEL_SCHEDULER_H
+
+
+struct thread;
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void scheduler_enqueue_in_run_queue(struct thread *thread);
+void scheduler_remove_from_run_queue(struct thread *thread);
+void scheduler_reschedule(void);
+
+void scheduler_init(void);
+void scheduler_start(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* KERNEL_SCHEDULER_H */
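A hypothetical caller of the new header, to show the intended use of the moved prototypes (a sketch only: GRAB_THREAD_LOCK()/RELEASE_THREAD_LOCK() are the existing thread-spinlock macros, and make_ready_and_reschedule() is an illustrative name, not part of this commit):

    #include <kscheduler.h>

    /* Make a thread runnable, then let the scheduler pick the next one.
     * The run queue may only be touched with interrupts disabled and the
     * thread lock held, as in the scheduler.c code further below. */
    static void
    make_ready_and_reschedule(struct thread *thread)
    {
    	cpu_status state = disable_interrupts();
    	GRAB_THREAD_LOCK();

    	scheduler_enqueue_in_run_queue(thread);
    	scheduler_reschedule();

    	RELEASE_THREAD_LOCK();
    	restore_interrupts(state);
    }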
@@ -1,7 +1,10 @@
 /*
-** Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
-** Distributed under the terms of the NewOS License.
-*/
+ * Copyright 2002-2005, Axel Dörfler, axeld@pinc-software.de.
+ * Distributed under the terms of the MIT License.
+ *
+ * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
+ * Distributed under the terms of the NewOS License.
+ */
 #ifndef KERNEL_SMP_H
 #define KERNEL_SMP_H
 
@@ -26,20 +29,28 @@ enum {
 	SMP_MSG_FLAG_SYNC,
 };
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 status_t smp_init(struct kernel_args *args);
 status_t smp_per_cpu_init(struct kernel_args *args, int32 cpu);
-int smp_trap_non_boot_cpus(struct kernel_args *ka, int cpu);
-void smp_wake_up_all_non_boot_cpus(void);
-void smp_wait_for_ap_cpus(struct kernel_args *ka);
-void smp_send_ici(int target_cpu, int message, unsigned long data, unsigned long data2, unsigned long data3, void *data_ptr, int flags);
-void smp_send_broadcast_ici(int message, unsigned long data, unsigned long data2, unsigned long data3, void *data_ptr, int flags);
-int smp_enable_ici(void);
-int smp_disable_ici(void);
+bool smp_trap_non_boot_cpus(int32 cpu);
+void smp_wake_up_non_boot_cpus(void);
+void smp_wait_for_non_boot_cpus(void);
+void smp_send_ici(int32 targetCPU, int32 message, uint32 data, uint32 data2, uint32 data3,
+	void *data_ptr, uint32 flags);
+void smp_send_broadcast_ici(int32 message, uint32 data, uint32 data2, uint32 data3,
+	void *data_ptr, uint32 flags);
 
-int smp_get_num_cpus(void);
-void smp_set_num_cpus(int num_cpus);
-int smp_get_current_cpu(void);
+int32 smp_get_num_cpus(void);
+void smp_set_num_cpus(int32 numCPUs);
+int32 smp_get_current_cpu(void);
 
 int smp_intercpu_int_handler(void);
 
 #ifdef __cplusplus
 }
 #endif
 
 #endif /* KERNEL_SMP_H */
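Call-site shape for the widened ICI interface (a sketch; the constants are the existing SMP_MSG_* values from this header, and this mirrors how smp.c itself now invokes it):

    /* Synchronously flush every other CPU's TLB, then kick them all. */
    smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVL_PAGE, 0, 0, 0, NULL,
    	SMP_MSG_FLAG_SYNC);
    smp_send_broadcast_ici(SMP_MSG_RESCHEDULE, 0, 0, 0, NULL,
    	SMP_MSG_FLAG_ASYNC);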
@@ -20,17 +20,11 @@ struct kernel_args;
 extern "C" {
 #endif
 
-void scheduler_reschedule(void);
-void start_scheduler(void);
-
 void thread_enqueue(struct thread *t, struct thread_queue *q);
 struct thread *thread_lookat_queue(struct thread_queue *q);
 struct thread *thread_dequeue(struct thread_queue *q);
 struct thread *thread_dequeue_id(struct thread_queue *q, thread_id thr_id);
 
-void scheduler_enqueue_in_run_queue(struct thread *thread);
-void scheduler_remove_from_run_queue(struct thread *thread);
-
 void thread_at_kernel_entry(void);
 	// called when the thread enters the kernel on behalf of the thread
 void thread_at_kernel_exit(void);
@@ -6,12 +6,13 @@
  * Distributed under the terms of the NewOS License.
  */
 
-#include <vm.h>
 #include <int.h>
+#include <kscheduler.h>
 #include <ksyscalls.h>
 #include <smp.h>
 #include <team.h>
 #include <thread.h>
+#include <vm.h>
 #include <vm_priv.h>
 
 #include <arch/cpu.h>
@@ -98,6 +98,61 @@ apic_write(uint32 offset, uint32 data)
 }
 
 
+static status_t
+setup_apic(kernel_args *args, int32 cpu)
+{
+	uint32 config;
+
+	TRACE(("setting up the apic..."));
+
+	/* set spurious interrupt vector to 0xff */
+	config = apic_read(APIC_SIVR) & 0xffffff00;
+	config |= APIC_ENABLE | 0xff;
+	apic_write(APIC_SIVR, config);
+
+	// don't touch the LINT0/1 configuration in virtual wire mode
+	// ToDo: implement support for other modes...
+#if 0
+	if (cpu == 0) {
+		/* setup LINT0 as ExtINT */
+		config = (apic_read(APIC_LINT0) & 0xffff00ff);
+		config |= APIC_LVT_DM_ExtINT | APIC_LVT_IIPP | APIC_LVT_TM;
+		apic_write(APIC_LINT0, config);
+
+		/* setup LINT1 as NMI */
+		config = (apic_read(APIC_LINT1) & 0xffff00ff);
+		config |= APIC_LVT_DM_NMI | APIC_LVT_IIPP;
+		apic_write(APIC_LINT1, config);
+	}
+#endif
+
+	/* setup timer */
+	config = apic_read(APIC_LVTT) & ~APIC_LVTT_MASK;
+	config |= 0xfb | APIC_LVTT_M;	// vector 0xfb, timer masked
+	apic_write(APIC_LVTT, config);
+
+	apic_write(APIC_ICRT, 0);	// zero out the clock
+
+	config = apic_read(APIC_TDCR) & ~0x0000000f;
+	config |= APIC_TDCR_1;	// clock division by 1
+	apic_write(APIC_TDCR, config);
+
+	/* setup error vector to 0xfe */
+	config = (apic_read(APIC_LVT3) & 0xffffff00) | 0xfe;
+	apic_write(APIC_LVT3, config);
+
+	/* accept all interrupts */
+	config = apic_read(APIC_TPRI) & 0xffffff00;
+	apic_write(APIC_TPRI, config);
+
+	config = apic_read(APIC_SIVR);
+	apic_write(APIC_EOI, 0);
+
+	TRACE((" done\n"));
+	return 0;
+}
+
+
 status_t
 arch_smp_init(kernel_args *args)
 {
@@ -131,62 +186,12 @@ arch_smp_init(kernel_args *args)
 }
 
 
-static int
-smp_setup_apic(kernel_args *args)
-{
-	uint32 config;
-
-	TRACE(("setting up the apic..."));
-
-	/* set spurious interrupt vector to 0xff */
-	config = apic_read(APIC_SIVR) & 0xfffffc00;
-	config |= APIC_ENABLE | 0xff;
-	apic_write(APIC_SIVR, config);
-#if 0
-	/* setup LINT0 as ExtINT */
-	config = (apic_read(APIC_LINT0) & 0xffff1c00);
-	config |= APIC_LVT_DM_ExtINT | APIC_LVT_IIPP | APIC_LVT_TM;
-	apic_write(APIC_LINT0, config);
-
-	/* setup LINT1 as NMI */
-	config = (apic_read(APIC_LINT1) & 0xffff1c00);
-	config |= APIC_LVT_DM_NMI | APIC_LVT_IIPP;
-	apic_write(APIC_LINT1, config);
-#endif
-
-	/* setup timer */
-	config = apic_read(APIC_LVTT) & ~APIC_LVTT_MASK;
-	config |= 0xfb | APIC_LVTT_M;	// vector 0xfb, timer masked
-	apic_write(APIC_LVTT, config);
-
-	apic_write(APIC_ICRT, 0);	// zero out the clock
-
-	config = apic_read(APIC_TDCR) & ~0x0000000f;
-	config |= APIC_TDCR_1;	// clock division by 1
-	apic_write(APIC_TDCR, config);
-
-	/* setup error vector to 0xfe */
-	config = (apic_read(APIC_LVT3) & 0xffffff00) | 0xfe;
-	apic_write(APIC_LVT3, config);
-
-	/* accept all interrupts */
-	config = apic_read(APIC_TPRI) & 0xffffff00;
-	apic_write(APIC_TPRI, config);
-
-	config = apic_read(APIC_SIVR);
-	apic_write(APIC_EOI, 0);
-
-	TRACE((" done\n"));
-	return 0;
-}
-
-
 status_t
 arch_smp_per_cpu_init(kernel_args *args, int32 cpu)
 {
 	// set up the local apic on the current cpu
 	TRACE(("arch_smp_init_percpu: setting up the apic on cpu %ld\n", cpu));
-	smp_setup_apic(args);
+	setup_apic(args, cpu);
 
 	return B_OK;
 }
@@ -219,6 +224,10 @@ arch_smp_send_ici(int32 target_cpu)
 	apic_write(APIC_ICR1, config | 0xfd | APIC_ICR1_DELIVERY_MODE_FIXED
 		| APIC_ICR1_DEST_MODE_PHYSICAL | APIC_ICR1_DEST_FIELD);
 
+	// wait for message to be sent
+	while ((apic_read(APIC_ICR1) & APIC_ICR1_DELIVERY_STATUS) != 0)
+		;
+
 	restore_interrupts(state);
 }
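The point of the added loop: the delivery-status bit in the interrupt command register stays set until the local APIC has actually dispatched the IPI, so polling it keeps a later write from clobbering a still-pending one. Conceptually (names as in the hunk above):

    /* Don't reuse the ICR while the previous inter-CPU interrupt is
     * still marked pending by the local APIC. */
    while ((apic_read(APIC_ICR1) & APIC_ICR1_DELIVERY_STATUS) != 0)
    	;	// spin; the window is normally very short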
@@ -10,6 +10,7 @@
 #include <debugger.h>
 #include <kernel.h>
 #include <KernelExport.h>
+#include <kscheduler.h>
 #include <ksignal.h>
 #include <ksyscalls.h>
 #include <sem.h>
@@ -7,15 +7,17 @@
 
 
 #include <KernelExport.h>
 
 #include <kernel.h>
 #include <kimage.h>
+#include <kscheduler.h>
 #include <lock.h>
 #include <team.h>
 #include <thread.h>
 #include <thread_types.h>
 #include <user_debugger.h>
 
 #include <malloc.h>
 #include <stdlib.h>
 #include <string.h>
@@ -11,29 +11,30 @@
 
 #include <OS.h>
 
-#include <boot/kernel_args.h>
-#include <debug.h>
-#include <ksyscalls.h>
-#include <vm.h>
-#include <timer.h>
-#include <smp.h>
-#include <sem.h>
-#include <port.h>
-#include <vfs.h>
-#include <cbuf.h>
-#include <elf.h>
-#include <cpu.h>
-#include <kdriver_settings.h>
 #include <boot_item.h>
-#include <kmodule.h>
+#include <cbuf.h>
+#include <cpu.h>
+#include <debug.h>
+#include <elf.h>
 #include <int.h>
-#include <team.h>
-#include <system_info.h>
 #include <kdevice_manager.h>
-#include <real_time_clock.h>
+#include <kdriver_settings.h>
 #include <kernel_daemon.h>
+#include <kmodule.h>
+#include <kscheduler.h>
+#include <ksyscalls.h>
 #include <messaging.h>
+#include <port.h>
+#include <real_time_clock.h>
+#include <sem.h>
+#include <smp.h>
+#include <system_info.h>
+#include <team.h>
+#include <timer.h>
 #include <user_debugger.h>
+#include <vfs.h>
+#include <vm.h>
+#include <boot/kernel_args.h>
 
 #include <string.h>
@@ -47,21 +48,19 @@
 
 bool kernel_startup;
 
-static kernel_args ka;
+static kernel_args sKernelArgs;
 
 static int32 main2(void *);
-int _start(kernel_args *oldka, int cpu); /* keep compiler happy */
+int _start(kernel_args *bootKernelArgs, int cpu); /* keep compiler happy */
 
 
 int
-_start(kernel_args *oldka, int cpu_num)
+_start(kernel_args *bootKernelArgs, int currentCPU)
 {
-	thread_id thread = -1;
-
 	kernel_startup = true;
 
-	if (oldka->kernel_args_size != sizeof(kernel_args)
-		|| oldka->version != CURRENT_KERNEL_ARGS_VERSION) {
+	if (bootKernelArgs->kernel_args_size != sizeof(kernel_args)
+		|| bootKernelArgs->version != CURRENT_KERNEL_ARGS_VERSION) {
 		// This is something we cannot handle right now - release kernels
 		// should always be able to handle the kernel_args of earlier
 		// released kernels.
@@ -69,100 +68,104 @@ _start(kernel_args *oldka, int cpu_num)
 		return -1;
 	}
 
-	memcpy(&ka, oldka, sizeof(kernel_args));
+	memcpy(&sKernelArgs, bootKernelArgs, sizeof(kernel_args));
 		// the passed in kernel args are in a non-allocated range of memory
 
-	smp_set_num_cpus(ka.num_cpus);
+	smp_set_num_cpus(sKernelArgs.num_cpus);
 
 	// do any pre-booting cpu config
-	cpu_preboot_init(&ka);
+	cpu_preboot_init(&sKernelArgs);
 
 	// if we're not a boot cpu, spin here until someone wakes us up
-	if (smp_trap_non_boot_cpus(&ka, cpu_num) == B_NO_ERROR) {
-		// we're the boot processor, so wait for all of the APs to enter the kernel
-		smp_wait_for_ap_cpus(&ka);
+	if (smp_trap_non_boot_cpus(currentCPU)) {
+		thread_id thread;
 
 		// setup debug output
-		debug_init(&ka);
+		debug_init(&sKernelArgs);
 		set_dprintf_enabled(true);
 		dprintf("Welcome to kernel debugger output!\n");
 
+		// we're the boot processor, so wait for all of the APs to enter the kernel
+		smp_wait_for_non_boot_cpus();
+
 		// init modules
 		TRACE(("init CPU\n"));
-		cpu_init(&ka);
+		cpu_init(&sKernelArgs);
 		TRACE(("init interrupts\n"));
-		int_init(&ka);
+		int_init(&sKernelArgs);
 
 		TRACE(("init VM\n"));
-		vm_init(&ka);
+		vm_init(&sKernelArgs);
 			// Before vm_init_post_sem() is called, we have to make sure that
 			// the boot loader allocated region is not used anymore
 
 		// now we can use the heap and create areas
 		TRACE(("init driver_settings\n"));
 		boot_item_init();
-		driver_settings_init(&ka);
-		debug_init_post_vm(&ka);
-		int_init_post_vm(&ka);
-		cpu_init_post_vm(&ka);
+		driver_settings_init(&sKernelArgs);
+		debug_init_post_vm(&sKernelArgs);
+		int_init_post_vm(&sKernelArgs);
+		cpu_init_post_vm(&sKernelArgs);
 		TRACE(("init system info\n"));
-		system_info_init(&ka);
+		system_info_init(&sKernelArgs);
 
 		TRACE(("init SMP\n"));
-		smp_init(&ka);
+		smp_init(&sKernelArgs);
 		TRACE(("init timer\n"));
-		timer_init(&ka);
+		timer_init(&sKernelArgs);
 		TRACE(("init real time clock\n"));
-		rtc_init(&ka);
+		rtc_init(&sKernelArgs);
 
 		TRACE(("init semaphores\n"));
-		sem_init(&ka);
+		sem_init(&sKernelArgs);
 
 		// now we can create and use semaphores
 		TRACE(("init VM semaphores\n"));
-		vm_init_post_sem(&ka);
+		vm_init_post_sem(&sKernelArgs);
 		TRACE(("init driver_settings\n"));
-		driver_settings_init_post_sem(&ka);
+		driver_settings_init_post_sem(&sKernelArgs);
 		TRACE(("init generic syscall\n"));
 		generic_syscall_init();
 		TRACE(("init cbuf\n"));
 		cbuf_init();
 		TRACE(("init VFS\n"));
-		vfs_init(&ka);
+		vfs_init(&sKernelArgs);
 		TRACE(("init teams\n"));
-		team_init(&ka);
+		team_init(&sKernelArgs);
 		TRACE(("init threads\n"));
-		thread_init(&ka);
+		thread_init(&sKernelArgs);
 		TRACE(("init ports\n"));
-		port_init(&ka);
+		port_init(&sKernelArgs);
 		TRACE(("init kernel daemons\n"));
 		kernel_daemon_init();
 
 		TRACE(("init VM threads\n"));
-		vm_init_post_thread(&ka);
+		vm_init_post_thread(&sKernelArgs);
 		TRACE(("init ELF loader\n"));
-		elf_init(&ka);
+		elf_init(&sKernelArgs);
+		TRACE(("init scheduler\n"));
+		scheduler_init();
 
 		// start a thread to finish initializing the rest of the system
 		thread = spawn_kernel_thread(&main2, "main2", B_NORMAL_PRIORITY, NULL);
 
-		smp_wake_up_all_non_boot_cpus();
-		smp_enable_ici(); // ici's were previously being ignored
-		start_scheduler();
+		smp_wake_up_non_boot_cpus();
+
+		TRACE(("enable interrupts, exit kernel startup\n"));
+		kernel_startup = false;
+		enable_interrupts();
+
+		scheduler_start();
+		resume_thread(thread);
 	} else {
-		// this is run per cpu for each AP processor after they've been set loose
-		smp_per_cpu_init(&ka, cpu_num);
-		thread_per_cpu_init(cpu_num);
+		// this is run for each non boot processor after they've been set loose
+		smp_per_cpu_init(&sKernelArgs, currentCPU);
+		thread_per_cpu_init(currentCPU);
+
+		enable_interrupts();
 	}
 
-	TRACE(("enable interrupts, exit kernel startup\n"));
-	kernel_startup = false;
-	enable_interrupts();
-
-	if (thread >= B_OK)
-		resume_thread(thread);
-
-	TRACE(("main: done... begin idle loop on cpu %d\n", cpu_num));
+	TRACE(("main: done... begin idle loop on cpu %d\n", currentCPU));
 	for (;;)
 		arch_cpu_idle();
 
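On the other side of the branch, each application processor now enables interrupts itself before entering its idle loop. Sketch of the per-AP path, assembled from the hunk above:

    /* Non-boot CPU: released by smp_wake_up_non_boot_cpus(), then idles
     * until an SMP_MSG_RESCHEDULE interrupt gives it work. */
    smp_per_cpu_init(&sKernelArgs, currentCPU);
    thread_per_cpu_init(currentCPU);
    enable_interrupts();
    for (;;)
    	arch_cpu_idle();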
@@ -178,7 +181,7 @@ main2(void *unused)
 	TRACE(("start of main2: initializing devices\n"));
 
 	TRACE(("Init modules\n"));
-	module_init(&ka);
+	module_init(&sKernelArgs);
 
 	// ToDo: the preloaded image debug data is placed in the kernel args, and
 	//	thus, if they are enabled, the kernel args shouldn't be freed, so
@@ -188,7 +191,7 @@ main2(void *unused)
 		// module_init() is supposed to be the last user of the kernel args
 		// Note: don't confuse the kernel_args structure (which is never freed)
 		// with the kernel args ranges it contains (and which are freed here).
-		vm_free_kernel_args(&ka);
+		vm_free_kernel_args(&sKernelArgs);
 	}
 
 	// init userland debugging
@@ -204,15 +207,13 @@ main2(void *unused)
 	vfs_bootstrap_file_systems();
 
 	TRACE(("Init Device Manager\n"));
-	device_manager_init(&ka);
+	device_manager_init(&sKernelArgs);
 
 	// ToDo: device manager starts here, bus_init()/dev_init() won't be necessary anymore,
 	//	but instead, the hardware and drivers are rescanned then.
 
 	TRACE(("Mount boot file system\n"));
-	vfs_mount_boot_file_system(&ka);
-
-	//net_init_postdev(&ka);
+	vfs_mount_boot_file_system(&sKernelArgs);
 
 	//module_test();
 #if 0
@@ -12,6 +12,7 @@
 
 #include <OS.h>
 
+#include <kscheduler.h>
 #include <thread.h>
 #include <timer.h>
 #include <int.h>
@@ -84,7 +85,8 @@ scheduler_enqueue_in_run_queue(struct thread *thread)
 	if (thread->priority < B_MIN_PRIORITY)
 		thread->priority = B_MIN_PRIORITY;
 
-	for (curr = sRunQueue.head, prev = NULL; curr && (curr->priority >= thread->priority); curr = curr->queue_next) {
+	for (curr = sRunQueue.head, prev = NULL; curr && curr->priority >= thread->priority;
+			curr = curr->queue_next) {
 		if (prev)
 			prev = prev->queue_next;
 		else
@@ -166,7 +168,7 @@ scheduler_reschedule(void)
 	switch (oldThread->next_state) {
 		case B_THREAD_RUNNING:
 		case B_THREAD_READY:
-			TRACE(("enqueueing thread 0x%lx into run q. pri = %d\n", oldThread->id, oldThread->priority));
+			TRACE(("enqueueing thread 0x%lx into run q. pri = %ld\n", oldThread->id, oldThread->priority));
 			scheduler_enqueue_in_run_queue(oldThread);
 			break;
 		case B_THREAD_SUSPENDED:
@@ -178,7 +180,7 @@ scheduler_reschedule(void)
 			thread_enqueue(oldThread, &dead_q);
 			break;
 		default:
-			TRACE(("not enqueueing thread 0x%lx into run q. next_state = %d\n", oldThread->id, oldThread->next_state));
+			TRACE(("not enqueueing thread 0x%lx into run q. next_state = %ld\n", oldThread->id, oldThread->next_state));
 			break;
 	}
 	oldThread->state = oldThread->next_state;
@@ -186,13 +188,13 @@ scheduler_reschedule(void)
 	// select next thread from the run queue
 	nextThread = sRunQueue.head;
 	prevThread = NULL;
-	while (nextThread && (nextThread->priority > B_IDLE_PRIORITY)) {
+	while (nextThread && nextThread->priority > B_IDLE_PRIORITY) {
 		// always extract real time threads
 		if (nextThread->priority >= B_FIRST_REAL_TIME_PRIORITY)
 			break;
 
 		// never skip last non-idle normal thread
-		if (nextThread->queue_next && (nextThread->queue_next->priority == B_IDLE_PRIORITY))
+		if (nextThread->queue_next && nextThread->queue_next->priority == B_IDLE_PRIORITY)
 			break;
 
 		// skip normal threads sometimes
@@ -217,13 +219,13 @@ scheduler_reschedule(void)
 
 	if (nextThread != oldThread || oldThread->cpu->info.preempted) {
 		bigtime_t quantum = 3000;	// ToDo: calculate quantum!
-		timer *quantum_timer= &oldThread->cpu->info.quantum_timer;
+		timer *quantumTimer = &oldThread->cpu->info.quantum_timer;
 
 		if (!oldThread->cpu->info.preempted)
-			_local_timer_cancel_event(oldThread->cpu->info.cpu_num, quantum_timer);
+			_local_timer_cancel_event(oldThread->cpu->info.cpu_num, quantumTimer);
 
 		oldThread->cpu->info.preempted = 0;
-		add_timer(quantum_timer, &reschedule_event, quantum, B_ONE_SHOT_RELATIVE_TIMER);
+		add_timer(quantumTimer, &reschedule_event, quantum, B_ONE_SHOT_RELATIVE_TIMER);
 
 		if (nextThread != oldThread)
 			context_switch(oldThread, nextThread);
@@ -231,34 +233,27 @@ scheduler_reschedule(void)
 }
 
 
+void
+scheduler_init(void)
+{
+	add_debugger_command("run_queue", &dump_run_queue, "list threads in run queue");
+}
+
+
 /** This starts the scheduler. Must be run under the context of
  *	the initial idle thread.
  */
 
 void
-start_scheduler(void)
+scheduler_start(void)
 {
-	cpu_status state;
-
-	// ToDo: may not be the best place for this
-	// invalidate all of the other processors' TLB caches
-	state = disable_interrupts();
-	arch_cpu_global_TLB_invalidate();
-	smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVL_PAGE, 0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
-	restore_interrupts(state);
-
-	// start the other processors
-	smp_send_broadcast_ici(SMP_MSG_RESCHEDULE, 0, 0, 0, NULL, SMP_MSG_FLAG_ASYNC);
-
-	state = disable_interrupts();
+	cpu_status state = disable_interrupts();
 	GRAB_THREAD_LOCK();
 
 	scheduler_reschedule();
 
 	RELEASE_THREAD_LOCK();
 	restore_interrupts(state);
-
-	add_debugger_command("run_queue", &dump_run_queue, "list threads in run queue");
 }
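Why scheduler_start() insists on the initial idle thread: the first scheduler_reschedule() switches the CPU to the highest-priority ready thread, and the calling context resumes only when nothing else is runnable, i.e. it becomes the idle loop. Sketch of that contract, as _start() uses it above:

    scheduler_start();		// first context switch happens inside
    resume_thread(thread);
    for (;;)
    	arch_cpu_idle();	// this context is now the CPU's idle thread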
@@ -13,6 +13,7 @@
 
 #include <sem.h>
 #include <kernel.h>
+#include <kscheduler.h>
 #include <ksignal.h>
 #include <smp.h>
 #include <int.h>
@@ -11,12 +11,13 @@
 #include <KernelExport.h>
 
 #include <debug.h>
-#include <thread.h>
-#include <team.h>
-#include <sem.h>
-#include <ksignal.h>
-#include <user_debugger.h>
 #include <kernel.h>
+#include <kscheduler.h>
+#include <ksignal.h>
+#include <sem.h>
+#include <team.h>
+#include <thread.h>
+#include <user_debugger.h>
 
 #include <stddef.h>
 #include <string.h>
@@ -21,9 +21,9 @@
 #include <string.h>
 
 #define DEBUG_SPINLOCKS 1
-#define TRACE_SMP 0
+//#define TRACE_SMP
 
-#if TRACE_SMP
+#ifdef TRACE_SMP
 #	define TRACE(x) dprintf x
 #else
 #	define TRACE(x) ;
@@ -38,17 +38,16 @@
 #define MSG_POOL_SIZE (SMP_MAX_CPUS * 4)
 
 struct smp_msg {
-	struct smp_msg *next;
-	int            message;
-	unsigned long  data;
-	unsigned long  data2;
-	unsigned long  data3;
-	void          *data_ptr;
-	int            flags;
-	int32          ref_count;
-	volatile bool  done;
-	unsigned int   proc_bitmap;
-	int            lock;
+	struct smp_msg	*next;
+	int32			message;
+	uint32			data;
+	uint32			data2;
+	uint32			data3;
+	void			*data_ptr;
+	uint32			flags;
+	int32			ref_count;
+	volatile bool	done;
+	uint32			proc_bitmap;
 };
 
 #define MAILBOX_LOCAL 1
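A note on proc_bitmap becoming uint32: for broadcast messages it carries one already-handled bit per CPU, manipulated with the existing SET_BIT/CHECK_BIT macros, so an explicitly 32-bit unsigned field matches the SMP_MAX_CPUS bookkeeping. Usage shape, taken from the code below (sketch):

    msg->proc_bitmap = SET_BIT(0, currentCPU);	// sender pre-marks itself
    if (CHECK_BIT(msg->proc_bitmap, currentCPU) != 0)
    	continue;	// receiver: already handled, skip this message
    msg->proc_bitmap = SET_BIT(msg->proc_bitmap, currentCPU);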
@@ -66,13 +65,13 @@ static spinlock cpu_msg_spinlock[SMP_MAX_CPUS] = { 0, };
 static struct smp_msg *smp_broadcast_msgs = NULL;
 static spinlock broadcast_msg_spinlock = 0;
 
-static bool ici_enabled = false;
+static bool sICIEnabled = false;
+static int32 sNumCPUs = 1;
 
-static int smp_num_cpus = 1;
+static int32 process_pending_ici(int32 currentCPU);
 
-static int smp_process_pending_ici(int curr_cpu);
-
-#ifdef DEBUG_SPINLOCKS
+#if DEBUG_SPINLOCKS
 #define NUM_LAST_CALLERS 32
 
 static struct {
@@ -112,13 +111,13 @@ find_lock_caller(spinlock *lock)
 void
 acquire_spinlock(spinlock *lock)
 {
-	if (smp_num_cpus > 1) {
-		int curr_cpu = smp_get_current_cpu();
+	if (sNumCPUs > 1) {
+		int currentCPU = smp_get_current_cpu();
 		if (are_interrupts_enabled())
 			panic("acquire_spinlock: attempt to acquire lock %p with interrupts enabled\n", lock);
 		while (1) {
 			while (*lock != 0) {
-				smp_process_pending_ici(curr_cpu);
+				process_pending_ici(currentCPU);
 				PAUSE();
 			}
 			if (atomic_set((int32 *)lock, 1) == 0)
@@ -144,7 +143,7 @@ acquire_spinlock(spinlock *lock)
 static void
 acquire_spinlock_nocheck(spinlock *lock)
 {
-	if (smp_num_cpus > 1) {
+	if (sNumCPUs > 1) {
 #if DEBUG_SPINLOCKS
 		if (are_interrupts_enabled())
 			panic("acquire_spinlock_nocheck: attempt to acquire lock %p with interrupts enabled\n", lock);
@@ -169,7 +168,7 @@ acquire_spinlock_nocheck(spinlock *lock)
 void
 release_spinlock(spinlock *lock)
 {
-	if (smp_num_cpus > 1) {
+	if (sNumCPUs > 1) {
 		if (are_interrupts_enabled())
 			panic("release_spinlock: attempt to release lock %p with interrupts enabled\n", lock);
 		if (atomic_set((int32 *)lock, 0) != 1)
@@ -189,10 +188,10 @@ release_spinlock(spinlock *lock)
 // NOTE: has side effect of disabling interrupts
 // return value is interrupt state
 
-static int
+static cpu_status
 find_free_message(struct smp_msg **msg)
 {
-	int state;
+	cpu_status state;
 
 	TRACE(("find_free_message: entry\n"));
 
@@ -236,33 +235,36 @@ return_free_message(struct smp_msg *msg)
 
 
 static struct smp_msg *
-smp_check_for_message(int curr_cpu, int *source_mailbox)
+check_for_message(int currentCPU, int *source_mailbox)
 {
 	struct smp_msg *msg;
 
-	acquire_spinlock_nocheck(&cpu_msg_spinlock[curr_cpu]);
-	msg = smp_msgs[curr_cpu];
+	if (!sICIEnabled)
+		return NULL;
+
+	acquire_spinlock_nocheck(&cpu_msg_spinlock[currentCPU]);
+	msg = smp_msgs[currentCPU];
 	if (msg != NULL) {
-		smp_msgs[curr_cpu] = msg->next;
-		release_spinlock(&cpu_msg_spinlock[curr_cpu]);
+		smp_msgs[currentCPU] = msg->next;
+		release_spinlock(&cpu_msg_spinlock[currentCPU]);
 		TRACE((" found msg %p in cpu mailbox\n", msg));
 		*source_mailbox = MAILBOX_LOCAL;
 	} else {
 		// try getting one from the broadcast mailbox
 
-		release_spinlock(&cpu_msg_spinlock[curr_cpu]);
+		release_spinlock(&cpu_msg_spinlock[currentCPU]);
 		acquire_spinlock_nocheck(&broadcast_msg_spinlock);
 
 		msg = smp_broadcast_msgs;
 		while (msg != NULL) {
-			if (CHECK_BIT(msg->proc_bitmap, curr_cpu) != 0) {
+			if (CHECK_BIT(msg->proc_bitmap, currentCPU) != 0) {
 				// we have handled this one already
 				msg = msg->next;
 				continue;
 			}
 
 			// mark it so we wont try to process this one again
-			msg->proc_bitmap = SET_BIT(msg->proc_bitmap, curr_cpu);
+			msg->proc_bitmap = SET_BIT(msg->proc_bitmap, currentCPU);
 			*source_mailbox = MAILBOX_BCAST;
 			break;
 		}
@@ -274,7 +276,7 @@ check_for_message(int currentCPU, int *source_mailbox)
 
 
 static void
-smp_finish_message_processing(int curr_cpu, struct smp_msg *msg, int source_mailbox)
+finish_message_processing(int currentCPU, struct smp_msg *msg, int source_mailbox)
 {
 	int old_refcount;
 
@@ -292,8 +294,8 @@ finish_message_processing(int currentCPU, struct smp_msg *msg, int source_mailbox)
 			spinlock = &broadcast_msg_spinlock;
 			break;
 		case MAILBOX_LOCAL:
-			mbox = &smp_msgs[curr_cpu];
-			spinlock = &cpu_msg_spinlock[curr_cpu];
+			mbox = &smp_msgs[currentCPU];
+			spinlock = &cpu_msg_spinlock[currentCPU];
 			break;
 	}
 
@@ -340,15 +342,15 @@ finish_message_processing(int currentCPU, struct smp_msg *msg, int source_mailbox)
 }
 
 
-static int
-smp_process_pending_ici(int curr_cpu)
+static int32
+process_pending_ici(int32 currentCPU)
 {
 	struct smp_msg *msg;
 	bool halt = false;
 	int source_mailbox = 0;
 	int retval = B_HANDLED_INTERRUPT;
 
-	msg = smp_check_for_message(curr_cpu, &source_mailbox);
+	msg = check_for_message(currentCPU, &source_mailbox);
 	if (msg == NULL)
 		return retval;
 
@@ -369,15 +371,15 @@ process_pending_ici(int32 currentCPU)
 			break;
 		case SMP_MSG_CPU_HALT:
 			halt = true;
-			dprintf("cpu %d halted!\n", curr_cpu);
+			dprintf("cpu %ld halted!\n", currentCPU);
 			break;
 		case SMP_MSG_1:
 		default:
-			dprintf("smp_intercpu_int_handler: got unknown message %d\n", msg->message);
+			dprintf("smp_intercpu_int_handler: got unknown message %ld\n", msg->message);
 	}
 
 	// finish dealing with this message, possibly removing it from the list
-	smp_finish_message_processing(curr_cpu, msg, source_mailbox);
+	finish_message_processing(currentCPU, msg, source_mailbox);
 
 	// special case for the halt message
 	// we otherwise wouldn't have gotten the opportunity to clean up
@@ -394,11 +396,11 @@ int
 smp_intercpu_int_handler(void)
 {
 	int retval;
-	int curr_cpu = smp_get_current_cpu();
+	int currentCPU = smp_get_current_cpu();
 
-	TRACE(("smp_intercpu_int_handler: entry on cpu %d\n", curr_cpu));
+	TRACE(("smp_intercpu_int_handler: entry on cpu %d\n", currentCPU));
 
-	retval = smp_process_pending_ici(curr_cpu);
+	retval = process_pending_ici(currentCPU);
 
 	TRACE(("smp_intercpu_int_handler: done\n"));
 
@@ -407,22 +409,23 @@ smp_intercpu_int_handler(void)
 
 
 void
-smp_send_ici(int target_cpu, int message, uint32 data, uint32 data2, uint32 data3, void *data_ptr, int flags)
+smp_send_ici(int32 targetCPU, int32 message, uint32 data, uint32 data2, uint32 data3,
+	void *data_ptr, uint32 flags)
 {
 	struct smp_msg *msg;
 
 	TRACE(("smp_send_ici: target 0x%x, mess 0x%x, data 0x%lx, data2 0x%lx, data3 0x%lx, ptr %p, flags 0x%x\n",
 		target_cpu, message, data, data2, data3, data_ptr, flags));
 
-	if (ici_enabled) {
+	if (sICIEnabled) {
 		int state;
-		int curr_cpu;
+		int currentCPU;
 
 		// find_free_message leaves interrupts disabled
 		state = find_free_message(&msg);
 
-		curr_cpu = smp_get_current_cpu();
-		if (target_cpu == curr_cpu) {
+		currentCPU = smp_get_current_cpu();
+		if (targetCPU == currentCPU) {
 			return_free_message(msg);
 			restore_interrupts(state);
 			return; // nope, cant do that
@@ -439,19 +442,19 @@ smp_send_ici(int32 targetCPU, ...)
 		msg->done = false;
 
 		// stick it in the appropriate cpu's mailbox
-		acquire_spinlock_nocheck(&cpu_msg_spinlock[target_cpu]);
-		msg->next = smp_msgs[target_cpu];
-		smp_msgs[target_cpu] = msg;
-		release_spinlock(&cpu_msg_spinlock[target_cpu]);
+		acquire_spinlock_nocheck(&cpu_msg_spinlock[targetCPU]);
+		msg->next = smp_msgs[targetCPU];
+		smp_msgs[targetCPU] = msg;
+		release_spinlock(&cpu_msg_spinlock[targetCPU]);
 
-		arch_smp_send_ici(target_cpu);
+		arch_smp_send_ici(targetCPU);
 
 		if (flags == SMP_MSG_FLAG_SYNC) {
 			// wait for the other cpu to finish processing it
 			// the interrupt handler will ref count it to <0
 			// if the message is sync after it has removed it from the mailbox
 			while (msg->done == false) {
-				smp_process_pending_ici(curr_cpu);
+				process_pending_ici(currentCPU);
 				PAUSE();
 			}
 			// for SYNC messages, it's our responsibility to put it
@@ -465,30 +468,31 @@ smp_send_ici(int32 targetCPU, ...)
 
 
 void
-smp_send_broadcast_ici(int message, uint32 data, uint32 data2, uint32 data3, void *data_ptr, int flags)
+smp_send_broadcast_ici(int32 message, uint32 data, uint32 data2, uint32 data3,
+	void *data_ptr, uint32 flags)
 {
 	struct smp_msg *msg;
 
 	TRACE(("smp_send_broadcast_ici: cpu %d mess 0x%x, data 0x%lx, data2 0x%lx, data3 0x%lx, ptr %p, flags 0x%x\n",
 		smp_get_current_cpu(), message, data, data2, data3, data_ptr, flags));
 
-	if (ici_enabled) {
+	if (sICIEnabled) {
 		int state;
-		int curr_cpu;
+		int currentCPU;
 
 		// find_free_message leaves interrupts disabled
 		state = find_free_message(&msg);
 
-		curr_cpu = smp_get_current_cpu();
+		currentCPU = smp_get_current_cpu();
 
 		msg->message = message;
 		msg->data = data;
 		msg->data2 = data2;
 		msg->data3 = data3;
 		msg->data_ptr = data_ptr;
-		msg->ref_count = smp_num_cpus - 1;
+		msg->ref_count = sNumCPUs - 1;
 		msg->flags = flags;
-		msg->proc_bitmap = SET_BIT(0, curr_cpu);
+		msg->proc_bitmap = SET_BIT(0, currentCPU);
 		msg->done = false;
 
 		TRACE(("smp_send_broadcast_ici%d: inserting msg %p into broadcast mbox\n",
@@ -511,7 +515,7 @@ smp_send_broadcast_ici(...)
 		TRACE(("smp_send_broadcast_ici: waiting for ack\n"));
 
 		while (msg->done == false) {
-			smp_process_pending_ici(curr_cpu);
+			process_pending_ici(currentCPU);
 			PAUSE();
 		}
 
@@ -529,52 +533,65 @@ smp_send_broadcast_ici(...)
 }
 
 
-int
-smp_trap_non_boot_cpus(kernel_args *ka, int cpu)
+bool
+smp_trap_non_boot_cpus(int32 cpu)
 {
 	if (cpu > 0) {
 		boot_cpu_spin[cpu] = 1;
 		acquire_spinlock(&boot_cpu_spin[cpu]);
-		return 1;
+		return false;
 	}
-	return 0;
+
+	return true;
 }
 
 
 void
-smp_wake_up_all_non_boot_cpus()
+smp_wake_up_non_boot_cpus()
 {
+	// resume non boot CPUs
 	int i;
-	for (i = 1; i < smp_num_cpus; i++) {
+	for (i = 1; i < sNumCPUs; i++) {
 		release_spinlock(&boot_cpu_spin[i]);
 	}
+
+	// ICIs were previously being ignored
+	if (sNumCPUs > 1)
+		sICIEnabled = true;
+
+	// invalidate all of the other processors' TLB caches
+	arch_cpu_global_TLB_invalidate();
+	smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVL_PAGE, 0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
+
+	// start the other processors
+	smp_send_broadcast_ici(SMP_MSG_RESCHEDULE, 0, 0, 0, NULL, SMP_MSG_FLAG_ASYNC);
 }
 
 
 void
-smp_wait_for_ap_cpus(kernel_args *ka)
+smp_wait_for_non_boot_cpus(void)
 {
-	unsigned int i;
-	int retry;
+	bool retry;
+	int32 i;
 	do {
-		retry = 0;
-		for (i = 1; i < ka->num_cpus; i++) {
+		retry = false;
+		for (i = 1; i < sNumCPUs; i++) {
 			if (boot_cpu_spin[i] != 1)
-				retry = 1;
+				retry = true;
 		}
-	} while (retry == 1);
+	} while (retry == true);
 }
 
 
 status_t
-smp_init(kernel_args *ka)
+smp_init(kernel_args *args)
 {
 	struct smp_msg *msg;
 	int i;
 
	TRACE(("smp_init: entry\n"));
 
-	if (ka->num_cpus > 1) {
+	if (args->num_cpus > 1) {
 		free_msgs = NULL;
 		free_msg_count = 0;
 		for (i = 0; i < MSG_POOL_SIZE; i++) {
@@ -588,11 +605,11 @@ smp_init(kernel_args *ka)
 			free_msgs = msg;
 			free_msg_count++;
 		}
-		smp_num_cpus = ka->num_cpus;
+		sNumCPUs = args->num_cpus;
 	}
 	TRACE(("smp_init: calling arch_smp_init\n"));
 
-	return arch_smp_init(ka);
+	return arch_smp_init(args);
 }
 
 
@@ -604,48 +621,32 @@ smp_per_cpu_init(kernel_args *args, int32 cpu)
 
 
 void
-smp_set_num_cpus(int num_cpus)
+smp_set_num_cpus(int32 numCPUs)
 {
-	smp_num_cpus = num_cpus;
+	sNumCPUs = numCPUs;
 }
 
 
-int
+int32
 smp_get_num_cpus()
 {
-	return smp_num_cpus;
+	return sNumCPUs;
 }
 
 
-int
+int32
 smp_get_current_cpu(void)
 {
-	struct thread *t = thread_get_current_thread();
-	if (t)
-		return t->cpu->info.cpu_num;
+	struct thread *thread = thread_get_current_thread();
+	if (thread)
+		return thread->cpu->info.cpu_num;
 
 	// this is not always correct during early boot, but it's okay for
 	// for the boot process
 	return 0;
 }
 
 
-int
-smp_enable_ici()
-{
-	if (smp_num_cpus > 1) // dont actually do it if we only have one cpu
-		ici_enabled = true;
-
-	return B_NO_ERROR;
-}
-
-
-int
-smp_disable_ici()
-{
-	ici_enabled = false;
-	return B_NO_ERROR;
-}
-
-
 // #pragma mark -
 //	public exported functions
 
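With smp_enable_ici()/smp_disable_ici() gone, inter-CPU interrupts are switched on in exactly one place, inside smp_wake_up_non_boot_cpus() (see above), and check_for_message() simply bails out while they are off. The whole gate now reduces to (lines taken from the hunks above):

    // in smp_wake_up_non_boot_cpus(): ICIs were previously being ignored
    if (sNumCPUs > 1)
    	sICIEnabled = true;

    // in check_for_message(): nothing to deliver before that point
    if (!sICIEnabled)
    	return NULL;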
|
@ -17,6 +17,7 @@
|
||||
#include <sem.h>
|
||||
#include <user_runtime.h>
|
||||
#include <kimage.h>
|
||||
#include <kscheduler.h>
|
||||
#include <elf.h>
|
||||
#include <syscalls.h>
|
||||
#include <syscall_process_info.h>
|
||||
|
@@ -18,6 +18,7 @@
 #include <arch/vm.h>
 #include <kimage.h>
 #include <ksignal.h>
+#include <kscheduler.h>
 #include <syscalls.h>
 #include <tls.h>
 #include <vfs.h>