Changed the boot procedure a bit.

Extracted scheduler_init() from start_scheduler() (which is now called scheduler_start()).
Moved scheduler-related function prototypes from thread.h to the new scheduler.h.
Cleanup.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@14518 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler, 2005-10-25 16:59:12 +00:00
commit 6cd505cee7 (parent 82f3114509)
15 changed files with 324 additions and 279 deletions
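
For orientation, here is a minimal sketch of the boot CPU's path through _start() after this change, condensed from the main.c hunks below (hardware and subsystem init, error handling, and the non-boot CPU branch are elided):

	/* sketch only -- reconstructed from the main.c diff in this commit */
	if (smp_trap_non_boot_cpus(currentCPU)) {
		// we are the boot CPU; the non-boot CPUs spin inside the call above
		thread_id thread;

		// ... debug output, VM, SMP, timer, VFS, team and thread init ...

		scheduler_init();
			// new: currently just registers the "run_queue" debugger command

		thread = spawn_kernel_thread(&main2, "main2", B_NORMAL_PRIORITY, NULL);

		smp_wake_up_non_boot_cpus();
			// now also enables ICIs and sends the TLB invalidate and
			// reschedule broadcasts that start_scheduler() used to send

		kernel_startup = false;
		enable_interrupts();

		scheduler_start();	// renamed from start_scheduler()
		resume_thread(thread);
	}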

View File

@@ -11,8 +11,7 @@
 #include <smp.h>
 #include <timer.h>

-#include <boot/kernel_args.h>
+struct kernel_args;

 /* CPU local data structure */

View File

@ -0,0 +1,27 @@
/*
* Copyright 2005, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*/
#ifndef KERNEL_SCHEDULER_H
#define KERNEL_SCHEDULER_H
struct thread;
#ifdef __cplusplus
extern "C" {
#endif
void scheduler_enqueue_in_run_queue(struct thread *thread);
void scheduler_remove_from_run_queue(struct thread *thread);
void scheduler_reschedule(void);
void scheduler_init(void);
void scheduler_start(void);
#ifdef __cplusplus
}
#endif
#endif /* KERNEL_SCHEDULER_H */
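
Callers pick these prototypes up via the kernel-private scheduler header (the C sources below gain #include <kscheduler.h>). The locking protocol around scheduler_reschedule() stays as before; a minimal sketch, mirroring what scheduler_start() itself does in the scheduler.c diff further down:

	/* sketch only -- interrupts off and thread lock held around a reschedule */
	cpu_status state = disable_interrupts();
	GRAB_THREAD_LOCK();

	scheduler_reschedule();

	RELEASE_THREAD_LOCK();
	restore_interrupts(state);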

View File

@@ -1,7 +1,10 @@
 /*
-** Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
-** Distributed under the terms of the NewOS License.
-*/
+ * Copyright 2002-2005, Axel Dörfler, axeld@pinc-software.de.
+ * Distributed under the terms of the MIT License.
+ *
+ * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
+ * Distributed under the terms of the NewOS License.
+ */
 #ifndef KERNEL_SMP_H
 #define KERNEL_SMP_H
@@ -26,20 +29,28 @@ enum {
 	SMP_MSG_FLAG_SYNC,
 };

+#ifdef __cplusplus
+extern "C" {
+#endif
+
 status_t smp_init(struct kernel_args *args);
 status_t smp_per_cpu_init(struct kernel_args *args, int32 cpu);
-int smp_trap_non_boot_cpus(struct kernel_args *ka, int cpu);
-void smp_wake_up_all_non_boot_cpus(void);
-void smp_wait_for_ap_cpus(struct kernel_args *ka);
-void smp_send_ici(int target_cpu, int message, unsigned long data, unsigned long data2, unsigned long data3, void *data_ptr, int flags);
-void smp_send_broadcast_ici(int message, unsigned long data, unsigned long data2, unsigned long data3, void *data_ptr, int flags);
-int smp_enable_ici(void);
-int smp_disable_ici(void);
-int smp_get_num_cpus(void);
-void smp_set_num_cpus(int num_cpus);
-int smp_get_current_cpu(void);
+bool smp_trap_non_boot_cpus(int32 cpu);
+void smp_wake_up_non_boot_cpus(void);
+void smp_wait_for_non_boot_cpus(void);
+void smp_send_ici(int32 targetCPU, int32 message, uint32 data, uint32 data2, uint32 data3,
+		void *data_ptr, uint32 flags);
+void smp_send_broadcast_ici(int32 message, uint32 data, uint32 data2, uint32 data3,
+		void *data_ptr, uint32 flags);
+int32 smp_get_num_cpus(void);
+void smp_set_num_cpus(int32 numCPUs);
+int32 smp_get_current_cpu(void);
 int smp_intercpu_int_handler(void);

+#ifdef __cplusplus
+}
+#endif
+
 #endif	/* KERNEL_SMP_H */
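
The ICI senders now take fixed-width int32/uint32 parameters instead of int/unsigned long, and smp_enable_ici()/smp_disable_ici() are gone (enabling now happens inside smp_wake_up_non_boot_cpus()). A representative call with the new signature, as used by smp_wake_up_non_boot_cpus() in the smp.c diff below:

	// synchronously invalidate the TLBs of all other CPUs
	smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVL_PAGE, 0, 0, 0, NULL,
		SMP_MSG_FLAG_SYNC);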

View File

@@ -20,17 +20,11 @@ struct kernel_args;
 extern "C" {
 #endif

-void scheduler_reschedule(void);
-void start_scheduler(void);
-
 void thread_enqueue(struct thread *t, struct thread_queue *q);
 struct thread *thread_lookat_queue(struct thread_queue *q);
 struct thread *thread_dequeue(struct thread_queue *q);
 struct thread *thread_dequeue_id(struct thread_queue *q, thread_id thr_id);
-void scheduler_enqueue_in_run_queue(struct thread *thread);
-void scheduler_remove_from_run_queue(struct thread *thread);
-
 void thread_at_kernel_entry(void);
 	// called when the thread enters the kernel on behalf of the thread
 void thread_at_kernel_exit(void);

View File

@@ -6,12 +6,13 @@
  * Distributed under the terms of the NewOS License.
  */

-#include <vm.h>
 #include <int.h>
+#include <kscheduler.h>
 #include <ksyscalls.h>
 #include <smp.h>
 #include <team.h>
 #include <thread.h>
+#include <vm.h>
 #include <vm_priv.h>

 #include <arch/cpu.h>

View File

@@ -98,6 +98,61 @@ apic_write(uint32 offset, uint32 data)
 }


+static status_t
+setup_apic(kernel_args *args, int32 cpu)
+{
+	uint32 config;
+
+	TRACE(("setting up the apic..."));
+
+	/* set spurious interrupt vector to 0xff */
+	config = apic_read(APIC_SIVR) & 0xffffff00;
+	config |= APIC_ENABLE | 0xff;
+	apic_write(APIC_SIVR, config);
+
+	// don't touch the LINT0/1 configuration in virtual wire mode
+	// ToDo: implement support for other modes...
+#if 0
+	if (cpu == 0) {
+		/* setup LINT0 as ExtINT */
+		config = (apic_read(APIC_LINT0) & 0xffff00ff);
+		config |= APIC_LVT_DM_ExtINT | APIC_LVT_IIPP | APIC_LVT_TM;
+		apic_write(APIC_LINT0, config);
+
+		/* setup LINT1 as NMI */
+		config = (apic_read(APIC_LINT1) & 0xffff00ff);
+		config |= APIC_LVT_DM_NMI | APIC_LVT_IIPP;
+		apic_write(APIC_LINT1, config);
+	}
+#endif
+
+	/* setup timer */
+	config = apic_read(APIC_LVTT) & ~APIC_LVTT_MASK;
+	config |= 0xfb | APIC_LVTT_M;	// vector 0xfb, timer masked
+	apic_write(APIC_LVTT, config);
+
+	apic_write(APIC_ICRT, 0);	// zero out the clock
+
+	config = apic_read(APIC_TDCR) & ~0x0000000f;
+	config |= APIC_TDCR_1;	// clock division by 1
+	apic_write(APIC_TDCR, config);
+
+	/* setup error vector to 0xfe */
+	config = (apic_read(APIC_LVT3) & 0xffffff00) | 0xfe;
+	apic_write(APIC_LVT3, config);
+
+	/* accept all interrupts */
+	config = apic_read(APIC_TPRI) & 0xffffff00;
+	apic_write(APIC_TPRI, config);
+
+	config = apic_read(APIC_SIVR);
+	apic_write(APIC_EOI, 0);
+
+	TRACE((" done\n"));
+	return 0;
+}
+
+
 status_t
 arch_smp_init(kernel_args *args)
 {
@@ -131,62 +186,12 @@ arch_smp_init(kernel_args *args)
 }


-static int
-smp_setup_apic(kernel_args *args)
-{
-	uint32 config;
-
-	TRACE(("setting up the apic..."));
-
-	/* set spurious interrupt vector to 0xff */
-	config = apic_read(APIC_SIVR) & 0xfffffc00;
-	config |= APIC_ENABLE | 0xff;
-	apic_write(APIC_SIVR, config);
-
-#if 0
-	/* setup LINT0 as ExtINT */
-	config = (apic_read(APIC_LINT0) & 0xffff1c00);
-	config |= APIC_LVT_DM_ExtINT | APIC_LVT_IIPP | APIC_LVT_TM;
-	apic_write(APIC_LINT0, config);
-
-	/* setup LINT1 as NMI */
-	config = (apic_read(APIC_LINT1) & 0xffff1c00);
-	config |= APIC_LVT_DM_NMI | APIC_LVT_IIPP;
-	apic_write(APIC_LINT1, config);
-#endif
-
-	/* setup timer */
-	config = apic_read(APIC_LVTT) & ~APIC_LVTT_MASK;
-	config |= 0xfb | APIC_LVTT_M;	// vector 0xfb, timer masked
-	apic_write(APIC_LVTT, config);
-
-	apic_write(APIC_ICRT, 0);	// zero out the clock
-
-	config = apic_read(APIC_TDCR) & ~0x0000000f;
-	config |= APIC_TDCR_1;	// clock division by 1
-	apic_write(APIC_TDCR, config);
-
-	/* setup error vector to 0xfe */
-	config = (apic_read(APIC_LVT3) & 0xffffff00) | 0xfe;
-	apic_write(APIC_LVT3, config);
-
-	/* accept all interrupts */
-	config = apic_read(APIC_TPRI) & 0xffffff00;
-	apic_write(APIC_TPRI, config);
-
-	config = apic_read(APIC_SIVR);
-	apic_write(APIC_EOI, 0);
-
-	TRACE((" done\n"));
-	return 0;
-}
-
-
 status_t
 arch_smp_per_cpu_init(kernel_args *args, int32 cpu)
 {
 	// set up the local apic on the current cpu
 	TRACE(("arch_smp_init_percpu: setting up the apic on cpu %ld\n", cpu));
-	smp_setup_apic(args);
+	setup_apic(args, cpu);

 	return B_OK;
 }
@@ -219,6 +224,10 @@ arch_smp_send_ici(int32 target_cpu)
 	apic_write(APIC_ICR1, config | 0xfd | APIC_ICR1_DELIVERY_MODE_FIXED
 		| APIC_ICR1_DEST_MODE_PHYSICAL | APIC_ICR1_DEST_FIELD);

+	// wait for message to be sent
+	while ((apic_read(APIC_ICR1) & APIC_ICR1_DELIVERY_STATUS) != 0)
+		;
+
 	restore_interrupts(state);
 }

View File

@@ -10,6 +10,7 @@
 #include <debugger.h>
 #include <kernel.h>
 #include <KernelExport.h>
+#include <kscheduler.h>
 #include <ksignal.h>
 #include <ksyscalls.h>
 #include <sem.h>

View File

@@ -7,15 +7,17 @@
 #include <KernelExport.h>

 #include <kernel.h>
 #include <kimage.h>
+#include <kscheduler.h>
 #include <lock.h>
 #include <team.h>
 #include <thread.h>
 #include <thread_types.h>
 #include <user_debugger.h>

-#include <malloc.h>
+#include <stdlib.h>
 #include <string.h>

View File

@@ -11,29 +11,30 @@
 #include <OS.h>

-#include <boot/kernel_args.h>
-#include <debug.h>
-#include <ksyscalls.h>
-#include <vm.h>
-#include <timer.h>
-#include <smp.h>
-#include <sem.h>
-#include <port.h>
-#include <vfs.h>
-#include <cbuf.h>
-#include <elf.h>
-#include <cpu.h>
-#include <kdriver_settings.h>
 #include <boot_item.h>
-#include <kmodule.h>
+#include <cbuf.h>
+#include <cpu.h>
+#include <debug.h>
+#include <elf.h>
 #include <int.h>
-#include <team.h>
-#include <system_info.h>
 #include <kdevice_manager.h>
-#include <real_time_clock.h>
+#include <kdriver_settings.h>
 #include <kernel_daemon.h>
+#include <kmodule.h>
+#include <kscheduler.h>
+#include <ksyscalls.h>
 #include <messaging.h>
+#include <port.h>
+#include <real_time_clock.h>
+#include <sem.h>
+#include <smp.h>
+#include <system_info.h>
+#include <team.h>
+#include <timer.h>
 #include <user_debugger.h>
+#include <vfs.h>
+#include <vm.h>
+
+#include <boot/kernel_args.h>

 #include <string.h>
@@ -47,21 +48,19 @@
 bool kernel_startup;

-static kernel_args ka;
+static kernel_args sKernelArgs;

 static int32 main2(void *);
-int _start(kernel_args *oldka, int cpu);	/* keep compiler happy */
+int _start(kernel_args *bootKernelArgs, int cpu);	/* keep compiler happy */


 int
-_start(kernel_args *oldka, int cpu_num)
+_start(kernel_args *bootKernelArgs, int currentCPU)
 {
-	thread_id thread = -1;
-
 	kernel_startup = true;

-	if (oldka->kernel_args_size != sizeof(kernel_args)
-		|| oldka->version != CURRENT_KERNEL_ARGS_VERSION) {
+	if (bootKernelArgs->kernel_args_size != sizeof(kernel_args)
+		|| bootKernelArgs->version != CURRENT_KERNEL_ARGS_VERSION) {
 		// This is something we cannot handle right now - release kernels
 		// should always be able to handle the kernel_args of earlier
 		// released kernels.
@@ -69,100 +68,104 @@ _start(kernel_args *oldka, int cpu_num)
 		return -1;
 	}

-	memcpy(&ka, oldka, sizeof(kernel_args));
+	memcpy(&sKernelArgs, bootKernelArgs, sizeof(kernel_args));
 		// the passed in kernel args are in a non-allocated range of memory

-	smp_set_num_cpus(ka.num_cpus);
+	smp_set_num_cpus(sKernelArgs.num_cpus);

 	// do any pre-booting cpu config
-	cpu_preboot_init(&ka);
+	cpu_preboot_init(&sKernelArgs);

 	// if we're not a boot cpu, spin here until someone wakes us up
-	if (smp_trap_non_boot_cpus(&ka, cpu_num) == B_NO_ERROR) {
-		// we're the boot processor, so wait for all of the APs to enter the kernel
-		smp_wait_for_ap_cpus(&ka);
+	if (smp_trap_non_boot_cpus(currentCPU)) {
+		thread_id thread;

 		// setup debug output
-		debug_init(&ka);
+		debug_init(&sKernelArgs);
 		set_dprintf_enabled(true);
 		dprintf("Welcome to kernel debugger output!\n");

+		// we're the boot processor, so wait for all of the APs to enter the kernel
+		smp_wait_for_non_boot_cpus();
+
 		// init modules
 		TRACE(("init CPU\n"));
-		cpu_init(&ka);
+		cpu_init(&sKernelArgs);
 		TRACE(("init interrupts\n"));
-		int_init(&ka);
+		int_init(&sKernelArgs);

 		TRACE(("init VM\n"));
-		vm_init(&ka);
+		vm_init(&sKernelArgs);
 			// Before vm_init_post_sem() is called, we have to make sure that
 			// the boot loader allocated region is not used anymore

 		// now we can use the heap and create areas
 		TRACE(("init driver_settings\n"));
 		boot_item_init();
-		driver_settings_init(&ka);
-		debug_init_post_vm(&ka);
-		int_init_post_vm(&ka);
-		cpu_init_post_vm(&ka);
+		driver_settings_init(&sKernelArgs);
+		debug_init_post_vm(&sKernelArgs);
+		int_init_post_vm(&sKernelArgs);
+		cpu_init_post_vm(&sKernelArgs);
 		TRACE(("init system info\n"));
-		system_info_init(&ka);
+		system_info_init(&sKernelArgs);

 		TRACE(("init SMP\n"));
-		smp_init(&ka);
+		smp_init(&sKernelArgs);
 		TRACE(("init timer\n"));
-		timer_init(&ka);
+		timer_init(&sKernelArgs);
 		TRACE(("init real time clock\n"));
-		rtc_init(&ka);
+		rtc_init(&sKernelArgs);

 		TRACE(("init semaphores\n"));
-		sem_init(&ka);
+		sem_init(&sKernelArgs);

 		// now we can create and use semaphores
 		TRACE(("init VM semaphores\n"));
-		vm_init_post_sem(&ka);
+		vm_init_post_sem(&sKernelArgs);
 		TRACE(("init driver_settings\n"));
-		driver_settings_init_post_sem(&ka);
+		driver_settings_init_post_sem(&sKernelArgs);
 		TRACE(("init generic syscall\n"));
 		generic_syscall_init();
 		TRACE(("init cbuf\n"));
 		cbuf_init();
 		TRACE(("init VFS\n"));
-		vfs_init(&ka);
+		vfs_init(&sKernelArgs);

 		TRACE(("init teams\n"));
-		team_init(&ka);
+		team_init(&sKernelArgs);
 		TRACE(("init threads\n"));
-		thread_init(&ka);
+		thread_init(&sKernelArgs);
 		TRACE(("init ports\n"));
-		port_init(&ka);
+		port_init(&sKernelArgs);
 		TRACE(("init kernel daemons\n"));
 		kernel_daemon_init();

 		TRACE(("init VM threads\n"));
-		vm_init_post_thread(&ka);
+		vm_init_post_thread(&sKernelArgs);
 		TRACE(("init ELF loader\n"));
-		elf_init(&ka);
+		elf_init(&sKernelArgs);
+		TRACE(("init scheduler\n"));
+		scheduler_init();

 		// start a thread to finish initializing the rest of the system
 		thread = spawn_kernel_thread(&main2, "main2", B_NORMAL_PRIORITY, NULL);

-		smp_wake_up_all_non_boot_cpus();
-		smp_enable_ici();	// ici's were previously being ignored
-		start_scheduler();
+		smp_wake_up_non_boot_cpus();
+
+		TRACE(("enable interrupts, exit kernel startup\n"));
+		kernel_startup = false;
+		enable_interrupts();
+
+		scheduler_start();
+		resume_thread(thread);
 	} else {
-		// this is run per cpu for each AP processor after they've been set loose
-		smp_per_cpu_init(&ka, cpu_num);
-		thread_per_cpu_init(cpu_num);
+		// this is run for each non boot processor after they've been set loose
+		smp_per_cpu_init(&sKernelArgs, currentCPU);
+		thread_per_cpu_init(currentCPU);
+		enable_interrupts();
 	}

-	TRACE(("enable interrupts, exit kernel startup\n"));
-	kernel_startup = false;
-	enable_interrupts();
-
-	if (thread >= B_OK)
-		resume_thread(thread);
-
-	TRACE(("main: done... begin idle loop on cpu %d\n", cpu_num));
+	TRACE(("main: done... begin idle loop on cpu %d\n", currentCPU));

 	for (;;)
 		arch_cpu_idle();
@@ -178,7 +181,7 @@ main2(void *unused)
 	TRACE(("start of main2: initializing devices\n"));

 	TRACE(("Init modules\n"));
-	module_init(&ka);
+	module_init(&sKernelArgs);

 	// ToDo: the preloaded image debug data is placed in the kernel args, and
 	// thus, if they are enabled, the kernel args shouldn't be freed, so
@@ -188,7 +191,7 @@ main2(void *unused)
 		// module_init() is supposed to be the last user of the kernel args
 		// Note: don't confuse the kernel_args structure (which is never freed)
 		// with the kernel args ranges it contains (and which are freed here).
-		vm_free_kernel_args(&ka);
+		vm_free_kernel_args(&sKernelArgs);
 	}

 	// init userland debugging
@@ -204,15 +207,13 @@ main2(void *unused)
 	vfs_bootstrap_file_systems();

 	TRACE(("Init Device Manager\n"));
-	device_manager_init(&ka);
+	device_manager_init(&sKernelArgs);

 	// ToDo: device manager starts here, bus_init()/dev_init() won't be necessary anymore,
 	//	but instead, the hardware and drivers are rescanned then.

 	TRACE(("Mount boot file system\n"));
-	vfs_mount_boot_file_system(&ka);
-
-	//net_init_postdev(&ka);
+	vfs_mount_boot_file_system(&sKernelArgs);

 	//module_test();
 #if 0

View File

@@ -12,6 +12,7 @@
 #include <OS.h>

+#include <kscheduler.h>
 #include <thread.h>
 #include <timer.h>
 #include <int.h>
@@ -84,7 +85,8 @@ scheduler_enqueue_in_run_queue(struct thread *thread)
 	if (thread->priority < B_MIN_PRIORITY)
 		thread->priority = B_MIN_PRIORITY;

-	for (curr = sRunQueue.head, prev = NULL; curr && (curr->priority >= thread->priority); curr = curr->queue_next) {
+	for (curr = sRunQueue.head, prev = NULL; curr && curr->priority >= thread->priority;
+			curr = curr->queue_next) {
 		if (prev)
 			prev = prev->queue_next;
 		else
@@ -166,7 +168,7 @@ scheduler_reschedule(void)
 	switch (oldThread->next_state) {
 		case B_THREAD_RUNNING:
 		case B_THREAD_READY:
-			TRACE(("enqueueing thread 0x%lx into run q. pri = %d\n", oldThread->id, oldThread->priority));
+			TRACE(("enqueueing thread 0x%lx into run q. pri = %ld\n", oldThread->id, oldThread->priority));
 			scheduler_enqueue_in_run_queue(oldThread);
 			break;
 		case B_THREAD_SUSPENDED:
@@ -178,7 +180,7 @@ scheduler_reschedule(void)
 			thread_enqueue(oldThread, &dead_q);
 			break;
 		default:
-			TRACE(("not enqueueing thread 0x%lx into run q. next_state = %d\n", oldThread->id, oldThread->next_state));
+			TRACE(("not enqueueing thread 0x%lx into run q. next_state = %ld\n", oldThread->id, oldThread->next_state));
 			break;
 	}
 	oldThread->state = oldThread->next_state;
@@ -186,13 +188,13 @@ scheduler_reschedule(void)
 	// select next thread from the run queue
 	nextThread = sRunQueue.head;
 	prevThread = NULL;
-	while (nextThread && (nextThread->priority > B_IDLE_PRIORITY)) {
+	while (nextThread && nextThread->priority > B_IDLE_PRIORITY) {
 		// always extract real time threads
 		if (nextThread->priority >= B_FIRST_REAL_TIME_PRIORITY)
 			break;

 		// never skip last non-idle normal thread
-		if (nextThread->queue_next && (nextThread->queue_next->priority == B_IDLE_PRIORITY))
+		if (nextThread->queue_next && nextThread->queue_next->priority == B_IDLE_PRIORITY)
 			break;

 		// skip normal threads sometimes
@@ -217,13 +219,13 @@ scheduler_reschedule(void)
 	if (nextThread != oldThread || oldThread->cpu->info.preempted) {
 		bigtime_t quantum = 3000;	// ToDo: calculate quantum!
-		timer *quantum_timer= &oldThread->cpu->info.quantum_timer;
+		timer *quantumTimer = &oldThread->cpu->info.quantum_timer;

 		if (!oldThread->cpu->info.preempted)
-			_local_timer_cancel_event(oldThread->cpu->info.cpu_num, quantum_timer);
+			_local_timer_cancel_event(oldThread->cpu->info.cpu_num, quantumTimer);

 		oldThread->cpu->info.preempted = 0;
-		add_timer(quantum_timer, &reschedule_event, quantum, B_ONE_SHOT_RELATIVE_TIMER);
+		add_timer(quantumTimer, &reschedule_event, quantum, B_ONE_SHOT_RELATIVE_TIMER);

 		if (nextThread != oldThread)
 			context_switch(oldThread, nextThread);
@@ -231,34 +233,27 @@ scheduler_reschedule(void)
 }


+void
+scheduler_init(void)
+{
+	add_debugger_command("run_queue", &dump_run_queue, "list threads in run queue");
+}
+
+
 /** This starts the scheduler. Must be run under the context of
  *	the initial idle thread.
  */

 void
-start_scheduler(void)
+scheduler_start(void)
 {
-	cpu_status state;
-
-	// ToDo: may not be the best place for this
-	// invalidate all of the other processors' TLB caches
-	state = disable_interrupts();
-	arch_cpu_global_TLB_invalidate();
-	smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVL_PAGE, 0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
-	restore_interrupts(state);
-
-	// start the other processors
-	smp_send_broadcast_ici(SMP_MSG_RESCHEDULE, 0, 0, 0, NULL, SMP_MSG_FLAG_ASYNC);
-
-	state = disable_interrupts();
+	cpu_status state = disable_interrupts();
+
 	GRAB_THREAD_LOCK();

 	scheduler_reschedule();

 	RELEASE_THREAD_LOCK();
 	restore_interrupts(state);
-
-	add_debugger_command("run_queue", &dump_run_queue, "list threads in run queue");
 }

View File

@@ -13,6 +13,7 @@
 #include <sem.h>
 #include <kernel.h>
+#include <kscheduler.h>
 #include <ksignal.h>
 #include <smp.h>
 #include <int.h>

View File

@@ -11,12 +11,13 @@
 #include <KernelExport.h>
 #include <debug.h>
-#include <thread.h>
-#include <team.h>
-#include <sem.h>
-#include <ksignal.h>
-#include <user_debugger.h>
 #include <kernel.h>
+#include <kscheduler.h>
+#include <ksignal.h>
+#include <sem.h>
+#include <team.h>
+#include <thread.h>
+#include <user_debugger.h>

 #include <stddef.h>
 #include <string.h>

View File

@@ -21,9 +21,9 @@
 #include <string.h>

 #define DEBUG_SPINLOCKS 1
-#define TRACE_SMP 0
-#if TRACE_SMP
+//#define TRACE_SMP
+#ifdef TRACE_SMP
 #	define TRACE(x) dprintf x
 #else
 #	define TRACE(x) ;
@@ -38,17 +38,16 @@
 #define MSG_POOL_SIZE (SMP_MAX_CPUS * 4)

 struct smp_msg {
 	struct smp_msg	*next;
-	int				message;
-	unsigned long	data;
-	unsigned long	data2;
-	unsigned long	data3;
+	int32			message;
+	uint32			data;
+	uint32			data2;
+	uint32			data3;
 	void			*data_ptr;
-	int				flags;
+	uint32			flags;
 	int32			ref_count;
 	volatile bool	done;
-	unsigned int	proc_bitmap;
-	int				lock;
+	uint32			proc_bitmap;
 };

 #define MAILBOX_LOCAL 1
@@ -66,13 +65,13 @@ static spinlock cpu_msg_spinlock[SMP_MAX_CPUS] = { 0, };
 static struct smp_msg *smp_broadcast_msgs = NULL;
 static spinlock broadcast_msg_spinlock = 0;

-static bool ici_enabled = false;
+static bool sICIEnabled = false;
+static int32 sNumCPUs = 1;

-static int smp_num_cpus = 1;
-
-static int smp_process_pending_ici(int curr_cpu);
+static int32 process_pending_ici(int32 currentCPU);

-#ifdef DEBUG_SPINLOCKS
+#if DEBUG_SPINLOCKS
 #define NUM_LAST_CALLERS	32

 static struct {
@@ -112,13 +111,13 @@ find_lock_caller(spinlock *lock)
 void
 acquire_spinlock(spinlock *lock)
 {
-	if (smp_num_cpus > 1) {
-		int curr_cpu = smp_get_current_cpu();
+	if (sNumCPUs > 1) {
+		int currentCPU = smp_get_current_cpu();

 		if (are_interrupts_enabled())
 			panic("acquire_spinlock: attempt to acquire lock %p with interrupts enabled\n", lock);

 		while (1) {
 			while (*lock != 0) {
-				smp_process_pending_ici(curr_cpu);
+				process_pending_ici(currentCPU);
 				PAUSE();
 			}
 			if (atomic_set((int32 *)lock, 1) == 0)
@@ -144,7 +143,7 @@ acquire_spinlock(spinlock *lock)
 static void
 acquire_spinlock_nocheck(spinlock *lock)
 {
-	if (smp_num_cpus > 1) {
+	if (sNumCPUs > 1) {
 #if DEBUG_SPINLOCKS
 		if (are_interrupts_enabled())
 			panic("acquire_spinlock_nocheck: attempt to acquire lock %p with interrupts enabled\n", lock);
@@ -169,7 +168,7 @@ acquire_spinlock_nocheck(spinlock *lock)
 void
 release_spinlock(spinlock *lock)
 {
-	if (smp_num_cpus > 1) {
+	if (sNumCPUs > 1) {
 		if (are_interrupts_enabled())
 			panic("release_spinlock: attempt to release lock %p with interrupts enabled\n", lock);
 		if (atomic_set((int32 *)lock, 0) != 1)
@@ -189,10 +188,10 @@ release_spinlock(spinlock *lock)
 // NOTE: has side effect of disabling interrupts
 // return value is interrupt state

-static int
+static cpu_status
 find_free_message(struct smp_msg **msg)
 {
-	int state;
+	cpu_status state;

 	TRACE(("find_free_message: entry\n"));
@@ -236,33 +235,36 @@ return_free_message(struct smp_msg *msg)
 static struct smp_msg *
-smp_check_for_message(int curr_cpu, int *source_mailbox)
+check_for_message(int currentCPU, int *source_mailbox)
 {
 	struct smp_msg *msg;

-	acquire_spinlock_nocheck(&cpu_msg_spinlock[curr_cpu]);
-	msg = smp_msgs[curr_cpu];
+	if (!sICIEnabled)
+		return NULL;
+
+	acquire_spinlock_nocheck(&cpu_msg_spinlock[currentCPU]);
+	msg = smp_msgs[currentCPU];
 	if (msg != NULL) {
-		smp_msgs[curr_cpu] = msg->next;
-		release_spinlock(&cpu_msg_spinlock[curr_cpu]);
+		smp_msgs[currentCPU] = msg->next;
+		release_spinlock(&cpu_msg_spinlock[currentCPU]);
 		TRACE((" found msg %p in cpu mailbox\n", msg));
 		*source_mailbox = MAILBOX_LOCAL;
 	} else {
 		// try getting one from the broadcast mailbox

-		release_spinlock(&cpu_msg_spinlock[curr_cpu]);
+		release_spinlock(&cpu_msg_spinlock[currentCPU]);
 		acquire_spinlock_nocheck(&broadcast_msg_spinlock);

 		msg = smp_broadcast_msgs;
 		while (msg != NULL) {
-			if (CHECK_BIT(msg->proc_bitmap, curr_cpu) != 0) {
+			if (CHECK_BIT(msg->proc_bitmap, currentCPU) != 0) {
 				// we have handled this one already
 				msg = msg->next;
 				continue;
 			}

 			// mark it so we wont try to process this one again
-			msg->proc_bitmap = SET_BIT(msg->proc_bitmap, curr_cpu);
+			msg->proc_bitmap = SET_BIT(msg->proc_bitmap, currentCPU);
 			*source_mailbox = MAILBOX_BCAST;
 			break;
 		}
@@ -274,7 +276,7 @@ smp_check_for_message(int curr_cpu, int *source_mailbox)
 static void
-smp_finish_message_processing(int curr_cpu, struct smp_msg *msg, int source_mailbox)
+finish_message_processing(int currentCPU, struct smp_msg *msg, int source_mailbox)
 {
 	int old_refcount;
@@ -292,8 +294,8 @@ smp_finish_message_processing(int curr_cpu, struct smp_msg *msg, int source_mailbox)
 			spinlock = &broadcast_msg_spinlock;
 			break;
 		case MAILBOX_LOCAL:
-			mbox = &smp_msgs[curr_cpu];
-			spinlock = &cpu_msg_spinlock[curr_cpu];
+			mbox = &smp_msgs[currentCPU];
+			spinlock = &cpu_msg_spinlock[currentCPU];
 			break;
 	}
@@ -340,15 +342,15 @@ smp_finish_message_processing(int curr_cpu, struct smp_msg *msg, int source_mailbox)
 }


-static int
-smp_process_pending_ici(int curr_cpu)
+static int32
+process_pending_ici(int32 currentCPU)
 {
 	struct smp_msg *msg;
 	bool halt = false;
 	int source_mailbox = 0;
 	int retval = B_HANDLED_INTERRUPT;

-	msg = smp_check_for_message(curr_cpu, &source_mailbox);
+	msg = check_for_message(currentCPU, &source_mailbox);
 	if (msg == NULL)
 		return retval;
@@ -369,15 +371,15 @@ smp_process_pending_ici(int curr_cpu)
 			break;
 		case SMP_MSG_CPU_HALT:
 			halt = true;
-			dprintf("cpu %d halted!\n", curr_cpu);
+			dprintf("cpu %ld halted!\n", currentCPU);
 			break;
 		case SMP_MSG_1:
 		default:
-			dprintf("smp_intercpu_int_handler: got unknown message %d\n", msg->message);
+			dprintf("smp_intercpu_int_handler: got unknown message %ld\n", msg->message);
 	}

 	// finish dealing with this message, possibly removing it from the list
-	smp_finish_message_processing(curr_cpu, msg, source_mailbox);
+	finish_message_processing(currentCPU, msg, source_mailbox);

 	// special case for the halt message
 	// we otherwise wouldn't have gotten the opportunity to clean up
@@ -394,11 +396,11 @@ int
 smp_intercpu_int_handler(void)
 {
 	int retval;
-	int curr_cpu = smp_get_current_cpu();
+	int currentCPU = smp_get_current_cpu();

-	TRACE(("smp_intercpu_int_handler: entry on cpu %d\n", curr_cpu));
+	TRACE(("smp_intercpu_int_handler: entry on cpu %d\n", currentCPU));

-	retval = smp_process_pending_ici(curr_cpu);
+	retval = process_pending_ici(currentCPU);

 	TRACE(("smp_intercpu_int_handler: done\n"));
@@ -407,22 +409,23 @@ smp_intercpu_int_handler(void)
 void
-smp_send_ici(int target_cpu, int message, uint32 data, uint32 data2, uint32 data3, void *data_ptr, int flags)
+smp_send_ici(int32 targetCPU, int32 message, uint32 data, uint32 data2, uint32 data3,
+	void *data_ptr, uint32 flags)
 {
 	struct smp_msg *msg;

 	TRACE(("smp_send_ici: target 0x%x, mess 0x%x, data 0x%lx, data2 0x%lx, data3 0x%lx, ptr %p, flags 0x%x\n",
 		target_cpu, message, data, data2, data3, data_ptr, flags));

-	if (ici_enabled) {
+	if (sICIEnabled) {
 		int state;
-		int curr_cpu;
+		int currentCPU;

 		// find_free_message leaves interrupts disabled
 		state = find_free_message(&msg);

-		curr_cpu = smp_get_current_cpu();
-		if (target_cpu == curr_cpu) {
+		currentCPU = smp_get_current_cpu();
+		if (targetCPU == currentCPU) {
 			return_free_message(msg);
 			restore_interrupts(state);
 			return; // nope, cant do that
@@ -439,19 +442,19 @@ smp_send_ici(int target_cpu, int message, uint32 data, uint32 data2, uint32 data3, void *data_ptr, int flags)
 		msg->done = false;

 		// stick it in the appropriate cpu's mailbox
-		acquire_spinlock_nocheck(&cpu_msg_spinlock[target_cpu]);
-		msg->next = smp_msgs[target_cpu];
-		smp_msgs[target_cpu] = msg;
-		release_spinlock(&cpu_msg_spinlock[target_cpu]);
+		acquire_spinlock_nocheck(&cpu_msg_spinlock[targetCPU]);
+		msg->next = smp_msgs[targetCPU];
+		smp_msgs[targetCPU] = msg;
+		release_spinlock(&cpu_msg_spinlock[targetCPU]);

-		arch_smp_send_ici(target_cpu);
+		arch_smp_send_ici(targetCPU);

 		if (flags == SMP_MSG_FLAG_SYNC) {
 			// wait for the other cpu to finish processing it
 			// the interrupt handler will ref count it to <0
 			// if the message is sync after it has removed it from the mailbox
 			while (msg->done == false) {
-				smp_process_pending_ici(curr_cpu);
+				process_pending_ici(currentCPU);
 				PAUSE();
 			}
 			// for SYNC messages, it's our responsibility to put it
@@ -465,30 +468,31 @@ smp_send_ici(int target_cpu, int message, uint32 data, uint32 data2, uint32 data3, void *data_ptr, int flags)
 void
-smp_send_broadcast_ici(int message, uint32 data, uint32 data2, uint32 data3, void *data_ptr, int flags)
+smp_send_broadcast_ici(int32 message, uint32 data, uint32 data2, uint32 data3,
+	void *data_ptr, uint32 flags)
 {
 	struct smp_msg *msg;

 	TRACE(("smp_send_broadcast_ici: cpu %d mess 0x%x, data 0x%lx, data2 0x%lx, data3 0x%lx, ptr %p, flags 0x%x\n",
 		smp_get_current_cpu(), message, data, data2, data3, data_ptr, flags));

-	if (ici_enabled) {
+	if (sICIEnabled) {
 		int state;
-		int curr_cpu;
+		int currentCPU;

 		// find_free_message leaves interrupts disabled
 		state = find_free_message(&msg);

-		curr_cpu = smp_get_current_cpu();
+		currentCPU = smp_get_current_cpu();

 		msg->message = message;
 		msg->data = data;
 		msg->data2 = data2;
 		msg->data3 = data3;
 		msg->data_ptr = data_ptr;
-		msg->ref_count = smp_num_cpus - 1;
+		msg->ref_count = sNumCPUs - 1;
 		msg->flags = flags;
-		msg->proc_bitmap = SET_BIT(0, curr_cpu);
+		msg->proc_bitmap = SET_BIT(0, currentCPU);
 		msg->done = false;

 		TRACE(("smp_send_broadcast_ici%d: inserting msg %p into broadcast mbox\n",
@@ -511,7 +515,7 @@ smp_send_broadcast_ici(int message, uint32 data, uint32 data2, uint32 data3, void *data_ptr, int flags)
 		TRACE(("smp_send_broadcast_ici: waiting for ack\n"));

 		while (msg->done == false) {
-			smp_process_pending_ici(curr_cpu);
+			process_pending_ici(currentCPU);
 			PAUSE();
 		}
@@ -529,52 +533,65 @@ smp_send_broadcast_ici(int message, uint32 data, uint32 data2, uint32 data3, void *data_ptr, int flags)
 }


-int
-smp_trap_non_boot_cpus(kernel_args *ka, int cpu)
+bool
+smp_trap_non_boot_cpus(int32 cpu)
 {
 	if (cpu > 0) {
 		boot_cpu_spin[cpu] = 1;
 		acquire_spinlock(&boot_cpu_spin[cpu]);
-		return 1;
+		return false;
 	}
-	return 0;
+
+	return true;
 }


 void
-smp_wake_up_all_non_boot_cpus()
+smp_wake_up_non_boot_cpus()
 {
+	// resume non boot CPUs
 	int i;
-	for (i = 1; i < smp_num_cpus; i++) {
+	for (i = 1; i < sNumCPUs; i++) {
 		release_spinlock(&boot_cpu_spin[i]);
 	}
+
+	// ICIs were previously being ignored
+	if (sNumCPUs > 1)
+		sICIEnabled = true;
+
+	// invalidate all of the other processors' TLB caches
+	arch_cpu_global_TLB_invalidate();
+	smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVL_PAGE, 0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
+
+	// start the other processors
+	smp_send_broadcast_ici(SMP_MSG_RESCHEDULE, 0, 0, 0, NULL, SMP_MSG_FLAG_ASYNC);
 }


 void
-smp_wait_for_ap_cpus(kernel_args *ka)
+smp_wait_for_non_boot_cpus(void)
 {
-	unsigned int i;
-	int retry;
+	bool retry;
+	int32 i;

 	do {
-		retry = 0;
-		for (i = 1; i < ka->num_cpus; i++) {
+		retry = false;
+		for (i = 1; i < sNumCPUs; i++) {
 			if (boot_cpu_spin[i] != 1)
-				retry = 1;
+				retry = true;
 		}
-	} while (retry == 1);
+	} while (retry == true);
 }


 status_t
-smp_init(kernel_args *ka)
+smp_init(kernel_args *args)
 {
 	struct smp_msg *msg;
 	int i;

 	TRACE(("smp_init: entry\n"));

-	if (ka->num_cpus > 1) {
+	if (args->num_cpus > 1) {
 		free_msgs = NULL;
 		free_msg_count = 0;
 		for (i = 0; i < MSG_POOL_SIZE; i++) {
@@ -588,11 +605,11 @@ smp_init(kernel_args *ka)
 			free_msgs = msg;
 			free_msg_count++;
 		}
-		smp_num_cpus = ka->num_cpus;
+		sNumCPUs = args->num_cpus;
 	}

 	TRACE(("smp_init: calling arch_smp_init\n"));
-	return arch_smp_init(ka);
+	return arch_smp_init(args);
 }
@@ -604,48 +621,32 @@ smp_per_cpu_init(kernel_args *args, int32 cpu)
 void
-smp_set_num_cpus(int num_cpus)
+smp_set_num_cpus(int32 numCPUs)
 {
-	smp_num_cpus = num_cpus;
+	sNumCPUs = numCPUs;
 }


-int
+int32
 smp_get_num_cpus()
 {
-	return smp_num_cpus;
+	return sNumCPUs;
 }


-int
+int32
 smp_get_current_cpu(void)
 {
-	struct thread *t = thread_get_current_thread();
-	if (t)
-		return t->cpu->info.cpu_num;
+	struct thread *thread = thread_get_current_thread();
+	if (thread)
+		return thread->cpu->info.cpu_num;

+	// this is not always correct during early boot, but it's okay for
+	// the boot process
 	return 0;
 }


-int
-smp_enable_ici()
-{
-	if (smp_num_cpus > 1)	// dont actually do it if we only have one cpu
-		ici_enabled = true;
-
-	return B_NO_ERROR;
-}
-
-
-int
-smp_disable_ici()
-{
-	ici_enabled = false;
-	return B_NO_ERROR;
-}
-
-
 //	#pragma mark -
 //	public exported functions

View File

@@ -17,6 +17,7 @@
 #include <sem.h>
 #include <user_runtime.h>
 #include <kimage.h>
+#include <kscheduler.h>
 #include <elf.h>
 #include <syscalls.h>
 #include <syscall_process_info.h>

View File

@@ -18,6 +18,7 @@
 #include <arch/vm.h>
 #include <kimage.h>
 #include <ksignal.h>
+#include <kscheduler.h>
 #include <syscalls.h>
 #include <tls.h>
 #include <vfs.h>