bonefish + mmlr:

* We started the "main2" thread too late. Since the scheduler was already
  started on all CPUs, the idle thread could wait (for a mutex) while
  spawning the "main2" thread. This violated the assumption in the scheduler
  that all idle threads would always be ready or running. We now create the
  thread while the kernel still runs single-threaded (see the sketch below).
* scheduler_start() is now invoked with interrupts still disabled. We enable
  them after the function returns. This prevents scheduler_reschedule() from
  potentially being invoked before scheduler_start().
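
For orientation, a condensed sketch of the reordered boot path on the boot
CPU, drawn from the hunks below (the AP rendezvous loop and error handling
are elided; this is not the literal code):

    // While the kernel still runs single-threaded, create the "main2" thread.
    // It cannot be scheduled yet, since scheduler_start() has not been called
    // on any CPU.
    thread_id thread = spawn_kernel_thread(&main2, "main2",
        B_NORMAL_PRIORITY, NULL);
    send_signal_etc(thread, SIGCONT, B_DO_NOT_RESCHEDULE);

    // ... bring up the AP CPUs in lock step (smp_cpu_rendezvous() calls elided) ...

    // Start the scheduler with interrupts still disabled, so that
    // scheduler_reschedule() cannot be invoked before scheduler_start();
    // interrupts are enabled only after it returns.
    scheduler_start();
    enable_interrupts();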


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@29914 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold 2009-04-04 23:54:01 +00:00
parent aeab3755ee
commit 53e1017720
3 changed files with 40 additions and 44 deletions


@@ -189,6 +189,13 @@ _start(kernel_args *bootKernelArgs, int currentCPU)
swap_init();
#endif
// Start a thread to finish initializing the rest of the system. Note,
// it won't be scheduled before calling scheduler_start() (on any CPU).
TRACE("spawning main2 thread\n");
thread_id thread = spawn_kernel_thread(&main2, "main2",
B_NORMAL_PRIORITY, NULL);
send_signal_etc(thread, SIGCONT, B_DO_NOT_RESCHEDULE);
// bring up the AP cpus in a lock step fashion
TRACE("waking up AP cpus\n");
sCpuRendezvous = sCpuRendezvous2 = 0;
@@ -202,16 +209,9 @@ _start(kernel_args *bootKernelArgs, int currentCPU)
smp_cpu_rendezvous(&sCpuRendezvous2, 0);
// release the AP cpus to go enter the scheduler
TRACE("enabling interrupts and starting scheduler on cpu 0\n");
enable_interrupts();
TRACE("starting scheduler on cpu 0 and enabling interrupts\n");
scheduler_start();
// start a thread to finish initializing the rest of the system
TRACE("starting main2 thread\n");
thread_id thread = spawn_kernel_thread(&main2, "main2",
B_NORMAL_PRIORITY, NULL);
TRACE("resuming main2 thread...\n");
resume_thread(thread);
enable_interrupts();
} else {
// lets make sure we're in sync with the main cpu
// the boot processor has probably been sending us
@@ -228,8 +228,8 @@ _start(kernel_args *bootKernelArgs, int currentCPU)
smp_cpu_rendezvous(&sCpuRendezvous2, currentCPU);
// welcome to the machine
enable_interrupts();
scheduler_start();
enable_interrupts();
}
TRACE("main: done... begin idle loop on cpu %d\n", currentCPU);


@@ -49,13 +49,13 @@ const bigtime_t kMinThreadQuantum = 3000;
const bigtime_t kMaxThreadQuantum = 10000;
struct scheduler_thread_data {
scheduler_thread_data(void)
scheduler_thread_data(void)
{
Init();
}
void Init()
void Init()
{
memset(fLastThreadQuantums, 0, sizeof(fLastThreadQuantums));
fLastQuantumSlot = 0;
@@ -69,7 +69,7 @@ struct scheduler_thread_data {
quantumAverage += fLastThreadQuantums[i];
return quantumAverage / kMaxTrackingQuantums;
}
int32 fLastThreadQuantums[kMaxTrackingQuantums];
int16 fLastQuantumSlot;
int32 fLastQueue;
@@ -96,13 +96,13 @@ dump_run_queue(int argc, char **argv)
for (int32 i = 0; i < smp_get_num_cpus(); i++) {
thread = sRunQueue[i];
kprintf("Run queue for cpu %ld (%ld threads)\n", i,
kprintf("Run queue for cpu %ld (%ld threads)\n", i,
sRunQueueSize[i]);
if (sRunQueueSize[i] > 0) {
kprintf("thread id priority avg. quantum name\n");
while (thread) {
kprintf("%p %-7ld %-8ld %-12ld %s\n", thread, thread->id,
thread->priority,
thread->priority,
thread->scheduler_data->GetAverageQuantumUsage(),
thread->name);
thread = thread->queue_next;
@@ -126,7 +126,7 @@ affine_get_most_idle_cpu()
if (targetCPU < 0 || sRunQueueSize[i] < sRunQueueSize[targetCPU])
targetCPU = i;
}
return targetCPU;
}
@@ -151,7 +151,7 @@ static void
affine_enqueue_in_run_queue(struct thread *thread)
{
int32 targetCPU = -1;
if (thread->pinned_to_cpu > 0)
if (thread->pinned_to_cpu > 0)
targetCPU = thread->previous_cpu->cpu_num;
else if (thread->previous_cpu == NULL || thread->previous_cpu->disabled)
targetCPU = affine_get_most_idle_cpu();
@@ -183,14 +183,14 @@ affine_enqueue_in_run_queue(struct thread *thread)
sRunQueue[targetCPU] = thread;
thread->scheduler_data->fLastQueue = targetCPU;
}
thread->next_priority = thread->priority;
if (thread->priority != B_IDLE_PRIORITY && targetCPU != smp_get_current_cpu()) {
int32 idleCPU = targetCPU;
if ((sIdleCPUs & (1 << targetCPU)) == 0) {
idleCPU = affine_get_next_idle_cpu();
// no idle CPUs are available
// no idle CPUs are available
// to try and grab this task
if (idleCPU < 0)
return;
@@ -214,7 +214,7 @@ dequeue_from_run_queue(struct thread *prevThread, int32 currentCPU)
}
sRunQueueSize[currentCPU]--;
resultThread->scheduler_data->fLastQueue = -1;
return resultThread;
}
@@ -256,17 +256,17 @@ static struct thread *steal_thread_from_other_cpus(int32 currentCPU)
if (nextThread->pinned_to_cpu > 0) {
prevThread = nextThread;
nextThread = prevThread->queue_next;
} else
} else
break;
} while (nextThread->queue_next != NULL);
// we reached the end of the queue without finding an
// eligible thread.
if (nextThread->pinned_to_cpu > 0)
nextThread = NULL;
// dequeue the thread we're going to steal
if (nextThread != NULL)
// dequeue the thread we're going to steal
if (nextThread != NULL)
dequeue_from_run_queue(prevThread, targetCPU);
}
@@ -297,11 +297,11 @@ affine_set_thread_priority(struct thread *thread, int32 priority)
// search run queues for the thread
// TODO: keep track of the queue a thread is in (perhaps in a
// data pointer on the thread struct) so we only have to walk
// data pointer on the thread struct) so we only have to walk
// that exact queue to find it.
struct thread *item = NULL, *prev = NULL;
targetCPU = thread->scheduler_data->fLastQueue;
for (item = sRunQueue[targetCPU], prev = NULL; item && item != thread;
item = item->queue_next) {
if (prev)
@@ -404,7 +404,7 @@ affine_reschedule(void)
// skip normal threads sometimes (roughly 20%)
if (_rand() > 0x1a00)
break;
// skip until next lower priority
int32 priority = nextThread->priority;
do {
@@ -412,7 +412,7 @@ affine_reschedule(void)
nextThread = nextThread->queue_next;
} while (nextThread->queue_next != NULL
&& priority == nextThread->queue_next->priority);
}
}
// extract selected thread from the run queue
dequeue_from_run_queue(prevThread, currentCPU);
} else {
@@ -423,7 +423,7 @@ affine_reschedule(void)
nextThread = NULL;
if (nextThread == NULL) {
TRACE(("No threads to steal, grabbing from idle pool\n"));
// no other CPU had anything for us to take,
// no other CPU had anything for us to take,
// grab one from the kernel's idle pool
nextThread = sIdleThreads;
if (nextThread)
@@ -447,7 +447,7 @@ affine_reschedule(void)
// track CPU activity
if (!thread_is_idle_thread(oldThread)) {
bigtime_t activeTime =
bigtime_t activeTime =
(oldThread->kernel_time - oldThread->cpu->last_kernel_time)
+ (oldThread->user_time - oldThread->cpu->last_user_time);
oldThread->cpu->active_time += activeTime;
@@ -468,8 +468,8 @@ affine_reschedule(void)
bigtime_t quantum = kMinThreadQuantum;
// give CPU-bound background threads a larger quantum size
// to minimize unnecessary context switches if the system is idle
if (nextThread->scheduler_data->GetAverageQuantumUsage()
> (kMinThreadQuantum >> 1)
if (nextThread->scheduler_data->GetAverageQuantumUsage()
> (kMinThreadQuantum >> 1)
&& nextThread->priority < B_NORMAL_PRIORITY)
quantum = kMaxThreadQuantum;
timer *quantumTimer = &oldThread->cpu->quantum_timer;
@@ -516,19 +516,17 @@ affine_on_thread_destroy(struct thread* thread)
}
/*! This starts the scheduler. Must be run under the context of
the initial idle thread.
/*! This starts the scheduler. Must be run in the context of the initial idle
thread. Interrupts must be disabled and will be disabled when returning.
*/
static void
affine_start(void)
{
cpu_status state = disable_interrupts();
GRAB_THREAD_LOCK();
affine_reschedule();
RELEASE_THREAD_LOCK();
restore_interrupts(state);
}


@@ -261,7 +261,7 @@ simple_reschedule(void)
// CPU is disabled - service any threads we may have that are pinned,
// otherwise just select the idle thread
while (nextThread && nextThread->priority > B_IDLE_PRIORITY) {
if (nextThread->pinned_to_cpu > 0 &&
if (nextThread->pinned_to_cpu > 0 &&
nextThread->previous_cpu == oldThread->cpu)
break;
prevThread = nextThread;
@@ -401,19 +401,17 @@ simple_on_thread_destroy(struct thread* thread)
}
/*! This starts the scheduler. Must be run under the context of
the initial idle thread.
/*! This starts the scheduler. Must be run in the context of the initial idle
thread. Interrupts must be disabled and will be disabled when returning.
*/
static void
simple_start(void)
{
cpu_status state = disable_interrupts();
GRAB_THREAD_LOCK();
simple_reschedule();
RELEASE_THREAD_LOCK();
restore_interrupts(state);
}