* CPUs can now be disabled - that is, they will keep idling.

* Added syscalls _kern_set_cpu_enabled() and _kern_cpu_enabled() (see the
  usage sketch after this list).
* scheduler.c::sRunQueue::tail was not maintained at all; changed sRunQueue to
  be a simple thread pointer instead of a struct thread_queue.
* Turns out we're monitoring CPU activity incorrectly when we've got more
  than one CPU.
* Renamed the global CPU array from "cpu" to "gCPU".
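
For reference, a minimal userland sketch of the new interface (not part of
this change set): it goes through the legacy BeOS-style wrappers
_kget_cpu_state_()/_kset_cpu_state_(), which this commit reroutes to the new
syscalls (see the last changed file below). The wrapper names and semantics
come from this commit; the prototypes are copied from the wrappers, while the
command-line handling and messages are made up for illustration, and the
example assumes libroot exports these symbols as before.

	#include <stdio.h>
	#include <stdlib.h>

	/* Prototypes copied from the libroot wrappers changed below. */
	int _kget_cpu_state_(int cpuNum);
	int _kset_cpu_state_(int cpuNum, int state);

	int
	main(int argc, char **argv)
	{
		int cpu = argc > 1 ? atoi(argv[1]) : 1;

		/* The kernel refuses to disable the last enabled CPU
		   (B_NOT_ALLOWED), so this call may legitimately fail. */
		if (_kset_cpu_state_(cpu, 0) != 0)	/* 0 == B_OK */
			fprintf(stderr, "could not disable CPU %d\n", cpu);

		printf("CPU %d is now %s\n", cpu,
			_kget_cpu_state_(cpu) ? "enabled" : "disabled");
		return 0;
	}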


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@16186 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler 2006-02-01 16:09:05 +00:00
parent 0f11a95a97
commit df213cedbc
6 changed files with 100 additions and 34 deletions

View File

@@ -1,5 +1,5 @@
/*
* Copyright 2002-2005, Haiku Inc. All rights reserved.
* Copyright 2002-2006, Haiku Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Copyright 2002, Travis Geiselbrecht. All rights reserved.
@@ -23,11 +23,12 @@ typedef union cpu_ent {
// thread.c: used to force a reschedule at quantum expiration time
int preempted;
timer quantum_timer;
bool disabled;
} info;
} cpu_ent __attribute__((aligned(64)));
extern cpu_ent cpu[MAX_BOOT_CPUS];
extern cpu_ent gCPU[MAX_BOOT_CPUS];
#ifdef __cplusplus
@@ -40,9 +41,11 @@ status_t cpu_init_post_vm(struct kernel_args *args);
status_t cpu_init_post_modules(struct kernel_args *args);
cpu_ent *get_cpu_struct(void);
extern inline cpu_ent *get_cpu_struct(void) { return &cpu[smp_get_current_cpu()]; }
extern inline cpu_ent *get_cpu_struct(void) { return &gCPU[smp_get_current_cpu()]; }
void _user_clear_caches(void *address, size_t length, uint32 flags);
bool _user_cpu_enabled(int32 cpu);
status_t _user_set_cpu_enabled(int32 cpu, bool enabled);
#ifdef __cplusplus
}

View File

@@ -300,6 +300,8 @@ extern area_id _kern_register_messaging_service(sem_id lockingSem,
extern status_t _kern_unregister_messaging_service();
extern void _kern_clear_caches(void *address, size_t length, uint32 flags);
extern bool _kern_cpu_enabled(int32 cpu);
extern status_t _kern_set_cpu_enabled(int32 cpu, bool enabled);
#ifdef __INTEL__
// our only x86 only syscall

View File

@@ -1,5 +1,5 @@
/*
* Copyright 2002-2005, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2002-2006, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2002, Travis Geiselbrecht. All rights reserved.
@@ -18,7 +18,9 @@
/* global per-cpu structure */
cpu_ent cpu[MAX_BOOT_CPUS];
cpu_ent gCPU[MAX_BOOT_CPUS];
static spinlock sSetCpuLock;
status_t
@@ -26,9 +28,9 @@ cpu_init(kernel_args *args)
{
int i;
memset(cpu, 0, sizeof(cpu));
memset(gCPU, 0, sizeof(gCPU));
for (i = 0; i < MAX_BOOT_CPUS; i++) {
cpu[i].info.cpu_num = i;
gCPU[i].info.cpu_num = i;
}
return arch_cpu_init(args);
@@ -72,3 +74,49 @@ _user_clear_caches(void *address, size_t length, uint32 flags)
clear_caches(address, length, flags);
}
bool
_user_cpu_enabled(int32 cpu)
{
if (cpu < 0 || cpu >= smp_get_num_cpus())
return B_BAD_VALUE;
return !gCPU[cpu].info.disabled;
}
status_t
_user_set_cpu_enabled(int32 cpu, bool enabled)
{
status_t status = B_OK;
cpu_status state;
int32 i, count;
if (cpu < 0 || cpu >= smp_get_num_cpus())
return B_BAD_VALUE;
// We need to lock here to make sure that no one can disable
// the last CPU
state = disable_interrupts();
acquire_spinlock(&sSetCpuLock);
if (!enabled) {
// check if this is the last CPU to be disabled
for (i = 0, count = 0; i < smp_get_num_cpus(); i++) {
if (!gCPU[i].info.disabled)
count++;
}
if (count == 1)
status = B_NOT_ALLOWED;
}
if (status == B_OK)
gCPU[cpu].info.disabled = !enabled;
release_spinlock(&sSetCpuLock);
restore_interrupts(state);
return status;
}

View File

@@ -35,7 +35,7 @@ static int dump_run_queue(int argc, char **argv);
static int _rand(void);
// The run queue. Holds the threads ready to run ordered by priority.
static struct thread_queue sRunQueue = {NULL, NULL};
static struct thread *sRunQueue = NULL;
static int
@@ -56,7 +56,7 @@ dump_run_queue(int argc, char **argv)
{
struct thread *thread;
thread = sRunQueue.head;
thread = sRunQueue;
if (!thread)
dprintf("Run queue is empty!\n");
else {
@@ -79,20 +79,20 @@ scheduler_enqueue_in_run_queue(struct thread *thread)
{
struct thread *curr, *prev;
for (curr = sRunQueue.head, prev = NULL; curr
for (curr = sRunQueue, prev = NULL; curr
&& curr->priority >= thread->next_priority;
curr = curr->queue_next) {
if (prev)
prev = prev->queue_next;
else
prev = sRunQueue.head;
prev = sRunQueue;
}
thread->queue_next = curr;
if (prev)
prev->queue_next = thread;
else
sRunQueue.head = thread;
sRunQueue = thread;
thread->next_priority = thread->priority;
}
@@ -108,11 +108,11 @@ scheduler_remove_from_run_queue(struct thread *thread)
struct thread *item, *prev;
// find thread in run queue
for (item = sRunQueue.head, prev = NULL; item && item != thread; item = item->queue_next) {
for (item = sRunQueue, prev = NULL; item && item != thread; item = item->queue_next) {
if (prev)
prev = prev->queue_next;
else
prev = sRunQueue.head;
prev = sRunQueue;
}
ASSERT(item == thread);
@@ -120,7 +120,7 @@ scheduler_remove_from_run_queue(struct thread *thread)
if (prev)
prev->queue_next = item->queue_next;
else
sRunQueue.head = item->queue_next;
sRunQueue = item->queue_next;
}
@@ -182,24 +182,33 @@ scheduler_reschedule(void)
}
oldThread->state = oldThread->next_state;
// select next thread from the run queue
nextThread = sRunQueue.head;
nextThread = sRunQueue;
prevThread = NULL;
while (nextThread && nextThread->priority > B_IDLE_PRIORITY) {
// always extract real time threads
if (nextThread->priority >= B_FIRST_REAL_TIME_PRIORITY)
break;
// never skip last non-idle normal thread
if (nextThread->queue_next && nextThread->queue_next->priority == B_IDLE_PRIORITY)
break;
if (oldThread->cpu->info.disabled) {
// just select an idle thread
while (nextThread && nextThread->priority > B_IDLE_PRIORITY) {
prevThread = nextThread;
nextThread = nextThread->queue_next;
}
} else {
// select next thread from the run queue
while (nextThread && nextThread->priority > B_IDLE_PRIORITY) {
// always extract real time threads
if (nextThread->priority >= B_FIRST_REAL_TIME_PRIORITY)
break;
// never skip last non-idle normal thread
if (nextThread->queue_next && nextThread->queue_next->priority == B_IDLE_PRIORITY)
break;
// skip normal threads sometimes
if (_rand() > 0x3000)
break;
// skip normal threads sometimes
if (_rand() > 0x3000)
break;
prevThread = nextThread;
nextThread = nextThread->queue_next;
prevThread = nextThread;
nextThread = nextThread->queue_next;
}
}
if (!nextThread)
@@ -209,7 +218,7 @@ scheduler_reschedule(void)
if (prevThread)
prevThread->queue_next = nextThread->queue_next;
else
sRunQueue.head = nextThread->queue_next;
sRunQueue = nextThread->queue_next;
nextThread->state = B_THREAD_RUNNING;
nextThread->next_state = B_THREAD_READY;

View File

@@ -1298,6 +1298,8 @@ thread_get_active_cpu_time(int32 cpuNum)
state = disable_interrupts();
GRAB_THREAD_LOCK();
// TODO: this is wrong - the idle threads are arbitrarily executed by the CPUs
// there is no CPU affinity!
activeTime -= sIdleThreads[cpuNum]->kernel_time;
RELEASE_THREAD_LOCK();
@@ -1382,7 +1384,7 @@ thread_init(kernel_args *args)
if (i == 0)
arch_thread_set_current_thread(thread);
thread->cpu = &cpu[i];
thread->cpu = &gCPU[i];
}
sUsedThreads = args->num_cpus;

View File

@@ -1,5 +1,5 @@
/*
* Copyright 2005, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Copyright 2005-2006, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Distributed under the terms of the MIT License.
*/
@@ -8,6 +8,8 @@
#include <SupportDefs.h>
#include <syscalls.h>
int _kset_mon_limit_(int num);
int _kset_fd_limit_(int num);
@@ -32,13 +34,13 @@ _kset_fd_limit_(int num)
int
_kget_cpu_state_(int cpuNum)
{
return 1;
return _kern_cpu_enabled(cpuNum);
}
int
_kset_cpu_state_(int cpuNum, int state)
{
return state ? B_OK : B_ERROR;
return _kern_set_cpu_enabled(cpuNum, state != 0);
}