bonefish+axeld:

* Implemented a slightly more sophisticated version of
  estimate_max_scheduling_latency(): it now goes through a syscall, letting
  the scheduler itself provide the estimate.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@36170 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler 2010-04-11 20:40:58 +00:00
parent 5ffdbfdebb
commit ee0d2be9e4
7 changed files with 114 additions and 28 deletions
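
The libroot wrapper in the last file below now forwards to the new syscall
instead of returning a hard-coded 0, so userland actually gets an estimate
from the running scheduler. A minimal sketch of a caller, assuming a Haiku
build environment (estimate_max_scheduling_latency() is declared in
<scheduler.h>, find_thread() in <OS.h>):

#include <stdio.h>

#include <OS.h>
#include <scheduler.h>

int
main(void)
{
	// Ask for the current thread's worst-case scheduling latency; the
	// kernel side also accepts a negative ID and substitutes the caller.
	bigtime_t latency
		= estimate_max_scheduling_latency(find_thread(NULL));

	// The estimate is in microseconds, derived by the active scheduler
	// from the thread's priority.
	printf("max scheduling latency: %lld us\n", (long long)latency);
	return 0;
}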

View File

@@ -1,6 +1,6 @@
 /*
- * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
- * Copyright 2005, Axel Dörfler, axeld@pinc-software.de.
+ * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2005-2010, Axel Dörfler, axeld@pinc-software.de.
  * Distributed under the terms of the MIT License.
  */
 #ifndef KERNEL_SCHEDULER_H
@@ -22,17 +22,19 @@ struct scheduler_ops {
 	void (*enqueue_in_run_queue)(struct thread* thread);
 	void (*reschedule)(void);
 	void (*set_thread_priority)(struct thread* thread, int32 priority);
-	// called when the thread structure is first created -
-	// initialization of per-thread housekeeping data structures should
-	// be done here
+	bigtime_t (*estimate_max_scheduling_latency)(struct thread* thread);
+
 	void (*on_thread_create)(struct thread* thread);
-	// called when a thread structure is initialized and made ready for
-	// use - should be used to reset the housekeeping data structures
-	// if needed
+		// called when the thread structure is first created -
+		// initialization of per-thread housekeeping data structures should
+		// be done here
 	void (*on_thread_init)(struct thread* thread);
-	// called when a thread structure is freed - freeing up any allocated
-	// mem on the scheduler's part should be done here
+		// called when a thread structure is initialized and made ready for
+		// use - should be used to reset the housekeeping data structures
+		// if needed
 	void (*on_thread_destroy)(struct thread* thread);
+		// called when a thread structure is freed - freeing up any allocated
+		// mem on the scheduler's part should be done here
 
 	void (*start)(void);
 };
@@ -61,6 +63,7 @@ void scheduler_remove_listener(struct SchedulerListener* listener);
 
 void scheduler_init(void);
 
+bigtime_t _user_estimate_max_scheduling_latency(thread_id thread);
 status_t _user_analyze_scheduling(bigtime_t from, bigtime_t until, void* buffer,
 	size_t size, struct scheduling_analysis* analysis);

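Since the new member sits between set_thread_priority and the on_thread_*
hooks, every positional scheduler_ops initializer needs a fourth entry in
matching order; the tables updated further down (kAffineOps, kSimpleOps,
kSimpleSMPOps) do exactly that. A hypothetical module's wiring, sketched
with stub callbacks purely for illustration:

#include <kscheduler.h>

// Stub stand-ins for a real scheduler's callbacks; only the new
// latency hook is fleshed out here.
static void my_enqueue_in_run_queue(struct thread* thread) {}
static void my_reschedule(void) {}
static void my_set_thread_priority(struct thread* thread, int32 priority) {}
static void my_on_thread_create(struct thread* thread) {}
static void my_on_thread_init(struct thread* thread) {}
static void my_on_thread_destroy(struct thread* thread) {}
static void my_start(void) {}

static bigtime_t
my_estimate_max_scheduling_latency(struct thread* thread)
{
	// a real scheduler would derive this from the thread's priority
	// and the current load; a fixed 3000 µs is just a placeholder
	return 3000;
}

static scheduler_ops kMyOps = {
	my_enqueue_in_run_queue,
	my_reschedule,
	my_set_thread_priority,
	my_estimate_max_scheduling_latency,	// the new fourth slot
	my_on_thread_create,
	my_on_thread_init,
	my_on_thread_destroy,
	my_start,
};
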
View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2004-2009, Haiku Inc. All rights reserved.
+ * Copyright 2004-2010, Haiku Inc. All rights reserved.
  * Distributed under the terms of the MIT License.
  */
 #ifndef _SYSTEM_SYSCALLS_H
@@ -177,6 +177,8 @@ extern status_t _kern_unblock_thread(thread_id thread, status_t status);
 extern status_t _kern_unblock_threads(thread_id* threads, uint32 count,
 	status_t status);
+extern bigtime_t _kern_estimate_max_scheduling_latency(thread_id thread);
+
 
 // user/group functions
 extern gid_t _kern_getgid(bool effective);
 extern uid_t _kern_getuid(bool effective);

View File

@@ -1,8 +1,10 @@
 /*
- * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2010, Axel Dörfler, axeld@pinc-software.de.
  * Distributed under the terms of the MIT License.
  */
 
+
 #include <kscheduler.h>
 #include <listeners.h>
 #include <smp.h>
@@ -68,3 +70,22 @@ scheduler_init(void)
 		" <thread> - ID of the thread.\n", 0);
 #endif
 }
+
+
+// #pragma mark - Syscalls
+
+
+bigtime_t
+_user_estimate_max_scheduling_latency(thread_id id)
+{
+	syscall_64_bit_return_value();
+
+	InterruptsSpinLocker locker(gThreadSpinlock);
+
+	struct thread* thread = id < 0
+		? thread_get_current_thread() : thread_get_thread_struct_locked(id);
+	if (thread == NULL)
+		return 0;
+
+	return gScheduler->estimate_max_scheduling_latency(thread);
+}

View File

@@ -1,7 +1,7 @@
 /*
  * Copyright 2009, Rene Gollent, rene@gollent.com.
- * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
- * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de.
+ * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
  * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
  * Distributed under the terms of the MIT License.
  *
@@ -321,6 +321,24 @@ affine_set_thread_priority(struct thread *thread, int32 priority)
 }
 
 
+static bigtime_t
+affine_estimate_max_scheduling_latency(struct thread* thread)
+{
+	// TODO: This is probably meant to be called periodically to return the
+	// current estimate depending on the system usage; we return fixed estimates
+	// per thread priority, though.
+
+	if (thread->priority >= B_REAL_TIME_DISPLAY_PRIORITY)
+		return kMinThreadQuantum / 4;
+	if (thread->priority >= B_DISPLAY_PRIORITY)
+		return kMinThreadQuantum;
+	if (thread->priority < B_NORMAL_PRIORITY)
+		return 2 * kMaxThreadQuantum;
+
+	return 2 * kMinThreadQuantum;
+}
+
+
 static void
 context_switch(struct thread *fromThread, struct thread *toThread)
 {
@@ -556,6 +574,7 @@ static scheduler_ops kAffineOps = {
 	affine_enqueue_in_run_queue,
 	affine_reschedule,
 	affine_set_thread_priority,
+	affine_estimate_max_scheduling_latency,
 	affine_on_thread_create,
 	affine_on_thread_init,
 	affine_on_thread_destroy,

View File

@@ -1,6 +1,6 @@
 /*
- * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
- * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
+ * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
  * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
  * Distributed under the terms of the MIT License.
  *
@@ -36,6 +36,9 @@
 #endif
 
+
+const bigtime_t kThreadQuantum = 3000;
+
 
 // The run queue. Holds the threads ready to run ordered by priority.
 static struct thread *sRunQueue = NULL;
 
@@ -160,6 +163,22 @@ simple_set_thread_priority(struct thread *thread, int32 priority)
 }
 
 
+static bigtime_t
+simple_estimate_max_scheduling_latency(struct thread* thread)
+{
+	// TODO: This is probably meant to be called periodically to return the
+	// current estimate depending on the system usage; we return fixed estimates
+	// per thread priority, though.
+
+	if (thread->priority >= B_REAL_TIME_DISPLAY_PRIORITY)
+		return kThreadQuantum / 4;
+	if (thread->priority >= B_DISPLAY_PRIORITY)
+		return kThreadQuantum;
+
+	return 2 * kThreadQuantum;
+}
+
+
 static void
 context_switch(struct thread *fromThread, struct thread *toThread)
 {
@@ -322,8 +341,8 @@ simple_reschedule(void)
 	}
 
 	if (nextThread != oldThread || oldThread->cpu->preempted) {
-		bigtime_t quantum = 3000;	// ToDo: calculate quantum!
-		timer *quantumTimer = &oldThread->cpu->quantum_timer;
+		bigtime_t quantum = kThreadQuantum;	// TODO: calculate quantum?
+		timer* quantumTimer = &oldThread->cpu->quantum_timer;
 
 		if (!oldThread->cpu->preempted)
 			cancel_timer(quantumTimer);
@@ -377,6 +396,7 @@ static scheduler_ops kSimpleOps = {
 	simple_enqueue_in_run_queue,
 	simple_reschedule,
 	simple_set_thread_priority,
+	simple_estimate_max_scheduling_latency,
 	simple_on_thread_create,
 	simple_on_thread_init,
 	simple_on_thread_destroy,

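With the kThreadQuantum constant this file just gained (3000 µs), the fixed
estimates above come out to 750, 3000, and 6000 µs. A small standalone
sketch mirroring simple_estimate_max_scheduling_latency(); the priority
thresholds are copied from Haiku's OS.h so it builds without the Haiku
headers:

#include <stdio.h>

// Priority thresholds as defined in Haiku's OS.h.
#define B_DISPLAY_PRIORITY				15
#define B_REAL_TIME_DISPLAY_PRIORITY	100

static const long long kThreadQuantum = 3000;	// microseconds

static long long
estimate(int priority)
{
	if (priority >= B_REAL_TIME_DISPLAY_PRIORITY)
		return kThreadQuantum / 4;	// 750 µs
	if (priority >= B_DISPLAY_PRIORITY)
		return kThreadQuantum;		// 3000 µs
	return 2 * kThreadQuantum;		// 6000 µs
}

int
main(void)
{
	int priorities[] = { 5, 10, 15, 20, 100, 120 };
	for (int i = 0; i < 6; i++) {
		printf("priority %3d -> %lld us\n", priorities[i],
			estimate(priorities[i]));
	}
	return 0;
}
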
View File

@@ -1,6 +1,6 @@
 /*
- * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
- * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
+ * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
  * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
  * Distributed under the terms of the MIT License.
  *
@@ -37,6 +37,9 @@
 #endif
 
+
+const bigtime_t kThreadQuantum = 3000;
+
 
 // The run queue. Holds the threads ready to run ordered by priority.
 static struct thread *sRunQueue = NULL;
 static int32 sCPUCount = 1;
@@ -225,6 +228,22 @@ set_thread_priority(struct thread *thread, int32 priority)
 }
 
 
+static bigtime_t
+estimate_max_scheduling_latency(struct thread* thread)
+{
+	// TODO: This is probably meant to be called periodically to return the
+	// current estimate depending on the system usage; we return fixed estimates
+	// per thread priority, though.
+
+	if (thread->priority >= B_REAL_TIME_DISPLAY_PRIORITY)
+		return kThreadQuantum / 4;
+	if (thread->priority >= B_DISPLAY_PRIORITY)
+		return kThreadQuantum;
+
+	return 2 * kThreadQuantum;
+}
+
+
 static void
 context_switch(struct thread *fromThread, struct thread *toThread)
 {
@@ -367,7 +386,7 @@ reschedule(void)
 			if (nextThread->cpu
 				&& nextThread->cpu->cpu_num != oldThread->cpu->cpu_num) {
 				panic("thread in run queue that's still running on another CPU!\n");
-				// ToDo: remove this check completely when we're sure that this
+				// TODO: remove this check completely when we're sure that this
 				// cannot happen anymore.
 				prevThread = nextThread;
 				nextThread = nextThread->queue_next;
@@ -415,8 +434,8 @@ reschedule(void)
 	}
 
 	if (nextThread != oldThread || oldThread->cpu->preempted) {
-		bigtime_t quantum = 3000;	// ToDo: calculate quantum!
-		timer *quantumTimer = &oldThread->cpu->quantum_timer;
+		bigtime_t quantum = kThreadQuantum;	// TODO: calculate quantum?
+		timer* quantumTimer = &oldThread->cpu->quantum_timer;
 
 		if (!oldThread->cpu->preempted)
 			cancel_timer(quantumTimer);
@@ -470,6 +489,7 @@ static scheduler_ops kSimpleSMPOps = {
 	enqueue_in_run_queue,
 	reschedule,
 	set_thread_priority,
+	estimate_max_scheduling_latency,
 	on_thread_create,
 	on_thread_init,
 	on_thread_destroy,

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright 2004-2008, Haiku. All rights reserved.
+ * Copyright 2004-2010, Haiku. All rights reserved.
  * Distributed under the terms of the MIT License.
  *
 * Authors:
@@ -10,6 +10,8 @@
 
 #include <scheduler.h>
 
+#include <syscalls.h>
+
 
 static struct {
 	uint32 what;
@@ -39,6 +41,8 @@ suggest_thread_priority(uint32 what, int32 period, bigtime_t jitter,
 	int32 priority = what == B_DEFAULT_MEDIA_PRIORITY ? 0x0a : 0;
 		// default priority
 
+	// TODO: this needs kernel support, and is a pretty simplistic solution
+
 	for (i = 0; sWhatPriorityArray[i].what != (uint32)-1; i ++) {
 		if ((what & sWhatPriorityArray[i].what) != 0) {
 			priority = sWhatPriorityArray[i].priority;
@@ -53,9 +57,6 @@ suggest_thread_priority(uint32 what, int32 period, bigtime_t jitter,
 
 bigtime_t
 estimate_max_scheduling_latency(thread_id thread)
 {
-	if (thread == -1)
-		thread = find_thread(NULL);
-
-	return 0;
+	return _kern_estimate_max_scheduling_latency(thread);
 }
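
With the stub gone, the intended use of this API (inherited from BeOS)
becomes meaningful: a thread can wake up early enough that worst-case
scheduling jitter will not make it miss a deadline. A sketch of that
pattern on Haiku; wait_for_deadline() is a hypothetical helper, while
snooze_until() and B_SYSTEM_TIMEBASE are the regular <OS.h> facilities:

#include <OS.h>
#include <scheduler.h>

// Sleep until just before `deadline` (system time, microseconds),
// leaving room for the scheduler's worst-case dispatch latency.
static void
wait_for_deadline(bigtime_t deadline)
{
	bigtime_t latency
		= estimate_max_scheduling_latency(find_thread(NULL));
	snooze_until(deadline - latency, B_SYSTEM_TIMEBASE);
}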