* Changed the affine scheduler to have more or less the same characteristics
  as the BeOS scheduler. This makes MediaPlayer playback much better: before,
  a high priority thread could lose its quantum to a worker thread twice in a
  row with 4% probability (see the sketch below).
* I did not change the simple scheduler yet; this may not be the final
  version either.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@33965 a95241bf-73f2-0310-859d-f6bbb57e9c96
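
A quick sanity check of the 4% figure, as a minimal standalone sketch. It
assumes the scheduler's _rand() helper returns uniform 15-bit values in
[0, 0x7fff], which this diff does not spell out. The old code broke out of the
skip logic when _rand() > 0x1a00, so the queue head lost its turn with
probability (0x1a00 + 1) / 0x8000, independent of the priority gap:

#include <cstdio>

int main()
{
	// Old affine scheduler: the queue head is skipped whenever
	// _rand() <= 0x1a00, assuming _rand() is uniform over [0, 0x7fff].
	const double skipOnce = double(0x1a00 + 1) / 0x8000;	// ~20.3%
	const double skipTwice = skipOnce * skipOnce;			// ~4.1%

	printf("P(skip once) = %.1f%%\n", skipOnce * 100.0);
	printf("P(skip twice in a row) = %.1f%%\n", skipTwice * 100.0);
	return 0;
}

Two consecutive skips come out at roughly 0.2 * 0.2 = 4%, matching the figure
in the first bullet.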
Axel Dörfler 2009-11-09 19:00:42 +00:00
parent 7843ca4c64
commit 7249de5e61


@@ -9,6 +9,7 @@
  * Distributed under the terms of the NewOS License.
  */
+
 /*! The thread scheduler */
@ -49,6 +50,7 @@ const int32 kMaxTrackingQuantums = 5;
const bigtime_t kMinThreadQuantum = 3000; const bigtime_t kMinThreadQuantum = 3000;
const bigtime_t kMaxThreadQuantum = 10000; const bigtime_t kMaxThreadQuantum = 10000;
struct scheduler_thread_data { struct scheduler_thread_data {
scheduler_thread_data(void) scheduler_thread_data(void)
{ {
@@ -63,7 +65,7 @@ struct scheduler_thread_data {
 		fLastQueue = -1;
 		memset(fLastThreadQuantums, 0, sizeof(fLastThreadQuantums));
 	}
-	
+
 	inline void SetQuantum(int32 quantum)
 	{
 		fQuantumAverage -= fLastThreadQuantums[fLastQuantumSlot];
@@ -74,7 +76,7 @@ struct scheduler_thread_data {
 		else
 			fLastQuantumSlot = 0;
 	}
-	
+
 	inline int32 GetAverageQuantumUsage() const
 	{
 		return fQuantumAverage / kMaxTrackingQuantums;
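
The quantum bookkeeping visible in the two hunks above is a fixed-size ring
buffer with an O(1) running sum. A self-contained sketch of the same mechanism
follows; the struct name quantum_tracker is made up here, the lines of
SetQuantum that fall between the hunks are reconstructed, and the kernel's
int32 is stood in for with <cstdint>:

#include <cstdint>
#include <cstring>

typedef int32_t int32;

const int32 kMaxTrackingQuantums = 5;

struct quantum_tracker {
	int32	fLastThreadQuantums[kMaxTrackingQuantums];
	int32	fQuantumAverage;
	int32	fLastQuantumSlot;

	quantum_tracker()
	{
		fQuantumAverage = 0;
		fLastQuantumSlot = 0;
		memset(fLastThreadQuantums, 0, sizeof(fLastThreadQuantums));
	}

	// Replace the oldest sample and keep the running sum up to date,
	// so each update costs O(1) instead of rescanning the buffer.
	inline void SetQuantum(int32 quantum)
	{
		fQuantumAverage -= fLastThreadQuantums[fLastQuantumSlot];
		fLastThreadQuantums[fLastQuantumSlot] = quantum;
		fQuantumAverage += quantum;
		if (fLastQuantumSlot < kMaxTrackingQuantums - 1)
			fLastQuantumSlot++;
		else
			fLastQuantumSlot = 0;
	}

	inline int32 GetAverageQuantumUsage() const
	{
		return fQuantumAverage / kMaxTrackingQuantums;
	}
};

int main()
{
	quantum_tracker tracker;
	const int32 samples[] = { 3000, 4500, 6000, 3000, 9000 };
	for (int32 quantum : samples)
		tracker.SetQuantum(quantum);
	// (3000 + 4500 + 6000 + 3000 + 9000) / 5 == 5100
	return tracker.GetAverageQuantumUsage() == 5100 ? 0 : 1;
}

With kMaxTrackingQuantums = 5 the average lags at most five samples behind,
which keeps the estimate cheap while still smoothing single outliers.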
@@ -198,10 +200,13 @@ affine_enqueue_in_run_queue(struct thread *thread)
 				SMP_MSG_FLAG_ASYNC);
 		}
 	}
 
 	return false;
 }
 
+
+/*! Dequeues the thread after the given \a prevThread from the run queue.
+*/
 static inline struct thread *
 dequeue_from_run_queue(struct thread *prevThread, int32 currentCPU)
 {
@@ -219,6 +224,7 @@ dequeue_from_run_queue(struct thread *prevThread, int32 currentCPU)
 	return resultThread;
 }
 
+
 /*! Looks for a possible thread to grab/run from another CPU.
 	Note: thread lock must be held when entering this function
 */
@@ -393,17 +399,30 @@ affine_reschedule(void)
 		if (nextThread->priority >= B_FIRST_REAL_TIME_PRIORITY)
 			break;
 
-		// skip normal threads sometimes (roughly 20%)
-		if (_rand() > 0x1a00)
-			break;
-
-		// skip until next lower priority
-		int32 priority = nextThread->priority;
-		do {
-			prevThread = nextThread;
-			nextThread = nextThread->queue_next;
-		} while (nextThread->queue_next != NULL
-			&& priority == nextThread->queue_next->priority);
+		// find next thread with lower priority
+		struct thread *lowerNextThread = nextThread->queue_next;
+		struct thread *lowerPrevThread = nextThread;
+		int32 priority = nextThread->priority;
+
+		while (lowerNextThread != NULL
+			&& priority == lowerNextThread->priority) {
+			lowerPrevThread = lowerNextThread;
+			lowerNextThread = lowerNextThread->queue_next;
+		}
+		if (lowerNextThread == NULL)
+			break;
+
+		int32 priorityDiff = priority - lowerNextThread->priority;
+		if (priorityDiff > 15)
+			break;
+
+		// skip normal threads sometimes
+		// (twice as probable per priority level)
+		if ((_rand() >> (15 - priorityDiff)) != 0)
+			break;
+
+		nextThread = lowerNextThread;
+		prevThread = lowerPrevThread;
 	}
 
 	TRACE(("dequeuing thread %ld from cpu %ld\n", nextThread->id,
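
To see what the new skip rule does to the probabilities: shifting a 15-bit
_rand() value right by (15 - priorityDiff) leaves priorityDiff random bits,
and the lower priority thread is only chosen when all of them are zero, i.e.
with probability 2^-priorityDiff. A hedged sketch, again assuming a uniform
15-bit _rand(); the program is illustrative, not kernel code:

#include <cstdio>

int main()
{
	// A thread priorityDiff levels below the queue head is picked only
	// when the top priorityDiff bits of a 15-bit random value are zero.
	const int diffs[] = { 1, 2, 5, 10, 15 };
	for (int diff : diffs) {
		double skip = 1.0 / (1 << diff);	// == 2^-priorityDiff
		printf("priorityDiff %2d: P(skip) = %8.4f%%\n",
			diff, skip * 100.0);
	}
	return 0;
}

So the next lower priority level is still skipped to half the time per check,
but a worker thread 10 levels down gets in under 0.1% of the time, and losing
the quantum twice in a row becomes vanishingly rare; that is the failure mode
the commit message describes.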