make the scheduler work better when a system has many runnable processes,
by making p_estcpu fixpt_t.  PR/31542.

1. schedcpu() decreases p_estcpu of all processes every second, by at
   least 1 regardless of the load average.
2. schedclock() increases p_estcpu of curproc by 1, at about 16 hz.

as a consequence, if a system has more than 16 processes with runnable
lwps, their p_estcpu is unlikely to increase.

by making p_estcpu fixpt_t, we can decay it more slowly when the load
average is high, i.e. solve #1.

i left kinfo_proc2::p_estcpu (i.e. ps -O cpu) scaled because i'm not
aware of any use of its absolute value other than debugging, for which
raw values are more valuable.
parent b755c1973c
commit 8ab7495c2d
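Before the diff, a quick illustration of the decay arithmetic this change is about (not part of the commit). With loadfac = 2 * loadavg (in FSCALE units), each schedcpu() pass multiplies p_estcpu by loadfac / (loadfac + FSCALE); the user-space sketch below reimplements the old and the new decay_cpu() computation and shows how a single schedclock() increment of p_estcpu survives those passes under a load average of 8. FSHIFT/FSCALE, ESTCPU_SHIFT and the decay formula mirror the kernel; main(), the chosen load average and the 10-second loop are invented for the demo.

#include <stdio.h>
#include <stdint.h>

typedef uint32_t fixpt_t;

#define FSHIFT        11
#define FSCALE        (1 << FSHIFT)   /* fixed-point scale, as in <sys/param.h> */
#define ESTCPU_SHIFT  11

#define loadfactor(loadav)    (2 * (loadav))

int
main(void)
{
        fixpt_t loadfac = loadfactor(8 * FSCALE);   /* load average of 8 */
        unsigned int old_estcpu = 1;                /* old code: one schedclock() tick */
        fixpt_t new_estcpu = 1 << ESTCPU_SHIFT;     /* new code: the same tick, scaled */

        for (int sec = 1; sec <= 10; sec++) {
                /* old integer decay: truncation erases the whole tick in one pass */
                old_estcpu = loadfac * old_estcpu / (loadfac + FSCALE);
                /* new fixpt_t decay: only ~6% of the value is lost per pass at this load */
                new_estcpu = (uint64_t)new_estcpu * loadfac / (loadfac + FSCALE);
                printf("after %2ds: old=%u  new=%u (%.3f)\n", sec,
                    old_estcpu, (unsigned)new_estcpu,
                    (double)new_estcpu / (1 << ESTCPU_SHIFT));
        }
        return 0;
}

With the old representation the tick is gone after the first second regardless of load; with the scaled representation it decays gradually, so a busy system can still tell its processes apart.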
--- sys/kern/kern_synch.c
+++ sys/kern/kern_synch.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_synch.c,v 1.152 2005/10/30 20:28:56 yamt Exp $	*/
+/*	$NetBSD: kern_synch.c,v 1.153 2005/11/01 09:07:53 yamt Exp $	*/

 /*-
  * Copyright (c) 1999, 2000, 2004 The NetBSD Foundation, Inc.
@@ -76,7 +76,7 @@
  */

 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.152 2005/10/30 20:28:56 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.153 2005/11/01 09:07:53 yamt Exp $");

 #include "opt_ddb.h"
 #include "opt_ktrace.h"
@@ -169,6 +169,13 @@ roundrobin(struct cpu_info *ci)
 	need_resched(curcpu());
 }

+#define PPQ		(128 / RUNQUE_NQS)	/* priorities per queue */
+#define NICE_WEIGHT	2			/* priorities per nice level */
+
+#define ESTCPU_SHIFT	11
+#define ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
+#define ESTCPULIM(e)	min((e), ESTCPU_MAX)
+
 /*
  * Constants for digital decay and forget:
  *	90% of (p_estcpu) usage in 5 * loadav time
@@ -235,7 +242,25 @@ roundrobin(struct cpu_info *ci)

 /* calculations for digital decay to forget 90% of usage in 5*loadav sec */
 #define loadfactor(loadav)	(2 * (loadav))
-#define decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
+
+static fixpt_t
+decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
+{
+
+	if (estcpu == 0) {
+		return 0;
+	}
+
+#if !defined(_LP64)
+	/* avoid 64bit arithmetics. */
+#define FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
+	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
+		return estcpu * loadfac / (loadfac + FSCALE);
+	}
+#endif /* !defined(_LP64) */
+
+	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
+}

 /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
 fixpt_t ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */
@@ -254,10 +279,6 @@ fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
  */
 #define CCPU_SHIFT	11

-#define PPQ		(128 / RUNQUE_NQS)	/* priorities per queue */
-#define NICE_WEIGHT	2			/* priorities per nice level */
-#define ESTCPULIM(e)	min((e), NICE_WEIGHT * PRIO_MAX - PPQ)
-
 /*
  * Recompute process priorities, every hz ticks.
  */
@@ -269,7 +290,6 @@ schedcpu(void *arg)
 	struct lwp *l;
 	struct proc *p;
 	int s, minslp;
-	unsigned int newcpu;
 	int clkhz;

 	proclist_lock_read();
@@ -311,8 +331,7 @@ schedcpu(void *arg)
 		    (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
 #endif
 		p->p_cpticks = 0;
-		newcpu = (u_int)decay_cpu(loadfac, p->p_estcpu);
-		p->p_estcpu = newcpu;
+		p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);
 		splx(s);	/* Done with the process CPU ticks update */
 		SCHED_LOCK(s);
 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
@@ -341,13 +360,14 @@ schedcpu(void *arg)
 /*
  * Recalculate the priority of a process after it has slept for a while.
  * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
- * least six times the loadfactor will decay p_estcpu to zero.
+ * least six times the loadfactor will decay p_estcpu to less than
+ * (1 << ESTCPU_SHIFT).
  */
 void
 updatepri(struct lwp *l)
 {
 	struct proc *p = l->l_proc;
-	unsigned int newcpu;
+	fixpt_t newcpu;
 	fixpt_t loadfac;

 	SCHED_ASSERT_LOCKED();
@@ -360,7 +380,7 @@ updatepri(struct lwp *l)
 	else {
 		l->l_slptime--;	/* the first time was done in schedcpu */
 		while (newcpu && --l->l_slptime)
-			newcpu = (int) decay_cpu(loadfac, newcpu);
+			newcpu = decay_cpu(loadfac, newcpu);
 		p->p_estcpu = newcpu;
 	}
 	resetpriority(l);
@@ -1119,7 +1139,7 @@ resetpriority(struct lwp *l)

 	SCHED_ASSERT_LOCKED();

-	newpriority = PUSER + p->p_estcpu +
+	newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
 			NICE_WEIGHT * (p->p_nice - NZERO);
 	newpriority = min(newpriority, MAXPRI);
 	l->l_usrpri = newpriority;
@@ -1159,7 +1179,7 @@ schedclock(struct lwp *l)
 	struct proc *p = l->l_proc;
 	int s;

-	p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
+	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
 	SCHED_LOCK(s);
 	resetpriority(l);
 	SCHED_UNLOCK(s);

--- sys/sys/proc.h
+++ sys/sys/proc.h
@@ -1,4 +1,4 @@
-/*	$NetBSD: proc.h,v 1.206 2005/08/28 14:57:18 yamt Exp $	*/
+/*	$NetBSD: proc.h,v 1.207 2005/11/01 09:07:53 yamt Exp $	*/

 /*-
  * Copyright (c) 1986, 1989, 1991, 1993
@@ -201,7 +201,7 @@ struct proc {
 	struct sadata	*p_sa;		/* Scheduler activation information */

 	/* scheduling */
-	u_int		p_estcpu;	/* Time averaged value of p_cpticks XXX belongs in p_startcopy section */
+	fixpt_t		p_estcpu;	/* Time averaged value of p_cpticks XXX belongs in p_startcopy section */
 	int		p_cpticks;	/* Ticks of CPU time */
 	fixpt_t		p_pctcpu;	/* %cpu for this process during p_swtime */
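A note on the #if !defined(_LP64) fast path in the new decay_cpu(): it stays in 32-bit arithmetic as long as estcpu * loadfac cannot overflow fixpt_t, i.e. as long as loadfac <= FIXPT_MAX / ESTCPU_MAX, and otherwise falls back to the 64-bit multiply. The sketch below (not part of the commit) works that bound out numerically; it assumes a 32-bit fixpt_t and restates RUNQUE_NQS = 32 and PRIO_MAX = 20, which are the values NetBSD uses, purely for the demonstration.

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

typedef uint32_t fixpt_t;

#define FSHIFT       11
#define FSCALE       (1 << FSHIFT)
#define RUNQUE_NQS   32                 /* assumed: NetBSD's run queue count */
#define PRIO_MAX     20                 /* assumed: from <sys/resource.h> */
#define PPQ          (128 / RUNQUE_NQS)
#define NICE_WEIGHT  2
#define ESTCPU_SHIFT 11
#define ESTCPU_MAX   ((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
#define FIXPT_MAX    ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))

int
main(void)
{
        /* largest loadfac for which estcpu * loadfac still fits in 32 bits */
        fixpt_t max_loadfac = FIXPT_MAX / ESTCPU_MAX;
        uint64_t worst = (uint64_t)ESTCPU_MAX * max_loadfac;

        printf("ESTCPU_MAX       = %u\n", (unsigned)ESTCPU_MAX);
        printf("max safe loadfac = %u  (load average ~%.1f)\n",
            (unsigned)max_loadfac, (double)max_loadfac / 2 / FSCALE);
        printf("worst-case product %llu %s FIXPT_MAX (%u)\n",
            (unsigned long long)worst,
            worst <= FIXPT_MAX ? "<=" : ">",
            (unsigned)FIXPT_MAX);
        return 0;
}

So with a 32-bit fixpt_t the plain 32-bit multiply is safe up to a load average of roughly 14, and decay_cpu() only pays for 64-bit arithmetic beyond that.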