Merge run time accounting changes from the vmlocking branch. These make
the LWP "start time" per-thread instead of per-CPU.
ad 2007-10-08 20:06:17 +00:00
parent 0ee29bef4d
commit 36a1712707
17 changed files with 163 additions and 142 deletions
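In short, elapsed run time is now accumulated against a per-LWP start time (l_stime) rather than the per-CPU spc_runtime. A minimal standalone sketch of the new model, with simplified types in place of the kernel's own structures (lwp_times and fold_runtime are illustrative names, not kernel symbols):

/*
 * Illustrative sketch only: accumulate the interval since the
 * per-LWP start time into the LWP's total run time.
 */
#include <sys/time.h>

struct lwp_times {
	struct timeval l_rtime;	/* accumulated run time */
	struct timeval l_stime;	/* start of current ONPROC interval */
};

/* Fold the running interval into l_rtime; 'now' comes from microtime(). */
static void
fold_runtime(struct lwp_times *l, const struct timeval *now)
{
	long s = l->l_rtime.tv_sec + (now->tv_sec - l->l_stime.tv_sec);
	long u = l->l_rtime.tv_usec + (now->tv_usec - l->l_stime.tv_usec);

	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	l->l_rtime.tv_sec = s;
	l->l_rtime.tv_usec = u;
}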

View File

@ -1,4 +1,4 @@
/* $NetBSD: cpu.c,v 1.76 2007/07/21 11:59:55 tsutsui Exp $ */
/* $NetBSD: cpu.c,v 1.77 2007/10/08 20:06:17 ad Exp $ */
/*-
* Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
@ -66,7 +66,7 @@
#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.76 2007/07/21 11:59:55 tsutsui Exp $");
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.77 2007/10/08 20:06:17 ad Exp $");
#include "opt_ddb.h"
#include "opt_multiprocessor.h"
@ -564,9 +564,6 @@ cpu_hatch(struct cpu_info *ci)
alpha_pal_imb();
cc_calibrate_cpu(ci);
/* Initialize our base "runtime". */
microtime(&ci->ci_schedstate.spc_runtime);
}
int

View File

@ -1,4 +1,4 @@
/* $NetBSD: svr4_machdep.c,v 1.85 2007/06/23 16:50:23 dsl Exp $ */
/* $NetBSD: svr4_machdep.c,v 1.86 2007/10/08 20:06:18 ad Exp $ */
/*-
* Copyright (c) 1994, 2000 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: svr4_machdep.c,v 1.85 2007/06/23 16:50:23 dsl Exp $");
__KERNEL_RCSID(0, "$NetBSD: svr4_machdep.c,v 1.86 2007/10/08 20:06:18 ad Exp $");
#if defined(_KERNEL_OPT)
#include "opt_vm86.h"
@ -517,7 +517,6 @@ svr4_fasttrap(frame)
{
struct lwp *l = curlwp;
struct proc *p = l->l_proc;
struct schedstate_percpu *spc;
struct timeval tv;
struct timespec ts;
uint64_t tm;
@ -557,14 +556,12 @@ svr4_fasttrap(frame)
* using the LWP's real time augmented with its current
* runtime is the best we can do.
*/
spc = &curcpu()->ci_schedstate;
microtime(&tv);
tm = (l->l_rtime.tv_sec + tv.tv_sec -
spc->spc_runtime.tv_sec) * 1000000ull;
l->l_stime.tv_sec) * 1000000ull;
tm += l->l_rtime.tv_usec + tv.tv_usec;
tm -= spc->spc_runtime.tv_usec;
tm -= l->l_stime.tv_usec;
tm *= 1000u;
/* XXX: dsl - I would have expected the msb in %edx */
frame.tf_edx = tm & 0xffffffffu;
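The same arithmetic recurs in the sparc and sparc64 SVR4 traps further down. As a standalone sketch (lwp_hrtime_approx is an illustrative name, not a kernel symbol; the field layout follows the diff), the nanosecond time value is computed as:

/*
 * Illustrative sketch only: the LWP's accumulated run time plus the
 * still-open time slice, converted to nanoseconds.
 */
#include <stdint.h>
#include <sys/time.h>

static uint64_t
lwp_hrtime_approx(const struct timeval *rtime, const struct timeval *stime,
    const struct timeval *now)
{
	uint64_t tm;

	tm = (rtime->tv_sec + now->tv_sec - stime->tv_sec) * 1000000ull;
	tm += rtime->tv_usec + now->tv_usec;
	tm -= stime->tv_usec;
	return tm * 1000u;	/* microseconds -> nanoseconds */
}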

View File

@ -1,4 +1,4 @@
/* $NetBSD: cpu.c,v 1.205 2007/08/01 00:06:32 macallan Exp $ */
/* $NetBSD: cpu.c,v 1.206 2007/10/08 20:06:18 ad Exp $ */
/*
* Copyright (c) 1996
@ -52,7 +52,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.205 2007/08/01 00:06:32 macallan Exp $");
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.206 2007/10/08 20:06:18 ad Exp $");
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
@ -580,12 +580,6 @@ cpu_boot_secondary_processors(void)
(cpi->flags & CPUFLG_HATCHED) == 0)
continue;
/*
* XXX - the first process run on this CPU will be charged
* with the leading idle time.
*/
getmicrotime(&cpi->ci_schedstate.spc_runtime);
printf(" cpu%d", cpi->ci_cpuid);
cpi->flags |= CPUFLG_READY;
cpu_ready_mask |= (1 << n);

View File

@ -1,4 +1,4 @@
/* $NetBSD: svr4_machdep.c,v 1.60 2007/03/04 09:35:03 macallan Exp $ */
/* $NetBSD: svr4_machdep.c,v 1.61 2007/10/08 20:06:18 ad Exp $ */
/*-
* Copyright (c) 1994 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: svr4_machdep.c,v 1.60 2007/03/04 09:35:03 macallan Exp $");
__KERNEL_RCSID(0, "$NetBSD: svr4_machdep.c,v 1.61 2007/10/08 20:06:18 ad Exp $");
#if defined(_KERNEL_OPT)
#include "opt_kgdb.h"
@ -534,7 +534,6 @@ svr4_trap(int type, struct lwp *l)
{
int n;
struct trapframe *tf = l->l_md.md_tf;
struct schedstate_percpu *spc;
struct timespec ts;
struct timeval tv;
uint64_t tm;
@ -588,15 +587,13 @@ svr4_trap(int type, struct lwp *l)
* for now using the process's real time augmented with its
* current runtime is the best we can do.
*/
spc = &curcpu()->ci_schedstate;
microtime(&tv); /* XXX should move on to struct bintime */
tm = (l->l_proc->p_rtime.tv_sec + tv.tv_sec -
spc->spc_runtime.tv_sec) * (uint64_t)1000000u;
tm += l->l_proc->p_rtime.tv_usec + tv.tv_usec;
tm -= spc->spc_runtime.tv_usec;
tm *= 1000;
tm = (l->l_rtime.tv_sec + tv.tv_sec -
l->l_stime.tv_sec) * 1000000ull;
tm += l->l_rtime.tv_usec + tv.tv_usec;
tm -= l->l_stime.tv_usec;
tm *= 1000u;
tf->tf_out[0] = (tm >> 32) & 0x00000000ffffffffUL;
tf->tf_out[1] = tm & 0x00000000ffffffffUL;
break;

View File

@ -1,4 +1,4 @@
/* $NetBSD: svr4_32_machdep.c,v 1.28 2007/03/16 22:24:49 dsl Exp $ */
/* $NetBSD: svr4_32_machdep.c,v 1.29 2007/10/08 20:06:18 ad Exp $ */
/*-
* Copyright (c) 1994 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: svr4_32_machdep.c,v 1.28 2007/03/16 22:24:49 dsl Exp $");
__KERNEL_RCSID(0, "$NetBSD: svr4_32_machdep.c,v 1.29 2007/10/08 20:06:18 ad Exp $");
#ifndef _LKM
#include "opt_ddb.h"
@ -592,7 +592,6 @@ svr4_32_trap(int type, struct lwp *l)
int n;
struct proc *p = l->l_proc;
struct trapframe64 *tf = l->l_md.md_tf;
struct schedstate_percpu *spc;
struct timespec ts;
struct timeval tv;
uint64_t tm;
@ -640,15 +639,13 @@ svr4_32_trap(int type, struct lwp *l)
* for now using the process's real time augmented with its
* current runtime is the best we can do.
*/
spc = &curcpu()->ci_schedstate;
microtime(&tv);
tm = (p->p_rtime.tv_sec + tv.tv_sec -
spc->spc_runtime.tv_sec) * (uint64_t)1000000u;
tm += p->p_rtime.tv_usec + tv.tv_usec;
tm -= spc->spc_runtime.tv_usec;
tm *= 1000;
tm = (l->l_rtime.tv_sec + tv.tv_sec -
l->l_stime.tv_sec) * 1000000ull;
tm += l->l_rtime.tv_usec + tv.tv_usec;
tm -= l->l_stime.tv_usec;
tm *= 1000u;
tf->tf_out[0] = (tm >> 32) & 0x00000000ffffffffUL;
tf->tf_out[1] = tm & 0x00000000ffffffffUL;
break;

View File

@ -1,4 +1,4 @@
/* $NetBSD: svr4_machdep.c,v 1.44 2007/03/04 07:54:07 christos Exp $ */
/* $NetBSD: svr4_machdep.c,v 1.45 2007/10/08 20:06:18 ad Exp $ */
/*-
* Copyright (c) 1994 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: svr4_machdep.c,v 1.44 2007/03/04 07:54:07 christos Exp $");
__KERNEL_RCSID(0, "$NetBSD: svr4_machdep.c,v 1.45 2007/10/08 20:06:18 ad Exp $");
#ifndef _LKM
#include "opt_ddb.h"
@ -620,7 +620,6 @@ svr4_trap(int type, struct lwp *l)
struct proc *p = l->l_proc;
int n;
struct trapframe64 *tf = l->l_md.md_tf;
struct schedstate_percpu *spc;
struct timespec ts;
struct timeval tv;
uint64_t tm;
@ -668,15 +667,13 @@ svr4_trap(int type, struct lwp *l)
* for now using the process's real time augmented with its
* current runtime is the best we can do.
*/
spc = &curcpu()->ci_schedstate;
microtime(&tv);
tm = (p->p_rtime.tv_sec + tv.tv_sec -
spc->spc_runtime.tv_sec) * (uint64_t)1000000u;
tm += p->p_rtime.tv_usec + tv.tv_usec;
tm -= spc->spc_runtime.tv_usec;
tm *= 1000;
tm = (l->l_rtime.tv_sec + tv.tv_sec -
l->l_stime.tv_sec) * 1000000ull;
tm += l->l_rtime.tv_usec + tv.tv_usec;
tm -= l->l_stime.tv_usec;
tm *= 1000u;
tf->tf_out[0] = (tm >> 32) & 0x00000000ffffffffUL;
tf->tf_out[1] = tm & 0x00000000ffffffffUL;
break;

View File

@ -1,4 +1,4 @@
/* $NetBSD: cpu.c,v 1.12 2007/09/26 19:48:38 ad Exp $ */
/* $NetBSD: cpu.c,v 1.13 2007/10/08 20:06:18 ad Exp $ */
/* NetBSD: cpu.c,v 1.18 2004/02/20 17:35:01 yamt Exp */
/*-
@ -72,7 +72,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.12 2007/09/26 19:48:38 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.13 2007/10/08 20:06:18 ad Exp $");
#include "opt_ddb.h"
#include "opt_multiprocessor.h"
@ -708,7 +708,6 @@ cpu_hatch(void *v)
if (ci->ci_feature_flags & CPUID_TSC)
cc_microset(ci);
#endif
microtime(&ci->ci_schedstate.spc_runtime);
splx(s);
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: init_main.c,v 1.319 2007/10/08 15:12:06 ad Exp $ */
/* $NetBSD: init_main.c,v 1.320 2007/10/08 20:06:18 ad Exp $ */
/*
* Copyright (c) 1982, 1986, 1989, 1991, 1992, 1993
@ -71,7 +71,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: init_main.c,v 1.319 2007/10/08 15:12:06 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: init_main.c,v 1.320 2007/10/08 20:06:18 ad Exp $");
#include "opt_ipsec.h"
#include "opt_multiprocessor.h"
@ -627,13 +627,13 @@ main(void)
p->p_stats->p_start = time;
LIST_FOREACH(l, &p->p_lwps, l_sibling) {
lwp_lock(l);
l->l_cpu->ci_schedstate.spc_runtime = time;
l->l_rtime.tv_sec = l->l_rtime.tv_usec = 0;
lwp_unlock(l);
}
mutex_exit(&p->p_smutex);
}
mutex_exit(&proclist_lock);
curlwp->l_stime = time;
for (CPU_INFO_FOREACH(cii, ci)) {
ci->ci_schedstate.spc_lastmod = time_second;

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern_idle.c,v 1.5 2007/10/01 22:14:23 ad Exp $ */
/* $NetBSD: kern_idle.c,v 1.6 2007/10/08 20:06:19 ad Exp $ */
/*-
* Copyright (c)2002, 2006, 2007 YAMAMOTO Takashi,
@ -28,7 +28,7 @@
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.5 2007/10/01 22:14:23 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.6 2007/10/08 20:06:19 ad Exp $");
#include <sys/param.h>
#include <sys/cpu.h>
@ -49,6 +49,9 @@ idle_loop(void *dummy)
struct cpu_info *ci = curcpu();
struct lwp *l = curlwp;
/* Update start time for this thread. */
microtime(&l->l_stime);
KERNEL_UNLOCK_ALL(l, NULL);
l->l_usrpri = PIDLELWP;
l->l_priority = l->l_usrpri;

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern_resource.c,v 1.122 2007/09/29 12:22:31 dsl Exp $ */
/* $NetBSD: kern_resource.c,v 1.123 2007/10/08 20:06:19 ad Exp $ */
/*-
* Copyright (c) 1982, 1986, 1991, 1993
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_resource.c,v 1.122 2007/09/29 12:22:31 dsl Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_resource.c,v 1.123 2007/10/08 20:06:19 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -421,12 +421,7 @@ calcru(struct proc *p, struct timeval *up, struct timeval *sp,
sec++;
usec -= 1000000;
}
if (l->l_cpu == curcpu()) {
struct schedstate_percpu *spc;
KDASSERT(l->l_cpu != NULL);
spc = &l->l_cpu->ci_schedstate;
if ((l->l_flag & LW_RUNNING) != 0) {
/*
* Adjust for the current time slice. This is
* actually fairly important since the error
@ -435,8 +430,8 @@ calcru(struct proc *p, struct timeval *up, struct timeval *sp,
* error.
*/
microtime(&tv);
sec += tv.tv_sec - spc->spc_runtime.tv_sec;
usec += tv.tv_usec - spc->spc_runtime.tv_usec;
sec += tv.tv_sec - l->l_stime.tv_sec;
usec += tv.tv_usec - l->l_stime.tv_usec;
if (usec >= 1000000) {
sec++;
usec -= 1000000;
@ -1047,7 +1042,8 @@ again:
mutex_exit(&uihashtbl_lock);
/* Must not be called from interrupt context. */
newuip = malloc(sizeof(*uip), M_PROC, M_WAITOK | M_ZERO);
mutex_init(&newuip->ui_lock, MUTEX_DRIVER, IPL_SOFTNET);
/* XXX this could be IPL_SOFTNET */
mutex_init(&newuip->ui_lock, MUTEX_DRIVER, IPL_VM);
goto again;
}
uip = newuip;

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern_softint.c,v 1.2 2007/10/08 15:51:03 ad Exp $ */
/* $NetBSD: kern_softint.c,v 1.3 2007/10/08 20:06:19 ad Exp $ */
/*-
* Copyright (c) 2007 The NetBSD Foundation, Inc.
@ -41,11 +41,13 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.2 2007/10/08 15:51:03 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.3 2007/10/08 20:06:19 ad Exp $");
#include <sys/param.h>
#include <sys/intr.h>
u_int softint_timing;
/*
* softint_init:
*

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern_synch.c,v 1.198 2007/10/03 11:20:09 ad Exp $ */
/* $NetBSD: kern_synch.c,v 1.199 2007/10/08 20:06:19 ad Exp $ */
/*-
* Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
@ -75,7 +75,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.198 2007/10/03 11:20:09 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.199 2007/10/08 20:06:19 ad Exp $");
#include "opt_kstack.h"
#include "opt_lockdebug.h"
@ -98,6 +98,7 @@ __KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.198 2007/10/03 11:20:09 ad Exp $");
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <uvm/uvm_extern.h>
@ -315,23 +316,18 @@ preempt(void)
* Compute the amount of time during which the current lwp was running.
*
* - update l_rtime unless it's an idle lwp.
* - update spc_runtime for the next lwp.
*/
static inline void
updatertime(struct lwp *l, struct schedstate_percpu *spc)
void
updatertime(lwp_t *l, const struct timeval *tv)
{
struct timeval tv;
long s, u;
if ((l->l_flag & LW_IDLE) != 0) {
microtime(&spc->spc_runtime);
if ((l->l_flag & LW_IDLE) != 0)
return;
}
microtime(&tv);
u = l->l_rtime.tv_usec + (tv.tv_usec - spc->spc_runtime.tv_usec);
s = l->l_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
u = l->l_rtime.tv_usec + (tv->tv_usec - l->l_stime.tv_usec);
s = l->l_rtime.tv_sec + (tv->tv_sec - l->l_stime.tv_sec);
if (u < 0) {
u += 1000000;
s--;
@ -341,8 +337,6 @@ updatertime(struct lwp *l, struct schedstate_percpu *spc)
}
l->l_rtime.tv_usec = u;
l->l_rtime.tv_sec = s;
spc->spc_runtime = tv;
}
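For reference, the handoff that mi_switch() below builds around updatertime() can be summarized as follows. This is an illustrative helper using the kernel names from the diff, not kernel code itself; locking and the soft-interrupt fast path are omitted:

/*
 * Sketch: a single microtime() sample both closes the outgoing LWP's
 * interval and opens the incoming LWP's interval, so no time is lost
 * or double-counted across the switch.
 */
static void
switch_time_handoff(lwp_t *outgoing, lwp_t *incoming)
{
	struct timeval tv;

	microtime(&tv);
	updatertime(outgoing, &tv);	/* fold tv - l_stime into l_rtime */
	incoming->l_stime = tv;		/* the next interval starts now */
}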
/*
@ -351,12 +345,14 @@ updatertime(struct lwp *l, struct schedstate_percpu *spc)
* Returns 1 if another LWP was actually run.
*/
int
mi_switch(struct lwp *l)
mi_switch(lwp_t *l)
{
struct schedstate_percpu *spc;
struct lwp *newl;
int retval, oldspl;
struct cpu_info *ci;
struct timeval tv;
bool returning;
KASSERT(lwp_locked(l, NULL));
LOCKDEBUG_BARRIER(l->l_mutex, 1);
@ -365,6 +361,8 @@ mi_switch(struct lwp *l)
kstack_check_magic(l);
#endif
microtime(&tv);
/*
* It's safe to read the per CPU schedstate unlocked here, as all we
* are after is the run time and that's guaranteed to have been last
@ -378,32 +376,47 @@ mi_switch(struct lwp *l)
* scheduling flags.
*/
spc = &ci->ci_schedstate;
returning = false;
newl = NULL;
/*
* If we have been asked to switch to a specific LWP, then there
* is no need to inspect the run queues. If a soft interrupt is
* blocking, then return to the interrupted thread without adjusting
* VM context or its start time: neither have been changed in order
* to take the interrupt.
*/
if (l->l_switchto != NULL) {
if ((l->l_flag & LW_INTR) != 0) {
returning = true;
softint_block(l);
if ((l->l_flag & LW_TIMEINTR) != 0)
updatertime(l, &tv);
}
newl = l->l_switchto;
l->l_switchto = NULL;
}
/* Count time spent in current system call */
SYSCALL_TIME_SLEEP(l);
if (!returning) {
SYSCALL_TIME_SLEEP(l);
/*
* XXXSMP If we are using h/w performance counters,
* save context.
*/
/*
* XXXSMP If we are using h/w performance counters,
* save context.
*/
#if PERFCTRS
if (PMC_ENABLED(l->l_proc)) {
pmc_save_context(l->l_proc);
}
if (PMC_ENABLED(l->l_proc)) {
pmc_save_context(l->l_proc);
}
#endif
updatertime(l, spc);
updatertime(l, &tv);
}
/*
* If on the CPU and we have gotten this far, then we must yield.
*/
mutex_spin_enter(spc->spc_mutex);
spc->spc_flags &= ~SPCF_SWITCHCLEAR;
KASSERT(l->l_stat != LSRUN);
if (l->l_stat == LSONPROC) {
KASSERT(lwp_locked(l, &spc->spc_lwplock));
@ -434,11 +447,20 @@ mi_switch(struct lwp *l)
newl->l_flag |= LW_RUNNING;
}
ci->ci_want_resched = 0;
spc->spc_flags &= ~SPCF_SWITCHCLEAR;
}
/* Update the new LWP's start time while it is still locked. */
if (!returning) {
newl->l_stime = tv;
/*
* XXX The following may be done unlocked if newl != NULL
* above.
*/
newl->l_priority = newl->l_usrpri;
}
spc->spc_curpriority = newl->l_usrpri;
/* XXX The following may be done unlocked if newl != NULL above. */
newl->l_priority = newl->l_usrpri;
if (l != newl) {
struct lwp *prevlwp;
@ -460,8 +482,12 @@ mi_switch(struct lwp *l)
/* Unlocked, but for statistics only. */
uvmexp.swtch++;
/* Save old VM context. */
pmap_deactivate(l);
/*
* Save old VM context, unless a soft interrupt
* handler is blocking.
*/
if (!returning)
pmap_deactivate(l);
/* Switch to the new LWP.. */
l->l_ncsw++;
@ -492,6 +518,7 @@ mi_switch(struct lwp *l)
KASSERT(l == curlwp);
KASSERT(l->l_stat == LSONPROC);
KASSERT(l->l_cpu == curcpu());
/*
* XXXSMP If we are using h/w performance counters, restore context.

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern_time.c,v 1.128 2007/08/09 07:36:19 pooka Exp $ */
/* $NetBSD: kern_time.c,v 1.129 2007/10/08 20:06:19 ad Exp $ */
/*-
* Copyright (c) 2000, 2004, 2005 The NetBSD Foundation, Inc.
@ -68,7 +68,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.128 2007/08/09 07:36:19 pooka Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.129 2007/10/08 20:06:19 ad Exp $");
#include <sys/param.h>
#include <sys/resourcevar.h>
@ -114,8 +114,8 @@ settime(struct proc *p, struct timespec *ts)
struct timeval now;
struct timespec ts1;
#endif /* !__HAVE_TIMECOUNTER */
struct cpu_info *ci;
int s1, s2;
lwp_t *l;
int s;
/*
* Don't allow the time to be set forward so far it will wrap
@ -147,8 +147,7 @@ settime(struct proc *p, struct timespec *ts)
TIMESPEC_TO_TIMEVAL(&tv, ts);
/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
s1 = splsoftclock();
s2 = splclock();
s = splclock();
#ifdef __HAVE_TIMECOUNTER
microtime(&now);
timersub(&tv, &now, &delta);
@ -158,12 +157,12 @@ settime(struct proc *p, struct timespec *ts)
if ((delta.tv_sec < 0 || delta.tv_usec < 0) &&
kauth_authorize_system(p->p_cred, KAUTH_SYSTEM_TIME,
KAUTH_REQ_SYSTEM_TIME_BACKWARDS, NULL, NULL, NULL)) {
splx(s1);
splx(s);
return (EPERM);
}
#ifdef notyet
if ((delta.tv_sec < 86400) && securelevel > 0) { /* XXX elad - notyet */
splx(s1);
splx(s);
return (EPERM);
}
#endif
@ -175,21 +174,23 @@ settime(struct proc *p, struct timespec *ts)
time = tv;
#endif /* !__HAVE_TIMECOUNTER */
splx(s2);
timeradd(&boottime, &delta, &boottime);
/*
* XXXSMP
* This is wrong. We should traverse a list of all
* CPUs and add the delta to the runtime of those
* CPUs which have a process on them.
* XXXSMP: There is a short race between setting the time above
* and adjusting LWP's run times. Fixing this properly means
* pausing all CPUs while we adjust the clock.
*/
ci = curcpu();
timeradd(&ci->ci_schedstate.spc_runtime, &delta,
&ci->ci_schedstate.spc_runtime);
splx(s1);
mutex_enter(&proclist_lock);
LIST_FOREACH(l, &alllwp, l_list) {
lwp_lock(l);
timeradd(&l->l_stime, &delta, &l->l_stime);
lwp_unlock(l);
}
mutex_exit(&proclist_lock);
resettodr();
splx(s);
return (0);
}
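The reason the loop above shifts every l_stime by delta: an in-progress time slice must keep the same length across a clock step. A tiny illustrative helper (slice_usec is not a kernel symbol) makes the invariant explicit:

#include <sys/time.h>

/* slice_usec(now, stime) == slice_usec(now + delta, stime + delta) */
static long
slice_usec(const struct timeval *now, const struct timeval *stime)
{
	return (now->tv_sec - stime->tv_sec) * 1000000L +
	    (now->tv_usec - stime->tv_usec);
}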

View File

@ -1,4 +1,4 @@
/* $NetBSD: sched_4bsd.c,v 1.4 2007/08/04 11:03:02 ad Exp $ */
/* $NetBSD: sched_4bsd.c,v 1.5 2007/10/08 20:06:19 ad Exp $ */
/*-
* Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
@ -75,7 +75,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.4 2007/08/04 11:03:02 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.5 2007/10/08 20:06:19 ad Exp $");
#include "opt_ddb.h"
#include "opt_lockdebug.h"
@ -96,6 +96,7 @@ __KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.4 2007/08/04 11:03:02 ad Exp $");
#include <sys/kauth.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/intr.h>
#include <uvm/uvm_extern.h>
@ -136,6 +137,9 @@ int rrticks;
/*
* Force switch among equal priority processes every 100ms.
* Called from hardclock every hz/10 == rrticks hardclock ticks.
*
* There's no need to lock anywhere in this routine, as it's
* CPU-local and runs at IPL_SCHED (called from clock interrupt).
*/
/* ARGSUSED */
void
@ -145,7 +149,6 @@ sched_tick(struct cpu_info *ci)
spc->spc_ticks = rrticks;
spc_lock(ci);
if (!CURCPU_IDLE_P()) {
if (spc->spc_flags & SPCF_SEENRR) {
/*
@ -158,7 +161,6 @@ sched_tick(struct cpu_info *ci)
spc->spc_flags |= SPCF_SEENRR;
}
cpu_need_resched(curcpu(), 0);
spc_unlock(ci);
}
#define NICE_WEIGHT 2 /* priorities per nice level */
@ -706,8 +708,9 @@ sched_lwp_exit(struct lwp *l)
}
/* SysCtl */
/*
* sysctl setup. XXX This should be split with kern_synch.c.
*/
SYSCTL_SETUP(sysctl_sched_setup, "sysctl kern.sched subtree setup")
{
const struct sysctlnode *node = NULL;
@ -724,13 +727,19 @@ SYSCTL_SETUP(sysctl_sched_setup, "sysctl kern.sched subtree setup")
NULL, 0, NULL, 0,
CTL_KERN, CTL_CREATE, CTL_EOL);
if (node != NULL) {
sysctl_createv(clog, 0, &node, NULL,
CTLFLAG_PERMANENT,
CTLTYPE_STRING, "name", NULL,
NULL, 0, __UNCONST("4.4BSD"), 0,
CTL_CREATE, CTL_EOL);
}
KASSERT(node != NULL);
sysctl_createv(clog, 0, &node, NULL,
CTLFLAG_PERMANENT,
CTLTYPE_STRING, "name", NULL,
NULL, 0, __UNCONST("4.4BSD"), 0,
CTL_CREATE, CTL_EOL);
sysctl_createv(clog, 0, &node, NULL,
CTLFLAG_READWRITE,
CTLTYPE_INT, "timesoftints",
SYSCTL_DESCR("Track CPU time for soft interrupts"),
NULL, 0, &softint_timing, 0,
CTL_CREATE, CTL_EOL);
}
#if defined(DDB)

View File

@ -1,4 +1,4 @@
/* $NetBSD: procfs_linux.c,v 1.39 2007/05/26 16:21:04 agc Exp $ */
/* $NetBSD: procfs_linux.c,v 1.40 2007/10/08 20:06:20 ad Exp $ */
/*
* Copyright (c) 2001 Wasabi Systems, Inc.
@ -36,7 +36,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: procfs_linux.c,v 1.39 2007/05/26 16:21:04 agc Exp $");
__KERNEL_RCSID(0, "$NetBSD: procfs_linux.c,v 1.40 2007/10/08 20:06:20 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -269,7 +269,7 @@ procfs_docpustat(struct lwp *curl, struct proc *p,
i += 1;
}
timersub(&curcpu()->ci_schedstate.spc_runtime, &boottime, &runtime);
timersub(&curlwp->l_stime, &boottime, &runtime);
len += snprintf(&bf[len], LBFSZ - len,
"disk 0 0 0 0\n"
"page %u %u\n"
@ -512,7 +512,7 @@ procfs_douptime(struct lwp *curl, struct proc *p,
bf = malloc(LBFSZ, M_TEMP, M_WAITOK);
timersub(&curcpu()->ci_schedstate.spc_runtime, &boottime, &runtime);
timersub(&curlwp->l_stime, &boottime, &runtime);
idle = curcpu()->ci_schedstate.spc_cp_time[CP_IDLE];
len = snprintf(bf, LBFSZ,
"%lu.%02lu %" PRIu64 ".%02" PRIu64 "\n",

View File

@ -1,4 +1,4 @@
/* $NetBSD: lwp.h,v 1.64 2007/09/06 23:59:01 ad Exp $ */
/* $NetBSD: lwp.h,v 1.65 2007/10/08 20:06:20 ad Exp $ */
/*-
* Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
@ -82,6 +82,7 @@ struct lwp {
int l_flag; /* l: misc flag values */
int l_stat; /* l: overall LWP status */
struct timeval l_rtime; /* l: real time */
struct timeval l_stime; /* l: start time (while ONPROC) */
u_int l_swtime; /* l: time swapped in or out */
int l_holdcnt; /* l: if non-zero, don't swap */
int l_biglocks; /* l: biglock count before sleep */
@ -180,6 +181,7 @@ extern lwp_t lwp0; /* LWP for proc0 */
#define LW_INMEM 0x00000004 /* Loaded into memory. */
#define LW_SINTR 0x00000080 /* Sleep is interruptible. */
#define LW_SYSTEM 0x00000200 /* Kernel thread */
#define LW_TIMEINTR 0x00010000 /* Time this soft interrupt */
#define LW_WSUSPEND 0x00020000 /* Suspend before return to user */
#define LW_WCORE 0x00080000 /* Stop for core dump on return to user */
#define LW_WEXIT 0x00100000 /* Exit before return to user */

View File

@ -1,4 +1,4 @@
/* $NetBSD: sched.h,v 1.37 2007/09/21 01:50:36 rmind Exp $ */
/* $NetBSD: sched.h,v 1.38 2007/10/08 20:06:20 ad Exp $ */
/*-
* Copyright (c) 1999, 2000, 2001, 2002, 2007 The NetBSD Foundation, Inc.
@ -125,18 +125,20 @@ struct sched_param {
* c: cpu_lock
*/
struct schedstate_percpu {
void *spc_sched_info;/* (: scheduler-specific structure */
/* First set of data is likely to be accessed by other CPUs. */
kmutex_t *spc_mutex; /* (: lock on below, runnable LWPs */
kmutex_t spc_lwplock; /* (: general purpose lock for LWPs */
struct timeval spc_runtime; /* s: time curlwp started running */
volatile int spc_flags; /* m: flags; see below */
pri_t spc_curpriority;/* m: usrpri of curlwp */
time_t spc_lastmod; /* c: time of last cpu state change */
/* For the most part, this set of data is CPU-private. */
void *spc_sched_info;/* (: scheduler-specific structure */
volatile int spc_flags; /* s: flags; see below */
u_int spc_schedticks; /* s: ticks for schedclock() */
uint64_t spc_cp_time[CPUSTATES];/* s: CPU state statistics */
pri_t spc_curpriority;/* m: usrpri of curlwp */
int spc_ticks; /* s: ticks until sched_tick() */
int spc_pscnt; /* s: prof/stat counter */
int spc_psdiv; /* s: prof/stat divisor */
time_t spc_lastmod; /* c: time of last cpu state change */
};
/* spc_flags */
@ -212,6 +214,7 @@ void sched_print_runqueue(void (*pr)(const char *, ...));
void preempt(void);
int mi_switch(struct lwp *);
inline void resched_cpu(struct lwp *);
void updatertime(lwp_t *, const struct timeval *);
#endif /* _KERNEL */
#endif /* _SYS_SCHED_H_ */