NetBSD/sys/kern/kern_synch.c


/* $NetBSD: kern_synch.c,v 1.222 2008/04/02 17:38:16 ad Exp $ */
/*-
* Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
* NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
* Daniel Sieger.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*-
* Copyright (c) 1982, 1986, 1990, 1991, 1993
* The Regents of the University of California. All rights reserved.
* (c) UNIX System Laboratories, Inc.
* All or some portions of this file are derived from material licensed
* to the University of California by American Telephone and Telegraph
* Co. or Unix System Laboratories, Inc. and are reproduced herein with
* the permission of UNIX System Laboratories, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)kern_synch.c 8.9 (Berkeley) 5/19/95
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.222 2008/04/02 17:38:16 ad Exp $");
#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"
#define __MUTEX_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/cpu.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall_stats.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/lwpctl.h>
#include <sys/atomic.h>
#include <sys/simplelock.h>
#include <uvm/uvm_extern.h>
callout_t sched_pstats_ch;
unsigned int sched_pstats_ticks;
kcondvar_t lbolt; /* once a second sleep address */
static u_int sched_unsleep(struct lwp *, bool);
static void sched_changepri(struct lwp *, pri_t);
static void sched_lendpri(struct lwp *, pri_t);
syncobj_t sleep_syncobj = {
SOBJ_SLEEPQ_SORTED,
sleepq_unsleep,
sleepq_changepri,
sleepq_lendpri,
syncobj_noowner,
};
syncobj_t sched_syncobj = {
SOBJ_SLEEPQ_SORTED,
sched_unsleep,
sched_changepri,
sched_lendpri,
syncobj_noowner,
};
/*
* During autoconfiguration or after a panic, a sleep will simply lower the
* priority briefly to allow interrupts, then return. The priority to be
* used (safepri) is machine-dependent, thus this value is initialized and
* maintained in the machine-dependent layers. This priority will typically
* be 0, or the lowest priority that is safe for use on the interrupt stack;
* it can be made higher to block network software interrupts after panics.
*/
int safepri;
/*
* OBSOLETE INTERFACE
*
* General sleep call. Suspends the current process until a wakeup is
* performed on the specified identifier. The process will then be made
* runnable with the specified priority. Sleeps at most timo/hz seconds (0
* means no timeout). If pri includes the PCATCH flag, signals are checked
* before and after sleeping, else signals are not checked. Returns 0 if
* awakened, EWOULDBLOCK if the timeout expires. If PCATCH is set and a
* signal needs to be delivered, ERESTART is returned if the current system
* call should be restarted if possible, and EINTR is returned if the system
* call should be interrupted by the signal.
*
* The interlock is held until we are on a sleep queue. The interlock will
* be locked before returning back to the caller unless the PNORELOCK flag
* is specified, in which case the interlock will always be unlocked upon
* return.
*/
int
ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
volatile struct simplelock *interlock)
{
struct lwp *l = curlwp;
sleepq_t *sq;
int error;
KASSERT((l->l_pflag & LP_INTR) == 0);
if (sleepq_dontsleep(l)) {
(void)sleepq_abort(NULL, 0);
if ((priority & PNORELOCK) != 0)
simple_unlock(interlock);
return 0;
}
l->l_kpriority = true;
sq = sleeptab_lookup(&sleeptab, ident);
sleepq_enter(sq, l);
sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
if (interlock != NULL) {
KASSERT(simple_lock_held(interlock));
simple_unlock(interlock);
}
error = sleepq_block(timo, priority & PCATCH);
if (interlock != NULL && (priority & PNORELOCK) == 0)
simple_lock(interlock);
return error;
}
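/*
 * An illustrative sketch of the traditional ltsleep() pattern (the
 * identifiers example_slock, example_flag and example_wchan are
 * hypothetical):
 *
 *	int error;
 *
 *	simple_lock(&example_slock);
 *	while (!example_flag) {
 *		error = ltsleep(&example_wchan, PWAIT, "exwait", 0,
 *		    &example_slock);
 *		if (error != 0)
 *			break;
 *	}
 *	simple_unlock(&example_slock);
 *
 * If PNORELOCK is or'ed into the priority argument, ltsleep() instead
 * returns with the interlock already released.
 */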
int
mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
kmutex_t *mtx)
{
struct lwp *l = curlwp;
sleepq_t *sq;
int error;
KASSERT((l->l_pflag & LP_INTR) == 0);
if (sleepq_dontsleep(l)) {
(void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
return 0;
}
l->l_kpriority = true;
sq = sleeptab_lookup(&sleeptab, ident);
sleepq_enter(sq, l);
sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
mutex_exit(mtx);
error = sleepq_block(timo, priority & PCATCH);
if ((priority & PNORELOCK) == 0)
mutex_enter(mtx);
return error;
}
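/*
 * An illustrative sketch of a condition wait built from mtsleep() and
 * wakeup(); example_mtx, example_flag and example_wchan are
 * hypothetical names:
 *
 *	mutex_enter(&example_mtx);
 *	while (!example_flag)
 *		(void)mtsleep(&example_wchan, PWAIT, "exwait", 0,
 *		    &example_mtx);
 *	mutex_exit(&example_mtx);
 *
 * The waker sets example_flag with example_mtx held and then calls
 * wakeup(&example_wchan).  The condition is re-checked in a loop
 * because being awoken does not guarantee that it still holds.
 */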
/*
* General sleep call for situations where a wake-up is not expected.
*/
int
kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
{
struct lwp *l = curlwp;
sleepq_t *sq;
int error;
if (sleepq_dontsleep(l))
return sleepq_abort(NULL, 0);
if (mtx != NULL)
mutex_exit(mtx);
l->l_kpriority = true;
sq = sleeptab_lookup(&sleeptab, l);
sleepq_enter(sq, l);
sleepq_enqueue(sq, l, wmesg, &sleep_syncobj);
error = sleepq_block(timo, intr);
if (mtx != NULL)
mutex_enter(mtx);
return error;
}
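/*
 * An illustrative sketch: kpause() suits a plain timed pause where no
 * wakeup() is expected, e.g. roughly a tenth of a second ("expause" is
 * a hypothetical wait message):
 *
 *	(void)kpause("expause", false, hz / 10, NULL);
 *
 * Passing a non-NULL mutex makes kpause() drop it while sleeping and
 * reacquire it before returning; passing true for 'intr' allows the
 * pause to be cut short by a signal.
 */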
/*
* OBSOLETE INTERFACE
*
* Make all processes sleeping on the specified identifier runnable.
*/
void
wakeup(wchan_t ident)
{
sleepq_t *sq;
if (cold)
return;
sq = sleeptab_lookup(&sleeptab, ident);
sleepq_wake(sq, ident, (u_int)-1);
}
/*
* OBSOLETE INTERFACE
*
* Make the highest priority process first in line on the specified
* identifier runnable.
*/
void
wakeup_one(wchan_t ident)
{
sleepq_t *sq;
if (cold)
return;
sq = sleeptab_lookup(&sleeptab, ident);
sleepq_wake(sq, ident, 1);
}
/*
* General yield call. Puts the current process back on its run queue and
* performs a voluntary context switch. Should only be called when the
* current process explicitly requests it (eg sched_yield(2)).
*/
void
yield(void)
{
struct lwp *l = curlwp;
KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
lwp_lock(l);
KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
KASSERT(l->l_stat == LSONPROC);
l->l_kpriority = false;
(void)mi_switch(l);
KERNEL_LOCK(l->l_biglocks, l);
}
/*
* General preemption call. Puts the current process back on its run queue
* and performs an involuntary context switch.
*/
void
preempt(void)
{
struct lwp *l = curlwp;
KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
lwp_lock(l);
KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
KASSERT(l->l_stat == LSONPROC);
l->l_kpriority = false;
l->l_nivcsw++;
(void)mi_switch(l);
KERNEL_LOCK(l->l_biglocks, l);
}
/*
* Compute the amount of time during which the current lwp was running.
*
* - update l_rtime unless it's an idle lwp.
*/
void
updatertime(lwp_t *l, const struct bintime *now)
{
if ((l->l_flag & LW_IDLE) != 0)
return;
/* rtime += now - stime */
bintime_add(&l->l_rtime, now);
bintime_sub(&l->l_rtime, &l->l_stime);
}
/*
* The machine independent parts of context switch.
*
* Returns 1 if another LWP was actually run.
*/
int
mi_switch(lwp_t *l)
{
struct cpu_info *ci, *tci = NULL;
struct schedstate_percpu *spc;
struct lwp *newl;
int retval, oldspl;
struct bintime bt;
bool returning;
KASSERT(lwp_locked(l, NULL));
LOCKDEBUG_BARRIER(l->l_mutex, 1);
#ifdef KSTACK_CHECK_MAGIC
kstack_check_magic(l);
#endif
binuptime(&bt);
KDASSERT(l->l_cpu == curcpu());
ci = l->l_cpu;
spc = &ci->ci_schedstate;
returning = false;
newl = NULL;
/*
* If we have been asked to switch to a specific LWP, then there
* is no need to inspect the run queues. If a soft interrupt is
* blocking, then return to the interrupted thread without adjusting
* VM context or its start time: neither have been changed in order
* to take the interrupt.
*/
if (l->l_switchto != NULL) {
if ((l->l_pflag & LP_INTR) != 0) {
returning = true;
softint_block(l);
if ((l->l_flag & LW_TIMEINTR) != 0)
updatertime(l, &bt);
}
newl = l->l_switchto;
l->l_switchto = NULL;
}
#ifndef __HAVE_FAST_SOFTINTS
else if (ci->ci_data.cpu_softints != 0) {
/* There are pending soft interrupts, so pick one. */
newl = softint_picklwp();
newl->l_stat = LSONPROC;
newl->l_flag |= LW_RUNNING;
}
#endif /* !__HAVE_FAST_SOFTINTS */
/* Count time spent in current system call */
if (!returning) {
SYSCALL_TIME_SLEEP(l);
/*
* XXXSMP If we are using h/w performance counters,
* save context.
*/
#if PERFCTRS
if (PMC_ENABLED(l->l_proc)) {
pmc_save_context(l->l_proc);
}
#endif
updatertime(l, &bt);
}
/*
* If on the CPU and we have gotten this far, then we must yield.
*/
KASSERT(l->l_stat != LSRUN);
if (l->l_stat == LSONPROC && (l->l_target_cpu || l != newl)) {
KASSERT(lwp_locked(l, spc->spc_lwplock));
if (l->l_target_cpu == l->l_cpu) {
l->l_target_cpu = NULL;
} else {
tci = l->l_target_cpu;
}
if (__predict_false(tci != NULL)) {
/* Double-lock the runqueues */
spc_dlock(ci, tci);
} else {
/* Lock the runqueue */
spc_lock(ci);
}
if ((l->l_flag & LW_IDLE) == 0) {
l->l_stat = LSRUN;
if (__predict_false(tci != NULL)) {
/*
* Set the new CPU, lock and unset the
* l_target_cpu - thread will be enqueued
* to the runqueue of target CPU.
*/
l->l_cpu = tci;
lwp_setlock(l, tci->ci_schedstate.spc_mutex);
l->l_target_cpu = NULL;
} else {
lwp_setlock(l, spc->spc_mutex);
}
sched_enqueue(l, true);
} else {
KASSERT(tci == NULL);
l->l_stat = LSIDL;
}
} else {
/* Lock the runqueue */
spc_lock(ci);
}
/*
* Let sched_nextlwp() select the LWP to run on the CPU next.
* If no LWP is runnable, select the idle LWP.
*
* Note that spc_lwplock might not necessarily be held, and
* the new thread will be unlocked after setting the LWP-lock.
*/
if (newl == NULL) {
newl = sched_nextlwp();
if (newl != NULL) {
sched_dequeue(newl);
KASSERT(lwp_locked(newl, spc->spc_mutex));
newl->l_stat = LSONPROC;
newl->l_cpu = ci;
newl->l_flag |= LW_RUNNING;
lwp_setlock(newl, spc->spc_lwplock);
} else {
newl = ci->ci_data.cpu_idlelwp;
newl->l_stat = LSONPROC;
newl->l_flag |= LW_RUNNING;
}
/*
* Only clear want_resched if there are no
* pending (slow) software interrupts.
*/
ci->ci_want_resched = ci->ci_data.cpu_softints;
spc->spc_flags &= ~SPCF_SWITCHCLEAR;
spc->spc_curpriority = lwp_eprio(newl);
}
/* Items that must be updated with the CPU locked. */
if (!returning) {
/* Update the new LWP's start time. */
newl->l_stime = bt;
/*
* ci_curlwp changes when a fast soft interrupt occurs.
* We use cpu_onproc to keep track of which kernel or
* user thread is running 'underneath' the software
* interrupt. This is important for time accounting,
* itimers and forcing user threads to preempt (aston).
*/
ci->ci_data.cpu_onproc = newl;
}
if (l != newl) {
struct lwp *prevlwp;
/* Release all locks, but leave the current LWP locked */
if (l->l_mutex == l->l_cpu->ci_schedstate.spc_mutex) {
/*
* In case of migration, drop the local runqueue
* lock, thread is on other runqueue now.
*/
if (__predict_false(tci != NULL))
spc_unlock(ci);
/*
* Drop spc_lwplock, if the current LWP has been moved
* to the run queue (it is now locked by spc_mutex).
*/
mutex_spin_exit(spc->spc_lwplock);
} else {
/*
* Otherwise, drop the spc_mutex, we are done with the
* run queues.
*/
mutex_spin_exit(spc->spc_mutex);
KASSERT(tci == NULL);
}
/*
* Mark that a context switch is going to be performed
* for this LWP, to protect it from being switched
* to on another CPU.
*/
KASSERT(l->l_ctxswtch == 0);
l->l_ctxswtch = 1;
l->l_ncsw++;
l->l_flag &= ~LW_RUNNING;
/*
* Increase the count of spin-mutexes before the release
* of the last lock - we must remain at IPL_SCHED during
* the context switch.
*/
oldspl = MUTEX_SPIN_OLDSPL(ci);
ci->ci_mtx_count--;
lwp_unlock(l);
/* Count the context switch on this CPU. */
ci->ci_data.cpu_nswtch++;
/* Update status for lwpctl, if present. */
if (l->l_lwpctl != NULL)
l->l_lwpctl->lc_curcpu = LWPCTL_CPU_NONE;
/*
* Save old VM context, unless a soft interrupt
* handler is blocking.
*/
if (!returning)
pmap_deactivate(l);
/*
* We may need to spin-wait if 'newl' is still
* context switching on another CPU.
*/
if (newl->l_ctxswtch != 0) {
u_int count;
count = SPINLOCK_BACKOFF_MIN;
while (newl->l_ctxswtch)
SPINLOCK_BACKOFF(count);
}
/* Switch to the new LWP. */
prevlwp = cpu_switchto(l, newl, returning);
ci = curcpu();
/*
* Switched away - we have new curlwp.
* Restore VM context and IPL.
*/
pmap_activate(l);
if (prevlwp != NULL) {
/* Normalize the count of the spin-mutexes */
ci->ci_mtx_count++;
/* Unmark the state of context switch */
membar_exit();
prevlwp->l_ctxswtch = 0;
}
splx(oldspl);
/* Update status for lwpctl, if present. */
if (l->l_lwpctl != NULL) {
l->l_lwpctl->lc_curcpu = (int)cpu_index(ci);
l->l_lwpctl->lc_pctr++;
}
retval = 1;
} else {
/* Nothing to do - just unlock and return. */
KASSERT(tci == NULL);
spc_unlock(ci);
lwp_unlock(l);
retval = 0;
}
KASSERT(l == curlwp);
KASSERT(l->l_stat == LSONPROC);
KASSERT(l->l_cpu == ci);
/*
* XXXSMP If we are using h/w performance counters, restore context.
*/
#if PERFCTRS
if (PMC_ENABLED(l->l_proc)) {
pmc_restore_context(l->l_proc);
}
#endif
SYSCALL_TIME_WAKEUP(l);
LOCKDEBUG_BARRIER(NULL, 1);
return retval;
}
/*
* Change process state to be runnable, placing it on the run queue if it is
* in memory, and awakening the swapper if it isn't in memory.
*
* Call with the process and LWP locked. Will return with the LWP unlocked.
*/
void
setrunnable(struct lwp *l)
{
struct proc *p = l->l_proc;
struct cpu_info *ci;
sigset_t *ss;
KASSERT((l->l_flag & LW_IDLE) == 0);
KASSERT(mutex_owned(&p->p_smutex));
KASSERT(lwp_locked(l, NULL));
KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex);
switch (l->l_stat) {
case LSSTOP:
/*
* If we're being traced (possibly because someone attached us
* while we were stopped), check for a signal from the debugger.
*/
if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
ss = &l->l_sigpend.sp_set;
else
ss = &p->p_sigpend.sp_set;
sigaddset(ss, p->p_xstat);
signotify(l);
}
p->p_nrlwps++;
break;
case LSSUSPENDED:
l->l_flag &= ~LW_WSUSPEND;
p->p_nrlwps++;
cv_broadcast(&p->p_lwpcv);
break;
case LSSLEEP:
KASSERT(l->l_wchan != NULL);
break;
default:
panic("setrunnable: lwp %p state was %d", l, l->l_stat);
}
/*
* If the LWP was sleeping interruptibly, then it's OK to start it
* again. If not, mark it as still sleeping.
*/
if (l->l_wchan != NULL) {
l->l_stat = LSSLEEP;
/* lwp_unsleep() will release the lock. */
lwp_unsleep(l, true);
return;
}
/*
* If the LWP is still on the CPU, mark it as LSONPROC. It may be
* about to call mi_switch(), in which case it will yield.
*/
if ((l->l_flag & LW_RUNNING) != 0) {
l->l_stat = LSONPROC;
l->l_slptime = 0;
lwp_unlock(l);
return;
}
/*
* Look for a CPU to run.
* Set the LWP runnable.
*/
ci = sched_takecpu(l);
l->l_cpu = ci;
if (l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex) {
lwp_unlock_to(l, ci->ci_schedstate.spc_mutex);
lwp_lock(l);
}
sched_setrunnable(l);
l->l_stat = LSRUN;
l->l_slptime = 0;
/*
* If the thread is swapped out, wake the swapper to bring it back in.
* Otherwise, enter it into a run queue.
*/
if (l->l_flag & LW_INMEM) {
sched_enqueue(l, false);
resched_cpu(l);
lwp_unlock(l);
} else {
lwp_unlock(l);
uvm_kick_scheduler();
}
}
/*
* suspendsched:
*
* Convert all non-L_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
*/
void
suspendsched(void)
{
CPU_INFO_ITERATOR cii;
struct cpu_info *ci;
struct lwp *l;
struct proc *p;
/*
* We do this by process in order not to violate the locking rules.
*/
mutex_enter(&proclist_lock);
PROCLIST_FOREACH(p, &allproc) {
mutex_enter(&p->p_smutex);
if ((p->p_flag & PK_SYSTEM) != 0) {
mutex_exit(&p->p_smutex);
continue;
}
p->p_stat = SSTOP;
LIST_FOREACH(l, &p->p_lwps, l_sibling) {
if (l == curlwp)
continue;
lwp_lock(l);
/*
* Set LW_WREBOOT so that the LWP will suspend itself
* when it tries to return to user mode. We want to
* try to get as many LWPs as possible to
* the user / kernel boundary, so that they will
* release any locks that they hold.
*/
l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);
if (l->l_stat == LSSLEEP &&
(l->l_flag & LW_SINTR) != 0) {
/* setrunnable() will release the lock. */
setrunnable(l);
continue;
}
lwp_unlock(l);
}
mutex_exit(&p->p_smutex);
}
mutex_exit(&proclist_lock);
/*
* Kick all CPUs to make them preempt any LWPs running in user mode.
* They'll trap into the kernel and suspend themselves in userret().
*/
for (CPU_INFO_FOREACH(cii, ci)) {
spc_lock(ci);
cpu_need_resched(ci, RESCHED_IMMED);
spc_unlock(ci);
}
}
/*
* sched_unsleep:
*
* This is called when the LWP has not been awoken normally but instead
* interrupted: for example, if the sleep timed out. Because of this,
* it's not a valid action for running or idle LWPs.
*/
static u_int
sched_unsleep(struct lwp *l, bool cleanup)
{
lwp_unlock(l);
panic("sched_unsleep");
}
void
resched_cpu(struct lwp *l)
{
struct cpu_info *ci;
/*
* XXXSMP
* Since l->l_cpu persists across a context switch,
* this gives us *very weak* processor affinity, in
* that we notify the CPU on which the process last
* ran that it should try to switch.
*
* This does not guarantee that the process will run on
* that processor next, because another processor might
* grab it the next time it performs a context switch.
*
* This also does not handle the case where its last
* CPU is running a higher-priority process, but every
* other CPU is running a lower-priority process. There
* are ways to handle this situation, but they're not
* currently very pretty, and we also need to weigh the
* cost of moving a process from one CPU to another.
*/
ci = l->l_cpu;
if (lwp_eprio(l) > ci->ci_schedstate.spc_curpriority)
cpu_need_resched(ci, 0);
}
static void
sched_changepri(struct lwp *l, pri_t pri)
{
KASSERT(lwp_locked(l, NULL));
if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
sched_dequeue(l);
l->l_priority = pri;
sched_enqueue(l, false);
} else {
l->l_priority = pri;
}
resched_cpu(l);
}
static void
sched_lendpri(struct lwp *l, pri_t pri)
{
KASSERT(lwp_locked(l, NULL));
if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
sched_dequeue(l);
l->l_inheritedprio = pri;
sched_enqueue(l, false);
} else {
l->l_inheritedprio = pri;
}
resched_cpu(l);
}
struct lwp *
syncobj_noowner(wchan_t wchan)
{
return NULL;
}
/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
/*
* If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
* faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
* and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
*
* To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
* 1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
*
* If you don't want to bother with the faster/more-accurate formula, you
* can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
* (more general) method of calculating the %age of CPU used by a process.
*/
#define CCPU_SHIFT (FSHIFT + 1)
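/*
 * Worked example of the decay: sched_pstats() below runs once per
 * second and multiplies each p_pctcpu/l_pctcpu by ccpu/FSCALE ==
 * exp(-1/20), so after 60 seconds only exp(-60/20) == exp(-3), about
 * 5%, of the old value remains; hence "decay 95% in 60 seconds" above.
 * With the usual FSHIFT of 11 (FSCALE == 2048), ccpu works out to
 * about 1948 in fixed point.
 */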
/*
* sched_pstats:
*
* Update process statistics and check CPU resource allocation.
* Call scheduler-specific hook to eventually adjust process/LWP
* priorities.
*/
/* ARGSUSED */
void
sched_pstats(void *arg)
{
struct rlimit *rlim;
struct lwp *l;
struct proc *p;
int sig, clkhz;
long runtm;
sched_pstats_ticks++;
mutex_enter(&proclist_lock);
PROCLIST_FOREACH(p, &allproc) {
/*
* Increment time in/out of memory and sleep time (if
* sleeping). We ignore overflow; with 16-bit int's
* (remember them?) overflow takes 45 days.
*/
mutex_enter(&p->p_smutex);
mutex_spin_enter(&p->p_stmutex);
runtm = p->p_rtime.sec;
LIST_FOREACH(l, &p->p_lwps, l_sibling) {
if ((l->l_flag & LW_IDLE) != 0)
continue;
lwp_lock(l);
runtm += l->l_rtime.sec;
l->l_swtime++;
sched_pstats_hook(l);
lwp_unlock(l);
/*
* p_pctcpu is only for ps.
*/
l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT;
if (l->l_slptime < 1) {
clkhz = stathz != 0 ? stathz : hz;
#if (FSHIFT >= CCPU_SHIFT)
l->l_pctcpu += (clkhz == 100) ?
((fixpt_t)l->l_cpticks) <<
(FSHIFT - CCPU_SHIFT) :
100 * (((fixpt_t)l->l_cpticks)
<< (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
l->l_pctcpu += ((FSCALE - ccpu) *
(l->l_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
l->l_cpticks = 0;
}
}
p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
mutex_spin_exit(&p->p_stmutex);
/*
* Check if the process exceeds its CPU resource allocation.
* If over max, kill it.
*/
rlim = &p->p_rlimit[RLIMIT_CPU];
sig = 0;
if (runtm >= rlim->rlim_cur) {
if (runtm >= rlim->rlim_max)
sig = SIGKILL;
else {
sig = SIGXCPU;
if (rlim->rlim_cur < rlim->rlim_max)
rlim->rlim_cur += 5;
}
}
mutex_exit(&p->p_smutex);
if (sig) {
mutex_enter(&proclist_mutex);
psignal(p, sig);
mutex_exit(&proclist_mutex);
}
}
mutex_exit(&proclist_lock);
uvm_meter();
cv_wakeup(&lbolt);
callout_schedule(&sched_pstats_ch, hz);
}
void
sched_init(void)
{
cv_init(&lbolt, "lbolt");
callout_init(&sched_pstats_ch, CALLOUT_MPSAFE);
callout_setfunc(&sched_pstats_ch, sched_pstats, NULL);
sched_setup();
sched_pstats(NULL);
}