Add a lock around the scheduler, and use it as necessary, including
in the non-MULTIPROCESSOR case (LOCKDEBUG requires it).  Scheduler
lock is held upon entry to mi_switch() and cpu_switch(), and
cpu_switch() releases the lock before returning.

Largely from Bill Sommerfeld, with some minor bug fixes and
machine-dependent code hacking from me.
thorpej 2000-08-20 21:50:06 +00:00
parent f83d14e14b
commit a86d1f4891
26 changed files with 940 additions and 444 deletions
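For reference, the scheduler-lock interface the machine-dependent code
below relies on can be sketched in C roughly as follows.  This is an
illustrative reconstruction, not the committed header: the spl level,
the macro bodies, and the assertion forms are assumptions here, though
sched_lock itself is declared in kern_synch.c below, and
sched_lock_idle()/sched_unlock_idle() are the entry points the
assembly calls.

/*
 * Sketch of the sched_lock interface (illustrative; the spl level
 * and macro bodies are assumptions, not the committed header).
 */
extern struct simplelock sched_lock;	/* defined in kern_synch.c */

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
#define	SCHED_LOCK(s)							\
do {									\
	(s) = splhigh();	/* block scheduling interrupts */	\
	simple_lock(&sched_lock);					\
} while (0)

#define	SCHED_UNLOCK(s)							\
do {									\
	simple_unlock(&sched_lock);					\
	splx(s);							\
} while (0)

/* Entry points for assembly code, which cannot expand the macros. */
void	sched_lock_idle(void);		/* simple_lock(&sched_lock) */
void	sched_unlock_idle(void);	/* simple_unlock(&sched_lock) */
#else
/* Uniprocessor, no LOCKDEBUG: raising the spl alone is the lock. */
#define	SCHED_LOCK(s)		do { (s) = splhigh(); } while (0)
#define	SCHED_UNLOCK(s)		do { splx(s); } while (0)
#endif

#if defined(LOCKDEBUG)
/*
 * Assertion form assumed; simple_lock_held() stands in for whatever
 * LOCKDEBUG check the real macros perform.
 */
#define	SCHED_ASSERT_LOCKED()	KASSERT(simple_lock_held(&sched_lock))
#define	SCHED_ASSERT_UNLOCKED()	KASSERT(!simple_lock_held(&sched_lock))
#else
#define	SCHED_ASSERT_LOCKED()	/* nothing */
#define	SCHED_ASSERT_UNLOCKED()	/* nothing */
#endif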


@ -1,4 +1,4 @@
/* $NetBSD: locore.s,v 1.78 2000/07/19 14:00:24 nathanw Exp $ */
/* $NetBSD: locore.s,v 1.79 2000/08/20 21:50:06 thorpej Exp $ */
/*-
* Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
@ -68,6 +68,7 @@
#include "opt_ddb.h"
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_compat_linux.h"
#ifdef COMPAT_LINUX
@ -76,7 +77,7 @@
#include <machine/asm.h>
__KERNEL_RCSID(0, "$NetBSD: locore.s,v 1.78 2000/07/19 14:00:24 nathanw Exp $");
__KERNEL_RCSID(0, "$NetBSD: locore.s,v 1.79 2000/08/20 21:50:06 thorpej Exp $");
#include "assym.h"
@ -767,7 +768,6 @@ LEAF(savectx, 1)
/**************************************************************************/
IMPORT(sched_whichqs, 4)
IMPORT(kernel_lev1map, 8)
/*
* When no processes are on the runq, cpu_switch branches to idle
@ -781,13 +781,19 @@ LEAF(idle, 0)
/* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
GET_CURPROC
stq zero, 0(v0) /* curproc <- NULL for stats */
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
CALL(sched_unlock_idle) /* release sched_lock */
#endif
mov zero, a0 /* enable all interrupts */
call_pal PAL_OSF1_swpipl
2: ldl t0, sched_whichqs /* look for non-empty queue */
beq t0, 2b
ldiq a0, ALPHA_PSL_IPL_HIGH /* disable all interrupts */
call_pal PAL_OSF1_swpipl
jmp zero, cpu_switch_queuescan /* jump back into the fray */
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
CALL(sched_lock_idle) /* acquire sched_lock */
#endif
jmp zero, cpu_switch_queuescan /* jump back into the fire */
END(idle)
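In C terms, the handoff this idle loop performs is roughly the
following.  This is an illustrative paraphrase of the assembly above
(it mirrors the C version in Locore.c further down), not code from
the commit:

/*
 * C paraphrase of the idle loop -- illustrative only.  Entered from
 * cpu_switch()'s queue scan with interrupts blocked and sched_lock
 * held; re-enters the queue scan the same way.
 */
void
idle(void)
{
	curproc = NULL;			/* for stats */
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	sched_unlock_idle();		/* release sched_lock first... */
#endif
	(void) spl0();			/* ...then let interrupts in */

	while (sched_whichqs == 0)	/* spin until a process is runnable */
		continue;

	(void) splhigh();		/* block interrupts again... */
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	sched_lock_idle();		/* ...and retake sched_lock */
#endif
	/* ...then fall back into cpu_switch_queuescan with both held. */
}

The ordering is the point: the lock is dropped before interrupts are
enabled, and retaken only after they are blocked again, so the idle
loop never spins while holding sched_lock.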
/*
@ -818,11 +824,6 @@ LEAF(cpu_switch, 0)
mov a0, s0 /* save old curproc */
mov a1, s1 /* save old U-area */
ldl t0, sched_whichqs /* look for non-empty queue */
beq t0, idle /* and if none, go idle */
ldiq a0, ALPHA_PSL_IPL_HIGH /* disable all interrupts */
call_pal PAL_OSF1_swpipl
cpu_switch_queuescan:
br pv, 1f
1: LDGP(pv)
@ -863,6 +864,13 @@ cpu_switch_queuescan:
5:
mov t4, s2 /* save new proc */
ldq s3, P_MD_PCBPADDR(s2) /* save new pcbpaddr */
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
/*
* Done mucking with the run queues, release the
* scheduler lock, but keep interrupts out.
*/
CALL(sched_unlock_idle)
#endif
/*
* Check to see if we're switching to ourself. If we are,
@ -874,7 +882,7 @@ cpu_switch_queuescan:
* saved it. Also note that switch_exit() ensures that
* s0 is clear before jumping here to find a new process.
*/
cmpeq s0, t4, t0 /* oldproc == newproc? */
cmpeq s0, s2, t0 /* oldproc == newproc? */
bne t0, 7f /* Yes! Skip! */
/*
@ -1039,6 +1047,10 @@ LEAF(switch_exit, 1)
mov s2, a0
CALL(exit2)
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
CALL(sched_lock_idle) /* acquire sched_lock */
#endif
/*
* Now jump back into the middle of cpu_switch(). Note that
* we must clear s0 to guarantee that the check for switching


@ -1,4 +1,4 @@
/* $NetBSD: locore.s,v 1.116 2000/05/31 05:06:43 thorpej Exp $ */
/* $NetBSD: locore.s,v 1.117 2000/08/20 21:50:07 thorpej Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -52,6 +52,7 @@
#include "opt_compat_svr4.h"
#include "opt_compat_sunos.h"
#include "opt_fpsp.h"
#include "opt_lockdebug.h"
#include "assym.h"
#include <machine/asm.h>
@ -1119,6 +1120,8 @@ pcbflag:
* At exit of a process, do a switch for the last time.
* Switch to a safe stack and PCB, and select a new process to run. The
* old stack and u-area will be freed by the reaper.
*
* MUST BE CALLED AT SPLHIGH!
*/
ENTRY(switch_exit)
movl sp@(4),a0
@ -1130,21 +1133,30 @@ ENTRY(switch_exit)
jbsr _C_LABEL(exit2)
lea sp@(4),sp | pop args
#if defined(LOCKDEBUG)
/* Acquire sched_lock */
jbsr _C_LABEL(sched_lock_idle)
#endif
jra _cpu_switch
/*
* When no processes are on the runq, Swtch branches to idle
* to wait for something to come ready.
*/
.globl Idle
Lidle:
ASENTRY_NOPROFILE(Idle)
#if defined(LOCKDEBUG)
/* Release sched_lock */
jbsr _C_LABEL(sched_unlock_idle)
#endif
stop #PSL_LOWIPL
Idle:
idle:
movw #PSL_HIGHIPL,sr
tstl _sched_whichqs
jeq Lidle
movw #PSL_LOWIPL,sr
#if defined(LOCKDEBUG)
/* Acquire sched_lock */
jbsr _C_LABEL(sched_lock_idle)
#endif
movl _C_LABEL(sched_whichqs),%d0
jeq _ASM_LABEL(Idle)
jra Lsw1
Lbadsw:
@ -1164,174 +1176,173 @@ Lbadsw:
* bit). For now, we just always flush the full ATC.
*/
ENTRY(cpu_switch)
movl _curpcb,a0 | current pcb
movw sr,a0@(PCB_PS) | save sr before changing ipl
movl _C_LABEL(curpcb),%a0 | current pcb
movw %sr,%a0@(PCB_PS) | save sr before changing ipl
#ifdef notyet
movl _curproc,sp@- | remember last proc running
movl _C_LABEL(curproc),%sp@- | remember last proc running
#endif
clrl _curproc
Lsw1:
clrl _C_LABEL(curproc)
/*
* Find the highest-priority queue that isn't empty,
* then take the first proc from that queue.
*/
clrl d0
lea _sched_whichqs,a0
movl a0@,d1
Lswchk:
btst d0,d1
jne Lswfnd
addqb #1,d0
cmpb #32,d0
jne Lswchk
jra idle
Lswfnd:
movw #PSL_HIGHIPL,sr | lock out interrupts
movl a0@,d1 | and check again...
bclr d0,d1
jeq Lsw1 | proc moved, rescan
movl d1,a0@ | update whichqs
moveq #1,d1 | double check for higher priority
lsll d0,d1 | process (which may have snuck in
subql #1,d1 | while we were finding this one)
andl a0@,d1
jeq Lswok | no one got in, continue
movl a0@,d1
bset d0,d1 | otherwise put this one back
movl d1,a0@
jra Lsw1 | and rescan
Lswok:
movl d0,d1
lslb #3,d1 | convert queue number to index
addl #_sched_qs,d1 | locate queue (q)
movl d1,a1
cmpl a1@(P_FORW),a1 | anyone on queue?
movl _C_LABEL(sched_whichqs),%d0
jeq _ASM_LABEL(Idle)
Lsw1:
/*
* Interrupts are blocked, sched_lock is held. If
* we come here via Idle, %d0 contains the contents
* of a non-zero sched_whichqs.
*/
movl %d0,%d1
negl %d0
andl %d1,%d0
bfffo %d0{#0:#32},%d1
eorib #31,%d1
movl %d1,%d0
lslb #3,%d1 | convert queue number to index
addl #_C_LABEL(sched_qs),%d1 | locate queue (q)
movl %d1,%a1
movl %a1@(P_FORW),%a0 | p = q->p_forw
cmpal %d1,%a0 | anyone on queue?
jeq Lbadsw | no, panic
movl a1@(P_FORW),a0 | p = q->p_forw
#ifdef DIAGNOSTIC
tstl a0@(P_WCHAN)
tstl %a0@(P_WCHAN)
jne Lbadsw
cmpb #SRUN,a0@(P_STAT)
cmpb #SRUN,%a0@(P_STAT)
jne Lbadsw
#endif
movl a0@(P_FORW),a1@(P_FORW) | q->p_forw = p->p_forw
movl a0@(P_FORW),a1 | q = p->p_forw
movl a0@(P_BACK),a1@(P_BACK) | q->p_back = p->p_back
cmpl a0@(P_FORW),d1 | anyone left on queue?
jeq Lsw2 | no, skip
movl _sched_whichqs,d1
bset d0,d1 | yes, reset bit
movl d1,_sched_whichqs
movl %a0@(P_FORW),%a1@(P_FORW) | q->p_forw = p->p_forw
movl %a0@(P_FORW),%a1 | n = p->p_forw
movl %a0@(P_BACK),%a1@(P_BACK) | n->p_back = q
cmpal %d1,%a1 | anyone left on queue?
jne Lsw2 | yes, skip
movl _C_LABEL(sched_whichqs),%d1
bclr %d0,%d1 | no, clear bit
movl %d1,_C_LABEL(sched_whichqs)
Lsw2:
/* p->p_cpu initialized in fork1() for single-processor */
movb #SONPROC,a0@(P_STAT) | p->p_stat = SONPROC
movl a0,_curproc
clrl _want_resched
movb #SONPROC,%a0@(P_STAT) | p->p_stat = SONPROC
movl %a0,_C_LABEL(curproc)
clrl _C_LABEL(want_resched)
#ifdef notyet
movl sp@+,a1
cmpl a0,a1 | switching to same proc?
movl %sp@+,%a1
cmpl %a0,%a1 | switching to same proc?
jeq Lswdone | yes, skip save and restore
#endif
/*
* Save state of previous process in its pcb.
*/
movl _curpcb,a1
moveml d2-d7/a2-a7,a1@(PCB_REGS) | save non-scratch registers
movl usp,a2 | grab USP (a2 has been saved)
movl a2,a1@(PCB_USP) | and save it
movl _C_LABEL(curpcb),%a1
moveml %d2-%d7/%a2-%a7,%a1@(PCB_REGS) | save non-scratch registers
movl %usp,%a2 | grab USP (a2 has been saved)
movl %a2,%a1@(PCB_USP) | and save it
movl _CMAP2,a1@(PCB_CMAP2) | save temporary map PTE
#ifdef FPCOPROC
#ifdef FPU_EMULATE
tstl _fputype | do we have any FPU?
tstl _C_LABEL(fputype) | do we have any FPU?
jeq Lswnofpsave | no, dont save
#endif
lea a1@(PCB_FPCTX),a2 | pointer to FP save area
fsave a2@ | save FP state
lea %a1@(PCB_FPCTX),%a2 | pointer to FP save area
fsave %a2@ | save FP state
#if defined(M68020) || defined(M68030) || defined(M68040)
#ifdef M68060
cmpl #CPU_68060,_cputype
cmpl #CPU_68060,_C_LABEL(cputype)
jeq Lsavfp60
#endif
tstb a2@ | null state frame?
tstb %a2@ | null state frame?
jeq Lswnofpsave | yes, all done
fmovem fp0-fp7,a2@(216) | save FP general registers
fmovem fpcr/fpsr/fpi,a2@(312) | save FP control registers
fmovem %fp0-%fp7,%a2@(216) | save FP general registers
fmovem %fpcr/%fpsr/%fpi,%a2@(312) | save FP control registers
#ifdef M68060
jra Lswnofpsave
#endif
#endif
#ifdef M68060
Lsavfp60:
tstb a2@(2) | null state frame?
tstb %a2@(2) | null state frame?
jeq Lswnofpsave | yes, all done
fmovem fp0-fp7,a2@(216) | save FP general registers
fmovem fpcr,a2@(312) | save FP control registers
fmovem fpsr,a2@(316)
fmovem fpi,a2@(320)
fmovem %fp0-%fp7,%a2@(216) | save FP general registers
fmovem %fpcr,%a2@(312) | save FP control registers
fmovem %fpsr,%a2@(316)
fmovem %fpi,%a2@(320)
#endif
Lswnofpsave:
#endif
clrl a0@(P_BACK) | clear back link
movl a0@(P_ADDR),a1 | get p_addr
movl a1,_curpcb
movb a1@(PCB_FLAGS+1),pcbflag | copy of pcb_flags low byte
clrl %a0@(P_BACK) | clear back link
movl %a0@(P_ADDR),%a1 | get p_addr
movl %a1,_C_LABEL(curpcb)
movb %a1@(PCB_FLAGS+1),pcbflag | copy of pcb_flags low byte
#if defined(LOCKDEBUG)
/*
* Done mucking with the run queues, release the
* scheduler lock, but keep interrupts out.
*/
movl %a0,sp@- | not args...
movl %a1,sp@- | ...just saving
jbsr _C_LABEL(sched_unlock_idle)
movl sp@+,%a1
movl sp@+,%a0
#endif
/*
* Activate process's address space.
* XXX Should remember the last USTP value loaded, and call this
* XXX only if it has changed.
*/
pea a0@ | push proc
jbsr _pmap_activate | pmap_activate(p)
addql #4,sp
movl _curpcb,a1 | restore p_addr
pea %a0@ | push proc
jbsr _C_LABEL(pmap_activate) | pmap_activate(p)
addql #4,%sp
movl _C_LABEL(curpcb),%a1 | restore p_addr
lea tmpstk,sp | now goto a tmp stack for NMI
lea _ASM_LABEL(tmpstk),%sp | now goto a tmp stack for NMI
movl a1@(PCB_CMAP2),_CMAP2 | reload tmp map
moveml a1@(PCB_REGS),d2-d7/a2-a7 | and registers
movl a1@(PCB_USP),a0
movl a0,usp | and USP
movl %a1@(PCB_CMAP2),_CMAP2 | reload tmp map
moveml %a1@(PCB_REGS),%d2-%d7/%a2-%a7 | and registers
movl %a1@(PCB_USP),%a0
movl %a0,%usp | and USP
#ifdef FPCOPROC
#ifdef FPU_EMULATE
tstl _fputype | do we _have_ any fpu?
tstl _C_LABEL(fputype) | do we _have_ any fpu?
jne Lresnonofpatall
movw a1@(PCB_PS),sr | no, restore PS
movw %a1@(PCB_PS),%sr | no, restore PS
moveq #1,d0 | return 1 (for alternate rets)
rts
Lresnonofpatall:
#endif
lea a1@(PCB_FPCTX),a0 | pointer to FP save area
lea %a1@(PCB_FPCTX),%a0 | pointer to FP save area
#if defined(M68020) || defined(M68030) || defined(M68040)
#ifdef M68060
cmpl #CPU_68060,_cputype
cmpl #CPU_68060,_C_LABEL(cputype)
jeq Lresfp60rest1
#endif
tstb a0@ | null state frame?
tstb %a0@ | null state frame?
jeq Lresfprest2 | yes, easy
fmovem a0@(312),fpcr/fpsr/fpi | restore FP control registers
fmovem a0@(216),fp0-fp7 | restore FP general registers
fmovem %a0@(312),%fpcr/%fpsr/%fpi | restore FP control registers
fmovem %a0@(216),%fp0-%fp7 | restore FP general registers
Lresfprest2:
frestore a0@ | restore state
movw a1@(PCB_PS),sr | no, restore PS
moveq #1,d0 | return 1 (for alternate rets)
frestore %a0@ | restore state
movw %a1@(PCB_PS),%sr | no, restore PS
moveq #1,%d0 | return 1 (for alternate rets)
rts
#endif
#ifdef M68060
Lresfp60rest1:
tstb a0@(2) | null state frame?
tstb %a0@(2) | null state frame?
jeq Lresfp60rest2 | yes, easy
fmovem a0@(312),fpcr | restore FP control registers
fmovem a0@(316),fpsr
fmovem a0@(320),fpi
fmovem a0@(216),fp0-fp7 | restore FP general registers
fmovem %a0@(312),%fpcr | restore FP control registers
fmovem %a0@(316),%fpsr
fmovem %a0@(320),%fpi
fmovem %a0@(216),%fp0-%fp7 | restore FP general registers
Lresfp60rest2:
frestore a0@ | restore state
movw a1@(PCB_PS),sr | no, restore PS
moveq #1,d0 | return 1 (for alternate rets)
frestore %a0@ | restore state
movw %a1@(PCB_PS),%sr | no, restore PS
moveq #1,%d0 | return 1 (for alternate rets)
rts
#endif
#endif


@ -1,4 +1,4 @@
/* $NetBSD: Locore.c,v 1.5 2000/06/29 08:32:34 mrg Exp $ */
/* $NetBSD: Locore.c,v 1.6 2000/08/20 21:50:07 thorpej Exp $ */
/*
* Copyright (c) 2000 Ben Harris.
@ -37,9 +37,11 @@
* but have no need to be coded in assembly.
*/
#include "opt_lockdebug.h"
#include <sys/param.h>
__RCSID("$NetBSD: Locore.c,v 1.5 2000/06/29 08:32:34 mrg Exp $");
__RCSID("$NetBSD: Locore.c,v 1.6 2000/08/20 21:50:07 thorpej Exp $");
#include <sys/proc.h>
#include <sys/sched.h>
@ -105,10 +107,16 @@ static void
idle()
{
#if defined(LOCKDEBUG)
sched_unlock_idle();
#endif
spl0();
while (sched_whichqs == 0)
continue;
splhigh();
#if defined(LOCKDEBUG)
sched_lock_idle();
#endif
}
extern int want_resched; /* XXX should be in <machine/cpu.h> */
@ -120,15 +128,17 @@ void
cpu_switch(struct proc *p1)
{
int which;
int s;
struct prochd *q;
struct proc *p2;
/*
* We enter here with interrupts blocked and sched_lock held.
*/
#if 0
printf("cpu_switch: %p ->", p1);
#endif
curproc = NULL;
s = splhigh();
while (sched_whichqs == 0)
idle();
which = ffs(sched_whichqs) - 1;
@ -136,6 +146,9 @@ cpu_switch(struct proc *p1)
p2 = q->ph_link;
remrunqueue(p2);
want_resched = 0;
#ifdef LOCKDEBUG
sched_unlock_idle();
#endif
/* p->p_cpu initialized in fork1() for single-processor */
p2->p_stat = SONPROC;
curproc = p2;
@ -148,5 +161,4 @@ cpu_switch(struct proc *p1)
pmap_activate(p2);
cpu_loswitch(&p1->p_addr->u_pcb.pcb_sf, p2->p_addr->u_pcb.pcb_sf);
/* We only get back here after the other process has run. */
splx(s);
}


@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.4 2000/06/29 08:32:35 mrg Exp $ */
/* $NetBSD: vm_machdep.c,v 1.5 2000/08/20 21:50:07 thorpej Exp $ */
/*-
* Copyright (c) 2000 Ben Harris
@ -66,7 +66,7 @@
#include <sys/param.h>
__RCSID("$NetBSD: vm_machdep.c,v 1.4 2000/06/29 08:32:35 mrg Exp $");
__RCSID("$NetBSD: vm_machdep.c,v 1.5 2000/08/20 21:50:07 thorpej Exp $");
#include <sys/buf.h>
#include <sys/exec.h>
@ -162,9 +162,11 @@ setregs(struct proc *p, struct exec_package *pack, u_long stack)
void
cpu_exit(struct proc *p)
{
int s;
/* Nothing to do here? */
exit2(p); /* I think this is safe on a uniprocessor machine */
SCHED_LOCK(s); /* expected by cpu_switch */
cpu_switch(p);
}


@ -1,4 +1,4 @@
/* $NetBSD: locore.s,v 1.70 2000/06/13 14:48:44 leo Exp $ */
/* $NetBSD: locore.s,v 1.71 2000/08/20 21:50:07 thorpej Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -54,6 +54,7 @@
#include "opt_compat_sunos.h"
#include "opt_ddb.h"
#include "opt_fpsp.h"
#include "opt_lockdebug.h"
#include "assym.h"
#include <machine/asm.h>
@ -1073,21 +1074,30 @@ ENTRY(switch_exit)
jbsr _C_LABEL(exit2)
lea sp@(4),sp | pop args
#if defined(LOCKDEBUG)
/* Acquire sched_lock */
jbsr _C_LABEL(sched_lock_idle)
#endif
jra _C_LABEL(cpu_switch)
/*
* When no processes are on the runq, Swtch branches to idle
* to wait for something to come ready.
*/
.globl Idle
Lidle:
ASENTRY_NOPROFILE(Idle)
#if defined(LOCKDEBUG)
/* Release sched_lock */
jbsr _C_LABEL(sched_unlock_idle)
#endif
stop #PSL_LOWIPL
Idle:
idle:
movw #PSL_HIGHIPL,sr
tstl _C_LABEL(sched_whichqs)
jeq Lidle
movw #PSL_LOWIPL,sr
#if defined(LOCKDEBUG)
/* Acquire sched_lock */
jbsr _C_LABEL(sched_lock_idle)
#endif
movl _C_LABEL(sched_whichqs),%d0
jeq _ASM_LABEL(Idle)
jra Lsw1
Lbadsw:
@ -1107,159 +1117,159 @@ Lbadsw:
* bit). For now, we just always flush the full ATC.
*/
ENTRY(cpu_switch)
movl _C_LABEL(curpcb),a0 | current pcb
movw sr,a0@(PCB_PS) | save sr before changing ipl
movl _C_LABEL(curpcb),%a0 | current pcb
movw %sr,%a0@(PCB_PS) | save sr before changing ipl
#ifdef notyet
movl _C_LABEL(curproc),sp@- | remember last proc running
movl _C_LABEL(curproc),%sp@- | remember last proc running
#endif
clrl _C_LABEL(curproc)
Lsw1:
/*
* Find the highest-priority queue that isn't empty,
* then take the first proc from that queue.
*/
clrl d0
lea _C_LABEL(sched_whichqs),a0
movl a0@,d1
Lswchk:
btst d0,d1
jne Lswfnd
addqb #1,d0
cmpb #32,d0
jne Lswchk
jra idle
Lswfnd:
movw #PSL_HIGHIPL,sr | lock out interrupts
movl a0@,d1 | and check again...
bclr d0,d1
jeq Lsw1 | proc moved, rescan
movl d1,a0@ | update whichqs
moveq #1,d1 | double check for higher priority
lsll d0,d1 | process (which may have snuck in
subql #1,d1 | while we were finding this one)
andl a0@,d1
jeq Lswok | no one got in, continue
movl a0@,d1
bset d0,d1 | otherwise put this one back
movl d1,a0@
jra Lsw1 | and rescan
Lswok:
movl d0,d1
lslb #3,d1 | convert queue number to index
addl #_C_LABEL(sched_qs),d1 | locate queue (q)
movl d1,a1
cmpl a1@(P_FORW),a1 | anyone on queue?
movl _C_LABEL(sched_whichqs),%d0
jeq _ASM_LABEL(Idle)
Lsw1:
/*
* Interrupts are blocked, sched_lock is held. If
* we come here via Idle, %d0 contains the contents
* of a non-zero sched_whichqs.
*/
movl %d0,%d1
negl %d0
andl %d1,%d0
bfffo %d0{#0:#32},%d1
eorib #31,%d1
movl %d1,%d0
lslb #3,%d1 | convert queue number to index
addl #_C_LABEL(sched_qs),%d1 | locate queue (q)
movl %d1,%a1
movl %a1@(P_FORW),%a0 | p = q->p_forw
cmpal %d1,%a0 | anyone on queue?
jeq Lbadsw | no, panic
movl a1@(P_FORW),a0 | p = q->p_forw
#ifdef DIAGNOSTIC
tstl a0@(P_WCHAN)
tstl %a0@(P_WCHAN)
jne Lbadsw
cmpb #SRUN,a0@(P_STAT)
cmpb #SRUN,%a0@(P_STAT)
jne Lbadsw
#endif
movl a0@(P_FORW),a1@(P_FORW) | q->p_forw = p->p_forw
movl a0@(P_FORW),a1 | q = p->p_forw
movl a0@(P_BACK),a1@(P_BACK) | q->p_back = p->p_back
cmpl a0@(P_FORW),d1 | anyone left on queue?
jeq Lsw2 | no, skip
movl _C_LABEL(sched_whichqs),d1
bset d0,d1 | yes, reset bit
movl d1,_C_LABEL(sched_whichqs)
movl %a0@(P_FORW),%a1@(P_FORW) | q->p_forw = p->p_forw
movl %a0@(P_FORW),%a1 | n = p->p_forw
movl %a0@(P_BACK),%a1@(P_BACK) | n->p_back = q
cmpal %d1,%a1 | anyone left on queue?
jne Lsw2 | yes, skip
movl _C_LABEL(sched_whichqs),%d1
bclr %d0,%d1 | no, clear bit
movl %d1,_C_LABEL(sched_whichqs)
Lsw2:
/* p->p_cpu initialized in fork1() for single-processor */
movb #SONPROC,a0@(P_STAT) | p->p_stat = SONPROC
movl a0,_C_LABEL(curproc)
movb #SONPROC,%a0@(P_STAT) | p->p_stat = SONPROC
movl %a0,_C_LABEL(curproc)
clrl _C_LABEL(want_resched)
#ifdef notyet
movl sp@+,a1
cmpl a0,a1 | switching to same proc?
movl %sp@+,%a1
cmpl %a0,%a1 | switching to same proc?
jeq Lswdone | yes, skip save and restore
#endif
/*
* Save state of previous process in its pcb.
*/
movl _C_LABEL(curpcb),a1
moveml #0xFCFC,a1@(PCB_REGS) | save non-scratch registers
movl usp,a2 | grab USP (a2 has been saved)
movl a2,a1@(PCB_USP) | and save it
movl _CMAP2,a1@(PCB_CMAP2) | save temporary map PTE
movl _C_LABEL(curpcb),%a1
moveml #0xFCFC,%a1@(PCB_REGS) | save non-scratch registers
movl %usp,%a2 | grab USP (a2 has been saved)
movl %a2,%a1@(PCB_USP) | and save it
movl _CMAP2,%a1@(PCB_CMAP2) | save temporary map PTE
tstl _C_LABEL(fputype) | do we have an FPU?
jeq Lswnofpsave | no? don't attempt to save
lea a1@(PCB_FPCTX),a2 | pointer to FP save area
fsave a2@ | save FP state
lea %a1@(PCB_FPCTX),%a2 | pointer to FP save area
fsave %a2@ | save FP state
#ifdef M68060
cmpl #CPU_68060,_C_LABEL(cputype)
jeq Lsavfp60 | It's a 060
#endif
tstb a2@ | null state frame?
tstb %a2@ | null state frame?
jeq Lswnofpsave | yes, all done
fmovem fp0-fp7,a2@(216) | save FP general registers
fmovem fpcr/fpsr/fpi,a2@(312) | save FP control registers
fmovem %fp0-%fp7,%a2@(216) | save FP general registers
fmovem %fpcr/%fpsr/%fpi,%a2@(312) | save FP control registers
#ifdef M68060
jra Lswnofpsave
Lsavfp60:
tstb a2@(2) | null state frame?
tstb %a2@(2) | null state frame?
jeq Lswnofpsave | yes, all done
fmovem fp0-fp7,a2@(216) | save FP general registers
fmovem fpcr,a2@(312) | save FP control registers
fmovem fpsr,a2@(316)
fmovem fpi,a2@(320)
fmovem %fp0-%fp7,%a2@(216) | save FP general registers
fmovem %fpcr,%a2@(312) | save FP control registers
fmovem %fpsr,%a2@(316)
fmovem %fpi,%a2@(320)
#endif
Lswnofpsave:
clrl a0@(P_BACK) | clear back link
movl a0@(P_ADDR),a1 | get p_addr
movl a1,_C_LABEL(curpcb)
movb a1@(PCB_FLAGS+1),pcbflag | copy of pcb_flags low byte
clrl %a0@(P_BACK) | clear back link
movl %a0@(P_ADDR),%a1 | get p_addr
movl %a1,_C_LABEL(curpcb)
movb %a1@(PCB_FLAGS+1),pcbflag | copy of pcb_flags low byte
#if defined(LOCKDEBUG)
/*
* Done mucking with the run queues, release the
* scheduler lock, but keep interrupts out.
*/
movl %a0,sp@- | not args...
movl %a1,sp@- | ...just saving
jbsr _C_LABEL(sched_unlock_idle)
movl sp@+,%a1
movl sp@+,%a0
#endif
/*
* Activate process's address space.
* XXX Should remember the last USTP value loaded, and call this
* XXX only if it has changed.
*/
pea a0@ | push proc
pea %a0@ | push proc
jbsr _C_LABEL(pmap_activate) | pmap_activate(p)
addql #4,sp
movl _C_LABEL(curpcb),a1 | restore p_addr
addql #4,%sp
movl _C_LABEL(curpcb),%a1 | restore p_addr
lea tmpstk,sp | now goto a tmp stack for NMI
lea tmpstk,%sp | now goto a tmp stack for NMI
movl a1@(PCB_CMAP2),_CMAP2 | reload tmp map
moveml a1@(PCB_REGS),#0xFCFC | and registers
movl a1@(PCB_USP),a0
movl a0,usp | and USP
movl %a1@(PCB_CMAP2),_CMAP2 | reload tmp map
moveml %a1@(PCB_REGS),#0xFCFC | and registers
movl %a1@(PCB_USP),%a0
movl %a0,%usp | and USP
tstl _C_LABEL(fputype) | do we have an FPU?
jeq Lnofprest | no, don't attempt to restore
lea a1@(PCB_FPCTX),a0 | pointer to FP save area
lea %a1@(PCB_FPCTX),%a0 | pointer to FP save area
#ifdef M68060
cmpl #CPU_68060,_C_LABEL(cputype)
jeq Lresfp60rest1 | handle a 060
#endif
tstb a0@ | null state frame?
tstb %a0@ | null state frame?
jeq Lresfprest | yes, easy
fmovem a0@(312),fpcr/fpsr/fpi | restore FP control registers
fmovem a0@(216),fp0-fp7 | restore FP general registers
fmovem %a0@(312),%fpcr/%fpsr/%fpi | restore FP control registers
fmovem %a0@(216),%fp0-%fp7 | restore FP general registers
Lresfprest:
frestore a0@ | restore state
frestore %a0@ | restore state
Lnofprest:
movw a1@(PCB_PS),sr | no, restore PS
moveq #1,d0 | return 1 (for alternate returns)
movw %a1@(PCB_PS),%sr | no, restore PS
moveq #1,%d0 | return 1 (for alternate returns)
rts
#ifdef M68060
Lresfp60rest1:
tstb a0@(2) | null state frame?
tstb %a0@(2) | null state frame?
jeq Lresfp60rest2 | yes, easy
fmovem a0@(312),fpcr | restore FP control registers
fmovem a0@(316),fpsr
fmovem a0@(320),fpi
fmovem a0@(216),fp0-fp7 | restore FP general registers
fmovem %a0@(312),%fpcr | restore FP control registers
fmovem %a0@(316),%fpsr
fmovem %a0@(320),%fpi
fmovem %a0@(216),%fp0-%fp7 | restore FP general registers
Lresfp60rest2:
frestore a0@ | restore state
movw a1@(PCB_PS),sr | no, restore PS
moveq #1,d0 | return 1 (for alternate returns)
frestore %a0@ | restore state
movw %a1@(PCB_PS),%sr | no, restore PS
moveq #1,%d0 | return 1 (for alternate returns)
rts
#endif


@ -1,4 +1,4 @@
/* $NetBSD: locore.s,v 1.109 2000/05/31 05:06:49 thorpej Exp $ */
/* $NetBSD: locore.s,v 1.110 2000/08/20 21:50:07 thorpej Exp $ */
/*
* Copyright (c) 1994, 1995 Gordon W. Ross
@ -48,6 +48,7 @@
#include "opt_compat_sunos.h"
#include "opt_ddb.h"
#include "opt_fpsp.h"
#include "opt_lockdebug.h"
#include "assym.h"
#include <machine/asm.h>
@ -1196,6 +1197,8 @@ ASBSS(nullpcb,SIZEOF_PCB)
* At exit of a process, do a switch for the last time.
* Switch to a safe stack and PCB, and select a new process to run. The
* old stack and u-area will be freed by the reaper.
*
* MUST BE CALLED AT SPLHIGH!
*/
ENTRY(switch_exit)
movl %sp@(4),%a0
@ -1208,6 +1211,11 @@ ENTRY(switch_exit)
jbsr _C_LABEL(exit2)
lea %sp@(4),%sp | pop args
#if defined(LOCKDEBUG)
/* Acquire sched_lock */
jbsr _C_LABEL(sched_lock_idle)
#endif
jra _C_LABEL(cpu_switch)
/*
@ -1215,8 +1223,16 @@ ENTRY(switch_exit)
* to wait for something to come ready.
*/
ASENTRY_NOPROFILE(Idle)
#if defined(LOCKDEBUG)
/* Release sched_lock */
jbsr _C_LABEL(sched_unlock_idle)
#endif
stop #PSL_LOWIPL
movw #PSL_HIGHIPL,%sr
#if defined(LOCKDEBUG)
/* Acquire sched_lock */
jbsr _C_LABEL(sched_lock_idle)
#endif
movl _C_LABEL(sched_whichqs),%d0
jeq _ASM_LABEL(Idle)
jra Lsw1
@ -1248,10 +1264,14 @@ ENTRY(cpu_switch)
* Find the highest-priority queue that isn't empty,
* then take the first proc from that queue.
*/
movw #PSL_HIGHIPL,%sr | lock out interrupts
movl _C_LABEL(sched_whichqs),%d0
jeq _ASM_LABEL(Idle)
Lsw1:
/*
* Interrupts are blocked, sched_lock is held. If
* we come here via Idle, %d0 contains the contents
* of a non-zero sched_whichqs.
*/
movl %d0,%d1
negl %d0
andl %d1,%d0
@ -1312,6 +1332,18 @@ Lswnofpsave:
movl %a0@(P_ADDR),%a1 | get p_addr
movl %a1,_C_LABEL(curpcb)
#if defined(LOCKDEBUG)
/*
* Done mucking with the run queues, release the
* scheduler lock, but keep interrupts out.
*/
movl %a0,sp@- | not args...
movl %a1,sp@- | ...just saving
jbsr _C_LABEL(sched_unlock_idle)
movl sp@+,%a1
movl sp@+,%a0
#endif
/*
* Activate process's address space.
* XXX Should remember the last USTP value loaded, and call this


@ -1,4 +1,4 @@
/* $NetBSD: vr_idle.S,v 1.5 2000/05/26 21:19:44 thorpej Exp $ */
/* $NetBSD: vr_idle.S,v 1.6 2000/08/20 21:50:08 thorpej Exp $ */
/*
* Copyright (c) 1992, 1993
@ -49,6 +49,8 @@
#include <mips/cpuregs.h>
#include <hpcmips/vr/vr_asm.h>
#include "opt_lockdebug.h"
#include "assym.h"
.set noreorder
@ -64,6 +66,10 @@ LEAF(vr_idle)
mtc0 t0, MIPS_COP_0_STATUS # enable all interrupts
nop
sw zero, _C_LABEL(curproc) # set curproc NULL for stats
#if defined(LOCKDEBUG)
jal _C_LABEL(sched_unlock_idle) # release sched_lock
nop
#endif
/* Try to zero some free pages. */
lw t0, _C_LABEL(uvm) + UVM_PAGE_IDLE_ZERO
@ -84,7 +90,24 @@ LEAF(vr_idle)
beq t0, zero, 1b
nop
1:
#if defined(LOCKDEBUG)
mtc0 zero, MIPS_COP_0_STATUS # disable all interrupts
nop
nop
nop
nop
jal _C_LABEL(sched_lock_idle) # acquire sched_lock
nop
la ra, cpu_switch_queuescan # rathole to cpu_switch()
j ra
nop
#else
mtc0 zero, MIPS_COP_0_STATUS # disable all interrupts
nop
nop
nop
la ra, cpu_switch_queuescan # rathole to cpu_switch()
j ra
nop
#endif
END(vr_idle)


@ -1,4 +1,4 @@
/* $NetBSD: locore.s,v 1.223 2000/08/16 04:44:35 thorpej Exp $ */
/* $NetBSD: locore.s,v 1.224 2000/08/20 21:50:08 thorpej Exp $ */
/*-
* Copyright (c) 1998 The NetBSD Foundation, Inc.
@ -86,6 +86,7 @@
#include "opt_compat_svr4.h"
#include "opt_compat_oldboot.h"
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "npx.h"
#include "assym.h"
@ -1836,10 +1837,16 @@ NENTRY(remrunqueue)
* something to come ready.
*/
ENTRY(idle)
cli
/*
* When we get here, interrupts are off (via cli) and
* sched_lock is held.
*/
movl _C_LABEL(sched_whichqs),%ecx
testl %ecx,%ecx
jnz sw1
#if defined(LOCKDEBUG)
call _C_LABEL(sched_unlock_idle)
#endif
sti
/* Try to zero some pages. */
@ -1854,6 +1861,10 @@ ENTRY(idle)
hlt
#if NAPM > 0
call _C_LABEL(apm_cpu_busy)
#endif
cli
#if defined(LOCKDEBUG)
call _C_LABEL(sched_lock_idle)
#endif
jmp _C_LABEL(idle)
@ -1888,6 +1899,11 @@ ENTRY(cpu_switch)
*/
movl $0,_C_LABEL(curproc)
#if defined(LOCKDEBUG)
/* Release the sched_lock before processing interrupts. */
call _C_LABEL(sched_unlock_idle)
#endif
movl $0,_C_LABEL(cpl) # spl0()
call _C_LABEL(Xspllower) # process pending interrupts
@ -1904,8 +1920,13 @@ switch_search:
* %edi - new process
*/
/* Wait for new process. */
/* Lock the scheduler. */
cli # splhigh doesn't do a cli
#if defined(LOCKDEBUG)
call _C_LABEL(sched_lock_idle)
#endif
/* Wait for new process. */
movl _C_LABEL(sched_whichqs),%ecx
sw1: bsfl %ecx,%ebx # find a full q
@ -1942,6 +1963,13 @@ sw1: bsfl %ecx,%ebx # find a full q
/* Isolate process. XXX Is this necessary? */
movl %eax,P_BACK(%edi)
#if defined(LOCKDEBUG)
/*
* Unlock the sched_lock, but leave interrupts off, for now.
*/
call _C_LABEL(sched_unlock_idle)
#endif
#if defined(MULTIPROCESSOR)
/*
* p->p_cpu = curcpu()


@ -1,4 +1,4 @@
/* $NetBSD: locore.s,v 1.8 2000/07/09 07:14:18 nisimura Exp $ */
/* $NetBSD: locore.s,v 1.9 2000/08/20 21:50:08 thorpej Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -45,6 +45,7 @@
#include "opt_compat_netbsd.h"
#include "opt_ddb.h"
#include "opt_fpsp.h"
#include "opt_lockdebug.h"
#include "assym.h"
#include <machine/asm.h>
@ -942,6 +943,8 @@ ASBSS(nullpcb, SIZEOF_PCB)
* At exit of a process, do a switch for the last time.
* Switch to a safe stack and PCB, and select a new process to run. The
* old stack and u-area will be freed by the reaper.
*
* MUST BE CALLED AT SPLHIGH!
*/
ENTRY(switch_exit)
movl sp@(4),a0
@ -954,6 +957,11 @@ ENTRY(switch_exit)
jbsr _C_LABEL(exit2)
lea sp@(4),sp | pop args
#if defined(LOCKDEBUG)
/* Acquire sched_lock */
jbsr _C_LABEL(sched_lock_idle)
#endif
jra _C_LABEL(cpu_switch)
/*
@ -961,8 +969,16 @@ ENTRY(switch_exit)
* to wait for something to come ready.
*/
ASENTRY_NOPROFILE(Idle)
#if defined(LOCKDEBUG)
/* Release sched_lock */
jbsr _C_LABEL(sched_unlock_idle)
#endif
stop #PSL_LOWIPL
movw #PSL_HIGHIPL,sr
#if defined(LOCKDEBUG)
/* Acquire sched_lock */
jbsr _C_LABEL(sched_lock_idle)
#endif
movl _C_LABEL(sched_whichqs),d0
jeq _ASM_LABEL(Idle)
jra Lsw1
@ -994,10 +1010,14 @@ ENTRY(cpu_switch)
* Find the highest-priority queue that isn't empty,
* then take the first proc from that queue.
*/
movw #PSL_HIGHIPL,sr | lock out interrupts
movl _C_LABEL(sched_whichqs),d0
jeq _ASM_LABEL(Idle)
Lsw1:
/*
* Interrupts are blocked, sched_lock is held. If
* we come here via Idle, %d0 contains the contents
* of a non-zero sched_whichqs.
*/
movl d0,d1
negl d0
andl d1,d0
@ -1061,6 +1081,18 @@ Lswnofpsave:
movl a0@(P_ADDR),a1 | get p_addr
movl a1,_C_LABEL(curpcb)
#if defined(LOCKDEBUG)
/*
* Done mucking with the run queues, release the
* scheduler lock, but keep interrupts out.
*/
movl %a0,sp@- | not args...
movl %a1,sp@- | ...just saving
jbsr _C_LABEL(sched_unlock_idle)
movl sp@+,%a1
movl sp@+,%a0
#endif
/*
* Activate process's address space.
* XXX Should remember the last USTP value loaded, and call this


@ -1,4 +1,4 @@
/* $NetBSD: locore.s,v 1.129 2000/05/31 05:06:51 thorpej Exp $ */
/* $NetBSD: locore.s,v 1.130 2000/08/20 21:50:08 thorpej Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -81,6 +81,7 @@
#include "opt_compat_svr4.h"
#include "opt_compat_sunos.h"
#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "assym.h"
#include "opt_fpsp.h"
#include <machine/asm.h>
@ -983,6 +984,8 @@ ASBSS(nullpcb,SIZEOF_PCB)
* At exit of a process, do a switch for the last time.
* Switch to a safe stack and PCB, and select a new process to run. The
* old stack and u-area will be freed by the reaper.
*
* MUST BE CALLED AT SPLHIGH!
*/
ENTRY(switch_exit)
movl sp@(4),a0
@ -995,6 +998,11 @@ ENTRY(switch_exit)
jbsr _C_LABEL(exit2)
lea sp@(4),sp | pop args
#if defined(LOCKDEBUG)
/* Acquire sched_lock */
jbsr _C_LABEL(sched_lock_idle)
#endif
jra _C_LABEL(cpu_switch)
/*
@ -1002,8 +1010,16 @@ ENTRY(switch_exit)
* to wait for something to come ready.
*/
ASENTRY_NOPROFILE(Idle)
#if defined(LOCKDEBUG)
/* Release sched_lock */
jbsr _C_LABEL(sched_unlock_idle)
#endif
stop #PSL_LOWIPL
movw #PSL_HIGHIPL,sr
#if defined(LOCKDEBUG)
/* Acquire sched_lock */
jbsr _C_LABEL(sched_lock_idle)
#endif
movl _C_LABEL(sched_whichqs),d0
jeq _ASM_LABEL(Idle)
jra Lsw1
@ -1035,10 +1051,14 @@ ENTRY(cpu_switch)
* Find the highest-priority queue that isn't empty,
* then take the first proc from that queue.
*/
movw #PSL_HIGHIPL,sr | lock out interrupts
movl _C_LABEL(sched_whichqs),d0
jeq _ASM_LABEL(Idle)
Lsw1:
/*
* Interrupts are blocked, sched_lock is held. If
* we come here via Idle, %d0 contains the contents
* of a non-zero sched_whichqs.
*/
movl d0,d1
negl d0
andl d1,d0
@ -1099,6 +1119,18 @@ Lswnofpsave:
movl a0@(P_ADDR),a1 | get p_addr
movl a1,_C_LABEL(curpcb)
#if defined(LOCKDEBUG)
/*
* Done mucking with the run queues, release the
* scheduler lock, but keep interrupts out.
*/
movl %a0,sp@- | not args...
movl %a1,sp@- | ...just saving
jbsr _C_LABEL(sched_unlock_idle)
movl sp@+,%a1
movl sp@+,%a0
#endif
/*
* Activate the process's address space.
* XXX Should remember the last USTP value loaded, and call this


@ -1,4 +1,4 @@
/* $NetBSD: locore.S,v 1.112 2000/08/09 23:03:24 jeffs Exp $ */
/* $NetBSD: locore.S,v 1.113 2000/08/20 21:50:09 thorpej Exp $ */
/*
* Copyright (c) 1992, 1993
@ -59,6 +59,7 @@
#include "opt_ns.h"
#include "opt_ccitt.h"
#include "opt_iso.h"
#include "opt_lockdebug.h"
#include "fs_coda.h"
#include <machine/cdefs.h>
@ -283,6 +284,10 @@ LEAF(mips_idle)
mtc0 t0, MIPS_COP_0_STATUS # enable all interrupts
nop
sw zero, _C_LABEL(curproc) # set curproc NULL for stats
#if defined(LOCKDEBUG)
jal _C_LABEL(sched_unlock_idle) # release sched_lock
nop
#endif
/* Try to zero some free pages. */
lw t0, _C_LABEL(uvm) + UVM_PAGE_IDLE_ZERO
@ -296,9 +301,26 @@ LEAF(mips_idle)
nop
beq t0, zero, 1b
nop
#if defined(LOCKDEBUG)
mtc0 zero, MIPS_COP_0_STATUS # disable all interrupts
nop
nop
nop
nop
jal _C_LABEL(sched_lock_idle) # acquire sched_lock
nop
la ra, cpu_switch_queuescan
j ra
nop
#else
mtc0 zero, MIPS_COP_0_STATUS # disable all interrupts
nop
nop
nop
la ra, cpu_switch_queuescan
j ra
nop
#endif
END(mips_idle)
/*
@ -325,27 +347,12 @@ NESTED(cpu_switch, CALLFRAME_SIZ, ra)
subu sp, sp, CALLFRAME_SIZ
sw ra, CALLFRAME_RA(sp)
.mask 0x80000000, -4
lw t0, uvmexp+UVMEXP_SWTCH
lw t1, _C_LABEL(sched_whichqs)
addu t0, t0, 1
sw t0, uvmexp+UVMEXP_SWTCH
bne t1, zero, 1f
lw t2, _C_LABEL(mips_locoresw) + MIPSX_CPU_IDLE
nop # for r2000/r3000
jal ra, t2
nop
1:
mtc0 zero, MIPS_COP_0_STATUS
/*
* Entered here from idle() and switch_exit().
* Entered here from idle() and switch_exit(). Interrupts are
* blocked, and sched_lock is held.
*/
.globl cpu_switch_queuescan
cpu_switch_queuescan:
nop # wait for intrs disabled
nop
nop # extra cycles on r4000
nop # extra cycles on r4000
lw t0, _C_LABEL(sched_whichqs) # look for non-empty queue
li t2, -1 # t2 = lowest bit set
bne t0, zero, 1f
@ -381,6 +388,18 @@ cpu_switch_queuescan:
xor t3, t3, v1 # clear bit in 'whichqs'
sw t3, _C_LABEL(sched_whichqs)
3:
/* Squirrel away proc pointer. */
move s7, a0
#if defined(LOCKDEBUG)
/*
* Done mucking with the run queues, release the
* scheduler lock, but keep interrupts out.
*/
jal _C_LABEL(sched_unlock_idle)
nop
move a0, s7 # restore proc
#endif
/*
* Switch to new context.
*/
@ -395,7 +414,7 @@ cpu_switch_queuescan:
lw t2, _C_LABEL(mips_locoresw) + MIPSX_CPU_SWITCH_RESUME
sw a0, _C_LABEL(curproc)
jal ra, t2
move s7, a0 # -BDSLOT-
nop
#if 1 /* XXX XXX XXX */
REG_PROLOGUE
@ -447,6 +466,10 @@ LEAF(switch_exit)
REG_PROLOGUE
REG_L sp, U_PCB_CONTEXT+SF_REG_SP(v0) # restore stack pointer
REG_EPILOGUE
#if defined(LOCKDEBUG)
jal _C_LABEL(sched_lock_idle) # acquire sched_lock
nop
#endif
la ra, cpu_switch_queuescan # rathole to cpu_switch()
j _C_LABEL(exit2) # proc already in a0
sub sp, sp, CALLFRAME_SIZ #BDSlot: set stack call frame
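The "rathole" tail above deserves a gloss: switch_exit() pre-loads ra
with cpu_switch_queuescan and then jumps (rather than calls) to
exit2(), so exit2()'s return lands directly in cpu_switch()'s queue
scan, with sched_lock already held as the new convention requires.
An illustrative C rendering (the return-address trick itself cannot
be expressed in C):

/*
 * Illustrative C rendering of the MIPS switch_exit() tail.  In the
 * real code exit2() never returns here; its return address has been
 * pointed at cpu_switch_queuescan instead.
 */
extern void exit2(struct proc *);
extern void cpu_switch_queuescan(void);	/* asm label, shown as a call */

void
switch_exit(struct proc *p)
{
	/* ... already switched to the safe stack and idle PCB ... */
#if defined(LOCKDEBUG)
	sched_lock_idle();	/* the queue scan expects sched_lock held */
#endif
	exit2(p);		/* schedule u-area/vmspace for the reaper */
	cpu_switch_queuescan();	/* where exit2()'s "return" really goes */
}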


@ -1,4 +1,4 @@
/* $NetBSD: locore.s,v 1.62 2000/07/20 20:40:39 scw Exp $ */
/* $NetBSD: locore.s,v 1.63 2000/08/20 21:50:10 thorpej Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -47,6 +47,7 @@
#include "opt_compat_sunos.h"
#include "opt_fpsp.h"
#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "assym.h"
#include <machine/asm.h>
@ -1193,6 +1194,8 @@ ASBSS(nullpcb,SIZEOF_PCB)
* At exit of a process, do a switch for the last time.
* Switch to a safe stack and PCB, and select a new process to run. The
* old stack and u-area will be freed by the reaper.
*
* MUST BE CALLED AT SPLHIGH!
*/
ENTRY(switch_exit)
movl sp@(4),a0
@ -1205,6 +1208,11 @@ ENTRY(switch_exit)
jbsr _C_LABEL(exit2)
lea sp@(4),sp | pop args
#if defined(LOCKDEBUG)
/* Acquire sched_lock */
jbsr _C_LABEL(sched_lock_idle)
#endif
jra _C_LABEL(cpu_switch)
/*
@ -1212,8 +1220,16 @@ ENTRY(switch_exit)
* to wait for something to come ready.
*/
ASENTRY_NOPROFILE(Idle)
#if defined(LOCKDEBUG)
/* Release sched_lock */
jbsr _C_LABEL(sched_unlock_idle)
#endif
stop #PSL_LOWIPL
movw #PSL_HIGHIPL,sr
#if defined(LOCKDEBUG)
/* Acquire sched_lock */
jbsr _C_LABEL(sched_lock_idle)
#endif
movl _C_LABEL(sched_whichqs),d0
jeq _ASM_LABEL(Idle)
jra Lsw1
@ -1245,10 +1261,14 @@ ENTRY(cpu_switch)
* Find the highest-priority queue that isn't empty,
* then take the first proc from that queue.
*/
movw #PSL_HIGHIPL,sr | lock out interrupts
movl _C_LABEL(sched_whichqs),d0
jeq _ASM_LABEL(Idle)
Lsw1:
/*
* Interrupts are blocked, sched_lock is held. If
* we come here via Idle, %d0 contains the contents
* of a non-zero sched_whichqs.
*/
movl d0,d1
negl d0
andl d1,d0
@ -1328,6 +1348,18 @@ Lswnofpsave:
movl a0@(P_ADDR),a1 | get p_addr
movl a1,_C_LABEL(curpcb)
#if defined(LOCKDEBUG)
/*
* Done mucking with the run queues, release the
* scheduler lock, but keep interrupts out.
*/
movl %a0,sp@- | not args...
movl %a1,sp@- | ...just saving
jbsr _C_LABEL(sched_unlock_idle)
movl sp@+,%a1
movl sp@+,%a0
#endif
/*
* Activate process's address space.
* XXX Should remember the last USTP value loaded, and call this


@ -1,4 +1,4 @@
/* $NetBSD: locore.s,v 1.8 2000/05/31 05:06:53 thorpej Exp $ */
/* $NetBSD: locore.s,v 1.9 2000/08/20 21:50:10 thorpej Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -51,6 +51,7 @@
#include "opt_compat_sunos.h"
#include "opt_fpsp.h"
#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "assym.h"
#include <machine/asm.h>
@ -952,6 +953,8 @@ ASBSS(nullpcb,SIZEOF_PCB)
* At exit of a process, do a switch for the last time.
* Switch to a safe stack and PCB, and select a new process to run. The
* old stack and u-area will be freed by the reaper.
*
* MUST BE CALLED AT SPLHIGH!
*/
ENTRY(switch_exit)
movl %sp@(4),%a0
@ -964,6 +967,11 @@ ENTRY(switch_exit)
jbsr _C_LABEL(exit2)
lea %sp@(4),%sp | pop args
#if defined(LOCKDEBUG)
/* Acquire sched_lock */
jbsr _C_LABEL(sched_lock_idle)
#endif
jra _C_LABEL(cpu_switch)
/*
@ -971,8 +979,16 @@ ENTRY(switch_exit)
* to wait for something to come ready.
*/
ASENTRY_NOPROFILE(Idle)
#if defined(LOCKDEBUG)
/* Release sched_lock */
jbsr _C_LABEL(sched_unlock_idle)
#endif
stop #PSL_LOWIPL
movw #PSL_HIGHIPL,%sr
#if defined(LOCKDEBUG)
/* Acquire sched_lock */
jbsr _C_LABEL(sched_lock_idle)
#endif
movl _C_LABEL(sched_whichqs),%d0
jeq _ASM_LABEL(Idle)
jra Lsw1
@ -1004,10 +1020,14 @@ ENTRY(cpu_switch)
* Find the highest-priority queue that isn't empty,
* then take the first proc from that queue.
*/
movw #PSL_HIGHIPL,%sr | lock out interrupts
movl _C_LABEL(sched_whichqs),%d0
jeq _ASM_LABEL(Idle)
Lsw1:
/*
* Interrupts are blocked, sched_lock is held. If
* we come here via Idle, %d0 contains the contents
* of a non-zero sched_whichqs.
*/
movl %d0,%d1
negl %d0
andl %d1,%d0
@ -1069,6 +1089,18 @@ Lswnofpsave:
movl %a0@(P_ADDR),%a1 | get p_addr
movl %a1,_C_LABEL(curpcb)
#if defined(LOCKDEBUG)
/*
* Done mucking with the run queues, release the
* scheduler lock, but keep interrupts out.
*/
movl %a0,sp@- | not args...
movl %a1,sp@- | ...just saving
jbsr _C_LABEL(sched_unlock_idle)
movl sp@+,%a1
movl sp@+,%a0
#endif
/*
* Activate process's address space.
* XXX Should remember the last USTP value loaded, and call this


@ -1,4 +1,4 @@
/* $NetBSD: locore.s,v 1.25 2000/05/31 05:06:54 thorpej Exp $ */
/* $NetBSD: locore.s,v 1.26 2000/08/20 21:50:10 thorpej Exp $ */
/*
* Copyright (c) 1998 Darrin B. Jewell
@ -54,6 +54,7 @@
#include "opt_compat_sunos.h"
#include "opt_ddb.h"
#include "opt_fpsp.h"
#include "opt_lockdebug.h"
#include "assym.h"
#include <machine/asm.h>
@ -1107,6 +1108,8 @@ ASBSS(nullpcb,SIZEOF_PCB)
* At exit of a process, do a switch for the last time.
* Switch to a safe stack and PCB, and select a new process to run. The
* old stack and u-area will be freed by the reaper.
*
* MUST BE CALLED AT SPLHIGH!
*/
ENTRY(switch_exit)
movl %sp@(4),%a0
@ -1119,6 +1122,11 @@ ENTRY(switch_exit)
jbsr _C_LABEL(exit2)
lea %sp@(4),%sp | pop args
#if defined(LOCKDEBUG)
/* Acquire sched_lock */
jbsr _C_LABEL(sched_lock_idle)
#endif
jra _C_LABEL(cpu_switch)
/*
@ -1126,8 +1134,16 @@ ENTRY(switch_exit)
* to wait for something to come ready.
*/
ASENTRY_NOPROFILE(Idle)
#if defined(LOCKDEBUG)
/* Release sched_lock */
jbsr _C_LABEL(sched_unlock_idle)
#endif
stop #PSL_LOWIPL
movw #PSL_HIGHIPL,%sr
#if defined(LOCKDEBUG)
/* Acquire sched_lock */
jbsr _C_LABEL(sched_lock_idle)
#endif
movl _C_LABEL(sched_whichqs),%d0
jeq _ASM_LABEL(Idle)
jra Lsw1
@ -1159,10 +1175,14 @@ ENTRY(cpu_switch)
* Find the highest-priority queue that isn't empty,
* then take the first proc from that queue.
*/
movw #PSL_HIGHIPL,%sr | lock out interrupts
movl _C_LABEL(sched_whichqs),%d0
jeq _ASM_LABEL(Idle)
Lsw1:
/*
* Interrupts are blocked, sched_lock is held. If
* we come here via Idle, %d0 contains the contents
* of a non-zero sched_whichqs.
*/
movl %d0,%d1
negl %d0
andl %d1,%d0
@ -1223,6 +1243,18 @@ Lswnofpsave:
movl %a0@(P_ADDR),%a1 | get p_addr
movl %a1,_C_LABEL(curpcb)
#if defined(LOCKDEBUG)
/*
* Done mucking with the run queues, release the
* scheduler lock, but keep interrupts out.
*/
movl %a0,sp@- | not args...
movl %a1,sp@- | ...just saving
jbsr _C_LABEL(sched_unlock_idle)
movl sp@+,%a1
movl sp@+,%a0
#endif
/*
* Activate process's address space.
* XXX Should remember the last USTP value loaded, and call this


@ -1,4 +1,4 @@
/* $NetBSD: locore.s,v 1.74 2000/05/31 05:06:56 thorpej Exp $ */
/* $NetBSD: locore.s,v 1.75 2000/08/20 21:50:10 thorpej Exp $ */
/*
* Copyright (c) 1994, 1995 Gordon W. Ross
@ -46,6 +46,7 @@
#include "opt_compat_netbsd.h"
#include "opt_compat_svr4.h"
#include "opt_compat_sunos.h"
#include "opt_lockdebug.h"
#include "assym.h"
#include <machine/asm.h>
@ -622,8 +623,12 @@ ENTRY(switch_exit)
/* Schedule the vmspace and stack to be freed. */
movl a0,sp@- | exit2(p)
jbsr _C_LABEL(exit2)
lea sp@(4),sp | pop args
/* Don't pop the proc; pass it to cpu_switch(). */
#if defined(LOCKDEBUG)
/* Acquire sched_lock */
jbsr _C_LABEL(sched_lock_idle)
#endif
jra _C_LABEL(cpu_switch)
@ -631,19 +636,20 @@ ENTRY(switch_exit)
* When no processes are on the runq, cpu_switch() branches to idle
* to wait for something to come ready.
*/
.data
GLOBAL(Idle_count)
.long 0
.text
Lidle:
#if defined(LOCKDEBUG)
/* Release sched_lock */
jbsr _C_LABEL(sched_unlock_idle)
#endif
stop #PSL_LOWIPL
GLOBAL(_Idle) | See clock.c
movw #PSL_HIGHIPL,sr
addql #1, _C_LABEL(Idle_count)
tstl _C_LABEL(sched_whichqs)
#if defined(LOCKDEBUG)
/* Acquire sched_lock */
jbsr _C_LABEL(sched_lock_idle)
#endif
movl _C_LABEL(sched_whichqs),d0
jeq Lidle
movw #PSL_LOWIPL,sr
jra Lsw1
Lbadsw:
@ -654,69 +660,54 @@ Lbadsw:
/*
* cpu_switch()
* Hacked for sun3
* XXX - Arg 1 is a proc pointer (curproc) but this doesn't use it.
* XXX - Sould we use p->p_addr instead of curpcb? -gwr
*/
ENTRY(cpu_switch)
movl _C_LABEL(curpcb),a1 | current pcb
movw sr,a1@(PCB_PS) | save sr before changing ipl
movl _C_LABEL(curpcb),a0 | current pcb
movw sr,a0@(PCB_PS) | save sr before changing ipl
#ifdef notyet
movl _C_LABEL(curproc),sp@- | remember last proc running
#endif
clrl _C_LABEL(curproc)
Lsw1:
/*
* Find the highest-priority queue that isn't empty,
* then take the first proc from that queue.
*/
clrl d0
lea _C_LABEL(sched_whichqs),a0
movl a0@,d1
Lswchk:
btst d0,d1
jne Lswfnd
addqb #1,d0
cmpb #32,d0
jne Lswchk
jra _C_LABEL(_Idle)
Lswfnd:
movw #PSL_HIGHIPL,sr | lock out interrupts
movl a0@,d1 | and check again...
bclr d0,d1
jeq Lsw1 | proc moved, rescan
movl d1,a0@ | update whichqs
moveq #1,d1 | double check for higher priority
lsll d0,d1 | process (which may have snuck in
subql #1,d1 | while we were finding this one)
andl a0@,d1
jeq Lswok | no one got in, continue
movl a0@,d1
bset d0,d1 | otherwise put this one back
movl d1,a0@
jra Lsw1 | and rescan
Lswok:
movl d0,d1
lslb #3,d1 | convert queue number to index
addl #_C_LABEL(sched_qs),d1 | locate queue (q)
movl d1,a1
cmpl a1@(P_FORW),a1 | anyone on queue?
movl _C_LABEL(sched_whichqs),%d0
jeq Lidle
Lsw1:
/*
* Interrupts are blocked, sched_lock is held. If
* we come here via Idle, %d0 contains the contents
* of a non-zero sched_whichqs.
*/
movl %d0,%d1
negl %d0
andl %d1,%d0
bfffo %d0{#0:#32},%d1
eorib #31,%d1
movl %d1,%d0
lslb #3,%d1 | convert queue number to index
addl #_C_LABEL(sched_qs),%d1 | locate queue (q)
movl %d1,%a1
movl %a1@(P_FORW),%a0 | p = q->p_forw
cmpal %d1,%a0 | anyone on queue?
jeq Lbadsw | no, panic
movl a1@(P_FORW),a0 | p = q->p_forw
#ifdef DIAGNOSTIC
tstl a0@(P_WCHAN)
tstl %a0@(P_WCHAN)
jne Lbadsw
cmpb #SRUN,a0@(P_STAT)
cmpb #SRUN,%a0@(P_STAT)
jne Lbadsw
#endif
movl a0@(P_FORW),a1@(P_FORW) | q->p_forw = p->p_forw
movl a0@(P_FORW),a1 | q = p->p_forw
movl a0@(P_BACK),a1@(P_BACK) | q->p_back = p->p_back
cmpl a0@(P_FORW),d1 | anyone left on queue?
jeq Lsw2 | no, skip
movl _C_LABEL(sched_whichqs),d1
bset d0,d1 | yes, reset bit
movl d1,_C_LABEL(sched_whichqs)
movl %a0@(P_FORW),%a1@(P_FORW) | q->p_forw = p->p_forw
movl %a0@(P_FORW),%a1 | n = p->p_forw
movl %a0@(P_BACK),%a1@(P_BACK) | n->p_back = q
cmpal %d1,%a1 | anyone left on queue?
jne Lsw2 | yes, skip
movl _C_LABEL(sched_whichqs),%d1
bclr %d0,%d1 | no, clear bit
movl %d1,_C_LABEL(sched_whichqs)
Lsw2:
/* p->p_cpu initialized in fork1() for single-processor */
movb #SONPROC,a0@(P_STAT) | p->p_stat = SONPROC
@ -756,6 +747,18 @@ Lswnofpsave:
movl a0@(P_ADDR),a1 | get p_addr
movl a1,_C_LABEL(curpcb)
#if defined(LOCKDEBUG)
/*
* Done mucking with the run queues, release the
* scheduler lock, but keep interrupts out.
*/
movl %a0,sp@- | not args...
movl %a1,sp@- | ...just saving
jbsr _C_LABEL(sched_unlock_idle)
movl sp@+,%a1
movl sp@+,%a0
#endif
/*
* Load the new VM context (new MMU root pointer)
*/


@ -1,4 +1,4 @@
/* $NetBSD: locore.s,v 1.41 2000/05/31 05:06:57 thorpej Exp $ */
/* $NetBSD: locore.s,v 1.42 2000/08/20 21:50:10 thorpej Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -44,6 +44,7 @@
#include "opt_compat_netbsd.h"
#include "opt_compat_svr4.h"
#include "opt_compat_sunos.h"
#include "opt_lockdebug.h"
#include "assym.h"
#include <machine/asm.h>
@ -631,6 +632,8 @@ ASBSS(nullpcb,SIZEOF_PCB)
* At exit of a process, do a cpu_switch for the last time.
* Switch to a safe stack and PCB, and select a new process to run. The
* old stack and u-area will be freed by the reaper.
*
* MUST BE CALLED AT SPLHIGH!
*/
ENTRY(switch_exit)
movl sp@(4),a0 | struct proc *p
@ -641,8 +644,12 @@ ENTRY(switch_exit)
/* Schedule the vmspace and stack to be freed. */
movl a0,sp@- | exit2(p)
jbsr _C_LABEL(exit2)
lea sp@(4),sp
/* Don't pop the proc; pass it to cpu_switch(). */
#if defined(LOCKDEBUG)
/* Acquire sched_lock */
jbsr _C_LABEL(sched_lock_idle)
#endif
jra _C_LABEL(cpu_switch)
@ -650,19 +657,20 @@ ENTRY(switch_exit)
* When no processes are on the runq, cpu_switch() branches to idle
* to wait for something to come ready.
*/
.data
GLOBAL(Idle_count)
.long 0
.text
Lidle:
#if defined(LOCKDEBUG)
/* Release sched_lock */
jbsr _C_LABEL(sched_unlock_idle)
#endif
stop #PSL_LOWIPL
GLOBAL(_Idle) | See clock.c
movw #PSL_HIGHIPL,sr
addql #1, _C_LABEL(Idle_count)
tstl _C_LABEL(sched_whichqs)
#if defined(LOCKDEBUG)
/* Acquire sched_lock */
jbsr _C_LABEL(sched_lock_idle)
#endif
movl _C_LABEL(sched_whichqs),%d0
jeq Lidle
movw #PSL_LOWIPL,sr
jra Lsw1
Lbadsw:
@ -673,8 +681,6 @@ Lbadsw:
/*
* cpu_switch()
* Hacked for sun3
* XXX - Arg 1 is a proc pointer (curproc) but this doesn't use it.
* XXX - Sould we use p->p_addr instead of curpcb? -gwr
*/
ENTRY(cpu_switch)
movl _C_LABEL(curpcb),a1 | current pcb
@ -684,58 +690,45 @@ ENTRY(cpu_switch)
#endif
clrl _C_LABEL(curproc)
Lsw1:
/*
* Find the highest-priority queue that isn't empty,
* then take the first proc from that queue.
*/
clrl d0
lea _C_LABEL(sched_whichqs),a0
movl a0@,d1
Lswchk:
btst d0,d1
jne Lswfnd
addqb #1,d0
cmpb #32,d0
jne Lswchk
jra _C_LABEL(_Idle)
Lswfnd:
movw #PSL_HIGHIPL,sr | lock out interrupts
movl a0@,d1 | and check again...
bclr d0,d1
jeq Lsw1 | proc moved, rescan
movl d1,a0@ | update whichqs
moveq #1,d1 | double check for higher priority
lsll d0,d1 | process (which may have snuck in
subql #1,d1 | while we were finding this one)
andl a0@,d1
jeq Lswok | no one got in, continue
movl a0@,d1
bset d0,d1 | otherwise put this one back
movl d1,a0@
jra Lsw1 | and rescan
Lswok:
movl d0,d1
lslb #3,d1 | convert queue number to index
addl #_C_LABEL(sched_qs),d1 | locate queue (q)
movl d1,a1
cmpl a1@(P_FORW),a1 | anyone on queue?
movl _C_LABEL(sched_whichqs),%d0
jeq Lidle
Lsw1:
/*
* Interrupts are blocked, sched_lock is held. If
* we come here via Idle, %d0 contains the contents
* of a non-zero sched_whichqs.
*/
movl %d0,%d1
negl %d0
andl %d1,%d0
bfffo %d0{#0:#32},%d1
eorib #31,%d1
movl %d1,%d0
lslb #3,%d1 | convert queue number to index
addl #_C_LABEL(sched_qs),%d1 | locate queue (q)
movl %d1,%a1
movl %a1@(P_FORW),%a0 | p = q->p_forw
cmpal %d1,%a0 | anyone on queue?
jeq Lbadsw | no, panic
movl a1@(P_FORW),a0 | p = q->p_forw
#ifdef DIAGNOSTIC
tstl a0@(P_WCHAN)
jne Lbadsw
cmpb #SRUN,a0@(P_STAT)
jne Lbadsw
#endif
movl a0@(P_FORW),a1@(P_FORW) | q->p_forw = p->p_forw
movl a0@(P_FORW),a1 | q = p->p_forw
movl a0@(P_BACK),a1@(P_BACK) | q->p_back = p->p_back
cmpl a0@(P_FORW),d1 | anyone left on queue?
jeq Lsw2 | no, skip
movl _C_LABEL(sched_whichqs),d1
bset d0,d1 | yes, reset bit
movl d1,_C_LABEL(sched_whichqs)
movl %a0@(P_FORW),%a1@(P_FORW) | q->p_forw = p->p_forw
movl %a0@(P_FORW),%a1 | n = p->p_forw
movl %a0@(P_BACK),%a1@(P_BACK) | n->p_back = q
cmpal %d1,%a1 | anyone left on queue?
jne Lsw2 | yes, skip
movl _C_LABEL(sched_whichqs),%d1
bclr %d0,%d1 | no, clear bit
movl %d1,_C_LABEL(sched_whichqs)
Lsw2:
/* p->p_cpu initialized in fork1() for single-processor */
movb #SONPROC,a0@(P_STAT) | p->p_stat = SONPROC
@ -775,6 +768,18 @@ Lswnofpsave:
movl a0@(P_ADDR),a1 | get p_addr
movl a1,_C_LABEL(curpcb)
#if defined(LOCKDEBUG)
/*
* Done mucking with the run queues, release the
* scheduler lock, but keep interrupts out.
*/
movl %a0,sp@- | not args...
movl %a1,sp@- | ...just saving
jbsr _C_LABEL(sched_unlock_idle)
movl sp@+,%a1
movl sp@+,%a0
#endif
/*
* Load the new VM context (new MMU root pointer)
*/


@ -1,4 +1,4 @@
/* $NetBSD: locore.s,v 1.59 2000/06/11 14:20:46 minoura Exp $ */
/* $NetBSD: locore.s,v 1.60 2000/08/20 21:50:11 thorpej Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -47,6 +47,7 @@
#include "opt_compat_sunos.h"
#include "opt_ddb.h"
#include "opt_fpsp.h"
#include "opt_lockdebug.h"
#include "ite.h"
#include "fd.h"
@ -1013,6 +1014,8 @@ ASBSS(nullpcb,SIZEOF_PCB)
* At exit of a process, do a switch for the last time.
* Switch to a safe stack and PCB, and select a new process to run. The
* old stack and u-area will be freed by the reaper.
*
* MUST BE CALLED AT SPLHIGH!
*/
ENTRY(switch_exit)
movl sp@(4),a0
@ -1025,6 +1028,11 @@ ENTRY(switch_exit)
jbsr _C_LABEL(exit2)
lea sp@(4),sp | pop args
#if defined(LOCKDEBUG)
/* Acquire sched_lock */
jbsr _C_LABEL(sched_lock_idle)
#endif
jra _C_LABEL(cpu_switch)
/*
@ -1032,19 +1040,27 @@ ENTRY(switch_exit)
* to wait for something to come ready.
*/
ASENTRY_NOPROFILE(Idle)
movw #PSL_HIGHIPL,%sr
movl _C_LABEL(sched_whichqs),%d0
jne Lsw1
#if defined(LOCKDEBUG)
/* Release sched_lock */
jbsr _C_LABEL(sched_unlock_idle)
#endif
movw #PSL_LOWIPL,%sr
/* Try to zero some pages. */
movl _C_LABEL(uvm)+UVM_PAGE_IDLE_ZERO,%d0
jeq 1f
jbsr _C_LABEL(uvm_pageidlezero)
jra _ASM_LABEL(Idle)
jra 2f
1:
stop #PSL_LOWIPL
jra _ASM_LABEL(Idle)
2: movw #PSL_HIGHIPL,sr
#if defined(LOCKDEBUG)
/* Acquire sched_lock */
jbsr _C_LABEL(sched_lock_idle)
#endif
movl _C_LABEL(sched_whichqs),%d0
jeq _ASM_LABEL(Idle)
jra Lsw1
Lbadsw:
PANIC("switch")
@ -1073,10 +1089,14 @@ ENTRY(cpu_switch)
* Find the highest-priority queue that isn't empty,
* then take the first proc from that queue.
*/
movw #PSL_HIGHIPL,sr | lock out interrupts
movl _C_LABEL(sched_whichqs),d0
jeq _ASM_LABEL(Idle)
Lsw1:
/*
* Interrupts are blocked, sched_lock is held. If
* we come here via Idle, %d0 contains the contents
* of a non-zero sched_whichqs.
*/
movl d0,d1
negl d0
andl d1,d0
@ -1155,6 +1175,18 @@ Lswnofpsave:
movl a0@(P_ADDR),a1 | get p_addr
movl a1,_C_LABEL(curpcb)
#if defined(LOCKDEBUG)
/*
* Done mucking with the run queues, release the
* scheduler lock, but keep interrupts out.
*/
movl %a0,sp@- | not args...
movl %a1,sp@- | ...just saving
jbsr _C_LABEL(sched_unlock_idle)
movl sp@+,%a1
movl sp@+,%a0
#endif
/*
* Activate process's address space.
* XXX Should remember the last USTP value loaded, and call this


@ -1,4 +1,4 @@
/* $NetBSD: kern_resource.c,v 1.58 2000/06/27 17:41:25 mrg Exp $ */
/* $NetBSD: kern_resource.c,v 1.59 2000/08/20 21:50:11 thorpej Exp $ */
/*-
* Copyright (c) 1982, 1986, 1991, 1993
@ -187,6 +187,7 @@ donice(curp, chgp, n)
int n;
{
struct pcred *pcred = curp->p_cred;
int s;
if (pcred->pc_ucred->cr_uid && pcred->p_ruid &&
pcred->pc_ucred->cr_uid != chgp->p_ucred->cr_uid &&
@ -200,7 +201,9 @@ donice(curp, chgp, n)
if (n < chgp->p_nice && suser(pcred->pc_ucred, &curp->p_acflag))
return (EACCES);
chgp->p_nice = n;
SCHED_LOCK(s);
(void)resetpriority(chgp);
SCHED_UNLOCK(s);
return (0);
}


@ -1,4 +1,4 @@
/* $NetBSD: kern_sig.c,v 1.103 2000/07/27 14:01:57 mrg Exp $ */
/* $NetBSD: kern_sig.c,v 1.104 2000/08/20 21:50:11 thorpej Exp $ */
/*
* Copyright (c) 1982, 1986, 1989, 1991, 1993
@ -77,7 +77,7 @@
#include <uvm/uvm_extern.h>
void stop __P((struct proc *p));
static void proc_stop __P((struct proc *p));
void killproc __P((struct proc *, char *));
static int build_corename __P((struct proc *, char *));
#if COMPAT_NETBSD32
@ -196,7 +196,7 @@ sigaction1(p, signum, nsa, osa)
if (prop & SA_CANTMASK)
return (EINVAL);
(void) splhigh();
(void) splhigh(); /* XXXSMP */
ps->ps_sigact[signum] = *nsa;
sigminusset(&sigcantmask, &ps->ps_sigact[signum].sa_mask);
if ((prop & SA_NORESET) != 0)
@ -376,7 +376,7 @@ sigprocmask1(p, how, nss, oss)
*oss = p->p_sigmask;
if (nss) {
(void)splhigh();
(void)splhigh(); /* XXXSMP */
switch (how) {
case SIG_BLOCK:
sigplusset(nss, &p->p_sigmask);
@ -390,11 +390,11 @@ sigprocmask1(p, how, nss, oss)
p->p_sigcheck = 1;
break;
default:
(void)spl0();
(void)spl0(); /* XXXSMP */
return (EINVAL);
}
sigminusset(&sigcantmask, &p->p_sigmask);
(void)spl0();
(void)spl0(); /* XXXSMP */
}
return (0);
@ -480,11 +480,11 @@ sigsuspend1(p, ss)
*/
ps->ps_oldmask = p->p_sigmask;
ps->ps_flags |= SAS_OLDMASK;
(void) splhigh();
(void) splhigh(); /* XXXSMP */
p->p_sigmask = *ss;
p->p_sigcheck = 1;
sigminusset(&sigcantmask, &p->p_sigmask);
(void) spl0();
(void) spl0(); /* XXXSMP */
}
while (tsleep((caddr_t) ps, PPAUSE|PCATCH, "pause", 0) == 0)
@ -722,7 +722,7 @@ trapsignal(p, signum, code)
#endif
(*p->p_emul->e_sendsig)(ps->ps_sigact[signum].sa_handler,
signum, &p->p_sigmask, code);
(void) splhigh();
(void) splhigh(); /* XXXSMP */
sigplusset(&ps->ps_sigact[signum].sa_mask, &p->p_sigmask);
if (ps->ps_sigact[signum].sa_flags & SA_RESETHAND) {
sigdelset(&p->p_sigcatch, signum);
@ -730,7 +730,7 @@ trapsignal(p, signum, code)
sigaddset(&p->p_sigignore, signum);
ps->ps_sigact[signum].sa_handler = SIG_DFL;
}
(void) spl0();
(void) spl0(); /* XXXSMP */
} else {
ps->ps_code = code; /* XXX for core dump/debugger */
ps->ps_sig = signum; /* XXX to verify code */
@ -750,11 +750,14 @@ trapsignal(p, signum, code)
* regardless of the signal action (eg, blocked or ignored).
*
* Other ignored signals are discarded immediately.
*
* XXXSMP: Invoked as psignal() or sched_psignal().
*/
void
psignal(p, signum)
psignal1(p, signum, dolock)
struct proc *p;
int signum;
int dolock; /* XXXSMP: works, but icky */
{
int s, prop;
sig_t action;
@ -762,6 +765,12 @@ psignal(p, signum)
#ifdef DIAGNOSTIC
if (signum <= 0 || signum >= NSIG)
panic("psignal signal number");
/* XXXSMP: works, but icky */
if (dolock)
SCHED_ASSERT_UNLOCKED();
else
SCHED_ASSERT_LOCKED();
#endif
prop = sigprop[signum];
@ -816,9 +825,12 @@ psignal(p, signum)
*/
if (action == SIG_HOLD && ((prop & SA_CONT) == 0 || p->p_stat != SSTOP))
return;
s = splhigh();
switch (p->p_stat) {
/* XXXSMP: works, but icky */
if (dolock)
SCHED_LOCK(s);
switch (p->p_stat) {
case SSLEEP:
/*
* If process is sleeping uninterruptibly
@ -857,9 +869,14 @@ psignal(p, signum)
goto out;
sigdelset(&p->p_siglist, signum);
p->p_xstat = signum;
if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0)
psignal(p->p_pptr, SIGCHLD);
stop(p);
if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
/*
* XXXSMP: recursive call; don't lock
* the second time around.
*/
sched_psignal(p->p_pptr, SIGCHLD);
}
proc_stop(p); /* XXXSMP: recurse? */
goto out;
}
/*
@ -946,9 +963,11 @@ runfast:
if (p->p_priority > PUSER)
p->p_priority = PUSER;
run:
setrunnable(p);
setrunnable(p); /* XXXSMP: recurse? */
out:
splx(s);
/* XXXSMP: works, but icky */
if (dolock)
SCHED_UNLOCK(s);
}
static __inline int firstsig __P((const sigset_t *));
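The dolock flag deserves a note: psignal() can now be reached both
with and without sched_lock held.  Only the psignal1() body and the
sched_psignal() call sites appear in this diff; presumably the two
entry points are thin wrappers along these lines (hypothetical
definitions, for illustration only):

/*
 * Hypothetical wrappers -- the diff shows psignal1() and the
 * sched_psignal() call sites, but not these definitions.
 */
#define	psignal(p, sig)		psignal1((p), (sig), 1)	/* takes sched_lock */
#define	sched_psignal(p, sig)	psignal1((p), (sig), 0)	/* caller holds it */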
@ -996,7 +1015,7 @@ int
issignal(p)
struct proc *p;
{
int signum, prop;
int s, signum, prop;
sigset_t ss;
for (;;) {
@ -1027,8 +1046,11 @@ issignal(p)
if ((p->p_flag & P_FSTRACE) == 0)
psignal(p->p_pptr, SIGCHLD);
do {
stop(p);
SCHED_LOCK(s);
proc_stop(p);
mi_switch(p);
SCHED_ASSERT_UNLOCKED();
splx(s);
} while (!trace_req(p) && p->p_flag & P_TRACED);
/*
@ -1088,8 +1110,11 @@ issignal(p)
p->p_xstat = signum;
if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0)
psignal(p->p_pptr, SIGCHLD);
stop(p);
SCHED_LOCK(s);
proc_stop(p);
mi_switch(p);
SCHED_ASSERT_UNLOCKED();
splx(s);
break;
} else if (prop & SA_IGNORE) {
/*
@ -1133,14 +1158,16 @@ keep:
* via wakeup. Signals are handled elsewhere. The process must not be
* on the run queue.
*/
void
stop(p)
static void
proc_stop(p)
struct proc *p;
{
SCHED_ASSERT_LOCKED();
p->p_stat = SSTOP;
p->p_flag &= ~P_WAITED;
wakeup((caddr_t)p->p_pptr);
sched_wakeup((caddr_t)p->p_pptr);
}
/*
@ -1207,7 +1234,7 @@ postsig(signum)
ps->ps_sig = 0;
}
(*p->p_emul->e_sendsig)(action, signum, returnmask, code);
(void) splhigh();
(void) splhigh(); /* XXXSMP */
sigplusset(&ps->ps_sigact[signum].sa_mask, &p->p_sigmask);
if (ps->ps_sigact[signum].sa_flags & SA_RESETHAND) {
sigdelset(&p->p_sigcatch, signum);
@ -1215,7 +1242,7 @@ postsig(signum)
sigaddset(&p->p_sigignore, signum);
ps->ps_sigact[signum].sa_handler = SIG_DFL;
}
(void) spl0();
(void) spl0(); /* XXXSMP */
}
}
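
The dolock argument threaded through psignal1() above implements a simple lock-or-assert convention: the outermost caller acquires sched_lock, while recursive callers (such as the SIGCHLD notification issued from within psignal1() itself) merely assert that it is already held. A minimal sketch of the convention, using the SCHED_* macros this change adds to <sys/sched.h>; the function name and body are illustrative, not the real psignal1():

        /*
         * Sketch of the dolock convention (hypothetical function).
         * dolock != 0: caller does not hold sched_lock; take and release it.
         * dolock == 0: caller already holds sched_lock; just assert that.
         */
        void
        example_signal_op(struct proc *p, int dolock)
        {
                int s;

                if (dolock)
                        SCHED_LOCK(s);          /* splhigh() + simple_lock() */
                else
                        SCHED_ASSERT_LOCKED();

                /* ... examine and modify p->p_stat, run queues, etc. ... */

                if (dolock)
                        SCHED_UNLOCK(s);        /* simple_unlock() + splx() */
        }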


@ -1,4 +1,4 @@
/* $NetBSD: kern_synch.c,v 1.82 2000/08/07 22:10:53 thorpej Exp $ */
/* $NetBSD: kern_synch.c,v 1.83 2000/08/20 21:50:11 thorpej Exp $ */
/*-
* Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
@ -80,6 +80,7 @@
#include "opt_ddb.h"
#include "opt_ktrace.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include <sys/param.h>
#include <sys/systm.h>
@ -108,6 +109,8 @@ struct prochd sched_qs[RUNQUE_NQS]; /* run queues */
__volatile u_int32_t sched_whichqs; /* bitmap of non-empty queues */
struct slpque sched_slpque[SLPQUE_TABLESIZE]; /* sleep queues */
struct simplelock sched_lock = SIMPLELOCK_INITIALIZER;
void roundrobin(void *);
void schedcpu(void *);
void updatepri(struct proc *);
@ -141,6 +144,7 @@ roundrobin(void *arg)
spc->spc_flags |= SPCF_SEENRR;
splx(s);
}
/* XXXSMP: should need_resched() on all CPUs */
need_resched();
callout_reset(&roundrobin_ch, hz / 10, roundrobin, NULL);
}
@ -239,7 +243,7 @@ schedcpu(void *arg)
{
fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
struct proc *p;
int s;
int s, s1;
unsigned int newcpu;
int clkhz;
@ -277,6 +281,7 @@ schedcpu(void *arg)
p->p_cpticks = 0;
newcpu = (u_int)decay_cpu(loadfac, p->p_estcpu);
p->p_estcpu = newcpu;
SCHED_LOCK(s1);
resetpriority(p);
if (p->p_priority >= PUSER) {
if (p->p_stat == SRUN &&
@ -288,6 +293,7 @@ schedcpu(void *arg)
} else
p->p_priority = p->p_usrpri;
}
SCHED_UNLOCK(s1);
splx(s);
}
proclist_unlock_read();
@ -304,8 +310,13 @@ schedcpu(void *arg)
void
updatepri(struct proc *p)
{
unsigned int newcpu = p->p_estcpu;
fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
unsigned int newcpu;
fixpt_t loadfac;
SCHED_ASSERT_LOCKED();
newcpu = p->p_estcpu;
loadfac = loadfactor(averunnable.ldavg[0]);
if (p->p_slptime > 5 * loadfac)
p->p_estcpu = 0;
@ -390,7 +401,7 @@ ltsleep(void *ident, int priority, const char *wmesg, int timo,
ktrcsw(p, 1, 0);
#endif
s = splhigh(); /* XXXSMP: SCHED_LOCK(s) */
SCHED_LOCK(s);
#ifdef DIAGNOSTIC
if (ident == NULL)
@ -450,6 +461,7 @@ ltsleep(void *ident, int priority, const char *wmesg, int timo,
*/
dobiglock = 0;
#endif
SCHED_UNLOCK(s);
goto resume;
}
if (p->p_wchan == NULL) {
@ -458,6 +470,7 @@ ltsleep(void *ident, int priority, const char *wmesg, int timo,
/* See above. */
dobiglock = 0;
#endif
SCHED_UNLOCK(s);
goto resume;
}
} else
@ -477,19 +490,22 @@ ltsleep(void *ident, int priority, const char *wmesg, int timo,
}
#endif
/* scheduler_slock held */
SCHED_ASSERT_LOCKED();
mi_switch(p);
/* scheduler_slock held */
#ifdef DDB
/* handy breakpoint location after process "wakes" */
asm(".globl bpendtsleep ; bpendtsleep:");
#endif
SCHED_ASSERT_UNLOCKED();
splx(s);
resume:
KDASSERT(p->p_cpu != NULL);
KDASSERT(p->p_cpu == curcpu());
p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
splx(s); /* XXXSMP: SCHED_UNLOCK(s) */
#if 0 /* XXXSMP */
if (dobiglock) {
/*
@ -546,7 +562,8 @@ endtsleep(void *arg)
int s;
p = (struct proc *)arg;
s = splhigh();
SCHED_LOCK(s);
if (p->p_wchan) {
if (p->p_stat == SSLEEP)
setrunnable(p);
@ -554,7 +571,7 @@ endtsleep(void *arg)
unsleep(p);
p->p_flag |= P_TIMEOUT;
}
splx(s);
SCHED_UNLOCK(s);
}
/*
@ -565,9 +582,9 @@ unsleep(struct proc *p)
{
struct slpque *qp;
struct proc **hp;
int s;
s = splhigh();
SCHED_ASSERT_LOCKED();
if (p->p_wchan) {
hp = &(qp = SLPQUE(p->p_wchan))->sq_head;
while (*hp != p)
@ -577,7 +594,6 @@ unsleep(struct proc *p)
qp->sq_tailp = hp;
p->p_wchan = 0;
}
splx(s);
}
/*
@ -587,6 +603,8 @@ __inline void
awaken(struct proc *p)
{
SCHED_ASSERT_LOCKED();
if (p->p_slptime > 1)
updatepri(p);
p->p_slptime = 0;
@ -600,20 +618,48 @@ awaken(struct proc *p)
setrunqueue(p);
need_resched();
} else
wakeup(&proc0);
sched_wakeup(&proc0);
}
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
void
sched_unlock_idle(void)
{
simple_unlock(&sched_lock);
}
void
sched_lock_idle(void)
{
simple_lock(&sched_lock);
}
#endif /* MULTIPROCESSOR || LOCKDEBUG */
/*
* Make all processes sleeping on the specified identifier runnable.
*/
void
wakeup(void *ident)
{
struct slpque *qp;
struct proc *p, **q;
int s;
s = splhigh(); /* XXXSMP: SCHED_LOCK(s) */
SCHED_ASSERT_UNLOCKED();
SCHED_LOCK(s);
sched_wakeup(ident);
SCHED_UNLOCK(s);
}
void
sched_wakeup(void *ident)
{
struct slpque *qp;
struct proc *p, **q;
SCHED_ASSERT_LOCKED();
qp = SLPQUE(ident);
restart:
@ -634,7 +680,6 @@ wakeup(void *ident)
} else
q = &p->p_forw;
}
splx(s); /* XXXSMP: SCHED_UNLOCK(s) */
}
/*
@ -653,7 +698,7 @@ wakeup_one(void *ident)
best_sleepp = best_stopp = NULL;
best_sleepq = best_stopq = NULL;
s = splhigh(); /* XXXSMP: SCHED_LOCK(s) */
SCHED_LOCK(s);
qp = SLPQUE(ident);
@ -699,7 +744,7 @@ wakeup_one(void *ident)
if (p->p_stat == SSLEEP)
awaken(p);
}
splx(s); /* XXXSMP: SCHED_UNLOCK(s) */
SCHED_UNLOCK(s);
}
/*
@ -712,12 +757,13 @@ yield(void)
struct proc *p = curproc;
int s;
s = splstatclock();
SCHED_LOCK(s);
p->p_priority = p->p_usrpri;
p->p_stat = SRUN;
setrunqueue(p);
p->p_stats->p_ru.ru_nvcsw++;
mi_switch(p);
SCHED_ASSERT_UNLOCKED();
splx(s);
}
@ -739,12 +785,13 @@ preempt(struct proc *newp)
if (newp != NULL)
panic("preempt: cpu_preempt not yet implemented");
s = splstatclock();
SCHED_LOCK(s);
p->p_priority = p->p_usrpri;
p->p_stat = SRUN;
setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;
mi_switch(p);
SCHED_ASSERT_UNLOCKED();
splx(s);
}
@ -760,6 +807,8 @@ mi_switch(struct proc *p)
long s, u;
struct timeval tv;
SCHED_ASSERT_LOCKED();
KDASSERT(p->p_cpu != NULL);
KDASSERT(p->p_cpu == curcpu());
@ -823,6 +872,12 @@ mi_switch(struct proc *p)
uvmexp.swtch++;
cpu_switch(p);
/*
* Make sure that MD code released the scheduler lock before
* resuming us.
*/
SCHED_ASSERT_UNLOCKED();
/*
* We're running again; record our new start time. We might
* be running on a new CPU now, so don't use the cache'd
@ -855,9 +910,9 @@ rqinit()
void
setrunnable(struct proc *p)
{
int s;
s = splhigh();
SCHED_ASSERT_LOCKED();
switch (p->p_stat) {
case 0:
case SRUN:
@ -885,12 +940,12 @@ setrunnable(struct proc *p)
p->p_stat = SRUN;
if (p->p_flag & P_INMEM)
setrunqueue(p);
splx(s);
if (p->p_slptime > 1)
updatepri(p);
p->p_slptime = 0;
if ((p->p_flag & P_INMEM) == 0)
wakeup((caddr_t)&proc0);
sched_wakeup((caddr_t)&proc0);
else if (p->p_priority < curcpu()->ci_schedstate.spc_curpriority) {
/*
* XXXSMP
@ -923,6 +978,8 @@ resetpriority(struct proc *p)
{
unsigned int newpriority;
SCHED_ASSERT_LOCKED();
newpriority = PUSER + p->p_estcpu + NICE_WEIGHT * (p->p_nice - NZERO);
newpriority = min(newpriority, MAXPRI);
p->p_usrpri = newpriority;
@ -953,9 +1010,14 @@ resetpriority(struct proc *p)
void
schedclock(struct proc *p)
{
int s;
p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
SCHED_LOCK(s);
resetpriority(p);
SCHED_UNLOCK(s);
if (p->p_priority >= PUSER)
p->p_priority = p->p_usrpri;
}
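
The wakeup()/sched_wakeup() split above lets code that already holds sched_lock issue a wakeup without recursing on the lock: wakeup() is now a locking wrapper, and sched_wakeup() does the real work under SCHED_ASSERT_LOCKED(). A sketch of the two call paths (the helper names are hypothetical):

        /* Ordinary path: not holding sched_lock; wakeup() locks internally. */
        void
        notify_unlocked(void *chan)
        {
                SCHED_ASSERT_UNLOCKED();
                wakeup(chan);
        }

        /*
         * Path from inside a SCHED_LOCK section, as in proc_stop() or
         * setrunnable() above: call sched_wakeup() directly, since taking
         * sched_lock a second time would deadlock on the simplelock.
         */
        void
        notify_locked(void *chan)
        {
                SCHED_ASSERT_LOCKED();
                sched_wakeup(chan);
        }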


@ -1,4 +1,4 @@
/* $NetBSD: sys_generic.c,v 1.50 2000/08/02 20:48:37 thorpej Exp $ */
/* $NetBSD: sys_generic.c,v 1.51 2000/08/20 21:50:11 thorpej Exp $ */
/*
* Copyright (c) 1982, 1986, 1989, 1993
@ -976,7 +976,7 @@ selwakeup(sip)
p = pfind(sip->si_pid);
sip->si_pid = 0;
if (p != NULL) {
s = splhigh();
SCHED_LOCK(s);
if (p->p_wchan == (caddr_t)&selwait) {
if (p->p_stat == SSLEEP)
setrunnable(p);
@ -984,6 +984,6 @@ selwakeup(sip)
unsleep(p);
} else if (p->p_flag & P_SELECT)
p->p_flag &= ~P_SELECT;
splx(s);
SCHED_UNLOCK(s);
}
}


@ -1,4 +1,4 @@
/* $NetBSD: sys_process.c,v 1.63 2000/07/28 04:21:26 eeh Exp $ */
/* $NetBSD: sys_process.c,v 1.64 2000/08/20 21:50:11 thorpej Exp $ */
/*-
* Copyright (c) 1994 Christopher G. Demetriou. All rights reserved.
@ -92,7 +92,7 @@ sys_ptrace(p, v, retval)
struct proc *t; /* target process */
struct uio uio;
struct iovec iov;
int error, write, tmp;
int s, error, write, tmp;
/* "A foolish consistency..." XXX */
if (SCARG(uap, req) == PT_TRACE_ME)
@ -303,7 +303,9 @@ sys_ptrace(p, v, retval)
/* Finally, deliver the requested signal (or none). */
if (t->p_stat == SSTOP) {
t->p_xstat = SCARG(uap, data);
SCHED_LOCK(s);
setrunnable(t);
SCHED_UNLOCK(s);
} else {
if (SCARG(uap, data) != 0)
psignal(t, SCARG(uap, data));


@ -1,4 +1,4 @@
/* $NetBSD: procfs_ctl.c,v 1.17 1999/07/22 18:13:38 thorpej Exp $ */
/* $NetBSD: procfs_ctl.c,v 1.18 2000/08/20 21:50:11 thorpej Exp $ */
/*
* Copyright (c) 1993 Jan-Simon Pendry
@ -103,7 +103,7 @@ procfs_control(curp, p, op, sig)
struct proc *p;
int op, sig;
{
int error;
int s, error;
/*
* Attach - attaches the target process for debugging
@ -258,7 +258,9 @@ procfs_control(curp, p, op, sig)
/* Finally, deliver the requested signal (or none). */
if (p->p_stat == SSTOP) {
p->p_xstat = sig;
SCHED_LOCK(s);
setrunnable(p);
SCHED_UNLOCK(s);
} else {
if (sig != 0)
psignal(p, sig);


@ -1,4 +1,4 @@
/* $NetBSD: sync_subr.c,v 1.4 2000/07/09 00:59:06 mycroft Exp $ */
/* $NetBSD: sync_subr.c,v 1.5 2000/08/20 21:50:11 thorpej Exp $ */
/*
* Copyright 1997 Marshall Kirk McKusick. All Rights Reserved.
@ -249,10 +249,15 @@ speedup_syncer()
{
int s;
s = splhigh();
/*
* XXX Should not be doing this, should be using ltsleep()
* XXX with a timeout, rather than sleeping on lbolt.
*/
SCHED_LOCK(s);
if (updateproc && updateproc->p_wchan == &lbolt)
setrunnable(updateproc);
splx(s);
SCHED_UNLOCK(s);
if (rushjob < syncdelay / 2) {
rushjob += 1;
stat_rush_requests += 1;
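
The XXX comment added above points at the longer-term fix: give the syncer its own wait channel and a timeout via ltsleep(), so speedup_syncer() need not inspect updateproc->p_wchan at all. A hedged sketch of that alternative; syncer_chan and both function names are hypothetical:

        int syncer_chan;                /* hypothetical private wait channel */

        /* The syncer main loop would sleep like this instead of on lbolt. */
        void
        syncer_pause(void)
        {
                (void) ltsleep(&syncer_chan, PPAUSE, "syncer", hz, NULL);
        }

        /* speedup_syncer() could then simply poke the channel. */
        void
        speedup_syncer_sketch(void)
        {
                wakeup(&syncer_chan);
        }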


@ -1,4 +1,4 @@
/* $NetBSD: sched.h,v 1.5 2000/06/03 20:42:44 thorpej Exp $ */
/* $NetBSD: sched.h,v 1.6 2000/08/20 21:50:12 thorpej Exp $ */
/*-
* Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
@ -79,6 +79,11 @@
#ifndef _SYS_SCHED_H_
#define _SYS_SCHED_H_
#if defined(_KERNEL) && !defined(_LKM)
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#endif
/*
* Posix defines a <sched.h> which may want to include <sys/sched.h>
*/
@ -172,7 +177,8 @@ extern __volatile u_int32_t sched_whichqs;
struct proc;
void schedclock __P((struct proc *p));
void schedclock(struct proc *p);
void sched_wakeup(void *);
/*
* scheduler_fork_hook:
@ -195,5 +201,39 @@ do { \
(parent)->p_estcpu = ESTCPULIM((parent)->p_estcpu + \
(child)->p_estcpu); \
} while (0)
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
#include <sys/lock.h>
extern struct simplelock sched_lock;
#define SCHED_ASSERT_LOCKED() LOCK_ASSERT(simple_lock_held(&sched_lock))
#define SCHED_ASSERT_UNLOCKED() LOCK_ASSERT(simple_lock_held(&sched_lock) == 0)
#define SCHED_LOCK(s) \
do { \
s = splhigh(); \
simple_lock(&sched_lock); \
} while (0)
#define SCHED_UNLOCK(s) \
do { \
simple_unlock(&sched_lock); \
splx(s); \
} while (0)
void sched_lock_idle(void);
void sched_unlock_idle(void);
#else /* ! MULTIPROCESSOR || LOCKDEBUG */
#define SCHED_ASSERT_LOCKED() /* nothing */
#define SCHED_ASSERT_UNLOCKED() /* nothing */
#define SCHED_LOCK(s) s = splhigh()
#define SCHED_UNLOCK(s) splx(s)
#endif /* MULTIPROCESSOR || LOCKDEBUG */
#endif /* _KERNEL */
#endif /* _SYS_SCHED_H_ */
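
The SCHED_LOCK()/SCHED_UNLOCK() pair defined above couples the interrupt priority change to the lock: under MULTIPROCESSOR or LOCKDEBUG each critical section is splhigh() plus the sched_lock simplelock, while kernels built without either option compile down to the traditional splhigh()/splx() with no lock at all. A sketch of a typical critical section, in the style of the selwakeup() change in this commit (the function name is hypothetical):

        void
        kick_sleeper(struct proc *p)
        {
                int s;

                SCHED_LOCK(s);          /* splhigh() + simple_lock(&sched_lock) */
                if (p->p_stat == SSLEEP)
                        setrunnable(p); /* asserts sched_lock is held */
                SCHED_UNLOCK(s);        /* simple_unlock(&sched_lock) + splx(s) */
        }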


@ -1,4 +1,4 @@
/* $NetBSD: signalvar.h,v 1.23 1999/12/30 15:53:09 eeh Exp $ */
/* $NetBSD: signalvar.h,v 1.24 2000/08/20 21:50:12 thorpej Exp $ */
/*
* Copyright (c) 1991, 1993
@ -147,7 +147,9 @@ void gsignal __P((int pgid, int sig));
int issignal __P((struct proc *p));
void pgsignal __P((struct pgrp *pgrp, int sig, int checkctty));
void postsig __P((int sig));
void psignal __P((struct proc *p, int sig));
void psignal1 __P((struct proc *p, int sig, int dolock));
#define psignal(p, sig) psignal1((p), (sig), 1)
#define sched_psignal(p, sig) psignal1((p), (sig), 0)
void siginit __P((struct proc *p));
void trapsignal __P((struct proc *p, int sig, u_long code));
void sigexit __P((struct proc *, int));
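
The psignal()/sched_psignal() macros above pin down the two entry points into psignal1(). A sketch of how call sites choose between them; the wrapper functions are illustrative, not part of this change:

        /* Top-level delivery: psignal1() acquires sched_lock itself. */
        void
        deliver_plain(struct proc *p)
        {
                psignal(p, SIGTERM);            /* psignal1(p, SIGTERM, 1) */
        }

        /*
         * From code already under SCHED_LOCK, e.g. notifying a parent of
         * a stop from within psignal1(): avoid recursing on the lock.
         */
        void
        deliver_locked(struct proc *p)
        {
                SCHED_ASSERT_LOCKED();
                sched_psignal(p->p_pptr, SIGCHLD);      /* psignal1(..., 0) */
        }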