Since cpu_switch (aka Swtch) is now called at splsched() with sched_lock
locked, cpu_exit needs to do that too.  Since in the LOCKDEBUG case we have
to use a CALLS, which wipes out R0-R5, change the convention for Swtch so
that the proc pointer is passed in R6 and R6 is treated as clobbered.  This
way Swtch itself doesn't have to save and restore the proc pointer
explicitly.
matt 2000-08-27 00:21:46 +00:00
parent cbac1a91d9
commit 98e5e148fa
2 changed files with 23 additions and 9 deletions
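
Before the diff proper, a brief orientation sketch may help.  The block below restates the new cpu_switch() macro from the macros.h hunk with explanatory comments added; the comments are editorial glosses, not part of the committed header.

    /*
     * Sketch of the new convention (see the macros.h hunk below).
     * The outgoing proc is handed to Swtch in r6.  Under the VAX C
     * calling convention r6 is callee-saved, so it survives any
     * `calls' into C that Swtch makes (sched_unlock_idle() and
     * friends in LOCKDEBUG kernels), whereas r0-r5 do not.
     * r0-r6 are all listed as clobbered: r0-r5 because those C calls
     * may trash them, r6 because Swtch no longer saves and restores
     * the proc pointer itself.
     */
    #define cpu_switch(p) \
            __asm__ __volatile__("movl %0,r6;movpsl -(sp);jsb Swtch" \
                :: "g"(p) : "r0","r1","r2","r3","r4","r5","r6");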


@@ -1,4 +1,4 @@
-/* $NetBSD: macros.h,v 1.20 2000/07/19 01:02:52 matt Exp $ */
+/* $NetBSD: macros.h,v 1.21 2000/08/27 00:21:46 matt Exp $ */
/*
* Copyright (c) 1994, 1998, 2000 Ludd, University of Lule}, Sweden.
@@ -276,8 +276,8 @@ skpc(int mask, size_t size, u_char *cp)
__asm__ __volatile("movl %0,r0;jsb Remrq":: "g"(p):"r0","r1","r2");
#define cpu_switch(p) \
__asm__ __volatile("movl %0,r0;movpsl -(sp);jsb Swtch" \
::"g"(p):"r0","r1","r2","r3");
__asm__ __volatile("movl %0,r6;movpsl -(sp);jsb Swtch" \
::"g"(p):"r0","r1","r2","r3","r4","r5","r6");
/*
* Interlock instructions. Used both in multiprocessor environments to

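A hedged usage note on the widened clobber list above: GCC treats the clobber list of an extended asm as registers whose contents are destroyed, so a C caller of cpu_switch() can no longer keep anything live in r0-r6 across the switch.  A hypothetical caller, for illustration only (it assumes the macro and struct proc are in scope):

    /* Hypothetical caller -- not from the tree. */
    void
    example_yield(struct proc *p)
    {
            /*
             * Because r0-r6 appear in the clobber list, the compiler
             * spills anything it was caching in those registers
             * (including its own copy of p) before the jsb to Swtch
             * and reloads it afterwards.
             */
            cpu_switch(p);
    }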

@@ -1,4 +1,4 @@
-/* $NetBSD: subr.s,v 1.54 2000/08/26 15:13:23 matt Exp $ */
+/* $NetBSD: subr.s,v 1.55 2000/08/27 00:21:46 matt Exp $ */
/*
* Copyright (c) 1994 Ludd, University of Lule}, Sweden.
@@ -329,12 +329,12 @@ idle:
3: bbssi $0,_C_LABEL(sched_lock),3b # acquire sched lock
#endif
tstl _C_LABEL(sched_whichqs) # Anything ready to run?
-bneq Swtch # Yes, goto switch again.
-brb idle # nope, continue to idlely loop
+beql idle # nope, continue to idle loop
+brb Swtch # Yes, goto switch again.
#
# cpu_switch, cpu_exit and the idle loop implemented in assembler
-# for efficiency. r0 contains pointer to last process. This is
+# for efficiency. r6 contains pointer to last process. This is
# called at IPL_HIGH.
#
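
As a reading aid, the retry logic in the idle hunk above amounts to the C sketch below.  It is illustrative only; the earlier part of the idle loop (lowering IPL and releasing sched_lock while nothing is runnable) lies outside this hunk and is only alluded to in the comment.

    /* Illustrative C rendering of the branch change above. */
    for (;;) {
            /* ...idle, then (re)acquire sched_lock as shown above... */
            if (sched_whichqs != 0)
                    break;  /* something is runnable: fall into Swtch */
            /* still nothing runnable after taking the lock: idle again */
    }
    /* Swtch runs at IPL_HIGH with sched_lock held and now receives
       the previous proc in r6 rather than r0. */
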
@@ -351,7 +351,13 @@ JSBENTRY(Swtch)
bvc 1f # check if something on queue
pushab noque
calls $1,_C_LABEL(panic)
+#ifdef __ELF__
+.section .rodata
+#endif
noque: .asciz "swtch"
+#ifdef __ELF__
+.text
+#endif
#endif
1: bneq 2f # more processes on queue?
bbsc r3,_C_LABEL(sched_whichqs),2f # no, clear bit in whichqs
@@ -364,7 +370,7 @@ noque: .asciz "swtch"
movb $SONPROC,P_STAT(r2) # p->p_stat = SONPROC;
movl r2,CI_CURPROC(r1) # set new process running
clrl CI_WANT_RESCHED(r1) # we are now changing process
-cmpl r0,r2 # Same process?
+cmpl r6,r2 # Same process?
bneq 1f # No, continue
#if defined(LOCKDEBUG)
calls $0,_C_LABEL(sched_unlock_idle)
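
The hunk above is where the new register convention pays off: the previous proc arrives in, and stays in, r6, so it is still intact here even in LOCKDEBUG kernels, where the `calls' into sched_lock_idle()/sched_unlock_idle() along the way would have destroyed r0.  Roughly, and for illustration only:

    /* Illustrative: the same-process check, previous proc now in r6. */
    if (newproc == prevproc) {
            /* same process chosen again: no switch needed, just
               release sched_lock (LOCKDEBUG variant shown) */
            sched_unlock_idle();
    }
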
@@ -391,8 +397,10 @@ noque: .asciz "swtch"
#
svpctx
mtpr r3,$PR_PCBB
+#if defined(MULTIPROCESSOR)
.globl _C_LABEL(tramp) # used to kick off multiprocessor systems.
_C_LABEL(tramp):
+#endif
ldpctx
#if defined(LOCKDEBUG)
calls $0,_C_LABEL(sched_unlock_idle)
@@ -416,7 +424,13 @@ ENTRY(cpu_exit,0)
mtpr r7,$PR_SSP # In case...
pushl r6
calls $1,_C_LABEL(exit2) # release last resources.
-clrl r0
mtpr $IPL_HIGH,$PR_IPL # block all types of interrupts
+#if defined(LOCKDEBUG)
+calls $0,_C_LABEL(sched_lock_idle)
+#elif defined(MULTIPROCESSOR)
+1: bbssi $0,_C_LABEL(sched_lock),1b # acquire sched lock
+#endif
+clrl r6
brw Swtch
#
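
The cpu_exit hunk above is the other half of the commit message: exiting now enters Swtch at IPL_HIGH with sched_lock held, and with r6 (rather than r0) cleared to say "no previous process".  A rough C rendering, for orientation only; exit2() and sched_lock_idle() are the routines named in the diff, splhigh() stands for the mtpr $IPL_HIGH,$PR_IPL, the wrapper function is hypothetical, and the real code stays in assembler:

    /* Illustrative sketch of the tail of cpu_exit after this change. */
    void
    cpu_exit_tail_sketch(struct proc *p)
    {
            exit2(p);               /* release the last resources */
            (void)splhigh();        /* block all types of interrupts */
    #if defined(LOCKDEBUG)
            sched_lock_idle();      /* Swtch is entered with sched_lock held */
    #elif defined(MULTIPROCESSOR)
            /* spin with bbssi on the sched_lock interlock bit */
    #endif
            /* brw Swtch with the previous-proc register (r6) cleared */
    }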