Don't drop to spl0 in cpu_switch/cpu_switchto. Do it in the idle loop
instead. With this change, we no longer need to save the current
interrupt level in the switchframe. This is no great loss, since both
cpu_switch and cpu_switchto are always called at splsched, so the
process' spl is effectively saved somewhere in the callstack anyway.

This fixes an evbarm problem reported by Allen Briggs: an lwp gets into
sa_switch -> mi_switch with newl != NULL when it is the last element on
the runqueue, so it hits the second branch of:

        if (newl == NULL) {
                retval = cpu_switch(l, NULL);
        } else {
                remrunqueue(newl);
                cpu_switchto(l, newl);
                retval = 0;
        }

mi_switch calls remrunqueue() and cpu_switchto();
cpu_switchto unlocks the sched lock;
cpu_switchto drops the CPU priority;
softclock is received;
schedcpu is called from softclock;
schedcpu hits the first if () {} block here:

        if (l->l_priority >= PUSER) {
                if (l->l_stat == LSRUN &&
                    (l->l_flag & L_INMEM) &&
                    (l->l_priority / PPQ) != (l->l_usrpri / PPQ)) {
                        remrunqueue(l);
                        l->l_priority = l->l_usrpri;
                        setrunqueue(l);
                } else
                        l->l_priority = l->l_usrpri;
        }

Since mi_switch has already run remrunqueue(), the lwp has been removed
from the runqueue but has not yet been put back on any queue, so this
second remrunqueue() panics.
parent 65d5587ddd
commit 52c15bbd20
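For readers who find the assembly below hard to follow, here is a rough,
self-contained C sketch of the new idle-loop ordering the diff implements.
The stub functions only model the primitives named in the diff
(sched_unlock_idle, sched_lock_idle, _spllower, splx, whichqs); this is an
illustration of the spl discipline, not actual NetBSD kernel code.

#include <stdio.h>

static int curspl = 6;                 /* pretend we entered at splsched */
static volatile unsigned int whichqs;  /* bitmask of non-empty run queues */

/* Stubs standing in for the machine-dependent primitives named in the diff. */
static int  _spllower(int ipl) { int old = curspl; curspl = ipl; return old; }
static void splx(int ipl)      { curspl = ipl; }
static void sched_unlock_idle(void) { /* stub: real code releases sched_lock */ }
static void sched_lock_idle(void)   { /* stub: real code retakes sched_lock */ }

/*
 * Sketch of the new idle loop: cpu_switch/cpu_switchto stay at splsched;
 * only idle drops to spl0, and it restores the saved level before going
 * back to the switch_search path.
 */
static void
idle(void)
{
        int oldspl;

        sched_unlock_idle();       /* drop the sched lock first */
        oldspl = _spllower(0);     /* drop to spl0, remember the old level */

        while (whichqs == 0)
                continue;          /* spin (or power-save sleep) until work */

        splx(oldspl);              /* back to the caller's spl (splsched)... */
        sched_lock_idle();         /* ...then retake the lock and resume the
                                    * switch_search path */
}

int
main(void)
{
        whichqs = 1;               /* pretend an lwp just became runnable */
        idle();
        printf("resumed switch path at spl %d\n", curspl);
        return 0;
}

The point of the change is visible in the sketch: the old interrupt level now
lives in a local on the idle loop's callstack rather than in the switchframe,
and cpu_switch/cpu_switchto never touch it.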
cpuswitch.S

@@ -1,4 +1,4 @@
/* $NetBSD: cpuswitch.S,v 1.37 2003/10/13 21:44:27 scw Exp $ */
/* $NetBSD: cpuswitch.S,v 1.38 2003/10/23 08:59:10 scw Exp $ */

/*
 * Copyright 2003 Wasabi Systems, Inc.
@@ -195,28 +195,45 @@ ASENTRY_NP(idle)
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
        bl      _C_LABEL(sched_unlock_idle)
#endif
        ldr     r3, .Lcpu_do_powersave

        /* Enable interrupts */
        IRQenable

        ldr     r6, .Lcpu_do_powersave

        /* Lower the spl level to spl0 and get the current spl level. */
#ifdef __NEWINTR
        mov     r0, #(IPL_NONE)
        bl      _C_LABEL(_spllower)
#else /* ! __NEWINTR */
        mov     r0, #(_SPL_0)
        bl      _C_LABEL(splx)
#endif /* __NEWINTR */

        /* Old interrupt level in r0 */

        /* If we don't want to sleep, use a simpler loop. */
        ldr     r3, [r3]                /* r3 = cpu_do_powersave */
        teq     r3, #0
        ldr     r6, [r6]                /* r6 = cpu_do_powersave */
        teq     r6, #0
        bne     2f

        /* Non-powersave idle. */
1:      /* should maybe do uvm pageidlezero stuff here */
        ldr     r3, [r7]                /* r3 = whichqs */
        teq     r3, #0x00000000
        bne     .Lswitch_search
        b       1b
        beq     1b
        adr     lr, .Lswitch_search
        b       _C_LABEL(splx)          /* Restore ipl, return to switch_search */

2:      /* Powersave idle. */
        ldr     r4, .Lcpufuncs
        mov     r6, r0                  /* Preserve old interrupt level */

3:      ldr     r3, [r7]                /* r3 = whichqs */
        teq     r3, #0x00000000
        bne     .Lswitch_search
        moveq   r0, r6
        adreq   lr, .Lswitch_search
        beq     _C_LABEL(splx)          /* Restore ipl, return to switch_search */

        /* if saving power, don't want to pageidlezero */
        mov     r0, #0
@@ -259,25 +276,7 @@ ENTRY(cpu_switch)
        /* stash the old proc while we call functions */
        mov     r5, r0

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
        /* release the sched_lock before handling interrupts */
        bl      _C_LABEL(sched_unlock_idle)
#endif

        /* Lower the spl level to spl0 and get the current spl level. */
#ifdef __NEWINTR
        mov     r0, #(IPL_NONE)
        bl      _C_LABEL(_spllower)
#else /* ! __NEWINTR */
        mov     r0, #(_SPL_0)
        bl      _C_LABEL(splx)
#endif /* __NEWINTR */

        /* Push the old spl level onto the stack */
        str     r0, [sp, #-0x0004]!

        /* First phase : find a new lwp */

        ldr     r7, .Lwhichqs

        /* rem: r5 = old lwp */
@@ -285,9 +284,6 @@ ENTRY(cpu_switch)

.Lswitch_search:
        IRQdisable
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
        bl      _C_LABEL(sched_lock_idle)
#endif

        /* Do we have any active queues */
        ldr     r3, [r7]
@@ -369,8 +365,8 @@ ENTRY(cpu_switch)
        str     r0, [r7, #(L_BACK)]

        /* Update the RAM copy of the queue not empty flags word. */
        ldr     r7, .Lwhichqs
        str     r3, [r7]
        ldreq   r7, .Lwhichqs
        streq   r3, [r7]

        /* rem: r1 = old lwp */
        /* rem: r3 = whichqs - NOT NEEDED ANY MORE */
@@ -398,7 +394,13 @@ ENTRY(cpu_switch)
        mov     r1, r7
#endif

.Lswitch_resume:
        /* rem: r1 = old lwp */
        /* rem: r4 = return value [not used if came from cpu_switchto()] */
        /* rem: r6 = new process */
        /* rem: interrupts are disabled */

#ifdef MULTIPROCESSOR
        /* XXX use curcpu() */
        ldr     r0, .Lcpu_info_store
@@ -745,16 +747,11 @@ ENTRY(cpu_switch)
         */

        ldr     r2, [r5, #(P_NRAS)]
        ldr     r4, [r7, #(PCB_TF)]     /* r4 = trapframe (used below) */
        ldr     r1, [r7, #(PCB_TF)]     /* r1 = trapframe (used below) */
        teq     r2, #0                  /* p->p_nras == 0? */
        bne     .Lswitch_do_ras         /* no, check for one */

.Lswitch_return:

        /* Get the spl level from the stack and update the current spl level */
        ldr     r0, [sp], #0x0004
        bl      _C_LABEL(splx)

        /* cpu_switch returns 1 == switched, 0 == didn't switch */
        mov     r0, r4

@@ -765,11 +762,12 @@ ENTRY(cpu_switch)
        ldmfd   sp!, {r4-r7, pc}

.Lswitch_do_ras:
        ldr     r1, [r4, #(TF_PC)]      /* second ras_lookup() arg */
        ldr     r1, [r1, #(TF_PC)]      /* second ras_lookup() arg */
        mov     r0, r5                  /* first ras_lookup() arg */
        bl      _C_LABEL(ras_lookup)
        cmn     r0, #1                  /* -1 means "not in a RAS" */
        strne   r0, [r4, #(TF_PC)]
        ldrne   r1, [r7, #(PCB_TF)]
        strne   r0, [r1, #(TF_PC)]
        b       .Lswitch_return

.Lswitch_exited:
@@ -798,55 +796,25 @@ ENTRY(cpu_switch)
ENTRY(cpu_switchto)
        stmfd   sp!, {r4-r7, lr}

        /* Lower the spl level to spl0 and get the current spl level. */
        mov     r6, r0                  /* save old lwp */
        mov     r5, r1                  /* save new lwp */
        mov     r6, r1                  /* save new lwp */

#if defined(LOCKDEBUG)
        /* release the sched_lock before handling interrupts */
        mov     r5, r0                  /* save old lwp */
        bl      _C_LABEL(sched_unlock_idle)
#endif

#ifdef __NEWINTR
        mov     r0, #(IPL_NONE)
        bl      _C_LABEL(_spllower)
#else /* ! __NEWINTR */
#ifdef spl0
        mov     r0, #(_SPL_0)
        bl      _C_LABEL(splx)
        mov     r1, r5
#else
        bl      _C_LABEL(spl0)
#endif /* spl0 */
#endif /* __NEWINTR */

        /* Push the old spl level onto the stack */
        str     r0, [sp, #-0x0004]!
        mov     r1, r0
#endif

        IRQdisable
#if defined(LOCKDEBUG)
        bl      _C_LABEL(sched_lock_idle)
#endif

        mov     r0, r6                  /* restore old lwp */
        mov     r1, r5                  /* restore new lwp */

        /* rem: r0 = old lwp */
        /* rem: r1 = new lwp */
        /* rem: interrupts are disabled */

        /*
         * Okay, set up registers the way cpu_switch() wants them,
         * and jump into the middle of it (where we bring up the
         * new process).
         *
         * r1 = old lwp (r6 = new lwp)
         */
        mov     r6, r1                  /* r6 = new lwp */
#if defined(LOCKDEBUG)
        mov     r5, r0                  /* preserve old lwp */
        bl      _C_LABEL(sched_unlock_idle)
        mov     r1, r5                  /* r1 = old lwp */
#else
        mov     r1, r0                  /* r1 = old lwp */
#endif
        b       .Lswitch_resume

        /*
@@ -1007,6 +975,14 @@ ENTRY(savectx)
        ldmfd   sp!, {r4-r7, pc}

ENTRY(proc_trampoline)
#ifdef __NEWINTR
        mov     r0, #(IPL_NONE)
        bl      _C_LABEL(_spllower)
#else /* ! __NEWINTR */
        mov     r0, #(_SPL_0)
        bl      _C_LABEL(splx)
#endif /* __NEWINTR */

#ifdef MULTIPROCESSOR
        bl      _C_LABEL(proc_trampoline_mp)
#endif

vm_machdep.c

@@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.29 2003/07/15 00:24:42 lukem Exp $ */
/* $NetBSD: vm_machdep.c,v 1.30 2003/10/23 08:59:10 scw Exp $ */

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
@@ -44,7 +44,7 @@
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.29 2003/07/15 00:24:42 lukem Exp $");
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.30 2003/10/23 08:59:10 scw Exp $");

#include "opt_armfpe.h"
#include "opt_pmap_debug.h"
@@ -197,7 +197,6 @@ cpu_lwp_fork(l1, l2, stack, stacksize, func, arg)
        tf->tf_usr_sp = (u_int)stack + stacksize;

        sf = (struct switchframe *)tf - 1;
        sf->sf_spl = 0;         /* always equivalent to spl0() */
        sf->sf_r4 = (u_int)func;
        sf->sf_r5 = (u_int)arg;
        sf->sf_pc = (u_int)proc_trampoline;
@@ -211,7 +210,6 @@ cpu_setfunc(struct lwp *l, void (*func)(void *), void *arg)
        struct trapframe *tf = pcb->pcb_tf;
        struct switchframe *sf = (struct switchframe *)tf - 1;

        sf->sf_spl = 0;         /* always equivalent to spl0() */
        sf->sf_r4 = (u_int)func;
        sf->sf_r5 = (u_int)arg;
        sf->sf_pc = (u_int)proc_trampoline;

frame.h

@@ -1,4 +1,4 @@
/* $NetBSD: frame.h,v 1.5 2002/10/19 00:10:54 bjh21 Exp $ */
/* $NetBSD: frame.h,v 1.6 2003/10/23 08:59:10 scw Exp $ */

/*
 * Copyright (c) 1994-1997 Mark Brinicombe.
@@ -83,7 +83,6 @@ typedef struct irqframe {
 */

struct switchframe {
        int     sf_spl;
        u_int   sf_r4;
        u_int   sf_r5;
        u_int   sf_r6;
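
To make the switchframe change concrete, here is a hedged C sketch of the
post-change setup. The struct below is an abbreviated stand-in (the real
struct switchframe in frame.h has more fields than the truncated hunk above
shows), and the function simply mirrors the cpu_setfunc()/cpu_lwp_fork()
assignments: with sf_spl gone, only the callee-saved registers and the
resume PC are seeded.

#include <stdint.h>
#include <stdio.h>

/* Abbreviated stand-in for struct switchframe; not the real layout. */
struct switchframe_sketch {
        unsigned int sf_r4;     /* function for proc_trampoline to call */
        unsigned int sf_r5;     /* its argument */
        unsigned int sf_r6;
        unsigned int sf_pc;     /* resume PC: proc_trampoline */
        /* no sf_spl any more -- the idle loop's splx() handles the level */
};

static void trampoline_stub(void) { }   /* stands in for proc_trampoline */

/* Mirrors the cpu_setfunc()/cpu_lwp_fork() assignments in the hunks above. */
static void
setfunc_sketch(struct switchframe_sketch *sf, void (*func)(void *), void *arg)
{
        sf->sf_r4 = (unsigned int)(uintptr_t)func;
        sf->sf_r5 = (unsigned int)(uintptr_t)arg;
        sf->sf_pc = (unsigned int)(uintptr_t)trampoline_stub;
}

int
main(void)
{
        struct switchframe_sketch sf = { 0, 0, 0, 0 };

        setfunc_sketch(&sf, (void (*)(void *))0, (void *)0);
        printf("sf_pc = %#x\n", sf.sf_pc);
        return 0;
}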