Sprinkle KERNEL_PROC_LOCK()s at the kernel entry points.

pk 2001-03-03 19:11:02 +00:00
parent 439283d3c2
commit bb5f16e6e0
2 changed files with 121 additions and 37 deletions
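The pattern is the same at every entry point touched below: a syscall, trap, or fault that enters the kernel from user space takes the per-process "big lock" before touching shared kernel state and drops it again on the way back out to user mode. A minimal sketch of that shape, modelled on the syscall() hunk in trap.c below (schematic only, not compilable on its own; dispatch_syscall() is a stand-in for the real system-call dispatch, and ktrace, signal posting and the FPU hand-off are omitted):

/* Sketch of the entry-point locking pattern used throughout this commit. */
void
syscall_entry_sketch(struct proc *p, struct trapframe *tf, int code)
{
        uvmexp.syscalls++;              /* XXXSMP: statistic, still unlocked */
        KERNEL_PROC_LOCK(p);            /* we came from user mode: take the biglock */
        dispatch_syscall(p, tf, code);  /* stand-in for the real dispatch (assumption) */
        KERNEL_PROC_UNLOCK(p);          /* drop it before ... */
        userret(p, tf->tf_pc, p->p_sticks);     /* ... returning to user mode */
        share_fpu(p, tf);
}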

memreg.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: memreg.c,v 1.28 1998/09/21 10:32:00 pk Exp $ */
+/*	$NetBSD: memreg.c,v 1.29 2001/03/03 19:11:02 pk Exp $ */
 
 /*
  * Copyright (c) 1992, 1993
@@ -48,6 +48,7 @@
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/proc.h>
 #include <sys/device.h>
 
 #include <machine/autoconf.h>
@@ -242,9 +243,11 @@ hardmemerr4m(type, sfsr, sfva, afsr, afva)
  * once, and then fail if we get called again.
  */
+/* XXXSMP */
 static int addrold = (int) 0xdeadbeef;	/* We pick an unlikely address */
 static int addroldtop = (int) 0xdeadbeef;
 static int oldtype = -1;
+/* XXXSMP */
 
 void
 hypersparc_memerr(type, sfsr, sfva, tf)
@@ -256,6 +259,9 @@ hypersparc_memerr(type, sfsr, sfva, tf)
 	u_int afsr;
 	u_int afva;
 
+	if ((tf->tf_psr & PSR_PS) == 0)
+		KERNEL_PROC_LOCK(curproc);
+
 	(*cpuinfo.get_asyncflt)(&afsr, &afva);
 	if ((afsr & AFSR_AFO) != 0) {	/* HS async fault! */
@@ -268,10 +274,15 @@ hypersparc_memerr(type, sfsr, sfva, tf)
 		oldtype = -1;
 		addrold = afva;
 		addroldtop = afsr & AFSR_AFA;
-		return;
 	}
+out:
+	if ((tf->tf_psr & PSR_PS) == 0)
+		KERNEL_PROC_UNLOCK(curproc);
+	return;
+
 hard:
 	hardmemerr4m(type, sfsr, sfva, afsr, afva);
+	goto out;
 }
 
 void
@@ -284,6 +295,9 @@ viking_memerr(type, sfsr, sfva, tf)
 	u_int afsr=0;	/* No Async fault registers on the viking */
 	u_int afva=0;
 
+	if ((tf->tf_psr & PSR_PS) == 0)
+		KERNEL_PROC_LOCK(curproc);
+
 	if (type == T_STOREBUFFAULT) {
 
 		/*
@@ -303,8 +317,6 @@ viking_memerr(type, sfsr, sfva, tf)
 		sta(SRMMU_PCR, ASI_SRMMU,
 		    lda(SRMMU_PCR, ASI_SRMMU) | VIKING_PCR_SB);
-		return;
-
 	} else if (type == T_DATAFAULT && (sfsr & SFSR_FAV) == 0) {
 		/*
 		 * bizarre.
@@ -316,10 +328,16 @@ viking_memerr(type, sfsr, sfva, tf)
 		if (oldtype == T_DATAFAULT)
 			goto hard;
 		oldtype = T_DATAFAULT;
-		return;
 	}
+out:
+	if ((tf->tf_psr & PSR_PS) == 0)
+		KERNEL_PROC_UNLOCK(curproc);
+	return;
+
 hard:
 	hardmemerr4m(type, sfsr, sfva, afsr, afva);
+	goto out;
 }
 
 void
@@ -332,6 +350,9 @@ memerr4m(type, sfsr, sfva, tf)
 	u_int afsr;
 	u_int afva;
 
+	if ((tf->tf_psr & PSR_PS) == 0)
+		KERNEL_PROC_LOCK(curproc);
+
 	/*
 	 * No known special cases.
 	 * Just get async registers, if any, and report the unhandled case.
@@ -340,5 +361,7 @@ memerr4m(type, sfsr, sfva, tf)
 		afsr = afva = 0;
 
 	hardmemerr4m(type, sfsr, sfva, afsr, afva);
+	if ((tf->tf_psr & PSR_PS) == 0)
+		KERNEL_PROC_UNLOCK(curproc);
 }
 #endif /* SUN4M */
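In the memory-error handlers above, the lock is only taken when the trap came in from user mode (PSR_PS clear in the trap frame), and every exit path, including the hard-error path, is routed through a single out: label so the unlock cannot be skipped. Roughly, as a control-flow sketch only (memerr_sketch and soft_error_handled() are hypothetical names; the real handlers do chip-specific recovery before deciding):

void
memerr_sketch(int type, u_int sfsr, u_int sfva, struct trapframe *tf)
{
        if ((tf->tf_psr & PSR_PS) == 0)         /* trapped from user mode? */
                KERNEL_PROC_LOCK(curproc);

        if (!soft_error_handled(type, sfsr))    /* hypothetical predicate */
                goto hard;
        /* soft error handled: fall through to the common exit */
out:
        if ((tf->tf_psr & PSR_PS) == 0)
                KERNEL_PROC_UNLOCK(curproc);
        return;

hard:
        hardmemerr4m(type, sfsr, sfva, 0, 0);   /* report the hard error ... */
        goto out;                               /* ... then unlock via out: */
}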

trap.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: trap.c,v 1.97 2000/12/01 19:50:17 jdolecek Exp $ */
+/*	$NetBSD: trap.c,v 1.98 2001/03/03 19:11:02 pk Exp $ */
 
 /*
  * Copyright (c) 1996
@@ -267,11 +267,13 @@ trap(type, psr, pc, tf)
 	int n;
 	char bits[64];
 	u_quad_t sticks;
+	int sig;
+	u_long ucode;
 
 	/* This steps the PC over the trap. */
 #define	ADVANCE (n = tf->tf_npc, tf->tf_pc = n, tf->tf_npc = n + 4)
 
-	uvmexp.traps++;
+	uvmexp.traps++;	/* XXXSMP */
 
 	/*
 	 * Generally, kernel traps cause a panic. Any exceptions are
 	 * handled early here.
@@ -333,6 +335,9 @@ trap(type, psr, pc, tf)
 	}
 #endif
 
+	sig = 0;
+	ucode = 0;
+
 	switch (type) {
 
 	default:
@@ -342,7 +347,8 @@
 		printf("trap type 0x%x: pc=0x%x npc=0x%x psr=%s\n",
 		       type, pc, tf->tf_npc, bitmask_snprintf(psr,
 		       PSR_BITS, bits, sizeof(bits)));
-		trapsignal(p, SIGILL, type);
+		sig = SIGILL;
+		ucode = type;
 		break;
 	}
 #if defined(COMPAT_SVR4)
@@ -354,7 +360,8 @@ badtrap:
 		uprintf("%s[%d]: unimplemented software trap 0x%x\n",
 			p->p_comm, p->p_pid, type);
 #endif
-		trapsignal(p, SIGILL, type);
+		sig = SIGILL;
+		ucode = type;
 		break;
 
 #ifdef COMPAT_SVR4
@@ -375,15 +382,16 @@ badtrap:
 	case T_ILLINST:
 	case T_UNIMPLFLUSH:
-		if ((n = emulinstr(pc, tf)) == 0) {
+		if ((sig = emulinstr(pc, tf)) == 0) {
 			ADVANCE;
 			break;
 		}
-		trapsignal(p, n, 0);	/* XXX code?? */
+		/* XXX - ucode? */
 		break;
 
 	case T_PRIVINST:
-		trapsignal(p, SIGILL, 0);	/* XXX code?? */
+		sig = SIGILL;
+		/* XXX - ucode? */
 		break;
 
 	case T_FPDISABLED: {
@@ -398,6 +406,7 @@ badtrap:
 		}
 #endif
 
+		KERNEL_PROC_LOCK(p);
 		if (fs == NULL) {
 			fs = malloc(sizeof *fs, M_SUBPROC, M_WAITOK);
 			*fs = initfpstate;
@@ -410,11 +419,12 @@
 		if (!cpuinfo.fpupresent) {
 #ifdef notyet
 			fpu_emulate(p, tf, fs);
-			break;
 #else
-			trapsignal(p, SIGFPE, 0);	/* XXX code?? */
-			break;
+			sig = SIGFPE;
+			/* XXX - ucode? */
 #endif
+			KERNEL_PROC_UNLOCK(p);
+			break;
 		}
 		/*
 		 * We may have more FPEs stored up and/or ops queued.
@@ -423,9 +433,10 @@
 		 */
 		if (fs->fs_qsize) {
 			fpu_cleanup(p, fs);
+			KERNEL_PROC_UNLOCK(p);
 			break;
 		}
-#if NEW
+#if notyet
 		simple_lock(&cpuinfo.fplock);
 		if (cpuinfo.fpproc != p) {		/* we do not have it */
 			if (cpuinfo.fpproc != NULL) {	/* someone else had it*/
@@ -477,6 +488,7 @@ badtrap:
 			p->p_md.md_fpumid = cpuinfo.mid;
 		}
 #endif
+		KERNEL_PROC_UNLOCK(p);
 		tf->tf_psr |= PSR_EF;
 		break;
 	}
@@ -498,6 +510,7 @@ badtrap:
 		 * nsaved to -1. If we decide to deliver a signal on
 		 * our way out, we will clear nsaved.
 		 */
+		KERNEL_PROC_LOCK(p);
 		if (pcb->pcb_uw || pcb->pcb_nsaved)
 			panic("trap T_RWRET 1");
 #ifdef DEBUG
@@ -510,6 +523,7 @@
 		if (pcb->pcb_nsaved)
 			panic("trap T_RWRET 2");
 		pcb->pcb_nsaved = -1;		/* mark success */
+		KERNEL_PROC_UNLOCK(p);
 		break;
 
 	case T_WINUF:
@@ -522,6 +536,7 @@
 		 * in the pcb. The restore's window may still be in
 		 * the cpu; we need to force it out to the stack.
 		 */
+		KERNEL_PROC_LOCK(p);
#ifdef DEBUG
 		if (rwindow_debug)
 			printf("%s[%d]: rwindow: T_WINUF 0: pcb<-stack: 0x%x\n",
@@ -540,15 +555,21 @@
 		if (pcb->pcb_nsaved)
 			panic("trap T_WINUF");
 		pcb->pcb_nsaved = -1;		/* mark success */
+		KERNEL_PROC_UNLOCK(p);
 		break;
 
 	case T_ALIGN:
-		if ((p->p_md.md_flags & MDP_FIXALIGN) != 0 &&
-		    fixalign(p, tf) == 0) {
-			ADVANCE;
-			break;
+		if ((p->p_md.md_flags & MDP_FIXALIGN) != 0) {
+			KERNEL_PROC_LOCK(p);
+			n = fixalign(p, tf);
+			KERNEL_PROC_UNLOCK(p);
+			if (n == 0) {
+				ADVANCE;
+				break;
+			}
 		}
-		trapsignal(p, SIGBUS, 0);	/* XXX code?? */
+		sig = SIGBUS;
+		/* XXX - ucode? */
 		break;
 
 	case T_FPE:
@@ -560,12 +581,14 @@
 		 * will not match once fpu_cleanup does its job, so
 		 * we must not save again later.)
 		 */
+		KERNEL_PROC_LOCK(p);
 		if (p != cpuinfo.fpproc)
 			panic("fpe without being the FP user");
 		savefpstate(p->p_md.md_fpstate);
 		cpuinfo.fpproc = NULL;
 		/* tf->tf_psr &= ~PSR_EF; */	/* share_fpu will do this */
 		fpu_cleanup(p, p->p_md.md_fpstate);
+		KERNEL_PROC_UNLOCK(p);
 		/* fpu_cleanup posts signals if needed */
#if 0		/* ??? really never??? */
 		ADVANCE;
@@ -573,29 +596,34 @@
 		break;
 
 	case T_TAGOF:
-		trapsignal(p, SIGEMT, 0);	/* XXX code?? */
+		sig = SIGEMT;
+		/* XXX - ucode? */
 		break;
 
 	case T_CPDISABLED:
 		uprintf("coprocessor instruction\n");	/* XXX */
-		trapsignal(p, SIGILL, 0);	/* XXX code?? */
+		sig = SIGILL;
+		/* XXX - ucode? */
 		break;
 
 	case T_BREAKPOINT:
-		trapsignal(p, SIGTRAP, 0);
+		sig = SIGTRAP;
 		break;
 
 	case T_DIV0:
 	case T_IDIV0:
 		ADVANCE;
-		trapsignal(p, SIGFPE, FPE_INTDIV_TRAP);
+		sig = SIGFPE;
+		ucode = FPE_INTDIV_TRAP;
 		break;
 
 	case T_FLUSHWIN:
 		write_user_windows();
#ifdef probably_slower_since_this_is_usually_false
+		KERNEL_PROC_LOCK(p);
 		if (pcb->pcb_nsaved && rwindow_save(p))
 			sigexit(p, SIGILL);
+		KERNEL_PROC_UNLOCK(p);
#endif
 		ADVANCE;
 		break;
@@ -608,7 +636,8 @@
 	case T_RANGECHECK:
 		uprintf("T_RANGECHECK\n");	/* XXX */
 		ADVANCE;
-		trapsignal(p, SIGILL, 0);	/* XXX code?? */
+		sig = SIGILL;
+		/* XXX - ucode? */
 		break;
 
 	case T_FIXALIGN:
@@ -623,9 +652,15 @@
 	case T_INTOF:
 		uprintf("T_INTOF\n");		/* XXX */
 		ADVANCE;
-		trapsignal(p, SIGFPE, FPE_INTOVF_TRAP);
+		sig = SIGFPE;
+		ucode = FPE_INTOVF_TRAP;
 		break;
 	}
+	if (sig != 0) {
+		KERNEL_PROC_LOCK(p);
+		trapsignal(p, sig, ucode);
+		KERNEL_PROC_UNLOCK(p);
+	}
 	userret(p, pc, sticks);
 	share_fpu(p, tf);
#undef ADVANCE
@@ -725,6 +760,9 @@ mem_access_fault(type, ser, v, pc, psr, tf)
 		p = &proc0;
 	sticks = p->p_sticks;
 
+	if ((psr & PSR_PS) == 0)
+		KERNEL_PROC_LOCK(p);
+
#ifdef FPU_DEBUG
 	if ((tf->tf_psr & PSR_EF) != 0) {
 		if (cpuinfo.fpproc != p)
@@ -866,6 +904,7 @@ kfault:
 	}
 out:
 	if ((psr & PSR_PS) == 0) {
+		KERNEL_PROC_UNLOCK(p);
 		userret(p, pc, sticks);
 		share_fpu(p, tf);
 	}
@@ -873,13 +912,8 @@ out:
 }
 
 #if defined(SUN4M)	/* 4m version of mem_access_fault() follows */
 static int tfaultaddr = (int) 0xdeadbeef;
-
-#ifdef DEBUG
-int dfdebug = 0;
-#endif
-
 void
 mem_access_fault4m(type, sfsr, sfva, tf)
 	unsigned type;
@@ -897,7 +931,8 @@ mem_access_fault4m(type, sfsr, sfva, tf)
 	u_quad_t sticks;
 	char bits[64];
 
-	uvmexp.traps++;
+	uvmexp.traps++;	/* XXXSMP */
+
 	if ((p = curproc) == NULL)	/* safety check */
 		p = &proc0;
 	sticks = p->p_sticks;
@@ -917,6 +952,9 @@ mem_access_fault4m(type, sfsr, sfva, tf)
 	pc = tf->tf_pc;			/* These are needed below */
 	psr = tf->tf_psr;
 
+	if ((psr & PSR_PS) == 0)
+		KERNEL_PROC_LOCK(p);
+
 	/*
 	 * Our first priority is handling serious faults, such as
 	 * parity errors or async faults that might have come through here.
@@ -934,11 +972,15 @@
 	 */
 	if (type == T_STOREBUFFAULT ||
 	    (type == T_DATAFAULT && (sfsr & SFSR_FAV) == 0)) {
+		if ((psr & PSR_PS) == 0)
+			KERNEL_PROC_UNLOCK(p);
 		(*cpuinfo.memerr)(type, sfsr, sfva, tf);
 		/*
 		 * If we get here, exit the trap handler and wait for the
 		 * trap to re-occur.
 		 */
+		if ((psr & PSR_PS) == 0)
+			KERNEL_PROC_LOCK(p);
 		goto out;
 	}
@@ -982,10 +1024,13 @@
 	}
 
 	if ((sfsr & SFSR_FT) == SFSR_FT_TRANSERR) {
-		/* Translation errors are always fatal, as they indicate
+		/*
+		 * Translation errors are always fatal, as they indicate
 		 * a corrupt translation (page) table hierarchy.
 		 */
 		rv = KERN_PROTECTION_FAILURE;
+
+		/* XXXSMP - why bother with this anyway? */
 		if (tfaultaddr == sfva)	/* Prevent infinite loops w/a static */
 			goto fault;
 		tfaultaddr = sfva;
@@ -993,6 +1038,10 @@
 		    SRMMU_TETYPE) != SRMMU_TEPTE)
 			goto fault;	/* Translation bad */
 		lda(SRMMU_SFSR, ASI_SRMMU);
+#ifdef DEBUG
+		printf("mem_access_fault4m: SFSR_FT_TRANSERR: "
+			"pid %d, va 0x%x: retrying\n", p->p_pid, sfva);
+#endif
 		goto out;	/* Translation OK, retry operation */
 	}
@@ -1111,6 +1160,7 @@ kfault:
 	}
 out:
 	if ((psr & PSR_PS) == 0) {
+		KERNEL_PROC_UNLOCK(p);
 		userret(p, pc, sticks);
 		share_fpu(p, tf);
 	}
@@ -1141,8 +1191,11 @@ syscall(code, tf, pc)
 	register_t rval[2];
 	u_quad_t sticks;
 
-	uvmexp.syscalls++;
+	uvmexp.syscalls++;	/* XXXSMP */
 	p = curproc;
+
+	KERNEL_PROC_LOCK(p);
+
#ifdef DIAGNOSTIC
 	if (tf->tf_psr & PSR_PS)
 		panic("syscall");
@@ -1268,10 +1321,14 @@
 		break;
 	}
 
+	KERNEL_PROC_UNLOCK(p);
 	userret(p, pc, sticks);
#ifdef KTRACE
-	if (KTRPOINT(p, KTR_SYSRET))
+	if (KTRPOINT(p, KTR_SYSRET)) {
+		KERNEL_PROC_LOCK(p);
 		ktrsysret(p, code, error, rval[0]);
+		KERNEL_PROC_UNLOCK(p);
+	}
#endif
 	share_fpu(p, tf);
 }
@@ -1288,10 +1345,14 @@ child_return(arg)
 	/*
 	 * Return values in the frame set by cpu_fork().
 	 */
+	KERNEL_PROC_UNLOCK(p);
 	userret(p, p->p_md.md_tf->tf_pc, 0);
#ifdef KTRACE
-	if (KTRPOINT(p, KTR_SYSRET))
+	if (KTRPOINT(p, KTR_SYSRET)) {
+		KERNEL_PROC_LOCK(p);
 		ktrsysret(p,
 		    (p->p_flag & P_PPWAIT) ? SYS_vfork : SYS_fork, 0, 0);
+		KERNEL_PROC_UNLOCK(p);
+	}
#endif
 }
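A second recurring change in trap() above: instead of calling trapsignal() directly inside each switch case, the cases now only record the pending signal in sig/ucode, and the signal is posted once, after the switch, with the lock held just for that call. Condensed from the hunks above (two representative cases; the others follow the same shape):

        sig = 0;
        ucode = 0;

        switch (type) {
        case T_DIV0:
        case T_IDIV0:
                ADVANCE;
                sig = SIGFPE;
                ucode = FPE_INTDIV_TRAP;
                break;
        case T_BREAKPOINT:
                sig = SIGTRAP;
                break;
        /* ... the other cases set sig/ucode the same way ... */
        }

        if (sig != 0) {
                KERNEL_PROC_LOCK(p);    /* take the biglock only if a signal is due */
                trapsignal(p, sig, ucode);
                KERNEL_PROC_UNLOCK(p);
        }
        userret(p, pc, sticks);

This keeps the common no-signal path lock-free and avoids sprinkling lock/unlock pairs into every one of the two dozen switch cases.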