Switch prep over to evcnt(9). While I was here, I also switched it to use struct cpu_info for the cpu stuff. Tested on 7248 and 7043.
This commit is contained in:
garbled 2006-05-08 17:08:34 +00:00
parent 3ce69693c2
commit be39d7253e
7 changed files with 217 additions and 221 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: intr.h,v 1.24 2006/05/03 17:47:06 garbled Exp $ */
/* $NetBSD: intr.h,v 1.25 2006/05/08 17:08:34 garbled Exp $ */
/*-
* Copyright (c) 1998 The NetBSD Foundation, Inc.
@ -62,6 +62,7 @@
#ifndef _LOCORE
#include <powerpc/softintr.h>
#include <machine/cpu.h>
/*
* Interrupt handler chains. intr_establish() inserts a handler into
@ -76,6 +77,21 @@ struct intrhand {
int ih_irq;
};
#include <sys/device.h>
struct intrsource {
int is_type;
int is_level;
int is_hwirq;
int is_mask;
struct intrhand *is_hand;
struct evcnt is_ev;
char is_source[16];
};
int splraise(int);
int spllower(int);
void softintr(int);
void do_pending_int(void);
void init_intr(void);
@ -95,61 +111,14 @@ void isa_intr_mask(int);
void isa_intr_clr(int);
void isa_setirqstat(int, int, int);
static __inline int splraise(int);
static __inline void spllower(int);
static __inline void set_sint(int);
extern volatile int cpl, ipending, astpending, tickspending;
extern int imen;
extern int imask[];
extern long intrcnt[];
extern unsigned intrcnt2[];
extern struct intrhand *intrhand[];
extern int intrtype[];
extern vaddr_t prep_intr_reg;
/*
* Reorder protection in the following inline functions is
* achieved with the "eieio" instruction which the assembler
* seems to detect and then doesn't move instructions past....
*/
static __inline int
splraise(int newcpl)
{
int oldcpl;
__asm volatile("sync; eieio\n"); /* don't reorder.... */
oldcpl = cpl;
cpl = oldcpl | newcpl;
__asm volatile("sync; eieio\n"); /* reorder protect */
return(oldcpl);
}
static __inline void
spllower(int newcpl)
{
__asm volatile("sync; eieio\n"); /* reorder protect */
cpl = newcpl;
if(ipending & ~newcpl)
do_pending_int();
__asm volatile("sync; eieio\n"); /* reorder protect */
}
/* Following code should be implemented with lwarx/stwcx to avoid
* the disable/enable. i need to read the manual once more.... */
static __inline void
set_sint(int pending)
{
int msrsave;
__asm ("mfmsr %0" : "=r"(msrsave));
__asm volatile ("mtmsr %0" :: "r"(msrsave & ~PSL_EE));
ipending |= pending;
__asm volatile ("mtmsr %0" :: "r"(msrsave));
}
#define ICU_LEN 32
extern struct intrsource intrsources[ICU_LEN];
#define IRQ_SLAVE 2
#define LEGAL_IRQ(x) ((x) >= 0 && (x) < ICU_LEN && (x) != IRQ_SLAVE)
#define I8259_INTR_NUM 16
@ -185,9 +154,9 @@ set_sint(int pending)
#define spllpt() spltty()
#define setsoftclock() set_sint(SINT_CLOCK);
#define setsoftnet() set_sint(SINT_NET);
#define setsoftserial() set_sint(SINT_SERIAL);
#define setsoftclock() softintr(SINT_CLOCK);
#define setsoftnet() softintr(SINT_NET);
#define setsoftserial() softintr(SINT_SERIAL);
#define splhigh() splraise(imask[IPL_HIGH])
#define splsched() splhigh()

View File

@ -1,4 +1,4 @@
/* $NetBSD: isa_machdep.c,v 1.9 2005/12/11 12:18:47 christos Exp $ */
/* $NetBSD: isa_machdep.c,v 1.10 2006/05/08 17:08:34 garbled Exp $ */
/*-
* Copyright (c) 1998 The NetBSD Foundation, Inc.
@ -71,7 +71,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: isa_machdep.c,v 1.9 2005/12/11 12:18:47 christos Exp $");
__KERNEL_RCSID(0, "$NetBSD: isa_machdep.c,v 1.10 2006/05/08 17:08:34 garbled Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -177,17 +177,17 @@ void
init_icu(int lvlmask)
{
int i;
extern int intrtype[];
struct intrsource *is;
for (i= 0; i < ICU_LEN; i++) {
for (i = 0, is = intrsources; i < ICU_LEN; i++, is++) {
switch (i) {
case 0:
case 2:
case 8:
intrtype[i] = IST_EDGE;
is->is_type = IST_EDGE;
break;
default:
intrtype[i] = (1 << i) & lvlmask ? IST_LEVEL : IST_NONE;
is->is_type = (1 << i) & lvlmask ? IST_LEVEL : IST_NONE;
}
}
@ -239,18 +239,21 @@ isa_intr_alloc(isa_chipset_tag_t c, int mask, int type, int *irq_p)
int irq;
int maybe_irq = -1;
int shared_depth = 0;
struct intrsource *is;
mask &= 0x8b28; /* choose from 3, 5, 8, 9, 11, 15 XXX */
for (irq = 0; mask != 0; mask >>= 1, irq++) {
for (irq = 0, is = intrsources; mask != 0; mask >>= 1, irq++, is++) {
if ((mask & 1) == 0)
continue;
if (intrtype[irq] == IST_NONE) {
if (is->is_type == IST_NONE) {
*irq_p = irq;
return 0;
}
/* Level interrupts can be shared */
if (type == IST_LEVEL && intrtype[irq] == IST_LEVEL) {
struct intrhand *ih = intrhand[irq];
if (type == IST_LEVEL && is->is_type == IST_LEVEL) {
struct intrhand *ih = is->is_hand;
int depth;
if (maybe_irq == -1) {
maybe_irq = irq;
continue;

View File

@ -1,4 +1,4 @@
/* $NetBSD: clock.c,v 1.13 2005/12/24 22:45:36 perry Exp $ */
/* $NetBSD: clock.c,v 1.14 2006/05/08 17:08:34 garbled Exp $ */
/* $OpenBSD: clock.c,v 1.3 1997/10/13 13:42:53 pefo Exp $ */
/*
@ -33,7 +33,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: clock.c,v 1.13 2005/12/24 22:45:36 perry Exp $");
__KERNEL_RCSID(0, "$NetBSD: clock.c,v 1.14 2006/05/08 17:08:34 garbled Exp $");
#include <sys/param.h>
#include <sys/kernel.h>
@ -53,7 +53,6 @@ void decr_intr __P((struct clockframe *));
u_long ticks_per_sec;
u_long ns_per_tick;
static long ticks_per_intr;
static volatile u_long lasttb;
struct device *clockdev;
const struct clockfns *clockfns;
@ -78,13 +77,14 @@ todr_attach(handle)
void
cpu_initclocks()
{
struct cpu_info * const ci = curcpu();
ticks_per_intr = ticks_per_sec / hz;
cpu_timebase = ticks_per_sec;
if ((mfpvr() >> 16) == MPC601)
__asm volatile ("mfspr %0,%1" : "=r"(lasttb) : "n"(SPR_RTCL_R));
__asm volatile ("mfspr %0,%1" : "=r"(ci->ci_lasttb) : "n"(SPR_RTCL_R));
else
__asm volatile ("mftb %0" : "=r"(lasttb));
__asm volatile ("mftb %0" : "=r"(ci->ci_lasttb));
__asm volatile ("mtdec %0" :: "r"(ticks_per_intr));
}
@ -170,12 +170,12 @@ void
decr_intr(frame)
struct clockframe *frame;
{
struct cpu_info * const ci = curcpu();
int msr;
int pri;
u_long tb;
long ticks;
int nticks;
extern long intrcnt[];
/*
* Check whether we are initialized.
@ -193,14 +193,14 @@ decr_intr(frame)
__asm volatile ("mtdec %0" :: "r"(ticks));
uvmexp.intrs++;
intrcnt[CNT_CLOCK]++;
ci->ci_ev_clock.ev_count++;
pri = splclock();
if (pri & SPL_CLOCK) {
tickspending += nticks;
ci->ci_tickspending += nticks;
} else {
nticks += tickspending;
tickspending = 0;
nticks += ci->ci_tickspending;
ci->ci_tickspending = 0;
/*
* lasttb is used during microtime. Set it to the virtual
@ -211,7 +211,7 @@ decr_intr(frame)
} else {
__asm volatile ("mftb %0" : "=r"(tb));
}
lasttb = tb + ticks - ticks_per_intr;
ci->ci_lasttb = tb + ticks - ticks_per_intr;
/*
* Reenable interrupts
@ -249,7 +249,7 @@ microtime(tvp)
__asm volatile ("mfspr %0,%1" : "=r"(tb) : "n"(SPR_RTCL_R));
else
__asm volatile ("mftb %0" : "=r"(tb));
ticks = (tb - lasttb) * ns_per_tick;
ticks = (tb - curcpu()->ci_lasttb) * ns_per_tick;
*tvp = time;
__asm volatile ("mtmsr %0" :: "r"(msr));
ticks /= 1000;

View File

@ -1,4 +1,4 @@
/* $NetBSD: cpu.c,v 1.12 2006/03/09 20:17:28 garbled Exp $ */
/* $NetBSD: cpu.c,v 1.13 2006/05/08 17:08:34 garbled Exp $ */
/*-
* Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.12 2006/03/09 20:17:28 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.13 2006/05/08 17:08:34 garbled Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -71,7 +71,11 @@ cpumatch(struct device *parent, struct cfdata *cfdata, void *aux)
void
cpuattach(struct device *parent, struct device *self, void *aux)
{
cpu_attach_common(self, 0);
struct cpu_info *ci;
ci = cpu_attach_common(self, 0);
if (ci == NULL)
return;
cpu_setup_prep_generic(self);
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: extintr.c,v 1.21 2006/05/03 17:47:06 garbled Exp $ */
/* $NetBSD: extintr.c,v 1.22 2006/05/08 17:08:34 garbled Exp $ */
/* $OpenBSD: isabus.c,v 1.12 1999/06/15 02:40:05 rahnds Exp $ */
/*-
@ -119,7 +119,7 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: extintr.c,v 1.21 2006/05/03 17:47:06 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: extintr.c,v 1.22 2006/05/08 17:08:34 garbled Exp $");
#include "opt_openpic.h"
#include "pci.h"
@ -158,11 +158,9 @@ static void ext_intr_openpic(void);
static void install_extint(void (*)(void));
int imen = 0xffffffff;
volatile int cpl, ipending, tickspending;
int imask[NIPL];
int intrtype[ICU_LEN], intrmask[ICU_LEN], intrlevel[ICU_LEN];
struct intrhand *intrhand[ICU_LEN];
unsigned intrcnt2[ICU_LEN];
struct intrsource intrsources[ICU_LEN];
static int
fakeintr(void *arg)
@ -171,108 +169,68 @@ fakeintr(void *arg)
return 0;
}
#if 0
static void ext_intr(void);
/*
* Process an interrupt from the ISA bus.
* When we get here remember we have "delayed" ipl mask
* settings from the spl<foo>() calls. Yes it's faster
* to do it like this because SPL's are done so frequently
* and interrupts are likely to *NOT* happen most of the
* times the spl level is changed.
*/
static void
ext_intr(void)
{
u_int8_t irq;
int r_imen;
int pcpl;
struct intrhand *ih;
/* what about enabling external interrupt in here? */
pcpl = splhigh(); /* Turn off all */
irq = isa_intr();
intrcnt2[irq]++;
r_imen = 1 << irq;
if ((pcpl & r_imen) != 0) {
ipending |= r_imen; /* Masked! Mark this as pending */
imen |= r_imen;
isa_intr_mask(imen);
} else {
ih = intrhand[irq];
if (ih == NULL)
printf("spurious interrupt %d\n", irq);
while (ih) {
(*ih->ih_fun)(ih->ih_arg);
ih = ih->ih_next;
}
isa_intr_clr(irq);
uvmexp.intrs++;
intrcnt[irq]++;
}
splx(pcpl); /* Process pendings. */
}
#endif
/*
* Same as the above, but using the board's interrupt vector register.
* ext_interrupts using the board's interrupt vector register.
*/
static void
ext_intr_ivr(void)
{
u_int8_t irq;
int r_imen;
int pcpl;
int r_imen, pcpl, msr;
struct cpu_info *ci = curcpu();
struct intrhand *ih;
struct intrsource *is;
/* what about enabling external interrupt in here? */
pcpl = splhigh(); /* Turn off all */
pcpl = ci->ci_cpl;
msr = mfmsr();
irq = *((u_char *)prep_intr_reg + INTR_VECTOR_REG);
intrcnt2[irq]++;
is = &intrsources[irq];
r_imen = 1 << irq;
if ((pcpl & r_imen) != 0) {
ipending |= r_imen; /* Masked! Mark this as pending */
ci->ci_ipending |= r_imen; /* Masked! Mark this as pending */
imen |= r_imen;
isa_intr_mask(imen);
} else {
ih = intrhand[irq];
splraise(is->is_mask);
mtmsr(msr | PSL_EE);
KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
ih = is->is_hand;
if (ih == NULL)
printf("spurious interrupt %d\n", irq);
while (ih) {
(*ih->ih_fun)(ih->ih_arg);
ih = ih->ih_next;
}
KERNEL_UNLOCK();
mtmsr(msr);
ci->ci_cpl = pcpl;
isa_intr_clr(irq);
uvmexp.intrs++;
intrcnt[irq]++;
is->is_ev.ev_count++;
}
mtmsr(msr | PSL_EE);
splx(pcpl); /* Process pendings. */
mtmsr(msr);
}
#if defined(OPENPIC)
static void
ext_intr_openpic(void)
{
struct cpu_info *ci = curcpu();
struct intrhand *ih;
int r_imen;
int pcpl;
struct intrsource *is;
int r_imen, pcpl, msr;
u_int realirq;
u_int8_t irq;
/* what about enabling external interrupt in here? */
pcpl = splhigh(); /* Turn off all */
msr = mfmsr();
pcpl = ci->ci_cpl;
realirq = openpic_read_irq(0);
while (realirq < OPENPIC_INTR_NUM) {
@ -281,39 +239,46 @@ ext_intr_openpic(void)
else
irq = realirq + I8259_INTR_NUM;
intrcnt2[irq]++;
is = &intrsources[irq];
r_imen = 1 << irq;
if ((pcpl & r_imen) != 0) {
ipending |= r_imen;
ci->ci_ipending |= r_imen;
imen |= r_imen;
if (realirq == 0)
isa_intr_mask(imen);
else
openpic_disable_irq(realirq);
} else {
ih = intrhand[irq];
splraise(is->is_mask);
mtmsr(msr | PSL_EE);
KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
ih = is->is_hand;
if (ih == NULL)
printf("spurious interrupt %d\n", irq);
while (ih) {
(*ih->ih_fun)(ih->ih_arg);
ih = ih->ih_next;
}
KERNEL_UNLOCK();
mtmsr(msr);
ci->ci_cpl = pcpl;
if (realirq == 0)
isa_intr_clr(irq);
uvmexp.intrs++;
intrcnt[irq]++;
is->is_ev.ev_count++;
}
openpic_eoi(0);
realirq = openpic_read_irq(0);
}
mtmsr(msr | PSL_EE);
splx(pcpl); /* Process pendings. */
mtmsr(msr);
}
#endif /* OPENPIC */
@ -321,6 +286,7 @@ void *
intr_establish(int irq, int type, int level, int (*ih_fun)(void *), void *ih_arg)
{
struct intrhand **p, *q, *ih;
struct intrsource *is;
static struct intrhand fakehand = {fakeintr};
/* no point in sleeping unless someone can free memory. */
@ -331,28 +297,42 @@ intr_establish(int irq, int type, int level, int (*ih_fun)(void *), void *ih_arg
if (!LEGAL_IRQ(irq) || type == IST_NONE)
panic("intr_establish: bogus irq or type");
switch (intrtype[irq]) {
is = &intrsources[irq];
is->is_hwirq = irq;
switch (is->is_type) {
case IST_NONE:
intrtype[irq] = type;
is->is_type = type;
break;
case IST_LEVEL:
case IST_EDGE:
if (type == intrtype[irq])
if (type == is->is_type)
break;
case IST_PULSE:
if (type != IST_NONE)
panic("intr_establish: can't share %s with %s irq %d",
isa_intr_typename(intrtype[irq]),
isa_intr_typename(is->is_type),
isa_intr_typename(type), irq);
break;
}
if (is->is_hand == NULL) {
snprintf(is->is_source, sizeof(is->is_source), "irq %d",
is->is_hwirq);
if (irq >= I8259_INTR_NUM)
evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL,
"openpic", is->is_source);
else
evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL,
"8259", is->is_source);
}
/*
* Figure out where to put the handler.
* This is O(N^2), but we want to preserve the order, and N is
* generally small.
*/
for (p = &intrhand[irq]; (q = *p) != NULL; p = &q->ih_next)
for (p = &is->is_hand; (q = *p) != NULL; p = &q->ih_next)
continue;
/*
@ -387,6 +367,7 @@ intr_disestablish(void *arg)
{
struct intrhand *ih = arg;
int irq = ih->ih_irq;
struct intrsource *is = &intrsources[irq];
struct intrhand **p, *q;
if (!LEGAL_IRQ(irq))
@ -396,7 +377,7 @@ intr_disestablish(void *arg)
* Remove the handler from the chain.
* This is O(n^2), too.
*/
for (p = &intrhand[irq]; (q = *p) != NULL && q != ih; p = &q->ih_next)
for (p = &is->is_hand; (q = *p) != NULL && q != ih; p = &q->ih_next)
continue;
if (q == NULL)
panic("intr_disestablish: handler not registered");
@ -407,8 +388,10 @@ intr_disestablish(void *arg)
intr_calculatemasks();
if (intrhand[irq] == NULL)
intrtype[irq] = IST_NONE;
if (is->is_hand == NULL) {
is->is_type = IST_NONE;
evcnt_detach(&is->is_ev);
}
}
/*
@ -421,21 +404,22 @@ static void
intr_calculatemasks(void)
{
int irq, level;
struct intrsource *is;
struct intrhand *q;
/* First, figure out which levels each IRQ uses. */
for (irq = 0; irq < ICU_LEN; irq++) {
for (irq = 0, is = intrsources; irq < ICU_LEN; irq++, is++) {
register int levels = 0;
for (q = intrhand[irq]; q; q = q->ih_next)
for (q = is->is_hand; q; q = q->ih_next)
levels |= 1 << q->ih_level;
intrlevel[irq] = levels;
is->is_level = levels;
}
/* Then figure out which IRQs use each level. */
for (level = 0; level < NIPL; level++) {
register int irqs = 0;
for (irq = 0; irq < ICU_LEN; irq++)
if (intrlevel[irq] & (1 << level))
for (irq = 0, is = intrsources; irq < ICU_LEN; irq++, is++)
if (is->is_level & (1 << level))
irqs |= 1 << irq;
imask[level] = irqs;
}
@ -491,17 +475,18 @@ intr_calculatemasks(void)
imask[IPL_SERIAL] |= imask[IPL_HIGH];
/* And eventually calculate the complete masks. */
for (irq = 0; irq < ICU_LEN; irq++) {
for (irq = 0, is = intrsources; irq < ICU_LEN; irq++, is++) {
register int irqs = 1 << irq;
for (q = intrhand[irq]; q; q = q->ih_next)
for (q = is->is_hand; q; q = q->ih_next)
irqs |= imask[q->ih_level];
intrmask[irq] = irqs;
is->is_mask = irqs;
}
{
register int irqs = 0;
for (irq = 0; irq < I8259_INTR_NUM; irq++)
if (intrhand[irq])
for (irq = 0, is = intrsources; irq < I8259_INTR_NUM;
irq++, is++)
if (is->is_hand)
irqs |= 1 << irq;
if (irqs >= 0x100) /* any IRQs >= 8 in use */
irqs |= 1 << IRQ_SLAVE;
@ -511,10 +496,11 @@ intr_calculatemasks(void)
#if defined(OPENPIC)
if (openpic_base) {
openpic_enable_irq(0, IST_LEVEL);
for (irq = I8259_INTR_NUM + 1; irq < ICU_LEN; irq++) {
if (intrhand[irq]) {
for (irq = I8259_INTR_NUM + 1, is = &intrsources[irq];
irq < ICU_LEN; irq++, is++) {
if (is->is_hand) {
openpic_enable_irq(irq - I8259_INTR_NUM,
intrtype[irq]);
is->is_type);
} else {
openpic_disable_irq(irq);
}
@ -527,31 +513,30 @@ intr_calculatemasks(void)
void
do_pending_int(void)
{
struct cpu_info * const ci = curcpu();
struct intrhand *ih;
int irq;
int pcpl;
int hwpend;
int emsr, dmsr;
static int processing;
struct intrsource *is;
int irq, pcpl, hwpend, emsr, dmsr;
if (processing)
if (ci->ci_iactive)
return;
processing = 1;
ci->ci_iactive = 1;
__asm volatile("mfmsr %0" : "=r"(emsr));
dmsr = emsr & ~PSL_EE;
__asm volatile("mtmsr %0" :: "r"(dmsr));
pcpl = splhigh(); /* Turn off all */
pcpl = ci->ci_cpl; /* Turn off all */
again:
hwpend = ipending & ~pcpl; /* Do now unmasked pendings */
hwpend = ci->ci_ipending & ~pcpl; /* Do now unmasked pendings */
imen &= ~hwpend;
hwpend &= ~SINT_MASK;
while (hwpend) {
irq = ffs(hwpend) - 1;
is = &intrsources[irq];
hwpend &= ~(1L << irq);
ih = intrhand[irq];
ih = is->is_hand;
while (ih) {
(*ih->ih_fun)(ih->ih_arg);
ih = ih->ih_next;
@ -559,52 +544,56 @@ again:
#if defined(OPENPIC)
if (irq >= I8259_INTR_NUM)
openpic_enable_irq(irq - I8259_INTR_NUM, intrtype[irq]);
openpic_enable_irq(irq - I8259_INTR_NUM, is->is_type);
else
#endif /* OPENPIC */
isa_intr_clr(irq);
uvmexp.intrs++;
intrcnt[irq]++;
is->is_ev.ev_count++;
}
if ((ipending & ~pcpl) & SINT_CLOCK) {
ipending &= ~SINT_CLOCK;
if ((ci->ci_ipending & ~pcpl) & SINT_CLOCK) {
ci->ci_ipending &= ~SINT_CLOCK;
splsoftclock();
mtmsr(emsr);
KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
softintr__run(IPL_SOFTCLOCK);
KERNEL_UNLOCK();
mtmsr(dmsr);
ci->ci_cpl = pcpl;
ci->ci_ev_softclock.ev_count++;
goto again;
}
if ((ipending & ~pcpl) & SINT_NET) {
ipending &= ~SINT_NET;
if ((ci->ci_ipending & ~pcpl) & SINT_NET) {
ci->ci_ipending &= ~SINT_NET;
splsoftnet();
mtmsr(emsr);
KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
softintr__run(IPL_SOFTNET);
KERNEL_UNLOCK();
mtmsr(dmsr);
ci->ci_cpl = pcpl;
ci->ci_ev_softnet.ev_count++;
goto again;
}
if ((ipending & ~pcpl) & SINT_SERIAL) {
ipending &= ~SINT_SERIAL;
if ((ci->ci_ipending & ~pcpl) & SINT_SERIAL) {
ci->ci_ipending &= ~SINT_SERIAL;
splsoftserial();
mtmsr(emsr);
KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
softintr__run(IPL_SOFTSERIAL);
KERNEL_UNLOCK();
mtmsr(dmsr);
ci->ci_cpl = pcpl;
ci->ci_ev_softserial.ev_count++;
goto again;
}
ipending &= pcpl;
cpl = pcpl; /* Don't use splx... we are here already! */
ci->ci_ipending &= pcpl;
ci->ci_cpl = pcpl; /* Don't use splx... we are here already! */
isa_intr_mask(imen);
__asm volatile("mtmsr %0" :: "r"(emsr));
processing = 0;
ci->ci_iactive = 0;
mtmsr(emsr);
}
static void
@ -719,3 +708,50 @@ init_intr(void)
#endif
install_extint(ext_intr_ivr);
}
/*
* Reorder protection in the following inline functions is
* achieved with the "eieio" instruction which the assembler
* seems to detect and then doesn't move instructions past....
*/
int
splraise(int newcpl)
{
struct cpu_info *ci = curcpu();
int oldcpl;
__asm volatile("sync; eieio\n"); /* don't reorder.... */
oldcpl = ci->ci_cpl;
ci->ci_cpl = oldcpl | newcpl;
__asm volatile("sync; eieio\n"); /* reorder protect */
return(oldcpl);
}
int
spllower(int newcpl)
{
struct cpu_info *ci = curcpu();
int ocpl;
__asm volatile("sync; eieio\n"); /* reorder protect */
ocpl = ci->ci_cpl;
ci->ci_cpl = newcpl;
if(ci->ci_ipending & ~newcpl)
do_pending_int();
__asm volatile("sync; eieio\n"); /* reorder protect */
return ocpl;
}
/* Following code should be implemented with lwarx/stwcx to avoid
* the disable/enable. i need to read the manual once more.... */
void
softintr(int pending)
{
int msrsave;
msrsave = mfmsr();
mtmsr(msrsave & ~PSL_EE);
curcpu()->ci_ipending |= pending;
mtmsr(msrsave);
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: locore.S,v 1.8 2005/12/11 12:18:48 christos Exp $ */
/* $NetBSD: locore.S,v 1.9 2006/05/08 17:08:34 garbled Exp $ */
/* $OpenBSD: locore.S,v 1.4 1997/01/26 09:06:38 rahnds Exp $ */
/*
@ -80,22 +80,6 @@ GLOBAL(endsym)
GLOBAL(proc0paddr)
.long 0 /* proc0 p_addr */
GLOBAL(intrnames)
.asciz "clock", "irq1", "irq2", "irq3"
.asciz "irq4", "irq5", "irq6", "irq7"
.asciz "irq8", "irq9", "irq10", "irq11"
.asciz "irq12", "irq13", "irq14", "irq15"
.asciz "irq16", "irq17", "irq18", "irq19"
.asciz "irq20", "irq21", "irq22", "irq23"
.asciz "irq24", "irq25", "irq26", "irq27"
.asciz "irq28", "softnet", "softclock", "softserial"
GLOBAL(eintrnames)
.align 4
GLOBAL(intrcnt)
.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
GLOBAL(eintrcnt)
/*
* This symbol is here for the benefit of kvm_mkdb, and is supposed to
* mark the start of kernel text.

View File

@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.59 2006/05/03 17:47:06 garbled Exp $ */
/* $NetBSD: machdep.c,v 1.60 2006/05/08 17:08:34 garbled Exp $ */
/*
* Copyright (C) 1995, 1996 Wolfgang Solfrank.
@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.59 2006/05/03 17:47:06 garbled Exp $");
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.60 2006/05/08 17:08:34 garbled Exp $");
#include "opt_compat_netbsd.h"
#include "opt_ddb.h"
@ -384,15 +384,15 @@ halt_sys:
* splx() differing in that it returns the previous priority level.
*/
int
lcsplx(ipl)
int ipl;
lcsplx(int ipl)
{
int oldcpl;
struct cpu_info *ci = curcpu();
__asm volatile("sync; eieio\n"); /* reorder protect */
oldcpl = cpl;
cpl = ipl;
if (ipending & ~ipl)
oldcpl = ci->ci_cpl;
ci->ci_cpl = ipl;
if (ci->ci_ipending & ~ipl)
do_pending_int();
__asm volatile("sync; eieio\n"); /* reorder protect */