Arrange to inline hardsplx() into raisespl() and lowerspl(). This should
make them slightly faster, and makes it easier to see how much of the time
apparently spent in hardsplx() actually comes from deferred statclock
interrupts.
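For readers unfamiliar with the pattern: the change gives hardsplx() an __inline declaration and moves its definition above raisespl() and lowerspl() in irq.c, so the compiler can expand it in place at each call site. The following is a minimal, self-contained sketch of that pattern, not the committed code; int_off(), int_on() and set_hw_masks() are stand-in stubs, and the raisespl()/lowerspl() bodies shown are assumptions for illustration only.

/*
 * Illustrative sketch of inlining by defining a function before its
 * callers in the same file.  Not the committed NetBSD code.
 */
#define IPL_HIGH 7

static volatile int current_spl = IPL_HIGH;

static void int_off(void) { /* stub: disable CPU interrupts */ }
static void int_on(void)  { /* stub: re-enable CPU interrupts */ }
static void set_hw_masks(int s) { (void)s; /* stub: program interrupt masks */ }

/* Declared __inline and defined above its callers so it can be expanded. */
static __inline int
hardsplx(int s)
{
	int was;

	int_off();
	was = current_spl;
	set_hw_masks(s);
	current_spl = s;
	int_on();
	return was;
}

int
raisespl(int s)
{
	/* Hypothetical caller: only switch masks if the level actually rises. */
	if (s > current_spl)
		return hardsplx(s);	/* expanded in place, no call overhead */
	return current_spl;
}

void
lowerspl(int s)
{
	/* Hypothetical caller: drop back to a lower level. */
	if (s < current_spl)
		(void)hardsplx(s);	/* also expanded in place */
}

Because the definition precedes its callers in the same translation unit, even a compiler without aggressive cross-function optimization can honour the __inline hint, which is what lets the time formerly attributed to hardsplx() show up in its callers instead.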
bjh21 2001-05-01 22:19:09 +00:00
parent 5652bfe409
commit 4928854ae3
1 changed file with 22 additions and 20 deletions


@@ -1,4 +1,4 @@
-/* $NetBSD: irq.c,v 1.17 2001/04/21 18:51:17 bjh21 Exp $ */
+/* $NetBSD: irq.c,v 1.18 2001/05/01 22:19:09 bjh21 Exp $ */
/*-
* Copyright (c) 2000, 2001 Ben Harris
@@ -33,7 +33,7 @@
#include <sys/param.h>
__RCSID("$NetBSD: irq.c,v 1.17 2001/04/21 18:51:17 bjh21 Exp $");
__RCSID("$NetBSD: irq.c,v 1.18 2001/05/01 22:19:09 bjh21 Exp $");
#include <sys/device.h>
#include <sys/kernel.h> /* for cold */
@@ -98,6 +98,8 @@ struct irq_handler {
volatile static int current_spl = IPL_HIGH;
+__inline int hardsplx(int);
void
irq_init(void)
{
@@ -286,6 +288,24 @@ void irq_genmasks()
splx(s);
}
+__inline int
+hardsplx(int s)
+{
+	int was;
+	int_off();
+	was = current_spl;
+	/* Don't try this till we've found the IOC */
+	if (the_ioc != NULL)
+		ioc_irq_setmask(irqmask[s]);
+#if NUNIXBP > 0
+	unixbp_irq_setmask(irqmask[s] >> IRQ_UNIXBP_BASE);
+#endif
+	current_spl = s;
+	int_on();
+	return was;
+}
int
raisespl(int s)
{
@@ -306,24 +326,6 @@ lowerspl(int s)
}
}
-int
-hardsplx(int s)
-{
-	int was;
-	int_off();
-	was = current_spl;
-	/* Don't try this till we've found the IOC */
-	if (the_ioc != NULL)
-		ioc_irq_setmask(irqmask[s]);
-#if NUNIXBP > 0
-	unixbp_irq_setmask(irqmask[s] >> IRQ_UNIXBP_BASE);
-#endif
-	current_spl = s;
-	int_on();
-	return was;
-}
#ifdef DDB
void
irq_stat(void (*pr)(const char *, ...))