Event handling optimisations:

- sort the ih_evt_handler list by IPL, higher first. Otherwise some handlers
  would have been delayed, even if they could run at the current IPL.
- As ih_evt_handler is sorted, remove IPLs that have been processed for
  an event when calling hypervisor_set_ipending()
- In hypervisor_set_ipending(), enter the event in ipl_evt_mask only
  for the lowest IPL. As deferred IPLs are processed high to low,
  this ensures that hypervisor_enable_event() will be called only when all
  callbacks have been called for an event. We don't need the evtch_maskcount[]
  counters any more.

Thanks to YAMAMOTO Takashi for ideas and feedback.
This commit is contained in:
bouyer 2005-04-20 14:48:29 +00:00
parent eddbeffa06
commit 6970747e8e
4 changed files with 50 additions and 37 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: hypervisor_machdep.c,v 1.9 2005/04/19 22:14:30 bouyer Exp $ */
/* $NetBSD: hypervisor_machdep.c,v 1.10 2005/04/20 14:48:29 bouyer Exp $ */
/*
*
@ -59,7 +59,7 @@
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.9 2005/04/19 22:14:30 bouyer Exp $");
__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.10 2005/04/20 14:48:29 bouyer Exp $");
#include <sys/cdefs.h>
#include <sys/param.h>
@ -134,7 +134,9 @@ stipending()
port = (l1i << 5) + l2i;
if (evtsource[port]) {
hypervisor_set_ipending(port, l1i, l2i);
hypervisor_set_ipending(
evtsource[port]->ev_imask,
l1i, l2i);
evtsource[port]->ev_evcnt.ev_count++;
if (ret == 0 && ci->ci_ilevel <
evtsource[port]->ev_maxlevel)
@ -276,7 +278,11 @@ hypervisor_enable_ipl(unsigned int ipl)
int l1i, l2i;
struct cpu_info *ci = curcpu();
/* enable all events for ipl */
/*
 * Enable all events for this ipl. As an event is entered in ipl_evt_mask
 * only for its lowest IPL, and pending IPLs are processed high to low,
 * we know that all callbacks for this event have been processed.
 */
l1 = ci->ci_isources[ipl]->ipl_evt_mask1;
ci->ci_isources[ipl]->ipl_evt_mask1 = 0;
@ -292,35 +298,28 @@ hypervisor_enable_ipl(unsigned int ipl)
l2 &= ~(1 << l2i);
evtch = (l1i << 5) + l2i;
KASSERT(evtch_maskcount[evtch] > 0);
if ((--evtch_maskcount[evtch]) == 0) {
hypervisor_enable_event(evtch);
}
hypervisor_enable_event(evtch);
}
}
}
void
hypervisor_set_ipending(int port, int l1, int l2)
hypervisor_set_ipending(u_int32_t iplmask, int l1, int l2)
{
int ipl, imask;
int ipl;
struct cpu_info *ci = curcpu();
KASSERT(port == (l1 << 5) + l2);
KASSERT(evtch_maskcount[port] == 0);
/* set pending bit for the appropriate IPLs */
ci->ci_ipending |= evtsource[port]->ev_imask;
ci->ci_ipending |= iplmask;
/* and set event pending bit for each IPL */
imask = evtsource[port]->ev_imask;
while ((ipl = ffs(imask)) != 0) {
ipl--;
imask &= ~(1 << ipl);
ci->ci_isources[ipl]->ipl_evt_mask1 |= 1 << l1;
ci->ci_isources[ipl]->ipl_evt_mask2[l1] |= 1 << l2;
evtch_maskcount[port]++;
KASSERT(evtch_maskcount[port] <= 32);
}
/*
* And set event pending bit for the lowest IPL. As IPL are handled
* from high to low, this ensure that all callbacks will have been
* called when we ack the event
*/
ipl = ffs(iplmask);
KASSERT(ipl > 0);
ipl--;
ci->ci_isources[ipl]->ipl_evt_mask1 |= 1 << l1;
ci->ci_isources[ipl]->ipl_evt_mask2[l1] |= 1 << l2;
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: evtchn.h,v 1.7 2005/04/18 20:23:56 yamt Exp $ */
/* $NetBSD: evtchn.h,v 1.8 2005/04/20 14:48:29 bouyer Exp $ */
/*
*
@ -37,7 +37,6 @@
#define NR_PIRQS 32
extern struct evtsource *evtsource[];
extern uint8_t evtch_maskcount[];
void events_default_setup(void);
void init_events(void);

View File

@ -1,4 +1,4 @@
/* $NetBSD: hypervisor.h,v 1.12 2005/04/16 23:33:17 bouyer Exp $ */
/* $NetBSD: hypervisor.h,v 1.13 2005/04/20 14:48:29 bouyer Exp $ */
/*
*
@ -94,7 +94,7 @@ void hypervisor_mask_event(unsigned int);
void hypervisor_clear_event(unsigned int);
void hypervisor_force_callback(void);
void hypervisor_enable_ipl(unsigned int);
void hypervisor_set_ipending(int, int, int);
void hypervisor_set_ipending(u_int32_t, int, int);
/*
* Assembler stubs for hyper-calls.

View File

@ -1,4 +1,4 @@
/* $NetBSD: evtchn.c,v 1.12 2005/04/20 07:59:21 xtraeme Exp $ */
/* $NetBSD: evtchn.c,v 1.13 2005/04/20 14:48:29 bouyer Exp $ */
/*
*
@ -34,7 +34,7 @@
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.12 2005/04/20 07:59:21 xtraeme Exp $");
__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.13 2005/04/20 14:48:29 bouyer Exp $");
#include <sys/param.h>
#include <sys/kernel.h>
@ -63,7 +63,6 @@ static struct simplelock irq_mapping_update_lock = SIMPLELOCK_INITIALIZER;
/* event handlers */
struct evtsource *evtsource[NR_EVENT_CHANNELS];
uint8_t evtch_maskcount[NR_EVENT_CHANNELS];
/* Reference counts for bindings to event channels */
static u_int8_t evtch_bindcount[NR_EVENT_CHANNELS];
@ -149,6 +148,7 @@ do_event(int evtch, struct intrframe *regs)
struct intrhand *ih;
int (*ih_fun)(void *, void *);
extern struct uvmexp uvmexp;
u_int32_t iplmask;
#ifdef DIAGNOSTIC
if (evtch >= NR_EVENT_CHANNELS) {
@ -188,11 +188,13 @@ do_event(int evtch, struct intrframe *regs)
printf("evtsource[%d]->ev_maxlevel %d <= ilevel %d\n",
evtch, evtsource[evtch]->ev_maxlevel, ilevel);
#endif
hypervisor_set_ipending(evtch, evtch / 32, evtch % 32);
hypervisor_set_ipending(evtsource[evtch]->ev_imask,
evtch / 32, evtch % 32);
/* leave masked */
return 0;
}
ci->ci_ilevel = evtsource[evtch]->ev_maxlevel;
iplmask = evtsource[evtch]->ev_imask;
/* sti */
ci->ci_idepth++;
#ifdef MULTIPROCESSOR
@ -208,12 +210,14 @@ do_event(int evtch, struct intrframe *regs)
#ifdef MULTIPROCESSOR
x86_intunlock(regs);
#endif
hypervisor_set_ipending(evtch, evtch / 32, evtch % 32);
hypervisor_set_ipending(iplmask,
evtch / 32, evtch % 32);
/* leave masked */
ci->ci_idepth--;
splx(ilevel);
return 0;
}
iplmask &= ~IUNMASK(ci, ih->ih_level);
ci->ci_ilevel = ih->ih_level;
ih_fun = (void *)ih->ih_fun;
ih_fun(ih->ih_arg, regs);
@ -404,7 +408,7 @@ event_set_handler(int evtch, int (*func)(void *), void *arg, int level,
{
struct iplsource *ipls;
struct evtsource *evts;
struct intrhand *ih;
struct intrhand *ih, **ihp;
struct cpu_info *ci;
int s;
@ -468,8 +472,19 @@ event_set_handler(int evtch, int (*func)(void *), void *arg, int level,
ci->ci_dev->dv_xname, evts->ev_evname);
} else {
evts = evtsource[evtch];
ih->ih_evt_next = evts->ev_handlers;
evts->ev_handlers = ih;
/* sort by IPL order, higher first */
for (ihp = &evts->ev_handlers; ; ihp = &((*ihp)->ih_evt_next)) {
if ((*ihp)->ih_level < ih->ih_level) {
/* insert before *ihp */
ih->ih_evt_next = *ihp;
*ihp = ih;
break;
}
if ((*ihp)->ih_evt_next == NULL) {
(*ihp)->ih_evt_next = ih;
break;
}
}
}
intr_calculatemasks(evts);