Simplify the xen event handler callback by:

- moving the interrupt handler callback traversal into a separate
  function.
- using evt_iterate_bits() to scan through the pending bitfield
  (a rough sketch of this pattern is included below, before the diffs).
- removing cross-cpu pending actions - events received on the wrong
  vcpu are re-routed via hypervisor_send_event().
- simplifying nested while() loops by encapsulating them in
  equivalent functions.

Many thanks for multiple reviews by bouyer@ and jym@
Author: cherry
Date:   2012-12-28 06:29:56 +00:00
Commit: ceab76fbae (parent 780493cbdc)

4 changed files with 164 additions and 62 deletions
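
The evt_iterate_bits() helper that the message refers to is not part of this diff; from its uses below it appears to take the level-1 pending word, the level-2 pending words, an optional mask and a per-event callback. As a rough, illustrative sketch only (not the NetBSD implementation - the function name, the callback typedef and the non-atomic bit clearing are assumptions made for the example), the two-level scan it replaces the open-coded while() loops with looks like this:

/*
 * Illustrative sketch of a two-level pending-bit scan of the kind
 * evt_iterate_bits() encapsulates.  Not the NetBSD code: names, the
 * callback signature and the non-atomic bit clearing are simplified.
 */
typedef void (*evt_cb_t)(unsigned int evtch, unsigned int l1i,
    unsigned int l2i, void *arg);

#define BITS_PER_LONG	(sizeof(unsigned long) * 8)

static void
scan_pending_bits(unsigned long *l1p, unsigned long *l2,
    const unsigned long *mask, evt_cb_t cb, void *arg)
{
	unsigned long l1, l2w;
	unsigned int l1i, l2i;

	/* Each set bit in the level-1 word names a pending level-2 word. */
	while ((l1 = *l1p) != 0) {
		l1i = __builtin_ctzl(l1);	/* lowest pending L2 word */
		*l1p &= ~(1UL << l1i);

		/* Walk the set (and unmasked) bits of that level-2 word. */
		l2w = l2[l1i] & (mask ? ~mask[l1i] : ~0UL);
		while (l2w != 0) {
			l2i = __builtin_ctzl(l2w);
			l2w &= ~(1UL << l2i);
			l2[l1i] &= ~(1UL << l2i);

			/* Event channel number is l1i * BITS_PER_LONG + l2i. */
			(*cb)(l1i * BITS_PER_LONG + l2i, l1i, l2i, arg);
		}
	}
}

In the diff below, the level-1/level-2 words are the ipl_evt_mask1/ipl_evt_mask2 fields scanned by hypervisor_do_iplpending(), and the callback slot is filled by evt_do_iplcallback(), which runs the registered handlers for one event channel at the given IPL.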

evtchn.h

@@ -1,4 +1,4 @@
/* $NetBSD: evtchn.h,v 1.20 2011/09/20 00:12:23 jym Exp $ */
/* $NetBSD: evtchn.h,v 1.21 2012/12/28 06:29:56 cherry Exp $ */
/*
*
@@ -33,6 +33,9 @@
extern struct evtsource *evtsource[];
#include <sys/mutex.h>
extern kmutex_t evtlock[];
void events_default_setup(void);
void events_init(void);
bool events_suspend(void);

hypervisor.h

@@ -1,4 +1,4 @@
/* $NetBSD: hypervisor.h,v 1.39 2012/11/25 08:39:35 cherry Exp $ */
/* $NetBSD: hypervisor.h,v 1.40 2012/12/28 06:29:56 cherry Exp $ */
/*
* Copyright (c) 2006 Manuel Bouyer.
@@ -142,6 +142,7 @@ void hypervisor_unmask_event(unsigned int);
void hypervisor_mask_event(unsigned int);
void hypervisor_clear_event(unsigned int);
void hypervisor_enable_ipl(unsigned int);
void hypervisor_do_iplpending(unsigned int, void *);
void hypervisor_set_ipending(uint32_t, int, int);
void hypervisor_machdep_attach(void);
void hypervisor_machdep_resume(void);

hypervisor_machdep.c

@@ -1,4 +1,4 @@
/* $NetBSD: hypervisor_machdep.c,v 1.23 2012/11/25 08:39:36 cherry Exp $ */
/* $NetBSD: hypervisor_machdep.c,v 1.24 2012/12/28 06:29:56 cherry Exp $ */
/*
*
@@ -54,7 +54,7 @@
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.23 2012/11/25 08:39:36 cherry Exp $");
__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.24 2012/12/28 06:29:56 cherry Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -187,7 +187,7 @@ stipending(void)
*/
while (vci->evtchn_upcall_pending) {
cli();
x86_disable_intr();
vci->evtchn_upcall_pending = 0;
@@ -195,7 +195,7 @@ stipending(void)
s->evtchn_pending, s->evtchn_mask,
evt_set_pending, &ret);
sti();
x86_enable_intr();
}
#if 0
@@ -284,6 +284,82 @@ do_hypervisor_callback(struct intrframe *regs)
#endif
}
/* Iterate through pending events and call the event handler */
struct splargs {
int ipl;
struct intrframe *regs;
};
static inline void
evt_do_iplcallback(unsigned int evtch, unsigned int l1i,
unsigned int l2i, void *args)
{
KASSERT(args != NULL);
struct splargs *sargs = args;
struct intrhand *ih;
int (*ih_fun)(void *, void *);
int ipl = sargs->ipl;
struct cpu_info *ci = curcpu();
struct intrframe *regs = sargs->regs;
KASSERT(evtsource[evtch] != 0);
KASSERT(ci->ci_ilevel == ipl);
KASSERT(x86_read_psl() != 0);
x86_enable_intr();
mutex_spin_enter(&evtlock[evtch]);
ih = evtsource[evtch]->ev_handlers;
while (ih != NULL) {
if (ih->ih_cpu != ci) { /* Skip non-local handlers */
ih = ih->ih_evt_next;
continue;
}
if (ih->ih_level == ipl) {
KASSERT(x86_read_psl() == 0);
x86_disable_intr();
ci->ci_ilevel = ih->ih_level;
x86_enable_intr();
ih_fun = (void *)ih->ih_fun;
ih_fun(ih->ih_arg, regs);
if (ci->ci_ilevel != ipl) {
printf("evtchn_do_event: "
"handler %p didn't lower "
"ipl %d %d\n",
ih_fun, ci->ci_ilevel, ipl);
}
}
ih = ih->ih_evt_next;
}
mutex_spin_exit(&evtlock[evtch]);
hypervisor_enable_event(evtch);
x86_disable_intr();
KASSERT(ci->ci_ilevel == ipl);
}
/*
* Iterate through all pending interrupt handlers at 'ipl', on current
* cpu.
*/
void
hypervisor_do_iplpending(unsigned int ipl, void *regs)
{
struct cpu_info *ci;
struct splargs splargs = {
.ipl = ipl,
.regs = regs};
ci = curcpu();
evt_iterate_bits(&ci->ci_isources[ipl]->ipl_evt_mask1,
ci->ci_isources[ipl]->ipl_evt_mask2, NULL,
evt_do_iplcallback, &splargs);
}
void
hypervisor_send_event(struct cpu_info *ci, unsigned int ev)
{
@@ -437,13 +513,6 @@ hypervisor_set_ipending(uint32_t iplmask, int l1, int l2)
KASSERT(ci->ci_isources[ipl] != NULL);
ci->ci_isources[ipl]->ipl_evt_mask1 |= 1UL << l1;
ci->ci_isources[ipl]->ipl_evt_mask2[l1] |= 1UL << l2;
if (__predict_false(ci != curcpu())) {
if (xen_send_ipi(ci, XEN_IPI_HVCB)) {
panic("hypervisor_set_ipending: "
"xen_send_ipi(cpu%d, XEN_IPI_HVCB) failed\n",
(int) ci->ci_cpuid);
}
}
}
void

evtchn.c

@@ -1,4 +1,4 @@
/* $NetBSD: evtchn.c,v 1.65 2012/11/25 20:56:33 cherry Exp $ */
/* $NetBSD: evtchn.c,v 1.66 2012/12/28 06:29:56 cherry Exp $ */
/*
* Copyright (c) 2006 Manuel Bouyer.
@@ -54,7 +54,7 @@
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.65 2012/11/25 20:56:33 cherry Exp $");
__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.66 2012/12/28 06:29:56 cherry Exp $");
#include "opt_xen.h"
#include "isa.h"
@@ -89,7 +89,7 @@ static kmutex_t evtchn_lock;
struct evtsource *evtsource[NR_EVENT_CHANNELS];
/* channel locks */
static kmutex_t evtlock[NR_EVENT_CHANNELS];
kmutex_t evtlock[NR_EVENT_CHANNELS];
/* Reference counts for bindings to event channels XXX: redo for SMP */
static uint8_t evtch_bindcount[NR_EVENT_CHANNELS];
@@ -228,6 +228,50 @@ events_resume (void)
}
static int
xspllower(int ilevel, void *regs)
{
int i;
uint32_t iplbit;
uint32_t iplpending;
struct cpu_info *ci;
KASSERT(kpreempt_disabled());
KASSERT(x86_read_psl() != 0); /* Interrupts must be disabled */
ci = curcpu();
ci->ci_idepth++;
KASSERT(ilevel < ci->ci_ilevel);
/*
* C version of spllower(). ASTs will be checked when
* hypevisor_callback() exits, so no need to check here.
*/
iplpending = (IUNMASK(ci, ilevel) & ci->ci_ipending);
while (iplpending != 0) {
iplbit = 1 << (NIPL - 1);
i = (NIPL - 1);
while (iplpending != 0 && i > ilevel) {
while (iplpending & iplbit) {
ci->ci_ipending &= ~iplbit;
ci->ci_ilevel = i;
hypervisor_do_iplpending(i, regs);
KASSERT(x86_read_psl() != 0);
/* more pending IPLs may have been registered */
iplpending =
(IUNMASK(ci, ilevel) & ci->ci_ipending);
}
i--;
iplbit >>= 1;
}
}
KASSERT(x86_read_psl() != 0);
ci->ci_ilevel = ilevel;
ci->ci_idepth--;
return 0;
}
unsigned int
evtchn_do_event(int evtch, struct intrframe *regs)
{
@@ -236,8 +280,6 @@ evtchn_do_event(int evtch, struct intrframe *regs)
struct intrhand *ih;
int (*ih_fun)(void *, void *);
uint32_t iplmask;
int i;
uint32_t iplbit;
#ifdef DIAGNOSTIC
if (evtch >= NR_EVENT_CHANNELS) {
@@ -292,9 +334,12 @@ evtchn_do_event(int evtch, struct intrframe *regs)
}
ci->ci_ilevel = evtsource[evtch]->ev_maxlevel;
iplmask = evtsource[evtch]->ev_imask;
sti();
KASSERT(x86_read_psl() != 0);
x86_enable_intr();
mutex_spin_enter(&evtlock[evtch]);
ih = evtsource[evtch]->ev_handlers;
while (ih != NULL) {
if (ih->ih_cpu != ci) {
hypervisor_send_event(ih->ih_cpu, evtch);
@@ -307,60 +352,44 @@ evtchn_do_event(int evtch, struct intrframe *regs)
if (evtch == IRQ_DEBUG)
printf("ih->ih_level %d <= ilevel %d\n", ih->ih_level, ilevel);
#endif
cli();
hypervisor_set_ipending(iplmask,
evtch >> LONG_SHIFT, evtch & LONG_MASK);
/* leave masked */
mutex_spin_exit(&evtlock[evtch]);
x86_disable_intr();
hypervisor_set_ipending(iplmask,
evtch >> LONG_SHIFT, evtch & LONG_MASK);
/* leave masked */
goto splx;
}
iplmask &= ~IUNMASK(ci, ih->ih_level);
ci->ci_ilevel = ih->ih_level;
KASSERT(x86_read_psl() == 0);
KASSERT(ih->ih_level > ilevel);
{ /* Lower current spl to current handler */
x86_disable_intr();
/* assert for handlers being sorted by spl */
KASSERT(ci->ci_ilevel >= ih->ih_level);
/* Lower it */
ci->ci_ilevel = ih->ih_level;
x86_enable_intr();
}
/* Assert handler doesn't break spl rules */
KASSERT(ih->ih_level > ilevel);
ih_fun = (void *)ih->ih_fun;
ih_fun(ih->ih_arg, regs);
ih = ih->ih_evt_next;
}
mutex_spin_exit(&evtlock[evtch]);
cli();
x86_disable_intr();
hypervisor_enable_event(evtch);
splx:
/*
* C version of spllower(). ASTs will be checked when
* hypevisor_callback() exits, so no need to check here.
*/
iplmask = (IUNMASK(ci, ilevel) & ci->ci_ipending);
while (iplmask != 0) {
iplbit = 1 << (NIPL - 1);
i = (NIPL - 1);
while (iplmask != 0 && i > ilevel) {
while (iplmask & iplbit) {
ci->ci_ipending &= ~iplbit;
ci->ci_ilevel = i;
for (ih = ci->ci_isources[i]->ipl_handlers;
ih != NULL; ih = ih->ih_ipl_next) {
KASSERT(ih->ih_cpu == ci);
sti();
ih_fun = (void *)ih->ih_fun;
ih_fun(ih->ih_arg, regs);
cli();
if (ci->ci_ilevel != i) {
printf("evtchn_do_event: "
"handler %p didn't lower "
"ipl %d %d\n",
ih_fun, ci->ci_ilevel, i);
ci->ci_ilevel = i;
}
}
hypervisor_enable_ipl(i);
/* more pending IPLs may have been registered */
iplmask =
(IUNMASK(ci, ilevel) & ci->ci_ipending);
}
i--;
iplbit >>= 1;
}
}
ci->ci_ilevel = ilevel;
KASSERT(ci->ci_ilevel > ilevel);
xspllower(ilevel, regs);
return 0;
}