KNF, white spaces and comment typo fixes.

parent e3a2fad897
commit 4128291e47
evtchn.c
@@ -1,4 +1,4 @@
-/* $NetBSD: evtchn.c,v 1.51 2011/08/13 17:23:42 cherry Exp $ */
+/* $NetBSD: evtchn.c,v 1.52 2011/08/28 22:36:17 jym Exp $ */
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -54,7 +54,7 @@
 
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.51 2011/08/13 17:23:42 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.52 2011/08/28 22:36:17 jym Exp $");
 
 #include "opt_xen.h"
 #include "isa.h"
@@ -249,7 +249,7 @@ evtchn_do_event(int evtch, struct intrframe *regs)
 		    evtch >> LONG_SHIFT,
 		    evtch & LONG_MASK);
 
-		if (evtsource[evtch]->ev_cpu != ci) {
+		if (evtsource[evtch]->ev_cpu != ci) {
 			/* facilitate spllower() on remote cpu */
 			struct cpu_info *rci = evtsource[evtch]->ev_cpu;
 			if (xen_send_ipi(rci, XEN_IPI_KICK) != 0) {
@@ -257,7 +257,7 @@ evtchn_do_event(int evtch, struct intrframe *regs)
 			}
 		}
 
-		/* leave masked */
+		/* leave masked */
 		return 0;
 	}
 	ci->ci_ilevel = evtsource[evtch]->ev_maxlevel;
@@ -353,7 +353,7 @@ bind_vcpu_to_evtch(cpuid_t vcpu)
 	evtch_bindcount[evtchn]++;
 
 	mutex_spin_exit(&evtchn_lock);
-
+
 	return evtchn;
 }
 
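The bind_* hunks in this commit all touch the same idiom: evtch_bindcount[] is a reference count updated only while holding the spin mutex evtchn_lock. A minimal sketch of that mutex(9) pattern, with illustrative names (example_lock, example_bindcount and example_bind are stand-ins, not identifiers from the tree):

	#include <sys/mutex.h>

	static kmutex_t example_lock;		/* stand-in for evtchn_lock */
	static int example_bindcount[128];	/* stand-in for evtch_bindcount[] */

	void
	example_init(void)
	{
		/* MUTEX_DEFAULT with an IPL above IPL_NONE yields a spin mutex */
		mutex_init(&example_lock, MUTEX_DEFAULT, IPL_HIGH);
	}

	int
	example_bind(int evtchn)
	{
		mutex_spin_enter(&example_lock);
		example_bindcount[evtchn]++;	/* refcount updated under the lock */
		mutex_spin_exit(&example_lock);

		return evtchn;
	}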
@@ -365,9 +365,9 @@ bind_virq_to_evtch(int virq)
 
 	mutex_spin_enter(&evtchn_lock);
 
-	/*
-	 * XXX: The only per-cpu VIRQ we currently use is VIRQ_TIMER.
-	 * Please re-visit this implementation when others are used.
+	/*
+	 * XXX: The only per-cpu VIRQ we currently use is VIRQ_TIMER.
+	 * Please re-visit this implementation when others are used.
 	 * Note: VIRQ_DEBUG is special-cased, and not used or bound on APs.
 	 * XXX: event->virq/ipi can be unified in a linked-list
 	 * implementation.
@@ -381,10 +381,11 @@ bind_virq_to_evtch(int virq)
 
 	if (virq == VIRQ_TIMER) {
 		evtchn = virq_timer_to_evtch[ci->ci_cpuid];
-	}
-	else {
+	} else {
 		evtchn = virq_to_evtch[virq];
 	}
+
+	/* Allocate a channel if there is none already allocated */
 	if (evtchn == -1) {
 		op.cmd = EVTCHNOP_bind_virq;
 		op.u.bind_virq.virq = virq;
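The `} else {` change above is the KNF part of the commit: NetBSD's style guide (share/misc/style) cuddles else between the closing and opening braces rather than starting it on its own line. A before/after sketch with illustrative code, not taken from this file:

	/* pre-KNF */
	if (cond) {
		a();
	}
	else {
		b();
	}

	/* KNF */
	if (cond) {
		a();
	} else {
		b();
	}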
@@ -399,7 +400,7 @@ bind_virq_to_evtch(int virq)
 	evtch_bindcount[evtchn]++;
 
 	mutex_spin_exit(&evtchn_lock);
-
+
 	return evtchn;
 }
 
@@ -470,7 +471,7 @@ bind_pirq_to_evtch(int pirq)
 	evtch_bindcount[evtchn]++;
 
 	mutex_spin_exit(&evtchn_lock);
-
+
 	return evtchn;
 }
 
@@ -632,13 +633,13 @@ event_set_handler(int evtch, int (*func)(void *), void *arg, int level,
 			panic("can't allocate fixed interrupt source");
 
 		evts->ev_handlers = ih;
-		/*
+		/*
 		 * XXX: We're assuming here that ci is the same cpu as
 		 * the one on which this event/port is bound on. The
 		 * api needs to be reshuffled so that this assumption
 		 * is more explicitly implemented.
 		 */
-		evts->ev_cpu = ci;
+		evts->ev_cpu = ci;
 		mutex_init(&evtlock[evtch], MUTEX_DEFAULT, IPL_HIGH);
 		evtsource[evtch] = evts;
 		if (evname)
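The XXX block above notes that the API should make the cpu binding explicit rather than assumed. One hypothetical shape for such a reshuffle (this signature does not exist in the tree; it only illustrates the comment's suggestion) would be to pass the target cpu alongside the handler:

	/* Hypothetical variant: taking the cpu explicitly turns the
	 * "handler runs on the cpu the port is bound to" assumption
	 * into part of the signature instead of an implicit invariant. */
	int event_set_handler_on_cpu(struct cpu_info *ci, int evtch,
	    int (*func)(void *), void *arg, int level, const char *evname);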
xennetback_xenbus.c
@@ -1,4 +1,4 @@
-/* $NetBSD: xennetback_xenbus.c,v 1.46 2011/05/30 14:34:58 joerg Exp $ */
+/* $NetBSD: xennetback_xenbus.c,v 1.47 2011/08/28 22:36:17 jym Exp $ */
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -26,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xennetback_xenbus.c,v 1.46 2011/05/30 14:34:58 joerg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xennetback_xenbus.c,v 1.47 2011/08/28 22:36:17 jym Exp $");
 
 #include "opt_xen.h"
 
@@ -933,7 +933,7 @@ xennetback_ifstart(struct ifnet *ifp)
 	 * schedule batch of packets for the domain. To achieve this, we
 	 * schedule a soft interrupt, and just return. This way, the network
 	 * stack will enqueue all pending mbufs in the interface's send queue
-	 * before it is processed by the soft inetrrupt handler().
+	 * before it is processed by the soft interrupt handler().
 	 */
 	softint_schedule(xneti->xni_softintr);
 }
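The comment fixed above describes the batching scheme: ifstart only schedules a soft interrupt and returns, and the softint handler later drains everything the stack queued in the meantime. A minimal softint(9) sketch of that pattern, with illustrative names (example_sih, example_softstart and example_ifstart are stand-ins, not symbols from this driver):

	#include <sys/intr.h>
	#include <sys/mbuf.h>
	#include <net/if.h>

	static void *example_sih;	/* softint handle, set up at attach time */

	/* Runs later at soft interrupt level; drains the whole send queue,
	 * so packets queued since the last schedule are handled as a batch. */
	static void
	example_softstart(void *arg)
	{
		struct ifnet *ifp = arg;
		struct mbuf *m;

		for (;;) {
			IFQ_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;
			/* ... hand the packet to the backend ring ... */
			m_freem(m);
		}
	}

	/* Called by the network stack on each transmit attempt; just
	 * defers to the softint and returns, as the comment describes. */
	static void
	example_ifstart(struct ifnet *ifp)
	{
		softint_schedule(example_sih);
	}

	/* At attach time:
	 * example_sih = softint_establish(SOFTINT_NET, example_softstart, ifp); */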