Brush up the generic cross-call routine and use it to implement the SMP
cache flush ops. Also establish a standard soft interrupt handler for ordinary cross-call notification, reserving the NMI level 15 softint for urgent cross calls.
parent eaf530d598
commit 75c5f270d2
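In outline: before this change every SMP cache-flush op hand-rolled the same per-CPU message loop; afterwards each op is a single call into the generic routine, and a dedicated level-13 soft interrupt delivers the notification. Below is a minimal sketch of the resulting call pattern, using only names that appear in this diff; the surrounding sparc port context (cpuinfo, cpuvar.h) is assumed, and xcall() is written here as an ANSI prototype for brevity (the diff itself declares it K&R style), so this is illustrative rather than buildable on its own.

	/*
	 * Sketch only: the shape of the new cross-call path, condensed
	 * from the diff below.  xcall_func_t, cpuinfo and xcall() come
	 * from the sparc port; nothing here is new API beyond what the
	 * diff adds.
	 */
	typedef int (*xcall_func_t)(int, int, int, int);

	/* Run func(arg0..arg3) on every CPU; cpuset is accepted but unused. */
	void	xcall(xcall_func_t, int, int, int, int, int);

	void
	smp_vcache_flush_page(va, ctx)
		int va;
		int ctx;
	{
		/* One call replaces the old lock/loop/IPI/spin sequence. */
		xcall((xcall_func_t)cpuinfo.sp_vcache_flush_page, va, ctx, 0, 0, 0);
	}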
@@ -1,4 +1,4 @@
-/*	$NetBSD: cache.c,v 1.66 2002/12/16 16:59:10 pk Exp $ */
+/*	$NetBSD: cache.c,v 1.67 2002/12/19 10:38:28 pk Exp $ */
 
 /*
  * Copyright (c) 1996
@@ -280,7 +280,7 @@ cypress_cache_enable()
 	cache_alias_bits = (cache_alias_dist - 1) & ~PGOFSET;
 
 	pcr = lda(SRMMU_PCR, ASI_SRMMU);
-	pcr &= ~(CYPRESS_PCR_CE | CYPRESS_PCR_CM);
+	pcr &= ~CYPRESS_PCR_CM;
 
 	/* Now reset cache tag memory if cache not yet enabled */
 	ls = CACHEINFO.c_linesize;
@@ -974,28 +974,7 @@ smp_vcache_flush_page(va, ctx)
 	int va;
 	int ctx;
 {
-	int n, s;
-
-	cpuinfo.sp_vcache_flush_page(va, ctx);
-	if (cold || (cpuinfo.flags & CPUFLG_READY) == 0)
-		return;
-	LOCK_XPMSG();
-	for (n = 0; n < ncpu; n++) {
-		struct cpu_info *cpi = cpus[n];
-		struct xpmsg_flush_page *p;
-
-		if (CPU_READY(cpi))
-			continue;
-		p = &cpi->msg.u.xpmsg_flush_page;
-		s = splhigh();
-		simple_lock(&cpi->msg.lock);
-		cpi->msg.tag = XPMSG_VCACHE_FLUSH_PAGE;
-		p->ctx = getcontext4m();
-		p->va = va;
-		raise_ipi_wait_and_unlock(cpi);
-		splx(s);
-	}
-	UNLOCK_XPMSG();
+	xcall((xcall_func_t)cpuinfo.sp_vcache_flush_page, va, ctx, 0, 0, 0);
 }
 
 void
@@ -1003,29 +982,7 @@ smp_vcache_flush_segment(vr, vs, ctx)
 	int vr, vs;
 	int ctx;
 {
-	int n, s;
-
-	cpuinfo.sp_vcache_flush_segment(vr, vs, ctx);
-	if (cold || (cpuinfo.flags & CPUFLG_READY) == 0)
-		return;
-	LOCK_XPMSG();
-	for (n = 0; n < ncpu; n++) {
-		struct cpu_info *cpi = cpus[n];
-		struct xpmsg_flush_segment *p;
-
-		if (CPU_READY(cpi))
-			continue;
-		p = &cpi->msg.u.xpmsg_flush_segment;
-		s = splhigh();
-		simple_lock(&cpi->msg.lock);
-		cpi->msg.tag = XPMSG_VCACHE_FLUSH_SEGMENT;
-		p->ctx = getcontext4m();
-		p->vr = vr;
-		p->vs = vs;
-		raise_ipi_wait_and_unlock(cpi);
-		splx(s);
-	}
-	UNLOCK_XPMSG();
+	xcall((xcall_func_t)cpuinfo.sp_vcache_flush_segment, vr, vs, ctx, 0, 0);
 }
 
 void
@@ -1033,84 +990,22 @@ smp_vcache_flush_region(vr, ctx)
 	int vr;
 	int ctx;
 {
-	int n, s;
-
-	cpuinfo.sp_vcache_flush_region(vr, ctx);
-	if (cold || (cpuinfo.flags & CPUFLG_READY) == 0)
-		return;
-	LOCK_XPMSG();
-	for (n = 0; n < ncpu; n++) {
-		struct cpu_info *cpi = cpus[n];
-		struct xpmsg_flush_region *p;
-
-		if (CPU_READY(cpi))
-			continue;
-		p = &cpi->msg.u.xpmsg_flush_region;
-		s = splhigh();
-		simple_lock(&cpi->msg.lock);
-		cpi->msg.tag = XPMSG_VCACHE_FLUSH_REGION;
-		p->ctx = getcontext4m();
-		p->vr = vr;
-		raise_ipi_wait_and_unlock(cpi);
-		splx(s);
-	}
-	UNLOCK_XPMSG();
+	xcall((xcall_func_t)cpuinfo.sp_vcache_flush_region, vr, ctx, 0, 0, 0);
 }
 
 void
 smp_vcache_flush_context(ctx)
 	int ctx;
 {
-	int n, s;
-
-	cpuinfo.sp_vcache_flush_context(ctx);
-	if (cold || (cpuinfo.flags & CPUFLG_READY) == 0)
-		return;
-	LOCK_XPMSG();
-	for (n = 0; n < ncpu; n++) {
-		struct cpu_info *cpi = cpus[n];
-		struct xpmsg_flush_context *p;
-
-		if (CPU_READY(cpi))
-			continue;
-		p = &cpi->msg.u.xpmsg_flush_context;
-		s = splhigh();
-		simple_lock(&cpi->msg.lock);
-		cpi->msg.tag = XPMSG_VCACHE_FLUSH_CONTEXT;
-		p->ctx = ctx;
-		raise_ipi_wait_and_unlock(cpi);
-		splx(s);
-	}
-	UNLOCK_XPMSG();
+	xcall((xcall_func_t)cpuinfo.sp_vcache_flush_context, ctx, 0, 0, 0, 0);
 }
 
 void
-smp_cache_flush(va, size)
+smp_cache_flush(va, size, ctx)
 	caddr_t va;
 	u_int size;
+	int ctx;
 {
-	int n, s;
-
-	cpuinfo.sp_cache_flush(va, size);
-	if (cold || (cpuinfo.flags & CPUFLG_READY) == 0)
-		return;
-	LOCK_XPMSG();
-	for (n = 0; n < ncpu; n++) {
-		struct cpu_info *cpi = cpus[n];
-		struct xpmsg_flush_range *p;
-
-		if (CPU_READY(cpi))
-			continue;
-		p = &cpi->msg.u.xpmsg_flush_range;
-		s = splhigh();
-		simple_lock(&cpi->msg.lock);
-		cpi->msg.tag = XPMSG_VCACHE_FLUSH_RANGE;
-		p->ctx = getcontext4m();
-		p->va = va;
-		p->size = size;
-		raise_ipi_wait_and_unlock(cpi);
-		splx(s);
-	}
-	UNLOCK_XPMSG();
+	xcall((xcall_func_t)cpuinfo.sp_cache_flush, (int)va, (int)size, ctx, 0, 0);
 }
 #endif /* MULTIPROCESSOR */
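All of the loops deleted above shared one sender-side handshake, which now lives only inside xcall() (see the cpu.c hunks that follow). A condensed sketch of that handshake for a single remote CPU, using the cpu_info fields shown in this diff; the helper name xcall_post_one is hypothetical (in the commit this code is inline in xcall()), and the timeout/acknowledge spin is elided:

	/*
	 * Condensed from xcall() in cpu.c below.  Sketch only: the
	 * helper name is hypothetical, the types come from the sparc
	 * port's cpuvar.h, and the sender's spin on CPUFLG_GOTMSG
	 * (with a timeout) is omitted here.
	 */
	static void
	xcall_post_one(struct cpu_info *cpi, xcall_func_t func,
	    int arg0, int arg1, int arg2, int arg3)
	{
		volatile struct xpmsg_func *p;

		simple_lock(&cpi->msg.lock);
		cpi->msg.tag = XPMSG_FUNC;
		cpi->flags &= ~CPUFLG_GOTMSG;	/* target sets this when done */
		p = &cpi->msg.u.xpmsg_func;
		p->func = func;
		p->arg0 = arg0;
		p->arg1 = arg1;
		p->arg2 = arg2;
		p->arg3 = arg3;
		/* Post the level-13 softint instead of the old NMI-level IPI. */
		cpi->intreg_4m->pi_set = PINTR_SINTRLEV(13);
	}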
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.c,v 1.139 2002/12/16 16:59:10 pk Exp $ */
+/*	$NetBSD: cpu.c,v 1.140 2002/12/19 10:38:28 pk Exp $ */
 
 /*
  * Copyright (c) 1996
@@ -75,6 +75,10 @@
 #include <machine/oldmon.h>
 #include <machine/idprom.h>
 
+#if defined(MULTIPROCESSOR) && defined(DDB)
+#include <machine/db_machdep.h>
+#endif
+
 #include <sparc/sparc/cache.h>
 #include <sparc/sparc/asm.h>
 #include <sparc/sparc/cpuvar.h>
@@ -554,55 +558,34 @@ extern void cpu_hatch __P((void));	/* in locore.s */
 		printf("CPU did not spin up\n");
 }
 
-/*
- * Calls raise_ipi(), waits for the remote CPU to notice the message, and
- * unlocks this CPU's message lock, which we expect was locked at entry.
- */
-void
-raise_ipi_wait_and_unlock(cpi)
-	struct cpu_info *cpi;
-{
-	int i;
-
-	raise_ipi(cpi);
-	i = 0;
-	while ((cpi->flags & CPUFLG_GOTMSG) == 0) {
-		if (i++ > 500000) {
-			printf("raise_ipi_wait_and_unlock(cpu%d): couldn't ping cpu%d\n",
-			    cpuinfo.ci_cpuid, cpi->ci_cpuid);
-			break;
-		}
-	}
-	simple_unlock(&cpi->msg.lock);
-}
-
 /*
  * Call a function on every CPU. One must hold xpmsg_lock around
  * this function.
  */
 void
-cross_call(func, arg0, arg1, arg2, arg3, cpuset)
+xcall(func, arg0, arg1, arg2, arg3, cpuset)
 	int	(*func)(int, int, int, int);
 	int	arg0, arg1, arg2, arg3;
 	int	cpuset;	/* XXX unused; cpus to send to: we do all */
 {
-	int n, i, not_done;
-	struct xpmsg_func *p;
+	int s, n, i, done;
+	volatile struct xpmsg_func *p;
 
+	/* XXX - note p->retval is probably no longer useful */
+
 	/*
 	 * If no cpus are configured yet, just call ourselves.
 	 */
 	if (cpus == NULL) {
 		p = &cpuinfo.msg.u.xpmsg_func;
-		p->func = func;
-		p->arg0 = arg0;
-		p->arg1 = arg1;
-		p->arg2 = arg2;
-		p->arg3 = arg3;
-		p->retval = (*p->func)(p->arg0, p->arg1, p->arg2, p->arg3);
+		if (func)
+			p->retval = (*func)(arg0, arg1, arg2, arg3);
 		return;
 	}
 
+	s = splvm();	/* XXX - should validate this level */
+	LOCK_XPMSG();
+
 	/*
 	 * Firstly, call each CPU. We do this so that they might have
 	 * finished by the time we start looking.
@@ -615,55 +598,47 @@ cross_call(func, arg0, arg1, arg2, arg3, cpuset)
 
 		simple_lock(&cpi->msg.lock);
 		cpi->msg.tag = XPMSG_FUNC;
+		cpi->flags &= ~CPUFLG_GOTMSG;
 		p = &cpi->msg.u.xpmsg_func;
 		p->func = func;
 		p->arg0 = arg0;
 		p->arg1 = arg1;
 		p->arg2 = arg2;
 		p->arg3 = arg3;
-		cpi->flags &= ~CPUFLG_GOTMSG;
-		raise_ipi(cpi);
+		cpi->intreg_4m->pi_set = PINTR_SINTRLEV(13);/*xcall_cookie->pil*/
+		/*was: raise_ipi(cpi);*/
 	}
 
 	/*
 	 * Second, call ourselves.
 	 */
-
 	p = &cpuinfo.msg.u.xpmsg_func;
-
-	/* Call this on me first. */
-	p->func = func;
-	p->arg0 = arg0;
-	p->arg1 = arg1;
-	p->arg2 = arg2;
-	p->arg3 = arg3;
-
-	p->retval = (*p->func)(p->arg0, p->arg1, p->arg2, p->arg3);
+	if (func)
+		p->retval = (*func)(arg0, arg1, arg2, arg3);
 
 	/*
 	 * Lastly, start looping, waiting for all cpu's to register that they
 	 * have completed (bailing if it takes "too long", being loud about
 	 * this in the process).
	 */
-	i = 0;
-	while (not_done) {
-		not_done = 0;
+	done = 0;
+	i = 100000;	/* time-out */
+	while (!done) {
+		done = 1;
 		for (n = 0; n < ncpu; n++) {
 			struct cpu_info *cpi = cpus[n];
 
 			if (CPU_READY(cpi))
 				continue;
 
-			if ((cpi->flags & CPUFLG_GOTMSG) != 0)
-				not_done = 1;
+			if ((cpi->flags & CPUFLG_GOTMSG) == 0)
+				done = 0;
 		}
-		if (not_done && i++ > 100000) {
-			printf("cross_call(cpu%d): couldn't ping cpus:",
-			    cpuinfo.ci_cpuid);
+		if (!done && i-- < 0) {
+			printf("xcall(cpu%d,%p): couldn't ping cpus:",
+			    cpuinfo.ci_cpuid, func);
 			break;
 		}
-		if (not_done == 0)
-			break;
 	}
 	for (n = 0; n < ncpu; n++) {
 		struct cpu_info *cpi = cpus[n];
@@ -671,11 +646,14 @@ cross_call(func, arg0, arg1, arg2, arg3, cpuset)
 		if (CPU_READY(cpi))
 			continue;
 		simple_unlock(&cpi->msg.lock);
-		if ((cpi->flags & CPUFLG_GOTMSG) != 0)
+		if ((cpi->flags & CPUFLG_GOTMSG) == 0)
 			printf(" cpu%d", cpi->ci_cpuid);
 	}
-	if (not_done)
+	if (!done)
 		printf("\n");
+
+	UNLOCK_XPMSG();
+	splx(s);
 }
 
 void
@@ -686,6 +664,7 @@ mp_pause_cpus()
 	if (cpus == NULL)
 		return;
 
+	/* XXX - can currently be called at a high IPL level */
 	LOCK_XPMSG();
 	for (n = 0; n < ncpu; n++) {
 		struct cpu_info *cpi = cpus[n];
@@ -696,7 +675,7 @@ mp_pause_cpus()
 		simple_lock(&cpi->msg.lock);
 		cpi->msg.tag = XPMSG_PAUSECPU;
 		cpi->flags &= ~CPUFLG_GOTMSG;
-		raise_ipi_wait_and_unlock(cpi);
+		cpi->intreg_4m->pi_set = PINTR_SINTRLEV(13);/*xcall_cookie->pil*/
 	}
 	UNLOCK_XPMSG();
 }
@@ -1,4 +1,4 @@
-/*	$NetBSD: intr.c,v 1.66 2002/12/18 06:20:36 mrg Exp $ */
+/*	$NetBSD: intr.c,v 1.67 2002/12/19 10:38:28 pk Exp $ */
 
 /*
  * Copyright (c) 1992, 1993
@@ -73,6 +73,9 @@
 #endif
 
 void	*softnet_cookie;
+#if defined(MULTIPROCESSOR)
+void	*xcall_cookie;
+#endif
 
 void	strayintr __P((struct clockframe *));
 #ifdef DIAGNOSTIC
@@ -250,11 +253,15 @@ nmi_hard()
 	panic("nmi");
 }
 
+/*
+ * Non-maskable soft interrupt level 15 handler
+ */
 void
 nmi_soft(tf)
 	struct trapframe *tf;
 {
 
+	/* XXX - Most of this is superseded by xcallintr() below */
 #ifdef MULTIPROCESSOR
 	switch (cpuinfo.msg.tag) {
 	case XPMSG_SAVEFPU:
@@ -282,14 +289,14 @@ nmi_soft(tf)
 	}
 	case XPMSG_FUNC:
 	{
-		struct xpmsg_func *p = &cpuinfo.msg.u.xpmsg_func;
+		volatile struct xpmsg_func *p = &cpuinfo.msg.u.xpmsg_func;
 
 		p->retval = (*p->func)(p->arg0, p->arg1, p->arg2, p->arg3);
 		break;
 	}
 	case XPMSG_VCACHE_FLUSH_PAGE:
 	{
-		struct xpmsg_flush_page *p = &cpuinfo.msg.u.xpmsg_flush_page;
+		volatile struct xpmsg_flush_page *p = &cpuinfo.msg.u.xpmsg_flush_page;
 		int ctx = getcontext();
 
 		setcontext(p->ctx);
@@ -299,7 +306,7 @@ nmi_soft(tf)
 	}
 	case XPMSG_VCACHE_FLUSH_SEGMENT:
 	{
-		struct xpmsg_flush_segment *p = &cpuinfo.msg.u.xpmsg_flush_segment;
+		volatile struct xpmsg_flush_segment *p = &cpuinfo.msg.u.xpmsg_flush_segment;
 		int ctx = getcontext();
 
 		setcontext(p->ctx);
@@ -309,7 +316,7 @@
 	}
 	case XPMSG_VCACHE_FLUSH_REGION:
 	{
-		struct xpmsg_flush_region *p = &cpuinfo.msg.u.xpmsg_flush_region;
+		volatile struct xpmsg_flush_region *p = &cpuinfo.msg.u.xpmsg_flush_region;
 		int ctx = getcontext();
 
 		setcontext(p->ctx);
@@ -319,7 +326,7 @@
 	}
 	case XPMSG_VCACHE_FLUSH_CONTEXT:
 	{
-		struct xpmsg_flush_context *p = &cpuinfo.msg.u.xpmsg_flush_context;
+		volatile struct xpmsg_flush_context *p = &cpuinfo.msg.u.xpmsg_flush_context;
 		int ctx = getcontext();
 
 		setcontext(p->ctx);
@@ -329,17 +336,17 @@
 	}
 	case XPMSG_VCACHE_FLUSH_RANGE:
 	{
-		struct xpmsg_flush_range *p = &cpuinfo.msg.u.xpmsg_flush_range;
+		volatile struct xpmsg_flush_range *p = &cpuinfo.msg.u.xpmsg_flush_range;
 		int ctx = getcontext();
 
 		setcontext(p->ctx);
-		cpuinfo.sp_cache_flush(p->va, p->size);
+		cpuinfo.sp_cache_flush(p->va, p->size, p->ctx);
 		setcontext(ctx);
 		break;
 	}
 	case XPMSG_DEMAP_TLB_PAGE:
 	{
-		struct xpmsg_flush_page *p = &cpuinfo.msg.u.xpmsg_flush_page;
+		volatile struct xpmsg_flush_page *p = &cpuinfo.msg.u.xpmsg_flush_page;
 		int ctx = getcontext();
 
 		setcontext(p->ctx);
@@ -349,7 +356,7 @@
 	}
 	case XPMSG_DEMAP_TLB_SEGMENT:
 	{
-		struct xpmsg_flush_segment *p = &cpuinfo.msg.u.xpmsg_flush_segment;
+		volatile struct xpmsg_flush_segment *p = &cpuinfo.msg.u.xpmsg_flush_segment;
 		int ctx = getcontext();
 
 		setcontext(p->ctx);
@@ -359,7 +366,7 @@
 	}
 	case XPMSG_DEMAP_TLB_REGION:
 	{
-		struct xpmsg_flush_region *p = &cpuinfo.msg.u.xpmsg_flush_region;
+		volatile struct xpmsg_flush_region *p = &cpuinfo.msg.u.xpmsg_flush_region;
 		int ctx = getcontext();
 
 		setcontext(p->ctx);
@@ -369,7 +376,7 @@
 	}
 	case XPMSG_DEMAP_TLB_CONTEXT:
 	{
-		struct xpmsg_flush_context *p = &cpuinfo.msg.u.xpmsg_flush_context;
+		volatile struct xpmsg_flush_context *p = &cpuinfo.msg.u.xpmsg_flush_context;
 		int ctx = getcontext();
 
 		setcontext(p->ctx);
@@ -384,7 +391,46 @@ nmi_soft(tf)
 	cpuinfo.flags |= CPUFLG_GOTMSG;
 #endif
 }
 
+#if defined(MULTIPROCESSOR)
+/*
+ * Respond to an xcall() request from another CPU.
+ */
+static void xcallintr(void *v)
+{
+	struct cpu_info *cpi = cpus[cpuinfo.ci_cpuid];
+
+	switch (cpi->msg.tag) {
+	case XPMSG_PAUSECPU:
+	{
+#if defined(DDB)
+		struct trapframe *tf = v;
+		volatile db_regs_t regs;
+
+		regs.db_tf = *tf;
+		regs.db_fr = *(struct frame *)tf->tf_out[6];
+		cpi->ci_ddb_regs = &regs;
+#endif
+		cpi->flags |= CPUFLG_PAUSED|CPUFLG_GOTMSG;
+		while (cpi->flags & CPUFLG_PAUSED) /**/;
+#if defined(DDB)
+		cpi->ci_ddb_regs = NULL;
+#endif
+		return;
+	}
+	case XPMSG_FUNC:
+	{
+		volatile struct xpmsg_func *p = &cpuinfo.msg.u.xpmsg_func;
+
+		if (p->func)
+			p->retval = (*p->func)(p->arg0, p->arg1, p->arg2, p->arg3);
+		break;
+	}
+	}
+	cpi->flags |= CPUFLG_GOTMSG;
+}
+#endif /* MULTIPROCESSOR */
+
 #endif /* SUN4M || SUN4D */
 
 /*
  * Level 15 interrupts are special, and not vectored here.
@@ -617,6 +663,10 @@ softintr_init()
 {
 
 	softnet_cookie = softintr_establish(IPL_SOFTNET, softnet, NULL);
+#if defined(MULTIPROCESSOR) && (defined(SUN4M) || defined(SUN4D))
+	/* Establish a standard soft interrupt handler for cross calls */
+	xcall_cookie = softintr_establish(13, xcallintr, NULL);
+#endif
 }
 
 /*
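Taken together, the three files wire up the notification path end to end. A condensed summary of the flow, written as a comment sketch and using only names that appear in this commit:

	/*
	 * How the pieces connect (condensed from this commit):
	 *
	 * 1. softintr_init() reserves a level-13 soft interrupt for
	 *    cross calls:
	 *	xcall_cookie = softintr_establish(13, xcallintr, NULL);
	 *
	 * 2. xcall() posts a message and schedules that softint on each
	 *    target CPU:
	 *	cpi->intreg_4m->pi_set = PINTR_SINTRLEV(13);
	 *
	 * 3. xcallintr() runs on the target, dispatches on cpi->msg.tag,
	 *    then acknowledges, releasing the sender's spin-wait:
	 *	cpi->flags |= CPUFLG_GOTMSG;
	 *
	 * The level-15 NMI softint path in nmi_soft() remains available
	 * for urgent cross calls, per the commit message.
	 */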