Don't assume we're on a uniprocessor system (the hypervisor may be running SMP)

and add "lock;" barrier to avoid instruction reordering in event handlings.
This fix the last issue with event loss I was getting on SMP systems.
bouyer 2005-03-26 20:00:49 +00:00
parent f2b82c7f8a
commit 57d0b7f835
3 changed files with 14 additions and 16 deletions
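
The race this change closes is the classic unmask-then-check window: re-enabling
event delivery is a plain store to the shared-info page, and without a fence the
pending check that follows can be satisfied by a stale read, losing the event.
Below is a minimal sketch of the pattern, not committed code; the two pointers are
stand-ins for the evtchn_upcall_mask/evtchn_upcall_pending fields touched in the
diffs, and the helpers are only declared here.

    /*
     * Minimal sketch (not committed code) of the window this commit closes.
     */
    extern volatile unsigned char *upcall_mask;    /* stand-in for evtchn_upcall_mask */
    extern volatile unsigned char *upcall_pending; /* stand-in for evtchn_upcall_pending */
    extern void x86_lfence(void);
    extern void hypervisor_force_callback(void);

    void
    sti_racy(void)
    {
            *upcall_mask = 0;
            /* no fence: the load below may be ordered before the store */
            if (*upcall_pending)
                    hypervisor_force_callback();
    }

    void
    sti_fixed(void)
    {
            *upcall_mask = 0;
            x86_lfence();   /* unmask, then check (avoid races) */
            if (*upcall_pending)
                    hypervisor_force_callback();
    }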

hypervisor_machdep.c

@@ -1,4 +1,4 @@
-/* $NetBSD: hypervisor_machdep.c,v 1.5 2005/03/17 15:26:06 bouyer Exp $ */
+/* $NetBSD: hypervisor_machdep.c,v 1.6 2005/03/26 20:00:49 bouyer Exp $ */
 
 /*
  *
@@ -59,7 +59,7 @@
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.5 2005/03/17 15:26:06 bouyer Exp $");
+__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.6 2005/03/26 20:00:49 bouyer Exp $");
 
 #include <sys/param.h>
@@ -113,9 +113,7 @@ stipending()
 	 * STI at the end
 	 */
 	while (s->vcpu_data[0].evtchn_upcall_pending) {
-		__insn_barrier();
 		cli();
-		__insn_barrier();
 		s->vcpu_data[0].evtchn_upcall_pending = 0;
 		/* NB. No need for a barrier here -- XCHG is a barrier
 		 * on x86. */
@@ -143,7 +141,6 @@ stipending()
 #endif
 		}
 	}
-	__insn_barrier();
 	sti();
 }

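In stipending() the open-coded __insn_barrier() calls go away: cli()/sti() now
carry the needed fence themselves (see the cpufunc.h and xen.h hunks below), and
clearing pending bits already happens through XCHG, which the surviving comment
notes is itself a barrier on x86. A hedged sketch of that fetch-and-clear scan;
only x86_atomic_xchg() comes from this commit, the array and handler are stand-ins.

    #include <stdint.h>
    #include <strings.h>    /* ffs() */

    extern volatile uint32_t evtchn_pending[];  /* stand-in for the shared-info bitmap */
    extern uint32_t x86_atomic_xchg(volatile uint32_t *, unsigned long);
    extern void do_event(int port);             /* hypothetical per-port handler */

    /* Scan one 32-bit word of pending event-channel bits. */
    static void
    scan_pending_word(int i)
    {
            uint32_t bits;
            int bit;

            /* XCHG both fetches and clears the word, and is a barrier on x86. */
            bits = x86_atomic_xchg(&evtchn_pending[i], 0);
            while (bits != 0) {
                    bit = ffs((int)bits) - 1;
                    bits &= ~(1U << bit);
                    do_event(i * 32 + bit);
            }
    }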
cpufunc.h

@@ -1,4 +1,4 @@
-/* $NetBSD: cpufunc.h,v 1.5 2005/03/09 22:39:20 bouyer Exp $ */
+/* $NetBSD: cpufunc.h,v 1.6 2005/03/26 20:00:49 bouyer Exp $ */
 /* NetBSD: cpufunc.h,v 1.28 2004/01/14 11:31:55 yamt Exp */
 
 /*-
@@ -294,7 +294,7 @@ breakpoint(void)
 #define write_psl(x) do { \
 	__insn_barrier(); \
 	HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = (x) ; \
-	__insn_barrier(); \
+	x86_lfence(); \
 	if ((x) == 0 && HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_pending) \
 		hypervisor_force_callback(); \
 } while (0)

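write_psl() is the Xen stand-in for restoring the x86 interrupt state: the store
to evtchn_upcall_mask is now followed by x86_lfence() before the pending check, so
a restore that unmasks cannot miss an event already posted. A usage sketch,
assuming the matching read_psl() accessor (not shown in this hunk):

    /*
     * Sketch only: read_psl() is assumed to read back
     * vcpu_data[0].evtchn_upcall_mask, mirroring write_psl() above.
     */
    void
    example_critical_section(void)
    {
            u_long psl = read_psl();    /* save current event mask */

            write_psl(1);               /* block event delivery */
            /* ... touch state that must not race an event upcall ... */
            write_psl(psl);             /* restore; forces a callback if
                                         * events became pending meanwhile */
    }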
xen.h

@@ -1,4 +1,4 @@
-/* $NetBSD: xen.h,v 1.10 2005/03/09 22:39:20 bouyer Exp $ */
+/* $NetBSD: xen.h,v 1.11 2005/03/26 20:00:49 bouyer Exp $ */
 
 /*
  *
@@ -129,7 +129,7 @@ do { \
 	volatile shared_info_t *_shared = HYPERVISOR_shared_info; \
 	__insn_barrier(); \
 	if ((_shared->vcpu_data[0].evtchn_upcall_mask = (x)) == 0) { \
-		__insn_barrier(); \
+		x86_lfence(); \
 		if (__predict_false(_shared->vcpu_data[0].evtchn_upcall_pending)) \
 			hypervisor_force_callback(); \
 	} \
@@ -138,7 +138,7 @@ do { \
 #define __cli() \
 do { \
 	HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1; \
-	__insn_barrier(); \
+	x86_lfence(); \
 } while (0)
 
 #define __sti() \
@@ -146,7 +146,7 @@ do { \
 	volatile shared_info_t *_shared = HYPERVISOR_shared_info; \
 	__insn_barrier(); \
 	_shared->vcpu_data[0].evtchn_upcall_mask = 0; \
-	__insn_barrier(); /* unmask then check (avoid races) */ \
+	x86_lfence(); /* unmask then check (avoid races) */ \
 	if (__predict_false(_shared->vcpu_data[0].evtchn_upcall_pending)) \
 		hypervisor_force_callback(); \
 } while (0)
@@ -161,18 +161,19 @@ do { \
 } while (/* CONSTCOND */ 0)
 #define save_and_sti(x) __save_and_sti(x)
 
-#ifdef MULTIPROCESSOR
+/*
+ * always assume we're on multiprocessor. We don't know how many CPU the
+ * underlying hardware has.
+ */
 #define __LOCK_PREFIX "lock; "
-#else
-#define __LOCK_PREFIX ""
-#endif
 
 static __inline__ uint32_t
 x86_atomic_xchg(volatile uint32_t *ptr, unsigned long val)
 {
 	unsigned long result;
 
-	__asm __volatile("xchgl %0,%1"
+	__asm __volatile(__LOCK_PREFIX
+			"xchgl %0,%1"
 			:"=r" (result)
 			:"m" (*ptr), "0" (val)
 			:"memory");