cleanup low-level bit mangling code

parent 79295fa58d
commit 0ebd762347
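Editorial note: this change replaces the Linux-derived bit-manipulation primitives used by the NetBSD/xen event-channel code with NetBSD-native equivalents and deletes what is then unused. The renames, as recorded in the diffs below:

    Linux-style (old)                 NetBSD/x86 (new)
    set_bit(nr, addr)                 x86_atomic_set_bit(ptr, bitno)
    clear_bit(nr, addr)               x86_atomic_clear_bit(ptr, bitno)
    test_bit(nr, addr)                x86_atomic_test_bit(ptr, bitno)
    test_and_clear_bit(nr, addr)      x86_atomic_test_and_clear_bit(ptr, bitno)
    xchg(ptr, v)                      x86_atomic_xchg(ptr, val)
    barrier()                         __insn_barrier()
    unlikely(x)                       __predict_false(x)

Note that the argument order flips from Linux's (bit, address) to (address, bit). The size-dispatching __xchg() is reduced to the 32-bit case the port actually uses, the private atomic_t/atomic_inc() and rdtscll() are dropped, and stray-event accounting in hypervisor_acknowledge_event() is left behind as an XXX comment.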
--- hypervisor_machdep.c	(revision 1.2)
+++ hypervisor_machdep.c	(revision 1.3)
@@ -1,4 +1,4 @@
-/* $NetBSD: hypervisor_machdep.c,v 1.2 2004/04/24 17:41:49 cl Exp $ */
+/* $NetBSD: hypervisor_machdep.c,v 1.3 2004/06/14 13:55:52 cl Exp $ */
 
 /*
  *
@@ -59,7 +59,7 @@
 
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.2 2004/04/24 17:41:49 cl Exp $");
+__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.3 2004/06/14 13:55:52 cl Exp $");
 
 #include <sys/cdefs.h>
 #include <sys/param.h>
@@ -69,7 +69,6 @@ __KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.2 2004/04/24 17:41:49 cl Ex
 #include <machine/hypervisor.h>
 
 /* static */ unsigned long event_mask = 0;
-static unsigned long ev_err_count;
 
 int stipending(void);
 int
@@ -96,7 +95,7 @@ stipending()
          */
         __cli();
 
-        events = xchg(&HYPERVISOR_shared_info->events, 0);
+        events = x86_atomic_xchg(&HYPERVISOR_shared_info->events, 0);
         events &= event_mask;
 
         while (events) {
@@ -140,11 +139,11 @@ void do_hypervisor_callback(struct trapframe *regs)
 
         do {
                 /* Specialised local_irq_save(). */
-                flags = test_and_clear_bit(EVENTS_MASTER_ENABLE_BIT,
-                    &shared->events_mask);
-                barrier();
+                flags = x86_atomic_test_and_clear_bit(&shared->events_mask,
+                    EVENTS_MASTER_ENABLE_BIT);
+                __insn_barrier();
 
-                events = xchg(&shared->events, 0);
+                events = x86_atomic_xchg(&shared->events, 0);
                 events &= event_mask;
 
                 /* 'events' now contains some pending events to handle. */
@@ -165,8 +164,9 @@ void do_hypervisor_callback(struct trapframe *regs)
 
                 /* Specialised local_irq_restore(). */
                 if (flags)
-                        set_bit(EVENTS_MASTER_ENABLE_BIT, &shared->events_mask);
-                barrier();
+                        x86_atomic_set_bit(&shared->events_mask,
+                            EVENTS_MASTER_ENABLE_BIT);
+                __insn_barrier();
         }
         while ( shared->events );
 
@@ -180,24 +180,25 @@ void do_hypervisor_callback(struct trapframe *regs)
 
 void hypervisor_enable_event(unsigned int ev)
 {
-        set_bit(ev, &event_mask);
-        set_bit(ev, &HYPERVISOR_shared_info->events_mask);
+        x86_atomic_set_bit(&event_mask, ev);
+        x86_atomic_set_bit(&HYPERVISOR_shared_info->events_mask, ev);
 #if 0
-        if ( test_bit(EVENTS_MASTER_ENABLE_BIT,
-            &HYPERVISOR_shared_info->events_mask) )
+        if (x86_atomic_test_bit(&HYPERVISOR_shared_info->events_mask,
+            EVENTS_MASTER_ENABLE_BIT))
                 do_hypervisor_callback(NULL);
 #endif
 }
 
 void hypervisor_disable_event(unsigned int ev)
 {
-        clear_bit(ev, &event_mask);
-        clear_bit(ev, &HYPERVISOR_shared_info->events_mask);
+
+        x86_atomic_clear_bit(&event_mask, ev);
+        x86_atomic_clear_bit(&HYPERVISOR_shared_info->events_mask, ev);
 }
 
 void hypervisor_acknowledge_event(unsigned int ev)
 {
-        if ( !(event_mask & (1<<ev)) )
-                atomic_inc((atomic_t *)(void *)&ev_err_count);
+
+        /* XXX add event counter for stray events: !(event_mask & (1<<ev)) */
 
         x86_atomic_set_bit(&HYPERVISOR_shared_info->events_mask, ev);
 }
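For readers following the call-site changes above, a minimal usage sketch (editorial, not part of the commit; my_mask and example() are hypothetical names, and the x86_atomic_* inlines from xen.h further down are assumed to be in scope):

    /* Hypothetical mask word, standing in for
     * HYPERVISOR_shared_info->events_mask. */
    static unsigned long my_mask;

    static void
    example(unsigned int ev)
    {

            /* Old Linux style was set_bit(ev, &my_mask); note the
             * swapped argument order. */
            x86_atomic_set_bit(&my_mask, ev);

            /* Nonzero iff the bit was set before being cleared, exactly
             * as the old test_and_clear_bit() behaved. */
            if (x86_atomic_test_and_clear_bit(&my_mask, ev)) {
                    /* The bit was set and has now been cleared atomically. */
            }
    }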
--- hypervisor.h	(revision 1.7)
+++ hypervisor.h	(revision 1.8)
@@ -1,4 +1,4 @@
-/* $NetBSD: hypervisor.h,v 1.7 2004/04/26 22:05:05 cl Exp $ */
+/* $NetBSD: hypervisor.h,v 1.8 2004/06/14 13:55:52 cl Exp $ */
 
 /*
  *
@@ -43,12 +43,21 @@ struct xen_npx_attach_args {
 };
 
 
+#define u16 uint16_t
+#define u32 uint32_t
+#define u64 uint64_t
+
 /* include the hypervisor interface */
 #include <sys/systm.h>
 #include <machine/hypervisor-ifs/block.h>
 #include <machine/hypervisor-ifs/hypervisor-if.h>
 #include <machine/hypervisor-ifs/dom0_ops.h>
 #include <machine/hypervisor-ifs/network.h>
+#include <machine/hypervisor-ifs/vbd.h>
+
+#undef u16
+#undef u32
+#undef u64
 
 
 /*
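The #define/#undef bracketing added above is worth a note: the hypervisor-ifs headers are written against Linux-style u16/u32/u64 names, which xen.h used to supply as global typedefs (removed in the next file). Mapping them to fixed-width types only for the duration of the interface includes keeps the abbreviations out of the kernel's namespace. A standalone sketch of the pattern (struct if_example is a hypothetical consumer):

    #include <stdint.h>

    #define u16 uint16_t            /* visible only until the #undef */
    #define u32 uint32_t

    /* Stand-in for a <machine/hypervisor-ifs/...> header written
     * against the Linux-style names. */
    struct if_example {
            u16 id;
            u32 flags;
    };

    #undef u16
    #undef u32

    /* Past this point, `u16 x;` no longer compiles: the short names
     * existed only while the interface headers were being read. */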
--- xen.h	(revision 1.7)
+++ xen.h	(revision 1.8)
@@ -1,4 +1,4 @@
-/* $NetBSD: xen.h,v 1.7 2004/05/07 15:51:04 cl Exp $ */
+/* $NetBSD: xen.h,v 1.8 2004/06/14 13:55:52 cl Exp $ */
 
 /*
  *
@@ -54,10 +54,6 @@ void xenmachmem_init(void);
 void xenprivcmd_init(void);
 void xenvfr_init(void);
 
-typedef uint16_t u16;
-typedef uint32_t u32;
-typedef uint64_t u64;
-
 #ifdef XENDEBUG
 void printk(const char *, ...);
 void vprintk(const char *, va_list);
@@ -110,8 +106,6 @@ void vprintk(const char *, va_list);
 /* Everything below this point is not included by assembler (.S) files. */
 #ifndef _LOCORE
 
-#include <machine/hypervisor-ifs/hypervisor-if.h>
-
 /* some function prototypes */
 void trap_init(void);
 
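The two deletions just above follow from the hypervisor.h change: with the interface headers now pulled in (and type-bracketed) by hypervisor.h itself, xen.h no longer needs its own hypervisor-if.h include, nor the global u16/u32/u64 typedefs that existed only to satisfy those headers.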
@@ -122,185 +116,121 @@ void trap_init(void);
  * the enable bit is set, there may be pending events to be handled.
  * We may therefore call into do_hypervisor_callback() directly.
  */
-#define unlikely(x) __builtin_expect((x),0)
 #define __save_flags(x) \
 do { \
-        (x) = test_bit(EVENTS_MASTER_ENABLE_BIT, \
-            &HYPERVISOR_shared_info->events_mask); \
-        barrier(); \
+        (x) = x86_atomic_test_bit(&HYPERVISOR_shared_info->events_mask, \
+            EVENTS_MASTER_ENABLE_BIT); \
+        __insn_barrier(); \
 } while (0)
 
 #define __restore_flags(x) \
 do { \
         shared_info_t *_shared = HYPERVISOR_shared_info; \
-        if (x) set_bit(EVENTS_MASTER_ENABLE_BIT, &_shared->events_mask); \
-        barrier(); \
+        if (x) x86_atomic_set_bit(&_shared->events_mask, \
+            EVENTS_MASTER_ENABLE_BIT); \
+        __insn_barrier(); \
 } while (0)
-/* if ( unlikely(_shared->events) && (x) ) do_hypervisor_callback(NULL); \ */
+/* if (__predict_false(_shared->events) && (x)) do_hypervisor_callback(NULL); \ */
 
 #define __cli() \
 do { \
-        clear_bit(EVENTS_MASTER_ENABLE_BIT, &HYPERVISOR_shared_info->events_mask);\
-        barrier(); \
+        x86_atomic_clear_bit(&HYPERVISOR_shared_info->events_mask, \
+            EVENTS_MASTER_ENABLE_BIT); \
+        __insn_barrier(); \
 } while (0)
 
 #define __sti() \
 do { \
         shared_info_t *_shared = HYPERVISOR_shared_info; \
-        set_bit(EVENTS_MASTER_ENABLE_BIT, &_shared->events_mask); \
-        barrier(); \
+        x86_atomic_set_bit(&_shared->events_mask, \
+            EVENTS_MASTER_ENABLE_BIT); \
+        __insn_barrier(); \
 } while (0)
-/* if ( unlikely(_shared->events) ) do_hypervisor_callback(NULL); \ */
-#define cli() __cli()
-#define sti() __sti()
-#define save_flags(x) __save_flags(x)
-#define restore_flags(x) __restore_flags(x)
-#define save_and_cli(x) __save_and_cli(x)
-#define save_and_sti(x) __save_and_sti(x)
+/* if (__predict_false(_shared->events)) do_hypervisor_callback(NULL); \ */
 
+#define cli() __cli()
+#define sti() __sti()
+#define save_flags(x) __save_flags(x)
+#define restore_flags(x) __restore_flags(x)
+#define save_and_cli(x) __save_and_cli(x)
+#define save_and_sti(x) __save_and_sti(x)
 
-/* This is a barrier for the compiler only, NOT the processor! */
-#define barrier() __asm__ __volatile__("": : :"memory")
-
+#ifdef MULTIPROCESSOR
+#define __LOCK_PREFIX "lock; "
+#else
 #define __LOCK_PREFIX ""
-#define __LOCK ""
-#define __ADDR (*(volatile long *) addr)
-/*
- * Make sure gcc doesn't try to be clever and move things around
- * on us. We need to use _exactly_ the address the user gave us,
- * not some alias that contains the same information.
- */
-typedef struct { volatile int counter; } atomic_t;
+#endif
 
-#define xchg(ptr,v) \
-    ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((struct __xchg_dummy *)(x))
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr,
-    int size)
+static __inline__ unsigned long
+x86_atomic_xchg(unsigned long *ptr, unsigned long val)
 {
-        switch (size) {
-        case 1:
-                __asm__ __volatile__("xchgb %b0,%1"
-                        :"=q" (x)
-                        :"m" (*__xg(ptr)), "0" (x)
-                        :"memory");
-                break;
-        case 2:
-                __asm__ __volatile__("xchgw %w0,%1"
-                        :"=r" (x)
-                        :"m" (*__xg(ptr)), "0" (x)
-                        :"memory");
-                break;
-        case 4:
-                __asm__ __volatile__("xchgl %0,%1"
-                        :"=r" (x)
-                        :"m" (*__xg(ptr)), "0" (x)
-                        :"memory");
-                break;
-        }
-        return x;
+        unsigned long result;
+
+        __asm __volatile("xchgl %0,%1"
+            :"=r" (result)
+            :"m" (*ptr), "0" (val)
+            :"memory");
+
+        return result;
 }
 
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+static __inline__ int
+x86_atomic_test_and_clear_bit(volatile void *ptr, int bitno)
 {
-        int oldbit;
+        int result;
 
-        __asm__ __volatile__( __LOCK_PREFIX
-                "btrl %2,%1\n\tsbbl %0,%0"
-                :"=r" (oldbit),"=m" (__ADDR)
-                :"Ir" (nr) : "memory");
-        return oldbit;
+        __asm __volatile(__LOCK_PREFIX
+            "btrl %2,%1 ;"
+            "sbbl %0,%0"
+            :"=r" (result), "=m" (*(volatile uint32_t *)(ptr))
+            :"Ir" (bitno) : "memory");
+
+        return result;
 }
 
-static __inline__ int constant_test_bit(int nr, const volatile void * addr)
+static __inline int
+x86_constant_test_bit(const volatile void *ptr, int bitno)
 {
-        return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
+        return ((1UL << (bitno & 31)) &
+            (((const volatile uint32_t *) ptr)[bitno >> 5])) != 0;
 }
 
-static __inline__ int variable_test_bit(int nr, volatile void * addr)
+static __inline int
+x86_variable_test_bit(const volatile void *ptr, int bitno)
 {
-        int oldbit;
+        int result;
 
-        __asm__ __volatile__(
-                "btl %2,%1\n\tsbbl %0,%0"
-                :"=r" (oldbit)
-                :"m" (__ADDR),"Ir" (nr));
-        return oldbit;
+        __asm __volatile(
+            "btl %2,%1 ;"
+            "sbbl %0,%0"
+            :"=r" (result)
+            :"m" (*(volatile uint32_t *)(ptr)), "Ir" (bitno));
+
+        return result;
 }
 
-#define test_bit(nr,addr) \
-    (__builtin_constant_p(nr) ? \
-     constant_test_bit((nr),(addr)) : \
-     variable_test_bit((nr),(addr)))
+#define x86_atomic_test_bit(ptr, bitno) \
+    (__builtin_constant_p(bitno) ? \
+     x86_constant_test_bit((ptr),(bitno)) : \
+     variable_test_bit((ptr),(bitno)))
 
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered. See __set_bit()
- * if you do not require the atomic guarantees.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static __inline__ void set_bit(int nr, volatile void * addr)
+static __inline void
+x86_atomic_set_bit(volatile void *ptr, int bitno)
 {
-        __asm__ __volatile__( __LOCK_PREFIX
-                "btsl %1,%0"
-                :"=m" (__ADDR)
-                :"Ir" (nr));
+        __asm __volatile(__LOCK_PREFIX
+            "btsl %1,%0"
+            :"=m" (*(volatile uint32_t *)(ptr))
+            :"Ir" (bitno));
 }
 
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered. However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
- * in order to ensure changes are visible on other processors.
- */
-static __inline__ void clear_bit(int nr, volatile void * addr)
+static __inline void
+x86_atomic_clear_bit(volatile void *ptr, int bitno)
 {
-        __asm__ __volatile__( __LOCK_PREFIX
-                "btrl %1,%0"
-                :"=m" (__ADDR)
-                :"Ir" (nr));
+        __asm __volatile(__LOCK_PREFIX
+            "btrl %1,%0"
+            :"=m" (*(volatile uint32_t *)(ptr))
+            :"Ir" (bitno));
 }
 
-/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
- */
-static __inline__ void atomic_inc(atomic_t *v)
-{
-        __asm__ __volatile__(
-                __LOCK "incl %0"
-                :"=m" (v->counter)
-                :"m" (v->counter));
-}
-
-
-#define rdtscll(val) \
-    __asm__ __volatile__("rdtsc" : "=A" (val))
-
 
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _OS_H_ */
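A userland smoke test of the two workhorse primitives, copied from the inlines added above (editorial sketch: main(), assert(), and the __LOCK_PREFIX choice are assumptions for illustration, and it must be built as a 32-bit x86 object, e.g. with -m32, since xchgl assumes a 32-bit unsigned long):

    #include <assert.h>
    #include <stdint.h>

    #define __LOCK_PREFIX ""        /* uniprocessor flavour of the header */

    static __inline unsigned long
    x86_atomic_xchg(unsigned long *ptr, unsigned long val)
    {
            unsigned long result;

            /* Swap val into *ptr; the old contents land in result. */
            __asm __volatile("xchgl %0,%1"
                :"=r" (result)
                :"m" (*ptr), "0" (val)
                :"memory");

            return result;
    }

    static __inline void
    x86_atomic_set_bit(volatile void *ptr, int bitno)
    {
            __asm __volatile(__LOCK_PREFIX
                "btsl %1,%0"
                :"=m" (*(volatile uint32_t *)(ptr))
                :"Ir" (bitno));
    }

    int
    main(void)
    {
            unsigned long word = 0;

            x86_atomic_set_bit(&word, 3);
            assert(word == 0x8);

            /* xchg returns the old value while storing the new one. */
            assert(x86_atomic_xchg(&word, 0) == 0x8);
            assert(word == 0);
            return 0;
    }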