/* $NetBSD: lock.h,v 1.16 2008/04/28 20:23:23 martin Exp $ */

/*-
 * Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and Matthew Fredette.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 */

#ifndef _HPPA_LOCK_H_
#define	_HPPA_LOCK_H_

#include <sys/stdint.h>
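
/*
 * The PA-RISC LDCW (load and clear word) instruction operates only on
 * a 16-byte-aligned word, so the lock word actually used is the
 * aligned word within the (larger) __cpu_simple_lock_t storage.
 */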
#define	HPPA_LDCW_ALIGN		16

#define	__SIMPLELOCK_ALIGN(p)						\
	(volatile unsigned long *)(((uintptr_t)(p) + HPPA_LDCW_ALIGN - 1) & \
	    ~(HPPA_LDCW_ALIGN - 1))
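
/*
 * LDCW atomically writes zero to the word it loads, so the sense of
 * the values is inverted relative to most ports: zero means "locked"
 * and non-zero means "unlocked".
 */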
#define	__SIMPLELOCK_RAW_LOCKED		0
#define	__SIMPLELOCK_RAW_UNLOCKED	1

static __inline int
__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__SIMPLELOCK_ALIGN(__ptr) == __SIMPLELOCK_RAW_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__SIMPLELOCK_ALIGN(__ptr) == __SIMPLELOCK_RAW_UNLOCKED;
}
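
/*
 * Atomically load the lock word and clear it to zero, returning the
 * previous value: non-zero means the lock was free and the caller
 * now holds it.
 */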
static __inline int
__ldcw(volatile unsigned long *__ptr)
{
	int __val;

	__asm volatile("ldcw 0(%1), %0"
	    : "=r" (__val) : "r" (__ptr)
	    : "memory");

	return __val;
}
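
/*
 * The SYNC instruction forces completion of all earlier memory
 * accesses before any later ones, acting as a full memory barrier.
 */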
static __inline void
__sync(void)
{
	__asm volatile("sync\n"
	    : /* no outputs */
	    : /* no inputs */
	    : "memory");
}
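
/*
 * Mark every word of the lock storage "unlocked" so that the
 * 16-byte-aligned word used by LDCW starts out free no matter where
 * it falls within the structure.
 */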
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{
	alp->csl_lock[0] = alp->csl_lock[1] =
	    alp->csl_lock[2] = alp->csl_lock[3] =
	    __SIMPLELOCK_RAW_UNLOCKED;
	__sync();
}

static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

	/*
	 * Note, if we detect that the lock is held when
	 * we do the initial load-clear-word, we spin using
	 * a non-locked load to save the coherency logic
	 * some work.
	 */
	while (__ldcw(__aptr) == __SIMPLELOCK_RAW_LOCKED)
		while (*__aptr == __SIMPLELOCK_RAW_LOCKED)
			;
}
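
/*
 * Try once to grab the lock; returns non-zero if it was acquired.
 */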
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{
	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

	return (__ldcw(__aptr) != __SIMPLELOCK_RAW_LOCKED);
}
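
/*
 * Release with an ordinary store; the preceding __sync() ensures all
 * stores from the critical section are visible before the lock word
 * reads as unlocked.
 */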
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{
	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

	__sync();
	*__aptr = __SIMPLELOCK_RAW_UNLOCKED;
}
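
/*
 * Non-atomic set/clear of the lock word, with no memory barrier;
 * for callers that already have exclusive access to the lock.
 */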
static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *alp)
{
	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

	*__aptr = __SIMPLELOCK_RAW_LOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *alp)
{
	volatile unsigned long *__aptr = __SIMPLELOCK_ALIGN(alp);

	*__aptr = __SIMPLELOCK_RAW_UNLOCKED;
}
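
/*
 * hppa provides a single full-barrier primitive, so the read, write,
 * and full memory barriers all map to SYNC.
 */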
static __inline void
mb_read(void)
{
	__sync();
}

static __inline void
mb_write(void)
{
	__sync();
}

static __inline void
mb_memory(void)
{
	__sync();
}
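
/*
 * A minimal usage sketch (hypothetical caller, not part of this
 * header); the lock must be initialized before first use and the
 * acquire/release pair must bracket the critical section:
 *
 *	static __cpu_simple_lock_t example_lock;	// hypothetical name
 *
 *	__cpu_simple_lock_init(&example_lock);
 *	__cpu_simple_lock(&example_lock);
 *	// ... critical section ...
 *	__cpu_simple_unlock(&example_lock);
 */
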
#endif /* _HPPA_LOCK_H_ */