NetBSD/sys/arch/vax/include/lock.h
garbled d974db0ada Merge the ppcoea-renovation branch to HEAD.
This branch was a major cleanup and rototill of the various OEA-CPU-based
PPC ports, focused on sharing as much code as possible between the ports
to eliminate near-identical copies of files in every tree.  Additionally,
there is a new PIC system that unifies the interrupt-handling interface
for all the different OEA ppc arches.  The work for this branch was done
by a variety of people, too many to list here.

TODO:
bebox still needs work to complete the transition to -renovation.
ofppc still needs a bunch of work, which I will be looking at.
ev64260 still needs to be renovated.
amigappc was not attempted.

NOTES:
pmppc was removed as an arch, and moved to an evbppc target.
2007-10-17 19:52:51 +00:00

/* $NetBSD: lock.h,v 1.27 2007/10/17 19:57:48 garbled Exp $ */
/*
 * Copyright (c) 2000 Ludd, University of Luleå, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of Luleå.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _VAX_LOCK_H_
#define _VAX_LOCK_H_
#ifdef _KERNEL
#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#include <machine/intr.h>
#endif
#include <machine/cpu.h>
#endif
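
/*
 * Spinlock primitives for the vax port.  In the kernel the lock and
 * unlock operations call small assembler stubs (Slocktry, Sunlock);
 * in userland they use the interlocked branch-on-bit instructions
 * (bbssi/bbcci) directly.
 */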
static __inline int
__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}

static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *);

static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *__alp)
{
#ifdef _KERNEL
	/*
	 * Initializing is the same as releasing: hand the lock address
	 * to the Sunlock assembler stub in %r1, which clears the lock.
	 */
	__asm volatile ("movl %0,%%r1;jsb Sunlock"
		: /* No output */
		: "g"(__alp)
		: "r1","cc","memory");
#else
	/* Userland: atomically clear bit 0 with the interlocked bbcci. */
	__asm volatile ("bbcci $0,%0,1f;1:"
		: /* No output */
		: "m"(*__alp)
		: "cc");
#endif
}

static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *);

static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *__alp)
{
	int ret;

#ifdef _KERNEL
	/*
	 * The Slocktry assembler stub attempts to take the lock whose
	 * address is in %r1 and leaves the result in %r0: nonzero on
	 * success, zero if the lock was already held.
	 */
	__asm volatile ("movl %1,%%r1;jsb Slocktry;movl %%r0,%0"
		: "=&r"(ret)
		: "g"(__alp)
		: "r0","r1","cc","memory");
#else
	/*
	 * Userland: bbssi atomically tests and sets bit 0.  If the bit
	 * was already set, the branch is taken and ret stays 0;
	 * otherwise the lock was acquired and ret becomes 1.
	 */
	__asm volatile ("clrl %0;bbssi $0,%1,1f;incl %0;1:"
		: "=&r"(ret)
		: "m"(*__alp)
		: "cc");
#endif
	return ret;
}
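
/*
 * Illustrative sketch (not part of the original header): how a caller
 * might use the try-lock together with __cpu_simple_unlock() below.
 * The lock variable and critical section here are hypothetical.
 *
 *	static __cpu_simple_lock_t example_lock = __SIMPLELOCK_UNLOCKED;
 *
 *	if (__cpu_simple_lock_try(&example_lock)) {
 *		... critical section ...
 *		__cpu_simple_unlock(&example_lock);
 *	}
 */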

#ifdef _KERNEL
#if defined(MULTIPROCESSOR)
/*
 * While spinning for the lock, service pending console and DDB IPIs;
 * otherwise a CPU spinning at raised IPL would never see them, which
 * can deadlock the system (see the SPINLOCK_SPIN_HOOK comment below).
 */
#define	VAX_LOCK_CHECKS ((1 << IPI_SEND_CNCHAR) | (1 << IPI_DDB))
#define	__cpu_simple_lock(__alp)					\
do {									\
	struct cpu_info *__ci = curcpu();				\
									\
	while (__cpu_simple_lock_try(__alp) == 0) {			\
		int __s;						\
									\
		if (__ci->ci_ipimsgs & VAX_LOCK_CHECKS) {		\
			__s = splipi();					\
			cpu_handle_ipi();				\
			splx(__s);					\
		}							\
	}								\
} while (/*CONSTCOND*/0)
#else /* MULTIPROCESSOR */
#define	__cpu_simple_lock(__alp)					\
do {									\
	while (__cpu_simple_lock_try(__alp) == 0) {			\
		;							\
	}								\
} while (/*CONSTCOND*/0)
#endif
#else
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *);

static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *__alp)
{
	/* Userland: spin on the interlocked test-and-set until it succeeds. */
	__asm volatile ("1:bbssi $0,%0,1b"
		: /* No outputs */
		: "m"(*__alp)
		: "cc");
}
#endif /* _KERNEL */

#if 0
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *);

static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *__alp)
{
	struct cpu_info *ci = curcpu();

	while (__cpu_simple_lock_try(__alp) == 0) {
		int s;

		if (ci->ci_ipimsgs & IPI_SEND_CNCHAR) {
			s = splipi();
			cpu_handle_ipi();
			splx(s);
		}
	}

#if 0
	__asm volatile ("movl %0,%%r1;jsb Slock"
		: /* No output */
		: "g"(__alp)
		: "r0","r1","cc","memory");
#endif
#if 0
	__asm volatile ("1:;bbssi $0, %0, 1b"
		: /* No output */
		: "m"(*__alp));
#endif
}
#endif

static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *);

static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *__alp)
{
#ifdef _KERNEL
	/* Release via the Sunlock assembler stub, lock address in %r1. */
	__asm volatile ("movl %0,%%r1;jsb Sunlock"
		: /* No output */
		: "g"(__alp)
		: "r1","cc","memory");
#else
	/* Userland: atomically clear bit 0 with the interlocked bbcci. */
	__asm volatile ("bbcci $0,%0,1f;1:"
		: /* No output */
		: "m"(*__alp)
		: "cc");
#endif
}
#if defined(MULTIPROCESSOR)
/*
 * On the VAX, interprocessor interrupts can come in at device priority
 * level or lower.  This can cause some problems while waiting for r/w
 * spinlocks from a high'ish priority level: IPIs that come in will not
 * be processed.  This can lead to deadlock.
 *
 * This hook allows IPIs to be processed while a spinlock's interlock
 * is released.
 */
#define	SPINLOCK_SPIN_HOOK						\
do {									\
	struct cpu_info *__ci = curcpu();				\
	int __s;							\
									\
	if (__ci->ci_ipimsgs != 0) {					\
		/* printf("CPU %lu has IPIs pending\n",			\
		    __ci->ci_cpuid); */					\
		__s = splipi();						\
		cpu_handle_ipi();					\
		splx(__s);						\
	}								\
} while (/*CONSTCOND*/0)
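
/*
 * Illustrative sketch (not part of the original header): MI spin loops
 * can invoke the hook each time an acquisition attempt fails, e.g.:
 *
 *	while (__cpu_simple_lock_try(&lock) == 0)
 *		SPINLOCK_SPIN_HOOK;
 *
 * so that pending IPIs are drained while the lock is still unavailable.
 */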
#endif /* MULTIPROCESSOR */
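
/*
 * The VAX is generally taken to provide a strongly ordered memory
 * model, which is presumably why the read/write memory barriers below
 * can be empty.
 */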
static __inline void mb_read(void);

static __inline void
mb_read(void)
{
}

static __inline void mb_write(void);

static __inline void
mb_write(void)
{
}

#endif /* _VAX_LOCK_H_ */