Partially adapt the VAX port to the newlock2 changes. These are untested
but they do at least compile.
commit 53af1aa78b (parent bebefe198b)
Author: matt
Date:   2007-02-16 01:33:49 +00:00
14 changed files with 501 additions and 83 deletions

sys/arch/vax/conf/files.vax

@ -1,4 +1,4 @@
# $NetBSD: files.vax,v 1.104 2006/07/29 19:10:56 ad Exp $
# $NetBSD: files.vax,v 1.105 2007/02/16 01:33:49 matt Exp $
#
# new style config file for vax architecture
#
@ -335,6 +335,7 @@ file arch/vax/vax/ka670.c vax670 | vaxany
file arch/vax/vax/ka680.c vax680 | vaxany
file arch/vax/vax/emulate.S !no_insn_emulate
file arch/vax/vax/unimpl_emul.S !no_insn_emulate
file arch/vax/vax/lock_stubs.S
file arch/vax/vax/scb.c
file arch/vax/vax/conf.c
file arch/vax/vax/trap.c

sys/arch/vax/include/Makefile

@ -1,4 +1,4 @@
# $NetBSD: Makefile,v 1.25 2006/07/26 19:54:58 drochner Exp $
# $NetBSD: Makefile,v 1.26 2007/02/16 01:34:02 matt Exp $
INCSDIR= /usr/include/vax
@ -13,7 +13,7 @@ INCS= ansi.h aout_machdep.h asm.h \
ioa.h \
ka410.h ka420.h ka43.h ka630.h ka650.h ka750.h ka820.h \
leds.h lcgreg.h limits.h lock.h \
macros.h math.h mtpr.h mcontext.h \
macros.h math.h mcontext.h mtpr.h mutex.h \
nexus.h param.h pcb.h pmap.h pmc.h \
proc.h profile.h psl.h pte.h ptrace.h \
qdioctl.h qdreg.h qduser.h qevent.h \

sys/arch/vax/include/cpu.h

@ -1,4 +1,4 @@
/* $NetBSD: cpu.h,v 1.74 2006/09/05 19:32:57 matt Exp $ */
/* $NetBSD: cpu.h,v 1.75 2007/02/16 01:34:02 matt Exp $ */
/*
* Copyright (c) 1994 Ludd, University of Lule}, Sweden
@ -132,6 +132,8 @@ struct cpu_info {
*/
struct cpu_data ci_data; /* MI per-cpu data */
struct lwp *ci_curlwp; /* current owner of the processor */
int ci_mtx_oldspl; /* saved spl */
int ci_mtx_count; /* negative count of mutexes */
/*
* Private members.
@ -170,13 +172,14 @@ struct cpu_mp_softc {
#define curcpu() ((struct cpu_info *)mfpr(PR_SSP))
#define curlwp (curcpu()->ci_curlwp)
#define cpu_number() (curcpu()->ci_cpuid)
#define need_resched(ci) \
#define cpu_need_resched(ci) \
do { \
(ci)->ci_want_resched = 1; \
mtpr(AST_OK,PR_ASTLVL); \
} while (/*CONSTCOND*/ 0)
#define cpu_proc_fork(x, y) do { } while (/*CONSTCOND*/0)
#define cpu_lwp_free(l, f) do { } while (/*CONSTCOND*/0)
#define cpu_lwp_free2(l) do { } while (/*CONSTCOND*/0)
#if defined(MULTIPROCESSOR)
#define CPU_IS_PRIMARY(ci) ((ci)->ci_flags & CI_MASTERCPU)
@ -194,7 +197,7 @@ extern char vax_mp_tramp;
* process as soon as possible.
*/
#define signotify(p) mtpr(AST_OK,PR_ASTLVL);
#define cpu_signotify(l) mtpr(AST_OK,PR_ASTLVL)
/*
@ -202,7 +205,7 @@ extern char vax_mp_tramp;
* buffer pages are invalid. On the hp300, request an ast to send us
* through trap, marking the proc as needing a profiling tick.
*/
#define need_proftick(p) {(p)->p_flag |= P_OWEUPC; mtpr(AST_OK,PR_ASTLVL); }
#define cpu_need_proftick(l) do { (l)->l_pflag |= LP_OWEUPC; mtpr(AST_OK,PR_ASTLVL); } while (/*CONSTCOND*/ 0)
/*
* This defines the I/O device register space size in pages.
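
The two new ci_mtx_* fields carry the spin-mutex bookkeeping used by the lock stubs added later in this commit: ci_mtx_count goes negative while spin mutexes are held, and ci_mtx_oldspl records the IPL to restore when the outermost one is released. A rough C model of the intended protocol (the function names here are illustrative, not part of the commit):

#include <machine/mtpr.h>               /* mtpr()/mfpr(), PR_IPL */

/* Illustrative model of the ci_mtx_count/ci_mtx_oldspl protocol. */
static inline void
spin_mutex_ipl_raise(struct cpu_info *ci, int mutexipl)
{
        int oldipl = mfpr(PR_IPL);

        if (mutexipl > oldipl)
                mtpr(mutexipl, PR_IPL);         /* raise to the mutex's IPL */
        if (--ci->ci_mtx_count == -1)
                ci->ci_mtx_oldspl = oldipl;     /* outermost: save entry IPL */
}

static inline void
spin_mutex_ipl_restore(struct cpu_info *ci)
{
        if (++ci->ci_mtx_count == 0)
                mtpr(ci->ci_mtx_oldspl, PR_IPL); /* outermost: restore IPL */
}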

sys/arch/vax/include/intr.h

@ -1,4 +1,4 @@
/* $NetBSD: intr.h,v 1.21 2006/12/21 15:55:25 yamt Exp $ */
/* $NetBSD: intr.h,v 1.22 2007/02/16 01:34:02 matt Exp $ */
/*
* Copyright (c) 1998 Matt Thomas.
@ -33,6 +33,7 @@
#define _VAX_INTR_H_
#include <sys/queue.h>
#include <machine/mtpr.h>
/* Define the various Interrupt Priority Levels */
@ -76,24 +77,22 @@
#ifdef _KERNEL
#ifndef __lint__
#define splx(reg) \
({ \
register int __val; \
__asm volatile ("mfpr $0x12,%0;mtpr %1,$0x12" \
: "=&g" (__val) \
: "g" (reg)); \
__val; \
})
#define _splset(reg) \
((void)({ \
__asm volatile ("mtpr %0,$0x12" \
: \
: "g" (reg)); \
}))
typedef int ipl_t;
static inline ipl_t
splx(ipl_t new_ipl)
{
ipl_t old_ipl = mfpr(PR_IPL);
mtpr(new_ipl, PR_IPL);
return old_ipl;
}
static inline void
_splset(ipl_t ipl)
{
mtpr(ipl, PR_IPL);
}
typedef struct {
ipl_t _ipl;
} ipl_cookie_t;
@ -108,23 +107,17 @@ makeiplcookie(ipl_t ipl)
static inline int
splraiseipl(ipl_cookie_t icookie)
{
register int __val;
int newipl = icookie._ipl;
register ipl_t __val;
ipl_t newipl = icookie._ipl;
__asm volatile ("mfpr $0x12,%0" : "=&g" (__val) : );
__asm volatile ("mfpr %1,%0" : "=&g" (__val) : "g" (PR_IPL));
if (newipl > __val) {
_splset(newipl);
}
return __val;
}
#define _setsirr(reg) \
do { \
__asm volatile ("mtpr %0,$0x14" \
: \
: "g" (reg)); \
} while (0)
#endif
#define _setsirr(reg) mtpr((reg), PR_SIRR)
#define spl0() _splset(IPL_NONE) /* IPL00 */
#define spllowersoftclock() _splset(IPL_SOFTCLOCK) /* IPL08 */
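
Converting the spl macros to inline functions gives them prototypes and type-checked arguments while generating the same mfpr/mtpr instructions. A typical caller looks like this (an illustrative fragment; IPL_HIGH stands in for whichever level the code needs):

static ipl_cookie_t example_icookie;            /* usually set up at attach */

void
example_init(void)
{
        example_icookie = makeiplcookie(IPL_HIGH);
}

void
example_critical(void)
{
        int s = splraiseipl(example_icookie);   /* raise IPL if below it */

        /* ... touch state shared with the interrupt handler ... */
        splx(s);                                /* restore the previous IPL */
}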

sys/arch/vax/include/lock.h

@ -1,4 +1,4 @@
/* $NetBSD: lock.h,v 1.21 2005/12/28 19:09:30 perry Exp $ */
/* $NetBSD: lock.h,v 1.22 2007/02/16 01:34:02 matt Exp $ */
/*
* Copyright (c) 2000 Ludd, University of Lule}, Sweden.
@ -41,7 +41,7 @@
#include <machine/cpu.h>
#endif
static __inline void
static inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{
#ifdef _KERNEL
@ -57,7 +57,7 @@ __cpu_simple_lock_init(__cpu_simple_lock_t *alp)
#endif
}
static __inline int
static inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{
int ret;
@ -94,7 +94,7 @@ do { \
} \
} while (0)
#else
static __inline void
static inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
__asm volatile ("1:bbssi $0,%0,1b"
@ -105,7 +105,7 @@ __cpu_simple_lock(__cpu_simple_lock_t *alp)
#endif /* _KERNEL */
#if 0
static __inline void
static inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
struct cpu_info *ci = curcpu();
@ -134,7 +134,7 @@ __cpu_simple_lock(__cpu_simple_lock_t *alp)
}
#endif
static __inline void
static inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{
#ifdef _KERNEL
@ -174,4 +174,14 @@ do { \
} \
} while (0)
#endif /* MULTIPROCESSOR */
static inline void
mb_read(void)
{
}
static inline void
mb_write(void)
{
}
#endif /* _VAX_LOCK_H_ */
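
For reference, a minimal use of the primitives above (a sketch; MI code normally reaches these through higher-level locks rather than calling them directly). The lock is taken with the interlocked BBSSI loop shown earlier and released by clearing the lock byte:

#include <machine/lock.h>
#include <machine/types.h>  /* __cpu_simple_lock_t, __SIMPLELOCK_UNLOCKED */

static __cpu_simple_lock_t example_lock = __SIMPLELOCK_UNLOCKED;

void
example(void)
{
        __cpu_simple_lock(&example_lock);       /* spins on BBSSI until free */
        /* ... critical section ... */
        __cpu_simple_unlock(&example_lock);
}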

sys/arch/vax/include/mtpr.h

@ -1,4 +1,4 @@
/* $NetBSD: mtpr.h,v 1.19 2005/12/24 23:24:07 perry Exp $ */
/* $NetBSD: mtpr.h,v 1.20 2007/02/16 01:34:03 matt Exp $ */
/*
* Copyright (c) 1994 Ludd, University of Lule}, Sweden.
@ -166,21 +166,25 @@
#ifndef _LOCORE
#define mtpr(val,reg) \
{ \
__asm volatile ("mtpr %0,%1" \
: /* No output */ \
: "g" ((long)(val)), "g" (reg)); \
static inline void
mtpr(register_t val, int reg)
{
__asm volatile (
"mtpr %0,%1"
: /* No output */
: "g" (val), "g" (reg));
}
#define mfpr(reg) \
({ \
register int __val; \
__asm volatile ("mfpr %1,%0" \
: "=g" (__val) \
: "g" (reg)); \
__val; \
})
static inline register_t
mfpr(int reg)
{
register_t __val;
__asm volatile (
"mfpr %1,%0"
: "=g" (__val)
: "g" (reg));
return __val;
}
#endif /* _LOCORE */
#endif /* _VAX_MTPR_H_ */
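
Turning mtpr()/mfpr() from statement macros into inline functions means the operands are now type-checked, which is why the pmap.c hunks later in this commit grow (uintptr_t) casts. An illustrative fragment:

#include <sys/types.h>
#include <machine/mtpr.h>
#include <machine/pcb.h>

void
example_load_p0(struct pcb *pcb)
{
        /* P0BR holds a pointer, so the inline mtpr() wants a cast ... */
        mtpr((uintptr_t)pcb->P0BR, PR_P0BR);
        /* ... and mfpr() can now appear in an initializer like any call. */
        register_t ipl = mfpr(PR_IPL);
        (void)ipl;
}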

sys/arch/vax/include/mutex.h (new file)

@ -0,0 +1,208 @@
/* $NetBSD: mutex.h,v 1.1 2007/02/16 01:34:03 matt Exp $ */
/*-
* Copyright (c) 2002, 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe and Andrew Doran.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _VAX_MUTEX_H_
#define _VAX_MUTEX_H_
/*
* The VAX mutex implementation is troublesome, because the VAX architecture
* lacks a compare-and-set operation, yet there are many SMP VAX
* machines in circulation. SMP for spin mutexes is easy - we don't need
* to know who owns the lock. For adaptive mutexes, we need an additional
* interlock. However, since we know that owners will be kernel addresses
* and all kernel addresses have the high bit set, we can use the high bit
* as an interlock.
*
* So we test and set the high bit with BBSSI: if it was already set,
* the mutex is owned and we must take the slow path; if it was clear,
* we now hold the interlock and can fill in the remaining bits of the
* owner's "struct lwp *" address. Since kernel addresses always have
* the high bit set, the interlock bit doubles as the top bit of the
* recorded owner, and a completed owner field simply equals curlwp.
*
* For a mutex acquisition, the owner field is set in two steps: first,
* acquire the interlock (the top bit), and second, OR in the owner's
* address. Once the owner field is non-zero, the mutex appears to be
* held; by which LWP does not matter: other LWPs competing for the lock will
* fall through to mutex_vector_enter(), and either spin or sleep.
*
* As a result there is no space for a waiters bit in the owner field. No
* problem, because it would be hard to synchronise using one without a CAS
* operation. Note that in order to do unlocked release of adaptive
* mutexes, we need the effect of MUTEX_SET_WAITERS() to be immediately
* visible on the bus. So, adaptive mutexes share the spin lock byte with
* spin mutexes (set with bb{cc,ss}i), but it is not treated as a lock in its
* own right, rather as a flag that can be atomically set or cleared.
*
* When releasing an adaptive mutex, we first clear the owner field, and
* then check to see if the waiters byte is set. This ensures that there
* will always be someone to wake any sleeping waiters up (even if the mutex
* is acquired immediately after we release it, or if we are preempted
* immediately after clearing the owner field). The setting or clearing of
* the waiters byte is serialized by the turnstile chain lock associated
* with the mutex.
*
* See comments in kern_mutex.c about releasing adaptive mutexes without
* an interlocking step.
*/
#ifndef __MUTEX_PRIVATE
struct kmutex {
uintptr_t mtx_pad1;
uint32_t mtx_pad2[2];
};
#else /* __MUTEX_PRIVATE */
struct kmutex {
/* Adaptive mutex */
union {
volatile uintptr_t mtxu_owner; /* 0-3 */
__cpu_simple_lock_t mtxu_lock; /* 0 */
} mtx_u;
ipl_cookie_t mtx_ipl; /* 4-7 */
uint32_t mtx_id; /* 8-11 */
};
#define mtx_owner mtx_u.mtxu_owner
#define mtx_lock mtx_u.mtxu_lock
#define __HAVE_MUTEX_STUBS 1
#define __HAVE_SPIN_MUTEX_STUBS 1
static inline uintptr_t
MUTEX_OWNER(uintptr_t owner)
{
return owner & ~1;
}
static inline bool
MUTEX_OWNED(uintptr_t owner)
{
return owner != 0;
}
static inline bool
MUTEX_SET_WAITERS(kmutex_t *mtx, uintptr_t owner)
{
mtx->mtx_owner |= 1;
return mtx->mtx_owner != 0;
}
static inline bool
MUTEX_HAS_WAITERS(volatile kmutex_t *mtx)
{
return (mtx->mtx_owner & 1) != 0;
}
static inline void
MUTEX_CLEAR_WAITERS(volatile kmutex_t *mtx)
{
mtx->mtx_owner &= ~1;
}
static inline void
MUTEX_INITIALIZE_SPIN(kmutex_t *mtx, u_int id, int ipl)
{
mtx->mtx_id = (id << 1) | 1;
mtx->mtx_ipl = makeiplcookie(ipl);
mtx->mtx_lock = 0;
}
static inline void
MUTEX_INITIALIZE_ADAPTIVE(kmutex_t *mtx, u_int id)
{
mtx->mtx_id = id << 1;
mtx->mtx_ipl = makeiplcookie(-1);
mtx->mtx_owner = 0;
}
static inline void
MUTEX_DESTROY(kmutex_t *mtx)
{
mtx->mtx_owner = (uintptr_t)-1L;
mtx->mtx_id = 0xdeadface << 1;
}
static inline u_int
MUTEX_GETID(volatile kmutex_t *mtx)
{
return (mtx)->mtx_id >> 1;
}
static inline bool
MUTEX_SPIN_P(volatile kmutex_t *mtx)
{
return (mtx->mtx_id & 1) != 0;
}
static inline bool
MUTEX_ADAPTIVE_P(volatile kmutex_t *mtx)
{
return (mtx->mtx_id & 1) == 0;
}
static inline bool
MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t curthread)
{
int rv;
__asm __volatile(
"clrl %1;"
"bbssi $31,%0,1f;"
"incl %1;"
"insv %2,%0,$31,%0;"
"1:"
: "=m"(mtx->mtx_owner), "=r"(rv)
: "g"(curthread));
return rv;
}
static inline void
MUTEX_RELEASE(kmutex_t *mtx)
{
__asm __volatile(
"insv $0,$0,$31,%0;"
"bbcci $31,%0,1f;"
"1:"
: "=m" (mtx->mtx_owner));
}
#endif /* __MUTEX_PRIVATE */
#endif /* _VAX_MUTEX_H_ */
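
The interlocked sequences in MUTEX_ACQUIRE and MUTEX_RELEASE are easier to follow as a C model. This is illustrative only: plain C cannot express the atomicity of BBSSI/BBCCI, and the real code additionally has to respect the waiters bit described above.

/* C model of the BBSSI/INSV acquire and the INSV/BBCCI release. */
static inline int
mutex_acquire_model(volatile uintptr_t *owner, uintptr_t curlwp_addr)
{
        /* bbssi $31: atomically test-and-set the interlock bit */
        if (*owner & 0x80000000UL)
                return 0;                       /* owned: take the slow path */
        *owner |= 0x80000000UL;
        /* insv: fill in bits 0-30 with the owner's address */
        *owner |= (curlwp_addr & 0x7fffffffUL);
        /* kernel addresses have bit 31 set, so *owner now equals the owner */
        return 1;
}

static inline void
mutex_release_model(volatile uintptr_t *owner)
{
        *owner &= ~0x7fffffffUL;        /* insv $0: clear bits 0-30 first */
        *owner &= ~0x80000000UL;        /* bbcci $31: drop the interlock */
}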

sys/arch/vax/include/rwlock.h (new file)

@ -0,0 +1,60 @@
/* $NetBSD: rwlock.h,v 1.1 2007/02/16 01:34:03 matt Exp $ */
/*-
* Copyright (c) 2002, 2006 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe and Andrew Doran.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _VAX_RWLOCK_H_
#define _VAX_RWLOCK_H_
struct krwlock {
volatile uintptr_t rw_owner;
uint32_t rw_id;
};
#ifdef __RWLOCK_PRIVATE
#define __HAVE_SIMPLE_RW_LOCKS 1
#define RW_RECEIVE(rw) /* nothing */
#define RW_GIVE(rw) /* nothing */
#define RW_CAS(p, o, n) _lock_cas((p), (o), (n))
int _lock_cas(volatile uintptr_t *, uintptr_t, uintptr_t);
#endif /* __RWLOCK_PRIVATE */
#endif /* _VAX_RWLOCK_H_ */
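
RW_CAS() is the only machine-dependent piece of the rwlock implementation, and _lock_cas() is merely declared here; its definition is not part of this diff. The contract it must satisfy is plain compare-and-swap, modelled below; since the VAX has no native CAS instruction, the real routine has to make the test and store appear atomic by other means.

/* C model of the compare-and-swap contract behind RW_CAS(p, o, n). */
static int
lock_cas_model(volatile uintptr_t *p, uintptr_t o, uintptr_t n)
{
        /* the real _lock_cas() must make this test-and-store atomic */
        if (*p != o)
                return 0;       /* lost a race; the caller retries */
        *p = n;
        return 1;
}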

sys/arch/vax/include/types.h

@ -1,4 +1,4 @@
/* $NetBSD: types.h,v 1.32 2006/09/05 19:33:55 matt Exp $ */
/* $NetBSD: types.h,v 1.33 2007/02/16 01:34:03 matt Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
@ -54,7 +54,10 @@ typedef unsigned long vsize_t;
typedef int register_t;
typedef volatile int __cpu_simple_lock_t;
/*
* BBCCI/BBSSI can operate on bytes so let's save some space.
*/
typedef volatile char __cpu_simple_lock_t;
#define __SIMPLELOCK_LOCKED 1
#define __SIMPLELOCK_UNLOCKED 0

sys/arch/vax/vax/genassym.cf

@ -1,4 +1,4 @@
# $NetBSD: genassym.cf,v 1.29 2006/03/12 02:04:26 christos Exp $
# $NetBSD: genassym.cf,v 1.30 2007/02/16 01:34:03 matt Exp $
#
# Copyright (c) 1997 Ludd, University of Lule}, Sweden.
# All rights reserved.
@ -30,6 +30,7 @@
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
quote #define __MUTEX_PRIVATE
include <sys/param.h>
include <sys/proc.h>
@ -47,6 +48,7 @@ include <machine/sid.h>
include <machine/signal.h>
include <machine/trap.h>
include <machine/uvax.h>
include <machine/mutex.h>
define L_PRIORITY offsetof(struct lwp, l_priority)
define L_ADDR offsetof(struct lwp, l_addr)
@ -85,6 +87,8 @@ define UVME_SOFTS offsetof(struct uvmexp, softs)
define CI_CURLWP offsetof(struct cpu_info, ci_curlwp)
define CI_WANT_RESCHED offsetof(struct cpu_info, ci_want_resched)
define CI_EXIT offsetof(struct cpu_info, ci_exit)
define CI_MTX_COUNT offsetof(struct cpu_info, ci_mtx_count)
define CI_MTX_OLDSPL offsetof(struct cpu_info, ci_mtx_oldspl)
# mtpr register numbers
define PR_KSP PR_KSP
@ -150,3 +154,7 @@ define SH_PENDING offsetof(struct softintr_handler, sh_pending)
define VC_DIAGTIMM offsetof(struct vs_cpu, vc_diagtimm)
define PSL_IS PSL_IS
define MTX_OWNER offsetof(struct kmutex, mtx_owner)
define MTX_IPL offsetof(struct kmutex, mtx_ipl)
define MTX_ID offsetof(struct kmutex, mtx_id)
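
These defines feed the generated assym.h that lock_stubs.S includes, letting the assembly address kmutex fields by name. Given the offsets annotated in mutex.h above, the generated header would contain something like this sketch (the cpu_info offsets depend on the full struct layout, so they are omitted):

#define MTX_OWNER       0       /* offsetof(struct kmutex, mtx_owner) */
#define MTX_IPL         4       /* offsetof(struct kmutex, mtx_ipl) */
#define MTX_ID          8       /* offsetof(struct kmutex, mtx_id) */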

sys/arch/vax/vax/gencons.c

@ -1,4 +1,4 @@
/* $NetBSD: gencons.c,v 1.45 2006/10/01 19:28:43 elad Exp $ */
/* $NetBSD: gencons.c,v 1.46 2007/02/16 01:34:03 matt Exp $ */
/*
* Copyright (c) 1994 Gordon W. Ross
@ -36,7 +36,7 @@
/* All bugs are subject to removal without further notice */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gencons.c,v 1.45 2006/10/01 19:28:43 elad Exp $");
__KERNEL_RCSID(0, "$NetBSD: gencons.c,v 1.46 2007/02/16 01:34:03 matt Exp $");
#include "opt_ddb.h"
#include "opt_cputype.h"
@ -226,14 +226,14 @@ gencnrint(void *arg)
if (sc->alive == 0)
return;
i = mfpr(pr_rxdb[sc->unit]) & 0377; /* Mask status flags etc... */
KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
KERNEL_LOCK(1, NULL);
#ifdef DDB
if (tp->t_dev == cn_tab->cn_dev) {
int j = kdbrint(i);
if (j == 1) { /* Escape received, just return */
KERNEL_UNLOCK();
KERNEL_UNLOCK_ONE(NULL);
return;
}
@ -243,7 +243,7 @@ gencnrint(void *arg)
#endif
(*tp->t_linesw->l_rint)(i, tp);
KERNEL_UNLOCK();
KERNEL_UNLOCK_ONE(NULL);
}
static void
@ -254,11 +254,11 @@ gencntint(void *arg)
if (sc->alive == 0)
return;
KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
KERNEL_LOCK(1, NULL);
tp->t_state &= ~TS_BUSY;
gencnstart(tp);
KERNEL_UNLOCK();
KERNEL_UNLOCK_ONE(NULL);
}
int
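
The newlock2 kernel-lock interface takes an explicit hold count plus an optional LWP, and KERNEL_UNLOCK_ONE() drops exactly one hold, replacing the old lockmgr-style flag word. The interrupt-handler pattern used above, in isolation (illustrative):

static void
example_intr(void *arg)
{
        (void)arg;
        KERNEL_LOCK(1, NULL);           /* take one hold of the big lock */
        /* ... call into MI tty code, e.g. (*tp->t_linesw->l_rint)() ... */
        KERNEL_UNLOCK_ONE(NULL);        /* drop exactly one hold */
}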

sys/arch/vax/vax/lock_stubs.S (new file)

@ -0,0 +1,123 @@
/* $NetBSD: lock_stubs.S,v 1.1 2007/02/16 01:34:04 matt Exp $ */
/*-
* Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe and Andrew Doran.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "opt_multiprocessor.h"
#include <machine/asm.h>
#include "assym.h"
/*
* void mutex_enter(kmutex_t *);
*
*
*/
NENTRY(mutex_enter, 0)
movl 4(%ap), %r0 /* get mutex */
bbssi $31, (%r0), 1f /* is there an owner? */
mfpr $PR_SSP, %r1 /* Note, get curcpu */
movl CI_CURLWP(%r1),(%r0) /* set owner to curlwp */
ret /* and return */
1: callg (%ap), _C_LABEL(mutex_vector_enter) /* there is an owner: go slow */
ret
/*
* void mutex_exit(kmutex_t *);
*/
NENTRY(mutex_exit, 0)
movl 4(%ap), %r0 /* get mutex */
mfpr $PR_SSP, %r1 /* get curcpu */
cmpl (%r0),CI_CURLWP(%r1) /* is the owner still us and */
/* no waiters? */
bneq 2f /* no, slow path */
insv $0, $0, $31, (%r0) /* clear low 31 bits */
bbcci $31, (%r0), 1f /* clear high bit interlocked */
2: ret
1: callg (%ap), _C_LABEL(mutex_vector_exit)
ret
/*
* void mutex_spin_enter(kmutex_t *);
*/
NENTRY(mutex_spin_enter, 0)
movl 4(%ap), %r0 /* get spin mutex */
#ifdef DIAGNOSTIC
blbc MTX_ID(%r0), 3f
#endif
mfpr $PR_SSP, %r1 /* get curcpu */
mfpr $PR_IPL, %r2 /* get current IPL */
cmpl MTX_IPL(%r0), %r2 /* is the mutex IPL higher? */
bleq 1f /* no, leave IPL alone */
mtpr MTX_IPL(%r0), $PR_IPL /* yes, raise IPL */
1: decl CI_MTX_COUNT(%r1) /* decrement the (negative) mutex count */
cmpl $-1, CI_MTX_COUNT(%r1) /* outermost spin mutex? */
bneq 2f /* no, entry IPL already saved */
movl %r2, CI_MTX_OLDSPL(%r1) /* yes, save the entry IPL */
2:
#if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR)
bbssi $0, (%r0), 3f /* take out mutex */
ret
3: callg (%ap), _C_LABEL(mutex_spin_retry) /* slow path */
#endif
ret
/*
* void mutex_spin_exit(kmutex_t *);
*/
NENTRY(mutex_spin_exit, 0)
movl 4(%ap), %r0 /* get spin mutex */
#ifdef DIAGNOSTIC
blbc MTX_ID(%r0), 2f /* assert this is a spinlock */
#endif
#if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR)
bbcci $0, (%r0), 2f /* clear mutex */
#endif
mfpr $PR_SSP, %r1 /* get curcpu */
movl CI_MTX_OLDSPL(%r1), %r2 /* fetch oldspl */
incl CI_MTX_COUNT(%r1) /* increment the (negative) mutex count */
bneq 1f /* still nested inside another spin mutex? */
mtpr %r2, $PR_IPL /* outermost release: restore the saved IPL */
1: ret
#if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR)
2: callg (%ap), _C_LABEL(mutex_vector_exit) /* slow path */
ret
#endif
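
Restated as a C model, the fast paths above do the following (a sketch assuming __MUTEX_PRIVATE and the usual kernel headers; mutex_vector_enter() and mutex_vector_exit() are the MI slow paths):

#define __MUTEX_PRIVATE
#include <sys/mutex.h>
#include <machine/cpu.h>

void
mutex_enter_model(kmutex_t *mtx)
{
        /* bbssi fast path: take the interlock and record curlwp */
        if (!MUTEX_ACQUIRE(mtx, (uintptr_t)curlwp))
                mutex_vector_enter(mtx);        /* contended: go slow */
}

void
mutex_exit_model(kmutex_t *mtx)
{
        /* fast path only if we own it and the waiters bit is clear */
        if (mtx->mtx_owner == (uintptr_t)curlwp)
                MUTEX_RELEASE(mtx);
        else
                mutex_vector_exit(mtx);
}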

sys/arch/vax/vax/machdep.c

@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.154 2007/02/09 21:55:13 ad Exp $ */
/* $NetBSD: machdep.c,v 1.155 2007/02/16 01:34:04 matt Exp $ */
/*
* Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
@ -83,7 +83,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.154 2007/02/09 21:55:13 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.155 2007/02/16 01:34:04 matt Exp $");
#include "opt_ddb.h"
#include "opt_compat_netbsd.h"
@ -670,13 +670,13 @@ void krnunlock(void);
void
krnlock()
{
KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
KERNEL_LOCK(1, NULL);
}
void
krnunlock()
{
KERNEL_UNLOCK();
KERNEL_UNLOCK_ONE(NULL);
}
#endif

sys/arch/vax/vax/pmap.c

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.139 2006/10/02 02:59:38 chs Exp $ */
/* $NetBSD: pmap.c,v 1.140 2007/02/16 01:34:04 matt Exp $ */
/*
* Copyright (c) 1994, 1998, 1999, 2003 Ludd, University of Lule}, Sweden.
* All rights reserved.
@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.139 2006/10/02 02:59:38 chs Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.140 2007/02/16 01:34:04 matt Exp $");
#include "opt_ddb.h"
#include "opt_cputype.h"
@ -386,10 +386,14 @@ pmap_bootstrap()
simple_lock_init(&pmap->pm_lock);
/* Activate the kernel pmap. */
mtpr(pcb->P1BR = pmap->pm_p1br, PR_P1BR);
mtpr(pcb->P0BR = pmap->pm_p0br, PR_P0BR);
mtpr(pcb->P1LR = pmap->pm_p1lr, PR_P1LR);
mtpr(pcb->P0LR = (pmap->pm_p0lr|AST_PCB), PR_P0LR);
pcb->P1BR = pmap->pm_p1br;
pcb->P0BR = pmap->pm_p0br;
pcb->P1LR = pmap->pm_p1lr;
pcb->P0LR = pmap->pm_p0lr|AST_PCB;
mtpr((uintptr_t)pcb->P1BR, PR_P1BR);
mtpr((uintptr_t)pcb->P0BR, PR_P0BR);
mtpr(pcb->P1LR, PR_P1LR);
mtpr(pcb->P0LR, PR_P0LR);
/* cpu_info struct */
pcb->SSP = scratch + VAX_NBPG;
@ -565,9 +569,9 @@ update_pcbs(struct pmap *pm)
/* If curlwp uses this pmap update the regs too */
if (pm == curproc->p_vmspace->vm_map.pmap) {
mtpr(pm->pm_p0br, PR_P0BR);
mtpr((uintptr_t)pm->pm_p0br, PR_P0BR);
mtpr(pm->pm_p0lr|AST_PCB, PR_P0LR);
mtpr(pm->pm_p1br, PR_P1BR);
mtpr((uintptr_t)pm->pm_p1br, PR_P1BR);
mtpr(pm->pm_p1lr, PR_P1LR);
}
#if defined(MULTIPROCESSOR) && defined(notyet)
@ -668,8 +672,9 @@ rmspace(struct pmap *pm)
#undef swappable
#define swappable(l, pm) \
(((l)->l_flag & (P_SYSTEM | L_INMEM | P_WEXIT)) == L_INMEM && \
((l)->l_holdcnt == 0) && ((l)->l_proc->p_vmspace->vm_map.pmap != pm))
(((l)->l_flag & (L_SYSTEM | L_INMEM | L_WEXIT)) == L_INMEM \
&& (l)->l_holdcnt == 0 \
&& (l)->l_proc->p_vmspace->vm_map.pmap != pm)
static int
pmap_rmproc(struct pmap *pm)
@ -683,7 +688,7 @@ pmap_rmproc(struct pmap *pm)
outl = outl2 = NULL;
outpri = outpri2 = 0;
proclist_lock_read();
rw_enter(&proclist_lock, RW_READER);
LIST_FOREACH(l, &alllwp, l_list) {
if (!swappable(l, pm))
continue;
@ -710,7 +715,7 @@ pmap_rmproc(struct pmap *pm)
continue;
}
}
proclist_unlock_read();
rw_exit(&proclist_lock);
if (didswap == 0) {
if ((l = outl) == NULL)
l = outl2;
@ -1638,9 +1643,9 @@ pmap_activate(struct lwp *l)
ps->ps_pcb = pcb;
if (l == curlwp) {
mtpr(pmap->pm_p0br, PR_P0BR);
mtpr((uintptr_t)pmap->pm_p0br, PR_P0BR);
mtpr(pmap->pm_p0lr|AST_PCB, PR_P0LR);
mtpr(pmap->pm_p1br, PR_P1BR);
mtpr((uintptr_t)pmap->pm_p1br, PR_P1BR);
mtpr(pmap->pm_p1lr, PR_P1LR);
mtpr(0, PR_TBIA);
}