Make sparc use atomic_cas_ulong() for mutex and rwlock operations, and
disable the custom mutex/rwlock code. PR port-sparc/37930. ok martin@
ad 2008-02-14 14:07:35 +00:00
parent 8ae4ab89dd
commit 71eb65050c
3 changed files with 27 additions and 168 deletions
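The change drops the port-private ldstub-based locking in favour of the machine-independent "simple mutex"/rwlock paths, which only need a word-sized compare-and-swap. Below is a minimal sketch of how such a CAS primitive gets consumed; the _atomic_cas_ulong() stand-in uses a GCC builtin so it builds outside the kernel, and struct kmutex_sketch / mutex_tryenter_sketch() are illustrative names, not the kernel's actual code.

/*
 * Sketch only: an adaptive-mutex try-acquire built on a word-sized CAS,
 * in the spirit of the MI code that MUTEX_CAS() now plugs into.
 */
#include <stdbool.h>
#include <stdint.h>

/* Stand-in for the kernel's _atomic_cas_ulong(); GCC builtin used here. */
static unsigned long
_atomic_cas_ulong(volatile unsigned long *p, unsigned long oldv,
    unsigned long newv)
{
	return __sync_val_compare_and_swap(p, oldv, newv);
}

#define MUTEX_CAS(p, o, n) \
    (_atomic_cas_ulong((volatile unsigned long *)(p), (o), (n)) == (o))

struct kmutex_sketch {
	volatile uintptr_t mtx_owner;	/* 0 when free, owning LWP otherwise */
};

static bool
mutex_tryenter_sketch(struct kmutex_sketch *mtx, uintptr_t curlwp)
{
	/* Succeeds only if the owner word was 0 and curlwp was installed. */
	return MUTEX_CAS(&mtx->mtx_owner, 0, curlwp);
}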

files.sparc

@@ -1,4 +1,4 @@
# $NetBSD: files.sparc,v 1.144 2008/01/25 21:12:13 joerg Exp $
# $NetBSD: files.sparc,v 1.145 2008/02/14 14:07:35 ad Exp $
# @(#)files.sparc 8.1 (Berkeley) 7/19/93
# sparc-specific configuration info
@@ -298,7 +298,6 @@ file arch/sparc/sparc/kgdb_machdep.c kgdb
file arch/sparc/sparc/kobj_machdep.c modular
# sparc/sparc/locore.s is handled specially in the makefile,
# because it must come first in the "ld" command line.
file arch/sparc/sparc/lock_stubs.s
file arch/sparc/sparc/machdep.c
file arch/sparc/sparc/process_machdep.c
file arch/sparc/sparc/mem.c

mutex.h

@@ -1,7 +1,7 @@
/* $NetBSD: mutex.h,v 1.7 2008/01/02 11:48:28 ad Exp $ */
/* $NetBSD: mutex.h,v 1.8 2008/02/14 14:07:35 ad Exp $ */
/*-
* Copyright (c) 2002, 2007 The NetBSD Foundation, Inc.
* Copyright (c) 2002, 2006 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@@ -39,178 +39,36 @@
#ifndef _SPARC_MUTEX_H_
#define _SPARC_MUTEX_H_
/*
* The sparc mutex implementation is troublesome, because sparc (v7 and
* v8) lacks a compare-and-set operation, yet there are many SMP sparc
* machines in circulation. SMP for spin mutexes is easy - we don't need
* to know who owns the lock. For adaptive mutexes, we need an additional
* interlock.
*
* The locked byte set by the sparc 'ldstub' instruction is 0xff. sparc
* kernels are always loaded above 0xe0000000, and the low 5 bits of any
* "struct lwp *" are always zero. So, to record the lock owner, we only
* need 23 bits of space. mtxa_owner contains the mutex owner's address
* shifted right by 5: the top three bits of which will always be 0xe,
* overlapping with the interlock at the top byte, which is always 0xff
* when the mutex is held.
*
* For a mutex acquisition, the owner field is set in two steps: first,
* acquire the interlock (top byte), and second OR in the owner's address.
* Once the owner field is non-zero, it will appear that the mutex is held,
* by which LWP it does not matter: other LWPs competing for the lock will
* fall through to mutex_vector_enter(), and either spin or sleep.
*
* As a result there is no space for a waiters bit in the owner field. No
* problem, because it would be hard to synchronise using one without a CAS
* operation. Note that in order to do unlocked release of adaptive
* mutexes, we need the effect of MUTEX_SET_WAITERS() to be immediately
* visible on the bus. So, adaptive mutexes share the spin lock byte with
* spin mutexes (set with ldstub), but it is not treated as a lock in its
* own right, rather as a flag that can be atomically set or cleared.
*
* When releasing an adaptive mutex, we first clear the owners field, and
* then check to see if the waiters byte is set. This ensures that there
* will always be someone to wake any sleeping waiters up (even if the mutex
* is acquired immediately after we release it, or if we are preempted
* immediately after clearing the owners field). The setting or clearing of
* the waiters byte is serialized by the turnstile chain lock associated
* with the mutex.
*
* See comments in kern_mutex.c about releasing adaptive mutexes without
* an interlocking step.
*/
#ifndef __MUTEX_PRIVATE
struct kmutex {
uintptr_t mtx_pad1;
uint32_t mtx_pad2;
};
#else /* __MUTEX_PRIVATE */
#include <machine/lock.h>
struct kmutex {
union {
/* Adaptive mutex */
volatile uintptr_t mtxu_owner; /* 0-3 */
__cpu_simple_lock_t mtxu_interlock; /* 0 */
/* Spin mutex. */
volatile uintptr_t mtxa_owner;
#ifdef __MUTEX_PRIVATE
struct {
uint8_t mtxs_dummy; /* 0 */
uint8_t mtxs_unused1; /* 1 */
ipl_cookie_t mtxs_ipl; /* 2 */
uint8_t mtxs_unused2; /* 3 */
volatile uint8_t mtxs_dummy;
ipl_cookie_t mtxs_ipl;
__cpu_simple_lock_t mtxs_lock;
volatile uint8_t mtxs_unused;
} s;
#endif
} u;
__cpu_simple_lock_t mtx_lock; /* 4 */
uint8_t mtx_dodebug; /* 5 */
uint8_t mtx_isspin; /* 6 */
uint8_t mtx_pad[1]; /* 7 */
};
#define __HAVE_MUTEX_STUBS 1
#if 0 /* does not work for MP yet */
#define __HAVE_SPIN_MUTEX_STUBS 1
#endif
#ifdef __MUTEX_PRIVATE
#define mtx_owner u.mtxu_owner
#define mtx_interlock u.mtxu_interlock
#define mtx_dummy u.s.mtxs_dummy
#define mtx_ipl u.s.mtxs_ipl
#define mtx_owner u.mtxa_owner
#define mtx_ipl u.s.mtxs_ipl
#define mtx_lock u.s.mtxs_lock
static inline uintptr_t
MUTEX_OWNER(uintptr_t owner)
{
return owner << 5;
}
#define __HAVE_SIMPLE_MUTEXES 1
static inline int
MUTEX_OWNED(uintptr_t owner)
{
return owner != 0;
}
#define MUTEX_RECEIVE(mtx) /* nothing */
#define MUTEX_GIVE(mtx) /* nothing */
static inline int
MUTEX_SET_WAITERS(kmutex_t *mtx, uintptr_t owner)
{
(void)__cpu_simple_lock_try(&mtx->mtx_lock);
return mtx->mtx_owner != 0;
}
#define MUTEX_CAS(p, o, n) \
(_atomic_cas_ulong((volatile unsigned long *)(p), (o), (n)) == (o))
static inline int
MUTEX_HAS_WAITERS(volatile kmutex_t *mtx)
{
if (mtx->mtx_owner == 0)
return 0;
return mtx->mtx_lock == __SIMPLELOCK_LOCKED;
}
static inline void
MUTEX_INITIALIZE_SPIN(kmutex_t *mtx, bool dodebug, int ipl)
{
mtx->mtx_dodebug = dodebug;
mtx->mtx_isspin = 1;
mtx->mtx_ipl = makeiplcookie(ipl);
mtx->mtx_interlock = __SIMPLELOCK_LOCKED;
__cpu_simple_lock_init(&mtx->mtx_lock);
}
static inline void
MUTEX_INITIALIZE_ADAPTIVE(kmutex_t *mtx, bool dodebug)
{
mtx->mtx_dodebug = dodebug;
mtx->mtx_isspin = 0;
__cpu_simple_lock_init(&mtx->mtx_lock);
}
static inline void
MUTEX_DESTROY(kmutex_t *mtx)
{
mtx->mtx_owner = (uintptr_t)-1L;
}
static inline bool
MUTEX_DEBUG_P(kmutex_t *mtx)
{
return mtx->mtx_dodebug != 0;
}
static inline int
MUTEX_SPIN_P(volatile kmutex_t *mtx)
{
return mtx->mtx_isspin != 0;
}
static inline int
MUTEX_ADAPTIVE_P(volatile kmutex_t *mtx)
{
return mtx->mtx_isspin == 0;
}
static inline int
MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t curthread)
{
if (!__cpu_simple_lock_try(&mtx->mtx_interlock))
return 0;
mtx->mtx_owner = (curthread >> 5) | 0xf8000000;
return 1;
}
static inline void
MUTEX_RELEASE(kmutex_t *mtx)
{
mtx->mtx_owner = 0;
__cpu_simple_unlock(&mtx->mtx_lock);
}
static inline void
MUTEX_CLEAR_WAITERS(kmutex_t *mtx)
{
__cpu_simple_unlock(&mtx->mtx_lock);
}
unsigned long _atomic_cas_ulong(volatile unsigned long *,
unsigned long, unsigned long);
#endif /* __MUTEX_PRIVATE */
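The owner encoding that the deleted comment and the removed MUTEX_ACQUIRE()/MUTEX_OWNER() macros relied on can be checked in isolation: shifting a 32-byte-aligned kernel address right by 5 and forcing the top five bits on yields a word whose top byte is 0xff (the ldstub locked value), and shifting left by 5 recovers the address. The address below is a hypothetical example, not taken from a real kernel.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Encode as the removed MUTEX_ACQUIRE() did. */
static uint32_t
owner_encode(uint32_t lwpaddr)
{
	return (lwpaddr >> 5) | 0xf8000000;
}

/* Decode as the removed MUTEX_OWNER() did: the forced-on bits fall off
 * the top of the 32-bit word, leaving the original address. */
static uint32_t
owner_decode(uint32_t owner)
{
	return owner << 5;
}

int
main(void)
{
	uint32_t lwpaddr = 0xe1234560;	/* hypothetical 32-byte aligned kernel address */
	uint32_t owner = owner_encode(lwpaddr);

	assert((owner >> 24) == 0xff);		/* overlaps the ldstub interlock byte */
	assert(owner_decode(owner) == lwpaddr);
	printf("owner word 0x%08x -> lwp 0x%08x\n",
	    (unsigned)owner, (unsigned)owner_decode(owner));
	return 0;
}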

rwlock.h

@@ -1,7 +1,7 @@
/* $NetBSD: rwlock.h,v 1.3 2007/11/21 10:19:08 yamt Exp $ */
/* $NetBSD: rwlock.h,v 1.4 2008/02/14 14:07:35 ad Exp $ */
/*-
* Copyright (c) 2002, 2007 The NetBSD Foundation, Inc.
* Copyright (c) 2002, 2006 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@@ -50,9 +50,11 @@ struct krwlock {
#define RW_RECEIVE(rw) /* nothing */
#define RW_GIVE(rw) /* nothing */
#define RW_CAS(p, o, n) _lock_cas((p), (o), (n))
#define RW_CAS(p, o, n) \
(_atomic_cas_ulong((volatile unsigned long *)(p), (o), (n)) == (o))
int _lock_cas(volatile uintptr_t *, uintptr_t, uintptr_t);
unsigned long _atomic_cas_ulong(volatile unsigned long *,
unsigned long, unsigned long);
#endif /* __RWLOCK_PRIVATE */
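On the rwlock side, the new RW_CAS() feeds the machine-independent code's retry loops. A rough sketch of a CAS-based read-acquire loop in that style follows; the flag values and names (RW_WRITE_LOCKED, RW_READ_INCR, rw_tryenter_read_sketch) are illustrative stand-ins rather than the definitions in the real MI rwlock code, and the CAS again uses a GCC builtin so the example compiles on its own.

#include <stdbool.h>
#include <stdint.h>

/* Stand-in for the kernel's _atomic_cas_ulong(). */
static unsigned long
_atomic_cas_ulong(volatile unsigned long *p, unsigned long oldv,
    unsigned long newv)
{
	return __sync_val_compare_and_swap(p, oldv, newv);
}

#define RW_CAS(p, o, n) \
    (_atomic_cas_ulong((volatile unsigned long *)(p), (o), (n)) == (o))

#define RW_WRITE_LOCKED	0x01UL	/* hypothetical: a writer owns the lock */
#define RW_READ_INCR	0x10UL	/* hypothetical: one reader's worth of count */

struct krwlock_sketch {
	volatile uintptr_t rw_owner;	/* reader count or writer, plus flags */
};

static bool
rw_tryenter_read_sketch(struct krwlock_sketch *rw)
{
	uintptr_t owner;

	for (;;) {
		owner = rw->rw_owner;
		if (owner & RW_WRITE_LOCKED)
			return false;	/* writer present; caller must block */
		if (RW_CAS(&rw->rw_owner, owner, owner + RW_READ_INCR))
			return true;	/* reader count bumped atomically */
		/* lost a race with another CPU; reload and retry */
	}
}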