Resurrect the function pointers for lock operations and allow each architecture to provide asm versions of the RAS operations.

We do this because relying on the compiler to get the RAS right is not
sensible. (It gets alpha wrong, and the code it generates for hppa is
suboptimal.)

Provide asm RAS ops for hppa.

A slightly different version of this change was reviewed by Andrew Doran.
skrll 2007-09-24 12:19:39 +00:00
parent cb0e178012
commit d32ed98975
6 changed files with 115 additions and 61 deletions
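
For readers unfamiliar with restartable atomic sequences: a RAS is a short
load/store sequence whose address range is registered with the kernel via
rasctl(2); if the thread is preempted inside the range, the kernel rewinds
the program counter to the start, so the sequence behaves atomically on a
uniprocessor without atomic instructions. Below is a minimal sketch of the
C pattern involved (demo_lock and the demo_* functions are hypothetical
names; the RAS_* macros, rasctl(2), and the lock helpers are the NetBSD
interfaces used in the diffs that follow). The hazard the commit message
refers to is that nothing stops the compiler from scheduling the load or
store outside the registered labels, at which point the kernel restarts the
wrong range; hand-written asm pins the instructions down.

#include <stdlib.h>
#include <sys/types.h>
#include <sys/lock.h>	/* __cpu_simple_lock_t and helpers */
#include <sys/ras.h>	/* RAS_DECL, RAS_START, RAS_END, rasctl(2) */

RAS_DECL(demo_lock);

static int
demo_lock_try(__cpu_simple_lock_t *alp)
{
	int locked;

	/* Preemption anywhere in here restarts at RAS_START. */
	RAS_START(demo_lock);
	locked = __SIMPLELOCK_LOCKED_P(alp);
	__cpu_simple_lock_set(alp);
	RAS_END(demo_lock);

	return !locked;
}

static void
demo_lock_setup(void)
{

	/*
	 * Register the sequence; a real caller falls back to atomic
	 * ops on failure, as pthread__lockprim_init() does below.
	 */
	if (rasctl(RAS_ADDR(demo_lock), RAS_SIZE(demo_lock),
	    RAS_INSTALL) != 0)
		abort();
}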

lib/libpthread/arch/hppa/_context_u.S

@@ -1,4 +1,4 @@
-/*	$NetBSD: _context_u.S,v 1.1 2004/07/19 03:39:02 chs Exp $	*/
+/*	$NetBSD: _context_u.S,v 1.2 2007/09/24 12:19:39 skrll Exp $	*/

 /*
  * Copyright (c) 2001 The NetBSD Foundation, Inc.
@@ -140,3 +140,38 @@ ENTRY(_swapcontext_u, 0)
 	GETC(%arg0)
 	SETC(%arg1)
 	EXIT(_swapcontext_u)
+
+LEAF_ENTRY(pthread__ras_simple_lock_init)
+	ldi	1,%ret0			/* 1 == unlocked */
+	stw	%ret0,0(%arg0)
+	stw	%ret0,4(%arg0)
+	stw	%ret0,8(%arg0)
+	stw	%ret0,12(%arg0)
+	bv,n	%r0(%rp)
+EXIT(pthread__ras_simple_lock_init)
+
+	.global	pthread__lock_ras_start
+	.global	pthread__lock_ras_end
+
+LEAF_ENTRY(pthread__ras_simple_lock_try)
+	ldo	15(%arg0),%arg0
+	depi	0,31,4,%arg0
+pthread__lock_ras_start:
+	ldw	0(%arg0),%ret0
+	stw	%r0,0(%arg0)		/* 0 == locked */
+pthread__lock_ras_end:
+	comiclr,=	0,%ret0,%ret0	/* if locked return 0 */
+	ldi	1,%ret0			/* else return 1 */
+	bv,n	%r0(%rp)
+EXIT(pthread__ras_simple_lock_try)
+
+LEAF_ENTRY(pthread__ras_simple_unlock)
+	ldo	15(%arg0),%arg0
+	depi	0,31,4,%arg0
+	ldi	1,%ret0			/* 1 == unlocked */
+	bv	%r0(%rp)
+	stw	%ret0,0(%arg0)
+EXIT(pthread__ras_simple_unlock)
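
A note on the ldo/depi pairs above: __cpu_simple_lock_t on hppa spans four
words (the init routine stores the unlocked value at offsets 0 through 12),
apparently so that a 16-byte-aligned word is always available for ldcw,
which requires such alignment; "ldo 15(%arg0),%arg0" followed by
"depi 0,31,4,%arg0" rounds the lock pointer up to that boundary, so the RAS
ops and the ldcw-based atomic ops agree on which word holds the state. (In
the unlock routine, the final stw sits in the delay slot of bv, so it
executes before the return completes.) A C sketch of the address
computation; hppa_lock_word is a hypothetical name, not part of the commit:

#include <stdint.h>

static inline volatile unsigned int *
hppa_lock_word(volatile void *alp)
{

	/*
	 * ldo 15(%arg0),%arg0	-> add 15 to the pointer
	 * depi 0,31,4,%arg0	-> deposit zero into the low 4 bits
	 * i.e. round up to the next 16-byte boundary.
	 */
	return (volatile unsigned int *)
	    (((uintptr_t)alp + 15) & ~(uintptr_t)15);
}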

lib/libpthread/arch/hppa/pthread_md.h

@@ -1,4 +1,4 @@
-/*	$NetBSD: pthread_md.h,v 1.3 2005/12/24 23:11:13 perry Exp $	*/
+/*	$NetBSD: pthread_md.h,v 1.4 2007/09/24 12:19:39 skrll Exp $	*/

 /*-
  * Copyright (c) 2001 The NetBSD Foundation, Inc.
@@ -41,6 +41,8 @@
 #include <machine/frame.h>

+#define	PTHREAD__ASM_RASOPS
+
 static inline long
 pthread__sp(void)
 {
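
Defining PTHREAD__ASM_RASOPS is the per-architecture opt-in: with it
defined, pthread_lock.c (below) declares the three pthread__ras_* functions
as externs supplied by the asm file instead of compiling its C versions.
The asm must also export pthread__lock_ras_start and pthread__lock_ras_end,
because those are the symbols RAS_DECL(pthread__lock) binds to when the
sequence is registered with rasctl(2). A simplified sketch of the shape of
those <sys/ras.h> macros (an approximation for illustration, not their
exact NetBSD definitions):

/* Sketch only; see <sys/ras.h> for the real definitions. */
#define	RAS_DECL(name)	extern char name##_ras_start[], name##_ras_end[]
#define	RAS_ADDR(name)	((void *)(name##_ras_start))
#define	RAS_SIZE(name)	((size_t)(name##_ras_end - name##_ras_start))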

lib/libpthread/arch/i386/pthread_md.h

@@ -1,4 +1,4 @@
-/*	$NetBSD: pthread_md.h,v 1.9 2007/09/08 22:49:51 ad Exp $	*/
+/*	$NetBSD: pthread_md.h,v 1.10 2007/09/24 12:19:40 skrll Exp $	*/

 /*-
  * Copyright (c) 2001, 2007 The NetBSD Foundation, Inc.
@@ -182,7 +182,6 @@ pthread__sp(void)
 } while (/*CONSTCOND*/0)

 #define	pthread__smt_pause()	__asm __volatile("rep; nop" ::: "memory")
-#define	PTHREAD__CHEAP_UNLOCK
 #define	PTHREAD__HAVE_ATOMIC

 #endif /* _LIB_PTHREAD_I386_MD_H */

lib/libpthread/arch/x86_64/pthread_md.h

@@ -1,4 +1,4 @@
-/*	$NetBSD: pthread_md.h,v 1.5 2007/09/07 00:24:57 ad Exp $	*/
+/*	$NetBSD: pthread_md.h,v 1.6 2007/09/24 12:19:40 skrll Exp $	*/

 /*-
  * Copyright (c) 2001 The NetBSD Foundation, Inc.
@@ -104,7 +104,6 @@ pthread__sp(void)
 } while (/*CONSTCOND*/0)

 #define	pthread__smt_pause()	__asm __volatile("rep; nop" ::: "memory")
-#define	PTHREAD__CHEAP_UNLOCK
 #define	PTHREAD__HAVE_ATOMIC

 #endif /* _LIB_PTHREAD_X86_64_MD_H */

lib/libpthread/pthread_int.h

@@ -1,4 +1,4 @@
-/*	$NetBSD: pthread_int.h,v 1.55 2007/09/13 23:51:47 ad Exp $	*/
+/*	$NetBSD: pthread_int.h,v 1.56 2007/09/24 12:19:39 skrll Exp $	*/

 /*-
  * Copyright (c) 2001, 2002, 2003, 2006, 2007 The NetBSD Foundation, Inc.
@@ -128,6 +128,12 @@ struct __pthread_st {
 	ucontext_t	pt_uc;
 };

+struct pthread_lock_ops {
+	void	(*plo_init)(__cpu_simple_lock_t *);
+	int	(*plo_try)(__cpu_simple_lock_t *);
+	void	(*plo_unlock)(__cpu_simple_lock_t *);
+};
+
 /* Thread states */
 #define PT_STATE_RUNNING	1
 #define PT_STATE_ZOMBIE		5
@@ -187,9 +193,9 @@ void	pthread_spinunlock(pthread_spin_t *);

+extern const struct pthread_lock_ops *pthread__lock_ops;
+
 int	pthread__simple_locked_p(__cpu_simple_lock_t *);
-void	pthread__simple_lock_init(__cpu_simple_lock_t *);
-int	pthread__simple_lock_try(__cpu_simple_lock_t *);
-void	pthread__simple_unlock(__cpu_simple_lock_t *);
+#define	pthread__simple_lock_init(alp)	(*pthread__lock_ops->plo_init)(alp)
+#define	pthread__simple_lock_try(alp)	(*pthread__lock_ops->plo_try)(alp)
+#define	pthread__simple_unlock(alp)	(*pthread__lock_ops->plo_unlock)(alp)

 #ifndef _getcontext_u
 int	_getcontext_u(ucontext_t *);
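
The three #defines above route every caller through the global
pthread__lock_ops pointer, so the implementation can be swapped once at
startup without a test-and-branch at each call site. A self-contained
sketch of the same dispatch pattern (all names here are illustrative, not
from libpthread):

#include <stdio.h>

struct demo_ops {
	void	(*op_init)(int *);
	int	(*op_try)(int *);
};

static void demo_init_impl(int *p) { *p = 1; }	/* 1 == unlocked */
static int  demo_try_impl(int *p) { int was = *p; *p = 0; return was; }

static const struct demo_ops demo_ops_impl = {
	demo_init_impl,
	demo_try_impl,
};

/* Selected once at startup, like pthread__lock_ops. */
static const struct demo_ops *demo_lock_ops = &demo_ops_impl;

#define	demo_lock_init(p)	(*demo_lock_ops->op_init)(p)
#define	demo_lock_try(p)	(*demo_lock_ops->op_try)(p)

int
main(void)
{
	int lock;

	demo_lock_init(&lock);
	printf("%d\n", demo_lock_try(&lock));	/* 1: acquired */
	printf("%d\n", demo_lock_try(&lock));	/* 0: already held */
	return 0;
}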

lib/libpthread/pthread_lock.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: pthread_lock.c,v 1.28 2007/09/17 13:25:59 skrll Exp $	*/
+/*	$NetBSD: pthread_lock.c,v 1.29 2007/09/24 12:19:39 skrll Exp $	*/

 /*-
  * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
@@ -41,7 +41,7 @@
  */

 #include <sys/cdefs.h>
-__RCSID("$NetBSD: pthread_lock.c,v 1.28 2007/09/17 13:25:59 skrll Exp $");
+__RCSID("$NetBSD: pthread_lock.c,v 1.29 2007/09/24 12:19:39 skrll Exp $");

 #include <sys/types.h>
 #include <sys/lock.h>
@@ -66,10 +66,7 @@ __RCSID("$NetBSD: pthread_lock.c,v 1.28 2007/09/17 13:25:59 skrll Exp $");
 static void	pthread_spinlock_slow(pthread_spin_t *);

-static int pthread__atomic;
-
 RAS_DECL(pthread__lock);
-RAS_DECL(pthread__lock2);

 int
 pthread__simple_locked_p(__cpu_simple_lock_t *alp)
@@ -77,26 +74,26 @@ pthread__simple_locked_p(__cpu_simple_lock_t *alp)
 	return __SIMPLELOCK_LOCKED_P(alp);
 }

-void
-pthread__simple_lock_init(__cpu_simple_lock_t *alp)
-{
+#ifdef PTHREAD__ASM_RASOPS

-	if (pthread__atomic) {
-		__cpu_simple_lock_init(alp);
-		return;
-	}
+void	pthread__ras_simple_lock_init(__cpu_simple_lock_t *);
+int	pthread__ras_simple_lock_try(__cpu_simple_lock_t *);
+void	pthread__ras_simple_unlock(__cpu_simple_lock_t *);
+
+#else
+
+static void
+pthread__ras_simple_lock_init(__cpu_simple_lock_t *alp)
+{

 	__cpu_simple_lock_clear(alp);
 }

-int
-pthread__simple_lock_try(__cpu_simple_lock_t *alp)
+static int
+pthread__ras_simple_lock_try(__cpu_simple_lock_t *alp)
 {
 	int locked;

-	if (pthread__atomic)
-		return __cpu_simple_lock_try(alp);
-
 	RAS_START(pthread__lock);
 	locked = __SIMPLELOCK_LOCKED_P(alp);
 	__cpu_simple_lock_set(alp);
@@ -105,22 +102,56 @@ pthread__simple_lock_try(__cpu_simple_lock_t *alp)
 	return !locked;
 }

-inline void
-pthread__simple_unlock(__cpu_simple_lock_t *alp)
+static void
+pthread__ras_simple_unlock(__cpu_simple_lock_t *alp)
 {

-#ifdef PTHREAD__CHEAP_UNLOCK
-	__cpu_simple_unlock(alp);
-#else
-	if (pthread__atomic) {
-		__cpu_simple_unlock(alp);
-		return;
-	}
-
 	__cpu_simple_lock_clear(alp);
-#endif
 }

+#endif /* PTHREAD__ASM_RASOPS */
+
+static const struct pthread_lock_ops pthread__lock_ops_ras = {
+	pthread__ras_simple_lock_init,
+	pthread__ras_simple_lock_try,
+	pthread__ras_simple_unlock,
+};
+
+static void
+pthread__atomic_simple_lock_init(__cpu_simple_lock_t *alp)
+{
+
+	__cpu_simple_lock_init(alp);
+}
+
+static int
+pthread__atomic_simple_lock_try(__cpu_simple_lock_t *alp)
+{
+
+	return (__cpu_simple_lock_try(alp));
+}
+
+static void
+pthread__atomic_simple_unlock(__cpu_simple_lock_t *alp)
+{
+
+	__cpu_simple_unlock(alp);
+}
+
+static const struct pthread_lock_ops pthread__lock_ops_atomic = {
+	pthread__atomic_simple_lock_init,
+	pthread__atomic_simple_lock_try,
+	pthread__atomic_simple_unlock,
+};
+
+/*
+ * We default to pointing to the RAS primitives; we might need to use
+ * locks early, but before main() starts.  This is safe, since no other
+ * threads will be active for the process, so atomicity will not be
+ * required.
+ */
+const struct pthread_lock_ops *pthread__lock_ops = &pthread__lock_ops_ras;
+
 void
 pthread_spinlock(pthread_spin_t *lock)
 {
@@ -134,20 +165,8 @@ pthread_spinlock(pthread_spin_t *lock)
 	PTHREADD_ADD(PTHREADD_SPINLOCKS);
 #endif

-	if (pthread__atomic) {
-		if (__predict_true(__cpu_simple_lock_try(lock)))
-			return;
-	} else {
-		int locked;
-
-		RAS_START(pthread__lock2);
-		locked = __SIMPLELOCK_LOCKED_P(lock);
-		__cpu_simple_lock_set(lock);
-		RAS_END(pthread__lock2);
-
-		if (__predict_true(!locked))
-			return;
-	}
+	if (__predict_true(pthread__simple_lock_try(lock)))
+		return;

 	pthread_spinlock_slow(lock);
 }
@@ -247,20 +266,14 @@ pthread__lockprim_init(void)
 	pthread__nspins = 1;

 	if (pthread__concurrency != 1) {
-		pthread__atomic = 1;
+		pthread__lock_ops = &pthread__lock_ops_atomic;
 		return;
 	}

 	if (rasctl(RAS_ADDR(pthread__lock), RAS_SIZE(pthread__lock),
 	    RAS_INSTALL) != 0) {
-		pthread__atomic = 1;
-		return;
-	}
-
-	if (rasctl(RAS_ADDR(pthread__lock2), RAS_SIZE(pthread__lock2),
-	    RAS_INSTALL) != 0) {
-		pthread__atomic = 1;
-		return;
+		pthread__lock_ops = &pthread__lock_ops_atomic;
+		return;
 	}
 }
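
Pulling the fragments of pthread__lockprim_init() together, the selection
logic after this change reduces to the sketch below (reassembled from the
hunks above; lines the diff elides are omitted and the supporting
declarations are approximate). A RAS only restarts a sequence preempted on
its own CPU, so it cannot provide atomicity between processors: with real
concurrency, or when the kernel refuses to install the sequence, the atomic
ops table is selected; otherwise the compile-time default
pthread__lock_ops_ras stays in effect.

#include <sys/ras.h>		/* RAS_ADDR, RAS_SIZE, rasctl(2) */

extern int pthread__nspins;	/* approximate declarations */
extern int pthread__concurrency;

void
pthread__lockprim_init(void)
{

	pthread__nspins = 1;

	/* Multiprocessor: a RAS cannot order other CPUs. */
	if (pthread__concurrency != 1) {
		pthread__lock_ops = &pthread__lock_ops_atomic;
		return;
	}

	/*
	 * Uniprocessor: install the RAS, else fall back to atomics;
	 * on success pthread__lock_ops keeps its RAS default.
	 */
	if (rasctl(RAS_ADDR(pthread__lock), RAS_SIZE(pthread__lock),
	    RAS_INSTALL) != 0) {
		pthread__lock_ops = &pthread__lock_ops_atomic;
		return;
	}
}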