For PR bin/37347:

- Override __libc_thr_init() instead of using our own constructor.
- Add pthread__getenv() and use it instead of getenv(). This is needed before
  we are up and running, and unfortunately getenv() takes locks (see the
  sketch after this list).
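
A condensed sketch of how these two items fit together, lifted from the
pthread.c hunks further down (the __strong_alias() macro comes from
<sys/cdefs.h>, and __findenv() is a libc-internal helper):

/*
 * libc calls __libc_thr_init() very early, before main() and before any
 * locking is safe, so aliasing it to pthread__init() replaces the old
 * ELF-constructor approach.
 */
__strong_alias(__libc_thr_init,pthread__init)

/*
 * Because getenv() wants to use locks, early tunables such as
 * PTHREAD_DIAGASSERT are looked up with libc's lock-free __findenv()
 * instead.
 */
char *
pthread__getenv(const char *name)
{
	extern char *__findenv(const char *, int *);
	int off;

	return __findenv(name, &off);
}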

Other changes:

- Cache the spinlock vectors in pthread__st. Internal spinlock operations
  now take 1 function call instead of 3 (i386); see the sketch after this
  list.
- Use pthread__self() internally, not pthread_self().
- Use __attribute__ ((visibility("hidden"))) in some places.
- Kill PTHREAD_MAIN_DEBUG.
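
A short sketch of what the cached lock vector buys, taken from the new
inline functions in the pthread_int.h hunk below (pt_lockops is the
per-thread copy installed by pthread__initthread(), so this only builds
against the internal pthread_int.h types):

/*
 * Uncontended case: a single indirect call through the cached vector.
 * Contended case: fall back to the spinning slow path via the same
 * vector.
 */
static inline void
pthread__spinlock(pthread_t self, pthread_spin_t *lock)
{

	if (__predict_true((*self->pt_lockops.plo_try)(lock)))
		return;
	(*self->pt_lockops.plo_lock)(lock);
}

static inline void
pthread__spinunlock(pthread_t self, pthread_spin_t *lock)
{

	(*self->pt_lockops.plo_unlock)(lock);
}
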
ad 2007-11-13 15:57:10 +00:00
parent fd4abe684b
commit 15e9cec117
13 changed files with 246 additions and 264 deletions


@ -1,4 +1,4 @@
/* $NetBSD: _context_u.S,v 1.5 2007/09/08 22:49:51 ad Exp $ */
/* $NetBSD: _context_u.S,v 1.6 2007/11/13 15:57:14 ad Exp $ */
/*-
* Copyright (c) 2001, 2007 The NetBSD Foundation, Inc.
@ -39,12 +39,13 @@
#include <machine/asm.h>
#include "assym.h"
#define STUB(n) ENTRY(n); .hidden _C_LABEL(n)
#define FPSAVE(reg) fnsave UC_FPREGS(reg)
#define FPLOAD(reg) frstor UC_FPREGS(reg)
#include "_getsetc.S"
ENTRY(_getcontext_u_s87)
STUB(_getcontext_u_s87)
movl 4(%esp), %ecx
movl 0(%esp), %edx
GETC
@ -54,11 +55,11 @@ ENTRY(_getcontext_u_s87)
xorl %eax, %eax
ret
ENTRY(_setcontext_u_s87)
STUB(_setcontext_u_s87)
movl 4(%esp), %ecx
SETC
ENTRY(_swapcontext_u_s87)
STUB(_swapcontext_u_s87)
movl 4(%esp), %ecx
movl 0(%esp), %edx
GETC
@ -76,7 +77,7 @@ ENTRY(_swapcontext_u_s87)
#define FPLOAD(reg) fxrstor UC_FXSAVEREGS(reg)
#include "_getsetc.S"
ENTRY(_getcontext_u_xmm)
STUB(_getcontext_u_xmm)
movl 4(%esp), %ecx
movl 0(%esp), %edx
GETC
@ -86,11 +87,11 @@ ENTRY(_getcontext_u_xmm)
xorl %eax, %eax
ret
ENTRY(_setcontext_u_xmm)
STUB(_setcontext_u_xmm)
movl 4(%esp), %ecx
SETC
ENTRY(_swapcontext_u_xmm)
STUB(_swapcontext_u_xmm)
movl 4(%esp), %ecx
movl 0(%esp), %edx
GETC
@ -100,7 +101,7 @@ ENTRY(_swapcontext_u_xmm)
movl 8(%esp), %ecx
SETC
ENTRY(pthread__atomic_cas_ptr)
STUB(pthread__atomic_cas_ptr)
movl 4(%esp), %ecx
movl 8(%esp), %eax
movl 12(%esp), %edx
@ -116,23 +117,23 @@ ENTRY(pthread__atomic_cas_ptr)
mov $0, %eax
ret
ENTRY(pthread__atomic_swap_ptr)
STUB(pthread__atomic_swap_ptr)
movl 4(%esp), %ecx
movl 8(%esp), %eax
xchgl %eax, (%ecx)
ret
ENTRY(pthread__membar_full)
STUB(pthread__membar_full)
lock
addl $0, -4(%esp)
ret
ENTRY(pthread__membar_producer)
STUB(pthread__membar_producer)
/* A store is enough */
movl $0, -4(%esp)
ret
ENTRY(pthread__membar_consumer)
STUB(pthread__membar_consumer)
lock
addl $0, -4(%esp)
ret


@ -1,4 +1,4 @@
/* $NetBSD: _context_u.S,v 1.7 2007/09/11 16:07:15 ad Exp $ */
/* $NetBSD: _context_u.S,v 1.8 2007/11/13 15:57:14 ad Exp $ */
/*-
* Copyright (c) 2001 The NetBSD Foundation, Inc.
@ -41,6 +41,8 @@
#include <machine/asm.h>
#include "assym.h"
#define STUB(n) ENTRY(n); .hidden _C_LABEL(n)
#define GETC \
movq (%rsp), %r11 ; \
movq %r11, (UC_REGS + _REG_RIP * 8)(%rdi) ; \
@ -119,24 +121,24 @@
popq %rax ; \
lretq
ENTRY(_getcontext_u)
STUB(_getcontext_u)
GETC
leaq 8(%rsp), %r11
movq %r11, (UC_REGS + _REG_URSP * 8)(%rdi)
xorl %eax, %eax
ret
ENTRY(_setcontext_u)
STUB(_setcontext_u)
SETC
ENTRY(_swapcontext_u)
STUB(_swapcontext_u)
GETC
leaq 8(%rsp),%rax
movq %rax, (UC_REGS + _REG_URSP * 8)(%rdi)
movq %rsi, %rdi
SETC
ENTRY(pthread__atomic_cas_ptr)
STUB(pthread__atomic_cas_ptr)
movq (%rsi), %rax
lock
cmpxchgq %rdx, (%rdi)
@ -148,22 +150,22 @@ ENTRY(pthread__atomic_cas_ptr)
movq $0, %rax
ret
ENTRY(pthread__atomic_swap_ptr)
STUB(pthread__atomic_swap_ptr)
movq %rsi, %rax
xchgq %rax, (%rdi)
ret
ENTRY(pthread__membar_full)
STUB(pthread__membar_full)
lock
addq $0, -8(%rsp)
ret
ENTRY(pthread__membar_producer)
STUB(pthread__membar_producer)
/* A store is enough */
movq $0, -8(%rsp)
ret
ENTRY(pthread__membar_consumer)
STUB(pthread__membar_consumer)
lock
addq $0, -8(%rsp)
ret


@ -1,4 +1,4 @@
/* $NetBSD: pthread.c,v 1.86 2007/11/07 00:55:22 ad Exp $ */
/* $NetBSD: pthread.c,v 1.87 2007/11/13 15:57:10 ad Exp $ */
/*-
* Copyright (c) 2001, 2002, 2003, 2006, 2007 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread.c,v 1.86 2007/11/07 00:55:22 ad Exp $");
__RCSID("$NetBSD: pthread.c,v 1.87 2007/11/13 15:57:10 ad Exp $");
#define __EXPOSE_STACK 1
@ -60,13 +60,6 @@ __RCSID("$NetBSD: pthread.c,v 1.86 2007/11/07 00:55:22 ad Exp $");
#include "pthread.h"
#include "pthread_int.h"
#ifdef PTHREAD_MAIN_DEBUG
#define SDPRINTF(x) DPRINTF(x)
#else
#define SDPRINTF(x)
#endif
pthread_rwlock_t pthread__alltree_lock = PTHREAD_RWLOCK_INITIALIZER;
RB_HEAD(__pthread__alltree, __pthread_st) pthread__alltree;
@ -82,6 +75,8 @@ static int pthread__stackid_setup(void *, size_t, pthread_t *);
static int pthread__stackalloc(pthread_t *);
static void pthread__initmain(pthread_t *);
void pthread__init(void);
int pthread__started;
pthread_mutex_t pthread__deadqueue_lock = PTHREAD_MUTEX_INITIALIZER;
@ -124,6 +119,7 @@ __strong_alias(__libc_thr_create,pthread_create)
__strong_alias(__libc_thr_exit,pthread_exit)
__strong_alias(__libc_thr_errno,pthread__errno)
__strong_alias(__libc_thr_setcancelstate,pthread_setcancelstate)
__strong_alias(__libc_thr_init,pthread__init)
/*
* Static library kludge. Place a reference to a symbol any library
@ -148,7 +144,7 @@ void *pthread__static_lib_binder[] = {
* spinlock counts for mutexes is an internal example).
*/
void
pthread_init(void)
pthread__init(void)
{
pthread_t first;
char *p;
@ -199,7 +195,7 @@ pthread_init(void)
PTHREAD_MD_INIT
pthread__debug_init();
for (p = getenv("PTHREAD_DIAGASSERT"); p && *p; p++) {
for (p = pthread__getenv("PTHREAD_DIAGASSERT"); p && *p; p++) {
switch (*p) {
case 'a':
pthread__diagassert |= DIAGASSERT_ABORT;
@ -253,7 +249,6 @@ pthread__start(void)
* fork() before creating any threads.
*/
pthread_atfork(NULL, NULL, pthread__child_callback);
SDPRINTF(("(pthread__start %p) Started.\n", pthread__self()));
}
@ -263,8 +258,8 @@ static void
pthread__initthread(pthread_t t)
{
t->pt_self = t;
t->pt_magic = PT_MAGIC;
t->pt_spinlocks = 0;
t->pt_willpark = 0;
t->pt_unpark = 0;
t->pt_sleeponq = 0;
@ -274,6 +269,7 @@ pthread__initthread(pthread_t t)
t->pt_havespecific = 0;
t->pt_early = NULL;
memcpy(&t->pt_lockops, pthread__lock_ops, sizeof(t->pt_lockops));
pthread_mutex_init(&t->pt_lock, NULL);
PTQ_INIT(&t->pt_cleanup_stack);
PTQ_INIT(&t->pt_joiners);
@ -402,8 +398,6 @@ pthread_create(pthread_t *thread, const pthread_attr_t *attr,
flag |= LWP_DETACHED;
ret = _lwp_create(&newthread->pt_uc, flag, &newthread->pt_lid);
if (ret != 0) {
SDPRINTF(("(pthread_create %p) _lwp_create: %s\n",
strerror(errno)));
free(name);
newthread->pt_state = PT_STATE_DEAD;
pthread_mutex_lock(&pthread__deadqueue_lock);
@ -412,13 +406,6 @@ pthread_create(pthread_t *thread, const pthread_attr_t *attr,
return ret;
}
/* XXX must die */
newthread->pt_num = newthread->pt_lid;
SDPRINTF(("(pthread_create %p) new thread %p (name %p, lid %d).\n",
pthread__self(), newthread, newthread->pt_name,
(int)newthread->pt_lid));
*thread = newthread;
return 0;
@ -470,9 +457,6 @@ pthread_suspend_np(pthread_t thread)
if (pthread__find(thread) != 0)
return ESRCH;
#endif
SDPRINTF(("(pthread_suspend_np %p) Suspend thread %p.\n",
pthread__self(), thread));
if (_lwp_suspend(thread->pt_lid) == 0)
return 0;
return errno;
@ -486,9 +470,6 @@ pthread_resume_np(pthread_t thread)
if (pthread__find(thread) != 0)
return ESRCH;
#endif
SDPRINTF(("(pthread_resume_np %p) Resume thread %p.\n",
pthread__self(), thread));
if (_lwp_continue(thread->pt_lid) == 0)
return 0;
return errno;
@ -502,8 +483,6 @@ pthread_exit(void *retval)
char *name;
self = pthread__self();
SDPRINTF(("(pthread_exit %p) status %p, flags %x, cancel %d\n",
self, retval, self->pt_flags, self->pt_cancel));
/* Disable cancellability. */
pthread_mutex_lock(&self->pt_lock);
@ -555,7 +534,6 @@ pthread_join(pthread_t thread, void **valptr)
char *name;
self = pthread__self();
SDPRINTF(("(pthread_join %p) Joining %p.\n", self, thread));
if (pthread__find(thread) != 0)
return ESRCH;
@ -596,7 +574,6 @@ pthread_join(pthread_t thread, void **valptr)
pthread_mutex_lock(&pthread__deadqueue_lock);
PTQ_INSERT_HEAD(&pthread__deadqueue, thread, pt_deadq);
pthread_mutex_unlock(&pthread__deadqueue_lock);
SDPRINTF(("(pthread_join %p) Joined %p.\n", self, thread));
if (name != NULL)
free(name);
return 0;
@ -895,8 +872,6 @@ pthread__assertfunc(const char *file, int line, const char *function,
char buf[1024];
int len;
SDPRINTF(("(af)\n"));
/*
* snprintf should not acquire any locks, or we could
* end up deadlocked if the assert caller held locks.
@ -969,8 +944,6 @@ pthread__park(pthread_t self, pthread_spin_t *lock,
int rv, error;
void *obj;
SDPRINTF(("(pthread__park %p) queue %p enter\n", self, queue));
/* Clear the willpark flag, since we're about to block. */
self->pt_willpark = 0;
@ -1035,8 +1008,6 @@ pthread__park(pthread_t self, pthread_spin_t *lock,
break;
default:
OOPS("_lwp_park failed");
SDPRINTF(("(pthread__park %p) syscall rv=%d\n",
self, rv));
break;
}
}
@ -1051,7 +1022,7 @@ pthread__park(pthread_t self, pthread_spin_t *lock,
* without holding any locks.
*/
if (__predict_false(self->pt_sleeponq)) {
pthread_spinlock(lock);
pthread__spinlock(self, lock);
if (self->pt_sleeponq) {
PTQ_REMOVE(queue, self, pt_sleep);
obj = self->pt_sleepobj;
@ -1060,12 +1031,10 @@ pthread__park(pthread_t self, pthread_spin_t *lock,
if (obj != NULL && self->pt_early != NULL)
(*self->pt_early)(obj);
}
pthread_spinunlock(lock);
pthread__spinunlock(self, lock);
}
self->pt_early = NULL;
SDPRINTF(("(pthread__park %p) queue %p exit\n", self, queue));
return rv;
}
@ -1076,13 +1045,10 @@ pthread__unpark(pthread_t self, pthread_spin_t *lock,
int rv;
if (target == NULL) {
pthread_spinunlock(lock);
pthread__spinunlock(self, lock);
return;
}
SDPRINTF(("(pthread__unpark %p) queue %p target %p\n",
self, queue, target));
/*
* Easy: the thread has already been removed from
* the queue, so just awaken it.
@ -1096,7 +1062,7 @@ pthread__unpark(pthread_t self, pthread_spin_t *lock,
* to the thread in pthread__park() before the unpark
* operation is set in motion.
*/
pthread_spinunlock(lock);
pthread__spinunlock(self, lock);
/*
* If the calling thread is about to block, defer
@ -1108,8 +1074,6 @@ pthread__unpark(pthread_t self, pthread_spin_t *lock,
} else {
rv = _lwp_unpark(target->pt_lid, queue);
if (rv != 0 && errno != EALREADY && errno != EINTR) {
SDPRINTF(("(pthread__unpark %p) syscall rv=%d\n",
self, rv));
OOPS("_lwp_unpark failed");
}
}
@ -1124,7 +1088,7 @@ pthread__unpark_all(pthread_t self, pthread_spin_t *lock,
void *wakeobj;
if (PTQ_EMPTY(queue) && self->pt_nwaiters == 0) {
pthread_spinunlock(lock);
pthread__spinunlock(self, lock);
return;
}
@ -1161,8 +1125,6 @@ pthread__unpark_all(pthread_t self, pthread_spin_t *lock,
thread->pt_sleeponq = 0;
self->pt_waiters[n++] = thread->pt_lid;
PTQ_REMOVE(queue, thread, pt_sleep);
SDPRINTF(("(pthread__unpark_all %p) queue %p "
"unpark %p\n", self, queue, thread));
}
/*
@ -1173,7 +1135,7 @@ pthread__unpark_all(pthread_t self, pthread_spin_t *lock,
*/
switch (n) {
case 0:
pthread_spinunlock(lock);
pthread__spinunlock(self, lock);
return;
case 1:
/*
@ -1181,7 +1143,7 @@ pthread__unpark_all(pthread_t self, pthread_spin_t *lock,
* defer unparking the target until _lwp_park()
* is called.
*/
pthread_spinunlock(lock);
pthread__spinunlock(self, lock);
if (self->pt_willpark && self->pt_unpark == 0) {
self->pt_unpark = self->pt_waiters[0];
self->pt_unparkhint = queue;
@ -1190,8 +1152,6 @@ pthread__unpark_all(pthread_t self, pthread_spin_t *lock,
rv = (ssize_t)_lwp_unpark(self->pt_waiters[0], queue);
if (rv != 0 && errno != EALREADY && errno != EINTR) {
OOPS("_lwp_unpark failed");
SDPRINTF(("(pthread__unpark_all %p) "
"syscall rv=%d\n", self, rv));
}
return;
default:
@ -1210,17 +1170,15 @@ pthread__unpark_all(pthread_t self, pthread_spin_t *lock,
* marked to be woken (sleepobj == NULL).
*/
wakeobj = NULL;
pthread_spinunlock(lock);
pthread__spinunlock(self, lock);
rv = _lwp_unpark_all(self->pt_waiters, (size_t)n,
queue);
if (rv != 0 && errno != EINTR) {
OOPS("_lwp_unpark_all failed");
SDPRINTF(("(pthread__unpark_all %p) "
"syscall rv=%d\n", self, rv));
}
break;
}
pthread_spinlock(lock);
pthread__spinlock(self, lock);
}
}
@ -1269,8 +1227,9 @@ pthread__initmain(pthread_t *newt)
ret = getrlimit(RLIMIT_STACK, &slimit);
if (ret == -1)
err(1, "Couldn't get stack resource consumption limits");
value = getenv("PTHREAD_STACKSIZE");
if (value) {
value = pthread__getenv("PTHREAD_STACKSIZE");
if (value != NULL) {
pthread__stacksize = atoi(value) * 1024;
if (pthread__stacksize > slimit.rlim_cur)
pthread__stacksize = (size_t)slimit.rlim_cur;
@ -1345,3 +1304,14 @@ pthread__cmp(struct __pthread_st *a, struct __pthread_st *b)
RB_GENERATE_STATIC(__pthread__alltree, __pthread_st, pt_alltree, pthread__cmp)
#endif
/* Because getenv() wants to use locks. */
char *
pthread__getenv(const char *name)
{
extern char *__findenv(const char *, int *);
int off;
return __findenv(name, &off);
}


@ -1,4 +1,4 @@
/* $NetBSD: pthread_barrier.c,v 1.14 2007/08/16 13:54:16 ad Exp $ */
/* $NetBSD: pthread_barrier.c,v 1.15 2007/11/13 15:57:11 ad Exp $ */
/*-
* Copyright (c) 2001, 2003, 2006, 2007 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_barrier.c,v 1.14 2007/08/16 13:54:16 ad Exp $");
__RCSID("$NetBSD: pthread_barrier.c,v 1.15 2007/11/13 15:57:11 ad Exp $");
#include <errno.h>
@ -56,6 +56,7 @@ int
pthread_barrier_init(pthread_barrier_t *barrier,
const pthread_barrierattr_t *attr, unsigned int count)
{
pthread_t self;
#ifdef ERRORCHECK
if ((barrier == NULL) ||
@ -67,19 +68,21 @@ pthread_barrier_init(pthread_barrier_t *barrier,
return EINVAL;
if (barrier->ptb_magic == _PT_BARRIER_MAGIC) {
self = pthread__self();
/*
* We're simply reinitializing the barrier to a
* new count.
*/
pthread_spinlock(&barrier->ptb_lock);
pthread__spinlock(self, &barrier->ptb_lock);
if (barrier->ptb_magic != _PT_BARRIER_MAGIC) {
pthread_spinunlock(&barrier->ptb_lock);
pthread__spinunlock(self, &barrier->ptb_lock);
return EINVAL;
}
if (!PTQ_EMPTY(&barrier->ptb_waiters)) {
pthread_spinunlock(&barrier->ptb_lock);
pthread__spinunlock(self, &barrier->ptb_lock);
return EBUSY;
}
@ -87,7 +90,7 @@ pthread_barrier_init(pthread_barrier_t *barrier,
barrier->ptb_curcount = 0;
barrier->ptb_generation = 0;
pthread_spinunlock(&barrier->ptb_lock);
pthread__spinunlock(self, &barrier->ptb_lock);
return 0;
}
@ -106,27 +109,29 @@ pthread_barrier_init(pthread_barrier_t *barrier,
int
pthread_barrier_destroy(pthread_barrier_t *barrier)
{
pthread_t self;
#ifdef ERRORCHECK
if ((barrier == NULL) || (barrier->ptb_magic != _PT_BARRIER_MAGIC))
return EINVAL;
#endif
pthread_spinlock(&barrier->ptb_lock);
self = pthread__self();
pthread__spinlock(self, &barrier->ptb_lock);
if (barrier->ptb_magic != _PT_BARRIER_MAGIC) {
pthread_spinunlock(&barrier->ptb_lock);
pthread__spinunlock(self, &barrier->ptb_lock);
return EINVAL;
}
if (!PTQ_EMPTY(&barrier->ptb_waiters)) {
pthread_spinunlock(&barrier->ptb_lock);
pthread__spinunlock(self, &barrier->ptb_lock);
return EBUSY;
}
barrier->ptb_magic = _PT_BARRIER_DEAD;
pthread_spinunlock(&barrier->ptb_lock);
pthread__spinunlock(self, &barrier->ptb_lock);
return 0;
}
@ -144,7 +149,7 @@ pthread_barrier_wait(pthread_barrier_t *barrier)
#endif
self = pthread__self();
pthread_spinlock(&barrier->ptb_lock);
pthread__spinlock(self, &barrier->ptb_lock);
/*
* A single arbitrary thread is supposed to return
@ -174,15 +179,15 @@ pthread_barrier_wait(pthread_barrier_t *barrier)
PTQ_INSERT_TAIL(&barrier->ptb_waiters, self, pt_sleep);
self->pt_sleeponq = 1;
self->pt_sleepobj = &barrier->ptb_waiters;
pthread_spinunlock(&barrier->ptb_lock);
pthread__spinunlock(self, &barrier->ptb_lock);
(void)pthread__park(self, &barrier->ptb_lock,
&barrier->ptb_waiters, NULL, 0,
&barrier->ptb_waiters);
SDPRINTF(("(barrier wait %p) Woke up on %p\n",
self, barrier));
pthread_spinlock(&barrier->ptb_lock);
pthread__spinlock(self, &barrier->ptb_lock);
}
pthread_spinunlock(&barrier->ptb_lock);
pthread__spinunlock(self, &barrier->ptb_lock);
return 0;
}


@ -1,4 +1,4 @@
/* $NetBSD: pthread_cond.c,v 1.37 2007/09/13 23:51:47 ad Exp $ */
/* $NetBSD: pthread_cond.c,v 1.38 2007/11/13 15:57:11 ad Exp $ */
/*-
* Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_cond.c,v 1.37 2007/09/13 23:51:47 ad Exp $");
__RCSID("$NetBSD: pthread_cond.c,v 1.38 2007/11/13 15:57:11 ad Exp $");
#include <errno.h>
#include <sys/time.h>
@ -118,7 +118,7 @@ pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
* performance it's critical that the spinlock is held for
* as short a time as possible - that means no system calls.
*/
pthread_spinlock(&cond->ptc_lock);
pthread__spinlock(self, &cond->ptc_lock);
#ifdef ERRORCHECK
if (cond->ptc_mutex == NULL)
cond->ptc_mutex = mutex;
@ -134,7 +134,7 @@ pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
self->pt_signalled = 0;
self->pt_sleeponq = 1;
self->pt_sleepobj = &cond->ptc_waiters;
pthread_spinunlock(&cond->ptc_lock);
pthread__spinunlock(self, &cond->ptc_lock);
/*
* Before releasing the mutex, note that this thread is
@ -158,10 +158,10 @@ pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
* last issued a wakeup.
*/
if (PTQ_EMPTY(&cond->ptc_waiters) && cond->ptc_mutex != NULL) {
pthread_spinlock(&cond->ptc_lock);
pthread__spinlock(self, &cond->ptc_lock);
if (PTQ_EMPTY(&cond->ptc_waiters))
cond->ptc_mutex = NULL;
pthread_spinunlock(&cond->ptc_lock);
pthread__spinunlock(self, &cond->ptc_lock);
}
/*
@ -209,7 +209,7 @@ pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
* performance it's critical that the spinlock is held for
* as short a time as possible - that means no system calls.
*/
pthread_spinlock(&cond->ptc_lock);
pthread__spinlock(self, &cond->ptc_lock);
#ifdef ERRORCHECK
if (cond->ptc_mutex == NULL)
cond->ptc_mutex = mutex;
@ -225,7 +225,7 @@ pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
self->pt_signalled = 0;
self->pt_sleeponq = 1;
self->pt_sleepobj = &cond->ptc_waiters;
pthread_spinunlock(&cond->ptc_lock);
pthread__spinunlock(self, &cond->ptc_lock);
/*
* Before releasing the mutex, note that this thread is
@ -249,10 +249,10 @@ pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
* last issued a wakeup.
*/
if (PTQ_EMPTY(&cond->ptc_waiters) && cond->ptc_mutex != NULL) {
pthread_spinlock(&cond->ptc_lock);
pthread__spinlock(self, &cond->ptc_lock);
if (PTQ_EMPTY(&cond->ptc_waiters))
cond->ptc_mutex = NULL;
pthread_spinunlock(&cond->ptc_lock);
pthread__spinunlock(self, &cond->ptc_lock);
}
/*
@ -283,7 +283,7 @@ pthread_cond_signal(pthread_cond_t *cond)
return 0;
self = pthread__self();
pthread_spinlock(&cond->ptc_lock);
pthread__spinlock(self, &cond->ptc_lock);
/*
* Find a thread that is still blocked (no pending wakeup).
@ -296,7 +296,7 @@ pthread_cond_signal(pthread_cond_t *cond)
}
if (__predict_false(signaled == NULL)) {
cond->ptc_mutex = NULL;
pthread_spinunlock(&cond->ptc_lock);
pthread__spinunlock(self, &cond->ptc_lock);
return 0;
}
@ -328,7 +328,7 @@ pthread_cond_signal(pthread_cond_t *cond)
pthread__mutex_deferwake(self, mutex)) {
signaled->pt_sleepobj = NULL;
signaled->pt_sleeponq = 0;
pthread_spinunlock(&cond->ptc_lock);
pthread__spinunlock(self, &cond->ptc_lock);
self->pt_waiters[self->pt_nwaiters++] = signaled->pt_lid;
} else {
pthread__unpark(self, &cond->ptc_lock,
@ -355,7 +355,7 @@ pthread_cond_broadcast(pthread_cond_t *cond)
return 0;
self = pthread__self();
pthread_spinlock(&cond->ptc_lock);
pthread__spinlock(self, &cond->ptc_lock);
mutex = cond->ptc_mutex;
cond->ptc_mutex = NULL;
@ -382,7 +382,7 @@ pthread_cond_broadcast(pthread_cond_t *cond)
}
if (signaled == NULL) {
/* Anything more to do? */
pthread_spinunlock(&cond->ptc_lock);
pthread__spinunlock(self, &cond->ptc_lock);
return 0;
}
}


@ -1,4 +1,4 @@
/* $NetBSD: pthread_debug.c,v 1.13 2007/08/16 12:01:49 ad Exp $ */
/* $NetBSD: pthread_debug.c,v 1.14 2007/11/13 15:57:11 ad Exp $ */
/*-
* Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_debug.c,v 1.13 2007/08/16 12:01:49 ad Exp $");
__RCSID("$NetBSD: pthread_debug.c,v 1.14 2007/11/13 15:57:11 ad Exp $");
#include <err.h>
#include <errno.h>
@ -77,10 +77,10 @@ pthread__debug_init(void)
{
time_t t;
if (getenv("PTHREAD_DEBUGCOUNTERS") != NULL)
if (pthread__getenv("PTHREAD_DEBUGCOUNTERS") != NULL)
atexit(pthread__debug_printcounters);
if (getenv("PTHREAD_DEBUGLOG") != NULL) {
if (pthread__getenv("PTHREAD_DEBUGLOG") != NULL) {
t = time(NULL);
debugbuf = pthread__debuglog_init(0);
linebuf = calloc(1000, sizeof(struct linebuf));


@ -1,4 +1,4 @@
/* $NetBSD: pthread_int.h,v 1.59 2007/10/16 15:21:54 ad Exp $ */
/* $NetBSD: pthread_int.h,v 1.60 2007/11/13 15:57:11 ad Exp $ */
/*-
* Copyright (c) 2001, 2002, 2003, 2006, 2007 The NetBSD Foundation, Inc.
@ -57,6 +57,12 @@
#include <lwp.h>
#include <signal.h>
#ifdef __GNUC__
#define PTHREAD_HIDE __attribute__ ((visibility("hidden")))
#else
#define PTHREAD_HIDE /* nothing */
#endif
#define PTHREAD_KEYS_MAX 256
#define PTHREAD__UNPARK_MAX 32
@ -79,14 +85,20 @@ struct pthread_attr_private {
size_t ptap_guardsize;
};
struct pthread_lock_ops {
void (*plo_init)(__cpu_simple_lock_t *);
int (*plo_try)(__cpu_simple_lock_t *);
void (*plo_unlock)(__cpu_simple_lock_t *);
void (*plo_lock)(__cpu_simple_lock_t *);
};
struct __pthread_st {
pthread_t pt_self; /* Must be first. */
unsigned int pt_magic; /* Magic number */
int pt_num; /* ID XXX should die */
int pt_state; /* running, blocked, etc. */
pthread_mutex_t pt_lock; /* lock on state */
int pt_flags; /* see PT_FLAG_* below */
int pt_cancel; /* Deferred cancellation */
int pt_spinlocks; /* Number of spinlocks held. */
int pt_errno; /* Thread-specific errno. */
stack_t pt_stack; /* Our stack */
void *pt_exitval; /* Read by pthread_join() */
@ -94,6 +106,7 @@ struct __pthread_st {
int pt_willpark; /* About to park */
lwpid_t pt_unpark; /* Unpark this when parking */
void *pt_unparkhint; /* Hint for the above */
struct pthread_lock_ops pt_lockops;/* Cached to avoid PIC overhead */
/* Threads to defer waking, usually until pthread_mutex_unlock(). */
lwpid_t pt_waiters[PTHREAD__UNPARK_MAX];
@ -136,12 +149,6 @@ struct __pthread_st {
ucontext_t pt_uc;
};
struct pthread_lock_ops {
void (*plo_init)(__cpu_simple_lock_t *);
int (*plo_try)(__cpu_simple_lock_t *);
void (*plo_unlock)(__cpu_simple_lock_t *);
};
/* Thread states */
#define PT_STATE_RUNNING 1
#define PT_STATE_ZOMBIE 5
@ -179,44 +186,58 @@ extern int pthread__unpark_max;
#define _UC_USER_BIT 30
#define _UC_USER (1LU << _UC_USER_BIT)
void pthread_init(void) __attribute__ ((__constructor__));
/* Utility functions */
void pthread__unpark_all(pthread_t self, pthread_spin_t *lock,
pthread_queue_t *threadq);
void pthread__unpark(pthread_t self, pthread_spin_t *lock,
pthread_queue_t *queue, pthread_t target);
int pthread__park(pthread_t self, pthread_spin_t *lock,
pthread_queue_t *threadq,
const struct timespec *abs_timeout,
int cancelpt, const void *hint);
void pthread__unpark_all(pthread_t, pthread_spin_t *, pthread_queue_t *)
PTHREAD_HIDE;
void pthread__unpark(pthread_t, pthread_spin_t *, pthread_queue_t *,
pthread_t) PTHREAD_HIDE;
int pthread__park(pthread_t, pthread_spin_t *, pthread_queue_t *,
const struct timespec *, int, const void *)
PTHREAD_HIDE;
/* Internal locking primitives */
void pthread__lockprim_init(void);
void pthread_lockinit(pthread_spin_t *);
void pthread_spinlock(pthread_spin_t *);
int pthread_spintrylock(pthread_spin_t *);
void pthread_spinunlock(pthread_spin_t *);
void pthread__lockprim_init(void) PTHREAD_HIDE;
void pthread_lockinit(pthread_spin_t *) PTHREAD_HIDE;
static inline void
pthread__spinlock(pthread_t self, pthread_spin_t *lock)
{
if (__predict_true((*self->pt_lockops.plo_try)(lock)))
return;
(*self->pt_lockops.plo_lock)(lock);
}
static inline int
pthread__spintrylock(pthread_t self, pthread_spin_t *lock)
{
return (*self->pt_lockops.plo_try)(lock);
}
static inline void
pthread__spinunlock(pthread_t self, pthread_spin_t *lock)
{
(*self->pt_lockops.plo_unlock)(lock);
}
extern const struct pthread_lock_ops *pthread__lock_ops;
int pthread__simple_locked_p(__cpu_simple_lock_t *);
int pthread__simple_locked_p(__cpu_simple_lock_t *) PTHREAD_HIDE;
#define pthread__simple_lock_init(alp) (*pthread__lock_ops->plo_init)(alp)
#define pthread__simple_lock_try(alp) (*pthread__lock_ops->plo_try)(alp)
#define pthread__simple_unlock(alp) (*pthread__lock_ops->plo_unlock)(alp)
#ifndef _getcontext_u
int _getcontext_u(ucontext_t *);
int _getcontext_u(ucontext_t *) PTHREAD_HIDE;
#endif
#ifndef _setcontext_u
int _setcontext_u(const ucontext_t *);
int _setcontext_u(const ucontext_t *) PTHREAD_HIDE;
#endif
#ifndef _swapcontext_u
int _swapcontext_u(ucontext_t *, const ucontext_t *);
int _swapcontext_u(ucontext_t *, const ucontext_t *) PTHREAD_HIDE;
#endif
void pthread__testcancel(pthread_t);
int pthread__find(pthread_t);
void pthread__testcancel(pthread_t) PTHREAD_HIDE;
int pthread__find(pthread_t) PTHREAD_HIDE;
#ifndef PTHREAD_MD_INIT
#define PTHREAD_MD_INIT
@ -258,20 +279,21 @@ int pthread__find(pthread_t);
} \
} while (/*CONSTCOND*/0)
void pthread__destroy_tsd(pthread_t self);
void pthread__assertfunc(const char *file, int line, const char *function,
const char *expr);
void pthread__errorfunc(const char *file, int line, const char *function,
const char *msg);
void pthread__destroy_tsd(pthread_t) PTHREAD_HIDE;
void pthread__assertfunc(const char *, int, const char *, const char *)
PTHREAD_HIDE;
void pthread__errorfunc(const char *, int, const char *, const char *)
PTHREAD_HIDE;
char *pthread__getenv(const char *) PTHREAD_HIDE;
int pthread__atomic_cas_ptr(volatile void *, void **, void *);
void *pthread__atomic_swap_ptr(volatile void *, void *);
void pthread__membar_full(void);
void pthread__membar_producer(void);
void pthread__membar_consumer(void);
int pthread__atomic_cas_ptr(volatile void *, void **, void *) PTHREAD_HIDE;
void *pthread__atomic_swap_ptr(volatile void *, void *) PTHREAD_HIDE;
void pthread__membar_full(void) PTHREAD_HIDE;
void pthread__membar_producer(void) PTHREAD_HIDE;
void pthread__membar_consumer(void) PTHREAD_HIDE;
int pthread__mutex_deferwake(pthread_t, pthread_mutex_t *);
int pthread__mutex_catchup(pthread_mutex_t *);
int pthread__mutex_deferwake(pthread_t, pthread_mutex_t *) PTHREAD_HIDE;
int pthread__mutex_catchup(pthread_mutex_t *) PTHREAD_HIDE;
#ifndef pthread__smt_pause
#define pthread__smt_pause() /* nothing */


@ -1,4 +1,4 @@
/* $NetBSD: pthread_lock.c,v 1.31 2007/10/04 21:04:32 ad Exp $ */
/* $NetBSD: pthread_lock.c,v 1.32 2007/11/13 15:57:11 ad Exp $ */
/*-
* Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
@ -41,7 +41,7 @@
*/
#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_lock.c,v 1.31 2007/10/04 21:04:32 ad Exp $");
__RCSID("$NetBSD: pthread_lock.c,v 1.32 2007/11/13 15:57:11 ad Exp $");
#include <sys/types.h>
#include <sys/lock.h>
@ -56,17 +56,11 @@ __RCSID("$NetBSD: pthread_lock.c,v 1.31 2007/10/04 21:04:32 ad Exp $");
#include "pthread_int.h"
/* How many times to try acquiring spin locks on MP systems. */
#define PTHREAD__NSPINS 64
static void pthread_spinlock_slow(pthread_spin_t *);
#define PTHREAD__NSPINS 64
RAS_DECL(pthread__lock);
int
pthread__simple_locked_p(__cpu_simple_lock_t *alp)
{
return __SIMPLELOCK_LOCKED_P(alp);
}
static void pthread__spinlock_slow(pthread_spin_t *);
#ifdef PTHREAD__ASM_RASOPS
@ -109,6 +103,7 @@ static const struct pthread_lock_ops pthread__lock_ops_ras = {
pthread__ras_simple_lock_init,
pthread__ras_simple_lock_try,
pthread__ras_simple_unlock,
pthread__spinlock_slow,
};
static void
@ -136,6 +131,7 @@ static const struct pthread_lock_ops pthread__lock_ops_atomic = {
pthread__atomic_simple_lock_init,
pthread__atomic_simple_lock_try,
pthread__atomic_simple_unlock,
pthread__spinlock_slow,
};
/*
@ -146,33 +142,24 @@ static const struct pthread_lock_ops pthread__lock_ops_atomic = {
*/
const struct pthread_lock_ops *pthread__lock_ops = &pthread__lock_ops_ras;
void
pthread_spinlock(pthread_spin_t *lock)
{
if (__predict_true(pthread__simple_lock_try(lock)))
return;
pthread_spinlock_slow(lock);
}
/*
* Prevent this routine from being inlined. The common case is no
* contention and it's better to not burden the instruction decoder.
*/
#if __GNUC_PREREQ__(3, 0)
__attribute ((noinline))
#endif
static void
pthread_spinlock_slow(pthread_spin_t *lock)
pthread__spinlock_slow(pthread_spin_t *lock)
{
pthread_t self;
int count;
self = pthread__self();
do {
count = pthread__nspins;
while (pthread__simple_locked_p(lock) && --count > 0)
while (__SIMPLELOCK_LOCKED_P(lock) && --count > 0)
pthread__smt_pause();
if (count > 0) {
if (pthread__simple_lock_try(lock))
if ((*self->pt_lockops.plo_try)(lock))
break;
continue;
}
@ -180,18 +167,6 @@ pthread_spinlock_slow(pthread_spin_t *lock)
} while (/*CONSTCOND*/ 1);
}
int
pthread_spintrylock(pthread_spin_t *lock)
{
return pthread__simple_lock_try(lock);
}
void
pthread_spinunlock(pthread_spin_t *lock)
{
pthread__simple_unlock(lock);
}
/*
* Initialize the locking primitives. On uniprocessors, we always
* use Restartable Atomic Sequences if they are available. Otherwise,
@ -202,7 +177,7 @@ pthread__lockprim_init(void)
{
char *p;
if ((p = getenv("PTHREAD_NSPINS")) != NULL)
if ((p = pthread__getenv("PTHREAD_NSPINS")) != NULL)
pthread__nspins = atoi(p);
else if (pthread__concurrency != 1)
pthread__nspins = PTHREAD__NSPINS;


@ -1,4 +1,4 @@
/* $NetBSD: pthread_mutex.c,v 1.36 2007/09/13 23:51:47 ad Exp $ */
/* $NetBSD: pthread_mutex.c,v 1.37 2007/11/13 15:57:11 ad Exp $ */
/*-
* Copyright (c) 2001, 2003, 2006, 2007 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_mutex.c,v 1.36 2007/09/13 23:51:47 ad Exp $");
__RCSID("$NetBSD: pthread_mutex.c,v 1.37 2007/11/13 15:57:11 ad Exp $");
#include <errno.h>
#include <limits.h>
@ -142,8 +142,8 @@ pthread_mutex_destroy(pthread_mutex_t *mutex)
* same mutex.
*
* A memory barrier after a lock and before an unlock will provide
* this behavior. This code relies on pthread__simple_lock_try() to issue
* a barrier after obtaining a lock, and on pthread__simple_unlock() to
* this behavior. This code relies on pthread__spintrylock() to issue
* a barrier after obtaining a lock, and on pthread__spinunlock() to
* issue a barrier before releasing a lock.
*/
@ -161,7 +161,7 @@ pthread_mutex_lock(pthread_mutex_t *mutex)
* Note that if we get the lock, we don't have to deal with any
* non-default lock type handling.
*/
if (__predict_false(pthread__simple_lock_try(&mutex->ptm_lock) == 0)) {
if (__predict_false(pthread__spintrylock(self, &mutex->ptm_lock) == 0)) {
error = pthread_mutex_lock_slow(self, mutex);
if (error)
return error;
@ -194,13 +194,13 @@ pthread_mutex_lock_slow(pthread_t self, pthread_mutex_t *mutex)
while (__SIMPLELOCK_LOCKED_P(&mutex->ptm_lock) && --count > 0)
pthread__smt_pause();
if (count > 0) {
if (pthread__simple_lock_try(&mutex->ptm_lock) != 0)
if (pthread__spintrylock(self, &mutex->ptm_lock) != 0)
break;
continue;
}
/* Okay, didn't look free. Get the interlock... */
pthread_spinlock(&mutex->ptm_interlock);
pthread__spinlock(self, &mutex->ptm_interlock);
/*
* The mutex_unlock routine will get the interlock
@ -212,7 +212,7 @@ pthread_mutex_lock_slow(pthread_t self, pthread_mutex_t *mutex)
PTQ_INSERT_HEAD(&mutex->ptm_blocked, self, pt_sleep);
if (__SIMPLELOCK_UNLOCKED_P(&mutex->ptm_lock)) {
PTQ_REMOVE(&mutex->ptm_blocked, self, pt_sleep);
pthread_spinunlock(&mutex->ptm_interlock);
pthread__spinunlock(self, &mutex->ptm_interlock);
continue;
}
@ -221,7 +221,7 @@ pthread_mutex_lock_slow(pthread_t self, pthread_mutex_t *mutex)
switch (mp->type) {
case PTHREAD_MUTEX_ERRORCHECK:
PTQ_REMOVE(&mutex->ptm_blocked, self, pt_sleep);
pthread_spinunlock(&mutex->ptm_interlock);
pthread__spinunlock(self, &mutex->ptm_interlock);
return EDEADLK;
case PTHREAD_MUTEX_RECURSIVE:
@ -232,7 +232,7 @@ pthread_mutex_lock_slow(pthread_t self, pthread_mutex_t *mutex)
* own the mutex.
*/
PTQ_REMOVE(&mutex->ptm_blocked, self, pt_sleep);
pthread_spinunlock(&mutex->ptm_interlock);
pthread__spinunlock(self, &mutex->ptm_interlock);
if (mp->recursecount == INT_MAX)
return EAGAIN;
mp->recursecount++;
@ -260,7 +260,7 @@ pthread_mutex_lock_slow(pthread_t self, pthread_mutex_t *mutex)
*/
self->pt_sleeponq = 1;
self->pt_sleepobj = &mutex->ptm_blocked;
pthread_spinunlock(&mutex->ptm_interlock);
pthread__spinunlock(self, &mutex->ptm_interlock);
(void)pthread__park(self, &mutex->ptm_interlock,
&mutex->ptm_blocked, NULL, 0, &mutex->ptm_blocked);
}
@ -281,7 +281,7 @@ pthread_mutex_trylock(pthread_mutex_t *mutex)
self = pthread__self();
PTHREADD_ADD(PTHREADD_MUTEX_TRYLOCK);
if (pthread__simple_lock_try(&mutex->ptm_lock) == 0) {
if (pthread__spintrylock(self, &mutex->ptm_lock) == 0) {
/*
* These tests can be performed without holding the
* interlock because these fields are only modified
@ -322,7 +322,7 @@ pthread_mutex_unlock(pthread_mutex_t *mutex)
* interlock because these fields are only modified
* if we know we own the mutex.
*/
self = pthread_self();
self = pthread__self();
weown = (mutex->ptm_owner == self);
mp = mutex->ptm_private;
@ -352,7 +352,7 @@ pthread_mutex_unlock(pthread_mutex_t *mutex)
}
mutex->ptm_owner = NULL;
pthread__simple_unlock(&mutex->ptm_lock);
pthread__spinunlock(self, &mutex->ptm_lock);
/*
* Do a double-checked locking dance to see if there are any
@ -364,7 +364,7 @@ pthread_mutex_unlock(pthread_mutex_t *mutex)
* examination of the queue; if so, no harm is done, as the
* waiter will loop and see that the mutex is still locked.
*/
pthread_spinlock(&mutex->ptm_interlock);
pthread__spinlock(self, &mutex->ptm_interlock);
pthread__unpark_all(self, &mutex->ptm_interlock, &mutex->ptm_blocked);
return 0;
}


@ -1,4 +1,4 @@
/* $NetBSD: pthread_rwlock.c,v 1.21 2007/09/07 14:09:28 ad Exp $ */
/* $NetBSD: pthread_rwlock.c,v 1.22 2007/11/13 15:57:13 ad Exp $ */
/*-
* Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_rwlock.c,v 1.21 2007/09/07 14:09:28 ad Exp $");
__RCSID("$NetBSD: pthread_rwlock.c,v 1.22 2007/11/13 15:57:13 ad Exp $");
#include <errno.h>
@ -102,10 +102,10 @@ pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
#endif
self = pthread__self();
pthread_spinlock(&rwlock->ptr_interlock);
pthread__spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
if (rwlock->ptr_writer == self) {
pthread_spinunlock(&rwlock->ptr_interlock);
pthread__spinunlock(self, &rwlock->ptr_interlock);
return EDEADLK;
}
#endif
@ -119,14 +119,14 @@ pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
PTQ_INSERT_TAIL(&rwlock->ptr_rblocked, self, pt_sleep);
self->pt_sleeponq = 1;
self->pt_sleepobj = &rwlock->ptr_rblocked;
pthread_spinunlock(&rwlock->ptr_interlock);
pthread__spinunlock(self, &rwlock->ptr_interlock);
(void)pthread__park(self, &rwlock->ptr_interlock,
&rwlock->ptr_rblocked, NULL, 0, &rwlock->ptr_rblocked);
pthread_spinlock(&rwlock->ptr_interlock);
pthread__spinlock(self, &rwlock->ptr_interlock);
}
rwlock->ptr_nreaders++;
pthread_spinunlock(&rwlock->ptr_interlock);
pthread__spinunlock(self, &rwlock->ptr_interlock);
return 0;
}
@ -135,13 +135,15 @@ pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
int
pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
pthread_t self;
#ifdef ERRORCHECK
if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
return EINVAL;
#endif
pthread_spinlock(&rwlock->ptr_interlock);
self = pthread__self();
pthread__spinlock(self, &rwlock->ptr_interlock);
/*
* Don't get a readlock if there is a writer or if there are waiting
* writers; i.e. prefer writers to readers. This strategy is dictated
@ -149,12 +151,12 @@ pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
*/
if ((rwlock->ptr_writer != NULL) ||
(!PTQ_EMPTY(&rwlock->ptr_wblocked))) {
pthread_spinunlock(&rwlock->ptr_interlock);
pthread__spinunlock(self, &rwlock->ptr_interlock);
return EBUSY;
}
rwlock->ptr_nreaders++;
pthread_spinunlock(&rwlock->ptr_interlock);
pthread__spinunlock(self, &rwlock->ptr_interlock);
return 0;
}
@ -172,10 +174,10 @@ pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
#endif
self = pthread__self();
pthread_spinlock(&rwlock->ptr_interlock);
pthread__spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
if (rwlock->ptr_writer == self) {
pthread_spinunlock(&rwlock->ptr_interlock);
pthread__spinunlock(self, &rwlock->ptr_interlock);
return EDEADLK;
}
#endif
@ -186,21 +188,21 @@ pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
while ((rwlock->ptr_nreaders > 0) || (rwlock->ptr_writer != NULL)) {
#ifdef ERRORCHECK
if (pthread__started == 0) {
pthread_spinunlock(&rwlock->ptr_interlock);
pthread__spinunlock(self, &rwlock->ptr_interlock);
return EDEADLK;
}
#endif
PTQ_INSERT_TAIL(&rwlock->ptr_wblocked, self, pt_sleep);
self->pt_sleeponq = 1;
self->pt_sleepobj = &rwlock->ptr_wblocked;
pthread_spinunlock(&rwlock->ptr_interlock);
pthread__spinunlock(self, &rwlock->ptr_interlock);
(void)pthread__park(self, &rwlock->ptr_interlock,
&rwlock->ptr_wblocked, NULL, 0, &rwlock->ptr_wblocked);
pthread_spinlock(&rwlock->ptr_interlock);
pthread__spinlock(self, &rwlock->ptr_interlock);
}
rwlock->ptr_writer = self;
pthread_spinunlock(&rwlock->ptr_interlock);
pthread__spinunlock(self, &rwlock->ptr_interlock);
return 0;
}
@ -216,18 +218,18 @@ pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
#endif
self = pthread__self();
pthread_spinlock(&rwlock->ptr_interlock);
pthread__spinlock(self, &rwlock->ptr_interlock);
/*
* Prefer writers to readers here; permit writers even if there are
* waiting readers.
*/
if ((rwlock->ptr_nreaders > 0) || (rwlock->ptr_writer != NULL)) {
pthread_spinunlock(&rwlock->ptr_interlock);
pthread__spinunlock(self, &rwlock->ptr_interlock);
return EBUSY;
}
rwlock->ptr_writer = self;
pthread_spinunlock(&rwlock->ptr_interlock);
pthread__spinunlock(self, &rwlock->ptr_interlock);
return 0;
}
@ -252,10 +254,10 @@ pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock,
return EINVAL;
self = pthread__self();
pthread_spinlock(&rwlock->ptr_interlock);
pthread__spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
if (rwlock->ptr_writer == self) {
pthread_spinunlock(&rwlock->ptr_interlock);
pthread__spinunlock(self, &rwlock->ptr_interlock);
return EDEADLK;
}
#endif
@ -270,11 +272,11 @@ pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock,
PTQ_INSERT_TAIL(&rwlock->ptr_rblocked, self, pt_sleep);
self->pt_sleeponq = 1;
self->pt_sleepobj = &rwlock->ptr_rblocked;
pthread_spinunlock(&rwlock->ptr_interlock);
pthread__spinunlock(self, &rwlock->ptr_interlock);
retval = pthread__park(self, &rwlock->ptr_interlock,
&rwlock->ptr_rblocked, abs_timeout, 0,
&rwlock->ptr_rblocked);
pthread_spinlock(&rwlock->ptr_interlock);
pthread__spinlock(self, &rwlock->ptr_interlock);
}
/* One last chance to get the lock, in case it was released between
@ -285,7 +287,7 @@ pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock,
rwlock->ptr_nreaders++;
retval = 0;
}
pthread_spinunlock(&rwlock->ptr_interlock);
pthread__spinunlock(self, &rwlock->ptr_interlock);
return retval;
}
@ -311,10 +313,10 @@ pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock,
return EINVAL;
self = pthread__self();
pthread_spinlock(&rwlock->ptr_interlock);
pthread__spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
if (rwlock->ptr_writer == self) {
pthread_spinunlock(&rwlock->ptr_interlock);
pthread__spinunlock(self, &rwlock->ptr_interlock);
return EDEADLK;
}
#endif
@ -327,25 +329,25 @@ pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock,
((rwlock->ptr_nreaders > 0) || (rwlock->ptr_writer != NULL))) {
#ifdef ERRORCHECK
if (pthread__started == 0) {
pthread_spinunlock(&rwlock->ptr_interlock);
pthread__spinunlock(self, &rwlock->ptr_interlock);
return EDEADLK;
}
#endif
PTQ_INSERT_TAIL(&rwlock->ptr_wblocked, self, pt_sleep);
self->pt_sleeponq = 1;
self->pt_sleepobj = &rwlock->ptr_wblocked;
pthread_spinunlock(&rwlock->ptr_interlock);
pthread__spinunlock(self, &rwlock->ptr_interlock);
retval = pthread__park(self, &rwlock->ptr_interlock,
&rwlock->ptr_wblocked, abs_timeout, 0,
&rwlock->ptr_wblocked);
pthread_spinlock(&rwlock->ptr_interlock);
pthread__spinlock(self, &rwlock->ptr_interlock);
}
if ((rwlock->ptr_nreaders == 0) && (rwlock->ptr_writer == NULL)) {
rwlock->ptr_writer = self;
retval = 0;
}
pthread_spinunlock(&rwlock->ptr_interlock);
pthread__spinunlock(self, &rwlock->ptr_interlock);
return retval;
}
@ -362,12 +364,12 @@ pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
writer = NULL;
self = pthread__self();
pthread_spinlock(&rwlock->ptr_interlock);
pthread__spinlock(self, &rwlock->ptr_interlock);
if (rwlock->ptr_writer != NULL) {
/* Releasing a write lock. */
#ifdef ERRORCHECK
if (rwlock->ptr_writer != self) {
pthread_spinunlock(&rwlock->ptr_interlock);
pthread__spinunlock(self, &rwlock->ptr_interlock);
return EPERM;
}
#endif
@ -391,7 +393,7 @@ pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
}
#ifdef ERRORCHECK
} else {
pthread_spinunlock(&rwlock->ptr_interlock);
pthread__spinunlock(self, &rwlock->ptr_interlock);
return EPERM;
#endif
}


@ -1,4 +1,4 @@
/* $NetBSD: pthread_rwlock2.c,v 1.5 2007/09/21 16:24:45 ad Exp $ */
/* $NetBSD: pthread_rwlock2.c,v 1.6 2007/11/13 15:57:13 ad Exp $ */
/*-
* Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_rwlock2.c,v 1.5 2007/09/21 16:24:45 ad Exp $");
__RCSID("$NetBSD: pthread_rwlock2.c,v 1.6 2007/11/13 15:57:13 ad Exp $");
#include <errno.h>
#include <stddef.h>
@ -147,14 +147,14 @@ pthread__rwlock_rdlock(pthread_rwlock_t *ptr, const struct timespec *ts)
* Grab the interlock. Once we have that, we
* can adjust the waiter bits and sleep queue.
*/
pthread_spinlock(&ptr->ptr_interlock);
pthread__spinlock(self, &ptr->ptr_interlock);
/*
* Mark the rwlock as having waiters. If the set fails,
* then we may not need to sleep and should spin again.
*/
if (!rw_cas(ptr, &owner, owner | RW_HAS_WAITERS)) {
pthread_spinunlock(&ptr->ptr_interlock);
pthread__spinunlock(self, &ptr->ptr_interlock);
continue;
}
@ -165,7 +165,7 @@ pthread__rwlock_rdlock(pthread_rwlock_t *ptr, const struct timespec *ts)
self->pt_sleeponq = 1;
self->pt_sleepobj = &ptr->ptr_rblocked;
self->pt_early = pthread__rwlock_early;
pthread_spinunlock(&ptr->ptr_interlock);
pthread__spinunlock(self, &ptr->ptr_interlock);
error = pthread__park(self, &ptr->ptr_interlock,
&ptr->ptr_rblocked, ts, 0, &ptr->ptr_rblocked);
@ -260,7 +260,7 @@ pthread__rwlock_wrlock(pthread_rwlock_t *ptr, const struct timespec *ts)
* Grab the interlock. Once we have that, we
* can adjust the waiter bits and sleep queue.
*/
pthread_spinlock(&ptr->ptr_interlock);
pthread__spinlock(self, &ptr->ptr_interlock);
/*
* Mark the rwlock as having waiters. If the set fails,
@ -268,7 +268,7 @@ pthread__rwlock_wrlock(pthread_rwlock_t *ptr, const struct timespec *ts)
*/
if (!rw_cas(ptr, &owner,
owner | RW_HAS_WAITERS | RW_WRITE_WANTED)) {
pthread_spinunlock(&ptr->ptr_interlock);
pthread__spinunlock(self, &ptr->ptr_interlock);
continue;
}
@ -278,7 +278,7 @@ pthread__rwlock_wrlock(pthread_rwlock_t *ptr, const struct timespec *ts)
self->pt_sleeponq = 1;
self->pt_sleepobj = &ptr->ptr_wblocked;
self->pt_early = pthread__rwlock_early;
pthread_spinunlock(&ptr->ptr_interlock);
pthread__spinunlock(self, &ptr->ptr_interlock);
error = pthread__park(self, &ptr->ptr_interlock,
&ptr->ptr_wblocked, ts, 0, &ptr->ptr_wblocked);
@ -422,10 +422,10 @@ pthread_rwlock_unlock(pthread_rwlock_t *ptr)
* the waiter bits. We must check to see if there are
* still waiters before proceeding.
*/
pthread_spinlock(&ptr->ptr_interlock);
pthread__spinlock(self, &ptr->ptr_interlock);
owner = (uintptr_t)ptr->ptr_owner;
if ((owner & RW_HAS_WAITERS) == 0) {
pthread_spinunlock(&ptr->ptr_interlock);
pthread__spinunlock(self, &ptr->ptr_interlock);
continue;
}
@ -498,7 +498,7 @@ pthread__rwlock_early(void *obj)
pthread_t self;
u_int off;
self = pthread_self();
self = pthread__self();
switch (self->pt_rwlocked) {
case _RW_WANT_READ:


@ -1,4 +1,4 @@
/* $NetBSD: pthread_spin.c,v 1.2 2007/09/10 11:34:06 skrll Exp $ */
/* $NetBSD: pthread_spin.c,v 1.3 2007/11/13 15:57:14 ad Exp $ */
/*-
* Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
@ -41,7 +41,7 @@
*/
#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_spin.c,v 1.2 2007/09/10 11:34:06 skrll Exp $");
__RCSID("$NetBSD: pthread_spin.c,v 1.3 2007/11/13 15:57:14 ad Exp $");
#include <sys/types.h>
#include <sys/lock.h>
@ -96,13 +96,15 @@ pthread_spin_destroy(pthread_spinlock_t *lock)
int
pthread_spin_lock(pthread_spinlock_t *lock)
{
pthread_t self;
#ifdef ERRORCHECK
if (lock == NULL || lock->pts_magic != _PT_SPINLOCK_MAGIC)
return EINVAL;
#endif
while (pthread__simple_lock_try(&lock->pts_spin) == 0) {
self = pthread__self();
while (pthread__spintrylock(self, &lock->pts_spin) == 0) {
pthread__smt_pause();
}
@ -112,28 +114,31 @@ pthread_spin_lock(pthread_spinlock_t *lock)
int
pthread_spin_trylock(pthread_spinlock_t *lock)
{
pthread_t self;
#ifdef ERRORCHECK
if (lock == NULL || lock->pts_magic != _PT_SPINLOCK_MAGIC)
return EINVAL;
#endif
if (pthread__simple_lock_try(&lock->pts_spin) == 0)
self = pthread__self();
if (pthread__spintrylock(self, &lock->pts_spin) == 0)
return EBUSY;
return 0;
}
int
pthread_spin_unlock(pthread_spinlock_t *lock)
{
pthread_t self;
#ifdef ERRORCHECK
if (lock == NULL || lock->pts_magic != _PT_SPINLOCK_MAGIC)
return EINVAL;
#endif
pthread__simple_unlock(&lock->pts_spin);
self = pthread__self();
pthread__spinunlock(self, &lock->pts_spin);
return 0;
}


@ -1,4 +1,4 @@
/* $NetBSD: res_state.c,v 1.4 2004/06/03 19:32:27 christos Exp $ */
/* $NetBSD: res_state.c,v 1.5 2007/11/13 15:57:14 ad Exp $ */
/*-
* Copyright (c) 2004 The NetBSD Foundation, Inc.
@ -38,7 +38,7 @@
#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
__RCSID("$NetBSD: res_state.c,v 1.4 2004/06/03 19:32:27 christos Exp $");
__RCSID("$NetBSD: res_state.c,v 1.5 2007/11/13 15:57:14 ad Exp $");
#endif
#include <sys/types.h>
@ -73,7 +73,7 @@ static void
res_state_debug(const char *msg, void *p)
{
char buf[512];
pthread_t self = pthread_self();
pthread_t self = pthread__self();
int len = snprintf(buf, sizeof(buf), "%p: %s %p\n", self, msg, p);
(void)write(STDOUT_FILENO, buf, (size_t)len);