Pull up following revision(s) (requested by ad in ticket #647):

	lib/libpthread/pthread_rwlock.c: revision 1.37 (patch)
	lib/libpthread/pthread_misc.c: revision 1.16
	lib/libpthread/pthread.c: revision 1.154
	lib/libpthread/pthread_int.h: revision 1.98
	lib/libpthread/pthread_cond.c: revision 1.66
	lib/libpthread/pthread_mutex.c: revision 1.66

Rip out some very ambitious optimisations around pthread_mutex that
don't buy much.  This stuff is hard enough to get right in the kernel,
let alone userspace, and I don't trust that it's right.
martin 2020-01-26 10:55:16 +00:00
parent 4b4a5dea31
commit c592185caf
6 changed files with 35 additions and 88 deletions

lib/libpthread/pthread.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: pthread.c,v 1.153 2019/03/05 01:35:52 christos Exp $	*/
+/*	$NetBSD: pthread.c,v 1.153.2.1 2020/01/26 10:55:16 martin Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2002, 2003, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 #include <sys/cdefs.h>
-__RCSID("$NetBSD: pthread.c,v 1.153 2019/03/05 01:35:52 christos Exp $");
+__RCSID("$NetBSD: pthread.c,v 1.153.2.1 2020/01/26 10:55:16 martin Exp $");
 
 #define	__EXPOSE_STACK	1
@@ -319,7 +319,6 @@ pthread__initthread(pthread_t t)
 	t->pt_havespecific = 0;
 	t->pt_early = NULL;
 	t->pt_lwpctl = &pthread__dummy_lwpctl;
-	t->pt_blocking = 0;
 	t->pt_droplock = NULL;
 
 	memcpy(&t->pt_lockops, pthread__lock_ops, sizeof(t->pt_lockops));
@@ -1157,15 +1156,9 @@ pthread__park(pthread_t self, pthread_mutex_t *lock,
 	int rv, error;
 	void *obj;
 
-	/*
-	 * For non-interlocked release of mutexes we need a store
-	 * barrier before incrementing pt_blocking away from zero.
-	 * This is provided by pthread_mutex_unlock().
-	 */
 	self->pt_willpark = 1;
 	pthread_mutex_unlock(lock);
 	self->pt_willpark = 0;
-	self->pt_blocking++;
 
 	/*
 	 * Wait until we are awoken by a pending unpark operation,
@@ -1239,8 +1232,6 @@ pthread__park(pthread_t self, pthread_mutex_t *lock,
 		pthread_mutex_unlock(lock);
 	}
 	self->pt_early = NULL;
-	self->pt_blocking--;
-	membar_sync();
 
 	return rv;
 }
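
Note the window in pthread__park(): the interlock is dropped before the thread actually sleeps, so the matching wakeup can arrive first. It is not lost because the kernel remembers an _lwp_unpark() aimed at a thread that has not parked yet, and that thread's next _lwp_park() then returns immediately. A minimal sketch of that token idea (the parker type is hypothetical, and a busy-wait stands in for the two syscalls):

#include <stdatomic.h>

/* One wakeup token per thread; an unpark may arrive before the park. */
struct parker {
	atomic_int token;
};

static void
unpark(struct parker *p)
{
	/* Post the token: wakes the parked thread, or pre-arms the
	 * next park so that it returns immediately. */
	atomic_store(&p->token, 1);
}

static void
park(struct parker *p)
{
	int expect = 1;

	/* Consume a pending token, waiting until one is posted.  The
	 * real code blocks in the kernel via _lwp_park() instead of
	 * spinning. */
	while (!atomic_compare_exchange_weak(&p->token, &expect, 0))
		expect = 1;
}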

lib/libpthread/pthread_cond.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: pthread_cond.c,v 1.65 2017/12/08 03:08:19 christos Exp $	*/
+/*	$NetBSD: pthread_cond.c,v 1.65.6.1 2020/01/26 10:55:16 martin Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -46,7 +46,7 @@
  */
 #include <sys/cdefs.h>
-__RCSID("$NetBSD: pthread_cond.c,v 1.65 2017/12/08 03:08:19 christos Exp $");
+__RCSID("$NetBSD: pthread_cond.c,v 1.65.6.1 2020/01/26 10:55:16 martin Exp $");
 
 #include <stdlib.h>
 #include <errno.h>
@@ -164,7 +164,6 @@ pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
 	self->pt_willpark = 1;
 	pthread_mutex_unlock(mutex);
 	self->pt_willpark = 0;
-	self->pt_blocking++;
 	do {
 		retval = _lwp_park(clkid, TIMER_ABSTIME,
 		    __UNCONST(abstime), self->pt_unpark,
@@ -172,8 +171,6 @@ pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
 		    __UNVOLATILE(&mutex->ptm_waiters));
 		self->pt_unpark = 0;
 	} while (retval == -1 && errno == ESRCH);
-	self->pt_blocking--;
-	membar_sync();
 	pthread_mutex_lock(mutex);
 
 	/*

lib/libpthread/pthread_int.h

@@ -1,4 +1,4 @@
-/*	$NetBSD: pthread_int.h,v 1.95.2.1 2019/12/18 20:18:20 martin Exp $	*/
+/*	$NetBSD: pthread_int.h,v 1.95.2.2 2020/01/26 10:55:16 martin Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2002, 2003, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -131,7 +131,6 @@ struct __pthread_st {
 	 */
 	int		pt_dummy1 __aligned(128);
 	struct lwpctl	*pt_lwpctl;	/* Kernel/user comms area */
-	volatile int	pt_blocking;	/* Blocking in userspace */
 	volatile int	pt_rwlocked;	/* Handed rwlock successfully */
 	volatile int	pt_signalled;	/* Received pthread_cond_signal() */
 	volatile int	pt_mutexwait;	/* Waiting to acquire mutex */

lib/libpthread/pthread_misc.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: pthread_misc.c,v 1.15 2013/03/21 16:49:12 christos Exp $	*/
+/*	$NetBSD: pthread_misc.c,v 1.15.32.1 2020/01/26 10:55:16 martin Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 #include <sys/cdefs.h>
-__RCSID("$NetBSD: pthread_misc.c,v 1.15 2013/03/21 16:49:12 christos Exp $");
+__RCSID("$NetBSD: pthread_misc.c,v 1.15.32.1 2020/01/26 10:55:16 martin Exp $");
 
 #include <errno.h>
 #include <string.h>
@@ -151,20 +151,9 @@ pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
 int
 pthread__sched_yield(void)
 {
-	pthread_t self;
-	int error;
 
 	if (__predict_false(__uselibcstub))
 		return __libc_thr_yield();
 
-	self = pthread__self();
-
-	/* Memory barrier for unlocked mutex release. */
-	membar_producer();
-	self->pt_blocking++;
-	error = _sys_sched_yield();
-	self->pt_blocking--;
-	membar_sync();
-
-	return error;
+	return _sys_sched_yield();
 }

lib/libpthread/pthread_mutex.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: pthread_mutex.c,v 1.65 2019/03/05 22:49:38 christos Exp $	*/
+/*	$NetBSD: pthread_mutex.c,v 1.65.2.1 2020/01/26 10:55:16 martin Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2003, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -47,7 +47,7 @@
  */
 #include <sys/cdefs.h>
-__RCSID("$NetBSD: pthread_mutex.c,v 1.65 2019/03/05 22:49:38 christos Exp $");
+__RCSID("$NetBSD: pthread_mutex.c,v 1.65.2.1 2020/01/26 10:55:16 martin Exp $");
 
 #include <sys/types.h>
 #include <sys/lwpctl.h>
@@ -235,10 +235,7 @@ pthread__mutex_pause(void)
 
 /*
  * Spin while the holder is running.  'lwpctl' gives us the true
- * status of the thread.  pt_blocking is set by libpthread in order
- * to cut out system call and kernel spinlock overhead on remote CPUs
- * (could represent many thousands of clock cycles).  pt_blocking also
- * makes this thread yield if the target is calling sched_yield().
+ * status of the thread.
  */
 NOINLINE static void *
 pthread__mutex_spin(pthread_mutex_t *ptm, pthread_t owner)
@@ -250,8 +247,7 @@ pthread__mutex_spin(pthread_mutex_t *ptm, pthread_t owner)
 		thread = (pthread_t)MUTEX_OWNER(owner);
 		if (thread == NULL)
 			break;
-		if (thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE ||
-		    thread->pt_blocking)
+		if (thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE)
 			break;
 		if (count < 128)
 			count += count;
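
pthread__mutex_spin() polls the holder's lwpctl block and doubles the pause between polls, capping the backoff at 128 iterations. A self-contained sketch of the same shape (struct holder and its on_cpu flag are invented stand-ins for the pt_lwpctl->lc_curcpu test):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct holder {
	_Atomic bool on_cpu;	/* stands in for lc_curcpu != LWPCTL_CPU_NONE */
};

static void
cpu_pause(void)
{
	/* Placeholder for pthread__mutex_pause() (e.g. x86 PAUSE). */
	atomic_signal_fence(memory_order_seq_cst);
}

/* Spin, with exponential backoff, while the lock holder is running. */
static void
spin_while_holder_runs(struct holder *_Atomic *ownerp)
{
	struct holder *h;
	unsigned count, i;

	for (count = 2; (h = atomic_load(ownerp)) != NULL;) {
		if (!atomic_load(&h->on_cpu))
			break;		/* holder is off-CPU: stop, go sleep */
		if (count < 128)
			count += count;	/* back off: 2, 4, 8, ..., 128 */
		for (i = count; i != 0; i--)
			cpu_pause();
	}
}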
@@ -262,10 +258,10 @@
 	return owner;
 }
 
-NOINLINE static void
+NOINLINE static bool
 pthread__mutex_setwaiters(pthread_t self, pthread_mutex_t *ptm)
 {
-	void *new, *owner;
+	void *owner, *next;
 
 	/*
 	 * Note that the mutex can become unlocked before we set
@@ -281,34 +277,16 @@ pthread__mutex_setwaiters(pthread_t self, pthread_mutex_t *ptm)
 	 * the value of ptm_owner/pt_mutexwait after we have entered
 	 * the waiters list (the CAS itself must be atomic).
 	 */
-again:
-	membar_consumer();
-	owner = ptm->ptm_owner;
-
-	if (MUTEX_OWNER(owner) == 0) {
-		pthread__mutex_wakeup(self, ptm);
-		return;
-	}
-	if (!MUTEX_HAS_WAITERS(owner)) {
-		new = (void *)((uintptr_t)owner | MUTEX_WAITERS_BIT);
-		if (atomic_cas_ptr(&ptm->ptm_owner, owner, new) != owner) {
-			goto again;
+	for (owner = ptm->ptm_owner;; owner = next) {
+		if (MUTEX_OWNER(owner) == 0) {
+			pthread__mutex_wakeup(self, ptm);
+			return true;
 		}
-	}
-
-	/*
-	 * Note that pthread_mutex_unlock() can do a non-interlocked CAS.
-	 * We cannot know if the presence of the waiters bit is stable
-	 * while the holding thread is running.  There are many assumptions;
-	 * see sys/kern/kern_mutex.c for details.  In short, we must spin if
-	 * we see that the holder is running again.
-	 */
-	membar_sync();
-	if (MUTEX_OWNER(owner) != (uintptr_t)self)
-		pthread__mutex_spin(ptm, owner);
-
-	if (membar_consumer(), !MUTEX_HAS_WAITERS(ptm->ptm_owner)) {
-		goto again;
+		if (MUTEX_HAS_WAITERS(owner)) {
+			return false;
+		}
+		next = atomic_cas_ptr(&ptm->ptm_owner, owner,
+		    (void *)((uintptr_t)owner | MUTEX_WAITERS_BIT));
 	}
 }
 
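
The rewritten pthread__mutex_setwaiters() is a classic CAS retry loop: atomic_cas_ptr() hands back the value it actually found, which either proves the bit went in or becomes the owner word for the next pass. The same pattern in portable C11, modelling the owner word as a uintptr_t with a hypothetical bit 0 as the waiters bit:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define	WAITERS_BIT	((uintptr_t)1)

/* Set the waiters bit in *lockp.  Returns true if the lock was seen
 * free, meaning the caller should wake waiters rather than sleep. */
static bool
set_waiters_bit(_Atomic uintptr_t *lockp)
{
	uintptr_t owner, seen;

	for (owner = atomic_load(lockp);; owner = seen) {
		if ((owner & ~WAITERS_BIT) == 0)
			return true;	/* no owner: do not sleep */
		if (owner & WAITERS_BIT)
			return false;	/* bit already set: safe to sleep */
		seen = owner;
		/* On failure the CAS stores the value it found in 'seen'. */
		if (atomic_compare_exchange_weak(lockp, &seen,
		    owner | WAITERS_BIT))
			return false;	/* we set the bit: safe to sleep */
	}
}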
@@ -386,9 +364,12 @@ pthread__mutex_lock_slow(pthread_mutex_t *ptm, const struct timespec *ts)
 			if (next == waiters)
 				break;
 		}
 
 		/* Set the waiters bit and block. */
-		pthread__mutex_setwaiters(self, ptm);
+		membar_sync();
+		if (pthread__mutex_setwaiters(self, ptm)) {
+			continue;
+		}
 
 		/*
 		 * We may have been awoken by the current thread above,
@@ -398,15 +379,13 @@ pthread__mutex_lock_slow(pthread_mutex_t *ptm, const struct timespec *ts)
 		 * being set to zero).  Otherwise it is unsafe to re-enter
 		 * the thread onto the waiters list.
 		 */
+		membar_sync();
 		while (self->pt_mutexwait) {
-			self->pt_blocking++;
 			error = _lwp_park(CLOCK_REALTIME, TIMER_ABSTIME,
 			    __UNCONST(ts), self->pt_unpark,
 			    __UNVOLATILE(&ptm->ptm_waiters),
 			    __UNVOLATILE(&ptm->ptm_waiters));
 			self->pt_unpark = 0;
-			self->pt_blocking--;
-			membar_sync();
 			if (__predict_true(error != -1)) {
 				continue;
 			}
@@ -471,15 +450,11 @@ pthread_mutex_unlock(pthread_mutex_t *ptm)
 	if (__predict_false(__uselibcstub))
 		return __libc_mutex_unlock_stub(ptm);
 
-	/*
-	 * Note this may be a non-interlocked CAS.  See lock_slow()
-	 * above and sys/kern/kern_mutex.c for details.
-	 */
 #ifndef	PTHREAD__ATOMIC_IS_MEMBAR
 	membar_exit();
 #endif
 	self = pthread__self();
-	value = atomic_cas_ptr_ni(&ptm->ptm_owner, self, NULL);
+	value = atomic_cas_ptr(&ptm->ptm_owner, self, NULL);
 	if (__predict_true(value == self)) {
 		pthread__smt_wake();
 		return 0;
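
With atomic_cas_ptr_ni() gone, unlock always uses a genuinely interlocked CAS, and membar_exit() supplies release ordering on platforms where the atomic op alone does not. A C11 sketch of the equivalent fast path, folding the barrier into the CAS (the owner word is modelled as a plain atomic pointer):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

/* Fast-path unlock: CAS owner -> NULL with release semantics, so all
 * writes made while holding the lock are visible to the next owner.
 * Returns false if we are not the bare owner (waiters bit set, or a
 * recursive/errorcheck mutex), i.e. the slow path is needed. */
static bool
mutex_unlock_fast(_Atomic(void *) *ownerp, void *self)
{
	void *expected = self;

	return atomic_compare_exchange_strong_explicit(ownerp, &expected,
	    NULL, memory_order_release, memory_order_relaxed);
}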
@@ -582,12 +557,9 @@ pthread__mutex_wakeup(pthread_t self, pthread_mutex_t *ptm)
 	pthread_t thread, next;
 	ssize_t n, rv;
 
-	/*
-	 * Take ownership of the current set of waiters.  No
-	 * need for a memory barrier following this, all loads
-	 * are dependent upon 'thread'.
-	 */
+	/* Take ownership of the current set of waiters. */
 	thread = atomic_swap_ptr(&ptm->ptm_waiters, NULL);
+	membar_datadep_consumer(); /* for alpha */
 	pthread__smt_wake();
 
 	for (;;) {
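
The atomic swap gives the waker private ownership of the entire waiter list in one step, so each waiter is dequeued exactly once even with concurrent unlockers, while membar_datadep_consumer() orders the dependent loads on DEC Alpha, the one machine where even address-dependent reads need a barrier. A sketch of the claim step, with a hypothetical waiter node type:

#include <stdatomic.h>
#include <stddef.h>

struct waiter {
	struct waiter *next;
	/* ...per-thread wakeup state... */
};

/* Detach the whole waiter list in one atomic step.  The consume
 * ordering plays the role of membar_datadep_consumer(): loads made
 * through the returned pointer observe initialised nodes. */
static struct waiter *
claim_waiters(struct waiter *_Atomic *headp)
{
	return atomic_exchange_explicit(headp, NULL, memory_order_consume);
}

A caller then walks the detached list, saving each node's next pointer before waking its thread, since a woken thread may immediately reuse its waiter record.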

lib/libpthread/pthread_rwlock.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: pthread_rwlock.c,v 1.34 2016/07/03 14:24:58 christos Exp $	*/
+/*	$NetBSD: pthread_rwlock.c,v 1.34.18.1 2020/01/26 10:55:16 martin Exp $	*/
 
 /*-
  * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 #include <sys/cdefs.h>
-__RCSID("$NetBSD: pthread_rwlock.c,v 1.34 2016/07/03 14:24:58 christos Exp $");
+__RCSID("$NetBSD: pthread_rwlock.c,v 1.34.18.1 2020/01/26 10:55:16 martin Exp $");
 
 #include <sys/types.h>
 #include <sys/lwpctl.h>
@@ -136,8 +136,7 @@ pthread__rwlock_spin(uintptr_t owner)
 	thread = (pthread_t)(owner & RW_THREAD);
 	if (thread == NULL || (owner & ~RW_THREAD) != RW_WRITE_LOCKED)
 		return 0;
-	if (thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE ||
-	    thread->pt_blocking)
+	if (thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE)
 		return 0;
 	for (i = 128; i != 0; i--)
 		pthread__rwlock_pause();