- Fix a few possible locking issues in execve1() and exit1(). Add a note
  that scheduler locks are special in this regard - adaptive locks cannot
  be in the path due to turnstiles.  Randomly spotted/reported by uebayasi@.
- Remove unused lwp_relock() and replace lwp_lock_retry() by simplifying
  lwp_lock() and sleepq_enter() a little.
- Give alllwp its own cache-line and mark lwp_cache pointer as read-mostly.

OK ad@
rmind 2010-12-18 01:36:19 +00:00
parent 0d6d18d2d7
commit 11a35aed4d
8 changed files with 50 additions and 107 deletions
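To make the first bullet concrete, the sketch below condenses the new ordering from the execve1() and exit1() hunks that follow. It is a hand-written illustration, not code from the tree: the LWP's lock is a scheduler spin lock reached via turnstiles, so the adaptive mutexes p->p_lock and proc_lock may only be released once that spin lock has been dropped, and the LWP lock is re-taken only immediately before mi_switch(), which expects to be entered with the LWP locked.

	/*
	 * Sketch of the corrected ordering when an LWP stops itself
	 * (condensed from the execve1() hunk below).
	 */
	lwp_lock(l);			/* LWP lock: a scheduler spin lock */
	l->l_stat = LSSTOP;
	p->p_stat = SSTOP;
	p->p_nrlwps--;
	lwp_unlock(l);			/* drop the spin lock first ...    */
	mutex_exit(p->p_lock);		/* ... then the adaptive mutexes   */
	mutex_exit(proc_lock);
	lwp_lock(l);			/* re-take the LWP lock ...        */
	mi_switch(l);			/* ... for mi_switch(), which releases it */

The fix amounts to inserting the lwp_unlock()/lwp_lock() pair around the adaptive mutex_exit() calls, so the adaptive locks are no longer released while the LWP's spin lock is held, which is exactly what the new comment added to kern_lwp.c warns against.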

kern_exec.c

@@ -1,4 +1,4 @@
/* $NetBSD: kern_exec.c,v 1.303 2010/11/30 10:43:05 dholland Exp $ */
/* $NetBSD: kern_exec.c,v 1.304 2010/12/18 01:36:19 rmind Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
@@ -59,7 +59,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.303 2010/11/30 10:43:05 dholland Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.304 2010/12/18 01:36:19 rmind Exp $");
#include "opt_ktrace.h"
#include "opt_modular.h"
@@ -1165,8 +1165,10 @@ execve1(struct lwp *l, const char *path, char * const *args,
l->l_stat = LSSTOP;
p->p_stat = SSTOP;
p->p_nrlwps--;
lwp_unlock(l);
mutex_exit(p->p_lock);
mutex_exit(proc_lock);
lwp_lock(l);
mi_switch(l);
ksiginfo_queue_drain(&kq);
KERNEL_LOCK(l->l_biglocks, l);

kern_exit.c

@@ -1,4 +1,4 @@
/* $NetBSD: kern_exit.c,v 1.230 2010/07/07 01:30:37 chs Exp $ */
/* $NetBSD: kern_exit.c,v 1.231 2010/12/18 01:36:19 rmind Exp $ */
/*-
* Copyright (c) 1998, 1999, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.230 2010/07/07 01:30:37 chs Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_exit.c,v 1.231 2010/12/18 01:36:19 rmind Exp $");
#include "opt_ktrace.h"
#include "opt_perfctrs.h"
@@ -244,7 +244,9 @@ exit1(struct lwp *l, int rv)
lwp_lock(l);
p->p_nrlwps--;
l->l_stat = LSSTOP;
lwp_unlock(l);
mutex_exit(p->p_lock);
lwp_lock(l);
mi_switch(l);
KERNEL_LOCK(l->l_biglocks, l);
mutex_enter(p->p_lock);

kern_lwp.c

@@ -1,4 +1,4 @@
/* $NetBSD: kern_lwp.c,v 1.151 2010/07/07 01:30:37 chs Exp $ */
/* $NetBSD: kern_lwp.c,v 1.152 2010/12/18 01:36:19 rmind Exp $ */
/*-
* Copyright (c) 2001, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -153,9 +153,11 @@
* each field are documented in sys/lwp.h.
*
* State transitions must be made with the LWP's general lock held,
* and may cause the LWP's lock pointer to change. Manipulation of
* and may cause the LWP's lock pointer to change. Manipulation of
* the general lock is not performed directly, but through calls to
* lwp_lock(), lwp_relock() and similar.
* lwp_lock(), lwp_unlock() and others. It should be noted that the
* adaptive locks are not allowed to be released while the LWP's lock
* is being held (unlike for other spin-locks).
*
* States and their associated locks:
*
@@ -209,7 +211,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.151 2010/07/07 01:30:37 chs Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.152 2010/12/18 01:36:19 rmind Exp $");
#include "opt_ddb.h"
#include "opt_lockdebug.h"
@@ -242,8 +244,8 @@ __KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.151 2010/07/07 01:30:37 chs Exp $");
#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>
struct lwplist alllwp = LIST_HEAD_INITIALIZER(alllwp);
static pool_cache_t lwp_cache;
static pool_cache_t lwp_cache __read_mostly;
struct lwplist alllwp __cacheline_aligned;
/* DTrace proc provider probes */
SDT_PROBE_DEFINE(proc,,,lwp_create,
@@ -284,6 +286,7 @@ void
lwpinit(void)
{
LIST_INIT(&alllwp);
lwpinit_specificdata();
lwp_sys_init();
lwp_cache = pool_cache_init(sizeof(lwp_t), MIN_LWP_ALIGNMENT, 0, 0,
@@ -1241,40 +1244,6 @@ lwp_locked(struct lwp *l, kmutex_t *mtx)
return mutex_owned(cur) && (mtx == cur || mtx == NULL);
}
/*
* Lock an LWP.
*/
kmutex_t *
lwp_lock_retry(struct lwp *l, kmutex_t *old)
{
/*
* XXXgcc ignoring kmutex_t * volatile on i386
*
* gcc version 4.1.2 20061021 prerelease (NetBSD nb1 20061021)
*/
#if 1
while (l->l_mutex != old) {
#else
for (;;) {
#endif
mutex_spin_exit(old);
old = l->l_mutex;
mutex_spin_enter(old);
/*
* mutex_enter() will have posted a read barrier. Re-test
* l->l_mutex. If it has changed, we need to try again.
*/
#if 1
}
#else
} while (__predict_false(l->l_mutex != old));
#endif
return old;
}
/*
* Lend a new mutex to an LWP. The old mutex must be held.
*/
@@ -1297,7 +1266,7 @@ lwp_unlock_to(struct lwp *l, kmutex_t *new)
{
kmutex_t *old;
KASSERT(mutex_owned(l->l_mutex));
KASSERT(lwp_locked(l, NULL));
old = l->l_mutex;
membar_exit();
@@ -1305,25 +1274,6 @@ lwp_unlock_to(struct lwp *l, kmutex_t *new)
mutex_spin_exit(old);
}
/*
* Acquire a new mutex, and donate it to an LWP. The LWP must already be
* locked.
*/
void
lwp_relock(struct lwp *l, kmutex_t *new)
{
kmutex_t *old;
KASSERT(mutex_owned(l->l_mutex));
old = l->l_mutex;
if (old != new) {
mutex_spin_enter(new);
l->l_mutex = new;
mutex_spin_exit(old);
}
}
int
lwp_trylock(struct lwp *l)
{
@@ -1346,7 +1296,6 @@ lwp_unsleep(lwp_t *l, bool cleanup)
(*l->l_syncobj->sobj_unsleep)(l, cleanup);
}
/*
* Handle exceptions for mi_userret(). Called if a member of LW_USERRET is
* set.

kern_timeout.c

@@ -1,4 +1,4 @@
/* $NetBSD: kern_timeout.c,v 1.44 2009/03/21 13:11:14 ad Exp $ */
/* $NetBSD: kern_timeout.c,v 1.45 2010/12/18 01:36:19 rmind Exp $ */
/*-
* Copyright (c) 2003, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -59,7 +59,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_timeout.c,v 1.44 2009/03/21 13:11:14 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_timeout.c,v 1.45 2010/12/18 01:36:19 rmind Exp $");
/*
* Timeouts are kept in a hierarchical timing wheel. The c_time is the
@@ -87,6 +87,7 @@ __KERNEL_RCSID(0, "$NetBSD: kern_timeout.c,v 1.44 2009/03/21 13:11:14 ad Exp $")
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/callout.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepq.h>

sys_select.c

@@ -1,4 +1,4 @@
/* $NetBSD: sys_select.c,v 1.28 2010/10/15 05:39:19 rmind Exp $ */
/* $NetBSD: sys_select.c,v 1.29 2010/12/18 01:36:19 rmind Exp $ */
/*-
* Copyright (c) 2007, 2008, 2009, 2010 The NetBSD Foundation, Inc.
@@ -84,21 +84,19 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_select.c,v 1.28 2010/10/15 05:39:19 rmind Exp $");
__KERNEL_RCSID(0, "$NetBSD: sys_select.c,v 1.29 2010/12/18 01:36:19 rmind Exp $");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/stat.h>
#include <sys/lwp.h>
#include <sys/poll.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/cpu.h>

sleepq.c

@@ -1,4 +1,4 @@
/* $NetBSD: sleepq.c,v 1.8 2010/07/23 19:14:14 pooka Exp $ */
/* $NetBSD: sleepq.c,v 1.9 2010/12/18 01:36:20 rmind Exp $ */
/*
* Copyright (c) 2008 Antti Kantee. All Rights Reserved.
@@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sleepq.c,v 1.8 2010/07/23 19:14:14 pooka Exp $");
__KERNEL_RCSID(0, "$NetBSD: sleepq.c,v 1.9 2010/12/18 01:36:20 rmind Exp $");
#include <sys/param.h>
#include <sys/condvar.h>
@@ -161,18 +161,15 @@ syncobj_noowner(wchan_t wc)
return NULL;
}
/*
* XXX: used only by callout, therefore here. should try to use
* one in kern_lwp directly.
*/
kmutex_t *
lwp_lock_retry(struct lwp *l, kmutex_t *old)
void
lwp_unlock_to(struct lwp *l, kmutex_t *new)
{
kmutex_t *old;
while (l->l_mutex != old) {
mutex_spin_exit(old);
old = l->l_mutex;
mutex_spin_enter(old);
}
return old;
KASSERT(mutex_owned(l->l_mutex));
old = l->l_mutex;
membar_exit();
l->l_mutex = new;
mutex_spin_exit(old);
}

lwp.h

@@ -1,4 +1,4 @@
/* $NetBSD: lwp.h,v 1.138 2010/09/01 19:37:58 pooka Exp $ */
/* $NetBSD: lwp.h,v 1.139 2010/12/18 01:36:20 rmind Exp $ */
/*-
* Copyright (c) 2001, 2006, 2007, 2008, 2009, 2010
@@ -299,7 +299,6 @@ void lwp_startup(lwp_t *, lwp_t *);
int lwp_locked(lwp_t *, kmutex_t *);
void lwp_setlock(lwp_t *, kmutex_t *);
void lwp_unlock_to(lwp_t *, kmutex_t *);
kmutex_t *lwp_lock_retry(lwp_t *, kmutex_t *);
void lwp_relock(lwp_t *, kmutex_t *);
int lwp_trylock(lwp_t *);
void lwp_addref(lwp_t *);
@@ -360,16 +359,18 @@ void lwp_whatis(uintptr_t, void (*)(const char *, ...));
static inline void
lwp_lock(lwp_t *l)
{
kmutex_t *old;
mutex_spin_enter(old = l->l_mutex);
kmutex_t *old = l->l_mutex;
/*
* mutex_enter() will have posted a read barrier. Re-test
* l->l_mutex. If it has changed, we need to try again.
* Note: mutex_spin_enter() will have posted a read barrier.
* Re-test l->l_mutex. If it has changed, we need to try again.
*/
if (__predict_false(l->l_mutex != old))
lwp_lock_retry(l, old);
mutex_spin_enter(old);
while (__predict_false(l->l_mutex != old)) {
mutex_spin_exit(old);
old = l->l_mutex;
mutex_spin_enter(old);
}
}
/*

sleepq.h

@@ -1,4 +1,4 @@
/* $NetBSD: sleepq.h,v 1.18 2009/11/22 18:40:26 mbalmer Exp $ */
/* $NetBSD: sleepq.h,v 1.19 2010/12/18 01:36:20 rmind Exp $ */
/*-
* Copyright (c) 2002, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -120,20 +120,13 @@ sleepq_hashlock(wchan_t wchan)
static inline void
sleepq_enter(sleepq_t *sq, lwp_t *l, kmutex_t *mp)
{
kmutex_t *omp;
/*
* Acquire the per-LWP mutex and lend it ours (the sleep queue
* lock). Once that's done we're interlocked, and so can release
* the kernel lock.
* Acquire the per-LWP mutex and lend it our sleep queue lock.
* Once interlocked, we can release the kernel lock.
*/
omp = l->l_mutex;
mutex_spin_enter(omp);
if (__predict_false(l->l_mutex != omp)) {
omp = lwp_lock_retry(l, omp);
}
l->l_mutex = mp;
mutex_spin_exit(omp);
lwp_lock(l);
lwp_unlock_to(l, mp);
KERNEL_UNLOCK_ALL(NULL, &l->l_biglocks);
}