- Remove the LWP counters. The race between park/unpark rarely occurs,
  so it's not worth counting.

- lwp_wakeup: set LW_UNPARKED on the target. Ensures that _lwp_park will
  always be awoken even if another system call eats the wakeup, e.g. as a
  result of an intervening signal. To deal with this correctly for other
  system calls will require a different approach.

- _lwp_unpark, _lwp_unpark_all: use setrunnable if the LWP is not parked
  on the same sync queue: (1) it simplifies the code a bit, as there is no
  point doing anything special for this case; (2) it makes it possible for
  p_smutex to be replaced by p_mutex; and (3) it restores the guarantee that
  the 'hint' argument really is just a hint.
This commit is contained in:
ad 2007-03-14 23:58:24 +00:00
parent bfa3094e84
commit 06aeb1d344

View File

@ -1,4 +1,4 @@
/* $NetBSD: sys_lwp.c,v 1.14 2007/03/14 23:07:27 yamt Exp $ */
/* $NetBSD: sys_lwp.c,v 1.15 2007/03/14 23:58:24 ad Exp $ */
/*-
* Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
@ -42,7 +42,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.14 2007/03/14 23:07:27 yamt Exp $");
__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.15 2007/03/14 23:58:24 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -68,40 +68,10 @@ syncobj_t lwp_park_sobj = {
sleeptab_t lwp_park_tab;
#ifdef LWP_COUNTERS
struct evcnt lwp_ev_park_early = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
NULL, "_lwp_park", "unparked early");
struct evcnt lwp_ev_park_raced = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
NULL, "_lwp_park", "raced");
struct evcnt lwp_ev_park_slowpath = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
NULL, "_lwp_park", "slowpath");
struct evcnt lwp_ev_park_miss = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
NULL, "_lwp_park", "not parked");
struct evcnt lwp_ev_park_bcast = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
NULL, "_lwp_park", "broadcast unpark");
struct evcnt lwp_ev_park_targ = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
NULL, "_lwp_park", "targeted unpark");
struct evcnt lwp_ev_park = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
NULL, "_lwp_park", "parked");
#define LWP_COUNT(ev, val) (ev).ev_count += (val) /* XXXSMP */
#else
#define LWP_COUNT(ev, val) /* nothing */
#endif
void
lwp_sys_init(void)
{
sleeptab_init(&lwp_park_tab);
#ifdef LWP_COUNTERS
evcnt_attach_static(&lwp_ev_park_early);
evcnt_attach_static(&lwp_ev_park_slowpath);
evcnt_attach_static(&lwp_ev_park_raced);
evcnt_attach_static(&lwp_ev_park_miss);
evcnt_attach_static(&lwp_ev_park_bcast);
evcnt_attach_static(&lwp_ev_park_targ);
evcnt_attach_static(&lwp_ev_park);
#endif
}
/* ARGSUSED */
@ -292,7 +262,7 @@ sys__lwp_wakeup(struct lwp *l, void *v, register_t *retval)
}
lwp_lock(t);
t->l_flag |= LW_CANCELLED;
t->l_flag |= (LW_CANCELLED | LW_UNPARKED);
if (t->l_stat != LSSLEEP) {
error = ENODEV;
@ -491,7 +461,6 @@ sys__lwp_park(struct lwp *l, void *v, register_t *retval)
l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
sleepq_lwp_unlock(l);
sleepq_unlock(sq);
LWP_COUNT(lwp_ev_park_early, 1);
return EALREADY;
}
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
@ -503,7 +472,6 @@ sys__lwp_park(struct lwp *l, void *v, register_t *retval)
* put our stack up to be recycled. If it's binned, a trampoline
* function could call sleepq_unblock() on our behalf.
*/
LWP_COUNT(lwp_ev_park, 1);
KERNEL_UNLOCK_ALL(l, &l->l_biglocks); /* XXX for compat32 */
sleepq_block(sq, sched_kpri(l), wchan, "parked", timo, 1,
&lwp_park_sobj);
@ -550,46 +518,43 @@ sys__lwp_unpark(struct lwp *l, void *v, register_t *retval)
if (t->l_proc == p && t->l_lid == target)
break;
if (t == NULL) {
/*
* The LWP hasn't parked yet. Take the hit
* and mark the operation as pending.
*/
LWP_COUNT(lwp_ev_park_slowpath, 1);
if (__predict_true(t != NULL)) {
swapin = sleepq_remove(sq, t);
sleepq_unlock(sq);
mutex_enter(&p->p_smutex);
if ((t = lwp_find(p, target)) == NULL) {
mutex_exit(&p->p_smutex);
return ESRCH;
}
lwp_lock(t);
mutex_exit(&p->p_smutex);
if (t->l_sleepq == sq) {
/*
* We have raced, and the LWP is now parked.
* Wake it in the usual way.
*/
KASSERT(t->l_syncobj == &lwp_park_sobj);
KASSERT(lwp_locked(t, sq->sq_mutex));
LWP_COUNT(lwp_ev_park_raced, 1);
} else {
/*
* It may not have parked yet, or is parked
* on a different user sync object. The
* latter is an application error.
*/
t->l_flag |= LW_UNPARKED;
lwp_unlock(t);
return 0;
}
if (swapin)
uvm_kick_scheduler();
return 0;
}
swapin = sleepq_remove(sq, t);
LWP_COUNT(lwp_ev_park_targ, 1);
/*
* The LWP hasn't parked yet. Take the hit and mark the
* operation as pending.
*/
sleepq_unlock(sq);
if (swapin)
uvm_kick_scheduler();
mutex_enter(&p->p_smutex);
if ((t = lwp_find(p, target)) == NULL) {
mutex_exit(&p->p_smutex);
return ESRCH;
}
lwp_lock(t);
/*
* It may not have parked yet, we may have raced, or it
* is parked on a different user sync object.
*/
if (t->l_syncobj == &lwp_park_sobj) {
/* Releases the LWP lock. */
setrunnable(t);
} else {
/*
* Set the operation pending. The next call to _lwp_park
* will return early.
*/
t->l_flag |= LW_UNPARKED;
lwp_unlock(t);
}
mutex_exit(&p->p_smutex);
return 0;
}
@ -607,7 +572,7 @@ sys__lwp_unpark_all(struct lwp *l, void *v, register_t *retval)
wchan_t wchan;
lwpid_t targets[32], *tp, *tpp, *tmax, target;
int swapin, error;
u_int ntargets, unparked;
u_int ntargets;
size_t sz;
p = l->l_proc;
@ -648,7 +613,6 @@ sys__lwp_unpark_all(struct lwp *l, void *v, register_t *retval)
return error;
}
unparked = 0;
swapin = 0;
wchan = lwp_park_wchan(p, SCARG(uap, hint));
sq = sleeptab_lookup(&lwp_park_tab, wchan);
@ -666,7 +630,6 @@ sys__lwp_unpark_all(struct lwp *l, void *v, register_t *retval)
if (t != NULL) {
swapin |= sleepq_remove(sq, t);
unparked++;
continue;
}
@ -674,7 +637,6 @@ sys__lwp_unpark_all(struct lwp *l, void *v, register_t *retval)
* The LWP hasn't parked yet. Take the hit and
* mark the operation as pending.
*/
LWP_COUNT(lwp_ev_park_slowpath, 1);
sleepq_unlock(sq);
mutex_enter(&p->p_smutex);
if ((t = lwp_find(p, target)) == NULL) {
@ -683,28 +645,25 @@ sys__lwp_unpark_all(struct lwp *l, void *v, register_t *retval)
continue;
}
lwp_lock(t);
mutex_exit(&p->p_smutex);
if (t->l_sleepq == sq) {
/*
* We have raced, and the LWP is now parked.
* Wake it in the usual way.
*/
KASSERT(t->l_syncobj == &lwp_park_sobj);
KASSERT(lwp_locked(t, sq->sq_mutex));
LWP_COUNT(lwp_ev_park_raced, 1);
swapin |= sleepq_remove(sq, t);
unparked++;
/*
* It may not have parked yet, we may have raced, or
* it is parked on a different user sync object.
*/
if (t->l_syncobj == &lwp_park_sobj) {
/* Releases the LWP lock. */
setrunnable(t);
} else {
/*
* It may not have parked yet, or is parked
* on a different user sync object. The
* latter is an application error.
* Set the operation pending. The next call to
* _lwp_park will return early.
*/
t->l_flag |= LW_UNPARKED;
lwp_unlock(t);
sleepq_lock(sq);
}
mutex_exit(&p->p_smutex);
sleepq_lock(sq);
}
sleepq_unlock(sq);
@ -715,8 +674,6 @@ sys__lwp_unpark_all(struct lwp *l, void *v, register_t *retval)
}
if (swapin)
uvm_kick_scheduler();
LWP_COUNT(lwp_ev_park_bcast, unparked);
LWP_COUNT(lwp_ev_park_miss, (ntargets - unparked));
/* XXXAD return unparked; */
return 0;
}