- cv_wakeup: remove this. There are ~zero situations where it's useful.

- cv_wait and friends: after resuming execution, check to see if we have
  been restarted as a result of cv_signal. If we have, but cannot take
  the wakeup (because of, e.g., a pending Unix signal or timeout), then
  try to ensure that another LWP sees it. This is necessary because there
  may be multiple waiters, and at least one should take the wakeup if
  possible (see the sketch below). Prompted by a discussion with pooka@.
- typedef struct lwp lwp_t;
- int -> bool, struct lwp -> lwp_t in a few places.
ad 2007-03-29 17:34:39 +00:00
parent 9982390dd6
commit 6cf46baa77
5 changed files with 137 additions and 139 deletions
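
To illustrate the cv_wait change (second item above), here is a minimal
consumer sketch; it is not part of this commit. The queue `q', its lock
`q_lock', the condvar `q_cv', and the helpers `queue_empty()'/`take_item()'
are hypothetical; only the cv_timedwait() API comes from the diff below.
If two LWPs block in this loop and cv_signal() happens to pick the one
whose timeout has already fired, the new cv_exit() logic re-issues the
signal so the other waiter still takes the wakeup instead of it being lost.

	/*
	 * Hypothetical consumer (illustration only).  q, q_lock, q_cv,
	 * queue_empty() and take_item() are not real kernel symbols.
	 */
	int
	consume(void)
	{
		int error = 0;

		mutex_enter(&q_lock);
		while (queue_empty(&q) && error == 0) {
			/* Returns holding q_lock; EWOULDBLOCK on timeout. */
			error = cv_timedwait(&q_cv, &q_lock, hz);
		}
		if (error == 0)
			take_item(&q);
		mutex_exit(&q_lock);
		return error;
	}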

--- a/sys/kern/kern_condvar.c
+++ b/sys/kern/kern_condvar.c
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_condvar.c,v 1.5 2007/02/27 15:07:28 yamt Exp $ */
+/* $NetBSD: kern_condvar.c,v 1.6 2007/03/29 17:34:39 ad Exp $ */
 /*-
  * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
@@ -45,7 +45,7 @@
  */
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_condvar.c,v 1.5 2007/02/27 15:07:28 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_condvar.c,v 1.6 2007/03/29 17:34:39 ad Exp $");
 #include <sys/param.h>
 #include <sys/proc.h>
@@ -54,8 +54,8 @@ __KERNEL_RCSID(0, "$NetBSD: kern_condvar.c,v 1.5 2007/02/27 15:07:28 yamt Exp $"
 #include <sys/condvar.h>
 #include <sys/sleepq.h>
-static void cv_unsleep(struct lwp *);
-static void cv_changepri(struct lwp *, pri_t);
+static void cv_unsleep(lwp_t *);
+static void cv_changepri(lwp_t *, pri_t);
 syncobj_t cv_syncobj = {
 	SOBJ_SLEEPQ_SORTED,
@@ -102,12 +102,13 @@ cv_destroy(kcondvar_t *cv)
  * condition variable, and increment the number of waiters.
  */
 static inline sleepq_t *
-cv_enter(kcondvar_t *cv, kmutex_t *mtx, struct lwp *l)
+cv_enter(kcondvar_t *cv, kmutex_t *mtx, lwp_t *l)
 {
 	sleepq_t *sq;
 	KASSERT(cv->cv_wmesg != NULL);
+	l->l_cv_signalled = 0;
 	sq = sleeptab_lookup(&sleeptab, cv);
 	cv->cv_waiters++;
 	sleepq_enter(sq, l);
@@ -116,6 +117,27 @@ cv_enter(kcondvar_t *cv, kmutex_t *mtx, struct lwp *l)
 	return sq;
 }
+/*
+ * cv_exit:
+ *
+ *	After resuming execution, check to see if we have been restarted
+ *	as a result of cv_signal().  If we have, but cannot take the
+ *	wakeup (because of eg a pending Unix signal or timeout) then try
+ *	to ensure that another LWP sees it.  This is necessary because
+ *	there may be multiple waiters, and at least one should take the
+ *	wakeup if possible.
+ */
+static inline int
+cv_exit(kcondvar_t *cv, kmutex_t *mtx, lwp_t *l, const int error)
+{
+	mutex_enter(mtx);
+	if (__predict_false(error != 0) && l->l_cv_signalled != 0)
+		cv_signal(cv);
+	return error;
+}
 /*
  * cv_unsleep:
  *
@@ -125,7 +147,7 @@ cv_enter(kcondvar_t *cv, kmutex_t *mtx, struct lwp *l)
  * called with the LWP locked, and must return it unlocked.
  */
 static void
-cv_unsleep(struct lwp *l)
+cv_unsleep(lwp_t *l)
 {
 	uintptr_t addr;
@@ -144,7 +166,7 @@ cv_unsleep(struct lwp *l)
  * Adjust the real (user) priority of an LWP blocked on a CV.
  */
 static void
-cv_changepri(struct lwp *l, pri_t pri)
+cv_changepri(lwp_t *l, pri_t pri)
 {
 	sleepq_t *sq = l->l_sleepq;
 	pri_t opri;
@@ -169,7 +191,7 @@ cv_changepri(struct lwp *l, pri_t pri)
 void
 cv_wait(kcondvar_t *cv, kmutex_t *mtx)
 {
-	struct lwp *l = curlwp;
+	lwp_t *l = curlwp;
 	sleepq_t *sq;
 	LOCK_ASSERT(mutex_owned(mtx));
@@ -182,7 +204,7 @@ cv_wait(kcondvar_t *cv, kmutex_t *mtx)
 	sq = cv_enter(cv, mtx, l);
 	sleepq_block(sq, sched_kpri(l), cv, cv->cv_wmesg, 0, 0, &cv_syncobj);
 	(void)sleepq_unblock(0, 0);
-	mutex_enter(mtx);
+	(void)cv_exit(cv, mtx, l, 0);
 }
 /*
@@ -196,7 +218,7 @@ cv_wait(kcondvar_t *cv, kmutex_t *mtx)
 int
 cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
 {
-	struct lwp *l = curlwp;
+	lwp_t *l = curlwp;
 	sleepq_t *sq;
 	int error;
@@ -208,9 +230,7 @@ cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
 	sq = cv_enter(cv, mtx, l);
 	sleepq_block(sq, sched_kpri(l), cv, cv->cv_wmesg, 0, 1, &cv_syncobj);
 	error = sleepq_unblock(0, 1);
-	mutex_enter(mtx);
-	return error;
+	return cv_exit(cv, mtx, l, error);
 }
 /*
@@ -223,7 +243,7 @@ cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
 int
 cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int timo)
 {
-	struct lwp *l = curlwp;
+	lwp_t *l = curlwp;
 	sleepq_t *sq;
 	int error;
@@ -235,9 +255,7 @@ cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int timo)
 	sq = cv_enter(cv, mtx, l);
 	sleepq_block(sq, sched_kpri(l), cv, cv->cv_wmesg, timo, 0, &cv_syncobj);
 	error = sleepq_unblock(timo, 0);
-	mutex_enter(mtx);
-	return error;
+	return cv_exit(cv, mtx, l, error);
 }
 /*
@@ -252,7 +270,7 @@ cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int timo)
 int
 cv_timedwait_sig(kcondvar_t *cv, kmutex_t *mtx, int timo)
 {
-	struct lwp *l = curlwp;
+	lwp_t *l = curlwp;
 	sleepq_t *sq;
 	int error;
@@ -264,9 +282,7 @@ cv_timedwait_sig(kcondvar_t *cv, kmutex_t *mtx, int timo)
 	sq = cv_enter(cv, mtx, l);
 	sleepq_block(sq, sched_kpri(l), cv, cv->cv_wmesg, timo, 1, &cv_syncobj);
 	error = sleepq_unblock(timo, 1);
-	mutex_enter(mtx);
-	return error;
+	return cv_exit(cv, mtx, l, error);
 }
 /*
@@ -278,6 +294,7 @@ cv_timedwait_sig(kcondvar_t *cv, kmutex_t *mtx, int timo)
 void
 cv_signal(kcondvar_t *cv)
 {
+	lwp_t *l;
 	sleepq_t *sq;
 	if (cv->cv_waiters == 0)
@@ -292,7 +309,8 @@ cv_signal(kcondvar_t *cv)
 	sq = sleeptab_lookup(&sleeptab, cv);
 	if (cv->cv_waiters != 0) {
 		cv->cv_waiters--;
-		sleepq_wake(sq, cv, 1);
+		l = sleepq_wake(sq, cv, 1);
+		l->l_cv_signalled = 1;
 	} else
 		sleepq_unlock(sq);
 }
@@ -320,27 +338,6 @@ cv_broadcast(kcondvar_t *cv)
 		sleepq_unlock(sq);
 }
-/*
- * cv_wakeup:
- *
- *	Wake all LWPs waiting on a condition variable.  The interlock
- *	need not be held, but it is the caller's responsibility to
- *	ensure correct synchronization.
- */
-void
-cv_wakeup(kcondvar_t *cv)
-{
-	sleepq_t *sq;
-	u_int cnt;
-	sq = sleeptab_lookup(&sleeptab, cv);
-	if ((cnt = cv->cv_waiters) != 0) {
-		cv->cv_waiters = 0;
-		sleepq_wake(sq, cv, cnt);
-	} else
-		sleepq_unlock(sq);
-}
 /*
  * cv_has_waiters:
  *

--- a/sys/kern/kern_sleepq.c
+++ b/sys/kern/kern_sleepq.c
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_sleepq.c,v 1.7 2007/02/27 15:07:29 yamt Exp $ */
+/* $NetBSD: kern_sleepq.c,v 1.8 2007/03/29 17:34:39 ad Exp $ */
 /*-
  * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
@@ -42,7 +42,7 @@
  */
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.7 2007/02/27 15:07:29 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.8 2007/03/29 17:34:39 ad Exp $");
 #include "opt_multiprocessor.h"
 #include "opt_lockdebug.h"
@@ -63,8 +63,8 @@ __KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.7 2007/02/27 15:07:29 yamt Exp $")
 #include <uvm/uvm_extern.h>
-int sleepq_sigtoerror(struct lwp *, int);
-void updatepri(struct lwp *);
+int sleepq_sigtoerror(lwp_t *, int);
+void updatepri(lwp_t *);
 /* General purpose sleep table, used by ltsleep() and condition variables. */
 sleeptab_t sleeptab;
@@ -114,7 +114,7 @@ sleepq_init(sleepq_t *sq, kmutex_t *mtx)
  * to bring the LWP into memory.
  */
 int
-sleepq_remove(sleepq_t *sq, struct lwp *l)
+sleepq_remove(sleepq_t *sq, lwp_t *l)
 {
 	struct cpu_info *ci;
@@ -193,9 +193,9 @@ sleepq_remove(sleepq_t *sq, struct lwp *l)
  * Insert an LWP into the sleep queue, optionally sorting by priority.
  */
 inline void
-sleepq_insert(sleepq_t *sq, struct lwp *l, syncobj_t *sobj)
+sleepq_insert(sleepq_t *sq, lwp_t *l, syncobj_t *sobj)
 {
-	struct lwp *l2;
+	lwp_t *l2;
 	const int pri = lwp_eprio(l);
 	if ((sobj->sobj_flag & SOBJ_SLEEPQ_SORTED) != 0) {
@@ -214,7 +214,7 @@ void
 sleepq_enqueue(sleepq_t *sq, pri_t pri, wchan_t wchan, const char *wmesg,
     syncobj_t *sobj)
 {
-	struct lwp *l = curlwp;
+	lwp_t *l = curlwp;
 	KASSERT(mutex_owned(sq->sq_mutex));
 	KASSERT(l->l_stat == LSONPROC);
@@ -235,9 +235,9 @@ sleepq_enqueue(sleepq_t *sq, pri_t pri, wchan_t wchan, const char *wmesg,
 }
 void
-sleepq_switch(int timo, int catch)
+sleepq_switch(int timo, bool catch)
 {
-	struct lwp *l = curlwp;
+	lwp_t *l = curlwp;
 #ifdef KTRACE
 	if (KTRPOINT(l->l_proc, KTR_CSW))
@@ -289,7 +289,7 @@ sleepq_switch(int timo, int catch)
  */
 void
 sleepq_block(sleepq_t *sq, pri_t pri, wchan_t wchan, const char *wmesg,
-    int timo, int catch, syncobj_t *sobj)
+    int timo, bool catch, syncobj_t *sobj)
 {
 	sleepq_enqueue(sq, pri, wchan, wmesg, sobj);
@@ -308,11 +308,11 @@ sleepq_block(sleepq_t *sq, pri_t pri, wchan_t wchan, const char *wmesg,
  * those they went asleep on.
  */
 int
-sleepq_unblock(int timo, int catch)
+sleepq_unblock(int timo, bool catch)
 {
 	int error, expired, sig;
 	struct proc *p;
-	struct lwp *l;
+	lwp_t *l;
 	l = curlwp;
 	error = l->l_sleeperr;
@@ -361,10 +361,10 @@ sleepq_unblock(int timo, int catch)
  *
  *	Wake zero or more LWPs blocked on a single wait channel.
  */
-void
+lwp_t *
 sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected)
 {
-	struct lwp *l, *next;
+	lwp_t *l, *next;
 	int swapin = 0;
 	KASSERT(mutex_owned(sq->sq_mutex));
@@ -387,6 +387,8 @@ sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected)
	 */
 	if (swapin)
 		uvm_kick_scheduler();
+	return l;
 }
 /*
@@ -397,7 +399,7 @@ sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected)
  * always release it.
  */
 void
-sleepq_unsleep(struct lwp *l)
+sleepq_unsleep(lwp_t *l)
 {
 	sleepq_t *sq = l->l_sleepq;
 	int swapin;
@@ -422,7 +424,7 @@ sleepq_unsleep(struct lwp *l)
 void
 sleepq_timeout(void *arg)
 {
-	struct lwp *l = arg;
+	lwp_t *l = arg;
 	/*
	 * Lock the LWP.  Assuming it's still on the sleep queue, its
@@ -445,7 +447,7 @@ sleepq_timeout(void *arg)
  * Given a signal number, interpret and return an error code.
  */
 int
-sleepq_sigtoerror(struct lwp *l, int sig)
+sleepq_sigtoerror(lwp_t *l, int sig)
 {
 	struct proc *p = l->l_proc;
 	int error;
@@ -494,7 +496,7 @@ sleepq_abort(kmutex_t *mtx, int unlock)
  * assumed to have been fixed at the time of insertion into the queue.
  */
 void
-sleepq_changepri(struct lwp *l, pri_t pri)
+sleepq_changepri(lwp_t *l, pri_t pri)
 {
 	KASSERT(lwp_locked(l, l->l_sleepq->sq_mutex));
@@ -502,7 +504,7 @@ sleepq_changepri(struct lwp *l, pri_t pri)
 }
 void
-sleepq_lendpri(struct lwp *l, pri_t pri)
+sleepq_lendpri(lwp_t *l, pri_t pri)
 {
 	sleepq_t *sq = l->l_sleepq;
 	pri_t opri;

--- a/sys/sys/condvar.h
+++ b/sys/sys/condvar.h
@@ -1,4 +1,4 @@
-/* $NetBSD: condvar.h,v 1.2 2007/02/09 21:55:37 ad Exp $ */
+/* $NetBSD: condvar.h,v 1.3 2007/03/29 17:34:39 ad Exp $ */
 /*-
  * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
@@ -65,7 +65,6 @@ int cv_timedwait_sig(kcondvar_t *, kmutex_t *, int);
 void cv_signal(kcondvar_t *);
 void cv_broadcast(kcondvar_t *);
-void cv_wakeup(kcondvar_t *);
 int cv_has_waiters(kcondvar_t *);

--- a/sys/sys/lwp.h
+++ b/sys/sys/lwp.h
@@ -1,4 +1,4 @@
-/* $NetBSD: lwp.h,v 1.57 2007/03/21 18:25:59 ad Exp $ */
+/* $NetBSD: lwp.h,v 1.58 2007/03/29 17:34:39 ad Exp $ */
 /*-
  * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
@@ -58,6 +58,7 @@
  * Lightweight process.  Field markings and the corresponding locks:
  *
  * a: proclist_mutex
+ * c: condition variable interlock, passed to cv_wait()
  * l: *l_mutex
  * p: l_proc->p_smutex
  * s: sched_mutex, which may or may not be referenced by l_mutex
@@ -68,7 +69,7 @@
  * Fields are clustered together by usage (to increase the likelyhood
  * of cache hits) and by size (to reduce dead space in the structure).
  */
-struct lwp {
+typedef struct lwp {
 	/* Scheduling and overall state */
 	struct lwp *l_forw;	/* s: run queue */
 	struct lwp *l_back;	/* s: run queue */
@@ -133,7 +134,8 @@ struct lwp {
 	void *l_private;	/* !: svr4-style lwp-private data */
 	struct kauth_cred *l_cred;	/* !: cached credentials */
 	void *l_emuldata;	/* !: kernel lwp-private data */
-	u_short l_acflag;	/* !: accounting flags */
+	u_int8_t l_cv_signalled;	/* c: restarted by cv_signal() */
+	u_int8_t l_unused;	/* !: currently unused */
 	u_short l_shlocks;	/* !: lockdebug: shared locks held */
 	u_short l_exlocks;	/* !: lockdebug: excl. locks held */
 	u_short l_locks;	/* !: lockmgr count of held locks */
@@ -143,7 +145,7 @@ struct lwp {
 	/* These are only used by 'options SYSCALL_TIMES' */
 	uint32_t l_syscall_time;	/* !: time epoch for current syscall */
 	uint64_t *l_syscall_counter;	/* !: counter for current process */
-};
+} lwp_t;
 #if !defined(USER_TO_UAREA)
 #if !defined(UAREA_USER_OFFSET)
@@ -162,7 +164,7 @@ extern struct lwplist alllwp;	/* List of all LWPs. */
 extern struct pool lwp_uc_pool;	/* memory pool for LWP startup args */
-extern struct lwp lwp0;	/* LWP for proc0 */
+extern lwp_t lwp0;	/* LWP for proc0 */
 #endif
 /* These flags are kept in l_flag. */
@@ -228,64 +230,64 @@ do { \
 	lwp_update_creds(l); \
 } while (/* CONSTCOND */ 0)
-void preempt (void);
-int mi_switch (struct lwp *, struct lwp *);
+void preempt(void);
+int mi_switch(lwp_t *, lwp_t *);
 #ifndef remrunqueue
-void remrunqueue (struct lwp *);
+void remrunqueue(lwp_t *);
 #endif
-void resetpriority (struct lwp *);
-void setrunnable (struct lwp *);
+void resetpriority(lwp_t *);
+void setrunnable(lwp_t *);
 #ifndef setrunqueue
-void setrunqueue (struct lwp *);
+void setrunqueue(lwp_t *);
 #endif
 #ifndef nextrunqueue
-struct lwp *nextrunqueue(void);
+lwp_t *nextrunqueue(void);
 #endif
-void unsleep (struct lwp *);
+void unsleep(lwp_t *);
 #ifndef cpu_switch
-int cpu_switch (struct lwp *, struct lwp *);
+int cpu_switch(lwp_t *, lwp_t *);
 #endif
 #ifndef cpu_switchto
-void cpu_switchto (struct lwp *, struct lwp *);
+void cpu_switchto(lwp_t *, lwp_t *);
 #endif
-int lwp_locked(struct lwp *, kmutex_t *);
-void lwp_setlock(struct lwp *, kmutex_t *);
-void lwp_unlock_to(struct lwp *, kmutex_t *);
-void lwp_lock_retry(struct lwp *, kmutex_t *);
-void lwp_relock(struct lwp *, kmutex_t *);
-int lwp_trylock(struct lwp *);
-void lwp_addref(struct lwp *);
-void lwp_delref(struct lwp *);
-void lwp_drainrefs(struct lwp *);
+int lwp_locked(lwp_t *, kmutex_t *);
+void lwp_setlock(lwp_t *, kmutex_t *);
+void lwp_unlock_to(lwp_t *, kmutex_t *);
+void lwp_lock_retry(lwp_t *, kmutex_t *);
+void lwp_relock(lwp_t *, kmutex_t *);
+int lwp_trylock(lwp_t *);
+void lwp_addref(lwp_t *);
+void lwp_delref(lwp_t *);
+void lwp_drainrefs(lwp_t *);
 /* Flags for _lwp_wait1 */
 #define LWPWAIT_EXITCONTROL 0x00000001
 void lwpinit(void);
-int lwp_wait1(struct lwp *, lwpid_t, lwpid_t *, int);
-void lwp_continue(struct lwp *);
-void cpu_setfunc(struct lwp *, void (*)(void *), void *);
+int lwp_wait1(lwp_t *, lwpid_t, lwpid_t *, int);
+void lwp_continue(lwp_t *);
+void cpu_setfunc(lwp_t *, void (*)(void *), void *);
 void startlwp(void *);
-void upcallret(struct lwp *);
-void lwp_exit(struct lwp *);
-void lwp_exit2(struct lwp *);
-struct lwp *proc_representative_lwp(struct proc *, int *, int);
-int lwp_suspend(struct lwp *, struct lwp *);
-int lwp_create1(struct lwp *, const void *, size_t, u_long, lwpid_t *);
-void lwp_update_creds(struct lwp *);
-struct lwp *lwp_find(struct proc *, int);
-void lwp_userret(struct lwp *);
-void lwp_need_userret(struct lwp *);
-void lwp_free(struct lwp *, bool, bool);
+void upcallret(lwp_t *);
+void lwp_exit(lwp_t *);
+void lwp_exit2(lwp_t *);
+lwp_t *proc_representative_lwp(struct proc *, int *, int);
+int lwp_suspend(lwp_t *, lwp_t *);
+int lwp_create1(lwp_t *, const void *, size_t, u_long, lwpid_t *);
+void lwp_update_creds(lwp_t *);
+lwp_t *lwp_find(struct proc *, int);
+void lwp_userret(lwp_t *);
+void lwp_need_userret(lwp_t *);
+void lwp_free(lwp_t *, bool, bool);
 void lwp_sys_init(void);
 int lwp_specific_key_create(specificdata_key_t *, specificdata_dtor_t);
 void lwp_specific_key_delete(specificdata_key_t);
-void lwp_initspecific(struct lwp *);
-void lwp_finispecific(struct lwp *);
-void * lwp_getspecific(specificdata_key_t);
+void lwp_initspecific(lwp_t *);
+void lwp_finispecific(lwp_t *);
+void *lwp_getspecific(specificdata_key_t);
 #if defined(_LWP_API_PRIVATE)
-void * _lwp_getspecific_by_lwp(struct lwp *, specificdata_key_t);
+void *_lwp_getspecific_by_lwp(lwp_t *, specificdata_key_t);
 #endif
 void lwp_setspecific(specificdata_key_t, void *);
@@ -293,7 +295,7 @@ void lwp_setspecific(specificdata_key_t, void *);
  * Lock an LWP. XXXLKM
  */
 static inline void
-lwp_lock(struct lwp *l)
+lwp_lock(lwp_t *l)
 {
 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
 	kmutex_t *old;
@@ -315,7 +317,7 @@ lwp_lock(struct lwp *l)
  * Unlock an LWP. XXXLKM
  */
 static inline void
-lwp_unlock(struct lwp *l)
+lwp_unlock(lwp_t *l)
 {
 	LOCK_ASSERT(mutex_owned(l->l_mutex));
@@ -323,7 +325,7 @@ lwp_unlock(struct lwp *l)
 }
 static inline void
-lwp_changepri(struct lwp *l, pri_t pri)
+lwp_changepri(lwp_t *l, pri_t pri)
 {
 	LOCK_ASSERT(mutex_owned(l->l_mutex));
@@ -334,7 +336,7 @@ lwp_changepri(struct lwp *l, pri_t pri)
 }
 static inline void
-lwp_lendpri(struct lwp *l, pri_t pri)
+lwp_lendpri(lwp_t *l, pri_t pri)
 {
 	LOCK_ASSERT(mutex_owned(l->l_mutex));
@@ -345,7 +347,7 @@ lwp_lendpri(struct lwp *l, pri_t pri)
 }
 static inline void
-lwp_unsleep(struct lwp *l)
+lwp_unsleep(lwp_t *l)
 {
 	LOCK_ASSERT(mutex_owned(l->l_mutex));
@@ -353,14 +355,14 @@ lwp_unsleep(struct lwp *l)
 }
 static inline int
-lwp_eprio(struct lwp *l)
+lwp_eprio(lwp_t *l)
 {
 	return MIN(l->l_inheritedprio, l->l_priority);
 }
-int newlwp(struct lwp *, struct proc *, vaddr_t, bool, int,
-    void *, size_t, void (*)(void *), void *, struct lwp **);
+int newlwp(lwp_t *, struct proc *, vaddr_t, bool, int,
+    void *, size_t, void (*)(void *), void *, lwp_t **);
 /*
  * Once we have per-CPU run queues and a modular scheduler interface,
@@ -374,14 +376,14 @@ static inline void
 sched_lock(const int heldmutex)
 {
 	(void)heldmutex;
-	mutex_enter(&sched_mutex);
+	mutex_spin_enter(&sched_mutex);
 }
 static inline void
 sched_unlock(const int heldmutex)
 {
 	(void)heldmutex;
-	mutex_exit(&sched_mutex);
+	mutex_spin_exit(&sched_mutex);
 }
 #else /* defined(MULTIPROCESSOR) || defined(LOCKDEBUG) */
@@ -390,14 +392,14 @@ static inline void
 sched_lock(const int heldmutex)
 {
 	if (!heldmutex)
-		mutex_enter(&sched_mutex);
+		mutex_spin_enter(&sched_mutex);
 }
 static inline void
-sched_unlock(int heldmutex)
+sched_unlock(const int heldmutex)
 {
 	if (!heldmutex)
-		mutex_exit(&sched_mutex);
+		mutex_spin_exit(&sched_mutex);
 }
 #endif /* defined(MULTIPROCESSOR) || defined(LOCKDEBUG) */

--- a/sys/sys/sleepq.h
+++ b/sys/sys/sleepq.h
@@ -1,4 +1,4 @@
-/* $NetBSD: sleepq.h,v 1.5 2007/02/27 15:07:28 yamt Exp $ */
+/* $NetBSD: sleepq.h,v 1.6 2007/03/29 17:34:39 ad Exp $ */
 /*-
  * Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc.
@@ -58,8 +58,6 @@
 #define SLEEPTAB_HASH_MASK (SLEEPTAB_HASH_SIZE - 1)
 #define SLEEPTAB_HASH(wchan) (((uintptr_t)(wchan) >> 8) & SLEEPTAB_HASH_MASK)
-struct lwp;
 typedef struct sleepq {
 	TAILQ_HEAD(, lwp) sq_queue;	/* queue of waiters */
 	kmutex_t *sq_mutex;	/* mutex on struct & queue */
@@ -88,20 +86,20 @@ typedef struct sleeptab {
 #endif /* defined(MULTIPROCESSOR) || defined(LOCKDEBUG) */
 void sleepq_init(sleepq_t *, kmutex_t *);
-int sleepq_remove(sleepq_t *, struct lwp *);
-void sleepq_block(sleepq_t *, pri_t, wchan_t, const char *, int, int,
+int sleepq_remove(sleepq_t *, lwp_t *);
+void sleepq_block(sleepq_t *, pri_t, wchan_t, const char *, int, bool,
     syncobj_t *);
-void sleepq_unsleep(struct lwp *);
+void sleepq_unsleep(lwp_t *);
 void sleepq_timeout(void *);
-void sleepq_wake(sleepq_t *, wchan_t, u_int);
+lwp_t *sleepq_wake(sleepq_t *, wchan_t, u_int);
 int sleepq_abort(kmutex_t *, int);
-void sleepq_changepri(struct lwp *, pri_t);
-void sleepq_lendpri(struct lwp *, pri_t);
-int sleepq_unblock(int, int);
-void sleepq_insert(sleepq_t *, struct lwp *, syncobj_t *);
+void sleepq_changepri(lwp_t *, pri_t);
+void sleepq_lendpri(lwp_t *, pri_t);
+int sleepq_unblock(int, bool);
+void sleepq_insert(sleepq_t *, lwp_t *, syncobj_t *);
 void sleepq_enqueue(sleepq_t *, pri_t, wchan_t, const char *, syncobj_t *);
-void sleepq_switch(int, int);
+void sleepq_switch(int, bool);
 void sleeptab_init(sleeptab_t *);
@@ -112,8 +110,8 @@ extern sleeptab_t sleeptab;
  *
  * XXX This only exists because panic() is broken.
  */
-static inline int
-sleepq_dontsleep(struct lwp *l)
+static inline bool
+sleepq_dontsleep(lwp_t *l)
 {
 	extern int cold;
@@ -143,7 +141,7 @@ sleeptab_lookup(sleeptab_t *st, wchan_t wchan)
  * safely released.
  */
 static inline void
-sleepq_enter(sleepq_t *sq, struct lwp *l)
+sleepq_enter(sleepq_t *sq, lwp_t *l)
 {
 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
 	/*
@@ -197,7 +195,7 @@ typedef struct turnstile {
 	/* priority inheritance */
 	pri_t ts_eprio;
-	struct lwp *ts_inheritor;
+	lwp_t *ts_inheritor;
 	SLIST_ENTRY(turnstile) ts_pichain;
 } turnstile_t;
@@ -233,7 +231,7 @@ void turnstile_init(void);
 turnstile_t *turnstile_lookup(wchan_t);
 void turnstile_exit(wchan_t);
 void turnstile_block(turnstile_t *, int, wchan_t, syncobj_t *);
-void turnstile_wakeup(turnstile_t *, int, int, struct lwp *);
+void turnstile_wakeup(turnstile_t *, int, int, lwp_t *);
 void turnstile_print(volatile void *, void (*)(const char *, ...));
 static inline void
@@ -242,8 +240,8 @@ turnstile_unblock(void)
 	(void)sleepq_unblock(0, 0);
 }
-void turnstile_unsleep(struct lwp *);
-void turnstile_changepri(struct lwp *, pri_t);
+void turnstile_unsleep(lwp_t *);
+void turnstile_changepri(lwp_t *, pri_t);
 extern struct pool_cache turnstile_cache;
 extern struct turnstile turnstile0;