Fix a thinko in draining of spin locks: bump waitcount in the spin case,
too.  Remove some needless code duplication by adding a "drain" argument
to the ACQUIRE() macro (compiler can [and does] optimize the constant
conditional).
thorpej 1999-07-28 19:29:39 +00:00
parent 3d71693ea7
commit cb41412726
2 changed files with 43 additions and 48 deletions
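
The thinko: spinners were not counted in lk_waitcount, so LK_DRAIN's wait
condition (lk_waitcount != 0) could never see them. The "constant
conditional" remark refers to every ACQUIRE() call site passing a literal
0 or 1 for the new drain argument, so constant folding leaves only one
branch in each expansion. A minimal, self-contained sketch of that pattern
(names and the flag value are illustrative, not the NetBSD sources):

	/*
	 * Sketch of the commit's "drain" pattern: the flag is a literal
	 * constant at every call site, so the compiler folds the
	 * conditional and each macro expansion keeps only one branch.
	 */
	#include <stdio.h>

	#define LK_WAITDRAIN	0x0800	/* illustrative value */

	struct lk {
		int	lk_waitcount;	/* # of sleepers/spinners */
		int	lk_flags;
	};

	#define COUNT_WAITER(lkp, drain)				\
	do {								\
		if ((drain))	/* constant; folded per call site */	\
			(lkp)->lk_flags |= LK_WAITDRAIN;		\
		else							\
			(lkp)->lk_waitcount++;				\
	} while (0)

	int
	main(void)
	{
		struct lk l = { 0, 0 };

		COUNT_WAITER(&l, 0);	/* normal acquire: counted as a waiter */
		COUNT_WAITER(&l, 1);	/* drain: sets LK_WAITDRAIN instead */
		printf("waitcount=%d flags=0x%x\n", l.lk_waitcount, l.lk_flags);
		return (0);
	}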

sys/kern/kern_lock.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_lock.c,v 1.22 1999/07/28 01:59:46 mellon Exp $	*/
+/*	$NetBSD: kern_lock.c,v 1.23 1999/07/28 19:29:39 thorpej Exp $	*/
 /*-
  * Copyright (c) 1999 The NetBSD Foundation, Inc.
@@ -117,10 +117,12 @@ do { \
 /*
  * Acquire a resource.
  */
-#define	ACQUIRE(lkp, error, extflags, wanted) \
+#define	ACQUIRE(lkp, error, extflags, drain, wanted) \
 	if ((extflags) & LK_SPIN) { \
 		int interlocked; \
 \
+		if ((drain) == 0) \
+			(lkp)->lk_waitcount++; \
 		for (interlocked = 1;;) { \
 			if (wanted) { \
 				if (interlocked) { \
@@ -134,16 +136,24 @@ do { \
 				interlocked = 1; \
 			} \
 		} \
+		if ((drain) == 0) \
+			(lkp)->lk_waitcount--; \
 		KASSERT((wanted) == 0); \
 		error = 0;	/* sanity */ \
 	} else { \
 		for (error = 0; wanted; ) { \
-			(lkp)->lk_waitcount++; \
+			if ((drain)) \
+				(lkp)->lk_flags |= LK_WAITDRAIN; \
+			else \
+				(lkp)->lk_waitcount++; \
 			simple_unlock(&(lkp)->lk_interlock); \
-			error = tsleep((void *)lkp, (lkp)->lk_prio, \
+			/* XXX Cast away volatile. */ \
+			error = tsleep((drain) ? &(lkp)->lk_flags : \
+			    (void *)(lkp), (lkp)->lk_prio, \
 			    (lkp)->lk_wmesg, (lkp)->lk_timo); \
 			simple_lock(&(lkp)->lk_interlock); \
-			(lkp)->lk_waitcount--; \
+			if ((drain) == 0) \
+				(lkp)->lk_waitcount--; \
 			if (error) \
 				break; \
 			if ((extflags) & LK_SLEEPFAIL) { \
@@ -165,6 +175,14 @@ do { \
 	(((lkp)->lk_flags & LK_SPIN) != 0 ? \
 	 ((lkp)->lk_cpu == (cpu_id)) : ((lkp)->lk_lockholder == (pid)))
+
+#define	WAKEUP_WAITER(lkp) \
+do { \
+	if (((lkp)->lk_flags & LK_SPIN) == 0 && (lkp)->lk_waitcount) { \
+		/* XXX Cast away volatile. */ \
+		wakeup_one((void *)(lkp)); \
+	} \
+} while (0)
 
 #if defined(LOCKDEBUG) /* { */
 #if defined(MULTIPROCESSOR) /* { */
 struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;
@@ -341,7 +359,7 @@ lockmgr(lkp, flags, interlkp)
 		/*
 		 * Wait for exclusive locks and upgrades to clear.
 		 */
-		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
+		ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
 		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
 		if (error)
 			break;
@@ -367,8 +385,7 @@ lockmgr(lkp, flags, interlkp)
 		lkp->lk_flags &= ~LK_HAVE_EXCL;
 		SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
 		DONTHAVEIT(lkp);
-		if (lkp->lk_waitcount)
-			wakeup_one((void *)lkp);
+		WAKEUP_WAITER(lkp);
 		break;
 
 	case LK_EXCLUPGRADE:
@@ -414,7 +431,7 @@ lockmgr(lkp, flags, interlkp)
 		 * drop to zero, then take exclusive lock.
 		 */
 		lkp->lk_flags |= LK_WANT_UPGRADE;
-		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
+		ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount);
 		lkp->lk_flags &= ~LK_WANT_UPGRADE;
 		if (error)
 			break;
@@ -434,8 +451,8 @@ lockmgr(lkp, flags, interlkp)
 		 * lock, awaken upgrade requestor if we are the last shared
 		 * lock, then request an exclusive lock.
 		 */
-		if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
-			wakeup_one((void *)lkp);
+		if (lkp->lk_sharecount == 0)
+			WAKEUP_WAITER(lkp);
 		/* fall into exclusive request */
 
 	case LK_EXCLUSIVE:
@@ -470,7 +487,7 @@ lockmgr(lkp, flags, interlkp)
 		/*
 		 * Try to acquire the want_exclusive flag.
 		 */
-		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
+		ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
 		    (LK_HAVE_EXCL | LK_WANT_EXCL));
 		if (error)
 			break;
@@ -478,7 +495,7 @@ lockmgr(lkp, flags, interlkp)
 		/*
 		 * Wait for shared locks and upgrades to finish.
 		 */
-		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
+		ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount != 0 ||
 		    (lkp->lk_flags & LK_WANT_UPGRADE));
 		lkp->lk_flags &= ~LK_WANT_EXCL;
 		if (error)
@@ -521,8 +538,7 @@ lockmgr(lkp, flags, interlkp)
 			lkp->lk_sharecount--;
 			COUNT(lkp, p, cpu_id, -1);
 		}
-		if (lkp->lk_waitcount)
-			wakeup_one((void *)lkp);
+		WAKEUP_WAITER(lkp);
 		break;
 
 	case LK_DRAIN:
@@ -543,32 +559,13 @@ lockmgr(lkp, flags, interlkp)
 			error = EBUSY;
 			break;
 		}
-		if (lkp->lk_flags & LK_SPIN) {
-			ACQUIRE(lkp, error, extflags,
-			    ((lkp->lk_flags &
-			     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
-			     lkp->lk_sharecount != 0 ||
-			     lkp->lk_waitcount != 0));
-		} else {
-			/*
-			 * This is just a special cause of the sleep case
-			 * in ACQUIRE().  We set WANTDRAIN instead of
-			 * incrementing waitcount.
-			 */
-			for (error = 0; ((lkp->lk_flags &
-			     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
-			     lkp->lk_sharecount != 0 ||
-			     lkp->lk_waitcount != 0); ) {
-				lkp->lk_flags |= LK_WAITDRAIN;
-				simple_unlock(&lkp->lk_interlock);
-				if ((error = tsleep((void *)&lkp->lk_flags,
-				    lkp->lk_prio, lkp->lk_wmesg, lkp->lk_timo)))
-					return (error);
-				if ((extflags) & LK_SLEEPFAIL)
-					return (ENOLCK);
-				simple_lock(&lkp->lk_interlock);
-			}
-		}
+		ACQUIRE(lkp, error, extflags, 1,
+		    ((lkp->lk_flags &
+		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
+		     lkp->lk_sharecount != 0 ||
+		     lkp->lk_waitcount != 0));
+		if (error)
+			break;
 		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
 		SETHOLDER(lkp, pid, cpu_id);
 		HAVEIT(lkp);
@@ -585,8 +582,9 @@ lockmgr(lkp, flags, interlkp)
 		    flags & LK_TYPE_MASK);
 		/* NOTREACHED */
 	}
-	if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
-	    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
+	if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
+	    ((lkp->lk_flags &
+	      (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
 	    lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
 		lkp->lk_flags &= ~LK_WAITDRAIN;
 		wakeup_one((void *)&lkp->lk_flags);

sys/sys/lock.h

@@ -1,4 +1,4 @@
-/*	$NetBSD: lock.h,v 1.21 1999/07/27 23:45:13 thorpej Exp $	*/
+/*	$NetBSD: lock.h,v 1.22 1999/07/28 19:29:39 thorpej Exp $	*/
 /*-
  * Copyright (c) 1999 The NetBSD Foundation, Inc.
@@ -140,6 +140,7 @@ struct lock {
 	int	lk_sharecount;		/* # of accepted shared locks */
 	short	lk_exclusivecount;	/* # of recursive exclusive locks */
 	short	lk_recurselevel;	/* lvl above which recursion ok */
+	int	lk_waitcount;		/* # of sleepers/spinners */
 
 	/*
 	 * This is the sleep message for sleep locks, and a simple name
@@ -152,9 +153,6 @@ struct lock {
 			/* pid of exclusive lock holder */
 			pid_t lk_sleep_lockholder;
 
-			/* # of processes sleeping for lock */
-			int lk_sleep_waitcount;
-
 			/* priority at which to sleep */
 			int lk_sleep_prio;
@@ -171,7 +169,6 @@ struct lock {
 	} lk_un;
 #define	lk_lockholder	lk_un.lk_un_sleep.lk_sleep_lockholder
-#define	lk_waitcount	lk_un.lk_un_sleep.lk_sleep_waitcount
 #define	lk_prio		lk_un.lk_un_sleep.lk_sleep_prio
 #define	lk_timo		lk_un.lk_un_sleep.lk_sleep_timo