From 113dd58233556dfe93d0134c79a9120a5bae883b Mon Sep 17 00:00:00 2001
From: thorpej
Date: Wed, 22 Nov 2000 06:31:22 +0000
Subject: [PATCH] Add a LOCKDEBUG check for a r/w spinlock spinning out of
 control. Partially from Bill Sommerfeld.

---
 sys/kern/kern_lock.c | 83 +++++++++++++++++++++++++++++++++++++++++++-
 sys/sys/lock.h       | 25 ++++++++++++-
 2 files changed, 106 insertions(+), 2 deletions(-)

diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 4c652438321c..f24ebc4b50fc 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_lock.c,v 1.49 2000/11/20 20:04:49 thorpej Exp $	*/
+/*	$NetBSD: kern_lock.c,v 1.50 2000/11/22 06:31:23 thorpej Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
@@ -150,16 +150,51 @@ do {									\
 	}								\
 } while (0)
 
+#if defined(LOCKDEBUG)
+#if defined(DDB)
+#define	SPINLOCK_SPINCHECK_DEBUGGER	Debugger()
+#else
+#define	SPINLOCK_SPINCHECK_DEBUGGER	/* nothing */
+#endif
+
+#define	SPINLOCK_SPINCHECK_DECL						\
+	/* 32-bits of count -- wrap constitutes a "spinout" */		\
+	uint32_t __spinc = 0
+
+#define	SPINLOCK_SPINCHECK						\
+do {									\
+	if (++__spinc == 0) {						\
+		printf("LK_SPIN spinout, excl %d, share %d\n",		\
+		    lkp->lk_exclusivecount, lkp->lk_sharecount);	\
+		if (lkp->lk_exclusivecount)				\
+			printf("held by CPU %lu\n",			\
+			    (u_long) lkp->lk_cpu);			\
+		if (lkp->lk_lock_file)					\
+			printf("last locked at %s:%d\n",		\
+			    lkp->lk_lock_file, lkp->lk_lock_line);	\
+		if (lkp->lk_unlock_file)				\
+			printf("last unlocked at %s:%d\n",		\
+			    lkp->lk_unlock_file, lkp->lk_unlock_line);	\
+		SPINLOCK_SPINCHECK_DEBUGGER;				\
+	}								\
+} while (0)
+#else
+#define	SPINLOCK_SPINCHECK_DECL			/* nothing */
+#define	SPINLOCK_SPINCHECK			/* nothing */
+#endif /* LOCKDEBUG && DDB */
+
 /*
  * Acquire a resource.
  */
 #define	ACQUIRE(lkp, error, extflags, drain, wanted)			\
 	if ((extflags) & LK_SPIN) {					\
 		int interlocked;					\
+		SPINLOCK_SPINCHECK_DECL;				\
 									\
 		if ((drain) == 0)					\
 			(lkp)->lk_waitcount++;				\
 		for (interlocked = 1;;) {				\
+			SPINLOCK_SPINCHECK;				\
 			if (wanted) {					\
 				if (interlocked) {			\
 					INTERLOCK_RELEASE((lkp),	\
@@ -305,6 +340,10 @@ lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
 		lkp->lk_timo = timo;
 	}
 	lkp->lk_wmesg = wmesg;	/* just a name for spin locks */
+#if defined(LOCKDEBUG)
+	lkp->lk_lock_file = NULL;
+	lkp->lk_unlock_file = NULL;
+#endif
 }
 
 /*
@@ -401,8 +440,13 @@ spinlock_switchcheck(void)
  * accepted shared locks and shared-to-exclusive upgrades to go away.
  */
 int
+#if defined(LOCKDEBUG)
+_lockmgr(__volatile struct lock *lkp, u_int flags,
+    struct simplelock *interlkp, const char *file, int line)
+#else
 lockmgr(__volatile struct lock *lkp, u_int flags,
     struct simplelock *interlkp)
+#endif
 {
 	int error;
 	pid_t pid;
@@ -513,6 +557,10 @@ lockmgr(__volatile struct lock *lkp, u_int flags,
 		lkp->lk_recurselevel = 0;
 		lkp->lk_flags &= ~LK_HAVE_EXCL;
 		SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
+#if defined(LOCKDEBUG)
+		lkp->lk_unlock_file = file;
+		lkp->lk_unlock_line = line;
+#endif
 		DONTHAVEIT(lkp);
 		WAKEUP_WAITER(lkp);
 		break;
@@ -566,6 +614,10 @@ lockmgr(__volatile struct lock *lkp, u_int flags,
 				break;
 			lkp->lk_flags |= LK_HAVE_EXCL;
 			SETHOLDER(lkp, pid, cpu_id);
+#if defined(LOCKDEBUG)
+			lkp->lk_lock_file = file;
+			lkp->lk_lock_line = line;
+#endif
 			HAVEIT(lkp);
 			if (lkp->lk_exclusivecount != 0)
 				panic("lockmgr: non-zero exclusive count");
@@ -631,6 +683,10 @@ lockmgr(__volatile struct lock *lkp, u_int flags,
 			break;
 		lkp->lk_flags |= LK_HAVE_EXCL;
 		SETHOLDER(lkp, pid, cpu_id);
+#if defined(LOCKDEBUG)
+		lkp->lk_lock_file = file;
+		lkp->lk_lock_line = line;
+#endif
 		HAVEIT(lkp);
 		if (lkp->lk_exclusivecount != 0)
 			panic("lockmgr: non-zero exclusive count");
@@ -661,6 +717,10 @@ lockmgr(__volatile struct lock *lkp, u_int flags,
 		if (lkp->lk_exclusivecount == 0) {
 			lkp->lk_flags &= ~LK_HAVE_EXCL;
 			SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
+#if defined(LOCKDEBUG)
+			lkp->lk_unlock_file = file;
+			lkp->lk_unlock_line = line;
+#endif
 			DONTHAVEIT(lkp);
 		}
 	} else if (lkp->lk_sharecount != 0) {
@@ -701,6 +761,10 @@ lockmgr(__volatile struct lock *lkp, u_int flags,
 			break;
 		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
 		SETHOLDER(lkp, pid, cpu_id);
+#if defined(LOCKDEBUG)
+		lkp->lk_lock_file = file;
+		lkp->lk_lock_line = line;
+#endif
 		HAVEIT(lkp);
 		lkp->lk_exclusivecount = 1;
 		/* XXX unlikely that we'd want this */
@@ -740,7 +804,11 @@ lockmgr(__volatile struct lock *lkp, u_int flags,
  */
 int
+#if defined(LOCKDEBUG)
+_spinlock_release_all(__volatile struct lock *lkp, const char *file, int line)
+#else
 spinlock_release_all(__volatile struct lock *lkp)
+#endif
 {
 	int s, count;
 	cpuid_t cpu_id;
 
@@ -765,6 +833,10 @@ spinlock_release_all(__volatile struct lock *lkp)
 		COUNT_CPU(cpu_id, -count);
 		lkp->lk_flags &= ~LK_HAVE_EXCL;
 		SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
+#if defined(LOCKDEBUG)
+		lkp->lk_unlock_file = file;
+		lkp->lk_unlock_line = line;
+#endif
 		DONTHAVEIT(lkp);
 	}
 #ifdef DIAGNOSTIC
@@ -785,7 +857,12 @@ spinlock_release_all(__volatile struct lock *lkp)
  */
 void
+#if defined(LOCKDEBUG)
+_spinlock_acquire_count(__volatile struct lock *lkp, int count,
+    const char *file, int line)
+#else
 spinlock_acquire_count(__volatile struct lock *lkp, int count)
+#endif
 {
 	int s, error;
 	cpuid_t cpu_id;
 
@@ -814,6 +891,10 @@ spinlock_acquire_count(__volatile struct lock *lkp, int count)
 	lkp->lk_flags &= ~LK_WANT_EXCL;
 	lkp->lk_flags |= LK_HAVE_EXCL;
 	SETHOLDER(lkp, LK_NOPROC, cpu_id);
+#if defined(LOCKDEBUG)
+	lkp->lk_lock_file = file;
+	lkp->lk_lock_line = line;
+#endif
 	HAVEIT(lkp);
 	if (lkp->lk_exclusivecount != 0)
 		panic("lockmgr: non-zero exclusive count");
diff --git a/sys/sys/lock.h b/sys/sys/lock.h
index 56142968b0a4..0c7795e7c480 100644
--- a/sys/sys/lock.h
+++ b/sys/sys/lock.h
@@ -1,4 +1,4 @@
-/*	$NetBSD: lock.h,v 1.39 2000/11/19 00:56:39 sommerfeld Exp $	*/
+/*	$NetBSD: lock.h,v 1.40 2000/11/22 06:31:22 thorpej Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
@@ -167,6 +167,13 @@ struct lock {
 #if defined(LOCKDEBUG)
 #define	lk_list	lk_un.lk_un_spin.lk_spin_list
 #endif
+
+#if defined(LOCKDEBUG)
+	const char *lk_lock_file;
+	const char *lk_unlock_file;
+	int lk_lock_line;
+	int lk_unlock_line;
+#endif
 };
 
 /*
@@ -275,7 +282,13 @@ struct proc;
 
 void	lockinit(struct lock *, int prio, const char *wmesg, int timo,
 	    int flags);
+#if defined(LOCKDEBUG)
+int	_lockmgr(__volatile struct lock *, u_int flags, struct simplelock *,
+	    const char *file, int line);
+#define	lockmgr(l, f, i)	_lockmgr((l), (f), (i), __FILE__, __LINE__)
+#else
 int	lockmgr(__volatile struct lock *, u_int flags, struct simplelock *);
+#endif /* LOCKDEBUG */
 int	lockstatus(struct lock *);
 void	lockmgr_printinfo(__volatile struct lock *);
 
@@ -289,8 +302,18 @@ void	spinlock_switchcheck(void);
 #define	spinlockmgr(lkp, flags, intrlk)					\
 	lockmgr((lkp), (flags) | LK_SPIN, (intrlk))
 
+#if defined(LOCKDEBUG)
+int	_spinlock_release_all(__volatile struct lock *, const char *, int);
+void	_spinlock_acquire_count(__volatile struct lock *, int, const char *,
+	    int);
+
+#define	spinlock_release_all(l)	_spinlock_release_all((l), __FILE__, __LINE__)
+#define	spinlock_acquire_count(l, c)	_spinlock_acquire_count((l), (c), \
+	    __FILE__, __LINE__)
+#else
 int	spinlock_release_all(__volatile struct lock *);
 void	spinlock_acquire_count(__volatile struct lock *, int);
+#endif
 
 #if defined(LOCKDEBUG)
 void	_simple_lock(__volatile struct simplelock *, const char *, int);
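
The change combines two mechanisms. Every pass through the LK_SPIN busy-wait loop in ACQUIRE() bumps a 32-bit counter; if the counter wraps back to zero the lock is considered to have "spun out", and the exclusive/shared counts, the holding CPU, and the last lock/unlock sites are printed, with an optional drop into DDB. The lock/unlock sites come from the second mechanism: under LOCKDEBUG, lockmgr(), spinlock_release_all() and spinlock_acquire_count() become macros that pass __FILE__ and __LINE__ into _lockmgr(), _spinlock_release_all() and _spinlock_acquire_count(), which record the call site in the lock itself.

The standalone sketch below is only an illustration of that pattern, not the NetBSD code; the names (DEMO_LOCKDEBUG, struct demo_lock, demo_lock_acquire/_release) are invented for the example, and atomicity and memory ordering are deliberately ignored because only the diagnostic bookkeeping is of interest.

/*
 * Illustrative sketch only -- not the NetBSD implementation.  It shows a
 * 32-bit spin counter whose wrap back to zero is reported as a "spinout",
 * plus __FILE__/__LINE__ bookkeeping kept behind a debug option.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_LOCKDEBUG	1		/* stand-in for the kernel option */

struct demo_lock {
	volatile int	dl_locked;	/* nonzero while held */
#if DEMO_LOCKDEBUG
	const char	*dl_lock_file;	/* site of the last acquire */
	int		dl_lock_line;
	const char	*dl_unlock_file;	/* site of the last release */
	int		dl_unlock_line;
#endif
};

static void
demo_lock_acquire_at(struct demo_lock *lk, const char *file, int line)
{
	/* 32 bits of count -- wrap back to zero constitutes a "spinout". */
	uint32_t spinc = 0;

	while (lk->dl_locked) {
		if (++spinc == 0) {
			printf("spinout on lock %p\n", (void *)lk);
#if DEMO_LOCKDEBUG
			if (lk->dl_lock_file != NULL)
				printf("last locked at %s:%d\n",
				    lk->dl_lock_file, lk->dl_lock_line);
			if (lk->dl_unlock_file != NULL)
				printf("last unlocked at %s:%d\n",
				    lk->dl_unlock_file, lk->dl_unlock_line);
#endif
			/* The kernel version can drop into DDB here. */
		}
	}
	lk->dl_locked = 1;
#if DEMO_LOCKDEBUG
	lk->dl_lock_file = file;	/* remember the acquire site */
	lk->dl_lock_line = line;
#else
	(void)file; (void)line;
#endif
}

static void
demo_lock_release_at(struct demo_lock *lk, const char *file, int line)
{
	lk->dl_locked = 0;
#if DEMO_LOCKDEBUG
	lk->dl_unlock_file = file;	/* remember the release site */
	lk->dl_unlock_line = line;
#else
	(void)file; (void)line;
#endif
}

/*
 * Callers use the plain names; the macros splice in the call site, just as
 * the patch does for lockmgr(), spinlock_release_all() and
 * spinlock_acquire_count().
 */
#define demo_lock_acquire(lk)	demo_lock_acquire_at((lk), __FILE__, __LINE__)
#define demo_lock_release(lk)	demo_lock_release_at((lk), __FILE__, __LINE__)

A caller simply writes demo_lock_acquire(&lk) and demo_lock_release(&lk); on a wedged lock the spinout report points at the file and line that last took the lock and last dropped it. Using wrap of a 32-bit counter as the spinout condition keeps the spin path to an increment and a test, with no separate threshold or timestamping.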