add defines to control whether or not mutex operations are skipped
after we have panic'd. no functional change.
commit e5440b91e0 (parent 3084abc1e1)
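The change is purely mechanical: the existing "skip mutex work once panicstr is set" short-circuits are wrapped in new compile-time switches that default to the old behavior. A minimal standalone sketch of the pattern (hypothetical names, GCC/Clang builtins; an illustration, not code from this commit):

	/*
	 * EXAMPLE_PANIC_SKIP plays the role of MUTEX_PANIC_SKIP_SPIN:
	 * set to 1, the panic short-circuit is compiled in; set to 0,
	 * the normal lock path runs even after a panic.
	 */
	#include <stddef.h>

	#define EXAMPLE_PANIC_SKIP 1

	static const char *example_panicstr;	/* non-NULL once panic'd */

	static void
	example_spin_lock(volatile int *lock)
	{
	#if EXAMPLE_PANIC_SKIP
		if (example_panicstr != NULL)
			return;		/* give up on locking during panic */
	#endif
		while (__sync_lock_test_and_set(lock, 1) != 0)
			continue;	/* spin until the lock is acquired */
	}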
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_mutex.c,v 1.72 2018/02/06 07:46:24 ozaki-r Exp $	*/
+/*	$NetBSD: kern_mutex.c,v 1.73 2018/02/25 18:54:29 chs Exp $	*/
 
 /*-
  * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -40,7 +40,7 @@
 #define	__MUTEX_PRIVATE
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.72 2018/02/06 07:46:24 ozaki-r Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.73 2018/02/25 18:54:29 chs Exp $");
 
 #include <sys/param.h>
 #include <sys/atomic.h>
@@ -60,6 +60,9 @@ __KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.72 2018/02/06 07:46:24 ozaki-r Exp
 
 #include <machine/lock.h>
 
+#define MUTEX_PANIC_SKIP_SPIN 1
+#define MUTEX_PANIC_SKIP_ADAPTIVE 1
+
 /*
  * When not running a debug kernel, spin mutexes are not much
  * more than an splraiseipl() and splx() pair.
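Both switches default to 1, preserving the historical behavior, so nothing else in the build needs to change. A developer who wants mutexes to keep operating normally after a panic could flip a define to 0 locally, e.g. (hypothetical edit, not part of this commit):

	#define MUTEX_PANIC_SKIP_SPIN 0		/* spin mutexes stay honest after panic */
	#define MUTEX_PANIC_SKIP_ADAPTIVE 1	/* adaptive mutexes still short-circuit */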
@@ -489,8 +492,10 @@ mutex_vector_enter(kmutex_t *mtx)
 		 * to reduce cache line ping-ponging between CPUs.
 		 */
 		do {
+#if MUTEX_PANIC_SKIP_SPIN
 			if (panicstr != NULL)
 				break;
+#endif
 			while (MUTEX_SPINBIT_LOCKED_P(mtx)) {
 				SPINLOCK_BACKOFF(count);
 #ifdef LOCKDEBUG
@@ -547,10 +552,12 @@ mutex_vector_enter(kmutex_t *mtx)
 			owner = mtx->mtx_owner;
 			continue;
 		}
+#if MUTEX_PANIC_SKIP_ADAPTIVE
 		if (__predict_false(panicstr != NULL)) {
 			KPREEMPT_ENABLE(curlwp);
 			return;
 		}
+#endif
 		if (__predict_false(MUTEX_OWNER(owner) == curthread)) {
 			MUTEX_ABORT(mtx, "locking against myself");
 		}
@@ -726,8 +733,10 @@ mutex_vector_exit(kmutex_t *mtx)
 	if (MUTEX_SPIN_P(mtx)) {
 #ifdef FULL
 		if (__predict_false(!MUTEX_SPINBIT_LOCKED_P(mtx))) {
+#if MUTEX_PANIC_SKIP_SPIN
 			if (panicstr != NULL)
 				return;
+#endif
 			MUTEX_ABORT(mtx, "exiting unheld spin mutex");
 		}
 		MUTEX_UNLOCKED(mtx);
@@ -737,11 +746,13 @@ mutex_vector_exit(kmutex_t *mtx)
 		return;
 	}
 
+#ifdef MUTEX_PANIC_SKIP_ADAPTIVE
 	if (__predict_false((uintptr_t)panicstr | cold)) {
 		MUTEX_UNLOCKED(mtx);
 		MUTEX_RELEASE(mtx);
 		return;
 	}
+#endif
 
 	curthread = (uintptr_t)curlwp;
 	MUTEX_DASSERT(mtx, curthread != 0);
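One wrinkle worth noting: this hunk tests the switch with #ifdef, while the other hunks use #if. Since the macro is always defined above, the block guarded here stays compiled in even if MUTEX_PANIC_SKIP_ADAPTIVE is set to 0; only the #if sites would drop out. A two-line illustration of the difference:

	#define FLAG 0
	#if FLAG	/* value is 0, so this body is compiled out */
	#endif
	#ifdef FLAG	/* FLAG is defined, so this body is compiled in */
	#endif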
@@ -932,8 +943,10 @@ mutex_spin_retry(kmutex_t *mtx)
 	 * to reduce cache line ping-ponging between CPUs.
 	 */
 	do {
+#if MUTEX_PANIC_SKIP_SPIN
 		if (panicstr != NULL)
 			break;
+#endif
 		while (MUTEX_SPINBIT_LOCKED_P(mtx)) {
 			SPINLOCK_BACKOFF(count);
 #ifdef LOCKDEBUG