#if DIAGNOSTIC panic ---> KASSERT

- Omit mutex_exit before panic: panic(9) does not return, so there is
  no need to release the lock first.
- Sprinkle some more information into a few messages.
- Prefer __diagused over #if DIAGNOSTIC for declarations,
  to reduce conditionals.  (Illustrative sketches of these patterns
  follow below.)

ok mrg@
Author: riastradh
Date:   2017-03-14 03:13:50 +00:00
Parent: 03093d2152
Commit: 6a970e990b
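To make the pattern concrete, here is a small standalone sketch (userland
C with a toy stand-in for the kernel's KASSERTMSG; the real macro lives in
<sys/systm.h> and differs in detail):

    #include <stdio.h>
    #include <stdlib.h>

    #ifdef DIAGNOSTIC
    #define KASSERTMSG(e, msg, ...) do {                                  \
            if (!(e)) {                                                   \
                    fprintf(stderr, "assertion \"%s\" failed: " msg "\n", \
                        #e, __VA_ARGS__);                                 \
                    abort();        /* stands in for panic(9) */          \
            }                                                             \
    } while (0)
    #else
    #define KASSERTMSG(e, msg, ...) do { } while (0)
    #endif

    int
    main(void)
    {
            unsigned nitems = 3, itemsperpage = 4;

            /* Fires only in a -DDIAGNOSTIC build, like the kernel macro. */
            KASSERTMSG(nitems >= itemsperpage,
                "nitems=%u < itemsperpage=%u", nitems, itemsperpage);
            printf("ok\n");
            return 0;
    }

Build with cc -DDIAGNOSTIC to enable the check; without the flag the
assertion compiles to nothing, which is the property the kernel macros
share and the reason the open-coded #if DIAGNOSTIC blocks below are
redundant.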

--- a/sys/kern/subr_pool.c
+++ b/sys/kern/subr_pool.c

@@ -1,4 +1,4 @@
-/*      $NetBSD: subr_pool.c,v 1.206 2016/02/05 03:04:52 knakahara Exp $        */
+/*      $NetBSD: subr_pool.c,v 1.207 2017/03/14 03:13:50 riastradh Exp $        */
 
 /*-
  * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015
@@ -33,7 +33,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.206 2016/02/05 03:04:52 knakahara Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.207 2017/03/14 03:13:50 riastradh Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_ddb.h"
@@ -382,12 +382,10 @@ pr_rmpage(struct pool *pp, struct pool_item_header *ph,
         /*
          * If the page was idle, decrement the idle page count.
          */
        if (ph->ph_nmissing == 0) {
-#ifdef DIAGNOSTIC
-               if (pp->pr_nidle == 0)
-                       panic("pr_rmpage: nidle inconsistent");
-               if (pp->pr_nitems < pp->pr_itemsperpage)
-                       panic("pr_rmpage: nitems inconsistent");
-#endif
+               KASSERT(pp->pr_nidle != 0);
+               KASSERTMSG((pp->pr_nitems >= pp->pr_itemsperpage),
+                   "nitems=%u < itemsperpage=%u",
+                   pp->pr_nitems, pp->pr_itemsperpage);
                pp->pr_nidle--;
        }
@@ -525,10 +523,9 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
                prsize = sizeof(struct pool_item);
 
        prsize = roundup(prsize, align);
-#ifdef DIAGNOSTIC
-       if (prsize > palloc->pa_pagesz)
-               panic("pool_init: pool item size (%zu) too large", prsize);
-#endif
+       KASSERTMSG((prsize <= palloc->pa_pagesz),
+           "pool_init: pool item size (%zu) larger than page size (%u)",
+           prsize, palloc->pa_pagesz);
 
        /*
         * Initialize the pool structure.
@@ -698,14 +695,8 @@ pool_destroy(struct pool *pp)
 
        mutex_enter(&pp->pr_lock);
 
        KASSERT(pp->pr_cache == NULL);
-
-#ifdef DIAGNOSTIC
-       if (pp->pr_nout != 0) {
-               panic("pool_destroy: pool busy: still out: %u",
-                   pp->pr_nout);
-       }
-#endif
+       KASSERTMSG((pp->pr_nout == 0),
+           "pool_destroy: pool busy: still out: %u", pp->pr_nout);
        KASSERT(LIST_EMPTY(&pp->pr_fullpages));
        KASSERT(LIST_EMPTY(&pp->pr_partpages));
@@ -726,10 +717,8 @@ pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
 {
 
        /* XXX no locking -- must be used just after pool_init() */
-#ifdef DIAGNOSTIC
-       if (pp->pr_drain_hook != NULL)
-               panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
-#endif
+       KASSERTMSG((pp->pr_drain_hook == NULL),
+           "pool_set_drain_hook(%s): already set", pp->pr_wchan);
        pp->pr_drain_hook = fn;
        pp->pr_drain_hook_arg = arg;
 }
@@ -757,15 +746,13 @@ pool_get(struct pool *pp, int flags)
        struct pool_item_header *ph;
        void *v;
 
-#ifdef DIAGNOSTIC
-       if (pp->pr_itemsperpage == 0)
-               panic("pool_get: pool '%s': pr_itemsperpage is zero, "
-                   "pool not initialized?", pp->pr_wchan);
-       if ((cpu_intr_p() || cpu_softintr_p()) && pp->pr_ipl == IPL_NONE &&
-           !cold && panicstr == NULL)
-               panic("pool '%s' is IPL_NONE, but called from "
-                   "interrupt context\n", pp->pr_wchan);
-#endif
+       KASSERTMSG((pp->pr_itemsperpage != 0),
+           "pool_get: pool '%s': pr_itemsperpage is zero, "
+           "pool not initialized?", pp->pr_wchan);
+       KASSERTMSG((!(cpu_intr_p() || cpu_softintr_p())
+               || pp->pr_ipl != IPL_NONE || cold || panicstr != NULL),
+           "pool '%s' is IPL_NONE, but called from interrupt context",
+           pp->pr_wchan);
        if (flags & PR_WAITOK) {
                ASSERT_SLEEPABLE();
        }
@@ -777,12 +764,8 @@ pool_get(struct pool *pp, int flags)
         * and we can wait, then wait until an item has been returned to
         * the pool.
         */
-#ifdef DIAGNOSTIC
-       if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
-               mutex_exit(&pp->pr_lock);
-               panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
-       }
-#endif
+       KASSERTMSG((pp->pr_nout <= pp->pr_hardlimit),
+           "pool_get: %s: crossed hard limit", pp->pr_wchan);
        if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
                if (pp->pr_drain_hook != NULL) {
                        /*
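The hunk above is typical of the dropped mutex_exit() calls: the old code
released pp->pr_lock before panicking, but panic(9) never returns, so the
unlock bought nothing and only obscured the check.  Folded into KASSERTMSG
the whole thing is one statement; its DIAGNOSTIC expansion is roughly the
following (an approximation for illustration, not the verbatim
<sys/systm.h> definition):

    #define KASSERTMSG(e, msg, ...) do {                                     \
            if (__predict_false(!(e)))                                       \
                    panic("kernel diagnostic assertion \"%s\" failed: " msg, \
                        #e, ##__VA_ARGS__);                                  \
    } while (0)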
@@ -830,14 +813,10 @@ pool_get(struct pool *pp, int flags)
        if ((ph = pp->pr_curpage) == NULL) {
                int error;
 
-#ifdef DIAGNOSTIC
-               if (pp->pr_nitems != 0) {
-                       mutex_exit(&pp->pr_lock);
-                       printf("pool_get: %s: curpage NULL, nitems %u\n",
-                           pp->pr_wchan, pp->pr_nitems);
-                       panic("pool_get: nitems inconsistent");
-               }
-#endif
+               KASSERTMSG((pp->pr_nitems == 0),
+                   "pool_get: nitems inconsistent"
+                   ": %s: curpage NULL, nitems %u",
+                   pp->pr_wchan, pp->pr_nitems);
 
                /*
                 * Call the back-end page allocator for more memory.
@@ -864,12 +843,8 @@ pool_get(struct pool *pp, int flags)
                goto startover;
        }
        if (pp->pr_roflags & PR_NOTOUCH) {
-#ifdef DIAGNOSTIC
-               if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
-                       mutex_exit(&pp->pr_lock);
-                       panic("pool_get: %s: page empty", pp->pr_wchan);
-               }
-#endif
+               KASSERTMSG((ph->ph_nmissing < pp->pr_itemsperpage),
+                   "pool_get: %s: page empty", pp->pr_wchan);
                v = pr_item_notouch_get(pp, ph);
        } else {
                v = pi = LIST_FIRST(&ph->ph_itemlist);
@@ -877,22 +852,14 @@ pool_get(struct pool *pp, int flags)
                        mutex_exit(&pp->pr_lock);
                        panic("pool_get: %s: page empty", pp->pr_wchan);
                }
-#ifdef DIAGNOSTIC
-               if (__predict_false(pp->pr_nitems == 0)) {
-                       mutex_exit(&pp->pr_lock);
-                       printf("pool_get: %s: items on itemlist, nitems %u\n",
-                           pp->pr_wchan, pp->pr_nitems);
-                       panic("pool_get: nitems inconsistent");
-               }
-#endif
-
-#ifdef DIAGNOSTIC
-               if (__predict_false(pi->pi_magic != PI_MAGIC)) {
-                       panic("pool_get(%s): free list modified: "
-                           "magic=%x; page %p; item addr %p\n",
-                           pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
-               }
-#endif
+               KASSERTMSG((pp->pr_nitems > 0),
+                   "pool_get: nitems inconsistent"
+                   ": %s: items on itemlist, nitems %u",
+                   pp->pr_wchan, pp->pr_nitems);
+               KASSERTMSG((pi->pi_magic == PI_MAGIC),
+                   "pool_get(%s): free list modified: "
+                   "magic=%x; page %p; item addr %p",
+                   pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
 
                /*
                 * Remove from item list.
@@ -902,10 +869,7 @@ pool_get(struct pool *pp, int flags)
        pp->pr_nitems--;
        pp->pr_nout++;
        if (ph->ph_nmissing == 0) {
-#ifdef DIAGNOSTIC
-               if (__predict_false(pp->pr_nidle == 0))
-                       panic("pool_get: nidle inconsistent");
-#endif
+               KASSERT(pp->pr_nidle > 0);
                pp->pr_nidle--;
 
                /*
@@ -917,14 +881,9 @@ pool_get(struct pool *pp, int flags)
        }
        ph->ph_nmissing++;
        if (ph->ph_nmissing == pp->pr_itemsperpage) {
-#ifdef DIAGNOSTIC
-               if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
-                   !LIST_EMPTY(&ph->ph_itemlist))) {
-                       mutex_exit(&pp->pr_lock);
-                       panic("pool_get: %s: nmissing inconsistent",
-                           pp->pr_wchan);
-               }
-#endif
+               KASSERTMSG(((pp->pr_roflags & PR_NOTOUCH) ||
+                       LIST_EMPTY(&ph->ph_itemlist)),
+                   "pool_get: %s: nmissing inconsistent", pp->pr_wchan);
                /*
                 * This page is now full.  Move it to the full list
                 * and select a new current page.
@@ -969,13 +928,8 @@ pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
 
        FREECHECK_IN(&pp->pr_freecheck, v);
        LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
-#ifdef DIAGNOSTIC
-       if (__predict_false(pp->pr_nout == 0)) {
-               printf("pool %s: putting with none out\n",
-                   pp->pr_wchan);
-               panic("pool_put");
-       }
-#endif
+       KASSERTMSG((pp->pr_nout > 0),
+           "pool_put: pool %s: putting with none out", pp->pr_wchan);
 
        if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
                panic("pool_put: %s: page header missing", pp->pr_wchan);
@@ -1156,12 +1110,9 @@ pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
        int n;
 
        KASSERT(mutex_owned(&pp->pr_lock));
 
-#ifdef DIAGNOSTIC
-       if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
-           ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
-               panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
-#endif
+       KASSERTMSG(((pp->pr_roflags & PR_NOALIGN) ||
+               (((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) == 0)),
+           "pool_prime_page: %s: unaligned page: %p", pp->pr_wchan, cp);
 
        /*
         * Insert page header.
@@ -1487,9 +1438,7 @@ pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
     void (*pr)(const char *, ...))
 {
        struct pool_item_header *ph;
-#ifdef DIAGNOSTIC
-       struct pool_item *pi;
-#endif
+       struct pool_item *pi __diagused;
 
        LIST_FOREACH(ph, pl, ph_pagelist) {
                (*pr)("\t\tpage %p, nmissing %d, time %" PRIu32 "\n",
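The last hunk shows the __diagused idiom from the third bullet of the
commit message: pi is referenced only by DIAGNOSTIC-only code later in the
function, so an unannotated declaration would draw an unused-variable
warning in non-DIAGNOSTIC builds.  The definition is approximately this (a
sketch; the real one is in <sys/cdefs.h>):

    #if defined(_KERNEL) && defined(DIAGNOSTIC)
    #define __diagused      /* empty: DIAGNOSTIC code really uses it */
    #else
    #define __diagused      __attribute__((__unused__))
    #endif

The declaration can therefore stay unconditional, and one more
#ifdef/#endif pair disappears.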