clean up DIAGNOSTIC checks, use KASSERT().

chs 2001-02-18 21:19:08 +00:00
parent 7c3d7f0df5
commit 19b7b64642
9 changed files with 42 additions and 138 deletions
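
Every hunk below applies the same mechanical rewrite: an open-coded "#ifdef DIAGNOSTIC / if (bad) panic(...) / #endif" block becomes a single KASSERT(), which the kernel headers compile away when DIAGNOSTIC is not defined, so the unconditional-looking call costs nothing in a release build. The sketch below is a minimal userland illustration of that pattern, not the verbatim NetBSD code: the panic() stub, the simplified KASSERT() (the real one in <sys/systm.h> also reports file and line), and the two-field struct vm_amap are stand-ins for this example.

#include <stdio.h>
#include <stdlib.h>

#define DIAGNOSTIC		/* omit this to compile the checks away */

/* Hypothetical stand-in for the kernel's panic(9). */
static void
panic(const char *msg)
{
	fprintf(stderr, "panic: %s\n", msg);
	abort();
}

/* Simplified KASSERT: a no-op unless DIAGNOSTIC is defined. */
#ifdef DIAGNOSTIC
#define	KASSERT(e)	do {					\
	if (!(e))						\
		panic("kernel assertion \"" #e "\" failed");	\
} while (0)
#else
#define	KASSERT(e)	/* nothing */
#endif

/* Illustrative subset of struct vm_amap. */
struct vm_amap {
	int am_ref;	/* reference count */
	int am_nused;	/* slots in use */
};

/* Before: every call site hand-rolled its own DIAGNOSTIC block. */
static void
amap_free_old(struct vm_amap *amap)
{
#ifdef DIAGNOSTIC
	if (amap->am_ref || amap->am_nused)
		panic("amap_free");
#endif
	/* ... release the amap ... */
}

/* After: one line, and the message names the failed condition. */
static void
amap_free_new(struct vm_amap *amap)
{
	KASSERT(amap->am_ref == 0 && amap->am_nused == 0);
	/* ... release the amap ... */
}

int
main(void)
{
	struct vm_amap a = { 0, 0 };

	amap_free_old(&a);
	amap_free_new(&a);	/* both pass: refcount and usage are zero */
	return 0;
}

Besides being shorter, the KASSERT form keeps the assertions uniform: the old blocks were inconsistent (some panicked, some only printf'd and carried on, as in uvm_swap.c below), while KASSERT always panics with the stringified condition.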

--- sys/uvm/uvm_amap.c
+++ sys/uvm/uvm_amap.c

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_amap.c,v 1.29 2001/01/23 02:27:39 thorpej Exp $ */
+/* $NetBSD: uvm_amap.c,v 1.30 2001/02/18 21:19:09 chs Exp $ */
 
 /*
  *
@@ -255,11 +255,7 @@ amap_free(amap)
 {
 	UVMHIST_FUNC("amap_free"); UVMHIST_CALLED(maphist);
 
-#ifdef DIAGNOSTIC
-	if (amap->am_ref || amap->am_nused)
-		panic("amap_free");
-#endif
+	KASSERT(amap->am_ref == 0 && amap->am_nused == 0);
 	LOCK_ASSERT(simple_lock_held(&amap->am_l));
 
 	free(amap->am_slots, M_UVMAMAP);
@@ -384,11 +380,7 @@ amap_extend(entry, addsize)
 	newover = malloc(slotneed * sizeof(struct vm_anon *),
 	    M_UVMAMAP, M_WAITOK);
 	amap_lock(amap);			/* re-lock! */
-#ifdef DIAGNOSTIC
-	if (amap->am_maxslot >= slotneed)
-		panic("amap_extend: amap changed during malloc");
-#endif
+	KASSERT(amap->am_maxslot < slotneed);
 
 	/*
 	 * now copy everything over to new malloc'd areas...

--- sys/uvm/uvm_amap.h
+++ sys/uvm/uvm_amap.h

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_amap.h,v 1.13 2000/11/25 06:27:59 chs Exp $ */
+/* $NetBSD: uvm_amap.h,v 1.14 2001/02/18 21:19:08 chs Exp $ */
 
 /*
  *
@@ -247,15 +247,10 @@ struct vm_amap {
  */
 
 /* AMAP_B2SLOT: convert byte offset to slot */
-#ifdef DIAGNOSTIC
-#define AMAP_B2SLOT(S,B) { \
-	if ((B) & (PAGE_SIZE - 1)) \
-		panic("AMAP_B2SLOT: invalid byte count"); \
-	(S) = (B) >> PAGE_SHIFT; \
+#define AMAP_B2SLOT(S,B) { \
+	KASSERT(((B) & (PAGE_SIZE - 1)) == 0); \
+	(S) = (B) >> PAGE_SHIFT; \
 }
-#else
-#define AMAP_B2SLOT(S,B) (S) = (B) >> PAGE_SHIFT
-#endif
 
 /*
  * lock/unlock/refs/flags macros

--- sys/uvm/uvm_anon.c
+++ sys/uvm/uvm_anon.c

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_anon.c,v 1.14 2001/01/28 23:30:42 thorpej Exp $ */
+/* $NetBSD: uvm_anon.c,v 1.15 2001/02/18 21:19:08 chs Exp $ */
 
 /*
  *
@@ -501,13 +501,6 @@ anon_pagein(anon)
 		 */
 
 		return FALSE;
-
-	default:
-#ifdef DIAGNOSTIC
-		panic("anon_pagein: uvmfault_anonget -> %d", rv);
-#else
-		return FALSE;
-#endif
 	}
 
 	/*

--- sys/uvm/uvm_aobj.c
+++ sys/uvm/uvm_aobj.c

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_aobj.c,v 1.38 2001/01/28 23:30:42 thorpej Exp $ */
+/* $NetBSD: uvm_aobj.c,v 1.39 2001/02/18 21:19:08 chs Exp $ */
 
 /*
  * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
@@ -338,18 +338,17 @@ uao_set_swslot(uobj, pageidx, slot)
 	 */
 	if (UAO_USES_SWHASH(aobj)) {
 		/*
 		 * Avoid allocating an entry just to free it again if
 		 * the page had not swap slot in the first place, and
 		 * we are freeing.
 		 */
 		struct uao_swhash_elt *elt =
 		    uao_find_swhash_elt(aobj, pageidx, slot ? TRUE : FALSE);
 		if (elt == NULL) {
-#ifdef DIAGNOSTIC
-			if (slot)
-				panic("uao_set_swslot: didn't create elt");
-#endif
+			KASSERT(slot == 0);
 			return (0);
 		}
@@ -920,9 +919,6 @@ uao_flush(uobj, start, stop, flags)
 		default:
 			panic("uao_flush: weird flags");
 		}
-#ifdef DIAGNOSTIC
-		panic("uao_flush: unreachable code");
-#endif
 	}
 
 	uvm_unlock_pageq();
@@ -1261,10 +1257,7 @@ uao_releasepg(pg, nextpgp)
 {
 	struct uvm_aobj *aobj = (struct uvm_aobj *) pg->uobject;
 
-#ifdef DIAGNOSTIC
-	if ((pg->flags & PG_RELEASED) == 0)
-		panic("uao_releasepg: page not released!");
-#endif
+	KASSERT(pg->flags & PG_RELEASED);
 
 	/*
 	 * dispose of the page [caller handles PG_WANTED] and swap slot.
@@ -1291,10 +1284,7 @@ uao_releasepg(pg, nextpgp)
 	if (aobj->u_obj.uo_npages != 0)
 		return TRUE;
 
-#ifdef DIAGNOSTIC
-	if (TAILQ_FIRST(&aobj->u_obj.memq))
-		panic("uvn_releasepg: pages in object with npages == 0");
-#endif
+	KASSERT(TAILQ_EMPTY(&aobj->u_obj.memq));
 
 	/*
 	 * finally, free the rest.
@@ -1513,20 +1503,8 @@ uao_pagein_page(aobj, pageidx)
 		 */
 
 		return FALSE;
-
-#ifdef DIAGNOSTIC
-	default:
-		panic("uao_pagein_page: uao_get -> %d\n", rv);
-#endif
 	}
 
-#ifdef DIAGNOSTIC
-	/*
-	 * this should never happen, since we have a reference on the aobj.
-	 */
-	if (pg->flags & PG_RELEASED) {
-		panic("uao_pagein_page: found PG_RELEASED page?\n");
-	}
-#endif
+	KASSERT((pg->flags & PG_RELEASED) == 0);
 
 	/*
 	 * ok, we've got the page now.

--- sys/uvm/uvm_fault.c
+++ sys/uvm/uvm_fault.c

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_fault.c,v 1.55 2001/01/28 23:30:43 thorpej Exp $ */
+/* $NetBSD: uvm_fault.c,v 1.56 2001/02/18 21:19:08 chs Exp $ */
 
 /*
  *
@@ -1870,10 +1870,7 @@ uvm_fault_unwire_locked(map, start, end)
 	/*
 	 * find the beginning map entry for the region.
 	 */
 
-#ifdef DIAGNOSTIC
-	if (start < vm_map_min(map) || end > vm_map_max(map))
-		panic("uvm_fault_unwire_locked: address out of range");
-#endif
+	KASSERT(start >= vm_map_min(map) && end <= vm_map_max(map));
 	if (uvm_map_lookup_entry(map, start, &entry) == FALSE)
 		panic("uvm_fault_unwire_locked: address not in map");
@@ -1886,16 +1883,11 @@ uvm_fault_unwire_locked(map, start, end)
 		 * make sure the current entry is for the address we're
 		 * dealing with.  if not, grab the next entry.
 		 */
 
-#ifdef DIAGNOSTIC
-		if (va < entry->start)
-			panic("uvm_fault_unwire_locked: hole 1");
-#endif
+		KASSERT(va >= entry->start);
 		if (va >= entry->end) {
-#ifdef DIAGNOSTIC
-			if (entry->next == &map->header ||
-			    entry->next->start > entry->end)
-				panic("uvm_fault_unwire_locked: hole 2");
-#endif
+			KASSERT(entry->next != &map->header &&
+			    entry->next->start <= entry->end);
 			entry = entry->next;
 		}

--- sys/uvm/uvm_map.h
+++ sys/uvm/uvm_map.h

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_map.h,v 1.23 2000/12/13 08:06:12 enami Exp $ */
+/* $NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $ */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -433,10 +433,7 @@ vm_map_lock(map)
 	    &map->flags_lock);
 	if (error) {
-#ifdef DIAGNOSTIC
-		if (error != ENOLCK)
-			panic("vm_map_lock: failed to get lock");
-#endif
+		KASSERT(error == ENOLCK);
 		goto try_again;
 	}

--- sys/uvm/uvm_mmap.c
+++ sys/uvm/uvm_mmap.c

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_mmap.c,v 1.48 2001/01/08 01:35:03 thorpej Exp $ */
+/* $NetBSD: uvm_mmap.c,v 1.49 2001/02/18 21:19:08 chs Exp $ */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -176,12 +176,9 @@ sys_mincore(p, v, retval)
 	for (/* nothing */;
 	     entry != &map->header && entry->start < end;
 	     entry = entry->next) {
-#ifdef DIAGNOSTIC
-		if (UVM_ET_ISSUBMAP(entry))
-			panic("mincore: user map has submap");
-		if (start < entry->start)
-			panic("mincore: hole");
-#endif
+		KASSERT(!UVM_ET_ISSUBMAP(entry));
+		KASSERT(start >= entry->start);
+
 		/* Make sure there are no holes. */
 		if (entry->end < end &&
 		    (entry->next == &map->header ||
@@ -197,10 +194,7 @@ sys_mincore(p, v, retval)
 		 * are always considered resident (mapped devices).
 		 */
 		if (UVM_ET_ISOBJ(entry)) {
-#ifdef DIAGNOSTIC
-			if (UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj))
-				panic("mincore: user map has kernel object");
-#endif
+			KASSERT(!UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj));
 			if (entry->object.uvm_obj->pgops->pgo_releasepg
 			    == NULL) {
 				for (/* nothing */; start < lim;
@@ -416,11 +410,6 @@ sys_mmap(p, v, retval)
 		 * so just change it to MAP_SHARED.
 		 */
 		if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
-#if defined(DIAGNOSTIC)
-			printf("WARNING: converted MAP_PRIVATE device mapping "
-			    "to MAP_SHARED (pid %d comm %s)\n", p->p_pid,
-			    p->p_comm);
-#endif
 			flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
 		}

--- sys/uvm/uvm_pglist.c
+++ sys/uvm/uvm_pglist.c

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_pglist.c,v 1.12 2000/11/25 06:28:00 chs Exp $ */
+/* $NetBSD: uvm_pglist.c,v 1.13 2001/02/18 21:19:08 chs Exp $ */
 
 /*-
  * Copyright (c) 1997 The NetBSD Foundation, Inc.
@@ -98,13 +98,8 @@ uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
 	vm_page_t tp;
 #endif
 
-#ifdef DIAGNOSTIC
-	if ((alignment & (alignment - 1)) != 0)
-		panic("uvm_pglistalloc: alignment must be power of 2");
-	if ((boundary & (boundary - 1)) != 0)
-		panic("uvm_pglistalloc: boundary must be power of 2");
-#endif
+	KASSERT((alignment & (alignment - 1)) == 0);
+	KASSERT((boundary & (boundary - 1)) == 0);
 
 	/*
 	 * Our allocations are always page granularity, so our alignment
@@ -265,11 +260,8 @@ uvm_pglistfree(list)
 	 */
 
 	s = uvm_lock_fpageq();
-	while ((m = list->tqh_first) != NULL) {
-#ifdef DIAGNOSTIC
-		if (m->pqflags & (PQ_ACTIVE|PQ_INACTIVE))
-			panic("uvm_pglistfree: active/inactive page!");
-#endif
+	while ((m = TAILQ_FIRST(list)) != NULL) {
+		KASSERT((m->pqflags & (PQ_ACTIVE|PQ_INACTIVE)) == 0);
 		TAILQ_REMOVE(list, m, pageq);
 		m->pqflags = PQ_FREE;
 		TAILQ_INSERT_TAIL(&uvm.page_free[

--- sys/uvm/uvm_swap.c
+++ sys/uvm/uvm_swap.c

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_swap.c,v 1.45 2001/02/12 11:50:50 pk Exp $ */
+/* $NetBSD: uvm_swap.c,v 1.46 2001/02/18 21:19:08 chs Exp $ */
 
 /*
  * Copyright (c) 1995, 1996, 1997 Matthew R. Green
@@ -999,13 +999,7 @@ swap_off(p, sdp)
 		simple_unlock(&uvm.swap_data_lock);
 		return ENOMEM;
 	}
 
-#ifdef DIAGNOSTIC
-	if (sdp->swd_npginuse != sdp->swd_npgbad) {
-		panic("swap_off: sdp %p - %d pages still in use (%d bad)\n",
-		    sdp, sdp->swd_npginuse, sdp->swd_npgbad);
-	}
-#endif
+	KASSERT(sdp->swd_npginuse == sdp->swd_npgbad);
 
 	/*
 	 * done with the vnode and saved creds.
@@ -1423,11 +1417,7 @@ sw_reg_iodone(bp)
 			biodone(pbp);
 		}
 	} else if (pbp->b_resid == 0) {
-#ifdef DIAGNOSTIC
-		if (vnx->vx_pending != 0)
-			panic("sw_reg_iodone: vnx pending: %d",vnx->vx_pending);
-#endif
+		KASSERT(vnx->vx_pending == 0);
 		if ((vnx->vx_flags & VX_BUSY) == 0) {
 			UVMHIST_LOG(pdhist, "  iodone error=%d !",
 			    pbp, vnx->vx_error, 0, 0);
@@ -1568,6 +1558,7 @@ uvm_swap_free(startslot, nslots)
 	/*
 	 * ignore attempts to free the "bad" slot.
 	 */
+
 	if (startslot == SWSLOT_BAD) {
 		return;
 	}
@@ -1577,30 +1568,19 @@ uvm_swap_free(startslot, nslots)
 	 * in the extent, and return.   must hold pri lock to do
 	 * lookup and access the extent.
 	 */
 
 	simple_lock(&uvm.swap_data_lock);
 	sdp = swapdrum_getsdp(startslot);
-#ifdef DIAGNOSTIC
-	if (uvmexp.nswapdev < 1)
-		panic("uvm_swap_free: uvmexp.nswapdev < 1\n");
-	if (sdp == NULL) {
-		printf("uvm_swap_free: startslot %d, nslots %d\n", startslot,
-		    nslots);
-		panic("uvm_swap_free: unmapped address\n");
-	}
-#endif
+	KASSERT(uvmexp.nswapdev >= 1);
+	KASSERT(sdp != NULL);
+	KASSERT(sdp->swd_npginuse >= nslots);
 	if (extent_free(sdp->swd_ex, startslot - sdp->swd_drumoffset, nslots,
 	    EX_MALLOCOK|EX_NOWAIT) != 0) {
 		printf("warning: resource shortage: %d pages of swap lost\n",
 		    nslots);
 	}
 	sdp->swd_npginuse -= nslots;
 	uvmexp.swpginuse -= nslots;
-#ifdef DIAGNOSTIC
-	if (sdp->swd_npginuse < 0)
-		panic("uvm_swap_free: inuse < 0");
-#endif
 	simple_unlock(&uvm.swap_data_lock);
 }
@@ -1639,11 +1619,7 @@ uvm_swap_get(page, swslot, flags)
 	int	result;
 
 	uvmexp.nswget++;
-#ifdef DIAGNOSTIC
-	if ((flags & PGO_SYNCIO) == 0)
-		printf("uvm_swap_get: ASYNC get requested?\n");
-#endif
+	KASSERT(flags & PGO_SYNCIO);
 
 	if (swslot == SWSLOT_BAD) {
 		return VM_PAGER_ERROR;
 	}