reduce the size of user coredump files by not dumping regions of
the address space that have never been touched (such as much of the
virtual space allocated for pthread stacks).
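
Note: the idea, in a minimal standalone sketch (not the committed kernel code; the `struct region` and `should_dump()` names below are made up for illustration), is that a map entry with neither a uvm_object nor an amap behind it has never been faulted on, so the coredump walker can skip it, along with read-only entries that have no amap (e.g. program text) and device mappings:

	/*
	 * Illustrative model of the per-entry skip decision introduced
	 * by this commit.  Hypothetical types/names; the real code
	 * inspects struct vm_map_entry inside the kernel.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct region {
		bool has_object;	/* backed by a uvm_object (e.g. a file) */
		bool has_amap;		/* has anonymous pages (has been touched) */
		bool writable;		/* VM_PROT_WRITE is set */
		bool device;		/* backing object is a device */
	};

	static bool
	should_dump(const struct region *r)
	{
		/* (1) never touched: no object and no amap behind it */
		if (!r->has_object && !r->has_amap)
			return false;
		/* (2) read-only with no amap, e.g. an executable text section */
		if (!r->writable && !r->has_amap)
			return false;
		/* (3) device mappings are never dumped */
		if (r->has_object && r->device)
			return false;
		return true;
	}

	int
	main(void)
	{
		struct region untouched_stack = { false, false, true, false };
		struct region text = { true, false, false, false };
		struct region heap = { false, true, true, false };

		printf("untouched stack: %d, text: %d, heap: %d\n",
		    should_dump(&untouched_stack), should_dump(&text),
		    should_dump(&heap));
		return 0;
	}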
chs 2005-01-21 03:24:40 +00:00
parent 1a5a156a7e
commit 7c203c91d4
1 changed file with 28 additions and 26 deletions

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_glue.c,v 1.81 2004/05/12 20:09:51 yamt Exp $ */
+/* $NetBSD: uvm_glue.c,v 1.82 2005/01/21 03:24:40 chs Exp $ */

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -67,7 +67,7 @@
 */

#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.81 2004/05/12 20:09:51 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.82 2005/01/21 03:24:40 chs Exp $");

#include "opt_kgdb.h"
#include "opt_kstack.h"
@@ -717,11 +717,8 @@ uvm_coredump_walkmap(p, vp, cred, func, cookie)
	struct vmspace *vm = p->p_vmspace;
	struct vm_map *map = &vm->vm_map;
	struct vm_map_entry *entry;
-	vaddr_t maxstack;
	int error;

-	maxstack = trunc_page(USRSTACK - ctob(vm->vm_ssize));
-
	entry = NULL;
	vm_map_lock_read(map);
	for (;;) {
@@ -732,38 +729,43 @@ uvm_coredump_walkmap(p, vp, cred, func, cookie)
		if (entry == &map->header)
			break;

-		/* Should never happen for a user process. */
-		if (UVM_ET_ISSUBMAP(entry))
-			panic("uvm_coredump_walkmap: user process with "
-			    "submap?");
-
		state.cookie = cookie;
		state.start = entry->start;
		state.end = entry->end;
		state.prot = entry->protection;
		state.flags = 0;

-		if (state.start >= VM_MAXUSER_ADDRESS)
-			continue;
-
-		if (state.end > VM_MAXUSER_ADDRESS)
-			state.end = VM_MAXUSER_ADDRESS;
+		/*
+		 * Dump the region unless one of the following is true:
+		 *
+		 * (1) the region has neither object nor amap behind it
+		 *     (ie. it has never been accessed).
+		 *
+		 * (2) the region has no amap and is read-only
+		 *     (eg. an executable text section).
+		 *
+		 * (3) the region's object is a device.
+		 */

+		KASSERT(!UVM_ET_ISSUBMAP(entry));
+		KASSERT(state.start < VM_MAXUSER_ADDRESS);
+		KASSERT(state.end <= VM_MAXUSER_ADDRESS);
+		if (entry->object.uvm_obj == NULL &&
+		    entry->aref.ar_amap == NULL) {
+			state.flags |= UVM_COREDUMP_NODUMP;
+		}
+		if ((entry->protection & VM_PROT_WRITE) == 0 &&
+		    entry->aref.ar_amap == NULL) {
+			state.flags |= UVM_COREDUMP_NODUMP;
+		}
+		if (entry->object.uvm_obj != NULL &&
+		    UVM_OBJ_IS_DEVICE(entry->object.uvm_obj)) {
+			state.flags |= UVM_COREDUMP_NODUMP;
+		}
		if (state.start >= (vaddr_t)vm->vm_maxsaddr) {
-			if (state.end <= maxstack)
-				continue;
-			if (state.start < maxstack)
-				state.start = maxstack;
			state.flags |= UVM_COREDUMP_STACK;
		}

-		if ((entry->protection & VM_PROT_WRITE) == 0)
-			state.flags |= UVM_COREDUMP_NODUMP;
-
-		if (entry->object.uvm_obj != NULL &&
-		    UVM_OBJ_IS_DEVICE(entry->object.uvm_obj))
-			state.flags |= UVM_COREDUMP_NODUMP;
-
		vm_map_unlock_read(map);
		error = (*func)(p, vp, cred, &state);
		if (error)