- Split crashdump code out into its own file.

- Remove NO_SPARSE_DUMP.
- Minor KNF, sprinkle static.
This commit is contained in:
ad 2008-01-12 20:03:41 +00:00
parent c03e2ac7c3
commit ee652e42b1
5 changed files with 708 additions and 623 deletions

View File

@ -1,4 +1,4 @@
# $NetBSD: files.i386,v 1.328 2008/01/09 14:23:47 xtraeme Exp $
# $NetBSD: files.i386,v 1.329 2008/01/12 20:03:41 ad Exp $
#
# new style config file for i386 architecture
#
@ -75,6 +75,7 @@ file arch/i386/i386/db_disasm.c ddb
file arch/i386/i386/db_interface.c ddb
file arch/i386/i386/db_memrw.c ddb | kgdb
file arch/i386/i386/db_trace.c ddb
file arch/i386/i386/dumpsys.c
file kern/subr_disk_mbr.c disk
file arch/i386/i386/gdt.c
file arch/i386/i386/i386func.S

View File

@ -0,0 +1,694 @@
/* $NetBSD: dumpsys.c,v 1.1 2008/01/12 20:03:41 ad Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998, 2000, 2004, 2006, 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Charles M. Hannum, by Jason R. Thorpe of the Numerical Aerospace
* Simulation Facility, NASA Ames Research Center.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Coyote Point Systems, Inc. which was written under contract to Coyote
* Point by Jed Davis and Devon O'Dell.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*-
* Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* William Jolitz.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)machdep.c 7.4 (Berkeley) 6/3/91
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dumpsys.c,v 1.1 2008/01/12 20:03:41 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kcore.h>
#include <sys/core.h>
#include <sys/conf.h>
#include <sys/exec.h>
#include <sys/exec_aout.h>
#include <machine/kcore.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_page.h>
/*
* Exports, needed by savecore, the debugger or elsewhere in the kernel.
*/
void dumpsys(void);
struct pcb dumppcb;
uint32_t dumpmag = 0x8fca0101; /* magic number */
int dumpsize; /* pages */
long dumplo; /* blocks */
int sparse_dump;
/*
* Module private.
*/
#define dump_headerbuf_size PAGE_SIZE
#define dump_headerbuf_end (dump_headerbuf + dump_headerbuf_size)
#define dump_headerbuf_avail (dump_headerbuf_end - dump_headerbuf_ptr)
#define BYTES_PER_DUMP PAGE_SIZE /* must be a multiple of pagesize */
static vaddr_t dumpspace;
static paddr_t max_paddr;
static uint8_t *sparse_dump_physmap;
static uint8_t *dump_headerbuf;
static uint8_t *dump_headerbuf_ptr;
static daddr_t dump_header_blkno;
static size_t dump_nmemsegs;
static size_t dump_npages;
static size_t dump_header_size;
static size_t dump_totalbytesleft;
static int cpu_dump(void);
static int cpu_dumpsize(void);
static u_long cpu_dump_mempagecnt(void);
static void dump_misc_init(void);
static void dump_seg_prep(void);
static int dump_seg_iter(int (*)(paddr_t, paddr_t));
static void sparse_dump_reset(void);
static void sparse_dump_mark(vaddr_t, vaddr_t, int);
static void cpu_dump_prep_sparse(void);
static void dump_header_start(void);
static int dump_header_flush(void);
static int dump_header_addbytes(const void*, size_t);
static int dump_header_addseg(paddr_t, paddr_t);
static int dump_header_finish(void);
static int dump_seg_count_range(paddr_t, paddr_t);
static int dumpsys_seg(paddr_t, paddr_t);
/*
* From machdep.c.
*/
extern phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX];
extern int mem_cluster_cnt;
/*
 * dumpsys: write a crash dump to the configured dump device.
 *
 * Called with memory management mostly shut down (via cpu_reboot or the
 * auto-restart path).  Saves the current register context into dumppcb,
 * sizes and lays out the dump (headers first, then memory segments), and
 * reports progress/errors on the console.  Returns without dumping if no
 * usable dump device is configured.
 */
void
dumpsys(void)
{
	const struct bdevsw *bdev;
	int dumpend, psize;
	int error;

	/* Save registers. */
	savectx(&dumppcb);

	if (dumpdev == NODEV)
		return;

	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL || bdev->d_psize == NULL)
		return;

	/*
	 * For dumps during autoconfiguration: the dump device may not
	 * have been sized yet, so (re)run cpu_dumpconf if needed.
	 */
	if (dumpsize == 0)
		cpu_dumpconf();
	if (dumplo <= 0 || dumpsize == 0) {
		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
		    minor(dumpdev));
		return;
	}
	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
	    minor(dumpdev), dumplo);

	psize = (*bdev->d_psize)(dumpdev);
	printf("dump ");
	if (psize == -1) {
		printf("area unavailable\n");
		return;
	}

#if 0	/* XXX this doesn't work.  grr. */
	/* toss any characters present prior to dump */
	while (sget() != NULL); /*syscons and pccons differ */
#endif

	/* Decide what is dumped and how big the headers are. */
	dump_seg_prep();
	dumpend = dumplo + btodb(dump_header_size) + ctod(dump_npages);
	if (dumpend > psize) {
		printf("failed: insufficient space (%d < %d)\n",
		    psize, dumpend);
		goto failed;
	}

	/* Write the kcore headers (possibly many segment descriptors). */
	dump_header_start();
	if ((error = cpu_dump()) != 0)
		goto err;
	if ((error = dump_header_finish()) != 0)
		goto err;

	if (dump_header_blkno != dumplo + btodb(dump_header_size)) {
		printf("BAD header size (%ld [written] != %ld [expected])\n",
		    (long)(dump_header_blkno - dumplo),
		    (long)btodb(dump_header_size));
		goto failed;
	}

	/* Now write the memory itself, printing a MB countdown. */
	dump_totalbytesleft = roundup(ptoa(dump_npages), BYTES_PER_DUMP);
	error = dump_seg_iter(dumpsys_seg);

	if (error == 0 && dump_header_blkno != dumpend) {
		/*
		 * Fix: the written/expected values were previously printed
		 * in swapped order; dump_header_blkno reflects what was
		 * actually written, dumpend what was expected.
		 */
		printf("BAD dump size (%ld [written] != %ld [expected])\n",
		    (long)(dump_header_blkno - dumplo),
		    (long)(dumpend - dumplo));
		goto failed;
	}

 err:
	switch (error) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	case EINTR:
		printf("aborted from console\n");
		break;

	case 0:
		printf("succeeded\n");
		break;

	default:
		printf("error %d\n", error);
		break;
	}

 failed:
	printf("\n\n");
	delay(5000000);		/* 5 seconds */
}
/*
* This is called by main to set dumplo and dumpsize.
* Dumps always skip the first PAGE_SIZE of disk space
* in case there might be a disk label stored there.
* If there is extra space, put dump at the end to
* reduce the chance that swapping trashes it.
*
* Sparse dumps can't be placed as close to the end as possible, because
* savecore(8) has to know where to start reading in the dump device
* before it has access to any of the crashed system's state.
*
* Note also that a sparse dump will never be larger than a full one:
* in order to add a phys_ram_seg_t to the header, at least one page
* must be removed.
*/
void
cpu_dumpconf(void)
{
	const struct bdevsw *bdev;
	int nblks, dumpblks;	/* size of dump area */

	if (dumpdev == NODEV)
		goto bad;
	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL) {
		/* Stale device number: forget it so we don't retry. */
		dumpdev = NODEV;
		goto bad;
	}
	if (bdev->d_psize == NULL)
		goto bad;
	/* Partition must be larger than one page (room for the label). */
	nblks = (*bdev->d_psize)(dumpdev);
	if (nblks <= ctod(1))
		goto bad;

	/* Full-dump header size in disk blocks; < 0 means it won't fit. */
	dumpblks = cpu_dumpsize();
	if (dumpblks < 0)
		goto bad;
	dumpblks += ctod(cpu_dump_mempagecnt());

	/* If dump won't fit (incl. room for possible label): */
	if (dumpblks > (nblks - ctod(1))) {
		/* A sparse dump might (and hopefully will) fit. */
		dumplo = ctod(1);
	} else {
		/* Put dump at end of partition */
		dumplo = nblks - dumpblks;
	}

	/* dumpsize is in page units, and doesn't include headers. */
	dumpsize = cpu_dump_mempagecnt();

	/* Now that we've decided this will work, init ancillary stuff. */
	dump_misc_init();
	return;

	/* Any failure above disables dumping entirely. */
 bad:
	dumpsize = 0;
}
/*
 * Claim one transfer window's worth of kernel VA for dumpsys_seg to
 * map physical pages into, and hand back the first unused address.
 */
vaddr_t
reserve_dumppages(vaddr_t p)
{

	dumpspace = p;
	return p + BYTES_PER_DUMP;
}
/*
* Perform assorted dump-related initialization tasks. Assumes that
* the maximum physical memory address will not increase afterwards.
*/
static void
dump_misc_init(void)
{
	int i;

	/* Idempotent: dump_headerbuf doubles as the "done" flag. */
	if (dump_headerbuf != NULL)
		return;	/* already called */

	/* Find the highest physical address backed by a memory cluster. */
	for (i = 0; i < mem_cluster_cnt; ++i) {
		paddr_t top = mem_clusters[i].start + mem_clusters[i].size;
		if (max_paddr < top)
			max_paddr = top;
	}
#ifdef DEBUG
	printf("dump_misc_init: max_paddr = 0x%lx\n",
	    (unsigned long)max_paddr);
#endif

	/* Sparse-dump bitmap: one bit per physical page, page-rounded. */
	sparse_dump_physmap = (void*)uvm_km_alloc(kernel_map,
	    roundup(max_paddr / (PAGE_SIZE * NBBY), PAGE_SIZE),
	    PAGE_SIZE, UVM_KMF_WIRED|UVM_KMF_ZERO);
	/* Staging buffer for incremental header writes. */
	dump_headerbuf = (void*)uvm_km_alloc(kernel_map,
	    dump_headerbuf_size,
	    PAGE_SIZE, UVM_KMF_WIRED|UVM_KMF_ZERO);
	/* XXXjld should check for failure here, disable dumps if so. */
}
/*
* Clear the set of pages to include in a sparse dump.
*/
static void
sparse_dump_reset(void)
{
	size_t maplen;

	/* Same page-rounded length as allocated in dump_misc_init. */
	maplen = roundup(max_paddr / (PAGE_SIZE * NBBY), PAGE_SIZE);
	memset(sparse_dump_physmap, 0, maplen);
}
/*
* Include or exclude pages in a sparse dump, by half-open virtual
* address interval (which may wrap around the end of the space).
*/
static void
sparse_dump_mark(vaddr_t vbegin, vaddr_t vend, int includep)
{
	pmap_t pmap;
	paddr_t p;
	vaddr_t v;

	/*
	 * If a partial page is called for, the whole page must be included.
	 * (Conversely, when excluding, partial pages stay included.)
	 */
	if (includep) {
		vbegin = rounddown(vbegin, PAGE_SIZE);
		vend = roundup(vend, PAGE_SIZE);
	} else {
		vbegin = roundup(vbegin, PAGE_SIZE);
		vend = rounddown(vend, PAGE_SIZE);
	}

	/*
	 * NB: "v != vend" (not "<") because the interval may wrap around
	 * the end of the address space.  Unmapped pages are skipped.
	 */
	pmap = pmap_kernel();
	for (v = vbegin; v != vend; v += PAGE_SIZE) {
		if (pmap_extract(pmap, v, &p)) {
			if (includep)
				setbit(sparse_dump_physmap, p/PAGE_SIZE);
			else
				clrbit(sparse_dump_physmap, p/PAGE_SIZE);
		}
	}
}
/*
* Machine-dependently decides on the contents of a sparse dump, using
* the above.
*/
static void
cpu_dump_prep_sparse(void)
{
	/* Start from an empty page set. */
	sparse_dump_reset();
	/*
	 * Mark everything mapped from PTE_BASE to the top of kernel VA
	 * (vend == 0 wraps; see sparse_dump_mark).
	 */
	/* XXX could the alternate recursive page table be skipped? */
	sparse_dump_mark((vaddr_t)PTE_BASE, 0, 1);
	/* Memory for I/O buffers could be unmarked here, for example. */
	/* The kernel text could also be unmarked, but gdb would be upset. */
}
/*
* Abstractly iterate over the collection of memory segments to be
* dumped; the callback lacks the customary environment-pointer
* argument because none of the current users really need one.
*
* To be used only after dump_seg_prep is called to set things up.
*/
static int
dump_seg_iter(int (*callback)(paddr_t, paddr_t))
{
	int error, i;

	/* Invoke the callback; propagate its first nonzero return. */
#define CALLBACK(start,size) do {		\
	error = callback(start,size);		\
	if (error)				\
		return error;			\
} while(0)

	for (i = 0; i < mem_cluster_cnt; ++i) {
		/*
		 * The bitmap is scanned within each memory segment,
		 * rather than over its entire domain, in case any
		 * pages outside of the memory proper have been mapped
		 * into kva; they might be devices that wouldn't
		 * appreciate being arbitrarily read, and including
		 * them could also break the assumption that a sparse
		 * dump will always be smaller than a full one.
		 */
		if (sparse_dump) {
			paddr_t p, start, end;
			int lastset;

			start = mem_clusters[i].start;
			end = start + mem_clusters[i].size;
			start = rounddown(start, PAGE_SIZE); /* unnecessary? */
			lastset = 0;
			/*
			 * Run-length scan: emit one callback per maximal
			 * run of set bits (contiguous marked pages).
			 */
			for (p = start; p < end; p += PAGE_SIZE) {
				int thisset = isset(sparse_dump_physmap,
				    p/PAGE_SIZE);

				if (!lastset && thisset)
					start = p;
				if (lastset && !thisset)
					CALLBACK(start, p - start);
				lastset = thisset;
			}
			/* Flush a run that extends to the cluster's end. */
			if (lastset)
				CALLBACK(start, p - start);
		} else
			CALLBACK(mem_clusters[i].start, mem_clusters[i].size);
	}

	return 0;
#undef CALLBACK
}
/*
* Prepare for an impending core dump: decide what's being dumped and
* how much space it will take up.
*/
static void
dump_seg_prep(void)
{
if (sparse_dump)
cpu_dump_prep_sparse();
dump_nmemsegs = 0;
dump_npages = 0;
dump_seg_iter(dump_seg_count_range);
dump_header_size = ALIGN(sizeof(kcore_seg_t)) +
ALIGN(sizeof(cpu_kcore_hdr_t)) +
ALIGN(dump_nmemsegs * sizeof(phys_ram_seg_t));
dump_header_size = roundup(dump_header_size, dbtob(1));
/*
* savecore(8) will read this to decide how many pages to
* copy, and cpu_dumpconf has already used the pessimistic
* value to set dumplo, so it's time to tell the truth.
*/
dumpsize = dump_npages; /* XXX could these just be one variable? */
}
/*
 * Tally callback for dump_seg_prep: one more segment, plus however
 * many pages it spans.  Never fails.
 */
static int
dump_seg_count_range(paddr_t start, paddr_t size)
{

	dump_nmemsegs++;
	dump_npages += size / PAGE_SIZE;
	return 0;
}
/*
* A sparse dump's header may be rather large, due to the number of
* "segments" emitted. These routines manage a simple output buffer,
* so that the header can be written to disk incrementally.
*/
/*
 * Reset the header output state: writes begin at the start of the
 * dump area with an empty staging buffer.
 */
static void
dump_header_start(void)
{

	dump_header_blkno = dumplo;
	dump_headerbuf_ptr = dump_headerbuf;
}
/*
 * Push the buffered header bytes out to the dump device, padding the
 * residual up to a whole number of disk blocks, then reset the buffer.
 */
static int
dump_header_flush(void)
{
	const struct bdevsw *bd;
	size_t nwrite;
	int rv;

	bd = bdevsw_lookup(dumpdev);
	nwrite = roundup(dump_headerbuf_ptr - dump_headerbuf, dbtob(1));
	rv = bd->d_dump(dumpdev, dump_header_blkno, dump_headerbuf, nwrite);
	dump_header_blkno += btodb(nwrite);
	dump_headerbuf_ptr = dump_headerbuf;
	return rv;
}
static int
dump_header_addbytes(const void* vptr, size_t n)
{
	const char *ptr = vptr;
	int error;

	/*
	 * While the data won't fit, fill the buffer exactly and flush.
	 * NB: dump_headerbuf_avail is a macro over dump_headerbuf_ptr,
	 * so the statement order here matters: the pointer must not be
	 * advanced until after the copy and the n/ptr bookkeeping.
	 */
	while (n > dump_headerbuf_avail) {
		memcpy(dump_headerbuf_ptr, ptr, dump_headerbuf_avail);
		ptr += dump_headerbuf_avail;
		n -= dump_headerbuf_avail;
		dump_headerbuf_ptr = dump_headerbuf_end;
		error = dump_header_flush();
		if (error)
			return error;
	}
	/* The remainder fits; stage it for a later flush. */
	memcpy(dump_headerbuf_ptr, ptr, n);
	dump_headerbuf_ptr += n;

	return 0;
}
/*
 * Append one physical memory segment descriptor to the dump header.
 * Used as the dump_seg_iter callback from cpu_dump.
 */
static int
dump_header_addseg(paddr_t start, paddr_t size)
{
	phys_ram_seg_t seg;

	seg.start = start;
	seg.size = size;
	return dump_header_addbytes(&seg, sizeof(seg));
}
static int
dump_header_finish(void)
{
	/* Zero-fill the unused tail of the buffer, then push it out. */
	memset(dump_headerbuf_ptr, 0, dump_headerbuf_avail);
	return dump_header_flush();
}
/*
* cpu_dumpsize: calculate size of machine-dependent kernel core dump headers
* for a full (non-sparse) dump.
*/
static int
cpu_dumpsize(void)
{
	int hdrsize;

	hdrsize = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)) +
	    ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t));
	/* A full dump's header must fit in exactly one disk block. */
	if (roundup(hdrsize, dbtob(1)) != dbtob(1))
		return (-1);
	return (1);
}
/*
* cpu_dump_mempagecnt: calculate the size of RAM (in pages) to be dumped
* for a full (non-sparse) dump.
*/
static u_long
cpu_dump_mempagecnt(void)
{
	u_long total;
	int i;

	/* Sum the page counts of all physical memory clusters. */
	total = 0;
	for (i = 0; i < mem_cluster_cnt; i++)
		total += atop(mem_clusters[i].size);
	return total;
}
/*
* cpu_dump: dump the machine-dependent kernel core dump headers.
*/
static int
cpu_dump(void)
{
	int (*dump)(dev_t, daddr_t, void *, size_t);
	kcore_seg_t seg;
	cpu_kcore_hdr_t cpuhdr;
	const struct bdevsw *bdev;

	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL)
		return (ENXIO);
	/* NOTE(review): dump is looked up but the writes below go through
	 * the header buffer, not this pointer. */
	dump = bdev->d_dump;

	/*
	 * Generate a segment header.  c_size covers everything in the
	 * header after the kcore_seg_t itself.
	 */
	CORE_SETMAGIC(seg, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	seg.c_size = dump_header_size - ALIGN(sizeof(seg));
	(void)dump_header_addbytes(&seg, ALIGN(sizeof(seg)));

	/*
	 * Add the machine-dependent header info.
	 */
	cpuhdr.pdppaddr = PDPpaddr;
	cpuhdr.nmemsegs = dump_nmemsegs;
	(void)dump_header_addbytes(&cpuhdr, ALIGN(sizeof(cpuhdr)));

	/*
	 * Write out the memory segment descriptors.
	 */
	return dump_seg_iter(dump_header_addseg);
}
static int
dumpsys_seg(paddr_t maddr, paddr_t bytes)
{
	u_long i, m, n;
	daddr_t blkno;
	const struct bdevsw *bdev;
	int (*dump)(dev_t, daddr_t, void *, size_t);
	int error;

	bdev = bdevsw_lookup(dumpdev);
	dump = bdev->d_dump;

	/* Continue writing where the previous segment (or header) ended. */
	blkno = dump_header_blkno;
	for (i = 0; i < bytes; i += n, dump_totalbytesleft -= n) {
		/* Print out how many MBs we have left to go. */
		if ((dump_totalbytesleft % (1024*1024)) == 0)
			printf("%lu ", (unsigned long)
			    (dump_totalbytesleft / (1024 * 1024)));

		/* Limit size for next transfer. */
		n = bytes - i;
		if (n > BYTES_PER_DUMP)
			n = BYTES_PER_DUMP;

		/*
		 * Map the next chunk of physical memory into the dumpspace
		 * window (read-only), page by page.
		 */
		for (m = 0; m < n; m += NBPG)
			pmap_kenter_pa(dumpspace + m, maddr + m,
			    VM_PROT_READ);
		pmap_update(pmap_kernel());

		error = (*dump)(dumpdev, blkno, (void *)dumpspace, n);
		if (error)
			return error;
		maddr += n;
		blkno += btodb(n);			/* XXX? */

#if 0	/* XXX this doesn't work.  grr. */
		/* operator aborting dump? */
		if (sget() != NULL)
			return EINTR;
#endif
	}
	/* Record our position for the next segment. */
	dump_header_blkno = blkno;

	return 0;
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.622 2008/01/11 20:00:14 bouyer Exp $ */
/* $NetBSD: machdep.c,v 1.623 2008/01/12 20:03:42 ad Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998, 2000, 2004, 2006, 2008 The NetBSD Foundation, Inc.
@ -8,10 +8,6 @@
* by Charles M. Hannum, by Jason R. Thorpe of the Numerical Aerospace
* Simulation Facility, NASA Ames Research Center and by Julio M. Merino Vidal.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Coyote Point Systems, Inc. which was written under contract to Coyote
* Point by Jed Davis and Devon O'Dell.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@ -76,7 +72,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.622 2008/01/11 20:00:14 bouyer Exp $");
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.623 2008/01/12 20:03:42 ad Exp $");
#include "opt_beep.h"
#include "opt_compat_ibcs2.h"
@ -253,26 +249,6 @@ static int exec_nomid(struct lwp *, struct exec_package *);
#endif
int physmem;
int dumpmem_low;
int dumpmem_high;
#ifndef NO_SPARSE_DUMP
int sparse_dump = 0;
paddr_t max_paddr = 0;
unsigned char *sparse_dump_physmap;
#endif
char *dump_headerbuf, *dump_headerbuf_ptr;
#define dump_headerbuf_size PAGE_SIZE
#define dump_headerbuf_end (dump_headerbuf + dump_headerbuf_size)
#define dump_headerbuf_avail (dump_headerbuf_end - dump_headerbuf_ptr)
daddr_t dump_header_blkno;
size_t dump_nmemsegs;
size_t dump_npages;
size_t dump_header_size;
size_t dump_totalbytesleft;
unsigned int cpu_feature;
unsigned int cpu_feature2;
@ -325,30 +301,6 @@ void (*initclock_func)(void) = i8254_initclocks;
phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX];
int mem_cluster_cnt;
int cpu_dump(void);
int cpu_dumpsize(void);
u_long cpu_dump_mempagecnt(void);
void dumpsys(void);
void dump_misc_init(void);
void dump_seg_prep(void);
int dump_seg_iter(int (*)(paddr_t, paddr_t));
#ifndef NO_SPARSE_DUMP
void sparse_dump_reset(void);
void sparse_dump_mark(vaddr_t, vaddr_t, int);
void cpu_dump_prep_sparse(void);
#endif
void dump_header_start(void);
int dump_header_flush(void);
int dump_header_addbytes(const void*, size_t);
int dump_header_addseg(paddr_t, paddr_t);
int dump_header_finish(void);
int dump_seg_count_range(paddr_t, paddr_t);
int dumpsys_seg(paddr_t, paddr_t);
void init386(paddr_t);
void initgdt(union descriptor *);
@ -834,15 +786,11 @@ SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
CTLTYPE_INT, "tm_longrun_percentage", NULL,
sysctl_machdep_tm_longrun, 0, NULL, 0,
CTL_MACHDEP, CPU_TMLR_PERCENTAGE, CTL_EOL);
#ifndef NO_SPARSE_DUMP
/* XXXjld Does this really belong under machdep, and not e.g. kern? */
sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
CTLTYPE_INT, "sparse_dump", NULL,
NULL, 0, &sparse_dump, 0,
CTL_MACHDEP, CTL_CREATE, CTL_EOL);
#endif
}
void *
@ -974,7 +922,6 @@ sendsig(const ksiginfo_t *ksi, const sigset_t *mask)
}
int waittime = -1;
struct pcb dumppcb;
void
cpu_reboot(int howto, char *bootstr)
@ -1081,571 +1028,6 @@ haltsys:
/*NOTREACHED*/
}
/*
* Perform assorted dump-related initialization tasks. Assumes that
* the maximum physical memory address will not increase afterwards.
*/
void
dump_misc_init(void)
{
#ifndef NO_SPARSE_DUMP
int i;
#endif
if (dump_headerbuf != NULL)
return; /* already called */
#ifndef NO_SPARSE_DUMP
for (i = 0; i < mem_cluster_cnt; ++i) {
paddr_t top = mem_clusters[i].start + mem_clusters[i].size;
if (max_paddr < top)
max_paddr = top;
}
#ifdef DEBUG
printf("dump_misc_init: max_paddr = 0x%lx\n",
(unsigned long)max_paddr);
#endif
sparse_dump_physmap = (void*)uvm_km_alloc(kernel_map,
roundup(max_paddr / (PAGE_SIZE * NBBY), PAGE_SIZE),
PAGE_SIZE, UVM_KMF_WIRED|UVM_KMF_ZERO);
#endif
dump_headerbuf = (void*)uvm_km_alloc(kernel_map,
dump_headerbuf_size,
PAGE_SIZE, UVM_KMF_WIRED|UVM_KMF_ZERO);
/* XXXjld should check for failure here, disable dumps if so. */
}
#ifndef NO_SPARSE_DUMP
/*
* Clear the set of pages to include in a sparse dump.
*/
void
sparse_dump_reset(void)
{
memset(sparse_dump_physmap, 0,
roundup(max_paddr / (PAGE_SIZE * NBBY), PAGE_SIZE));
}
/*
* Include or exclude pages in a sparse dump, by half-open virtual
* address interval (which may wrap around the end of the space).
*/
void
sparse_dump_mark(vaddr_t vbegin, vaddr_t vend, int includep)
{
pmap_t pmap;
paddr_t p;
vaddr_t v;
/*
* If a partial page is called for, the whole page must be included.
*/
if (includep) {
vbegin = rounddown(vbegin, PAGE_SIZE);
vend = roundup(vend, PAGE_SIZE);
} else {
vbegin = roundup(vbegin, PAGE_SIZE);
vend = rounddown(vend, PAGE_SIZE);
}
pmap = pmap_kernel();
for (v = vbegin; v != vend; v += PAGE_SIZE) {
if (pmap_extract(pmap, v, &p)) {
if (includep)
setbit(sparse_dump_physmap, p/PAGE_SIZE);
else
clrbit(sparse_dump_physmap, p/PAGE_SIZE);
}
}
}
/*
* Machine-dependently decides on the contents of a sparse dump, using
* the above.
*/
void
cpu_dump_prep_sparse(void)
{
sparse_dump_reset();
/* XXX could the alternate recursive page table be skipped? */
sparse_dump_mark((vaddr_t)PTE_BASE, 0, 1);
/* Memory for I/O buffers could be unmarked here, for example. */
/* The kernel text could also be unmarked, but gdb would be upset. */
}
#endif
/*
* Abstractly iterate over the collection of memory segments to be
* dumped; the callback lacks the customary environment-pointer
* argument because none of the current users really need one.
*
* To be used only after dump_seg_prep is called to set things up.
*/
int
dump_seg_iter(int (*callback)(paddr_t, paddr_t))
{
int error, i;
#define CALLBACK(start,size) do { \
error = callback(start,size); \
if (error) \
return error; \
} while(0)
for (i = 0; i < mem_cluster_cnt; ++i) {
#ifndef NO_SPARSE_DUMP
/*
* The bitmap is scanned within each memory segment,
* rather than over its entire domain, in case any
* pages outside of the memory proper have been mapped
* into kva; they might be devices that wouldn't
* appreciate being arbitrarily read, and including
* them could also break the assumption that a sparse
* dump will always be smaller than a full one.
*/
if (sparse_dump) {
paddr_t p, start, end;
int lastset;
start = mem_clusters[i].start;
end = start + mem_clusters[i].size;
start = rounddown(start, PAGE_SIZE); /* unnecessary? */
lastset = 0;
for (p = start; p < end; p += PAGE_SIZE) {
int thisset = isset(sparse_dump_physmap,
p/PAGE_SIZE);
if (!lastset && thisset)
start = p;
if (lastset && !thisset)
CALLBACK(start, p - start);
lastset = thisset;
}
if (lastset)
CALLBACK(start, p - start);
} else
#endif
CALLBACK(mem_clusters[i].start, mem_clusters[i].size);
}
return 0;
#undef CALLBACK
}
/*
* Prepare for an impending core dump: decide what's being dumped and
* how much space it will take up.
*/
void
dump_seg_prep(void)
{
#ifndef NO_SPARSE_DUMP
if (sparse_dump)
cpu_dump_prep_sparse();
#endif
dump_nmemsegs = 0;
dump_npages = 0;
dump_seg_iter(dump_seg_count_range);
dump_header_size = ALIGN(sizeof(kcore_seg_t)) +
ALIGN(sizeof(cpu_kcore_hdr_t)) +
ALIGN(dump_nmemsegs * sizeof(phys_ram_seg_t));
dump_header_size = roundup(dump_header_size, dbtob(1));
/*
* savecore(8) will read this to decide how many pages to
* copy, and cpu_dumpconf has already used the pessimistic
* value to set dumplo, so it's time to tell the truth.
*/
dumpsize = dump_npages; /* XXX could these just be one variable? */
}
int
dump_seg_count_range(paddr_t start, paddr_t size)
{
++dump_nmemsegs;
dump_npages += size / PAGE_SIZE;
return 0;
}
/*
* A sparse dump's header may be rather large, due to the number of
* "segments" emitted. These routines manage a simple output buffer,
* so that the header can be written to disk incrementally.
*/
void
dump_header_start(void)
{
dump_headerbuf_ptr = dump_headerbuf;
dump_header_blkno = dumplo;
}
int
dump_header_flush(void)
{
const struct bdevsw *bdev;
size_t to_write;
int error;
bdev = bdevsw_lookup(dumpdev);
to_write = roundup(dump_headerbuf_ptr - dump_headerbuf, dbtob(1));
error = bdev->d_dump(dumpdev, dump_header_blkno,
dump_headerbuf, to_write);
dump_header_blkno += btodb(to_write);
dump_headerbuf_ptr = dump_headerbuf;
return error;
}
int
dump_header_addbytes(const void* vptr, size_t n)
{
const char* ptr = vptr;
int error;
while (n > dump_headerbuf_avail) {
memcpy(dump_headerbuf_ptr, ptr, dump_headerbuf_avail);
ptr += dump_headerbuf_avail;
n -= dump_headerbuf_avail;
dump_headerbuf_ptr = dump_headerbuf_end;
error = dump_header_flush();
if (error)
return error;
}
memcpy(dump_headerbuf_ptr, ptr, n);
dump_headerbuf_ptr += n;
return 0;
}
int
dump_header_addseg(paddr_t start, paddr_t size)
{
phys_ram_seg_t seg = { start, size };
return dump_header_addbytes(&seg, sizeof(seg));
}
int
dump_header_finish(void)
{
memset(dump_headerbuf_ptr, 0, dump_headerbuf_avail);
return dump_header_flush();
}
/*
* These variables are needed by /sbin/savecore
*/
uint32_t dumpmag = 0x8fca0101; /* magic number */
int dumpsize = 0; /* pages */
long dumplo = 0; /* blocks */
/*
* cpu_dumpsize: calculate size of machine-dependent kernel core dump headers
* for a full (non-sparse) dump.
*/
int
cpu_dumpsize()
{
int size;
size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)) +
ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t));
if (roundup(size, dbtob(1)) != dbtob(1))
return (-1);
return (1);
}
/*
* cpu_dump_mempagecnt: calculate the size of RAM (in pages) to be dumped
* for a full (non-sparse) dump.
*/
u_long
cpu_dump_mempagecnt()
{
u_long i, n;
n = 0;
for (i = 0; i < mem_cluster_cnt; i++)
n += atop(mem_clusters[i].size);
return (n);
}
/*
* cpu_dump: dump the machine-dependent kernel core dump headers.
*/
int
cpu_dump()
{
int (*dump)(dev_t, daddr_t, void *, size_t);
kcore_seg_t seg;
cpu_kcore_hdr_t cpuhdr;
const struct bdevsw *bdev;
bdev = bdevsw_lookup(dumpdev);
if (bdev == NULL)
return (ENXIO);
dump = bdev->d_dump;
/*
* Generate a segment header.
*/
CORE_SETMAGIC(seg, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
seg.c_size = dump_header_size - ALIGN(sizeof(seg));
(void)dump_header_addbytes(&seg, ALIGN(sizeof(seg)));
/*
* Add the machine-dependent header info.
*/
cpuhdr.pdppaddr = PDPpaddr;
cpuhdr.nmemsegs = dump_nmemsegs;
(void)dump_header_addbytes(&cpuhdr, ALIGN(sizeof(cpuhdr)));
/*
* Write out the memory segment descriptors.
*/
return dump_seg_iter(dump_header_addseg);
}
/*
* This is called by main to set dumplo and dumpsize.
* Dumps always skip the first PAGE_SIZE of disk space
* in case there might be a disk label stored there.
* If there is extra space, put dump at the end to
* reduce the chance that swapping trashes it.
*
* Sparse dumps can't placed as close to the end as possible, because
* savecore(8) has to know where to start reading in the dump device
* before it has access to any of the crashed system's state.
*
* Note also that a sparse dump will never be larger than a full one:
* in order to add a phys_ram_seg_t to the header, at least one page
* must be removed.
*/
void
cpu_dumpconf()
{
const struct bdevsw *bdev;
int nblks, dumpblks; /* size of dump area */
if (dumpdev == NODEV)
goto bad;
bdev = bdevsw_lookup(dumpdev);
if (bdev == NULL) {
dumpdev = NODEV;
goto bad;
}
if (bdev->d_psize == NULL)
goto bad;
nblks = (*bdev->d_psize)(dumpdev);
if (nblks <= ctod(1))
goto bad;
dumpblks = cpu_dumpsize();
if (dumpblks < 0)
goto bad;
dumpblks += ctod(cpu_dump_mempagecnt());
/* If dump won't fit (incl. room for possible label): */
if (dumpblks > (nblks - ctod(1))) {
#ifndef NO_SPARSE_DUMP
/* A sparse dump might (and hopefully will) fit. */
dumplo = ctod(1);
#else
/* But if we're not configured for that, punt. */
goto bad;
#endif
} else {
/* Put dump at end of partition */
dumplo = nblks - dumpblks;
}
/* dumpsize is in page units, and doesn't include headers. */
dumpsize = cpu_dump_mempagecnt();
/* Now that we've decided this will work, init ancillary stuff. */
dump_misc_init();
return;
bad:
dumpsize = 0;
}
/*
* Doadump comes here after turning off memory management and
* getting on the dump stack, either when called above, or by
* the auto-restart code.
*/
#define BYTES_PER_DUMP PAGE_SIZE /* must be a multiple of pagesize XXX small */
static vaddr_t dumpspace;
vaddr_t
reserve_dumppages(vaddr_t p)
{
dumpspace = p;
return (p + BYTES_PER_DUMP);
}
int
dumpsys_seg(paddr_t maddr, paddr_t bytes)
{
u_long i, m, n;
daddr_t blkno;
const struct bdevsw *bdev;
int (*dump)(dev_t, daddr_t, void *, size_t);
int error;
bdev = bdevsw_lookup(dumpdev);
dump = bdev->d_dump;
blkno = dump_header_blkno;
for (i = 0; i < bytes; i += n, dump_totalbytesleft -= n) {
/* Print out how many MBs we have left to go. */
if ((dump_totalbytesleft % (1024*1024)) == 0)
printf("%lu ", (unsigned long)
(dump_totalbytesleft / (1024 * 1024)));
/* Limit size for next transfer. */
n = bytes - i;
if (n > BYTES_PER_DUMP)
n = BYTES_PER_DUMP;
for (m = 0; m < n; m += NBPG)
pmap_kenter_pa(dumpspace + m, maddr + m,
VM_PROT_READ);
pmap_update(pmap_kernel());
error = (*dump)(dumpdev, blkno, (void *)dumpspace, n);
if (error)
return error;
maddr += n;
blkno += btodb(n); /* XXX? */
#if 0 /* XXX this doesn't work. grr. */
/* operator aborting dump? */
if (sget() != NULL)
return EINTR;
#endif
}
dump_header_blkno = blkno;
return 0;
}
void
dumpsys()
{
const struct bdevsw *bdev;
int dumpend, psize;
int error;
/* Save registers. */
savectx(&dumppcb);
if (dumpdev == NODEV)
return;
bdev = bdevsw_lookup(dumpdev);
if (bdev == NULL || bdev->d_psize == NULL)
return;
/*
* For dumps during autoconfiguration,
* if dump device has already configured...
*/
if (dumpsize == 0)
cpu_dumpconf();
if (dumplo <= 0 || dumpsize == 0) {
printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
minor(dumpdev));
return;
}
printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
minor(dumpdev), dumplo);
psize = (*bdev->d_psize)(dumpdev);
printf("dump ");
if (psize == -1) {
printf("area unavailable\n");
return;
}
#if 0 /* XXX this doesn't work. grr. */
/* toss any characters present prior to dump */
while (sget() != NULL); /*syscons and pccons differ */
#endif
dump_seg_prep();
dumpend = dumplo + btodb(dump_header_size) + ctod(dump_npages);
if (dumpend > psize) {
printf("failed: insufficient space (%d < %d)\n",
psize, dumpend);
goto failed;
}
dump_header_start();
if ((error = cpu_dump()) != 0)
goto err;
if ((error = dump_header_finish()) != 0)
goto err;
if (dump_header_blkno != dumplo + btodb(dump_header_size)) {
printf("BAD header size (%ld [written] != %ld [expected])\n",
(long)(dump_header_blkno - dumplo),
(long)btodb(dump_header_size));
goto failed;
}
dump_totalbytesleft = roundup(ptoa(dump_npages), BYTES_PER_DUMP);
error = dump_seg_iter(dumpsys_seg);
if (error == 0 && dump_header_blkno != dumpend) {
printf("BAD dump size (%ld [written] != %ld [expected])\n",
(long)(dumpend - dumplo),
(long)(dump_header_blkno - dumplo));
goto failed;
}
err:
switch (error) {
case ENXIO:
printf("device bad\n");
break;
case EFAULT:
printf("device not ready\n");
break;
case EINVAL:
printf("area improper\n");
break;
case EIO:
printf("i/o error\n");
break;
case EINTR:
printf("aborted from console\n");
break;
case 0:
printf("succeeded\n");
break;
default:
printf("error %d\n", error);
break;
}
failed:
printf("\n\n");
delay(5000000); /* 5 seconds */
}
/*
* Clear registers on exec
*/

View File

@ -1,4 +1,4 @@
/* $NetBSD: kcore.h,v 1.3 2005/12/26 19:23:59 perry Exp $ */
/* $NetBSD: kcore.h,v 1.4 2008/01/12 20:03:42 ad Exp $ */
/*
* Copyright (c) 1996 Carnegie-Mellon University.
@ -43,4 +43,11 @@ typedef struct cpu_kcore_hdr {
#endif
} cpu_kcore_hdr_t;
#ifdef _KERNEL
void dumpsys(void);
extern struct pcb dumppcb;
extern int sparse_dump;
#endif
#endif /* _I386_KCORE_H_ */

View File

@ -1,4 +1,4 @@
# $NetBSD: files.xen,v 1.77 2008/01/11 20:00:18 bouyer Exp $
# $NetBSD: files.xen,v 1.78 2008/01/12 20:03:42 ad Exp $
# NetBSD: files.x86,v 1.10 2003/10/08 17:30:00 bouyer Exp
# NetBSD: files.i386,v 1.254 2004/03/25 23:32:10 jmc Exp
@ -40,6 +40,7 @@ file arch/i386/i386/db_disasm.c ddb
file arch/i386/i386/db_interface.c ddb
file arch/i386/i386/db_memrw.c ddb | kgdb
file arch/i386/i386/db_trace.c ddb
file arch/i386/i386/dumpsys.c
file arch/i386/i386/gdt.c
file arch/i386/i386/in_cksum.S inet | inet6
file arch/i386/i386/ipkdb_glue.c ipkdb