Update for the new m68k/kcore.h.  We use dispatch information provided
by the crash dump itself to facilitate cross-analysis.  The m68k_cmn
backend (for Utah pmaps) reads the MMU configuration from the crash dump
so that this works on both 4k and 8k h/w page size systems.
thorpej 1997-04-09 21:15:50 +00:00
parent 6911ff7d13
commit b6c0c9a270
5 changed files with 219 additions and 256 deletions
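Before the per-file diffs, here is how the new dispatch can be pictured: the ops vector is chosen by matching the machine name recorded in the crash dump's cpu_kcore_hdr_t against a small table (unmatched names fall through to the common Utah-pmap ops), and the page shift and offset mask are derived from the dump's page_size. The following is a minimal standalone sketch of that idea only; struct hdr, struct ops_entry, and the table contents are simplified stand-ins, not the real cpu_kcore_hdr_t / struct kvm_ops code from the library.

/*
 * Sketch of the name-based dispatch and pgshift derivation.
 * Types and table entries are illustrative stand-ins.
 */
#include <stdio.h>
#include <string.h>

struct hdr {                    /* stand-in for cpu_kcore_hdr_t */
	char	name[32];       /* machine name written by the dump */
	int	page_size;      /* h/w page size written by the dump */
};

struct ops_entry {              /* stand-in for struct name_ops */
	const char *name;
	const char *ops;        /* would be a struct kvm_ops * */
};

/* Specific kcore types first; a NULL name marks the default. */
static const struct ops_entry optbl[] = {
	{ "sun3",  "sun3 ops"  },
	{ "sun3x", "sun3x ops" },
	{ NULL,    "common (Utah pmap) ops" },
};

int
main(void)
{
	struct hdr h = { "amiga", 8192 };
	const struct ops_entry *nop;
	unsigned int pgshift;

	/* Match the name from the dump; fall into the default. */
	for (nop = optbl; nop->name != NULL; nop++)
		if (strcmp(nop->name, h.name) == 0)
			break;

	/* Derive pgshift so that (1 << pgshift) == page_size. */
	for (pgshift = 0; (1 << pgshift) < h.page_size; pgshift++)
		continue;
	if ((1 << pgshift) != h.page_size) {
		fprintf(stderr, "bogus page size %d\n", h.page_size);
		return (1);
	}

	printf("%s: using %s, pgshift %u, pgofset 0x%x\n",
	    h.name, nop->ops, pgshift, h.page_size - 1);
	return (0);
}

With an 8k-page dump this prints pgshift 13 and pgofset 0x1fff; a 4k-page dump would yield 12 and 0xfff, which is exactly the flexibility the commit message describes.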

kvm_m68k.c

@ -1,11 +1,11 @@
/* $NetBSD: kvm_m68k.c,v 1.10 1997/03/21 18:44:23 gwr Exp $ */
/* $NetBSD: kvm_m68k.c,v 1.11 1997/04/09 21:15:50 thorpej Exp $ */
/*-
* Copyright (c) 1997 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Gordon W. Ross.
* by Gordon W. Ross and Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -40,13 +40,11 @@
* Run-time kvm dispatcher for m68k machines.
* The actual MD code is in the files:
* kvm_m68k_cmn.c kvm_sun3.c ...
*
* Note: This file has to build on ALL m68k machines,
* so do NOT include any <machine/*.h> files here.
*/
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/kcore.h>
#include <stdio.h>
#include <string.h>
@ -58,26 +56,25 @@
#include <kvm.h>
#include <db.h>
#include <machine/kcore.h>
#include "kvm_private.h"
#include "kvm_m68k.h"
/* Could put this in struct vmstate, but this is easier. */
static struct kvm_ops *ops;
struct name_ops {
char *name;
const char *name;
struct kvm_ops *ops;
};
/*
* Match specific kcore types first, falling into a default.
*/
static struct name_ops optbl[] = {
{ "amiga", &_kvm_ops_cmn },
{ "atari", &_kvm_ops_cmn },
{ "sun3", &_kvm_ops_sun3 },
{ "sun3x", &_kvm_ops_sun3x },
{ NULL, NULL },
{ "sun3", &_kvm_ops_sun3 },
{ "sun3x", &_kvm_ops_sun3x },
{ NULL, &_kvm_ops_cmn },
};
/*
* Prepare for translation of kernel virtual addresses into offsets
* into crash dump files. This is where we do the dispatch work.
@ -86,33 +83,55 @@ int
_kvm_initvtop(kd)
kvm_t *kd;
{
char machine[256];
int mib[2], len, rval;
cpu_kcore_hdr_t *h;
struct name_ops *nop;
struct vmstate *vm;
/* Which set of kvm functions should we use? */
mib[0] = CTL_HW;
mib[1] = HW_MACHINE;
len = sizeof(machine);
if (sysctl(mib, 2, machine, &len, NULL, 0) == -1)
vm = (struct vmstate *)_kvm_malloc(kd, sizeof (*vm));
if (vm == 0)
return (-1);
for (nop = optbl; nop->name; nop++)
if (!strcmp(machine, nop->name))
goto found;
_kvm_err(kd, 0, "%s: unknown machine!", machine);
return (-1);
kd->vmst = vm;
found:
ops = nop->ops;
return ((ops->initvtop)(kd));
/*
* Use the machine name in the kcore header to determine
* our ops vector. When we reach an ops vector with
* no name, we've found a default.
*/
h = kd->cpu_data;
h->name[sizeof(h->name) - 1] = '\0'; /* sanity */
for (nop = optbl; nop->name != NULL; nop++)
if (strcmp(nop->name, h->name) == 0)
break;
vm->ops = nop->ops;
/*
* Compute pgshift and pgofset.
*/
for (vm->pgshift = 0; (1 << vm->pgshift) < h->page_size; vm->pgshift++)
/* nothing */ ;
if ((1 << vm->pgshift) != h->page_size)
goto bad;
vm->pgofset = h->page_size - 1;
if ((vm->ops->initvtop)(kd) < 0)
goto bad;
return (0);
bad:
kd->vmst = NULL;
free(vm);
return (-1);
}
void
_kvm_freevtop(kd)
kvm_t *kd;
{
(ops->freevtop)(kd);
(kd->vmst->ops->freevtop)(kd);
free(kd->vmst);
}
int
@ -121,7 +140,7 @@ _kvm_kvatop(kd, va, pap)
u_long va;
u_long *pap;
{
return ((ops->kvatop)(kd, va, pap));
return ((kd->vmst->ops->kvatop)(kd, va, pap));
}
off_t
@ -129,5 +148,5 @@ _kvm_pa2off(kd, pa)
kvm_t *kd;
u_long pa;
{
return ((ops->pa2off)(kd, pa));
return ((kd->vmst->ops->pa2off)(kd, pa));
}

kvm_m68k.h

@ -1,11 +1,11 @@
/* $NetBSD: kvm_m68k.h,v 1.1 1997/03/21 18:44:24 gwr Exp $ */
/* $NetBSD: kvm_m68k.h,v 1.2 1997/04/09 21:15:53 thorpej Exp $ */
/*-
* Copyright (c) 1997 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Gordon W. Ross.
* by Gordon W. Ross and Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -43,6 +43,13 @@ struct kvm_ops {
off_t (*pa2off) __P((kvm_t *, u_long));
};
struct vmstate {
struct kvm_ops *ops; /* ops vector */
u_int32_t pgshift; /* log2(page_size) */
u_int32_t pgofset; /* mask to find offset into page */
void *private; /* private to the bottom layer */
};
extern struct kvm_ops _kvm_ops_cmn;
extern struct kvm_ops _kvm_ops_sun3;
extern struct kvm_ops _kvm_ops_sun3x;

kvm_m68k_cmn.c

@ -1,6 +1,7 @@
/* $NetBSD: kvm_m68k_cmn.c,v 1.1 1997/03/21 18:44:24 gwr Exp $ */
/* $NetBSD: kvm_m68k_cmn.c,v 1.2 1997/04/09 21:15:55 thorpej Exp $ */
/*-
* Copyright (c) 1997 Jason R. Thorpe. All rights reserved.
* Copyright (c) 1989, 1992, 1993
* The Regents of the University of California. All rights reserved.
*
@ -41,7 +42,7 @@
#if 0
static char sccsid[] = "@(#)kvm_hp300.c 8.1 (Berkeley) 6/4/93";
#else
static char *rcsid = "$NetBSD: kvm_m68k_cmn.c,v 1.1 1997/03/21 18:44:24 gwr Exp $";
static char *rcsid = "$NetBSD: kvm_m68k_cmn.c,v 1.2 1997/04/09 21:15:55 thorpej Exp $";
#endif
#endif /* LIBC_SCCS and not lint */
@ -61,10 +62,6 @@ static char *rcsid = "$NetBSD: kvm_m68k_cmn.c,v 1.1 1997/03/21 18:44:24 gwr Exp
#include <kvm.h>
#include <db.h>
/* XXX: Avoid <machine/pte.h> etc. (see below) */
typedef u_int pt_entry_t; /* page table entry */
typedef u_int st_entry_t; /* segment table entry */
#include <m68k/cpu.h>
#include <m68k/kcore.h>
@ -82,44 +79,10 @@ struct kvm_ops _kvm_ops_cmn = {
_kvm_cmn_kvatop,
_kvm_cmn_pa2off };
static int vatop_030 __P((kvm_t *, st_entry_t *, ulong, ulong *));
static int vatop_040 __P((kvm_t *, st_entry_t *, ulong, ulong *));
/*
* XXX: I don't like this, but until all arch/.../include files
* are exported into some user-accessable place, there is no
* convenient alternative to copying these definitions here.
*/
/* Things from param.h */
#define PGSHIFT 13
#define NBPG (1<<13)
#define PGOFSET (NBPG-1)
#define btop(x) (((unsigned)(x)) >> PGSHIFT)
/* Things from pte.h */
/* All variants */
#define SG_V 2
#define PG_NV 0x00000000
#define PG_FRAME 0xffffe000
/* MC68030 with MMU TCR set for 8/11/13 (bits) */
#define SG3_SHIFT 24 /* a.k.a SEGSHIFT */
#define SG3_FRAME 0xffffe000
#define SG3_PMASK 0x00ffe000
/* MC68040 with MMU set for 8K page size. */
#define SG4_MASK1 0xfe000000
#define SG4_SHIFT1 25
#define SG4_MASK2 0x01fc0000
#define SG4_SHIFT2 18
#define SG4_MASK3 0x0003e000
#define SG4_SHIFT3 13
#define SG4_ADDR1 0xfffffe00
#define SG4_ADDR2 0xffffff80
static int vatop_030 __P((kvm_t *, u_int32_t, u_long, u_long *));
static int vatop_040 __P((kvm_t *, u_int32_t, u_long, u_long *));
#define _kvm_btop(v, a) (((unsigned)(a)) >> (v)->pgshift)
#define KREAD(kd, addr, p)\
(kvm_read(kd, addr, (char *)(p), sizeof(*(p))) != sizeof(*(p)))
@ -128,15 +91,14 @@ void
_kvm_cmn_freevtop(kd)
kvm_t *kd;
{
if (kd->vmst != 0)
free(kd->vmst);
/* No private state information to keep. */
}
int
_kvm_cmn_initvtop(kd)
kvm_t *kd;
{
/* No private state information to keep. */
return (0);
}
@ -146,31 +108,26 @@ _kvm_cmn_kvatop(kd, va, pa)
u_long va;
u_long *pa;
{
register cpu_kcore_hdr_t *cpu_kh;
int (*vtopf) __P((kvm_t *, st_entry_t *, ulong, ulong *));
cpu_kcore_hdr_t *h = kd->cpu_data;
struct m68k_kcore_hdr *m = &h->un._m68k;
struct vmstate *vm = kd->vmst;
int (*vtopf) __P((kvm_t *, u_int32_t, u_long, u_long *));
if (ISALIVE(kd)) {
_kvm_err(kd, 0, "vatop called in live kernel!");
return (0);
}
cpu_kh = kd->cpu_data;
switch (cpu_kh->mmutype) {
case MMU_68030:
vtopf = vatop_030;
break;
case MMU_68040:
/*
* 68040 and 68060 use the same translation functions,
* as do 68030, 68851, HP MMU.
*/
if (m->mmutype == MMU_68040 || m->mmutype == MMU_68060)
vtopf = vatop_040;
break;
else
vtopf = vatop_030;
default:
_kvm_err(kd, 0, "vatop unknown MMU type!");
return (0);
}
return ((*vtopf)(kd, cpu_kh->sysseg_pa, va, pa));
return ((*vtopf)(kd, m->sysseg_pa, va, pa));
}
/*
@ -181,20 +138,23 @@ _kvm_cmn_pa2off(kd, pa)
kvm_t *kd;
u_long pa;
{
off_t off;
phys_ram_seg_t *rsp;
register cpu_kcore_hdr_t *cpu_kh;
cpu_kcore_hdr_t *h = kd->cpu_data;
struct m68k_kcore_hdr *m = &h->un._m68k;
phys_ram_seg_t *rsp;
off_t off;
int i;
cpu_kh = kd->cpu_data;
off = 0;
for (rsp = cpu_kh->ram_segs; rsp->size; rsp++) {
if (pa >= rsp->start && pa < rsp->start + rsp->size) {
rsp = m->ram_segs;
for (i = 0; i < M68K_NPHYS_RAM_SEGS && rsp[i].size != 0; i++) {
if (pa >= rsp[i].start &&
pa < (rsp[i].start + rsp[i].size)) {
pa -= rsp->start;
break;
}
off += rsp->size;
}
return(kd->dump_off + off + pa);
return (kd->dump_off + off + pa);
}
/*****************************************************************
@ -202,46 +162,50 @@ _kvm_cmn_pa2off(kd, pa)
*/
static int
vatop_030(kd, sta, va, pa)
vatop_030(kd, stpa, va, pa)
kvm_t *kd;
st_entry_t *sta;
u_int32_t stpa;
u_long va;
u_long *pa;
{
register cpu_kcore_hdr_t *cpu_kh;
register u_long addr;
int p, ste, pte;
int offset;
cpu_kcore_hdr_t *h = kd->cpu_data;
struct m68k_kcore_hdr *m = &h->un._m68k;
struct vmstate *vm = kd->vmst;
u_long addr;
u_int32_t ste, pte;
u_int p, offset;
offset = va & PGOFSET;
cpu_kh = kd->cpu_data;
offset = va & vm->pgofset;
/*
* If we are initializing (kernel segment table pointer not yet set)
* then return pa == va to avoid infinite recursion.
* We may be called before address translation is initialized.
* This is typically used to find the dump magic number. This
* means we do not yet have the kernel page tables available,
* so we must do a simple relocation.
*/
if (cpu_kh->sysseg_pa == 0) {
*pa = va + cpu_kh->kernel_pa;
return (NBPG - offset);
if (va < m->relocend) {
*pa = (va - h->kernbase) + m->reloc;
return (h->page_size - offset);
}
addr = (u_long)&sta[va >> SG3_SHIFT];
addr = stpa + ((va >> m->sg_ishift) * sizeof(u_int32_t));
/*
* Can't use KREAD to read kernel segment table entries.
* Fortunately it is 1-to-1 mapped so we don't have to.
*/
if (sta == cpu_kh->sysseg_pa) {
if (stpa == m->sysseg_pa) {
if (lseek(kd->pmfd, _kvm_cmn_pa2off(kd, addr), 0) == -1 ||
read(kd->pmfd, (char *)&ste, sizeof(ste)) < 0)
goto invalid;
} else if (KREAD(kd, addr, &ste))
goto invalid;
if ((ste & SG_V) == 0) {
if ((ste & m->sg_v) == 0) {
_kvm_err(kd, 0, "invalid segment (%x)", ste);
return((off_t)0);
return(0);
}
p = btop(va & SG3_PMASK);
addr = (ste & SG3_FRAME) + (p * sizeof(pt_entry_t));
p = _kvm_btop(vm, va & m->sg_pmask);
addr = (ste & m->sg_frame) + (p * sizeof(u_int32_t));
/*
* Address from STE is a physical address so don't use kvm_read.
@ -249,61 +213,68 @@ vatop_030(kd, sta, va, pa)
if (lseek(kd->pmfd, _kvm_cmn_pa2off(kd, addr), 0) == -1 ||
read(kd->pmfd, (char *)&pte, sizeof(pte)) < 0)
goto invalid;
addr = pte & PG_FRAME;
if (pte == PG_NV) {
addr = pte & m->pg_frame;
if ((pte & m->pg_v) == 0) {
_kvm_err(kd, 0, "page not valid");
return (0);
}
*pa = addr + offset;
return (NBPG - offset);
return (h->page_size - offset);
invalid:
_kvm_err(kd, 0, "invalid address (%x)", va);
return (0);
}
static int
vatop_040(kd, sta, va, pa)
vatop_040(kd, stpa, va, pa)
kvm_t *kd;
st_entry_t *sta;
u_int32_t stpa;
u_long va;
u_long *pa;
{
register cpu_kcore_hdr_t *cpu_kh;
register u_long addr;
st_entry_t *sta2;
int p, ste, pte;
int offset;
cpu_kcore_hdr_t *h = kd->cpu_data;
struct m68k_kcore_hdr *m = &h->un._m68k;
struct vmstate *vm = kd->vmst;
u_long addr;
u_int32_t stpa2;
u_int32_t ste, pte;
u_int p, offset;
offset = va & vm->pgofset;
offset = va & PGOFSET;
cpu_kh = kd->cpu_data;
/*
* If we are initializing (kernel segment table pointer not yet set)
* then return pa == va to avoid infinite recursion.
* We may be called before address translation is initialized.
* This is typically used to find the dump magic number. This
* means we do not yet have the kernel page tables available,
* so we must do a simple relocation.
*/
if (cpu_kh->sysseg_pa == 0) {
*pa = va + cpu_kh->kernel_pa;
return (NBPG - offset);
if (va < m->relocend) {
*pa = (va - h->kernbase) + m->reloc;
return (h->page_size - offset);
}
addr = (u_long)&sta[va >> SG4_SHIFT1];
addr = stpa + ((va >> m->sg40_shift1) * sizeof(u_int32_t));
/*
* Can't use KREAD to read kernel segment table entries.
* Fortunately it is 1-to-1 mapped so we don't have to.
*/
if (sta == cpu_kh->sysseg_pa) {
if (stpa == m->sysseg_pa) {
if (lseek(kd->pmfd, _kvm_cmn_pa2off(kd, addr), 0) == -1 ||
read(kd->pmfd, (char *)&ste, sizeof(ste)) < 0)
goto invalid;
} else if (KREAD(kd, addr, &ste))
goto invalid;
if ((ste & SG_V) == 0) {
if ((ste & m->sg_v) == 0) {
_kvm_err(kd, 0, "invalid level 1 descriptor (%x)",
ste);
return((off_t)0);
}
sta2 = (st_entry_t *)(ste & SG4_ADDR1);
addr = (u_long)&sta2[(va & SG4_MASK2) >> SG4_SHIFT2];
stpa2 = (ste & m->sg40_addr1);
addr = stpa2 + (((va & m->sg40_mask2) >> m->sg40_shift2) *
sizeof(u_int32_t));
/*
* Address from level 1 STE is a physical address,
* so don't use kvm_read.
@ -311,14 +282,14 @@ vatop_040(kd, sta, va, pa)
if (lseek(kd->pmfd, _kvm_cmn_pa2off(kd, addr), 0) == -1 ||
read(kd->pmfd, (char *)&ste, sizeof(ste)) < 0)
goto invalid;
if ((ste & SG_V) == 0) {
if ((ste & m->sg_v) == 0) {
_kvm_err(kd, 0, "invalid level 2 descriptor (%x)",
ste);
return((off_t)0);
}
sta2 = (st_entry_t *)(ste & SG4_ADDR2);
addr = (u_long)&sta2[(va & SG4_MASK3) >> SG4_SHIFT3];
stpa2 = (ste & m->sg40_addr2);
addr = stpa2 + (((va & m->sg40_mask3) >> m->sg40_shift3) *
sizeof(u_int32_t));
/*
* Address from STE is a physical address so don't use kvm_read.
@ -326,14 +297,15 @@ vatop_040(kd, sta, va, pa)
if (lseek(kd->pmfd, _kvm_cmn_pa2off(kd, addr), 0) == -1 ||
read(kd->pmfd, (char *)&pte, sizeof(pte)) < 0)
goto invalid;
addr = pte & PG_FRAME;
if (pte == PG_NV) {
addr = pte & m->pg_frame;
if ((pte & m->pg_v) == 0) {
_kvm_err(kd, 0, "page not valid");
return (0);
}
*pa = addr + offset;
return (NBPG - offset);
return (h->page_size - offset);
invalid:
_kvm_err(kd, 0, "invalid address (%x)", va);
return (0);

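The _kvm_cmn_pa2off() change above turns a physical address into an offset within the dump by walking the m68k ram_segs[] list: every segment that precedes the one containing pa contributes its full size, the remainder is pa's offset into its own segment, and the real routine then adds kd->dump_off (where the memory image starts in the file). A minimal standalone sketch of just that calculation, using a stand-in segment type rather than the real phys_ram_seg_t / kvm_t:

#include <stdio.h>

/* Stand-in for phys_ram_seg_t; the real code reads these from the dump header. */
struct ram_seg {
	unsigned long start;    /* physical start address */
	unsigned long size;     /* size in bytes */
};

#define NSEGS 4                 /* like M68K_NPHYS_RAM_SEGS */

/*
 * Sum the sizes of all segments before the one containing pa,
 * then add pa's offset into that segment.  Returns -1 if pa is
 * not covered by any dumped segment.
 */
static long
pa2off(const struct ram_seg *segs, unsigned long pa)
{
	unsigned long off = 0;
	int i;

	for (i = 0; i < NSEGS && segs[i].size != 0; i++) {
		if (pa >= segs[i].start && pa < segs[i].start + segs[i].size)
			return (long)(off + (pa - segs[i].start));
		off += segs[i].size;
	}
	return (-1);
}

int
main(void)
{
	struct ram_seg segs[NSEGS] = {
		{ 0x00000000, 0x00400000 },     /* 4MB at 0 */
		{ 0x01000000, 0x00400000 },     /* 4MB at 16MB */
		{ 0, 0 }, { 0, 0 },
	};
	long off;

	/* 0x01000100 lies in the second segment: 4MB + 0x100 = 0x400100. */
	off = pa2off(segs, 0x01000100UL);
	if (off >= 0)
		printf("offset into memory image: 0x%lx\n", (unsigned long)off);
	return (0);
}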
kvm_sun3.c

@ -1,4 +1,4 @@
/* $NetBSD: kvm_sun3.c,v 1.5 1997/03/21 18:44:25 gwr Exp $ */
/* $NetBSD: kvm_sun3.c,v 1.6 1997/04/09 21:15:58 thorpej Exp $ */
/*-
* Copyright (c) 1992, 1993
@ -41,7 +41,7 @@
#if 0
static char sccsid[] = "@(#)kvm_sparc.c 8.1 (Berkeley) 6/4/93";
#else
static char *rcsid = "$NetBSD: kvm_sun3.c,v 1.5 1997/03/21 18:44:25 gwr Exp $";
static char *rcsid = "$NetBSD: kvm_sun3.c,v 1.6 1997/04/09 21:15:58 thorpej Exp $";
#endif
#endif /* LIBC_SCCS and not lint */
@ -61,6 +61,8 @@ static char *rcsid = "$NetBSD: kvm_sun3.c,v 1.5 1997/03/21 18:44:25 gwr Exp $";
#include <kvm.h>
#include <db.h>
#include <m68k/kcore.h>
#include "kvm_private.h"
#include "kvm_m68k.h"
@ -75,38 +77,24 @@ struct kvm_ops _kvm_ops_sun3 = {
_kvm_sun3_kvatop,
_kvm_sun3_pa2off };
#define _kvm_pg_pa(v, s, pte) \
(((pte) & (s)->pg_frame) << (v)->pgshift)
#define _kvm_va_segnum(s, x) \
((u_int)(x) >> (s)->segshift)
#define _kvm_pte_num_mask(v) \
(0xf << (v)->pgshift)
#define _kvm_va_pte_num(v, va) \
(((va) & _kvm_pte_num_mask((v))) >> (v)->pgshift)
/*
* XXX: I don't like this, but until all arch/.../include files
* are exported into some user-accessable place, there is no
* convenient alternative to copying these definitions here.
* XXX Re-define these here, no other place for them.
*/
/* sun3/include/param.h */
#define PGSHIFT 13
#define NBPG 8192
#define PGOFSET (NBPG-1)
#define SEGSHIFT 17
#define KERNBASE 0x0E000000
/* sun3/include/pte.h */
#define NKSEG 256 /* kernel segmap entries */
#define NPAGSEG 16 /* pages per segment */
#define PG_VALID 0x80000000
#define PG_FRAME 0x0007FFFF
#define PG_PA(pte) ((pte & PG_FRAME) <<PGSHIFT)
#define VA_SEGNUM(x) ((u_int)(x) >> SEGSHIFT)
#define VA_PTE_NUM_MASK (0xF << PGSHIFT)
#define VA_PTE_NUM(va) ((va & VA_PTE_NUM_MASK) >> PGSHIFT)
/* sun3/include/kcore.h */
typedef struct cpu_kcore_hdr {
phys_ram_seg_t ram_segs[4];
u_char ksegmap[NKSEG];
} cpu_kcore_hdr_t;
#define NKSEG 256 /* kernel segmap entries */
#define NPAGSEG 16 /* pages per segment */
/* Finally, our local stuff... */
struct vmstate {
struct private_vmstate {
/* Page Map Entry Group (PMEG) */
int pmeg[NKSEG][NPAGSEG];
};
@ -121,11 +109,12 @@ int
_kvm_sun3_initvtop(kd)
kvm_t *kd;
{
register char *p;
cpu_kcore_hdr_t *h = kd->cpu_data;
char *p;
p = kd->cpu_data;
p += (NBPG - sizeof(kcore_seg_t));
kd->vmst = (struct vmstate *)p;
p += (h->page_size - sizeof(kcore_seg_t));
kd->vmst->private = p;
return (0);
}
@ -135,7 +124,7 @@ _kvm_sun3_freevtop(kd)
kvm_t *kd;
{
/* This was set by pointer arithmetic, not allocation. */
kd->vmst = (void*)0;
kd->vmst->private = (void*)0;
}
/*
@ -150,42 +139,44 @@ _kvm_sun3_kvatop(kd, va, pap)
u_long va;
u_long *pap;
{
register cpu_kcore_hdr_t *ckh;
u_int segnum, sme, ptenum;
cpu_kcore_hdr_t *h = kd->cpu_data;
struct sun3_kcore_hdr *s = &h->un._sun3;
struct vmstate *v = kd->vmst;
struct private_vmstate *pv = v->private;
int pte, offset;
u_int segnum, sme, ptenum;
u_long pa;
if (ISALIVE(kd)) {
_kvm_err(kd, 0, "vatop called in live kernel!");
return((off_t)0);
return(0);
}
ckh = kd->cpu_data;
if (va < KERNBASE) {
if (va < h->kernbase) {
_kvm_err(kd, 0, "not a kernel address");
return((off_t)0);
return(0);
}
/*
* Get the segmap entry (sme) from the kernel segmap.
* Note: only have segmap entries from KERNBASE to end.
*/
segnum = VA_SEGNUM(va - KERNBASE);
ptenum = VA_PTE_NUM(va);
offset = va & PGOFSET;
segnum = _kvm_va_segnum(s, va - h->kernbase);
ptenum = _kvm_va_pte_num(v, va);
offset = va & v->pgofset;
/* The segmap entry selects a PMEG. */
sme = ckh->ksegmap[segnum];
pte = kd->vmst->pmeg[sme][ptenum];
sme = s->ksegmap[segnum];
pte = pv->pmeg[sme][ptenum];
if ((pte & PG_VALID) == 0) {
if ((pte & (s)->pg_valid) == 0) {
_kvm_err(kd, 0, "page not valid (VA=0x%x)", va);
return (0);
}
pa = PG_PA(pte) + offset;
pa = _kvm_pg_pa(v, s, pte) + offset;
*pap = pa;
return (NBPG - offset);
return (h->page_size - offset);
}
/*
@ -198,4 +189,3 @@ _kvm_sun3_pa2off(kd, pa)
{
return(kd->dump_off + pa);
}

kvm_sun3x.c

@ -1,4 +1,4 @@
/* $NetBSD: kvm_sun3x.c,v 1.1 1997/03/21 18:44:26 gwr Exp $ */
/* $NetBSD: kvm_sun3x.c,v 1.2 1997/04/09 21:16:00 thorpej Exp $ */
/*-
* Copyright (c) 1997 The NetBSD Foundation, Inc.
@ -40,7 +40,7 @@
#if 0
static char sccsid[] = "@(#)kvm_sparc.c 8.1 (Berkeley) 6/4/93";
#else
static char *rcsid = "$NetBSD: kvm_sun3x.c,v 1.1 1997/03/21 18:44:26 gwr Exp $";
static char *rcsid = "$NetBSD: kvm_sun3x.c,v 1.2 1997/04/09 21:16:00 thorpej Exp $";
#endif
#endif /* LIBC_SCCS and not lint */
@ -60,6 +60,8 @@ static char *rcsid = "$NetBSD: kvm_sun3x.c,v 1.1 1997/03/21 18:44:26 gwr Exp $";
#include <kvm.h>
#include <db.h>
#include <m68k/kcore.h>
#include "kvm_private.h"
#include "kvm_m68k.h"
@ -74,40 +76,12 @@ struct kvm_ops _kvm_ops_sun3x = {
_kvm_sun3x_kvatop,
_kvm_sun3x_pa2off };
/*
* XXX: I don't like this, but until all arch/.../include files
* are exported into some user-accessable place, there is no
* convenient alternative to copying these definitions here.
*/
/* sun3x/include/param.h */
#define PGSHIFT 13
#define NBPG 8192
#define PGOFSET (NBPG-1)
#define KERNBASE 0xF8000000
/* sun3x/sun3x/pmap.c */
#define KVAS_SIZE (-KERNBASE)
#define NKPTES (KVAS_SIZE >> PGSHIFT)
#define PG_FRAME 0xffffff00
#define PG_VALID 0x00000001
#define PG_PA(pte) (pte & PG_FRAME)
/* sun3x/include/kcore.h */
#define NPHYS_RAM_SEGS 4
typedef struct {
u_long start; /* Physical start address */
u_long size; /* Size in bytes */
} cpu_ram_seg_t;
typedef struct cpu_kcore_hdr {
u_long ckh_contig_end;
u_long ckh_kernCbase;
cpu_ram_seg_t ram_segs[NPHYS_RAM_SEGS];
} cpu_kcore_hdr_t;
/* Finally, our local stuff... */
#define _kvm_kvas_size(h) \
(-((h)->kernbase))
#define _kvm_nkptes(h, v) \
(_kvm_kvas_size((h)) >> (v)->pgshift)
#define _kvm_pg_pa(pte, h) \
((pte) & (h)->pg_frame)
/*
* Prepare for translation of kernel virtual addresses into offsets
@ -138,17 +112,18 @@ _kvm_sun3x_kvatop(kd, va, pap)
u_long va;
u_long *pap;
{
register cpu_kcore_hdr_t *ckh;
cpu_kcore_hdr_t *h = kd->cpu_data;
struct sun3x_kcore_hdr *s = &h->un._sun3x;
struct vmstate *v = kd->vmst;
int idx, len, offset, pte;
u_long pteva, pa;
if (ISALIVE(kd)) {
_kvm_err(kd, 0, "vatop called in live kernel!");
return((off_t)0);
return(0);
}
ckh = kd->cpu_data;
if (va < KERNBASE) {
if (va < h->kernbase) {
_kvm_err(kd, 0, "not a kernel address");
return(0);
}
@ -159,9 +134,9 @@ _kvm_sun3x_kvatop(kd, va, pap)
* kvm_read to access the kernel page table, which
* is guaranteed to be in the contiguous range.
*/
if (va < ckh->ckh_contig_end) {
len = va - ckh->ckh_contig_end;
pa = va - KERNBASE;
if (va < s->contig_end) {
len = va - s->contig_end;
pa = va - h->kernbase;
goto done;
}
@ -169,19 +144,19 @@ _kvm_sun3x_kvatop(kd, va, pap)
* The KVA is beyond the contiguous range, so we must
* read the PTE for this KVA from the page table.
*/
idx = ((va - KERNBASE) >> PGSHIFT);
pteva = ckh->ckh_kernCbase + (idx * 4);
idx = ((va - h->kernbase) >> v->pgshift);
pteva = s->kernCbase + (idx * 4);
if (kvm_read(kd, pteva, &pte, 4) != 4) {
_kvm_err(kd, 0, "can not read PTE!");
return (0);
}
if ((pte & PG_VALID) == 0) {
if ((pte & s->pg_valid) == 0) {
_kvm_err(kd, 0, "page not valid (VA=0x%x)", va);
return (0);
}
offset = va & PGOFSET;
len = (NBPG - offset);
pa = PG_PA(pte) + offset;
offset = va & v->pgofset;
len = (h->page_size - offset);
pa = _kvm_pg_pa(pte, s) + offset;
done:
*pap = pa;
@ -197,18 +172,18 @@ _kvm_sun3x_pa2off(kd, pa)
u_long pa;
{
off_t off;
cpu_ram_seg_t *rsp;
register cpu_kcore_hdr_t *cpu_kh;
phys_ram_seg_t *rsp;
cpu_kcore_hdr_t *h = kd->cpu_data;
struct sun3x_kcore_hdr *s = &h->un._sun3x;
cpu_kh = kd->cpu_data;
off = 0;
for (rsp = cpu_kh->ram_segs; rsp->size; rsp++) {
for (rsp = s->ram_segs; rsp->size; rsp++) {
if (pa >= rsp->start && pa < rsp->start + rsp->size) {
pa -= rsp->start;
break;
}
off += rsp->size;
}
return(kd->dump_off + off + pa);
return (kd->dump_off + off + pa);
}