Eliminate _kvm_uvatop(), in favor of using the machine-independent VM structures.

mycroft 1995-01-09 08:59:25 +00:00
parent 85a1830551
commit c304971439
9 changed files with 117 additions and 308 deletions

lib/libkvm/kvm.c

@@ -171,17 +171,18 @@ _kvm_open(kd, uf, mf, sf, flag, errout)
 {
 	struct stat st;
 
-	kd->vmfd = -1;
-	kd->db = 0;
 	kd->pmfd = -1;
+	kd->vmfd = -1;
 	kd->swfd = -1;
 	kd->nlfd = -1;
-	kd->vmst = 0;
+	kd->db = 0;
 	kd->procbase = 0;
 	kd->nbpg = getpagesize();
 	kd->swapspc = 0;
 	kd->argspc = 0;
 	kd->argv = 0;
+	kd->vmst = 0;
+	kd->vm_page_buckets = 0;
 
 	if (uf == 0)
 		uf = _PATH_UNIX;

lib/libkvm/kvm_i386.c

@@ -37,7 +37,7 @@
 #if defined(LIBC_SCCS) && !defined(lint)
 /* from: static char sccsid[] = "@(#)kvm_hp300.c 8.1 (Berkeley) 6/4/93"; */
-static char *rcsid = "$Id: kvm_i386.c,v 1.4 1994/08/15 15:57:36 mycroft Exp $";
+static char *rcsid = "$Id: kvm_i386.c,v 1.5 1995/01/09 08:59:27 mycroft Exp $";
 #endif /* LIBC_SCCS and not lint */
 
 /*
@@ -143,50 +143,3 @@ invalid:
 	_kvm_err(kd, 0, "invalid address (%x)", va);
 	return (0);
 }
-
-/*
- * Translate a user virtual address to a physical address.
- */
-int
-_kvm_uvatop(kd, p, va, pa)
-	kvm_t *kd;
-	const struct proc *p;
-	u_long va;
-	u_long *pa;
-{
-	struct vmspace vms;
-	pd_entry_t pde, *pdeloc;
-	pt_entry_t pte, *pteloc;
-	u_long kva, offset;
-
-	if (va >= KERNBASE)
-		goto invalid;
-
-	/* XXX - should be passed a `kinfo_proc *' here */
-	if (kvm_read(kd, (u_long)p->p_vmspace, (char *)&vms, sizeof(vms)) !=
-	    sizeof(vms))
-		goto invalid;
-	pdeloc = vms.vm_pmap.pm_pdir + (va >> PDSHIFT);
-	if (kvm_read(kd, (u_long)pdeloc, (char *)&pde, sizeof(pde)) !=
-	    sizeof(pde))
-		goto invalid;
-	if ((pde & PG_V) == 0)
-		goto invalid;
-	pteloc = (pt_entry_t *)(pde & PG_FRAME) + btop(va & PT_MASK);
-	if (lseek(kd->pmfd, (off_t)(u_long)pteloc, 0) != (off_t)(u_long)pteloc)
-		goto invalid;
-	if (read(kd->pmfd, (char *)&pte, sizeof(pte)) != sizeof(pte))
-		goto invalid;
-	if ((pte & PG_V) == 0)
-		goto invalid;
-	offset = va & PGOFSET;
-	*pa = (pte & PG_FRAME) + offset;
-	return (NBPG - offset);
-
-invalid:
-	_kvm_err(kd, 0, "invalid address (%x)", va);
-	return (0);
-}
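
(Aside, not part of the commit: the deleted walk above splits an i386
linear address into a page-directory slot, a page-table slot, and a byte
offset.  A minimal standalone model of that arithmetic, assuming the
usual 4KB-page i386 values behind PDSHIFT, PT_MASK, and PGOFSET, and a
made-up address:)

#include <stdio.h>

int
main(void)
{
	unsigned long va = 0x0804a123UL;	/* made-up user address */
	unsigned long pdi = va >> 22;		/* va >> PDSHIFT */
	unsigned long pti = (va >> 12) & 0x3ff;	/* btop(va & PT_MASK) */
	unsigned long off = va & 0xfff;		/* va & PGOFSET */

	printf("pde slot %lu, pte slot %lu, offset %#lx\n", pdi, pti, off);
	return (0);
}

The ns32k walk deleted later in this commit does the same two-level
lookup, spelled with struct pde/struct pte bitfields instead of masks.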

lib/libkvm/kvm_m68k.c

@@ -37,7 +37,7 @@
 #if defined(LIBC_SCCS) && !defined(lint)
 /* from: static char sccsid[] = "@(#)kvm_hp300.c 8.1 (Berkeley) 6/4/93"; */
-static char *rcsid = "$Id: kvm_m68k.c,v 1.2 1994/09/18 03:32:51 mycroft Exp $";
+static char *rcsid = "$Id: kvm_m68k.c,v 1.3 1995/01/09 08:59:28 mycroft Exp $";
 #endif /* LIBC_SCCS and not lint */
 
 /*
@@ -239,51 +239,3 @@ _kvm_kvatop(kd, va, pa)
 {
 	return (_kvm_vatop(kd, (u_long)kd->vmst->Sysseg, va, pa));
 }
-
-/*
- * Translate a user virtual address to a physical address.
- */
-int
-_kvm_uvatop(kd, p, va, pa)
-	kvm_t *kd;
-	const struct proc *p;
-	u_long va;
-	u_long *pa;
-{
-	register struct vmspace *vms = p->p_vmspace;
-	int kva;
-
-	/*
-	 * If this is a live kernel we just look it up in the kernel
-	 * virtually allocated flat 4mb page table (i.e. let the kernel
-	 * do the table walk).  In this way, we avoid needing to know
-	 * the MMU type.
-	 */
-	if (ISALIVE(kd)) {
-		pt_entry_t *ptab;
-		int pte, offset;
-
-		kva = (int)&vms->vm_pmap.pm_ptab;
-		if (KREAD(kd, kva, &ptab)) {
-			_kvm_err(kd, 0, "invalid address (%x)", va);
-			return (0);
-		}
-		kva = (int)&ptab[btop(va)];
-		if (KREAD(kd, kva, &pte) || (pte & PG_V) == 0) {
-			_kvm_err(kd, 0, "invalid address (%x)", va);
-			return (0);
-		}
-		offset = va & PGOFSET;
-		*pa = (pte & PG_FRAME) | offset;
-		return (NBPG - offset);
-	}
-
-	/*
-	 * Otherwise, we just walk the table ourself.
-	 */
-	kva = (int)&vms->vm_pmap.pm_stab;
-	if (KREAD(kd, kva, &kva)) {
-		_kvm_err(kd, 0, "invalid address (%x)", va);
-		return (0);
-	}
-	return (_kvm_vatop(kd, kva, va, pa));
-}
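
(Aside, not from the commit: the live-kernel branch above is a two-step
KREAD() pointer chase -- fetch the pm_ptab pointer, then fetch
ptab[btop(va)].  A self-contained model of that shape, with kvm_read()
stood in by memcpy() over local data:)

#include <stdio.h>
#include <string.h>

#define PGSHIFT	12
#define btop(x)	((unsigned long)(x) >> PGSHIFT)

static unsigned int fake_ptab[1024];		/* stands in for the flat table */
static unsigned int *fake_pm_ptab = fake_ptab;	/* stands in for pm_ptab */

static int
fake_kread(void *addr, void *buf, size_t len)	/* models KREAD(): 0 = ok */
{
	memcpy(buf, addr, len);
	return (0);
}

int
main(void)
{
	unsigned long va = 0x5123;		/* made-up user address */
	unsigned int *ptab, pte;

	fake_ptab[btop(va)] = 0x00042001;	/* made-up frame | PG_V */
	if (fake_kread(&fake_pm_ptab, &ptab, sizeof(ptab)) ||
	    fake_kread(&ptab[btop(va)], &pte, sizeof(pte)))
		return (1);
	printf("pte %#x, offset %#lx\n", pte, va & 0xfff);
	return (0);
}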

lib/libkvm/kvm_mips.c

@@ -160,48 +160,3 @@ invalid:
 	_kvm_err(kd, 0, "invalid address (%x)", va);
 	return (0);
 }
-
-/*
- * Translate a user virtual address to a physical address.
- */
-int
-_kvm_uvatop(kd, p, va, pa)
-	kvm_t *kd;
-	const struct proc *p;
-	u_long va;
-	u_long *pa;
-{
-	register struct vmspace *vms = p->p_vmspace;
-	u_long kva, offset;
-
-	if (va >= KERNBASE)
-		goto invalid;
-
-	/* read the address of the first level table */
-	kva = (u_long)&vms->vm_pmap.pm_segtab;
-	if (kvm_read(kd, kva, (char *)&kva, sizeof(kva)) != sizeof(kva))
-		goto invalid;
-	if (kva == 0)
-		goto invalid;
-
-	/* read the address of the second level table */
-	kva += (va >> SEGSHIFT) * sizeof(caddr_t);
-	if (kvm_read(kd, kva, (char *)&kva, sizeof(kva)) != sizeof(kva))
-		goto invalid;
-	if (kva == 0)
-		goto invalid;
-
-	/* read the pte from the second level table */
-	kva += (va >> PGSHIFT) & (NPTEPG - 1);
-	if (kvm_read(kd, kva, (char *)&kva, sizeof(kva)) != sizeof(kva))
-		goto invalid;
-	if (!(kva & PG_V))
-		goto invalid;
-
-	offset = va & PGOFSET;
-	*pa = (kva & PG_FRAME) | offset;
-	return (NBPG - offset);
-
-invalid:
-	_kvm_err(kd, 0, "invalid address (%x)", va);
-	return (0);
-}
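
(Aside, not from the commit: the deleted MIPS walk indexes two levels,
scaling the first-level index by sizeof(caddr_t) by hand.  A standalone
model of the arithmetic, assuming 4KB pages -- so NPTEPG is 1024 and
SEGSHIFT is 22 -- and a made-up address:)

#include <stdio.h>

#define PGSHIFT		12
#define NPTEPG		1024	/* 4-byte PTEs per 4KB page */
#define SEGSHIFT	22	/* one PTE page per segment */

int
main(void)
{
	unsigned long va = 0x0044a123UL;	/* made-up user address */

	printf("segtab byte offset %lu\n",
	    (va >> SEGSHIFT) * (unsigned long)sizeof(char *));
	printf("pte slot %lu, page offset %#lx\n",
	    (va >> PGSHIFT) & (NPTEPG - 1),
	    va & ((1UL << PGSHIFT) - 1));
	return (0);
}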

lib/libkvm/kvm_ns32k.c

@@ -37,7 +37,7 @@
 #if defined(LIBC_SCCS) && !defined(lint)
 /* from: static char sccsid[] = "@(#)kvm_hp300.c 8.1 (Berkeley) 6/4/93"; */
-static char *rcsid = "$Id: kvm_ns32k.c,v 1.1 1994/05/20 23:31:06 phil Exp $";
+static char *rcsid = "$Id: kvm_ns32k.c,v 1.2 1995/01/09 08:59:30 mycroft Exp $";
 #endif /* LIBC_SCCS and not lint */
 
 /*
@@ -144,50 +144,3 @@ invalid:
 	_kvm_err(kd, 0, "invalid address (%x)", va);
 	return (0);
 }
-
-/*
- * Translate a user virtual address to a physical address.
- */
-int
-_kvm_uvatop(kd, p, va, pa)
-	kvm_t *kd;
-	const struct proc *p;
-	u_long va;
-	u_long *pa;
-{
-	struct vmspace vms;
-	struct pde pde, *pdeloc;
-	struct pte pte, *pteloc;
-	u_long kva, offset;
-
-	if (va >= KERNBASE)
-		goto invalid;
-
-	/* XXX - should be passed a `kinfo_proc *' here */
-	if (kvm_read(kd, (u_long)p->p_vmspace, (char *)&vms, sizeof(vms)) !=
-	    sizeof(vms))
-		goto invalid;
-	pdeloc = (struct pde *)vms.vm_pmap.pm_pdir + (va >> PDSHIFT);
-	if (kvm_read(kd, (u_long)pdeloc, (char *)&pde, sizeof(pde)) !=
-	    sizeof(pde))
-		goto invalid;
-	if (pde.pd_v == 0)
-		goto invalid;
-	pteloc = (struct pte *)ptob(pde.pd_pfnum) + btop(va & PT_MASK);
-	if (lseek(kd->pmfd, (off_t)(u_long)pteloc, 0) != (off_t)(u_long)pteloc)
-		goto invalid;
-	if (read(kd->pmfd, (char *)&pte, sizeof(pte)) != sizeof(pte))
-		goto invalid;
-	if (pte.pg_v == 0)
-		goto invalid;
-	offset = va & PGOFSET;
-	*pa = (u_long)ptob(pte.pg_pfnum) + offset;
-	return (NBPG - offset);
-
-invalid:
-	_kvm_err(kd, 0, "invalid address (%x)", va);
-	return (0);
-}

lib/libkvm/kvm_private.h

@@ -66,6 +66,12 @@ struct __kvm {
 	 * only allocate it if necessary.
 	 */
 	struct vmstate *vmst;
+	/*
+	 * These kernel variables are used for looking up user addresses,
+	 * and are cached for efficiency.
+	 */
+	struct pglist *vm_page_buckets;
+	int vm_page_hash_mask;
 };
 
 /*
@@ -80,4 +86,3 @@ void *_kvm_malloc __P((kvm_t *kd, size_t));
 void	*_kvm_realloc __P((kvm_t *kd, void *, size_t));
 void	 _kvm_syserr
 	    __P((kvm_t *kd, const char *program, const char *fmt, ...));
-int	 _kvm_uvatop __P((kvm_t *, const struct proc *, u_long, u_long *));

lib/libkvm/kvm_proc.c

@@ -74,11 +74,12 @@ static char sccsid[] = "@(#)kvm_proc.c 8.3 (Berkeley) 9/23/93";
 #define	KREAD(kd, addr, obj) \
 	(kvm_read(kd, addr, (char *)(obj), sizeof(*obj)) != sizeof(*obj))
 
+int _kvm_readfromcore __P((kvm_t *, u_long, u_long));
 int _kvm_readfrompager __P((kvm_t *, struct vm_object *, u_long));
 ssize_t kvm_uread __P((kvm_t *, const struct proc *, u_long, char *, size_t));
 
-static char *
-kvm_readswap(kd, p, va, cnt)
+char *
+_kvm_uread(kd, p, va, cnt)
 	kvm_t *kd;
 	const struct proc *p;
 	u_long va;
@@ -88,18 +89,20 @@ kvm_readswap(kd, p, va, cnt)
 	register u_long offset;
 	struct vm_map_entry vme;
 	struct vm_object vmo;
+	int rv;
 
 	if (kd->swapspc == 0) {
 		kd->swapspc = (char *)_kvm_malloc(kd, kd->nbpg);
 		if (kd->swapspc == 0)
 			return (0);
 	}
-	head = (u_long)&p->p_vmspace->vm_map.header;
+
 	/*
 	 * Look through the address map for the memory object
 	 * that corresponds to the given virtual address.
 	 * The header just has the entire valid range.
 	 */
+	head = (u_long)&p->p_vmspace->vm_map.header;
 	addr = head;
 	while (1) {
 		if (KREAD(kd, addr, &vme))
@@ -119,13 +122,18 @@ kvm_readswap(kd, p, va, cnt)
 	 */
 	offset = va - vme.start + vme.offset;
 	addr = (u_long)vme.object.vm_object;
+
 	while (1) {
+		/* Try reading the page from core first. */
+		if ((rv = _kvm_readfromcore(kd, addr, offset)))
+			break;
+
 		if (KREAD(kd, addr, &vmo))
 			return (0);
+
 		/* If there is a pager here, see if it has the page. */
 		if (vmo.pager != 0 &&
-		    _kvm_readfrompager(kd, &vmo, offset))
+		    (rv = _kvm_readfrompager(kd, &vmo, offset)))
 			break;
 
 		/* Move down the shadow chain. */
@@ -135,34 +143,103 @@ kvm_readswap(kd, p, va, cnt)
 		offset += vmo.shadow_offset;
 	}
+	if (rv == -1)
+		return (0);
+
 	/* Found the page. */
 	offset %= kd->nbpg;
 	*cnt = kd->nbpg - offset;
 	return (&kd->swapspc[offset]);
 }
 
+#define	vm_page_hash(kd, object, offset) \
+	(((u_long)object + (u_long)(offset / kd->nbpg)) & kd->vm_page_hash_mask)
+
+int
+_kvm_coreinit(kd)
+	kvm_t *kd;
+{
+	struct nlist nlist[3];
+
+	nlist[0].n_name = "_vm_page_buckets";
+	nlist[1].n_name = "_vm_page_hash_mask";
+	nlist[2].n_name = 0;
+	if (kvm_nlist(kd, nlist) != 0)
+		return (-1);
+
+	if (KREAD(kd, nlist[0].n_value, &kd->vm_page_buckets) ||
+	    KREAD(kd, nlist[1].n_value, &kd->vm_page_hash_mask))
+		return (-1);
+
+	return (0);
+}
+
+int
+_kvm_readfromcore(kd, object, offset)
+	kvm_t *kd;
+	u_long object, offset;
+{
+	u_long addr;
+	struct pglist bucket;
+	struct vm_page mem;
+	off_t seekpoint;
+
+	if (kd->vm_page_buckets == 0 &&
+	    _kvm_coreinit(kd))
+		return (-1);
+
+	addr = (u_long)&kd->vm_page_buckets[vm_page_hash(kd, object, offset)];
+	if (KREAD(kd, addr, &bucket))
+		return (-1);
+
+	addr = (u_long)bucket.tqh_first;
+	offset &= ~(kd->nbpg -1);
+	while (1) {
+		if (addr == 0)
+			return (0);
+
+		if (KREAD(kd, addr, &mem))
+			return (-1);
+
+		if ((u_long)mem.object == object &&
+		    (u_long)mem.offset == offset)
+			break;
+
+		addr = (u_long)mem.hashq.tqe_next;
+	}
+
+	seekpoint = mem.phys_addr;
+
+	if (lseek(kd->pmfd, seekpoint, 0) == -1)
+		return (-1);
+	if (read(kd->pmfd, kd->swapspc, kd->nbpg) != kd->nbpg)
+		return (-1);
+
+	return (1);
+}
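
(Aside, not part of the diff: _kvm_readfromcore() locates a resident page
by hashing the (object, offset) pair into the vm_page_buckets array whose
address _kvm_coreinit() pulled from the kernel symbol table.  A standalone
model of the vm_page_hash() computation above, with made-up stand-ins for
kd->nbpg and kd->vm_page_hash_mask:)

#include <stdio.h>

int
main(void)
{
	unsigned long object = 0xf05a2c00UL;	/* made-up vm_object address */
	unsigned long offset = 0x6000UL;	/* byte offset within it */
	unsigned long nbpg = 4096;		/* stands in for kd->nbpg */
	unsigned long mask = 1024 - 1;		/* kd->vm_page_hash_mask */

	printf("bucket %lu\n", (object + offset / nbpg) & mask);
	return (0);
}

The chosen bucket is then walked through tqh_first/tqe_next until a
vm_page with the matching object and page-aligned offset turns up; its
phys_addr is the seek offset into the memory file.
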
 int
 _kvm_readfrompager(kd, vmop, offset)
 	kvm_t *kd;
 	struct vm_object *vmop;
 	u_long offset;
 {
 	u_long addr;
 	struct pager_struct pager;
 	struct swpager swap;
 	int ix;
 	struct swblock swb;
-	register off_t seekpoint;
+	off_t seekpoint;
 
 	/* Read in the pager info and make sure it's a swap device. */
 	addr = (u_long)vmop->pager;
 	if (KREAD(kd, addr, &pager) || pager.pg_type != PG_SWAP)
-		return (0);
+		return (-1);
 
 	/* Read in the swap_pager private data. */
 	addr = (u_long)pager.pg_data;
 	if (KREAD(kd, addr, &swap))
-		return (0);
+		return (-1);
 
 	/*
 	 * Calculate the paging offset, and make sure it's within the
@@ -172,7 +249,7 @@ _kvm_readfrompager(kd, vmop, offset)
 	ix = offset / dbtob(swap.sw_bsize);
 #if 0
 	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks)
-		return (0);
+		return (-1);
 #else
 	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks) {
 		int i;
@@ -190,14 +267,14 @@ _kvm_readfrompager(kd, vmop, offset)
 			printf("sw_blocks[%d]: block %x mask %x\n", ix,
 			    swb.swb_block, swb.swb_mask);
 		}
-		return (0);
+		return (-1);
 	}
 #endif
 
 	/* Read in the swap records. */
 	addr = (u_long)&swap.sw_blocks[ix];
 	if (KREAD(kd, addr, &swb))
-		return (0);
+		return (-1);
 
 	/* Calculate offset within pager. */
 	offset %= dbtob(swap.sw_bsize);
@@ -206,12 +283,16 @@ _kvm_readfrompager(kd, vmop, offset)
 	if ((swb.swb_mask & (1 << (offset / kd->nbpg))) == 0)
 		return (0);
 
+	if (!ISALIVE(kd))
+		return (-1);
+
 	/* Calculate the physical address and read the page. */
 	seekpoint = dbtob(swb.swb_block) + (offset & ~(kd->nbpg -1));
 	if (lseek(kd->swfd, seekpoint, 0) == -1)
-		return (0);
+		return (-1);
 	if (read(kd->swfd, kd->swapspc, kd->nbpg) != kd->nbpg)
-		return (0);
+		return (-1);
 
 	return (1);
 }
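
(Aside, not part of the diff: the swap-pager math above first selects a
sw_blocks[] record, then a page-sized bit within its swb_mask.  A
standalone model with made-up values, assuming DEV_BSIZE is 512 so
dbtob() is a shift by 9:)

#include <stdio.h>

#define dbtob(db)	((unsigned long)(db) << 9)	/* 512-byte blocks */

int
main(void)
{
	unsigned long offset = 0x15000UL;	/* made-up paging offset */
	int sw_bsize = 32;			/* disk blocks per swblock */
	unsigned long nbpg = 4096;		/* stands in for kd->nbpg */
	int ix;

	ix = offset / dbtob(sw_bsize);		/* which sw_blocks[] entry */
	offset %= dbtob(sw_bsize);		/* offset within that entry */
	printf("ix %d, mask bit %lu, seek delta %#lx\n",
	    ix, offset / nbpg, offset & ~(nbpg - 1));
	return (0);
}
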
@@ -711,40 +792,18 @@ kvm_uread(kd, p, uva, buf, len)
 	cp = buf;
 	while (len > 0) {
-		u_long pa;
 		register int cc;
-
-		cc = _kvm_uvatop(kd, p, uva, &pa);
-		if (cc > 0) {
-			if (cc > len)
-				cc = len;
-			errno = 0;
-			if (lseek(kd->pmfd, (off_t)pa, 0) == -1 && errno != 0) {
-				_kvm_err(kd, 0, "invalid address (%x)", uva);
-				break;
-			}
-			cc = read(kd->pmfd, cp, cc);
-			if (cc < 0) {
-				_kvm_syserr(kd, 0, _PATH_MEM);
-				break;
-			} else if (cc < len) {
-				_kvm_err(kd, kd->program, "short read");
-				break;
-			}
-		} else if (ISALIVE(kd)) {
-			/* try swap */
-			register char *dp;
-			int cnt;
+		register char *dp;
+		int cnt;
+
+		dp = _kvm_uread(kd, p, uva, &cnt);
+		if (dp == 0) {
+			_kvm_err(kd, 0, "invalid address (%x)", uva);
+			return (0);
+		}
+		cc = MIN(cnt, len);
+		bcopy(dp, cp, cc);
 
-			dp = kvm_readswap(kd, p, uva, &cnt);
-			if (dp == 0) {
-				_kvm_err(kd, 0, "invalid address (%x)", uva);
-				return (0);
-			}
-			cc = MIN(cnt, len);
-			bcopy(dp, cp, cc);
-		} else
-			break;
 		cp += cc;
 		uva += cc;
 		len -= cc;

lib/libkvm/kvm_sparc.c

@@ -205,61 +205,6 @@ _kvm_initvtop(kd)
 #define VA_OFF(va) (va & (kd->nbpg - 1))
 
-/*
- * Translate a user virtual address to a physical address.
- */
-int
-_kvm_uvatop(kd, p, va, pa)
-	kvm_t *kd;
-	const struct proc *p;
-	u_long va;
-	u_long *pa;
-{
-	int kva, pte;
-	register int off, frame;
-	register struct vmspace *vms = p->p_vmspace;
-	struct usegmap *usp;
-
-	_kvm_mustinit(kd);
-	if ((u_long)vms < KERNBASE) {
-		_kvm_err(kd, kd->program, "_kvm_uvatop: corrupt proc");
-		return (0);
-	}
-	if (va >= KERNBASE)
-		return (0);
-
-	/*
-	 * Get the PTE.  This takes two steps.  We read the
-	 * base address of the table, then we index it.
-	 * Note that the index pte table is indexed by
-	 * virtual segment rather than physical segment.
-	 */
-	kva = (u_long)&vms->vm_pmap.pm_segstore;
-	if (kvm_read(kd, kva, (char *)&usp, 4) != 4)
-		goto invalid;
-	kva = (u_long)&usp->us_pte[VA_VSEG(va)];
-	if (kvm_read(kd, kva, (char *)&kva, 4) != 4 || kva == 0)
-		goto invalid;
-	kva += sizeof(usp->us_pte[0]) * VA_VPG(va);
-	if (kvm_read(kd, kva, (char *)&pte, 4) == 4 && (pte & PG_V)) {
-		off = VA_OFF(va);
-		/*
-		 * /dev/mem adheres to the hardware model of physical memory
-		 * (with holes in the address space), while crashdumps
-		 * adhere to the contiguous software model.
-		 */
-		if (ISALIVE(kd))
-			frame = pte & PG_PFNUM;
-		else
-			frame = HWTOSW(kd->vmst->pmap_stod, pte & PG_PFNUM);
-		*pa = (frame << pgshift) | off;
-		return (kd->nbpg - off);
-	}
-
-invalid:
-	_kvm_err(kd, 0, "invalid address (%x)", va);
-	return (0);
-}
 
 /*
  * Translate a kernel virtual address to a physical address using the
 * mapping information in kd->vm.  Returns the result in pa, and returns
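
(Aside, not from the commit: the HWTOSW() step in the deleted code exists
because /dev/mem follows the hardware model of physical memory, holes and
all, while crash dumps pack memory contiguously.  A standalone model of
that hole compaction; the bank table is invented for the example:)

#include <stdio.h>

struct bank {
	unsigned long hw;	/* first hardware frame of the bank */
	unsigned long sw;	/* same frame in the packed dump */
	unsigned long len;	/* frames in the bank */
};

int
main(void)
{
	static const struct bank stod[] = {	/* made-up layout */
		{ 0x0000, 0x0000, 0x800 },
		{ 0x1000, 0x0800, 0x800 },	/* a hole precedes this bank */
	};
	unsigned long frame = 0x1234;		/* made-up pte & PG_PFNUM */
	unsigned long i;

	for (i = 0; i < sizeof(stod) / sizeof(stod[0]); i++)
		if (frame >= stod[i].hw && frame < stod[i].hw + stod[i].len)
			printf("sw frame %#lx\n",
			    stod[i].sw + (frame - stod[i].hw));
	return (0);
}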

lib/libkvm/kvm_vax.c

@@ -102,20 +102,6 @@ _kvm_initvtop(kd)
 #define VA_OFF(va) (va & (NBPG - 1))
 
-/*
- * Translate a user virtual address to a physical address.
- */
-int
-_kvm_uvatop(kd, p, va, pa)
-	kvm_t *kd;
-	const struct proc *p;
-	u_long va;
-	u_long *pa;
-{
-	_kvm_err(kd, 0, "_kvm_uvatop: not supported");
-	return (0);
-}
 
 /*
  * Translate a kernel virtual address to a physical address using the
 * mapping information in kd->vm.  Returns the result in pa, and returns