Fix fault handling code to correctly report access_type and fault_type
and get rid of pmap_enter_phys().
eeh 1999-03-28 19:01:02 +00:00
parent cf2576d332
commit 6492e81ae6
4 changed files with 64 additions and 73 deletions
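For orientation, the interface change the hunks below implement boils down to this: pmap_enter() absorbs what pmap_enter_phys() used to do, taking the UVM access_type as a trailing argument, while cacheability and large-page hints now ride in the low bits of the physical address instead of a separate size parameter. A minimal caller-side sketch, based only on the diff that follows; the wrapper name map_one_page() is hypothetical:

/*
 * Before: explicit TTE page-size argument, no access_type.
 * After:  size hint OR'ed into pa (PMAP_8K/64K/512K/4M), access_type last.
 */
#if 0	/* before this commit */
static void
map_one_page(pmap_t pm, vaddr_t va, u_int64_t pa)
{
	pmap_enter_phys(pm, va, pa, TLB_8K, VM_PROT_READ|VM_PROT_WRITE, 1);
}
#else	/* after this commit */
static void
map_one_page(pmap_t pm, vaddr_t va, paddr_t pa)
{
	pmap_enter(pm, va, pa | PMAP_8K, VM_PROT_READ|VM_PROT_WRITE,
	    1, VM_PROT_READ|VM_PROT_WRITE);
}
#endif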


@@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.8 1998/09/22 02:48:43 eeh Exp $ */
/* $NetBSD: pmap.h,v 1.9 1999/03/28 19:01:02 eeh Exp $ */
/*-
* Copyright (C) 1995, 1996 Wolfgang Solfrank.
@@ -130,9 +130,15 @@ struct prom_map {
u_int64_t tte;
};
#define PMAP_NC 1 /* Set the E bit in the page */
#define PMAP_NVC 2 /* Don't enable the virtual cache */
#define PMAP_LITTLE 3 /* Map in little endian mode */
#define PMAP_NC 0x001 /* Set the E bit in the page */
#define PMAP_NVC 0x002 /* Don't enable the virtual cache */
#define PMAP_LITTLE 0x004 /* Map in little endian mode */
/* Large page size hints -- we really should use another param to pmap_enter() */
#define PMAP_8K 0x000
#define PMAP_64K 0x008 /* Use 64K page */
#define PMAP_512K 0x010
#define PMAP_4M 0x018
#define PMAP_SZ_TO_TTE(x) (((x)&0x018)<<58)
/* If these bits are different in va's to the same PA then there is an aliasing in the d$ */
#define VA_ALIAS_MASK (1<<14)
@@ -152,6 +158,7 @@ extern struct pmap kernel_pmap_;
int pmap_count_res __P((pmap_t pmap));
/* int pmap_change_wiring __P((pmap_t pm, vaddr_t va, boolean_t wired)); */
#define pmap_resident_count(pm) pmap_count_res((pm))
#define pmap_from_phys_address(x,f) ((x)>>PGSHIFT)
#define pmap_phys_address(x) ((((paddr_t)(x))<<PGSHIFT)|PMAP_NC)
void pmap_bootstrap __P((u_long kernelstart, u_long kernelend, u_int numctx));
@@ -171,7 +178,6 @@ void switchexit __P((struct proc *));
/* SPARC64 specific */
int ctx_alloc __P((struct pmap*));
void ctx_free __P((struct pmap*));
void pmap_enter_phys __P((pmap_t, vaddr_t, u_int64_t, u_int64_t, vm_prot_t, boolean_t));
#endif /* _KERNEL */
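A small sketch of how the new flag encoding composes, assuming only the definitions above: the size hint sits in bits 3-4 of the flag bits OR'ed into the low part of a physical address, and PMAP_SZ_TO_TTE() relocates those two bits into the TTE size field (bits 61-62 of the TTE data word on UltraSPARC). The helper encode_pa() is hypothetical, for illustration only.

static u_int64_t
encode_pa(paddr_t pa)
{
	/* Request an uncacheable mapping with a 4MB page-size hint. */
	paddr_t hinted = pa | PMAP_NC | PMAP_4M;

	/* (0x018 << 58) selects TTE size code 3, i.e. a 4MB page. */
	return (PMAP_SZ_TO_TTE(hinted));
}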


@@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.34 1999/03/28 16:01:19 eeh Exp $ */
/* $NetBSD: machdep.c,v 1.35 1999/03/28 19:01:03 eeh Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@@ -892,7 +892,7 @@ cpu_dumpconf()
dumpsize = physmem;
}
#define BYTES_PER_DUMP (32 * 1024) /* must be a multiple of pagesize */
#define BYTES_PER_DUMP (8 * 1024) /* must be a multiple of pagesize */
static vaddr_t dumpspace;
caddr_t
@@ -971,8 +971,8 @@ dumpsys()
if (i && (i % (1024*1024)) == 0)
printf("%d ", i / (1024*1024));
(void) pmap_enter_phys(pmap_kernel(), dumpspace, maddr, maddr + n,
VM_PROT_READ, 1);
(void) pmap_enter(pmap_kernel(), dumpspace, maddr,
VM_PROT_READ, 1, VM_PROT_READ);
error = (*dump)(dumpdev, blkno,
(caddr_t)dumpspace, (int)n);
pmap_remove(pmap_kernel(), dumpspace, dumpspace + n);
@@ -1730,9 +1730,9 @@ static vaddr_t iobase = IODEV_BASE;
printf("sparc_bus_map: phys %llx virt %p hp %llx\n",
(int)(pa>>32), (int)pa, v, (int)((*hp)>>32), (int)*hp);
#endif
pmap_enter_phys(pmap_kernel(), v, pa | pm_flags, NBPG,
pmap_enter(pmap_kernel(), v, pa | pm_flags,
(flags&BUS_SPACE_MAP_READONLY) ? VM_PROT_READ
: VM_PROT_READ | VM_PROT_WRITE, 1/*, 0*/);
: VM_PROT_READ | VM_PROT_WRITE, 1, 0);
v += PAGE_SIZE;
pa += PAGE_SIZE;
} while ((size -= PAGE_SIZE) > 0);
@@ -1760,7 +1760,11 @@ sparc_bus_mmap(t, iospace, paddr, flags, hp)
int flags;
bus_space_handle_t *hp;
{
#if 0
*hp = (bus_space_handle_t)pmap_from_phys_address(paddr,flags);
#else
*hp = (bus_space_handle_t)(paddr>>PGSHIFT);
#endif
#if 0
printf("sparc_bus_mmap: encoding pa %llx as %llx becomes %llx\n",
(bus_addr_t)(paddr), (bus_space_handle_t)*hp,
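The machdep.c conversions all follow the same pattern: OR the MD flag bits into pa, pass the protection, wired = 1, and an access_type (0 where the mapping is not being entered in response to a fault). A hedged sketch modelled on the sparc_bus_map() hunk above; map_device_page() and its arguments are hypothetical:

static void
map_device_page(vaddr_t va, paddr_t pa, int readonly)
{
	/* PMAP_NC asks for an uncacheable (side-effect) mapping. */
	pmap_enter(pmap_kernel(), va, pa | PMAP_NC,
	    readonly ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE),
	    1 /* wired */, 0 /* no access_type hint */);
}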


@@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.30 1999/03/28 16:01:19 eeh Exp $ */
/* $NetBSD: pmap.c,v 1.31 1999/03/28 19:01:03 eeh Exp $ */
/* #define NO_VCACHE */ /* Don't forget the locked TLB in dostart */
#define HWREF
/* #define BOOT_DEBUG */
@@ -1014,10 +1014,11 @@ pmap_bootstrap(kernelstart, kernelend, maxctx)
}
#else
prom_printf("i=%d j=%d\r\n", i, j);
pmap_enter_phys(pmap_kernel(),
(vaddr_t)prom_map[i].vstart + j,
(prom_map[i].tte & TLB_PA_MASK) + j,
TLB_8K, VM_PROT_WRITE, 1);
pmap_enter(pmap_kernel(),
(vaddr_t)prom_map[i].vstart + j,
(prom_map[i].tte & TLB_PA_MASK) + j,
VM_PROT_WRITE, 1,
VM_PROT_WRITE|VM_PROT_READ|VM_PROT_EXECUTE);
#endif
}
#ifdef BOOT1_DEBUG
@@ -1390,28 +1391,7 @@ pmap_deactivate(p)
{
}
/*
* Insert physical page at pa into the given pmap at virtual address va.
*/
void
pmap_enter(pm, va, pa, prot, wired, access_type)
struct pmap *pm;
vaddr_t va;
paddr_t pa;
vm_prot_t prot;
int wired;
vm_prot_t access_type;
{
register u_int64_t phys;
phys = pa;
/* Call 64-bit clean version of pmap_enter */
pmap_enter_phys(pm, va, phys, TLB_8K, prot, wired);
}
#if defined(PMAP_NEW)
/* Different interfaces to pmap_enter_phys */
/*
* pmap_kenter_pa: [ INTERFACE ]
*
@@ -1445,7 +1425,8 @@ pmap_kenter_pa(va, pa, prot)
tte.data.data = TSB_DATA(0, TLB_8K, pa, pm == pmap_kernel(),
(VM_PROT_WRITE & prot),
(!(pa & PMAP_NC)), pa & (PMAP_NVC), 1);
if (VM_PROT_WRITE & prot) tte.data.data |= TLB_REAL_W; /* HWREF -- XXXX */
/* We don't track modification here. */
if (VM_PROT_WRITE & prot) tte.data.data |= TLB_REAL_W|TLB_W; /* HWREF -- XXXX */
tte.data.data |= TLB_TSB_LOCK; /* wired */
ASSERT((tte.data.data & TLB_NFO) == 0);
pg = NULL;
@@ -1474,12 +1455,12 @@ pmap_kenter_pa(va, pa, prot)
i = ptelookup_va(va);
#ifdef DEBUG
if( pmapdebug & PDB_ENTER )
prom_printf("pmap_kenter: va=%08x tag=%x:%08x data=%08x:%08x tsb[%d]=%08x\r\n", va,
prom_printf("pmap_kenter_pa: va=%08x tag=%x:%08x data=%08x:%08x tsb[%d]=%08x\r\n", va,
(int)(tte.tag.tag>>32), (int)tte.tag.tag,
(int)(tte.data.data>>32), (int)tte.data.data,
i, &tsb[i]);
if( pmapdebug & PDB_MMU_STEAL && tsb[i].data.data ) {
prom_printf("pmap_kenter: evicting entry tag=%x:%08x data=%08x:%08x tsb[%d]=%08x\r\n",
prom_printf("pmap_kenter_pa: evicting entry tag=%x:%08x data=%08x:%08x tsb[%d]=%08x\r\n",
(int)(tsb[i].tag.tag>>32), (int)tsb[i].tag.tag,
(int)(tsb[i].data.data>>32), (int)tsb[i].data.data,
i, &tsb[i]);
@@ -1622,18 +1603,19 @@ pmap_kremove(va, size)
* Supports 64-bit pa so we can map I/O space.
*/
void
pmap_enter_phys(pm, va, pa, size, prot, wired)
pmap_enter(pm, va, pa, prot, wired, access_type)
struct pmap *pm;
vaddr_t va;
u_int64_t pa;
u_int64_t size;
vm_prot_t prot;
int wired;
vm_prot_t access_type;
{
pte_t tte;
int s, i, aliased = 0;
register pv_entry_t pv=NULL, npv;
paddr_t pg;
int size = 0; /* PMAP_SZ_TO_TTE(pa); */
/*
* Is this part of the permanent 4MB mapping?
@@ -1714,22 +1696,22 @@ pmap_enter_phys(pm, va, pa, size, prot, wired)
ASSERT((tte.data.data & TLB_NFO) == 0);
pg = NULL;
#ifdef NOTDEF_DEBUG
printf("pmap_enter_phys: inserting %x:%x at %x\n", (int)(tte.data.data>>32), (int)tte.data.data, (int)va);
printf("pmap_enter: inserting %x:%x at %x\n", (int)(tte.data.data>>32), (int)tte.data.data, (int)va);
#endif
while (pseg_set(pm, va, tte.data.data, pg) != NULL) {
if (pmap_initialized || !uvm_page_physget(&pg)) {
vm_page_t page;
#ifdef NOTDEF_DEBUG
printf("pmap_enter_phys: need to alloc page\n");
printf("pmap_enter: need to alloc page\n");
#endif
while ((page = vm_page_alloc1()) == NULL) {
/*
* Let the pager run a bit--however this may deadlock
*/
#ifdef NOTDEF_DEBUG
printf("pmap_enter_phys: calling uvm_wait()\n");
printf("pmap_enter: calling uvm_wait()\n");
#endif
uvm_wait("pmap_enter_phys");
uvm_wait("pmap_enter");
}
pg = (paddr_t)VM_PAGE_TO_PHYS(page);
}
@@ -1738,7 +1720,7 @@ pmap_enter_phys(pm, va, pa, size, prot, wired)
enter_stats.ptpneeded ++;
#endif
#ifdef NOTDEF_DEBUG
printf("pmap_enter_phys: inserting %x:%x at %x with %x\n", (int)(tte.data.data>>32), (int)tte.data.data, (int)va, (int)pg);
printf("pmap_enter: inserting %x:%x at %x with %x\n", (int)(tte.data.data>>32), (int)tte.data.data, (int)va, (int)pg);
#endif
}
@@ -1826,19 +1808,19 @@ pmap_enter_phys(pm, va, pa, size, prot, wired)
pv->pv_va|=(pa & PMAP_NVC)?PV_NVC:PV_ALIAS;
#ifdef DEBUG
if (pmapdebug & PDB_ALIAS)
printf("pmap_enter_phys: aliased page %p:%p\n",
printf("pmap_enter: aliased page %p:%p\n",
(int)(pa>>32), (int)pa);
#endif
for (npv = pv; npv; npv = npv->pv_next)
if (npv->pv_pmap == pm) {
#ifdef DEBUG
if (pmapdebug & PDB_ALIAS)
printf("pmap_enter_phys: dealiasing %p in ctx %d\n",
printf("pmap_enter: dealiasing %p in ctx %d\n",
npv->pv_va, npv->pv_pmap->pm_ctx);
#endif
/* Turn off cacheing of this TTE */
if (pseg_set(npv->pv_pmap, va, pseg_get(npv->pv_pmap, va) & ~TLB_CV, 0)) {
printf("pmap_enter_phys: aliased pseg empty!\n");
printf("pmap_enter: aliased pseg empty!\n");
Debugger();
/* panic? */
}
@@ -2180,7 +2162,8 @@ pmap_map(va, pa, endpa, prot)
#ifdef DEBUG
page_size_map[i].use++;
#endif
pmap_enter_phys(pmap_kernel(), va, pa, page_size_map[i].code, prot, 1);
pmap_enter(pmap_kernel(), va, pa|page_size_map[i].code,
prot, 1, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
va += pgsize;
pa += pgsize;
} while (pa & page_size_map[i].mask);
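pmap_map() above is the one caller that already feeds a large-page hint to the new pmap_enter(). The other notable machinery in this file is the D$ dealiasing path, driven by VA_ALIAS_MASK from pmap.h; below is a sketch of the alias test that header comment implies, with bad_alias() as a hypothetical name:

static int
bad_alias(vaddr_t va1, vaddr_t va2)
{
	/*
	 * Two virtual mappings of the same physical page can stay cacheable
	 * in the virtually indexed D$ only if these bits agree; otherwise
	 * the pmap clears TLB_CV on the offending TTEs, as in the
	 * dealiasing loop above.
	 */
	return (((va1 ^ va2) & VA_ALIAS_MASK) != 0);
}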


@@ -1,4 +1,4 @@
/* $NetBSD: trap.c,v 1.27 1999/03/28 16:01:19 eeh Exp $ */
/* $NetBSD: trap.c,v 1.28 1999/03/28 19:01:03 eeh Exp $ */
/*
* Copyright (c) 1996
@@ -70,6 +70,7 @@
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <uvm/uvm_fault.h>
#include <machine/cpu.h>
#include <machine/ctlreg.h>
@@ -950,6 +951,7 @@ data_access_fault(type, addr, pc, tf)
register vaddr_t va;
register int rv;
vm_prot_t ftype;
vm_prot_t access_type;
vaddr_t onfault;
u_quad_t sticks;
#if DEBUG
@@ -1029,7 +1031,8 @@
#endif
/* Now munch on protections... */
ftype = (type == T_FDMMU_PROT)? VM_PROT_READ|VM_PROT_WRITE:VM_PROT_READ;
access_type = (type == T_FDMMU_PROT) ? VM_PROT_READ|VM_PROT_WRITE : VM_PROT_READ;
ftype = (type == T_FDMMU_PROT) ? VM_FAULT_PROTECT : VM_FAULT_INVALID;
if (tstate & (PSTATE_PRIV<<TSTATE_PSTATE_SHIFT)) {
extern char Lfsbail[];
/*
@@ -1048,7 +1051,7 @@
goto kfault;
if (!(addr&TLB_TAG_ACCESS_CTX)) {
/* CTXT == NUCLEUS */
if ((rv=uvm_fault(kernel_map, va, ftype, 0)) == KERN_SUCCESS) {
if ((rv=uvm_fault(kernel_map, va, ftype, access_type)) == KERN_SUCCESS) {
#ifdef DEBUG
if (trapdebug&(TDB_ADDFLT|TDB_FOLLOW))
printf("data_access_fault: kernel uvm_fault(%x, %x, %x, 0) sez %x -- success\n",
@@ -1056,17 +1059,6 @@
#endif
return;
}
#if 0
/* XXXX Like, why are we doing this twice? */
if ((rv=uvm_fault(kernel_map, va, ftype, 0)) == KERN_SUCCESS) {
#ifdef DEBUG
if (trapdebug&(TDB_ADDFLT|TDB_FOLLOW))
printf("data_access_fault: kernel uvm_fault(%x, %x, %x, 0) sez %x -- success\n",
kernel_map, (vaddr_t)va, ftype, rv);
#endif
return;
}
#endif
#ifdef DEBUG
if (trapdebug&(TDB_ADDFLT|TDB_FOLLOW))
printf("data_access_fault: kernel uvm_fault(%x, %x, %x, 0) sez %x -- failure\n",
@@ -1079,7 +1071,7 @@
vm = p->p_vmspace;
/* alas! must call the horrible vm code */
rv = uvm_fault(&vm->vm_map, (vaddr_t)va, ftype, FALSE);
rv = uvm_fault(&vm->vm_map, (vaddr_t)va, ftype, access_type);
#ifdef DEBUG
if (trapdebug&(TDB_ADDFLT|TDB_FOLLOW))
@@ -1182,6 +1174,7 @@ data_access_error(type, sfva, sfsr, afva, afsr, tf)
register vaddr_t va = 0; /* Stupid GCC warning */
register int rv;
vm_prot_t ftype;
vm_prot_t access_type;
vaddr_t onfault;
u_quad_t sticks;
#ifdef DEBUG
@@ -1317,7 +1310,8 @@ DEBUGGER(type, tf);
#endif
/* Now munch on protections... */
ftype = sfsr & SFSR_W ? VM_PROT_READ|VM_PROT_WRITE:VM_PROT_READ;
access_type = (sfsr & SFSR_W) ? VM_PROT_READ|VM_PROT_WRITE : VM_PROT_READ;
ftype = VM_FAULT_PROTECT; /* Mapping must exist... */
if (tstate & (PSTATE_PRIV<<TSTATE_PSTATE_SHIFT)) {
extern char Lfsbail[];
/*
@@ -1336,7 +1330,7 @@ DEBUGGER(type, tf);
goto kfault;
if (SFSR_CTXT_IS_PRIM(sfsr) || SFSR_CTXT_IS_NUCLEUS(sfsr)) {
/* NUCLEUS context */
if (uvm_fault(kernel_map, va, ftype, 0) == KERN_SUCCESS)
if (uvm_fault(kernel_map, va, ftype, access_type) == KERN_SUCCESS)
return;
if (SFSR_CTXT_IS_NUCLEUS(sfsr))
goto kfault;
@@ -1350,7 +1344,7 @@ DEBUGGER(type, tf);
if (trapdebug&(TDB_ADDFLT|TDB_FOLLOW))
printf("data_access_error: calling uvm_fault\n");
#endif
rv = uvm_fault(&vm->vm_map, (vaddr_t)va, ftype, FALSE);
rv = uvm_fault(&vm->vm_map, (vaddr_t)va, ftype, access_type);
/*
* If this was a stack access we keep track of the maximum
@@ -1432,6 +1426,7 @@ text_access_fault(type, pc, tf)
register vaddr_t va;
register int rv;
vm_prot_t ftype;
vm_prot_t access_type;
u_quad_t sticks;
#if DEBUG
@@ -1475,7 +1470,8 @@ text_access_fault(type, pc, tf)
/* Now munch on protections... */
ftype = VM_PROT_READ;
access_type = /* VM_PROT_EXECUTE| */VM_PROT_READ;
ftype = VM_FAULT_INVALID;
if (tstate & (PSTATE_PRIV<<TSTATE_PSTATE_SHIFT)) {
(void) splhigh();
printf("text_access_fault: pc=%x\n", pc);
@@ -1487,7 +1483,7 @@ text_access_fault(type, pc, tf)
vm = p->p_vmspace;
/* alas! must call the horrible vm code */
rv = uvm_fault(&vm->vm_map, (vaddr_t)va, ftype, FALSE);
rv = uvm_fault(&vm->vm_map, (vaddr_t)va, ftype, access_type);
#ifdef DEBUG
if (trapdebug&(TDB_TXTFLT|TDB_FOLLOW))
@@ -1563,6 +1559,7 @@ text_access_error(type, pc, sfsr, afva, afsr, tf)
register vaddr_t va;
register int rv;
vm_prot_t ftype;
vm_prot_t access_type;
u_quad_t sticks;
#if DEBUG
static int lastdouble;
@@ -1645,7 +1642,8 @@ text_access_error(type, pc, sfsr, afva, afsr, tf)
#endif
/* Now munch on protections... */
ftype = VM_PROT_READ;
access_type = /* VM_PROT_EXECUTE| */ VM_PROT_READ;
ftype = VM_FAULT_PROTECT; /* Protection fault? */
if (tstate & (PSTATE_PRIV<<TSTATE_PSTATE_SHIFT)) {
(void) splhigh();
printf("text error: pc=%lx sfsr=%%qb\n", pc, (long)sfsr, SFSR_BITS);
@@ -1657,7 +1655,7 @@ text_access_error(type, pc, sfsr, afva, afsr, tf)
vm = p->p_vmspace;
/* alas! must call the horrible vm code */
rv = uvm_fault(&vm->vm_map, (vaddr_t)va, ftype, FALSE);
rv = uvm_fault(&vm->vm_map, (vaddr_t)va, ftype, access_type);
/*
* If this was a stack access we keep track of the maximum
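Finally, the trap.c hunks share one pattern, sketched below: the MMU trap type (or SFSR_W in the error paths) now selects both the UVM fault class and the access being attempted, and both are handed to uvm_fault() in place of the old constant 0/FALSE. classify_dfault() is a hypothetical helper; the real assignments sit inline in data_access_fault(), and ftype keeps the vm_prot_t declaration used there.

static void
classify_dfault(int type, vm_prot_t *ftype, vm_prot_t *access_type)
{
	/* What the faulting instruction was trying to do... */
	*access_type = (type == T_FDMMU_PROT) ?
	    VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;

	/* ...and why the MMU objected: no TTE vs. a protection violation. */
	*ftype = (type == T_FDMMU_PROT) ? VM_FAULT_PROTECT : VM_FAULT_INVALID;
}

data_access_error() keys the same decision off SFSR_W, and the text-fault paths pass VM_PROT_READ for now, with VM_PROT_EXECUTE left commented out as the hunks above show.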