Rewrite the "emulate page reference bits" code to avoid a bunch of

pv_lists traversals and unnecessary page faults.
This commit is contained in:
ragge 1999-05-23 23:03:44 +00:00
parent 539f825799
commit 373e7cc828
3 changed files with 57 additions and 82 deletions

View File

@ -1,4 +1,4 @@
/* NOTE(review): this span is a unified-diff rendering with the +/-
 * markers stripped — each old line is immediately followed by its
 * replacement.  It is not compilable as-is; confirm against the
 * repository head before treating any single line as current. */
/* $NetBSD: pte.h,v 1.11 1999/01/01 21:43:18 ragge Exp $ */
/* $NetBSD: pte.h,v 1.12 1999/05/23 23:03:44 ragge Exp $ */
/*
* Copyright (c) 1994 Ludd, University of Lule}, Sweden.
@ -71,7 +71,7 @@ typedef struct pte pt_entry_t; /* Mach page table entry */
#define PG_W 0x00400000
#define PG_U 0x00200000
#define PG_FRAME 0x001fffff
/* Old PG_PFNUM (v1.11, first line) shifted the raw address directly;
 * the v1.12 replacement first masks to 0x3ffffe00 (bits 9..29 —
 * presumably VAX_PGSHIFT is 9, i.e. 512-byte hardware pages; confirm
 * in the machine param header), discarding segment/sign bits so the
 * macro is safe on kernel as well as user virtual addresses. */
#define PG_PFNUM(x) ((x) >> VAX_PGSHIFT)
#define PG_PFNUM(x) (((unsigned long)(x) & 0x3ffffe00) >> VAX_PGSHIFT)
#ifndef _LOCORE
extern pt_entry_t *Sysmap;
@ -80,13 +80,12 @@ extern pt_entry_t *Sysmap;
*/
#endif
/* kvtopte: kernel VA -> system page table entry.  The new one-line
 * form reuses PG_PFNUM instead of open-coding the ~KERNBASE mask. */
#define kvtopte(va) \
(&Sysmap[((unsigned)(va) & ~KERNBASE) >> VAX_PGSHIFT])
#define kvtopte(va) (&Sysmap[PG_PFNUM(va)])
#define ptetokv(pt) \
((((pt_entry_t *)(pt) - Sysmap) << VAX_PGSHIFT) + 0x80000000)
#define kvtophys(va) \
(((kvtopte(va))->pg_pfn << VAX_PGSHIFT) | ((int)(va) & VAX_PGOFSET))
/* uvtopte: user VA -> PTE via the per-process P0BR/P1BR in the pcb.
 * NOTE(review): the rewrite drops the old "> 0x80000000" guard —
 * presumably callers now guarantee a user-space VA; verify at the
 * call sites. */
#define uvtopte(va, pcb) \
(((unsigned)va < 0x40000000) || ((unsigned)va > 0x80000000) ? \
&((pcb->P0BR)[(unsigned)va >> VAX_PGSHIFT]) : \
&((pcb->P1BR)[((unsigned)va & 0x3fffffff) >> VAX_PGSHIFT]))
(((unsigned)va < 0x40000000) ? \
&((pcb->P0BR)[PG_PFNUM(va)]) : \
&((pcb->P1BR)[PG_PFNUM(va)]))

View File

@ -1,4 +1,4 @@
/* $NetBSD: intvec.s,v 1.37 1999/02/02 18:37:20 ragge Exp $ */
/* $NetBSD: intvec.s,v 1.38 1999/05/23 23:03:44 ragge Exp $ */
/*
* Copyright (c) 1994, 1997 Ludd, University of Lule}, Sweden.
@ -208,50 +208,21 @@ L4: addl2 (sp)+,sp # remove info pushed on stack
* Therefore it is done a fast revalidation of the page if it is
* referenced. Trouble here is the hardware bug on KA650 CPUs that
* put in a need for an extra check when the fault is gotten during
* PTE reference.
* PTE reference. Handled in pmap.c.
*/
# NOTE(review): diff rendering with +/- markers stripped.  The long
# in-line PTE fixup below is the OLD code being removed; the NEW code
# (from "pushr $0x3f" on) simply calls _pmap_simulref and lets the C
# side emulate the reference bit and do the KA650 (CVAX) bug check.
	.align	2
transl_v:	.globl	transl_v	# Translation violation, 20
#ifdef DEBUG
	bbc	$0,(sp),1f	# pte len illegal in trans fault
	pushab	2f
	calls	$1,_panic
2:	.asciz	"pte trans"
#endif
1:	pushr	$3	# save r0 & r1
	movl	12(sp),r0	# Save faulted address in r0
	blss	2f	# Jump if in kernelspace
	ashl	$1,r0,r0
	blss	3f	# Jump if P1
	mfpr	$PR_P0BR,r1
	brb	4f
3:	mfpr	$PR_P1BR,r1
4:	bbc	$1,8(sp),5f	# Jump if not indirect
	extzv	$10,$21,r0,r0	# extract pte number
	moval	(r1)[r0],r0	# get address of pte
#if defined(VAX650) || defined(DEBUG)
	extzv	$10,$20,r0,r1
	movl	_Sysmap,r0
	movaq	(r0)[r1],r0
	tstl	(r0)	# If pte clear, found HW bug.
	bneq	6f
	popr	$3
	brb	access_v
#endif
2:	extzv	$10,$20,r0,r1	# get pte index
	movl	_Sysmap,r0
	movaq	(r0)[r1],r0	# pte address
6:	bisl2	$0x80000000,(r0)+	# set valid bit
	bisl2	$0x80000000,(r0)
	popr	$3
# New handler: save all temporary registers, then push two copies of
# the trap-frame words at 28(sp) — presumably the fault code and the
# faulting VA; confirm against the trap frame layout — as arguments to
# pmap_simulref().  A nonzero return means the CVAX bug was detected,
# so fall through to the access-violation handler instead of rei.
	pushr	$0x3f
	pushl	28(sp)
	pushl	28(sp)
	calls	$2,_pmap_simulref
	tstl	r0
	bneq	1f
	popr	$0x3f
	addl2	$8,sp	# drop the two fault words pushed by hardware
	rei
5:	extzv	$11,$20,r0,r0
	movaq	(r1)[r0],r0
	brb	6b
1:	popr	$0x3f
	brb	access_v
	.align	2
access_v:.globl	access_v	# Access cntrl viol fault, 24

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.63 1999/04/17 17:02:50 ragge Exp $ */
/* $NetBSD: pmap.c,v 1.64 1999/05/23 23:03:44 ragge Exp $ */
/*
* Copyright (c) 1994, 1998 Ludd, University of Lule}, Sweden.
* All rights reserved.
@ -808,35 +808,63 @@ if(startpmapdebug) printf("pmap_protect: pmap %p, start %lx, end %lx, prot %x\n"
mtpr(0,PR_TBIA);
}
#ifdef NEW_REF
int pmap_simulref(int bits, int addr);
/*
 * NOTE(review): diff rendering — both the old K&R parameter list and
 * the new ANSI one appear below; only the ANSI form belongs to the
 * new revision.  Confirm against the repository head.
 */
/*
* Called from interrupt vector routines if we get a page invalid fault.
* Note: the save mask must be or'ed with 0x3f for this function.
* Returns 0 if normal call, 1 if CVAX bug detected.
*
* bits is the fault code from the trap frame: bit 0 = PTE length
* violation (panics under DEBUG), bit 1 = fault taken during PTE
* reference, bit 2 = fault was a write (page will be modified).
*/
int
pmap_simulref(bits, addr)
int bits, addr;
pmap_simulref(int bits, int addr)
{
u_int *pte;
struct pv_entry *pv;
paddr_t pa;
#ifdef PMAPDEBUG
if (startpmapdebug)
printf("pmap_simulref: bits %x addr %x\n", bits, addr);
#endif
#ifdef DEBUG
if (bits & 1)
panic("pte trans len");
#endif
/* Set address on logical page boundary */
addr &= ~PGOFSET;
/* First decode userspace addr */
if (addr >= 0) {
/* sign bit clear: P0 or P1 (user) space */
if ((addr << 1) < 0)
/* bit 30 set: P1 region */
pte = mfpr(PR_P1BR);
pte = (u_int *)mfpr(PR_P1BR);
else
pte = mfpr(PR_P0BR);
pte = (u_int *)mfpr(PR_P0BR);
pte += PG_PFNUM(addr);
if (bits & 2) { /* PTE reference */
pte = (u_int *)TRUNC_PAGE(pte);
pte = (u_int *)kvtopte(pte);
if (pte[0] == 0) /* Check for CVAX bug */
return 1;
pa = (u_int)pte & ~KERNBASE;
} else
pa = Sysmap[PG_PFNUM(pte)].pg_pfn << VAX_PGSHIFT;
} else {
/* kernel space: PTE lives in Sysmap */
pte = (u_int *)kvtopte(addr);
pa = (u_int)pte & ~KERNBASE;
}
/* Validate all 8 hardware PTEs mapping this VM page — presumably
 * PGSHIFT == VAX_PGSHIFT + 3 (one VM page = 8 VAX pages); confirm. */
pte[0] |= PG_V;
pte[1] |= PG_V;
pte[2] |= PG_V;
pte[3] |= PG_V;
pte[4] |= PG_V;
pte[5] |= PG_V;
pte[6] |= PG_V;
pte[7] |= PG_V;
/* Record the emulated reference (and modify) bit in the pv entry. */
pv = pv_table + (pa >> PGSHIFT);
pv->pv_attr |= PG_V; /* Referenced */
if (bits & 4)
pv->pv_attr |= PG_M; /* (will be) modified. XXX page tables */
return 0;
}
#endif
/*
* Checks if page is referenced; returns true or false depending on result.
*/
@ -863,29 +891,6 @@ pmap_is_referenced(pa)
/* Software reference bit set by pmap_simulref() is authoritative. */
if (pv->pv_attr & PG_V)
return 1;
#ifndef NEW_REF
/* NOTE(review): the pv-chain walk below is the OLD !NEW_REF code this
 * revision removes — it tested the hardware valid bits of the four
 * even PTEs mapping the page for the head entry and every chained
 * mapping.  With NEW_REF the pv_attr check above replaces it. */
if (pv->pv_pte)
if ((pv->pv_pte[0].pg_v | pv->pv_pte[2].pg_v |
pv->pv_pte[4].pg_v | pv->pv_pte[6].pg_v)) {
#ifdef PMAPDEBUG
if (startpmapdebug) printf("Yes (1)\n");
#endif
return 1;
}
while ((pv = pv->pv_next)) {
if ((pv->pv_pte[0].pg_v | pv->pv_pte[2].pg_v |
pv->pv_pte[4].pg_v | pv->pv_pte[6].pg_v)) {
#ifdef PMAPDEBUG
if (startpmapdebug) printf("Yes (2)\n");
#endif
return 1;
}
}
#ifdef PMAPDEBUG
if (startpmapdebug) printf("No pmap_is_referenced\n");
#endif
#endif
return 0;
}