Re-implement page reference bit emulation by using the (otherwise unused)
valid bit. This is faster than the "unmap all" solution that was described
in that Mach paper _and_ it eliminates the need for checking the wired bit.
As a result of this, swapping started working again on vax :-)
ragge 1998-01-31 12:17:34 +00:00
parent 1caaa1f520
commit a355d0bc1b
4 changed files with 101 additions and 17 deletions
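In rough C terms, the new scheme works like this (an illustrative sketch,
not code from this commit; PG_V is the PTE valid bit, 0x80000000 on VAX):

	/*
	 * VAX PTEs have no hardware reference bit.  Emulate one with the
	 * valid bit: "clearing the reference" invalidates the mapping, and
	 * the translation fault taken on the next touch re-validates it,
	 * which is exactly the event that records a reference.
	 */
	#define PG_V	0x80000000		/* PTE valid bit */

	void
	clear_reference(u_int *pte)
	{
		pte[0] &= ~PG_V;	/* next access will trap... */
		pte[1] &= ~PG_V;	/* ...both PTEs of the two-hw-page VM cluster */
	}

	int
	is_referenced(u_int *pte)
	{
		/* the fault handler sets PG_V again on the first touch */
		return (pte[0] & PG_V) != 0;
	}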

sys/arch/vax/include/pmap.h

@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.24 1998/01/18 22:07:50 ragge Exp $	*/
+/*	$NetBSD: pmap.h,v 1.25 1998/01/31 12:17:40 ragge Exp $	*/
 /*
  * Copyright (c) 1987 Carnegie-Mellon University
@@ -102,8 +102,6 @@ extern struct pmap kernel_pmap_store;
 #define	pmap_collect(pmap)		/* No need so far */
 #define	pmap_reference(pmap)		if(pmap) (pmap)->ref_count++
 #define	pmap_phys_address(phys)		((u_int)(phys)<<PAGE_SHIFT)
-#define	pmap_is_referenced(phys)	(FALSE)
-#define	pmap_clear_reference(pa)	pmap_page_protect(pa, VM_PROT_NONE)
 #define	pmap_change_wiring(pmap, v, w)	/* no need */
 #define	pmap_remove(pmap, start, slut)	pmap_protect(pmap, start, slut, 0)
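The two deleted stubs were the root of the problem: pmap_is_referenced()
always answered FALSE, and pmap_clear_reference() fell back on unmapping the
page everywhere. With the real implementations added in pmap.c below, a
Mach-style pageout clock can work as intended; roughly (a simplified sketch,
not the actual vm_pageout code):

	/* One pass of the clock hand over a page. */
	void
	clock_tick(vm_offset_t pa, vm_page_t pg)
	{
		if (pmap_is_referenced(pa))
			pmap_clear_reference(pa);	/* re-arm; keep the page */
		else
			vm_page_deactivate(pg);		/* eviction candidate */
	}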

sys/arch/vax/vax/genassym.cf

@@ -1,4 +1,4 @@
-#	$NetBSD: genassym.cf,v 1.3 1997/11/04 22:59:36 ragge Exp $
+#	$NetBSD: genassym.cf,v 1.4 1998/01/31 12:17:37 ragge Exp $
 #
 # Copyright (c) 1997 Ludd, University of Lule}, Sweden.
 # All rights reserved.
@@ -61,6 +61,8 @@ define PR_IPL		PR_IPL
 define PR_SBIFS		PR_SBIFS
 define PR_EHSR		PR_EHSR
 define PR_MCESR		PR_MCESR
+define PR_P0BR		PR_P0BR
+define PR_P1BR		PR_P1BR
 
 # trap numbering
 define T_KSPNOTVAL	T_KSPNOTVAL
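PR_P0BR and PR_P1BR are the internal processor registers that hold the page
table base addresses for the P0 and P1 regions of a process's address space;
exporting their numbers here is what lets the new fault handler in intvec.s
read them with mfpr. The address arithmetic that handler performs looks
roughly like this in C (illustrative sketch only; va_to_ptepair is an
invented name, and the shifts follow from the 512-byte VAX hardware page):

	/* Locate the PTE pair mapping va.  PTEs are 4 bytes each, and they
	   come in pairs because one VM page covers two hardware pages. */
	struct pte *
	va_to_ptepair(u_int va)
	{
		u_int idx = ((va >> 10) & 0xfffff) * 2;	/* even PTE index */

		if (va & 0x80000000)			/* system space */
			return Sysmap + idx;
		if (va & 0x40000000)			/* P1 space */
			return (struct pte *)mfpr(PR_P1BR) + idx;
		return (struct pte *)mfpr(PR_P0BR) + idx;	/* P0 space */
	}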

sys/arch/vax/vax/intvec.s

@@ -1,7 +1,7 @@
-/*	$NetBSD: intvec.s,v 1.25 1997/11/13 10:43:27 veego Exp $	*/
+/*	$NetBSD: intvec.s,v 1.26 1998/01/31 12:17:36 ragge Exp $	*/
 
 /*
- * Copyright (c) 1994 Ludd, University of Lule}, Sweden.
+ * Copyright (c) 1994, 1997 Ludd, University of Lule}, Sweden.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -214,23 +214,67 @@ L4:	addl2	(sp)+,sp	# remove info pushed on stack
 	TRAPCALL(resopflt, T_RESOPFLT)
 	TRAPCALL(resadflt, T_RESADFLT)
 
+/*
+ * Translation fault, used only when simulating page reference bit.
+ * Therefore it is done a fast revalidation of the page if it is
+ * referenced. Trouble here is the hardware bug on KA650 CPUs that
+ * put in a need for an extra check when the fault is gotten during
+ * PTE reference.
+ */
 	.align	2
 transl_v:	.globl	transl_v	# Translation violation, 20
 	pushl	$T_TRANSFLT
-L3:	bbc	$1,4(sp),L1
-	bisl2	$T_PTEFETCH, (sp)
-L1:	bbc	$2,4(sp),L2
-	bisl2	$T_WRITE, (sp)
-L2:	movl	(sp), 4(sp)
-	addl2	$4, sp
-	jbr	trap
+#ifdef DEBUG
+	bbc	$0,(sp),1f		# pte len illegal in trans fault
+	pushab	2f
+	calls	$1,_panic
+2:	.asciz	"pte trans"
+#endif
+1:	pushr	$3			# save r0 & r1
+	movl	12(sp),r0		# Save faulted address in r0
+	blss	2f			# Jump if in kernelspace
+	ashl	$1,r0,r0
+	blss	3f			# Jump if P1
+	mfpr	$PR_P0BR,r1
+	brb	4f
+3:	mfpr	$PR_P1BR,r1
+4:	bbc	$1,8(sp),5f		# Jump if not indirect
+	extzv	$10,$21,r0,r0		# extract pte number
+	moval	(r1)[r0],r0		# get address of pte
+#if defined(VAX650) || defined(DEBUG)
+	extzv	$10,$20,r0,r1
+	movl	_Sysmap,r0
+	movaq	(r0)[r1],r0
+	tstl	(r0)			# If pte clear, found HW bug.
+	bneq	6f
+	popr	$3
+	brb	access_v
+#endif
+2:	extzv	$10,$20,r0,r1		# get pte index
+	movl	_Sysmap,r0
+	movaq	(r0)[r1],r0		# pte address
+6:	bisl2	$0x80000000,(r0)+	# set valid bit
+	bisl2	$0x80000000,(r0)
+	popr	$3
+	addl2	$8,sp
+	rei
+5:	extzv	$11,$20,r0,r0
+	movaq	(r1)[r0],r0
+	brb	6b
 
 	.align	2
 access_v:.globl	access_v	# Access cntrl viol fault, 24
 	blbs	(sp), ptelen
 	pushl	$T_ACCFLT
-	jbr	L3
+	bbc	$1,4(sp),1f
+	bisl2	$T_PTEFETCH,(sp)
+1:	bbc	$2,4(sp),2f
+	bisl2	$T_WRITE,(sp)
+2:	movl	(sp), 4(sp)
+	addl2	$4, sp
+	jbr	trap
 
 ptelen:	movl	$T_PTELEN, (sp)	# PTE must expand (or send segv)
 	jbr	trap;
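In C terms, the new fast path above does roughly the following (an
illustrative rendering only; the real handler is the assembly, the helper is
the hypothetical va_to_ptepair() sketched earlier, and the fault code and
faulting address sit at 8(sp) and 12(sp) in the real stack frame):

	void
	transl_fault(u_int code, u_int va)
	{
		struct pte *pte;

		if (va & 0x80000000)		/* kernel space: use Sysmap */
			pte = Sysmap + ((va >> 10) & 0xfffff) * 2;
		else if (code & 2) {
			/* The fault hit while fetching the process PTE itself,
			 * so revalidate the Sysmap pair mapping the page-table
			 * page.  On KA650 (or with DEBUG) that pair is first
			 * tested: finding it zero means the hardware bug
			 * struck, and the trap is redispatched to access_v. */
			u_int ppte = (u_int)va_to_ptepair(va);
			pte = Sysmap + ((ppte >> 10) & 0xfffff) * 2;
		} else
			pte = va_to_ptepair(va);	/* plain P0/P1 fault */

		pte[0].pg_v = pte[1].pg_v = 1;	/* revalidate == mark referenced */
		/* rei: retry the faulting instruction */
	}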

sys/arch/vax/vax/pmap.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.44 1998/01/27 17:35:03 ragge Exp $	*/
+/*	$NetBSD: pmap.c,v 1.45 1998/01/31 12:17:34 ragge Exp $	*/
 /*
  * Copyright (c) 1994 Ludd, University of Lule}, Sweden.
  * All rights reserved.
@@ -615,6 +615,46 @@ if(startpmapdebug) printf("pmap_protect: pmap %x, start %x, end %x, prot %x\n",
 	mtpr(0,PR_TBIA);
 }
 
+/*
+ * Checks if page is referenced; returns true or false depending on result.
+ */
+boolean_t
+pmap_is_referenced(pa)
+	vm_offset_t pa;
+{
+	struct pv_entry *pv;
+
+	pv = pv_table + (pa >> CLSHIFT);
+
+	if (pv->pv_pte)
+		if ((pv->pv_pte[0].pg_v))
+			return 1;
+
+	while ((pv = pv->pv_next)) {
+		if ((pv->pv_pte[0].pg_v))
+			return 1;
+	}
+	return 0;
+}
+
+/*
+ * Clears valid bit in all ptes referenced to this physical page.
+ */
+void
+pmap_clear_reference(pa)
+	vm_offset_t pa;
+{
+	struct pv_entry *pv;
+
+	pv = pv_table + (pa >> CLSHIFT);
+
+	if (pv->pv_pte)
+		pv->pv_pte[0].pg_v = pv->pv_pte[1].pg_v = 0;
+
+	while ((pv = pv->pv_next))
+		pv->pv_pte[0].pg_v = pv->pv_pte[1].pg_v = 0;
+}
+
 /*
  * Checks if page is modified; returns true or false depending on result.
  */
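Two details in the functions above are worth spelling out (my reading of the
code, not text from the commit): pv_table holds one head pv_entry per
physical page, chained through pv_next so that every mapping of the page is
reachable, and both pg_v bits of a PTE pair are cleared together because one
VM page spans two 512-byte hardware pages. The walk the two functions share
could be factored like this (hypothetical helper; the committed code
open-codes it):

	/* Apply fn to the PTE pair of every mapping of physical page pa. */
	static void
	pv_apply(vm_offset_t pa, void (*fn)(struct pte *))
	{
		struct pv_entry *pv = pv_table + (pa >> CLSHIFT);

		if (pv->pv_pte)			/* head entry may be unused */
			(*fn)(pv->pv_pte);
		while ((pv = pv->pv_next) != NULL)
			(*fn)(pv->pv_pte);
	}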