Flush D$ on bypass accesses.

This commit is contained in:
eeh 1999-03-22 05:35:39 +00:00
parent 04417da166
commit f0503a65c8
4 changed files with 387 additions and 59 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: ctlreg.h,v 1.5 1999/01/31 09:21:19 mrg Exp $ */
/* $NetBSD: ctlreg.h,v 1.6 1999/03/22 05:35:41 eeh Exp $ */
/*
* Copyright (c) 1996
@ -89,6 +89,9 @@
#define ASI_FLUSH_D_PAGE_SECONDARY 0x39 /* [4u] flush D-cache page using secondary context */
#define ASI_FLUSH_D_CTX_PRIMARY 0x3a /* [4u] flush D-cache context using primary context */
#define ASI_FLUSH_D_CTX_SECONDARY 0x3b /* [4u] flush D-cache context using secondary context */
#define ASI_LSU_CONTROL_REGISTER 0x45 /* [4u] load/store unit control register */
#define ASI_DCACHE_DATA 0x46 /* [4u] diagnostic access to D-cache data RAM */
#define ASI_DCACHE_TAG 0x47 /* [4u] diagnostic access to D-cache tag RAM */
@ -199,7 +202,7 @@
* [4u] MMU and Cache Control Register (MCCR)
* use ASI = 0x45
*/
#define ASI_MCCR 0x45
#define ASI_MCCR ASI_LSU_CONTROL_REGISTER
#define MCCR 0x00
/* MCCR Bits and their meanings */
@ -449,23 +452,89 @@
* on the address space identifiers (the "n"umeric value part) because
* it inlines too late, so we have to use the funny valued-macro syntax.
*/
/* DCACHE_BUG forces a flush of the D$ line on every ASI load */
#define DCACHE_BUG
/* load byte from alternate address space */
#ifdef DCACHE_BUG
#define lduba(loc, asi) ({ \
register int _lduba_v; \
if (asi == ASI_PHYS_CACHED) { \
__asm __volatile("wr %2,%%g0,%%asi; " \
" andn %1,0x1f,%2; stxa %%g0,[%2] %3; membar #Sync; " \
" lduba [%1]%%asi,%0" : "=r" (_lduba_v) : \
"r" ((long long)(loc)), "r" (asi), "n" (ASI_DCACHE_TAG)); \
} else { \
__asm __volatile("wr %2,%%g0,%%asi; lduba [%1]%%asi,%0" : \
"=r" (_lduba_v) : "r" ((long long)(loc)), "r" (asi)); \
} \
_lduba_v; \
})
#else
#define lduba(loc, asi) ({ \
register int _lduba_v; \
__asm __volatile("wr %2,%%g0,%%asi; lduba [%1]%%asi,%0" : "=r" (_lduba_v) : \
"r" ((long long)(loc)), "r" (asi)); \
_lduba_v; \
})
#endif
/* load half-word from alternate address space */
#ifdef DCACHE_BUG
#define lduha(loc, asi) ({ \
register int _lduha_v; \
if (asi == ASI_PHYS_CACHED) { \
__asm __volatile("wr %2,%%g0,%%asi; " \
" andn %1,0x1f,%2; stxa %%g0,[%2] %3; membar #Sync; " \
" lduha [%1]%%asi,%0" : "=r" (_lduha_v) : \
"r" ((long long)(loc)), "r" (asi), "n" (ASI_DCACHE_TAG)); \
} else { \
__asm __volatile("wr %2,%%g0,%%asi; lduha [%1]%%asi,%0" : "=r" (_lduha_v) : \
"r" ((long long)(loc)), "r" (asi)); \
} \
_lduha_v; \
})
#else
#define lduha(loc, asi) ({ \
register int _lduha_v; \
__asm __volatile("wr %2,%%g0,%%asi; lduha [%1]%%asi,%0" : "=r" (_lduha_v) : \
"r" ((long long)(loc)), "r" (asi)); \
_lduha_v; \
})
#endif
/* load int from alternate address space */
/* load unsigned int from alternate address space */
#ifdef DCACHE_BUG
#define lda(loc, asi) ({ \
register int _lda_v; \
if (asi == ASI_PHYS_CACHED) { \
__asm __volatile("wr %2,%%g0,%%asi; " \
" andn %1,0x1f,%2; stxa %%g0,[%2] %3; membar #Sync; " \
" lda [%1]%%asi,%0" : "=r" (_lda_v) : \
"r" ((int)(loc)), "r" (asi), "n" (ASI_DCACHE_TAG)); \
} else { \
__asm __volatile("wr %2,%%g0,%%asi; lda [%1]%%asi,%0" : "=r" (_lda_v) : \
"r" ((int)(loc)), "r" (asi)); \
} \
_lda_v; \
})
/* load signed int from alternate address space */
#define ldswa(loc, asi) ({ \
register int _lda_v; \
if (asi == ASI_PHYS_CACHED) { \
__asm __volatile("wr %2,%%g0,%%asi; " \
" andn %1,0x1f,%2; stxa %%g0,[%2] %3; membar #Sync; " \
" ldswa [%1]%%asi,%0" : "=r" (_lda_v) : \
"r" ((int)(loc)), "r" (asi), "n" (ASI_DCACHE_TAG)); \
} else { \
__asm __volatile("wr %2,%%g0,%%asi; ldswa [%1]%%asi,%0" : "=r" (_lda_v) : \
"r" ((int)(loc)), "r" (asi)); \
} \
_lda_v; \
})
#else
#define lda(loc, asi) ({ \
register int _lda_v; \
__asm __volatile("wr %2,%%g0,%%asi; lda [%1]%%asi,%0" : "=r" (_lda_v) : \
@ -479,24 +548,60 @@
"r" ((int)(loc)), "r" (asi)); \
_lda_v; \
})
#endif
/* store byte to alternate address space */
#define stba(loc, asi, value) ({ \
__asm __volatile("wr %2,%%g0,%%asi; stba %0,[%1]%%asi; membar #Sync" : : \
"r" ((int)(value)), "r" ((int)(loc)), "r" (asi)); \
#ifdef DCACHE_BUG
/* load 64-bit int from alternate address space */
#define ldda(loc, asi) ({ \
register long long _lda_v; \
if (asi == ASI_PHYS_CACHED) { \
__asm __volatile("wr %2,%%g0,%%asi; " \
" andn %1,0x1f,%2; stxa %%g0,[%2] %3; membar #Sync; " \
" ldda [%1]%%asi,%0" : "=r" (_lda_v) : \
"r" ((int)(loc)), "r" (asi), "n" (ASI_DCACHE_TAG)); \
} else { \
__asm __volatile("wr %2,%%g0,%%asi; ldda [%1]%%asi,%0" : "=r" (_lda_v) : \
"r" ((int)(loc)), "r" (asi)); \
} \
_lda_v; \
})
/* store half-word to alternate address space */
#define stha(loc, asi, value) ({ \
__asm __volatile("wr %2,%%g0,%%asi; stha %0,[%1]%%asi; membar #Sync" : : \
"r" ((int)(value)), "r" ((int)(loc)), "r" (asi)); \
#ifdef __arch64__
/* native load 64-bit int from alternate address space w/64-bit compiler */
#define ldxa(loc, asi) ({ \
register long _lda_v; \
if (asi == ASI_PHYS_CACHED) { \
__asm __volatile("wr %2,%%g0,%%asi; "\
" andn %1,0x1f,%2; stxa %%g0,[%2] %3; membar #Sync; " \
" ldxa [%1]%%asi,%0" : "=r" (_lda_v) : \
"r" ((long)(loc)), "r" (asi), "n" (ASI_DCACHE_TAG)); \
} else { \
__asm __volatile("wr %2,%%g0,%%asi; ldxa [%1]%%asi,%0" : "=r" (_lda_v) : \
"r" ((long)(loc)), "r" (asi)); \
} \
_lda_v; \
})
#else
/* native load 64-bit int from alternate address space w/32-bit compiler */
#define ldxa(loc, asi) ({ \
volatile register long _ldxa_lo, _ldxa_hi; \
if (asi == ASI_PHYS_CACHED) { \
__asm __volatile("wr %3,%%g0,%%asi; " \
" andn %2,0x1f,%3; stxa %%g0,[%3] %4; membar #Sync; " \
" ldxa [%2]%%asi,%0; srlx %0,32,%1; srl %0,0,%0" : \
"=r" (_ldxa_lo), "=r" (_ldxa_hi) : \
"r" ((long)(loc)), "r" (asi), "n" (ASI_DCACHE_TAG)); \
} else { \
__asm __volatile("wr %3,%%g0,%%asi; ldxa [%2]%%asi,%0; srlx %0,32,%1; srl %0,0,%0;" : \
"=r" (_ldxa_lo), "=r" (_ldxa_hi) : \
"r" ((long)(loc)), "r" (asi)); \
} \
((((int64_t)_ldxa_hi)<<32)|_ldxa_lo); \
})
#endif
/* store int to alternate address space */
#define sta(loc, asi, value) ({ \
__asm __volatile("wr %2,%%g0,%%asi; sta %0,[%1]%%asi; membar #Sync" : : \
"r" ((int)(value)), "r" ((int)(loc)), "r" (asi)); \
})
#else
/* load 64-bit int from alternate address space */
#define ldda(loc, asi) ({ \
@ -506,12 +611,6 @@
_lda_v; \
})
/* store 64-bit int to alternate address space */
#define stda(loc, asi, value) ({ \
__asm __volatile("wr %2,%%g0,%%asi; stda %0,[%1]%%asi; membar #Sync" : : \
"r" ((long long)(value)), "r" ((int)(loc)), "r" (asi)); \
})
#ifdef __arch64__
/* native load 64-bit int from alternate address space w/64-bit compiler */
#define ldxa(loc, asi) ({ \
@ -530,11 +629,37 @@
((((int64_t)_ldxa_hi)<<32)|_ldxa_lo); \
})
#endif
#endif
/* store byte to alternate address space */
#define stba(loc, asi, value) ({ \
__asm __volatile("wr %2,%%g0,%%asi; stba %0,[%1]%%asi" : : \
"r" ((int)(value)), "r" ((int)(loc)), "r" (asi)); \
})
/* store half-word to alternate address space */
#define stha(loc, asi, value) ({ \
__asm __volatile("wr %2,%%g0,%%asi; stha %0,[%1]%%asi" : : \
"r" ((int)(value)), "r" ((int)(loc)), "r" (asi)); \
})
/* store int to alternate address space */
#define sta(loc, asi, value) ({ \
__asm __volatile("wr %2,%%g0,%%asi; sta %0,[%1]%%asi" : : \
"r" ((int)(value)), "r" ((int)(loc)), "r" (asi)); \
})
/* store 64-bit int to alternate address space */
#define stda(loc, asi, value) ({ \
__asm __volatile("wr %2,%%g0,%%asi; stda %0,[%1]%%asi" : : \
"r" ((long long)(value)), "r" ((int)(loc)), "r" (asi)); \
})
#ifdef __arch64__
/* native store 64-bit int to alternate address space w/64-bit compiler */
#define stxa(loc, asi, value) ({ \
__asm __volatile("wr %2,%%g0,%%asi; stxa %0,[%1]%%asi; membar #Sync" : : \
__asm __volatile("wr %2,%%g0,%%asi; stxa %0,[%1]%%asi" : : \
"r" ((long)(value)), "r" ((long)(loc)), "r" (asi)); \
})
#else
@ -543,7 +668,7 @@
int64_t _stxa_v; \
int64_t *_stxa_a = &_stxa_v; \
_stxa_v = value; \
__asm __volatile("wr %2,%%g0,%%asi; ldx [%0],%3; stxa %3,[%1]%%asi; membar #Sync" : : \
__asm __volatile("wr %2,%%g0,%%asi; ldx [%0],%3; stxa %3,[%1]%%asi" : : \
"r" ((long)(_stxa_a)), "r" ((long)(loc)), "r" (asi), "r" ((long)(_stxa_v))); \
})
#endif
@ -554,7 +679,45 @@
"r" ((long)(loc))); \
})
/* Flush a D$ line */
#if 0
#define flushline(loc) ({ \
stxa(((paddr_t)loc)&(~0x1f), (ASI_DCACHE_TAG), 0); \
membar_sync(); \
})
#else
#define flushline(loc)
#endif
/* The following two enable or disable the dcache in the LSU control register */
#define dcenable() ({ \
int res; \
__asm __volatile("ldxa [%%g0] %1,%0; or %0,%2,%0; stxa %0,[%%g0] %1; membar #Sync" \
: "r" (res) : "n" (ASI_MCCR), "n" (MCCR_DCACHE_EN)); \
})
#define dcdisable() ({ \
int res; \
__asm __volatile("ldxa [%%g0] %1,%0; andn %0,%2,%0; stxa %0,[%%g0] %1; membar #Sync" \
: "r" (res) : "n" (ASI_MCCR), "n" (MCCR_DCACHE_EN)); \
})
/*
* SPARC V9 memory barrier instructions.
*/
/* Make all stores complete before next store */
#define membar_storestore() __asm __volatile("membar #StoreStore" : :)
/* Make all loads complete before next store */
#define membar_loadstore() __asm __volatile("membar #LoadStore" : :)
/* Make all stores complete before next load */
#define membar_storeload() __asm __volatile("membar #StoreLoad" : :)
/* Make all loads complete before next load */
#define membar_loadload() __asm __volatile("membar #LoadLoad" : :)
/* Complete all outstanding memory operations and exceptions */
#define membar_sync() __asm __volatile("membar #Sync" : :)
/* Complete all outstanding memory operations */
#define membar_memissue() __asm __volatile("membar #MemIssue" : :)
/* Complete all outstanding stores before any new loads */
#define membar_lookaside() __asm __volatile("membar #Lookaside" : :)
#ifdef __arch64__
/* read 64-bit %tick register */

View File

@ -1,4 +1,4 @@
/* $NetBSD: db_interface.c,v 1.15 1999/02/28 00:22:32 eeh Exp $ */
/* $NetBSD: db_interface.c,v 1.16 1999/03/22 05:35:40 eeh Exp $ */
/*
* Mach Operating System
@ -321,10 +321,20 @@ db_dump_dtlb(addr, have_addr, count, modif)
if (have_addr) {
int i;
long* p = (long*)addr;
int64_t* p = (int64_t*)addr;
static int64_t buf[128];
extern void dump_dtlb(int64_t *);
dump_dtlb(buf);
p = buf;
for (i=0; i<64;) {
#ifdef __arch64__
db_printf("%2d:%016.16lx %016.16lx ", i++, *p++, *p++);
db_printf("%2d:%016.16lx %016.16lx\n", i++, *p++, *p++);
#else
db_printf("%2d:%016.16qx %016.16qx ", i++, *p++, *p++);
db_printf("%2d:%016.16qx %016.16qx\n", i++, *p++, *p++);
#endif
}
} else
print_dtlb();

View File

@ -1,4 +1,4 @@
/* $NetBSD: locore.s,v 1.31 1999/02/28 00:26:46 eeh Exp $ */
/* $NetBSD: locore.s,v 1.32 1999/03/22 05:35:39 eeh Exp $ */
/*
* Copyright (c) 1996, 1997, 1998 Eduardo Horvath
* Copyright (c) 1996 Paul Kranenburg
@ -50,16 +50,19 @@
* @(#)locore.s 8.4 (Berkeley) 12/10/93
*/
#undef NO_VCACHE
#define TRAPTRACE
#define TRAPSTATS
#undef TRAPS_USE_IG
#undef LOCKED_PCB
#define HWREF
#define MMUDEBUG
#define VECTORED_INTERRUPTS
#undef PMAP_FPSTATE
#undef NO_VCACHE /* Map w/D$ disabled */
#undef TRAPTRACE /* Keep history of all traps (may watchdog) */
#define TRAPSTATS /* Count traps */
#undef TRAPS_USE_IG /* Use Interrupt Globals for trap handling */
#undef LOCKED_PCB /* Lock current proc's PCB in MMU */
#define HWREF /* Handle ref/mod tracking in trap handlers */
#undef MMUDEBUG /* Check use of MMU regs during MMU faults */
#define VECTORED_INTERRUPTS /* Use interrupt vectors */
#define PMAP_FPSTATE /* Allow nesting of VIS pmap copy/zero */
#undef PMAP_PHYS_PAGE /* Don't use block ld/st for pmap copy/zero */
#define DCACHE_BUG /* Clear D$ line before loads from ASI_PHYS */
#undef NO_TSB /* Don't use TSB */
#include "opt_ddb.h"
#include "opt_uvm.h"
#include "opt_compat_svr4.h"
@ -155,6 +158,28 @@
#define NOTREACHED
#endif
/*
* This macro will clear out a cache line before an explicit
* access to that location. It's mostly used to make certain that
* loads bypassing the D$ do not get stale D$ data.
*
* It uses a register with the address to clear and a temporary
* which is destroyed.
*/
#ifdef DCACHE_BUG
#define DLFLUSH(a,t) \
andn a, 0x1f, t; \
stxa %g0, [ t ] ASI_DCACHE_TAG; \
membar #Sync
#define DLFLUSH2(t) \
stxa %g0, [ t ] ASI_DCACHE_TAG; \
membar #Sync
#else
#define DLFLUSH(a,t)
#define DLFLUSH2(t)
#endif
/*
* A handy macro for maintaining instrumentation counters.
* Note that this clobbers %o0 and %o1. Normal usage is
@ -719,6 +744,10 @@ ufast_IMMU_miss: ! 063 = fast instr access MMU miss
inc %g6 ! DEBUG
stw %g6, [%g7+%lo(_C_LABEL(missmmu))] ! DEBUG
0: ! DEBUG
#endif
#ifdef NO_TSB
ba,a %icc, instr_miss;
nop
#endif
brgez,pn %g5, instr_miss ! Entry invalid? Punt
xor %g1, %g4, %g4 ! Compare TLB tags
@ -745,6 +774,10 @@ ufast_DMMU_miss: ! 068 = fast data access MMU miss
inc %g6 ! DEBUG
stw %g6, [%g7+%lo(_C_LABEL(missmmu))] ! DEBUG
0: ! DEBUG
#endif
#ifdef NO_TSB
ba,a %icc, data_miss;
nop
#endif
brgez,pn %g5, data_miss ! Entry invalid? Punt
xor %g1, %g4, %g4 ! Compare TLB tags
@ -984,6 +1017,10 @@ kfast_IMMU_miss: ! 063 = fast instr access MMU miss
inc %g6 ! DEBUG
stw %g6, [%g7+%lo(_C_LABEL(missmmu))] ! DEBUG
0: ! DEBUG
#endif
#ifdef NO_TSB
ba,a %icc, instr_miss;
nop
#endif
brgez,pn %g5, instr_miss ! Entry invalid? Punt
xor %g1, %g4, %g4 ! Compare TLB tags
@ -1010,6 +1047,10 @@ kfast_DMMU_miss: ! 068 = fast data access MMU miss
inc %g6 ! DEBUG
stw %g6, [%g7+%lo(_C_LABEL(missmmu))] ! DEBUG
0: ! DEBUG
#endif
#ifdef NO_TSB
ba,a %icc, data_miss;
nop
#endif
brgez,pn %g5, data_miss ! Entry invalid? Punt
xor %g1, %g4, %g4 ! Compare TLB tags
@ -1837,6 +1878,7 @@ asmptechk:
and %g5, STMASK, %g5
sll %g5, 3, %g5
add %g4, %g5, %g4
DLFLUSH(%g4,%g5)
ldxa [%g4] ASI_PHYS_CACHED, %g4 ! Remember -- UNSIGNED
brz,pn %g4, 1f ! NULL entry? check somewhere else
@ -1844,6 +1886,7 @@ asmptechk:
and %g5, PDMASK, %g5
sll %g5, 3, %g5
add %g4, %g5, %g4
DLFLUSH(%g4,%g5)
ldxa [%g4] ASI_PHYS_CACHED, %g4 ! Remember -- UNSIGNED
brz,pn %g4, 1f ! NULL entry? check somewhere else
@ -1851,6 +1894,7 @@ asmptechk:
and %g5, PTMASK, %g5
sll %g5, 3, %g5
add %g4, %g5, %g4
DLFLUSH(%g4,%g5)
ldxa [%g4] ASI_PHYS_CACHED, %g6
brgez,pn %g6, 1f ! Entry invalid? Punt
srlx %g6, 32, %o0
@ -1908,6 +1952,7 @@ dmmu_write_fault:
and %g5, STMASK, %g5
sll %g5, 3, %g5
add %g5, %g4, %g4
DLFLUSH(%g4,%g5)
ldxa [%g4] ASI_PHYS_CACHED, %g4
srlx %g3, PDSHIFT, %g5
@ -1915,6 +1960,7 @@ dmmu_write_fault:
sll %g5, 3, %g5
brz,pn %g4, winfix ! NULL entry? check somewhere else
add %g5, %g4, %g4
DLFLUSH(%g4,%g5)
ldxa [%g4] ASI_PHYS_CACHED, %g4
srlx %g3, PTSHIFT, %g5 ! Convert to ptab offset
@ -1922,6 +1968,7 @@ dmmu_write_fault:
sll %g5, 3, %g5
brz,pn %g4, winfix ! NULL entry? check somewhere else
add %g5, %g4, %g6
DLFLUSH(%g6,%g4)
ldxa [%g6] ASI_PHYS_CACHED, %g4
brgez,pn %g4, winfix ! Entry invalid? Punt
btst TTE_REAL_W|TTE_W, %g4 ! Is it a ref fault?
@ -2037,6 +2084,7 @@ Ludata_miss:
and %g5, STMASK, %g5
sll %g5, 3, %g5
add %g5, %g4, %g4
DLFLUSH(%g4,%g5)
ldxa [%g4] ASI_PHYS_CACHED, %g4
srlx %g3, PDSHIFT, %g5
@ -2044,6 +2092,7 @@ Ludata_miss:
sll %g5, 3, %g5
brz,pn %g4, winfix ! NULL entry? check somewhere else
add %g5, %g4, %g4
DLFLUSH(%g4,%g5)
ldxa [%g4] ASI_PHYS_CACHED, %g4
srlx %g3, PTSHIFT, %g5 ! Convert to ptab offset
@ -2051,6 +2100,7 @@ Ludata_miss:
sll %g5, 3, %g5
brz,pn %g4, winfix ! NULL entry? check somewhere else
add %g5, %g4, %g6
DLFLUSH(%g6,%g4)
ldxa [%g6] ASI_PHYS_CACHED, %g4
brgez,pn %g4, winfix ! Entry invalid? Punt
bset TTE_ACCESS, %g4 ! Update the modified bit
@ -2311,6 +2361,7 @@ winfixspill:
and %g7, STMASK, %g7
sll %g7, 3, %g7
add %g7, %g1, %g1
DLFLUSH(%g1,%g7)
ldxa [%g1] ASI_PHYS_CACHED, %g1 ! Load pointer to directory
srlx %g6, PDSHIFT, %g7 ! Do page directory
@ -2318,6 +2369,7 @@ winfixspill:
sll %g7, 3, %g7
brz,pn %g1, 0f
add %g7, %g1, %g1
DLFLUSH(%g1,%g7)
ldxa [%g1] ASI_PHYS_CACHED, %g1
srlx %g6, PTSHIFT, %g7 ! Convert to ptab offset
@ -2325,6 +2377,7 @@ winfixspill:
brz %g1, 0f
sll %g7, 3, %g7
add %g1, %g7, %g7
DLFLUSH(%g7,%g1)
ldxa [%g7] ASI_PHYS_CACHED, %g7 ! This one is not
brgez %g7, 0f
srlx %g7, PGSHIFT, %g7 ! Isolate PA part
@ -2340,6 +2393,8 @@ winfixspill:
* Now save all user windows to cpcb.
*/
#ifdef NOTDEF_DEBUG
add %g6, PCB_NSAVED, %g7
DLFLUSH(%g6,%g7)
lduba [%g6 + PCB_NSAVED] %asi, %g7 ! make sure that pcb_nsaved
brz,pt %g7, 1f ! is zero, else
nop
@ -2360,6 +2415,8 @@ winfixspill:
1:
mov %g7, %g1
CHKPT(%g5,%g7,0x13)
add %g6, PCB_NSAVED, %g7
DLFLUSH(%g6,%g7)
lduba [%g6 + PCB_NSAVED] %asi, %g7 ! Start incrementing pcb_nsaved
#ifdef DEBUG
@ -2879,6 +2936,7 @@ Lutext_miss:
and %g5, STMASK, %g5
sll %g5, 3, %g5
add %g5, %g4, %g4
DLFLUSH(%g4,%g5)
ldxa [%g4] ASI_PHYS_CACHED, %g4
srlx %g3, PDSHIFT, %g5
@ -2886,6 +2944,7 @@ Lutext_miss:
sll %g5, 3, %g5
brz,pn %g4, textfault ! NULL entry? check somewhere else
add %g5, %g4, %g4
DLFLUSH(%g4,%g5)
ldxa [%g4] ASI_PHYS_CACHED, %g4
srlx %g3, PTSHIFT, %g5 ! Convert to ptab offset
@ -2893,6 +2952,7 @@ Lutext_miss:
sll %g5, 3, %g5
brz,pn %g4, textfault ! NULL entry? check somewhere else
add %g5, %g4, %g6
DLFLUSH(%g6,%g4)
ldxa [%g6] ASI_PHYS_CACHED, %g4
brgez,pn %g4, textfault
bset TTE_ACCESS, %g4 ! Update accessed bit
@ -4572,7 +4632,7 @@ print_dtlb:
membar #Sync
inc %l2
set 2f, %o0
call _C_LABEL(printf)
call _C_LABEL(db_printf)
inc 8, %l1
ldxa [%l1] ASI_DMMU_TLB_TAG, %o2
@ -4582,7 +4642,7 @@ print_dtlb:
membar #Sync
inc %l2
set 3f, %o0
call _C_LABEL(printf)
call _C_LABEL(db_printf)
inc 8, %l1
cmp %l1, %l3
@ -4612,7 +4672,7 @@ print_dtlb:
inc %l2
srax %o4, 32, %o4
set 2f, %o0
call _C_LABEL(printf)
call _C_LABEL(db_printf)
inc 8, %l1
ldxa [%l1] ASI_DMMU_TLB_TAG, %o2
@ -4626,7 +4686,7 @@ print_dtlb:
inc %l2
srax %o4, 32, %o4
set 3f, %o0
call _C_LABEL(printf)
call _C_LABEL(db_printf)
inc 8, %l1
cmp %l1, %l3
@ -4802,6 +4862,16 @@ dostart:
stx %o1, [%o0]
#endif
#if 0
/*
* Disable the DCACHE entirely for debug.
*/
ldxa [%g0] ASI_MCCR, %o1
andn %o1, MCCR_DCACHE_EN, %o1
stxa %o1, [%g0] ASI_MCCR
membar #Sync
#endif
/*
* Ready to run C code; finish bootstrap.
*/
@ -6942,6 +7012,7 @@ Lsw_load:
and %o1, STMASK, %o1
sll %o1, 3, %o1
add %o1, %o4, %o4
DLFLUSH(%o4,%o1)
ldxa [%o4] ASI_PHYS_CACHED, %o4
srlx %g1, PDSHIFT, %o1
@ -6949,6 +7020,7 @@ Lsw_load:
sll %o1, 3, %o1
brz,pn %o4, 1f ! NULL entry? check somewhere else
add %o1, %o4, %o4
DLFLUSH(%o4,%o1)
ldxa [%o4] ASI_PHYS_CACHED, %o4
srlx %g1, PTSHIFT, %o1 ! Convert to ptab offset
@ -6956,6 +7028,7 @@ Lsw_load:
sll %o1, 3, %o1
brz,pn %o4, 1f ! NULL entry? check somewhere else
add %o1, %o4, %o0
DLFLUSH(%o0,%o4)
ldxa [%o0] ASI_PHYS_CACHED, %o4
brgez,pn %o4, 1f ! Entry invalid? Punt
mov TLB_TAG_ACCESS, %o2
@ -7470,6 +7543,8 @@ ENTRY(_remque)
* the contents of the D$. We will execute a flush at the end
* to sync the I$.
*/
paginuse:
.word 0
ENTRY(pmap_zero_page)
!!
!! If we have 64-bit physical addresses (and we do now)
@ -7525,7 +7600,7 @@ ENTRY(pmap_zero_page)
!! routine and nest FP use in the kernel
!!
save %sp, -(CC64FSZ+FS_SIZE+BLOCK_SIZE), %sp ! Allocate an fpstate
add %sp, (CC64FSZ+STKB+BLOCK_SIZE), %l0 ! Calculate pointer to fpstate
add %sp, (CC64FSZ+STKB+BLOCK_SIZE-1), %l0 ! Calculate pointer to fpstate
rd %fprs, %l1 ! Save old fprs so we can restore it later
andn %l0, BLOCK_ALIGN, %l0 ! And make it block aligned
call _C_LABEL(savefpstate)
@ -7548,6 +7623,19 @@ ENTRY(pmap_zero_page)
1:
wr %o1, 0, %fprs ! Enable the FPU
#endif
#ifdef DEBUG
sethi %hi(paginuse), %o4 ! Prevent this from nesting
lduw [%o4 + %lo(paginuse)], %o5
tst %o5
tnz %icc, 1
bnz,pn %icc, pmap_zero_phys
inc %o5
stw %o5, [%o4 + %lo(paginuse)]
#endif
rdpr %pil, %g1
wrpr %g0, 15, %pil ! s = splhigh()
fzero %f0 ! Set up FPU
fzero %f2
@ -7593,6 +7681,27 @@ ENTRY(pmap_zero_page)
stxa %o3, [%o3] ASI_DMMU_DEMAP ! Demap the page again
membar #Sync ! No real reason for this XXXX
#ifdef DEBUG
!!
!! Use phys accesses to verify page is clear
!!
set NBPG, %o4
1:
DLFLUSH(%o0,%o1)
ldxa [%o0] ASI_PHYS_CACHED, %o1
dec 8, %o4
tst %o1
tnz %icc, 1
brnz,pt %o4, 1b
inc 8, %o0
sethi %hi(paginuse), %o4 ! Prevent this from nesting
stw %g0, [%o4 + %lo(paginuse)]
#endif
wrpr %g1, 0, %pil ! splx(s)
#ifdef PMAP_FPSTATE
btst FPRS_DU|FPRS_DL, %l1 ! Anything to restore?
bz,pt %icc, 1f
@ -7600,7 +7709,7 @@ ENTRY(pmap_zero_page)
call _C_LABEL(loadfpstate)
mov %l0, %o0
1:
return
! return ! Does this work?
wr %l1, 0, %fprs
ret
restore
@ -7705,7 +7814,7 @@ ENTRY(pmap_copy_page)
!! routine and nest FP use in the kernel
!!
save %sp, -(CC64FSZ+FS_SIZE+BLOCK_SIZE), %sp ! Allocate an fpstate
add %sp, (CC64FSZ+STKB+BLOCK_SIZE), %l0 ! Calculate pointer to fpstate
add %sp, (CC64FSZ+STKB+BLOCK_SIZE-1), %l0 ! Calculate pointer to fpstate
andn %l0, BLOCK_ALIGN, %l0 ! And make it block aligned
rd %fprs, %l1 ! Save old fprs so we can restore it later
call _C_LABEL(savefpstate)
@ -7725,11 +7834,24 @@ ENTRY(pmap_copy_page)
LDPTR [%o4 + %lo(_C_LABEL(fpproc))], %o4
bz,pt %icc, 1f ! No, use fpregs
bset FPRS_FEF, %o5
brz,pn %o4, pmap_zero_phys ! No userland fpstate so do this the slow way
brz,pn %o4, pmap_copy_phys ! No userland fpstate so do this the slow way
1:
wr %o5, 0, %fprs ! Enable the FPU
#endif
#ifdef DEBUG
sethi %hi(paginuse), %o4 ! Prevent this from nesting
lduw [%o4 + %lo(paginuse)], %o5
tst %o5
tnz %icc, 1
bnz,pn %icc, pmap_copy_phys
inc %o5
stw %o5, [%o4 + %lo(paginuse)]
#endif
rdpr %pil, %g1
wrpr %g0, 15, %pil ! s = splhigh();
stxa %o3, [%o3] ASI_DMMU_DEMAP ! Do the demap
sethi %hi(NBPG), %o4
membar #Sync ! No real reason for this XXXX
@ -7769,7 +7891,7 @@ ENTRY(pmap_copy_page)
membar #LoadStore
fmovd %f14, %f14 ! Sync 1st bank
stda %f0, [%o4] ASI_BLK_P ! Store 1st bank
stda %f0, [%o4] ASI_BLK_COMMIT_P ! Store 1st bank
brlez,pn %o5, 1f ! Finished?
add %o4, 64, %o4
@ -7780,7 +7902,7 @@ ENTRY(pmap_copy_page)
membar #LoadStore
fmovd %f30, %f30 ! Sync 2nd bank
stda %f16, [%o4] ASI_BLK_P ! Store 2nd bank
stda %f16, [%o4] ASI_BLK_COMMIT_P ! Store 2nd bank
brgz,pt %o5, 1b ! Finished?
add %o4, 64, %o4
@ -7789,7 +7911,7 @@ ENTRY(pmap_copy_page)
!!
membar #LoadStore
fmovd %f14, %f14 ! Sync 1st bank
stda %f0, [%o4] ASI_BLK_P ! Store 1st bank
stda %f0, [%o4] ASI_BLK_COMMIT_P ! Store 1st bank
ba,pt %icc, 2f ! Finished?
add %o4, 64, %o4
@ -7799,7 +7921,7 @@ ENTRY(pmap_copy_page)
!!
membar #LoadStore
fmovd %f30, %f30 ! Sync 2nd bank
stda %f16, [%o4] ASI_BLK_P ! Store 2nd bank
stda %f16, [%o4] ASI_BLK_COMMIT_P ! Store 2nd bank
add %o4, 64, %o4
2:
@ -7810,7 +7932,37 @@ ENTRY(pmap_copy_page)
sub %o3, %o4, %o3
stxa %o3, [%o3] ASI_DMMU_DEMAP ! Demap the source page again
membar #Sync ! No real reason for this XXXX
#ifdef DEBUG
!!
!! Use phys accesses to verify copy
!!
sethi %hi(0x80000000), %o4 ! Setup TTE:
sllx %o4, 32, %o4 ! V = 1
or %o4, TTE_CP|TTE_P|TTE_W|TTE_L, %o4 ! CP=1|P=1|W=1|L=1
andn %o1, %o4, %o0 ! Clear out TTE to get PADDR
andn %o1, %o4, %o1 ! Clear out TTE to get PADDR
set NBPG, %o3
1:
DLFLUSH(%o0,%o4)
ldxa [%o0] ASI_PHYS_CACHED, %o4
DLFLUSH(%o1,%o5)
ldxa [%o1] ASI_PHYS_CACHED, %o5
dec 8, %o3
cmp %o4, %o5
tne %icc, 1
inc 8, %o0
brnz,pt %o4, 1b
inc 8, %o1
sethi %hi(paginuse), %o4 ! Prevent this from nesting
stw %g0, [%o4 + %lo(paginuse)]
#endif
wrpr %g1, 0, %pil ! splx(s)
#ifdef PMAP_FPSTATE
btst FPRS_DU|FPRS_DL, %l1 ! Anything to restore?
bz,pt %icc, 1f
@ -7818,7 +7970,7 @@ ENTRY(pmap_copy_page)
call _C_LABEL(loadfpstate)
mov %l0, %o0
1:
return
! return ! Does this work?
wr %l1, 0, %fprs
ret
restore
@ -7914,6 +8066,7 @@ pmap_copy_phys:
sub %o3, 8, %o2
mov %g1, %o4 ! Save g1
1:
DLFLUSH(%o0,%g1)
ldxa [%o0] ASI_PHYS_CACHED, %g1
inc 8, %o0
stxa %g1, [%o1] ASI_PHYS_CACHED
@ -7935,6 +8088,7 @@ pmap_copy_phys:
add %o3, %o0, %o3
mov %g1, %o4 ! Save g1
1:
DLFLUSH(%o0,%g1)
ldxa [%o0] ASI_PHYS_CACHED, %g1
inc 8, %o0
cmp %o0, %o3
@ -7967,6 +8121,7 @@ ENTRY(pseg_get)
and %o3, STMASK, %o3 ! Index into pm_segs
sll %o3, 3, %o3
add %o2, %o3, %o2
DLFLUSH(%o2,%o3)
ldxa [%o2] ASI_PHYS_CACHED, %o2 ! Load page directory pointer
srlx %o1, PDSHIFT, %o3
@ -7974,6 +8129,7 @@ ENTRY(pseg_get)
sll %o3, 3, %o3
brz,pn %o2, 1f ! NULL entry? check somewhere else
add %o2, %o3, %o2
DLFLUSH(%o2,%o3)
ldxa [%o2] ASI_PHYS_CACHED, %o2 ! Load page table pointer
srlx %o1, PTSHIFT, %o3 ! Convert to ptab offset
@ -7981,6 +8137,7 @@ ENTRY(pseg_get)
sll %o3, 3, %o3
brz,pn %o2, 1f ! NULL entry? check somewhere else
add %o2, %o3, %o2
DLFLUSH(%o2,%o0)
ldxa [%o2] ASI_PHYS_CACHED, %o0
brgez,pn %o0, 1f ! Entry invalid? Punt
btst 1, %sp
@ -7994,6 +8151,7 @@ ENTRY(pseg_get)
retl ! No, generate a %o0:%o1 double
srlx %o0, 32, %o0
#else
DLFLUSH(%o2,%o0)
retl ! No, generate a %o0:%o1 double
ldda [%o2] ASI_PHYS_CACHED, %o0
#endif
@ -8067,6 +8225,7 @@ ENTRY(pseg_set)
and %o5, STMASK, %o5
sll %o5, 3, %o5
add %o4, %o5, %o4
DLFLUSH(%o4,%o5)
ldxa [%o4] ASI_PHYS_CACHED, %o5 ! Load page directory pointer
brnz,a,pt %o5, 0f ! Null pointer?
@ -8080,6 +8239,7 @@ ENTRY(pseg_set)
and %o5, PDMASK, %o5
sll %o5, 3, %o5
add %o4, %o5, %o4
DLFLUSH(%o4,%o5)
ldxa [%o4] ASI_PHYS_CACHED, %o5 ! Load table directory pointer
brnz,a,pt %o5, 0f ! Null pointer?

View File

@ -1,6 +1,6 @@
/* $NetBSD: pmap.c,v 1.25 1999/03/10 01:54:16 eeh Exp $ */
/* $NetBSD: pmap.c,v 1.26 1999/03/22 05:35:40 eeh Exp $ */
/* #define NO_VCACHE */ /* Don't forget the locked TLB in dostart */
/* #define HWREF */
#define HWREF
/* #define BOOT_DEBUG */
/* #define BOOT1_DEBUG */
/*
@ -3050,13 +3050,10 @@ pmap_page_protect(pa, prot)
if (data & (TLB_W|TLB_MODIFY))
firstpv->pv_va |= PV_MOD;
if (data & TLB_TSB_LOCK) {
#ifdef DEBUG
#ifdef DIAGNOSTIC
printf("pmap_page_protect: Removing wired page pm %p va %p\n",
npv->pv_pmap, npv->pv_va);
#endif
/* Skip this pv, it's wired */
pv = npv;
continue;
}
/* Clear mapping */
if (pseg_set(npv->pv_pmap, npv->pv_va&PV_VAMASK, 0, 0)) {
@ -3107,12 +3104,10 @@ pmap_page_protect(pa, prot)
if (data & (TLB_W|TLB_MODIFY))
pv->pv_va |= PV_MOD;
if (data & TLB_TSB_LOCK) {
#ifdef DEBUG
#ifdef DIAGNOSTIC
printf("pmap_page_protect: Removing wired page pm %p va %p\n",
pv->pv_pmap, pv->pv_va);
#endif
/* It's wired, leave it */
goto skipit;
}
if (pseg_set(pv->pv_pmap, pv->pv_va&PV_VAMASK, 0, 0)) {
printf("pmap_page_protect: gotten pseg empty!\n");