Remove cache_flush_virt() and PADDRT; they're no longer used.

Allocate a stack frame for blast_dcache() when profiling so it shows up in the profile.
In dcache_flush_page(), use a stride of 32 instead of 16 to match the
cache line size.  Correct various comments.
This commit is contained in:
chs 2004-12-03 02:04:00 +00:00
parent 2db3939de3
commit 5500ae7993
3 changed files with 60 additions and 123 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: cache.h,v 1.6 2002/09/29 04:12:03 chs Exp $ */
/* $NetBSD: cache.h,v 1.7 2004/12/03 02:04:00 chs Exp $ */
/*
* Copyright (c) 1996
@ -79,7 +79,6 @@ void blast_dcache __P((void)); /* Clear entire D$ */
void blast_icache __P((void)); /* Clear entire I$ */
/* The following flush a range from the D$ and I$ but not E$. */
void cache_flush_virt __P((vaddr_t, vsize_t));
void cache_flush_phys __P((paddr_t, psize_t, int));
/*

View File

@ -1,4 +1,4 @@
# $NetBSD: genassym.cf,v 1.36 2004/03/26 23:18:42 petrov Exp $
# $NetBSD: genassym.cf,v 1.37 2004/12/03 02:04:00 chs Exp $
#
# Copyright (c) 1997 The NetBSD Foundation, Inc.
@ -116,7 +116,6 @@ endif
# general constants
define BSD BSD
define USRSTACK USRSTACK
define PADDRT sizeof(paddr_t)
define PAGE_SIZE PAGE_SIZE
# Important offsets into the lwp and proc structs & associated constants

View File

@ -1,4 +1,4 @@
/* $NetBSD: locore.s,v 1.200 2004/11/08 08:55:43 petrov Exp $ */
/* $NetBSD: locore.s,v 1.201 2004/12/03 02:04:00 chs Exp $ */
/*
* Copyright (c) 1996-2002 Eduardo Horvath
@ -858,33 +858,33 @@ _C_LABEL(trapbase):
UTRAP(T_ECCERR) ! We'll implement this one later
ufast_IMMU_miss: ! 064 = fast instr access MMU miss
TRACEFLT ! DEBUG
ldxa [%g0] ASI_IMMU_8KPTR, %g2 ! Load IMMU 8K TSB pointer
ldxa [%g0] ASI_IMMU_8KPTR, %g2 ! Load IMMU 8K TSB pointer
#ifdef NO_TSB
ba,a %icc, instr_miss;
ba,a %icc, instr_miss
#endif
ldxa [%g0] ASI_IMMU, %g1 ! Load IMMU tag target register
ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4 !Load TSB tag:data into %g4:%g5
brgez,pn %g5, instr_miss ! Entry invalid? Punt
cmp %g1, %g4 ! Compare TLB tags
bne,pn %xcc, instr_miss ! Got right tag?
ldxa [%g0] ASI_IMMU, %g1 ! Load IMMU tag target register
ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4 ! Load TSB tag:data into %g4:%g5
brgez,pn %g5, instr_miss ! Entry invalid? Punt
cmp %g1, %g4 ! Compare TLB tags
bne,pn %xcc, instr_miss ! Got right tag?
nop
CLRTT
stxa %g5, [%g0] ASI_IMMU_DATA_IN! Enter new mapping
retry ! Try new mapping
stxa %g5, [%g0] ASI_IMMU_DATA_IN ! Enter new mapping
retry ! Try new mapping
1:
sir
TA32
ufast_DMMU_miss: ! 068 = fast data access MMU miss
TRACEFLT ! DEBUG
ldxa [%g0] ASI_DMMU_8KPTR, %g2! Load DMMU 8K TSB pointer
ldxa [%g0] ASI_DMMU_8KPTR, %g2! Load DMMU 8K TSB pointer
#ifdef NO_TSB
ba,a %icc, data_miss;
ba,a %icc, data_miss
#endif
ldxa [%g0] ASI_DMMU, %g1 ! Hard coded for unified 8K TSB Load DMMU tag target register
ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4 ! Load TSB tag and data into %g4 and %g5
brgez,pn %g5, data_miss ! Entry invalid? Punt
cmp %g1, %g4 ! Compare TLB tags
bnz,pn %xcc, data_miss ! Got right tag?
ldxa [%g0] ASI_DMMU, %g1 ! Load DMMU tag target register
ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4 ! Load TSB tag and data into %g4 and %g5
brgez,pn %g5, data_miss ! Entry invalid? Punt
cmp %g1, %g4 ! Compare TLB tags
bnz,pn %xcc, data_miss ! Got right tag?
nop
CLRTT
#ifdef TRAPSTATS
@ -893,8 +893,8 @@ ufast_DMMU_miss: ! 068 = fast data access MMU miss
inc %g2
stw %g2, [%g1+%lo(_C_LABEL(udhit))]
#endif
stxa %g5, [%g0] ASI_DMMU_DATA_IN! Enter new mapping
retry ! Try new mapping
stxa %g5, [%g0] ASI_DMMU_DATA_IN ! Enter new mapping
retry ! Try new mapping
1:
sir
TA32
@ -1101,33 +1101,33 @@ kdatafault:
UTRAP(T_ECCERR) ! We'll implement this one later
kfast_IMMU_miss: ! 064 = fast instr access MMU miss
TRACEFLT ! DEBUG
ldxa [%g0] ASI_IMMU_8KPTR, %g2 ! Load IMMU 8K TSB pointer
ldxa [%g0] ASI_IMMU_8KPTR, %g2 ! Load IMMU 8K TSB pointer
#ifdef NO_TSB
ba,a %icc, instr_miss;
ba,a %icc, instr_miss
#endif
ldxa [%g0] ASI_IMMU, %g1 ! Load IMMU tag target register
ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4 !Load TSB tag:data into %g4:%g5
brgez,pn %g5, instr_miss ! Entry invalid? Punt
cmp %g1, %g4 ! Compare TLB tags
bne,pn %xcc, instr_miss ! Got right tag?
ldxa [%g0] ASI_IMMU, %g1 ! Load IMMU tag target register
ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4 ! Load TSB tag:data into %g4:%g5
brgez,pn %g5, instr_miss ! Entry invalid? Punt
cmp %g1, %g4 ! Compare TLB tags
bne,pn %xcc, instr_miss ! Got right tag?
nop
CLRTT
stxa %g5, [%g0] ASI_IMMU_DATA_IN! Enter new mapping
retry ! Try new mapping
stxa %g5, [%g0] ASI_IMMU_DATA_IN ! Enter new mapping
retry ! Try new mapping
1:
sir
TA32
kfast_DMMU_miss: ! 068 = fast data access MMU miss
TRACEFLT ! DEBUG
ldxa [%g0] ASI_DMMU_8KPTR, %g2! Load DMMU 8K TSB pointer
ldxa [%g0] ASI_DMMU_8KPTR, %g2! Load DMMU 8K TSB pointer
#ifdef NO_TSB
ba,a %icc, data_miss;
ba,a %icc, data_miss
#endif
ldxa [%g0] ASI_DMMU, %g1 ! Hard coded for unified 8K TSB Load DMMU tag target register
ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4 ! Load TSB tag and data into %g4 and %g5
brgez,pn %g5, data_miss ! Entry invalid? Punt
cmp %g1, %g4 ! Compare TLB tags
bnz,pn %xcc, data_miss ! Got right tag?
ldxa [%g0] ASI_DMMU, %g1 ! Load DMMU tag target register
ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4 ! Load TSB tag and data into %g4 and %g5
brgez,pn %g5, data_miss ! Entry invalid? Punt
cmp %g1, %g4 ! Compare TLB tags
bnz,pn %xcc, data_miss ! Got right tag?
nop
CLRTT
#ifdef TRAPSTATS
@ -1136,8 +1136,8 @@ kfast_DMMU_miss: ! 068 = fast data access MMU miss
inc %g2
stw %g2, [%g1+%lo(_C_LABEL(kdhit))]
#endif
stxa %g5, [%g0] ASI_DMMU_DATA_IN! Enter new mapping
retry ! Try new mapping
stxa %g5, [%g0] ASI_DMMU_DATA_IN ! Enter new mapping
retry ! Try new mapping
1:
sir
TA32
@ -2095,8 +2095,8 @@ dmmu_write_fault:
and %g6, PTMASK, %g6
add %g5, %g4, %g5
brz,pn %g4, winfix ! NULL entry? check somewhere else
nop
ldxa [%g5] ASI_PHYS_CACHED, %g4
sll %g6, 3, %g6
brz,pn %g4, winfix ! NULL entry? check somewhere else
@ -2126,9 +2126,8 @@ dmmu_write_fault:
ldxa [%g0] ASI_DMMU_8KPTR, %g2 ! Load DMMU 8K TSB pointer
andcc %g5, 0x3, %g5 ! 8K?
bnz,pn %icc, winfix ! We punt to the pmap code since we can't handle policy
ldxa [%g0] ASI_DMMU, %g1 ! Hard coded for unified 8K TSB Load DMMU tag target register
ldxa [%g0] ASI_DMMU, %g1 ! Load DMMU tag target register
casxa [%g6] ASI_PHYS_CACHED, %g4, %g7 ! and write it out
membar #StoreLoad
cmp %g4, %g7
bne,pn %xcc, 1b
@ -2137,6 +2136,7 @@ dmmu_write_fault:
mov SFSR, %g7
stx %g4, [%g2+8] ! Update TSB entry data
nop
#ifdef DEBUG
set DATA_START, %g6 ! debug
stx %g1, [%g6+0x40] ! debug
@ -6695,6 +6695,10 @@ ENTRY(blast_dcache)
/*
* We turn off interrupts for the duration to prevent RED exceptions.
*/
#ifdef PROF
save %sp, -CC64FSZ, %sp
#endif
rdpr %pstate, %o3
set (2 * NBPG) - 8, %o1
andn %o3, PSTATE_IE, %o4 ! Turn off PSTATE_IE bit
@ -6705,8 +6709,14 @@ ENTRY(blast_dcache)
dec 8, %o1
sethi %hi(KERNBASE), %o2
flush %o2
#ifdef PROF
wrpr %o3, %pstate
ret
restore
#else
retl
wrpr %o3, %pstate
#endif
/*
* blast_icache()
@ -6733,10 +6743,8 @@ ENTRY(blast_icache)
retl
wrpr %o3, %pstate
/*
* dcache_flush_page(vaddr_t pa)
* dcache_flush_page(paddr_t pa)
*
* Clear one page from D$.
*
@ -6746,30 +6754,27 @@ ENTRY(dcache_flush_page)
#ifndef _LP64
COMBINE(%o0, %o1, %o0)
#endif
!! Try using cache_flush_phys for a change.
mov -1, %o1 ! Generate mask for tag: bits [29..2]
srlx %o0, 13-2, %o2 ! Tag is VA bits <40:13> in bits <29:2>
srlx %o0, 13-2, %o2 ! Tag is PA bits <40:13> in bits <29:2>
clr %o4
srl %o1, 2, %o1 ! Now we have bits <29:0> set
set (2*NBPG), %o5
ba,pt %icc, 1f
andn %o1, 3, %o1 ! Now we have bits <29:2> set
.align 8
1:
ldxa [%o4] ASI_DCACHE_TAG, %o3
mov %o4, %o0
deccc 16, %o5
deccc 32, %o5
bl,pn %icc, 2f
inc 16, %o4
inc 32, %o4
xor %o3, %o2, %o3
andcc %o3, %o1, %g0
bne,pt %xcc, 1b
membar #LoadStore
stxa %g0, [%o0] ASI_DCACHE_TAG
ba,pt %icc, 1b
membar #StoreLoad
@ -6782,7 +6787,7 @@ ENTRY(dcache_flush_page)
membar #Sync
/*
* icache_flush_page(vaddr_t pa)
* icache_flush_page(paddr_t pa)
*
* Clear one page from I$.
*
@ -6825,59 +6830,6 @@ ENTRY(icache_flush_page)
retl
nop
/*
* cache_flush_virt(vaddr_t va, vsize_t len)
*
* Clear everything in that va range from D$ and I$.
*
*/
.align 8
ENTRY(cache_flush_virt)
brz,pn %o1, 2f ! What? nothing to clear?
add %o0, %o1, %o2
mov 0x1ff, %o3
sllx %o3, 5, %o3 ! Generate mask for VA bits
and %o0, %o3, %o0
and %o2, %o3, %o2
sub %o2, %o1, %o4 ! End < start? need to split flushes.
sethi %hi((1<<13)), %o5
brlz,pn %o4, 1f
movrz %o4, %o3, %o4 ! If start == end we need to wrap
!! Clear from start to end
1:
stxa %g0, [%o0] ASI_DCACHE_TAG
dec 16, %o4
xor %o5, %o0, %o3 ! Second way
#ifdef SPITFIRE
stxa %g0, [%o0] ASI_ICACHE_TAG! Don't do this on cheetah
stxa %g0, [%o3] ASI_ICACHE_TAG! Don't do this on cheetah
#endif
brgz,pt %o4, 1b
inc 16, %o0
2:
sethi %hi(KERNBASE), %o5
flush %o5
membar #Sync
retl
nop
!! We got a hole. Clear from start to hole
clr %o4
3:
stxa %g0, [%o4] ASI_DCACHE_TAG
dec 16, %o1
xor %o5, %o4, %g1 ! Second way
stxa %g0, [%o4] ASI_ICACHE_TAG
stxa %g0, [%g1] ASI_ICACHE_TAG
brgz,pt %o1, 3b
inc 16, %o4
!! Now clear to the end.
sub %o3, %o2, %o4 ! Size to clear (NBPG - end)
ba,pt %icc, 1b
mov %o2, %o0 ! Start of clear
/*
* cache_flush_phys __P((paddr_t, psize_t, int));
*
@ -8886,15 +8838,9 @@ paginuse:
.word 0
.text
ENTRY(pmap_zero_page)
!!
!! If we have 64-bit physical addresses (and we do now)
!! we need to move the pointer from %o0:%o1 to %o0
!!
#ifndef _LP64
#if PADDRT == 8
COMBINE(%o0, %o1, %o0)
#endif
#endif
#ifdef DEBUG
set pmapdebug, %o4
ld [%o4], %o4
@ -8949,16 +8895,9 @@ ENTRY(pmap_zero_page)
*/
ENTRY(pmap_copy_page)
#ifndef _LP64
!!
!! If we have 64-bit physical addresses (and we do now)
!! we need to move the pointer from %o0:%o1 to %o0 and
!! %o2:%o3 to %o1
!!
#if PADDRT == 8
COMBINE(%o0, %o1, %o0)
COMBINE(%o2, %o3, %o1)
#endif
#endif
#ifdef DEBUG
set pmapdebug, %o4
ld [%o4], %o4