Overhaul cache flush code and coredump code.

eeh 2000-08-01 00:40:15 +00:00
parent 568dca001c
commit 57d2ec0b98
4 changed files with 360 additions and 329 deletions

kcore.h

@@ -1,4 +1,4 @@
/* $NetBSD: kcore.h,v 1.3 1998/08/13 02:10:44 eeh Exp $ */
/* $NetBSD: kcore.h,v 1.4 2000/08/01 00:40:26 eeh Exp $ */
/*-
* Copyright (c) 1996 The NetBSD Foundation, Inc.
@@ -40,23 +40,28 @@
* The layout of a kernel core on the dump device is as follows:
* a `struct kcore_seg' of type CORE_CPU
* a `struct cpu_kcore_hdr'
* an array of `cpu_kcore_hdr.nmemseg' phys_ram_seg_t's
* an array of `cpu_kcore_hdr.nsegmap' segmap structures
* an array of `cpu_kcore_hdr.npmegs' PTEs (zero of these on sun4ms).
*/
typedef struct cpu_kcore_hdr {
int cputype; /* CPU type associated with this dump */
u_long kernbase; /* copy of KERNBASE goes here */
int nmemseg; /* # of physical memory segments */
u_long memsegoffset; /* start of memseg array (relative */
int cputype; /* CPU type associated with this dump */
int nmemseg; /* # of physical memory segments */
uint64_t memsegoffset; /* start of memseg array (relative */
/* to the start of this header) */
int nsegmap; /* # of segmaps following */
u_long segmapoffset; /* start of segmap array (relative */
int nsegmap; /* # of kernel segs */
uint64_t segmapoffset; /* start of segmap array (relative */
/* to the start of this header) */
int npmeg; /* # of PMEGs; [sun4/sun4c] only */
u_long pmegoffset; /* start of pmeg array (relative */
/* to the start of this header) */
/* SPARC64 stuff */
paddr_t kphys; /* Physical address of 4MB locked TLB */
uint64_t kernbase; /* copy of KERNBASE goes here */
uint64_t cpubase; /* Pointer to cpu_info structure */
uint64_t ktextbase; /* Virtual start of text segment */
uint64_t ktextp; /* Physical address of 4MB locked TLB */
uint64_t ktextsz; /* Size of locked kernel text segment. */
uint64_t kdatabase; /* Virtual start of data segment */
uint64_t kdatap; /* Physical address of 4MB locked TLB */
uint64_t kdatasz; /* Size of locked kernel data segment. */
} cpu_kcore_hdr_t;
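This header is what the MD part of libkvm has to interpret, so the layout described above must be walked exactly as written. A minimal sketch of such a reader follows, assuming a plain file descriptor and a caller-supplied starting offset hdr_off; the I/O plumbing and the function name are hypothetical, while the types, macros, and field names come from <sys/kcore.h> and the structure above.

#include <sys/param.h>
#include <sys/kcore.h>
#include <machine/kcore.h>
#include <unistd.h>

int
parse_sun4u_dump(int fd, off_t hdr_off)
{
	kcore_seg_t seg;
	cpu_kcore_hdr_t cpu;
	phys_ram_seg_t ram;
	int i;

	/* The MI segment header comes first. */
	if (pread(fd, &seg, sizeof seg, hdr_off) != sizeof seg ||
	    CORE_GETMAGIC(seg) != KCORE_MAGIC)
		return (-1);
	hdr_off += ALIGN(sizeof seg);

	/* Then the MD header laid out above. */
	if (pread(fd, &cpu, sizeof cpu, hdr_off) != sizeof cpu)
		return (-1);

	/* nmemseg RAM descriptors sit memsegoffset bytes past the MD header. */
	for (i = 0; i < cpu.nmemseg; i++)
		if (pread(fd, &ram, sizeof ram, hdr_off + cpu.memsegoffset +
		    i * sizeof ram) != sizeof ram)
			return (-1);
	return (0);
}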

locore.s

@@ -1,4 +1,4 @@
/* $NetBSD: locore.s,v 1.90 2000/07/25 15:15:32 pk Exp $ */
/* $NetBSD: locore.s,v 1.91 2000/08/01 00:40:15 eeh Exp $ */
/*
* Copyright (c) 1996-1999 Eduardo Horvath
* Copyright (c) 1996 Paul Kranenburg
@@ -223,6 +223,23 @@
#endif
/*
* Combine 2 regs -- used to convert 64-bit ILP32
* values to LP64.
*/
#define COMBINE(r1, r2, d) \
sllx r1, 32, d; \
or d, r2, d
/*
* Split 64-bit value in 1 reg into high and low halves.
* Used for ILP32 return values.
*/
#define SPLIT(r0, r1) \
srl r0, 0, r1; \
srlx r0, 32, r0
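In C terms the two macros implement the following (illustration only; the real macros operate in place on the named registers):

#include <stdint.h>

/* COMBINE(r1, r2, d): merge two 32-bit halves into one 64-bit register. */
static inline uint64_t
combine(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

/* SPLIT(r0, r1): break an LP64 value into the ILP32 return-value pair. */
static inline void
split(uint64_t v, uint32_t *hi, uint32_t *lo)
{
	*lo = (uint32_t)v;		/* srl r0, 0, r1 zero-extends the low word */
	*hi = (uint32_t)(v >> 32);	/* srlx r0, 32, r0 keeps the high word */
}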
/*
* A handy macro for maintaining instrumentation counters.
* Note that this clobbers %o0 and %o1. Normal usage is
@@ -2942,7 +2959,8 @@ winfixsave:
set CPCB, %g6 ! Load up nsaved
LDPTR [%g6], %g6
ldub [%g6 + PCB_NSAVED], %g6
clr %g6
! ldub [%g6 + PCB_NSAVED], %g6! this could fault
sllx %g6, 9, %g6
or %g6, %g4, %g4
@@ -4191,6 +4209,11 @@ _C_LABEL(sparc_interrupt):
wr %g0, 1, CLEAR_SOFTINT
DLFLUSH(%g3, %g2)
#ifndef TICK_IS_TIME
rd TICK_CMPR, %g2
rd %tick, %g5
srax %g2, 1, %g2
cmp %g2, %g5
tg %xcc, 1
wrpr %g0, 0, %tick ! Reset %tick so we'll get another interrupt
#endif
ba,pt %icc, setup_sparcintr
@@ -6110,11 +6133,42 @@ _C_LABEL(blast_vcache):
flush %o2
retl
wrpr %o3, %pstate
/*
* dcache_flush_page()
* blast_icache()
*
* Clear one page from D$. We should do one for the I$,
* but it does not alias and is not likely as large a problem.
* Clear out all of I$ regardless of contents
* Does not modify %o0
*
*/
.align 8
.globl _C_LABEL(blast_icache)
.proc 1
FTYPE(blast_icache)
_C_LABEL(blast_icache):
/*
* We turn off interrupts for the duration to prevent RED exceptions.
*/
rdpr %pstate, %o3
set (2*NBPG)-8, %o1
andn %o3, PSTATE_IE, %o4 ! Turn off PSTATE_IE bit
wrpr %o4, 0, %pstate
1:
stxa %g0, [%o1] ASI_ICACHE_TAG
brnz,pt %o1, 1b
dec 8, %o1
sethi %hi(KERNBASE), %o2
flush %o2
retl
wrpr %o3, %pstate
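The new routine walks the I$ tag array back to front and zeroes every tag, with PSTATE_IE clear for the duration. Roughly, in C; stxa(), intr_off()/intr_on(), and sparc_flush() are hypothetical stand-ins for the stxa/wrpr/flush instructions, and the geometry (2*NBPG bytes of tag space in 8-byte words) is taken from the assembly above.

/* Hypothetical helpers; NBPG and the ASI numbers come from machine headers. */
extern void stxa(long off, int asi, unsigned long val);
extern void intr_off(void), intr_on(void);
extern void sparc_flush(void *va);

void
blast_icache_c(void)
{
	long off;

	intr_off();				/* keep PSTATE_IE clear: avoid RED state */
	for (off = 2 * NBPG - 8; off >= 0; off -= 8)
		stxa(off, ASI_ICACHE_TAG, 0);	/* zero one 8-byte tag word */
	sparc_flush((void *)KERNBASE);		/* synchronize the instruction pipe */
	intr_on();
}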
/*
* dcache_flush_page(vaddr_t pa)
*
* Clear one page from D$ and I$.
*
*/
.align 8
@@ -6122,28 +6176,185 @@ _C_LABEL(blast_vcache):
.proc 1
FTYPE(dcache_flush_page)
_C_LABEL(dcache_flush_page):
mov -1, %g1 ! Generate mask for tag: bits [29..2]
srlx %o0, 13-2, %g2 ! Tag is VA bits <40:13> in bits <29:2>
srl %g1, 2, %g1 ! Now we have bits <29:0> set
andn %g1, 3, %g1 ! Now we have bits <29:2> set
#ifndef _LP64
COMBINE(%o0, %o1, %o0)
#endif
set (2*NBPG), %o3
clr %o1
!! Try using cache_flush_phys for a change.
mov -1, %o1 ! Generate mask for tag: bits [29..2]
srlx %o0, 13-2, %o2 ! Tag is VA bits <40:13> in bits <29:2>
srl %o1, 2, %o1 ! Now we have bits <29:0> set
andn %o1, 3, %o1 ! Now we have bits <29:2> set
set (2*NBPG), %o5
clr %o4
1:
ldxa [%o1] ASI_DCACHE_TAG, %g3
xor %g3, %g2, %g3
andcc %g3, %g1, %g0
ldxa [%o4] ASI_DCACHE_TAG, %o3
xor %o3, %o2, %o3
andcc %o3, %o1, %g0
bne,pt %xcc, 2f
dec 16, %o3
stxa %g0, [%o1] ASI_DCACHE_TAG
dec 16, %o5
membar #LoadStore
stxa %g0, [%o4] ASI_DCACHE_TAG
membar #StoreLoad
2:
brnz,pt %o3, 1b
inc 16, %o1
brnz,pt %o5, 1b
inc 16, %o4
!! Now do the I$
mov -1, %o1 ! Generate mask for tag: bits [35..8]
srlx %o0, 13-8, %o2
srl %o1, 32-35+7, %o1
sll %o1, 7, %o1 ! Mask
set (2*NBPG), %o5
clr %o4
1:
ldda [%o4] ASI_ICACHE_TAG, %g0 ! Tag goes in %g1
xor %g1, %o2, %g1
andcc %g1, %o1, %g0
bne,pt %xcc, 2f
dec 16, %o5
membar #LoadStore
stxa %g0, [%o4] ASI_ICACHE_TAG
membar #StoreLoad
2:
brnz,pt %o5, 1b
inc 16, %o4
sethi %hi(KERNBASE), %o5
flush %o5
membar #Sync
retl
nop
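Rather than blasting the whole D$, the rewritten dcache_flush_page matches tags: the page's address bits <40:13> are shifted into the position they occupy in the D$ tag (<29:2>), every tag in the cache is read back, and only matching lines are invalidated; the I$ then gets the same treatment with its tag field at <35:8>. A sketch of the D$ half, with hypothetical ASI accessors:

/* Hypothetical ASI accessors; NBPG and ASI_DCACHE_TAG come from machine headers. */
extern unsigned long ldxa(long off, int asi);
extern void stxa(long off, int asi, unsigned long val);

void
dcache_flush_page_c(unsigned long pa)
{
	unsigned long want = pa >> (13 - 2);			/* tag value to match */
	unsigned long mask = (0xffffffffUL >> 2) & ~3UL;	/* tag bits <29:2> */
	unsigned long tag;
	long off;

	for (off = 0; off < 2 * NBPG; off += 16) {	/* one tag per 16 bytes */
		tag = ldxa(off, ASI_DCACHE_TAG);
		if (((tag ^ want) & mask) == 0)
			stxa(off, ASI_DCACHE_TAG, 0);	/* invalidate matching line */
	}
	/* The I$ pass is the same shape with its tag field at <35:8>. */
}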
/*
* cache_flush_virt(va, len)
*
* Clear everything in that va range from D$ and I$.
*
*/
.align 8
.globl _C_LABEL(cache_flush_virt)
.proc 1
FTYPE(cache_flush_virt)
_C_LABEL(cache_flush_virt):
brz,pn %o1, 2f ! What? nothing to clear?
add %o0, %o1, %o2
mov 0x1ff, %o3
sllx %o3, 5, %o3 ! Generate mask for VA bits
and %o0, %o3, %o0
and %o2, %o3, %o2
sub %o2, %o1, %o4 ! End < start? need to split flushes.
sethi %hi((1<<13)), %o5
brlz,pn %o4, 1f
movrz %o4, %o3, %o4 ! If start == end we need to wrap
!! Clear from start to end
1:
stxa %g0, [%o0] ASI_DCACHE_TAG
dec 16, %o4
xor %o5, %o0, %o3 ! Second way
stxa %g0, [%o0] ASI_ICACHE_TAG
stxa %g0, [%o3] ASI_ICACHE_TAG
brgz,pt %o4, 1b
inc 16, %o0
2:
sethi %hi(KERNBASE), %o5
flush %o5
membar #Sync
retl
nop
!! We got a hole. Clear from start to hole
clr %o4
3:
stxa %g0, [%o4] ASI_DCACHE_TAG
dec 16, %o1
xor %o5, %o4, %g1 ! Second way
stxa %g0, [%o4] ASI_ICACHE_TAG
stxa %g0, [%g1] ASI_ICACHE_TAG
brgz,pt %o1, 3b
inc 16, %o4
!! Now clear to the end.
sub %o3, %o2, %o4 ! Size to clear (NBPG - end)
ba,pt %icc, 1b
mov %o2, %o0 ! Start of clear
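The wrap test above falls out of the cache being indexed by VA bits <13:5>: a virtual range maps onto a window of tag indices, and if that window wraps past the top of the index space the clear must be split in two. The decision logic, sketched in C with flush_range() standing in for the tag-zeroing loop (a sketch of the intent, not a transcription of the register usage):

extern void flush_range(unsigned long lo, unsigned long hi); /* stand-in for the tag loop */

void
cache_flush_virt_c(unsigned long va, unsigned long len)
{
	const unsigned long size = (0x1ffUL << 5) + 32;	/* index space, in bytes */
	unsigned long start = va & (size - 32);		/* keep index bits <13:5> */
	unsigned long end = (va + len) & (size - 32);

	if (len == 0)
		return;				/* what? nothing to clear? */
	if (len >= size)
		flush_range(0, size);		/* range covers every index */
	else if (end <= start) {
		flush_range(0, end);		/* wrapped: clear both pieces */
		flush_range(start, size);
	} else
		flush_range(start, end);
}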
/*
* cache_flush_phys __P((paddr_t, psize_t, int));
*
* Clear a set of paddrs from the D$, I$ and if param3 is
* non-zero, E$. (E$ is not supported yet).
*/
.align 8
.globl _C_LABEL(cache_flush_phys)
.proc 1
FTYPE(cache_flush_phys)
_C_LABEL(cache_flush_phys):
#ifndef _LP64
COMBINE(%o0, %o1, %o0)
COMBINE(%o2, %o3, %o1)
mov %o4, %o2
#endif
#ifdef DEBUG
tst %o2 ! Want to clear E$?
tnz 1 ! Error!
#endif
add %o0, %o1, %o1 ! End PA
!!
!! Both D$ and I$ tags match pa bits 40-13, but
!! they are shifted different amounts. So we'll
!! generate a mask for bits 40-13.
!!
mov -1, %o2 ! Generate mask for tag: bits [40..13]
srl %o2, 5, %o2 ! 32-5 = [27..0]
sllx %o2, 13, %o2 ! 27+13 = [40..13]
and %o2, %o0, %o0 ! Mask away uninteresting bits
and %o2, %o1, %o1 ! (probably not necessary)
set (2*NBPG), %o5
clr %o4
1:
ldxa [%o4] ASI_DCACHE_TAG, %o3
ldda [%o4] ASI_ICACHE_TAG, %g0 ! Tag goes in %g1
sllx %o3, 40-29, %o3 ! Shift D$ tag into place
and %o3, %o2, %o3 ! Mask out trash
cmp %o0, %o3
blt,pt %xcc, 2f ! Too low
sllx %g1, 40-35, %g1 ! Shift I$ tag into place
cmp %o1, %o3
bgt,pt %xcc, 2f ! Too high
nop
membar #LoadStore
stxa %g0, [%o4] ASI_DCACHE_TAG ! Just right
2:
cmp %o0, %g1
blt,pt %xcc, 3f
cmp %o1, %g1
bgt,pt %icc, 3f
nop
stxa %g0, [%o4] ASI_ICACHE_TAG
3:
membar #StoreLoad
dec 16, %o5
brgz,pt %o5, 1b
inc 16, %o4
sethi %hi(KERNBASE), %o5
flush %o5
membar #Sync
retl
nop
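cache_flush_phys applies the same idea to a physical range: both tags compare PA bits against a common <40:13> window, so one mask serves for the D$ and the I$ once each raw tag is shifted into PA position (by 40-29 and 40-35 respectively). A sketch mirroring the mask generation in the assembly, again with hypothetical ASI accessors:

extern unsigned long ldxa(long off, int asi);
extern void stxa(long off, int asi, unsigned long val);

void
cache_flush_phys_c(unsigned long pa, unsigned long len)
{
	unsigned long mask = (0xffffffffUL >> 5) << 13;	/* PA tag-compare bits */
	unsigned long lo = pa & mask, hi = (pa + len) & mask;
	unsigned long dtag;
	long off;

	for (off = 0; off < 2 * NBPG; off += 16) {
		dtag = (ldxa(off, ASI_DCACHE_TAG) << (40 - 29)) & mask;
		if (lo <= dtag && dtag <= hi)
			stxa(off, ASI_DCACHE_TAG, 0);	/* in range: invalidate */
		/* ...and likewise for the I$ tag, shifted by 40-35. */
	}
}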
#ifdef _LP64
/*
* XXXXX Still needs lotsa cleanup after sendsig is complete and offsets are known
@@ -7455,15 +7666,15 @@ idle:
brnz,a,pt %o3, Lsw_scan
wrpr %g0, PIL_CLOCK, %pil ! (void) splclock();
#if 1 /* Don't enable the zeroing code just yet. */
ba,a,pt %icc, 1b
nop
#else
! Check uvm.page_idle_zero
sethi %hi(_C_LABEL(uvm) + UVM_PAGE_IDLE_ZERO), %o3
ld [%o3 + %lo(_C_LABEL(uvm) + UVM_PAGE_IDLE_ZERO)], %o3
brz,pn %o3, 1b
nop
#if 1 /* Don't enable the zeroing code just yet. */
ba,a,pt %icc, 1b
nop
#endif
/*
* We must preserve several global registers across the call
* to uvm_pageidlezero(). Use the %ix registers for this, but
@@ -7472,11 +7683,7 @@ idle:
* frame first.
*/
save %sp, -CCFSZ, %sp
mov %g1, %i0
mov %g2, %i1
mov %g4, %i2
mov %g6, %i3
mov %g7, %i4
GLOBTOLOC
! zero some pages
call _C_LABEL(uvm_pageidlezero)
@@ -7484,13 +7691,10 @@ idle:
! restore global registers again which are now
! clobbered by uvm_pageidlezero()
mov %i0, %g1
mov %i1, %g2
mov %i2, %g4
mov %i3, %g6
mov %i4, %g7
LOCTOGLOB
ba,pt %icc, 1b
restore
#endif
Lsw_panic_rq:
sethi %hi(1f), %o0
@@ -7797,7 +8001,7 @@ Lsw_load:
2:
#endif
ldx [%g1 + PCB_SP], %i6
call _C_LABEL(blast_vcache) ! Clear out I$ and D$
! call _C_LABEL(blast_vcache) ! Clear out I$ and D$
ldx [%g1 + PCB_PC], %i7
wrpr %g0, 0, %otherwin ! These two insns should be redundant
wrpr %g0, 0, %canrestore
@@ -8257,8 +8461,7 @@ ENTRY(subyte)
ENTRY(probeget)
#ifndef _LP64
!! Shuffle the args around into LP64 format
sllx %o0, 32, %o0
or %o0, %o1, %o0
COMBINE(%o0, %o1, %o0)
mov %o2, %o1
mov %o3, %o2
#endif
@@ -8299,8 +8502,7 @@ ENTRY(probeget)
0:
ldxa [%o0] %asi, %o0 ! value = *(long *)addr;
#ifndef _LP64
srl %o0, 0, %o1 ! Split the result again
srlx %o0, 32, %o0
SPLIT(%o0, %o1)
#endif
1: membar #Sync
#ifndef _LP64
@@ -8336,12 +8538,10 @@ _C_LABEL(Lfsprobe):
ENTRY(probeset)
#ifndef _LP64
!! Shuffle the args around into LP64 format
sllx %o0, 32, %o0
or %o0, %o1, %o0
COMBINE(%o0, %o1, %o0)
mov %o2, %o1
mov %o3, %o2
sllx %o4, 32, %o3
or %o3, %o5, %o3
COMBINE(%o4, %o5, %o3)
#endif
mov %o2, %o4
! %o0 = addr, %o1 = asi, %o4 = (1,2,4), %o3 = val
@@ -8425,8 +8625,7 @@ ENTRY(pmap_zero_page)
!!
#ifndef _LP64
#if PADDRT == 8
sllx %o0, 32, %o0
or %o0, %o1, %o0
COMBINE(%o0, %o1, %o0)
#endif
#endif
#ifdef DEBUG
@@ -8717,10 +8916,8 @@ ENTRY(pmap_copy_page)
!!
#ifndef _LP64
#if PADDRT == 8
sllx %o0, 32, %o0
or %o0, %o1, %o0
sllx %o2, 32, %o1
or %o3, %o1, %o1
COMBINE(%o0, %o1, %o0)
COMBINE(%o2, %o3, %o1)
#endif
#endif
#ifdef DEBUG
@@ -8775,7 +8972,7 @@ ENTRY(pmap_copy_page)
mov %i2, %o2
mov %i3, %o3
wr %g0, FPRS_FEF, %fprs
#else
#else /* NEW_FPSTATE */
/*
* New version, new scheme:
*
@@ -8839,8 +9036,8 @@ ENTRY(pmap_copy_page)
STPTR %l0, [%l5 + P_FPSTATE] ! Insert new fpstate
STPTR %l5, [%l1 + %lo(FPPROC)] ! Set new fpproc
wr %g0, FPRS_FEF, %fprs ! Enable FPU
#endif
#else
#endif /* NEW_FPSTATE */
#else /* PMAP_FPSTATE */
!!
!! Don't use FP regs if the kernel's already using them
!!
@@ -8853,7 +9050,7 @@ ENTRY(pmap_copy_page)
brz,pn %o4, pmap_copy_phys ! No userland fpstate so do this the slow way
1:
wr %o5, 0, %fprs ! Enable the FPU
#endif
#endif /* PMAP_FPSTATE */
#ifdef DEBUG
sethi %hi(paginuse), %o4 ! Prevent this from nesting
@@ -8863,7 +9060,7 @@ ENTRY(pmap_copy_page)
bnz,pn %icc, pmap_copy_phys
inc %o5
stw %o5, [%o4 + %lo(paginuse)]
#endif
#endif /* DEBUG */
rdpr %pil, %g1
wrpr %g0, 15, %pil ! s = splhigh();
@@ -8977,7 +9174,7 @@ ENTRY(pmap_copy_page)
sethi %hi(paginuse), %o4 ! Prevent this from nesting
stw %g0, [%o4 + %lo(paginuse)]
#endif
#endif /* PARANOID */
wrpr %g1, 0, %pil ! splx(s)
@@ -8993,7 +9190,7 @@ ENTRY(pmap_copy_page)
wr %l1, 0, %fprs
ret
restore
#else
#else /* NEW_FPSTATE */
#ifdef DEBUG
LDPTR [%l1 + %lo(FPPROC)], %l7
cmp %l7, %l5
@@ -9001,99 +9198,23 @@ ENTRY(pmap_copy_page)
LDPTR [%l5 + P_FPSTATE], %l7
cmp %l7, %l0
tnz 1 ! fpstate has changed!
#endif
#endif /* DEBUG */
STPTR %g0, [%l1 + %lo(FPPROC)] ! Clear fpproc
STPTR %l6, [%l5 + P_FPSTATE] ! Save old fpstate
wr %g0, 0, %fprs ! Disable FPU
ret
restore
#endif
#else
#endif /* NEW_FPSTATE */
#else /* PMAP_FPSTATE */
ba _C_LABEL(blast_vcache)
wr %g0, 0, %fprs ! Turn off FPU and mark as clean
retl ! Any other mappings have inconsistent D$
wr %g0, 0, %fprs ! Turn off FPU and mark as clean
#endif
#endif /* PMAP_FPSTATE */
pmap_copy_phys:
#endif
#if 0
#if 0
save %sp, -CC64FSZ, %sp ! Get 8 locals for scratch
set NBPG, %o1
sub %o1, 8, %o0
1:
ldxa [%i0] ASI_PHYS_CACHED, %l0
inc 8, %i0
ldxa [%i0] ASI_PHYS_CACHED, %l1
inc 8, %i0
ldxa [%i0] ASI_PHYS_CACHED, %l2
inc 8, %i0
ldxa [%i0] ASI_PHYS_CACHED, %l3
inc 8, %i0
ldxa [%i0] ASI_PHYS_CACHED, %l4
inc 8, %i0
ldxa [%i0] ASI_PHYS_CACHED, %l5
inc 8, %i0
ldxa [%i0] ASI_PHYS_CACHED, %l6
inc 8, %i0
ldxa [%i0] ASI_PHYS_CACHED, %l7
inc 8, %i0
stxa %l0, [%i1] ASI_PHYS_CACHED
inc 8, %i1
stxa %l1, [%i1] ASI_PHYS_CACHED
inc 8, %i1
stxa %l2, [%i1] ASI_PHYS_CACHED
inc 8, %i1
stxa %l3, [%i1] ASI_PHYS_CACHED
inc 8, %i1
stxa %l4, [%i1] ASI_PHYS_CACHED
inc 8, %i1
stxa %l5, [%i1] ASI_PHYS_CACHED
inc 8, %i1
stxa %l6, [%i1] ASI_PHYS_CACHED
inc 8, %i1
stxa %l7, [%i1] ASI_PHYS_CACHED
inc 8, %i1
stxa %g0, [%o0] ASI_DCACHE_TAG! Blast away at the D$
dec 8, %o0
stxa %g0, [%o1] ASI_DCACHE_TAG
inc 8, %o1
stxa %g0, [%o0] ASI_DCACHE_TAG! Blast away at the D$
dec 8, %o0
stxa %g0, [%o1] ASI_DCACHE_TAG
inc 8, %o1
stxa %g0, [%o0] ASI_DCACHE_TAG! Blast away at the D$
dec 8, %o0
stxa %g0, [%o1] ASI_DCACHE_TAG
inc 8, %o1
stxa %g0, [%o0] ASI_DCACHE_TAG! Blast away at the D$
dec 8, %o0
stxa %g0, [%o1] ASI_DCACHE_TAG
inc 8, %o1
stxa %g0, [%o0] ASI_DCACHE_TAG! Blast away at the D$
dec 8, %o0
stxa %g0, [%o1] ASI_DCACHE_TAG
inc 8, %o1
stxa %g0, [%o0] ASI_DCACHE_TAG! Blast away at the D$
dec 8, %o0
stxa %g0, [%o1] ASI_DCACHE_TAG
inc 8, %o1
stxa %g0, [%o0] ASI_DCACHE_TAG! Blast away at the D$
dec 8, %o0
stxa %g0, [%o1] ASI_DCACHE_TAG
inc 8, %o1
stxa %g0, [%o0] ASI_DCACHE_TAG! Blast away at the D$
dec 8, %o0
stxa %g0, [%o1] ASI_DCACHE_TAG
brnz,pt %o0, 1b
inc 8, %o1
sethi %hi(KERNBASE), %o2
flush %o2
return
nop
#else
/* This is the short, slow, safe version that uses %g1 */
set NBPG, %o3
@@ -9115,7 +9236,6 @@ pmap_copy_phys:
flush %o5
retl
nop
#endif
#else
set NBPG, %o3
add %o3, %o0, %o3
@@ -9308,8 +9428,7 @@ ENTRY(pseg_set)
call pseg_get
mov %o2, %o5
#ifndef _LP64
sllx %o0, 32, %o0
or %o1, %o0, %o0
COMBINE(%o0, %o1, %o0)
#endif
cmp %o0, %o5
tne 1
@@ -9675,7 +9794,11 @@ Lbzero_longs:
3:
stx %o2, [%o0] ! Do 1 longword at a time
deccc 8, %o1
#ifdef _LP64
brgez,pt %o1, 3b
#else
bge,pt %icc, 3b
#endif
inc 8, %o0
/*
@@ -9722,6 +9845,7 @@ Lbzero_small:
Lbzero_block:
!! Make sure our trap table is installed
ba,a,pt %icc, Lbzero_longs
rdpr %tba, %o3
set _C_LABEL(trapbase), %o5
sub %o3, %o5, %o3
@@ -9828,14 +9952,8 @@ Lbzero_block:
5:
stda %f0, [%i0] ASI_BLK_COMMIT_P ! Store 64 bytes
deccc 64, %i1
ble,pn %xcc, 6f
bg,pt %icc, 5b
inc 64, %i0
stda %f0, [%i0] ASI_BLK_COMMIT_P ! Store 64 bytes
deccc 64, %i1
bg,pn %xcc, 5b
inc 64, %i0
6:
/*
* We've saved our possible fpstate, now disable the fpu
* and continue with life.

machdep.c

@@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.84 2000/07/28 19:08:25 eeh Exp $ */
/* $NetBSD: machdep.c,v 1.85 2000/08/01 00:40:18 eeh Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@@ -331,7 +331,9 @@ setregs(p, pack, stack)
register struct fpstate64 *fs;
register int64_t tstate;
int pstate = PSTATE_USER;
#ifdef __arch64__
Elf_Ehdr *eh = pack->ep_hdr;
#endif
/* Don't allow misaligned code by default */
p->p_md.md_flags &= ~MDP_FIXALIGN;
@@ -915,10 +917,9 @@ dumpsys()
blkno = dumplo;
dump = bdevsw[major(dumpdev)].d_dump;
#if 0
error = pmap_dumpmmu(dump, blkno);
blkno += pmap_dumpsize();
#endif
printf("starting dump, blkno %d\n", blkno);
for (mp = mem; mp->size; mp++) {
unsigned i = 0, n;
paddr_t maddr = mp->start;
@@ -1233,16 +1234,37 @@ _bus_dmamap_unload(t, map)
bus_dma_tag_t t;
bus_dmamap_t map;
{
int i;
vm_page_t m;
struct pglist *mlist;
paddr_t pa;
if (map->dm_nsegs != 1)
panic("_bus_dmamap_unload: nsegs = %d", map->dm_nsegs);
for (i=0; i<map->dm_nsegs; i++) {
if ((mlist = map->dm_segs[i]._ds_mlist) == NULL) {
/*
* We were asked to load random VAs and lost the
* PA info so just blow the entire cache away.
*/
blast_vcache();
break;
}
for (m = TAILQ_FIRST(mlist); m != NULL;
m = TAILQ_NEXT(m,pageq)) {
pa = VM_PAGE_TO_PHYS(m);
/*
* We should be flushing a subrange, but we
* don't know where the segment starts.
*/
dcache_flush_page(pa);
}
}
/* Mark the mappings as invalid. */
map->dm_mapsize = 0;
map->dm_nsegs = 0;
/* Didn't keep track of vaddrs -- dump entire D$ */
blast_vcache();
}
/*
@@ -1257,6 +1279,9 @@ _bus_dmamap_sync(t, map, offset, len, ops)
bus_size_t len;
int ops;
{
int i;
vm_page_t m;
struct pglist *mlist;
/*
* We sync out our caches, but the bus must do the same.
@@ -1272,8 +1297,27 @@ _bus_dmamap_sync(t, map, offset, len, ops)
}
if (ops & BUS_DMASYNC_POSTREAD) {
/* Invalidate the vcache */
blast_vcache();
/* Maybe we should flush the I$? When we support LKMs.... */
for (i=0; i<map->dm_nsegs; i++) {
if ((mlist = map->dm_segs[i]._ds_mlist) == NULL)
/* Should not really happen. */
continue;
for (m = TAILQ_FIRST(mlist);
m != NULL; m = TAILQ_NEXT(m,pageq)) {
paddr_t start;
psize_t size;
if (offset < NBPG) {
start = VM_PAGE_TO_PHYS(m) + offset;
size = NBPG - offset;
if (size > len)
size = len;
cache_flush_phys(start, size, 0);
len -= size;
offset = 0; /* later pages flush from their start */
continue;
}
offset -= NBPG; /* whole page lies before the sync window */
}
}
}
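The partial-sync arithmetic above is easier to see in isolation: whole pages in front of the window are skipped by decrementing offset, the first page inside it is flushed from offset (clipped to len), and every later page is flushed from its start. A hedged sketch for a single segment, with a flat page array standing in for the pglist walk:

/* Sketch: flush only [offset, offset+len) across one segment's pages. */
static void
sync_seg(paddr_t *pages, int npages, bus_size_t offset, bus_size_t len)
{
	psize_t size;
	int i;

	for (i = 0; i < npages && len > 0; i++) {
		if (offset >= NBPG) {
			offset -= NBPG;		/* page precedes the window */
			continue;
		}
		size = NBPG - offset;		/* first page may be partial */
		if (size > len)
			size = len;
		cache_flush_phys(pages[i] + offset, size, 0);
		len -= size;
		offset = 0;
	}
}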
if (ops & BUS_DMASYNC_POSTWRITE) {
/* Nothing to do. Handled by the bus controller. */

pmap.c

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.69 2000/07/31 05:40:22 mrg Exp $ */
/* $NetBSD: pmap.c,v 1.70 2000/08/01 00:40:20 eeh Exp $ */
#undef NO_VCACHE /* Don't forget the locked TLB in dostart */
#define HWREF 1
#undef BOOT_DEBUG
@@ -1708,14 +1708,8 @@ pmap_kenter_pa(va, pa, prot)
#endif
tsb_enter(pm->pm_ctx, va, tte.data.data);
ASSERT((tsb[i].data.data & TLB_NFO) == 0);
#if 1
/* this is correct */
dcache_flush_page(va);
#else
/* Go totally crazy */
blast_vcache();
#endif
dcache_flush_page(pa);
}
#endif
/*
@@ -1838,7 +1832,6 @@ pmap_kremove(va, size)
#ifdef DEBUG
remove_stats.flushes ++;
#endif
blast_vcache();
}
}
#endif
@@ -2090,12 +2083,8 @@ pmap_enter(pm, va, pa, prot, flags)
}
/* Force reload -- protections may be changed */
tlb_flush_pte((npv->pv_va&PV_VAMASK), pm->pm_ctx);
#if 1
/* XXXXXX We should now flush the DCACHE to make sure */
dcache_flush_page((npv->pv_va&PV_VAMASK));
#else
blast_vcache();
#endif
dcache_flush_page(pa);
}
}
fnd:
@@ -2142,15 +2131,9 @@ pmap_enter(pm, va, pa, prot, flags)
tlb_flush_pte(va, pm->pm_ctx);
ASSERT((tsb[i].data.data & TLB_NFO) == 0);
}
#if 1
#if 1
/* this is correct */
dcache_flush_page(va);
#else
/* Go totally crazy */
blast_vcache();
#endif
#endif
dcache_flush_page(pa);
/* We will let the fast mmu miss interrupt load the new translation */
pv_check();
return (KERN_SUCCESS);
@@ -2166,6 +2149,7 @@ pmap_remove(pm, va, endva)
{
int i, flush=0;
int64_t data;
vaddr_t flushva = va;
/*
* In here we should check each pseg and if there are no more entries,
@@ -2254,7 +2238,7 @@ pmap_remove(pm, va, endva)
#ifdef DEBUG
remove_stats.flushes ++;
#endif
blast_vcache();
cache_flush_virt(flushva, endva - flushva);
}
#ifdef DEBUG
if (pmapdebug & PDB_REMOVE)
@@ -2391,116 +2375,6 @@ pmap_extract(pm, va, pap)
return (TRUE);
}
#if 0
/* This appears to be no longer used. */
/*
* Map physical addresses into kernel VM. -- used by device drivers
*/
vaddr_t
pmap_map(va, pa, endpa, prot)
register vaddr_t va;
register paddr_t pa, endpa;
register int prot;
{
register int pgsize = PAGE_SIZE;
int i;
while (pa < endpa) {
for (i=0; page_size_map[i].mask; i++) {
if (((pa | va) & page_size_map[i].mask) == 0
&& pa + page_size_map[i].mask < endpa)
break;
}
do {
#ifdef DEBUG
page_size_map[i].use++;
#endif
pmap_enter(pmap_kernel(), va, pa|page_size_map[i].code,
prot,
VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
va += pgsize;
pa += pgsize;
} while (pa & page_size_map[i].mask);
}
return (va);
}
#endif
#if 0
/*
* Really change page protections -- used by device drivers
*/
void pmap_changeprot(pm, start, prot, size)
pmap_t pm;
vaddr_t start;
vm_prot_t prot;
int size;
{
int i, s;
vaddr_t sva, eva;
int64_t data, set, clr;
if (prot == VM_PROT_NONE) {
pmap_remove(pm, start, start+size);
return;
}
if (prot & VM_PROT_WRITE) {
#ifdef HWREF
set = TLB_REAL_W/*|TLB_W|TLB_MODIFY*/;
#else
set = TLB_REAL_W|TLB_W|TLB_MODIFY;
#endif
clr = 0LL;
} else {
set = 0LL;
clr = TLB_REAL_W|TLB_W;
}
sva = start & ~PGOFSET;
eva = start + size;
while (sva < eva) {
/*
* Is this part of the permanent 4MB mapping?
*/
if( pm == pmap_kernel() && sva >= ktext && sva < kdata+4*MEG ) {
prom_printf("pmap_changeprot: va=%08x in locked TLB\r\n", sva);
OF_enter();
return;
}
#ifdef DEBUG
if (pmapdebug & (PDB_CHANGEPROT|PDB_REF))
printf("pmap_changeprot: va %p prot %x\n", sva, prot);
#endif
/* First flush the TSB */
i = ptelookup_va(sva);
/* Then update the page table */
s = splimp();
if ((data = pseg_get(pm, sva))) {
data |= set;
data &= ~clr;
ASSERT((data & TLB_NFO) == 0);
if (pseg_set(pm, sva, data, 0)) {
printf("pmap_changeprot: gotten empty pseg!\n");
Debugger();
/* panic? */
}
if (pm->pm_ctx || pm == pmap_kernel()) {
tlb_flush_pte(sva, pm->pm_ctx);
if (tsb[i].tag.tag > 0
&& tsb[i].tag.tag == TSB_TAG(0,pm->pm_ctx,sva))
tsb[i].tag.tag = tsb[i].data.data = data;
}
}
splx(s);
sva += NBPG;
}
pv_check();
}
#endif
/*
* Return the number of bytes that pmap_dumpmmu() will dump.
*/
@@ -2526,7 +2400,6 @@ pmap_dumpsize()
* kcore_seg_t MI header defined in <sys/kcore.h>)
* cpu_kcore_hdr_t MD header defined in <machine/kcore.h>)
* phys_ram_seg_t[memsize] physical memory segments
* segmap_t[NKREG*NSEGRG] the kernel's segment map (NB: needed?)
*/
int
pmap_dumpmmu(dump, blkno)
@@ -2537,7 +2410,7 @@ pmap_dumpmmu(dump, blkno)
cpu_kcore_hdr_t *kcpu;
phys_ram_seg_t memseg;
register int error = 0;
register int i, memsegoffset, segmapoffset;
register int i, memsegoffset;
int buffer[dbtob(1) / sizeof(int)];
int *bp, *ep;
@@ -2570,16 +2443,26 @@
/* Fill in MD segment header (interpreted by MD part of libkvm) */
kcpu = (cpu_kcore_hdr_t *)((long)bp + ALIGN(sizeof(kcore_seg_t)));
kcpu->cputype = CPU_SUN4U;
kcpu->kernbase = KERNBASE;
kcpu->kphys = (paddr_t)ktextp;
kcpu->kernbase = (u_int64_t)KERNBASE;
kcpu->cpubase = (u_int64_t)CPUINFO_VA;
/* Describe the locked text segment */
kcpu->ktextbase = (u_int64_t)ktext;
kcpu->ktextp = (u_int64_t)ktextp;
kcpu->ktextsz = (u_int64_t)ektextp - ktextp;
/* Describe locked data segment */
kcpu->kdatabase = (u_int64_t)kdata;
kcpu->kdatap = (u_int64_t)kdatap;
kcpu->kdatasz = (u_int64_t)ekdatap - kdatap;
/* Now the memsegs */
kcpu->nmemseg = memsize;
kcpu->memsegoffset = memsegoffset = ALIGN(sizeof(cpu_kcore_hdr_t));
kcpu->nsegmap = STSZ;
kcpu->segmapoffset = segmapoffset =
memsegoffset + memsize * sizeof(phys_ram_seg_t);
kcpu->npmeg = 0;
kcpu->pmegoffset = 0; /* We don't do this. */
/* Now we need to point this at our kernel pmap. */
kcpu->nsegmap = STSZ;
kcpu->segmapoffset = (u_int64_t)pmap_kernel()->pm_physaddr;
/* Note: we have assumed everything fits in buffer[] so far... */
bp = (int *)((long)kcpu + ALIGN(sizeof(cpu_kcore_hdr_t)));
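For this to round-trip, pmap_dumpsize() must count exactly the pieces the header now describes: the two aligned headers plus one phys_ram_seg_t per segment, and nothing for the segment map, since segmapoffset now carries the kernel pmap's physical address rather than a file offset. A sketch of the matching computation, assuming the file's existing memsize global:

int
pmap_dumpsize()
{
	long sz;

	sz = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
	sz += memsize * sizeof(phys_ram_seg_t);
	return (btodb(sz + DEV_BSIZE - 1));	/* round bytes up to disk blocks */
}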
@@ -2590,14 +2473,6 @@ pmap_dumpmmu(dump, blkno)
EXPEDITE(&memseg, sizeof(phys_ram_seg_t));
}
#if 0
/*
* Since we're not mapping this in we need to re-do some of this
* logic.
*/
EXPEDITE(&kernel_pmap_.pm_segs[0], sizeof(kernel_pmap_.pm_segs));
#endif
if (bp != buffer)
error = (*dump)(dumpdev, blkno++, (caddr_t)buffer, dbtob(1));
@@ -2885,7 +2760,7 @@ pmap_clear_reference(pg)
}
}
/* Stupid here will take a cache hit even on unmapped pages 8^( */
blast_vcache();
dcache_flush_page(pa);
splx(s);
pv_check();
#ifdef DEBUG
@@ -3232,14 +3107,6 @@ pmap_page_protect(pg, prot)
printf("pmap_page_protect: demap va %p of pa %lx from pm %p...\n",
pv->pv_va, (long)pa, pv->pv_pmap);
}
#if 0
if (!pv->pv_pmap->pm_segs[va_to_seg(pv->pv_va&PV_VAMASK)]) {
printf("pmap_page_protect(%x:%x,%x): pv %x va %x not in pmap %x\n",
(int)(pa>>32), (int)pa, prot, pv, pv->pv_va, pv->pv_pmap);
Debugger();
goto skipit;
}
#endif
#endif
data = pseg_get(pv->pv_pmap, pv->pv_va&PV_VAMASK);
/* Save ref/mod info */
@@ -3276,14 +3143,11 @@ pmap_page_protect(pg, prot)
pv->pv_pmap = NULL;
pv->pv_next = NULL;
}
#if 0
skipit:
#endif
}
dcache_flush_page(pa);
splx(s);
}
/* We should really only flush the pages we demapped. */
blast_vcache();
pv_check();
}