Move pte_spill calls from trap_subr to trap().  Count the number of
"evictions" and avoid calling pmap_pte_spill if there are no evictions for
the current pmap.  Make the ISI exception use the default exception code.
Remove lots of dead stuff from trap_subr.  Make olink use TAILQ instead of
LIST and be sorted with evicted entries first and resident entries last.
Make use of this knowledge to make pmap_pte_spill do a fast exit.
parent 7bbf3ad2b9
commit 8c472e414b
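
Before the diff, a minimal userland sketch of the control flow this change introduces in trap(): gate the spill attempt on the per-pmap eviction count and only fall back to the general fault handler when the spill cannot help. The names toy_pmap, toy_spill, toy_uvm_fault and toy_trap are illustrative stand-ins, not kernel interfaces; only the pm_evictions field mirrors something the commit actually adds.

/*
 * Illustrative sketch only -- not the kernel code.  Shows the shape of the
 * new fast path in trap(): when a DSI/ISI was a "PTE not found" fault and
 * the faulting pmap has evicted entries, try to spill one back into the
 * page table and return without calling the general fault handler.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_pmap {
	unsigned int pm_evictions;	/* PVOs currently not in the page table */
};

/* Pretend to re-insert one evicted mapping; 1 on success, 0 otherwise. */
static int
toy_spill(struct toy_pmap *pm, unsigned long va)
{
	if (pm->pm_evictions == 0)
		return 0;
	pm->pm_evictions--;
	printf("spilled mapping for va %#lx\n", va);
	return 1;
}

/* Stand-in for the general-purpose fault handler (uvm_fault in the diff). */
static int
toy_uvm_fault(unsigned long va)
{
	printf("full fault handling for va %#lx\n", va);
	return 0;
}

static void
toy_trap(struct toy_pmap *pm, unsigned long va, bool notfound)
{
	/*
	 * The gate added by this commit: only bother with the spill when
	 * this wasn't a protection fault and something is actually evicted.
	 */
	if (notfound && pm->pm_evictions > 0 && toy_spill(pm, va))
		return;			/* handled, skip the slow path */
	toy_uvm_fault(va);
}

int
main(void)
{
	struct toy_pmap pm = { .pm_evictions = 1 };

	toy_trap(&pm, 0x1000, true);	/* fast path: spilled */
	toy_trap(&pm, 0x2000, true);	/* no evictions left: slow path */
	toy_trap(&pm, 0x3000, false);	/* protection fault: slow path */
	return 0;
}

In the diff below, the same gate appears in the kernel DSI, user DSI and user ISI cases as "vm_map_pmap(map)->pm_evictions > 0 && pmap_pte_spill(...)".
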
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.8 2002/09/22 07:53:48 chs Exp $ */
+/* $NetBSD: pmap.h,v 1.9 2002/10/10 22:37:50 matt Exp $ */
 
 /*-
  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
@@ -55,6 +55,7 @@ struct pmap {
 sr_t pm_sr[16]; /* segments used in this pmap */
 int pm_refs; /* ref count */
 struct pmap_statistics pm_stats; /* pmap statistics */
+unsigned int pm_evictions; /* pvo's not in page table */
 };
 
 typedef struct pmap *pmap_t;
@@ -101,7 +102,7 @@ void pmap_syncicache (paddr_t, psize_t);
 #define PMAP_NEED_PROCWR
 void pmap_procwr (struct proc *, vaddr_t, size_t);
 
-int pmap_pte_spill(vaddr_t va);
+int pmap_pte_spill(struct pmap *, vaddr_t);
 
 #define PMAP_NC 0x1000
 

@@ -1,4 +1,4 @@
-/* $NetBSD: mpc6xx_machdep.c,v 1.8 2002/09/25 22:21:17 thorpej Exp $ */
+/* $NetBSD: mpc6xx_machdep.c,v 1.9 2002/10/10 22:37:51 matt Exp $ */
 
 /*
  * Copyright (C) 2002 Matt Thomas
@@ -106,7 +106,6 @@ mpc6xx_init(void (*handler)(void))
 extern int sctrap, scsize;
 extern int alitrap, alisize;
 extern int dsitrap, dsisize;
-extern int isitrap, isisize;
 extern int decrint, decrsize;
 extern int tlbimiss, tlbimsize;
 extern int tlbdlmiss, tlbdlmsize;
@@ -174,10 +173,6 @@ mpc6xx_init(void (*handler)(void))
 size = (size_t)&dsisize;
 memcpy((void *)EXC_DSI, &dsitrap, size);
 break;
-case EXC_ISI:
-size = (size_t)&isisize;
-memcpy((void *)EXC_ISI, &isitrap, size);
-break;
 case EXC_DECR:
 size = (size_t)&decrsize;
 memcpy((void *)EXC_DECR, &decrint, size);

@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.59 2002/08/23 11:59:40 scw Exp $ */
+/* $NetBSD: pmap.c,v 1.60 2002/10/10 22:37:51 matt Exp $ */
 /*-
  * Copyright (c) 2001 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -144,7 +144,7 @@ struct pmap_physseg pmap_physseg;
  */
 struct pvo_entry {
 LIST_ENTRY(pvo_entry) pvo_vlink; /* Link to common virt page */
-LIST_ENTRY(pvo_entry) pvo_olink; /* Link to overflow entry */
+TAILQ_ENTRY(pvo_entry) pvo_olink; /* Link to overflow entry */
 struct pte pvo_pte; /* Prebuilt PTE */
 pmap_t pvo_pmap; /* ptr to owning pmap */
 vaddr_t pvo_vaddr; /* VA of entry */
@@ -163,7 +163,8 @@ struct pvo_entry {
 #define PVO_PTEGIDX_SET(pvo,i) \
 ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
 
-struct pvo_head *pmap_pvo_table; /* pvo entries by ptegroup index */
+TAILQ_HEAD(pvo_tqhead, pvo_entry);
+struct pvo_tqhead *pmap_pvo_table; /* pvo entries by ptegroup index */
 struct pvo_head pmap_pvo_kunmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged); /* list of unmanaged pages */
 struct pvo_head pmap_pvo_unmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged); /* list of unmanaged pages */
 
@@ -743,16 +744,17 @@ pmap_pte_insert(int ptegidx, pte_t *pvo_pt)
  * disabled.
  */
 int
-pmap_pte_spill(vaddr_t addr)
+pmap_pte_spill(struct pmap *pm, vaddr_t addr)
 {
 struct pvo_entry *source_pvo, *victim_pvo;
 struct pvo_entry *pvo;
+struct pvo_tqhead *pvoh;
 int ptegidx, i, j;
 sr_t sr;
 volatile pteg_t *pteg;
 volatile pte_t *pt;
 
-sr = MFSRIN(addr);
+sr = va_to_sr(pm->pm_sr, addr);
 ptegidx = va_to_pteg(sr, addr);
 
 /*
@@ -766,25 +768,52 @@ pmap_pte_spill(vaddr_t addr)
 
 source_pvo = NULL;
 victim_pvo = NULL;
-LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
+pvoh = &pmap_pvo_table[ptegidx];
+TAILQ_FOREACH(pvo, pvoh, pvo_olink) {
 /*
  * We need to find pvo entry for this address...
  */
 PMAP_PVO_CHECK(pvo); /* sanity check */
+/*
+ * If we haven't found the source and we come to a PVO with
+ * a valid PTE, then we know we can't find it because all
+ * evicted PVOs always are first in the list.
+ */
+if (source_pvo == NULL && (pvo->pvo_pte.pte_hi & PTE_VALID))
+break;
 if (source_pvo == NULL &&
 pmap_pte_match(&pvo->pvo_pte, sr, addr, pvo->pvo_pte.pte_hi & PTE_HID)) {
 /*
- * Now found an entry to be spilled into the pteg.
- * The PTE is now be valid, so we know it's active;
+ * Now we have found the entry to be spilled into the
+ * pteg.  Attempt to insert it into the page table.
  */
 j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
 if (j >= 0) {
 PVO_PTEGIDX_SET(pvo, j);
 PMAP_PVO_CHECK(pvo); /* sanity check */
+pvo->pvo_pmap->pm_evictions--;
 PMAPCOUNT(ptes_spilled);
 PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
 ? pmap_evcnt_ptes_secondary
 : pmap_evcnt_ptes_primary)[j]);
+/*
+ * Since we keep the evicted entries at the
+ * from of the PVO list, we need move this
+ * (now resident) PVO after the evicted
+ * entries.
+ */
+victim_pvo = TAILQ_NEXT(pvo, pvo_olink);
+/*
+ * If we don't have to move (either we were
+ * the last entry or the next entry was valid,
+ * don't change our position. Otherwise
+ * move ourselves to the tail of the queue.
+ */
+if (victim_pvo != NULL &&
+!(victim_pvo->pvo_pte.pte_hi & PTE_VALID)) {
+TAILQ_REMOVE(pvoh, pvo, pvo_olink);
+TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
+}
 return 1;
 }
 source_pvo = pvo;
@@ -816,7 +845,7 @@ pmap_pte_spill(vaddr_t addr)
  * If this is a secondary PTE, we need to search
  * its primary pvo bucket for the matching PVO.
  */
-LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx ^ pmap_pteg_mask],
+TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx ^ pmap_pteg_mask],
 pvo_olink) {
 PMAP_PVO_CHECK(pvo); /* sanity check */
 /*
@@ -841,8 +870,19 @@ pmap_pte_spill(vaddr_t addr)
  */
 source_pvo->pvo_pte.pte_hi &= ~PTE_HID;
 
+/* Move the source PVO from list of evicted PVO's to
+ * after the current position of the victim PVO. Then
+ * move the victim PVO to the head of the list so
+ * the evicted PVOs are all at the front of the list.
+ */
+TAILQ_REMOVE(pvoh, source_pvo, pvo_olink);
+TAILQ_REMOVE(pvoh, victim_pvo, pvo_olink);
+TAILQ_INSERT_TAIL(pvoh, source_pvo, pvo_olink);
+TAILQ_INSERT_HEAD(pvoh, victim_pvo, pvo_olink);
 pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
 pmap_pte_set(pt, &source_pvo->pvo_pte);
+victim_pvo->pvo_pmap->pm_evictions++;
+source_pvo->pvo_pmap->pm_evictions--;
 
 PVO_PTEGIDX_CLR(victim_pvo);
 PVO_PTEGIDX_SET(source_pvo, i);
@@ -888,7 +928,7 @@ pmap_init(void)
 {
 int s;
 #ifdef __HAVE_PMAP_PHYSSEG
-struct pvo_head *pvoh;
+struct pvo_tqhead *pvoh;
 int bank;
 long sz;
 char *attr;
@@ -901,7 +941,7 @@ pmap_init(void)
 vm_physmem[bank].pmseg.pvoh = pvoh;
 vm_physmem[bank].pmseg.attrs = attr;
 for (; sz > 0; sz--, pvoh++, attr++) {
-LIST_INIT(pvoh);
+TAILQ_INIT(pvoh);
 *attr = 0;
 }
 }
@@ -1223,7 +1263,7 @@ pmap_pvo_find_va(pmap_t pm, vaddr_t va, int *pteidx_p)
 sr = va_to_sr(pm->pm_sr, va);
 ptegidx = va_to_pteg(sr, va);
 
-LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
+TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
 if ((uintptr_t) pvo >= SEGMENT_LENGTH)
 panic("pmap_pvo_find_va: invalid pvo %p on "
@@ -1243,7 +1283,7 @@ pmap_pvo_find_va(pmap_t pm, vaddr_t va, int *pteidx_p)
 void
 pmap_pvo_check(const struct pvo_entry *pvo)
 {
-struct pvo_head *pvo_head;
+struct pvo_tqhead *pvo_head;
 struct pvo_entry *pvo0;
 volatile pte_t *pt;
 int failed = 0;
@@ -1347,6 +1387,7 @@ pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
 vaddr_t va, paddr_t pa, u_int pte_lo, int flags)
 {
 struct pvo_entry *pvo;
+struct pvo_tqhead *pvoh;
 u_int32_t msr;
 sr_t sr;
 int ptegidx;
@@ -1372,7 +1413,7 @@ pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
  * Remove any existing mapping for this page. Reuse the
  * pvo entry if there a mapping.
  */
-LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
+TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
 #ifdef DEBUG
 if ((pmapdebug & PMAPDEBUG_PVOENTER) &&
@@ -1420,7 +1461,6 @@ pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
 }
 pvo->pvo_vaddr = va;
 pvo->pvo_pmap = pm;
-LIST_INSERT_HEAD(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
 pvo->pvo_vaddr &= ~ADDR_POFF;
 if (flags & VM_PROT_EXECUTE) {
 PMAPCOUNT(exec_mappings);
@@ -1449,13 +1489,21 @@ pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
 /*
  * We hope this succeeds but it isn't required.
  */
+pvoh = &pmap_pvo_table[ptegidx];
 i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
 if (i >= 0) {
 PVO_PTEGIDX_SET(pvo, i);
 PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
 ? pmap_evcnt_ptes_secondary : pmap_evcnt_ptes_primary)[i]);
+TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
 } else {
+/*
+ * Since we didn't have room for this entry (which makes it
+ * and evicted entry), place it at the head of the list.
+ */
+TAILQ_INSERT_HEAD(pvoh, pvo, pvo_olink);
 PMAPCOUNT(ptes_evicted);
+pm->pm_evictions++;
 #if 0
 if ((flags & (VM_PROT_READ|VM_PROT_WRITE)) != VM_PROT_NONE)
 pmap_pte_evict(pvo, ptegidx, MFTB() & 7);
@@ -1473,12 +1521,24 @@ void
 pmap_pvo_remove(struct pvo_entry *pvo, int pteidx)
 {
 volatile pte_t *pt;
+int ptegidx;
 
 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
 if (++pmap_pvo_remove_depth > 1)
 panic("pmap_pvo_remove: called recursively!");
 #endif
 
+/*
+ * If we haven't been supplied the ptegidx, calculate it.
+ */
+if (pteidx == -1) {
+sr_t sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
+ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
+pteidx = pmap_pvo_pte_index(pvo, ptegidx);
+} else {
+ptegidx = pteidx >> 3;
+}
+
 PMAP_PVO_CHECK(pvo); /* sanity check */
 /*
  * If there is an active pte entry, we need to deactivate it
@@ -1489,6 +1549,9 @@ pmap_pvo_remove(struct pvo_entry *pvo, int pteidx)
 pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
 PVO_PTEGIDX_CLR(pvo);
 PMAPCOUNT(ptes_removed);
+} else {
+KASSERT(pvo->pvo_pmap->pm_evictions > 0);
+pvo->pvo_pmap->pm_evictions--;
 }
 
 /*
@@ -1506,6 +1569,9 @@ pmap_pvo_remove(struct pvo_entry *pvo, int pteidx)
 if (pg != NULL) {
 pmap_attr_save(pg, pvo->pvo_pte.pte_lo & (PTE_REF|PTE_CHG));
 }
+PMAPCOUNT(unmappings);
+} else {
+PMAPCOUNT(kernel_unmappings);
 }
 
 /*
@@ -1517,11 +1583,8 @@ pmap_pvo_remove(struct pvo_entry *pvo, int pteidx)
  * Remove this from the Overflow list and return it to the pool...
  * ... if we aren't going to reuse it.
  */
-LIST_REMOVE(pvo, pvo_olink);
-if (pvo->pvo_vaddr & PVO_MANAGED)
-PMAPCOUNT(unmappings);
-else
-PMAPCOUNT(kernel_unmappings);
+TAILQ_REMOVE(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
+
 pool_put(pvo->pvo_vaddr & PVO_MANAGED
 ? &pmap_mpvo_pool
 : &pmap_upvo_pool,
@@ -2325,7 +2388,7 @@ pmap_pteg_dist(void)
 memset(depths, 0, sizeof(depths));
 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
 depth = 0;
-LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
+TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
 depth++;
 }
 if (depth > max_depth)
@@ -2358,7 +2421,7 @@ pmap_pvo_verify(void)
 s = splvm();
 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
 struct pvo_entry *pvo;
-LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
+TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
 if ((uintptr_t) pvo >= SEGMENT_LENGTH)
 panic("pmap_pvo_verify: invalid pvo %p "
 "on list %#x", pvo, ptegidx);
@@ -2823,7 +2886,7 @@ pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend,
  * We cannot do pmap_steal_memory here since UVM hasn't been loaded
  * with pages. So we just steal them before giving them to UVM.
  */
-size = sizeof(struct pvo_head) * pmap_pteg_cnt;
+size = sizeof(pmap_pvo_table[0]) * pmap_pteg_cnt;
 pmap_pvo_table = pmap_boot_find_memory(size, NBPG, 0);
 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
 if ( (uintptr_t) pmap_pvo_table + size > SEGMENT_LENGTH)
@@ -2832,7 +2895,7 @@ pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend,
 #endif
 
 for (i = 0; i < pmap_pteg_cnt; i++)
-LIST_INIT(&pmap_pvo_table[i]);
+TAILQ_INIT(&pmap_pvo_table[i]);
 
 #ifndef MSGBUFADDR
 /*

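Between the two pmap.c diffs, a hedged sketch of the queue discipline the comments above describe ("evicted PVOs always are first in the list"): evicted entries live at the head of each overflow TAILQ, resident ones at the tail, so the spill walk can stop at the first valid entry. toy_pvo and toy_pvoh are made-up types; only the TAILQ usage pattern mirrors the diff.

/*
 * Illustrative sketch only -- toy types, not the NetBSD pvo structures.
 * Demonstrates the ordering invariant the new pmap_pte_spill() relies on:
 * evicted entries sit at the front of the overflow TAILQ and resident
 * entries at the back, so a walk can stop at the first valid entry.
 */
#include <sys/queue.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_pvo {
	TAILQ_ENTRY(toy_pvo) pv_olink;	/* overflow-list linkage */
	bool pv_valid;			/* resident in the page table? */
	int pv_id;
};
TAILQ_HEAD(toy_pvoh, toy_pvo);

/* Make the first evicted entry resident; return 1 on success, 0 if none. */
static int
toy_spill(struct toy_pvoh *pvoh)
{
	struct toy_pvo *pvo;

	TAILQ_FOREACH(pvo, pvoh, pv_olink) {
		/* Evicted entries precede resident ones: fast exit here. */
		if (pvo->pv_valid)
			break;
		pvo->pv_valid = true;
		/* Now resident, so move it behind the evicted entries. */
		TAILQ_REMOVE(pvoh, pvo, pv_olink);
		TAILQ_INSERT_TAIL(pvoh, pvo, pv_olink);
		return 1;
	}
	return 0;
}

int
main(void)
{
	struct toy_pvoh pvoh = TAILQ_HEAD_INITIALIZER(pvoh);
	struct toy_pvo a = { .pv_id = 1 };
	struct toy_pvo b = { .pv_id = 2, .pv_valid = true };

	/* A resident entry goes to the tail, an evicted one to the head. */
	TAILQ_INSERT_TAIL(&pvoh, &b, pv_olink);
	TAILQ_INSERT_HEAD(&pvoh, &a, pv_olink);

	printf("spilled: %d\n", toy_spill(&pvoh));	/* 1: 'a' made resident */
	printf("spilled: %d\n", toy_spill(&pvoh));	/* 0: nothing evicted */
	return 0;
}

pmap_pvo_enter() in the diff above maintains the same invariant: a successful pmap_pte_insert() puts the PVO at the tail, a failed one puts it at the head and bumps pm_evictions.
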
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.49 2002/09/27 15:36:38 provos Exp $ */
+/* $NetBSD: pmap.c,v 1.50 2002/10/10 22:37:51 matt Exp $ */
 
 /*
  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
@@ -131,7 +131,7 @@ static inline int ptematch __P((pte_t *, sr_t, vaddr_t, int));
 static inline struct pv_entry *pa_to_pv __P((paddr_t));
 static inline char *pa_to_attr __P((paddr_t));
 static int pte_insert __P((int, pte_t *));
-int pmap_pte_spill __P((vaddr_t)); /* Called from trap_subr.S */
+int pmap_pte_spill __P((struct pmap *, vaddr_t)); /* Called from trap.c */
 static inline int pmap_enter_pv __P((int, vaddr_t, paddr_t));
 static void pmap_remove_pv __P((int, vaddr_t, paddr_t, struct pte *));
 static pte_t *pte_find __P((struct pmap *, vaddr_t));
@@ -265,7 +265,8 @@ pte_insert(idx, pt)
  * with interrupts disabled.
  */
 int
-pmap_pte_spill(addr)
+pmap_pte_spill(pm, addr)
+struct pmap *pm;
 vaddr_t addr;
 {
 int idx, i;
@@ -274,8 +275,7 @@ pmap_pte_spill(addr)
 pte_t ps;
 pte_t *pt;
 
-asm ("mfsrin %0,%1" : "=r"(sr) : "r"(addr));
-idx = pteidx(sr, addr);
+idx = pteidx(ptesr(pm->pm_sr, addr), addr);
 for (po = potable[idx].lh_first; po; po = po->po_list.le_next)
 if (ptematch(&po->po_pte, sr, addr, 0)) {
 /*

@@ -1,4 +1,4 @@
-/* $NetBSD: trap.c,v 1.68 2002/08/12 22:44:03 matt Exp $ */
+/* $NetBSD: trap.c,v 1.69 2002/10/10 22:37:51 matt Exp $ */
 
 /*
  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
@@ -78,6 +78,7 @@ trap(struct trapframe *frame)
 struct cpu_info * const ci = curcpu();
 struct proc *p = curproc;
 struct pcb *pcb = curpcb;
+struct vm_map *map;
 int type = frame->exc;
 int ftype, rv;
 
@@ -110,7 +111,6 @@ trap(struct trapframe *frame)
  */
 ci->ci_ev_kdsi.ev_count++;
 if (intr_depth < 0) {
-struct vm_map *map;
 vaddr_t va;
 
 if (frame->dsisr & DSISR_STORE)
@@ -131,6 +131,19 @@ trap(struct trapframe *frame)
 } else {
 map = kernel_map;
 }
+
+/*
+ * Try to spill an evicted pte into the page table
+ * if this wasn't a protection fault and the pmap
+ * has some evicted pte's.
+ */
+if ((frame->dsisr & DSISR_NOTFOUND) &&
+vm_map_pmap(map)->pm_evictions > 0 &&
+pmap_pte_spill(vm_map_pmap(map), trunc_page(va))) {
+KERNEL_UNLOCK();
+return;
+}
+
 rv = uvm_fault(map, trunc_page(va), 0, ftype);
 if (map != kernel_map) {
 /*
@@ -139,7 +152,6 @@ trap(struct trapframe *frame)
 if (rv == 0)
 uvm_grow(p, trunc_page(va));
 /* KERNEL_PROC_UNLOCK(p); */
-} else {
 }
 KERNEL_UNLOCK();
 if (rv == 0)
@@ -171,8 +183,20 @@ trap(struct trapframe *frame)
 ftype = VM_PROT_WRITE;
 else
 ftype = VM_PROT_READ;
-rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(frame->dar),
-0, ftype);
+/*
+ * Try to spill an evicted pte into the page table
+ * if this wasn't a protection fault and the pmap
+ * has some evicted pte's.
+ */
+map = &p->p_vmspace->vm_map;
+if ((frame->dsisr & DSISR_NOTFOUND) &&
+vm_map_pmap(map)->pm_evictions > 0 &&
+pmap_pte_spill(vm_map_pmap(map), trunc_page(frame->dar))) {
+KERNEL_PROC_UNLOCK(p);
+break;
+}
+
+rv = uvm_fault(map, trunc_page(frame->dar), 0, ftype);
 if (rv == 0) {
 /*
  * Record any stack growth...
@@ -201,16 +225,30 @@ trap(struct trapframe *frame)
 }
 KERNEL_PROC_UNLOCK(p);
 break;
 
+case EXC_ISI:
+printf("trap: kernel ISI by %#x (SRR1 %#x)\n",
+frame->srr0, frame->srr1);
+goto brain_damage2;
+
 case EXC_ISI|EXC_USER:
 KERNEL_PROC_LOCK(p);
 ci->ci_ev_isi.ev_count++;
+/*
+ * Try to spill an evicted pte into the page table
+ * if this wasn't a protection fault and the pmap
+ * has some evicted pte's.
+ */
+map = &p->p_vmspace->vm_map;
+if ((frame->srr1 & DSISR_NOTFOUND) &&
+vm_map_pmap(map)->pm_evictions > 0 &&
+pmap_pte_spill(vm_map_pmap(map), trunc_page(frame->srr0))) {
+KERNEL_PROC_UNLOCK(p);
+break;
+}
+
 ftype = VM_PROT_READ | VM_PROT_EXECUTE;
-rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(frame->srr0),
-0, ftype);
+rv = uvm_fault(map, trunc_page(frame->srr0), 0, ftype);
 if (rv == 0) {
 KERNEL_PROC_UNLOCK(p);
 break;

@@ -1,4 +1,4 @@
-/* $NetBSD: trap_subr.S,v 1.28 2002/08/06 06:21:58 chs Exp $ */
+/* $NetBSD: trap_subr.S,v 1.29 2002/10/10 22:37:52 matt Exp $ */
 
 /*
  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
@@ -183,7 +183,8 @@ _C_LABEL(dsitrap):
 rfi /* return to trapped code */
 1:
 mflr 28 /* save LR */
-bla s_dsitrap
+mtsprg 1,1 /* save SP */
+bla disitrap
 _C_LABEL(dsisize) = .-_C_LABEL(dsitrap)
 
 /*
|
|||
rfi /* return to trapped code */
|
||||
1:
|
||||
mflr 28 /* save LR */
|
||||
bla s_dsitrap
|
||||
mtsprg 1,1
|
||||
bla disitrap
|
||||
_C_LABEL(dsi601size) = .-_C_LABEL(dsitrap601)
|
||||
|
||||
/*
|
||||
* Similar to the above for ISI
|
||||
*/
|
||||
.globl _C_LABEL(isitrap),_C_LABEL(isisize)
|
||||
_C_LABEL(isitrap):
|
||||
stmw 28,disisave(0) /* free r28-r31 */
|
||||
mflr 28 /* save LR */
|
||||
mfcr 29 /* save CR */
|
||||
mfsrr1 31 /* test kernel mode */
|
||||
mtcr 31
|
||||
bc 12,17,1f /* branch if PSL_PR is set */
|
||||
mfsrr0 31 /* get fault address */
|
||||
rlwinm 31,31,7,25,28 /* get segment * 8 */
|
||||
|
||||
/* get batu */
|
||||
addis 31,31,_C_LABEL(battable)@ha
|
||||
lwz 30,_C_LABEL(battable)@l(31)
|
||||
mtcr 30
|
||||
bc 4,30,1f /* branch if supervisor valid is
|
||||
false */
|
||||
mtibatu 3,30
|
||||
|
||||
/* get batl */
|
||||
lwz 30,_C_LABEL(battable)+4@l(31)
|
||||
mtibatl 3,30
|
||||
|
||||
mtcr 29 /* restore CR */
|
||||
lmw 28,disisave(0) /* restore r28-r31 */
|
||||
rfi /* return to trapped code */
|
||||
1:
|
||||
bla s_isitrap
|
||||
_C_LABEL(isisize)= .-_C_LABEL(isitrap)
|
||||
|
||||
/*
|
||||
* Dedicated MPC601 version of the above.
|
||||
* Considers different BAT format.
|
||||
*/
|
||||
.globl _C_LABEL(isitrap601),_C_LABEL(isi601size)
|
||||
_C_LABEL(isitrap601):
|
||||
stmw 28,disisave(0) /* free r28-r31 */
|
||||
mflr 28 /* save LR */
|
||||
mfcr 29 /* save CR */
|
||||
mfsrr1 31 /* test kernel mode */
|
||||
mtcr 31
|
||||
bc 12,17,1f /* branch if PSL_PR is set */
|
||||
mfsrr0 31 /* get fault address */
|
||||
rlwinm 31,31,12,20,28 /* get "segment" battable offset */
|
||||
|
||||
/* get batl */
|
||||
addis 31,31,_C_LABEL(battable)@ha
|
||||
lwz 30,_C_LABEL(battable)+4@l(31)
|
||||
mtcr 30
|
||||
bc 4,25,1f /* branch if Valid is is false,
|
||||
presently assumes supervisor only */
|
||||
/* get batu */
|
||||
lwz 31,_C_LABEL(battable)@l(31)
|
||||
|
||||
mtibatu 3,31
|
||||
mtibatl 3,30
|
||||
|
||||
mtcr 29 /* restore CR */
|
||||
lmw 28,disisave(0) /* restore r28-r31 */
|
||||
rfi /* return to trapped code */
|
||||
1:
|
||||
bla s_isitrap
|
||||
_C_LABEL(isi601size)= .-_C_LABEL(isitrap601)
|
||||
|
||||
/*
|
||||
* This one for the external interrupt handler.
|
||||
*/
|
||||
|
@@ -811,87 +746,6 @@ s_sctrap:
 FRAME_LEAVE(tempsave)
 rfi
 
-/*
- * DSI second stage fault handler
- */
-s_dsitrap:
-mfdsisr 31 /* test whether this may be a
-spill fault */
-mtcr 31
-mtsprg 1,1 /* save SP */
-bc 4,1,disitrap /* branch if table miss is false */
-lis 1,spillstk+SPILLSTK@ha
-addi 1,1,spillstk+SPILLSTK@l /* get spill stack */
-stwu 1,-SPFRAMELEN(1)
-stw 0,SPFRAME_R0(1) /* save non-volatile registers */
-stw 3,SPFRAME_R3(1)
-stw 4,SPFRAME_R4(1)
-stw 5,SPFRAME_R5(1)
-stw 6,SPFRAME_R6(1)
-stw 7,SPFRAME_R7(1)
-stw 8,SPFRAME_R8(1)
-stw 9,SPFRAME_R9(1)
-stw 10,SPFRAME_R10(1)
-stw 11,SPFRAME_R11(1)
-stw 12,SPFRAME_R12(1)
-mflr 30 /* save trap type */
-mfctr 31 /* & CTR */
-mfdar 3
-s_pte_spill:
-bl _C_LABEL(pmap_pte_spill) /* try a spill */
-or. 3,3,3
-mtctr 31 /* restore CTR */
-mtlr 30 /* and trap type */
-mfsprg 31,2 /* get saved XER */
-mtxer 31 /* restore XER */
-lwz 12,SPFRAME_R12(1) /* restore non-volatile registers */
-lwz 11,SPFRAME_R11(1)
-lwz 10,SPFRAME_R10(1)
-lwz 9,SPFRAME_R9(1)
-lwz 8,SPFRAME_R8(1)
-lwz 7,SPFRAME_R7(1)
-lwz 6,SPFRAME_R6(1)
-lwz 5,SPFRAME_R5(1)
-lwz 4,SPFRAME_R4(1)
-lwz 3,SPFRAME_R3(1)
-lwz 0,SPFRAME_R0(1)
-beq disitrap
-mfsprg 1,1 /* restore SP */
-mtcr 29 /* restore CR */
-mtlr 28 /* restore LR */
-lmw 28,disisave(0) /* restore r28-r31 */
-rfi /* return to trapped code */
-
-/*
- * ISI second stage fault handler
- */
-s_isitrap:
-mfsrr1 31 /* test whether this may be a
-spill fault */
-mtcr 31
-mtsprg 1,1 /* save SP */
-bc 4,1,disitrap /* branch if table miss is false */
-lis 1,spillstk+SPILLSTK@ha
-addi 1,1,spillstk+SPILLSTK@l /* get spill stack */
-stwu 1,-SPFRAMELEN(1)
-stw 0,SPFRAME_R0(1) /* save non-volatile registers */
-stw 3,SPFRAME_R3(1)
-stw 4,SPFRAME_R4(1)
-stw 5,SPFRAME_R5(1)
-stw 6,SPFRAME_R6(1)
-stw 7,SPFRAME_R7(1)
-stw 8,SPFRAME_R8(1)
-stw 9,SPFRAME_R9(1)
-stw 10,SPFRAME_R10(1)
-stw 11,SPFRAME_R11(1)
-stw 12,SPFRAME_R12(1)
-mfxer 30 /* save XER */
-mtsprg 2,30
-mflr 30 /* save trap type */
-mfctr 31 /* & ctr */
-mfsrr0 3
-b s_pte_spill /* above */
-
 /*
  * External interrupt second level handler
  */