mmu_pagein() is only useful on sun4/sun4c. For now, keep a `mmu_pagein4m()'
within `#ifdef DEBUG' for monitoring.

Push user windows to stack in pmap_extract() if we need to switch contexts.
This commit is contained in:
pk 1996-05-27 01:12:34 +00:00
parent 344a892d31
commit 522c1d367a
2 changed files with 58 additions and 97 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.60 1996/05/19 00:32:15 pk Exp $ */
/* $NetBSD: pmap.c,v 1.61 1996/05/27 01:12:34 pk Exp $ */
/*
* Copyright (c) 1996
@ -475,7 +475,6 @@ void (*pmap_protect_p) __P((pmap_t,
void (*pmap_zero_page_p) __P((vm_offset_t));
void (*pmap_changeprot_p) __P((pmap_t, vm_offset_t,
vm_prot_t, int));
int (*mmu_pagein_p) __P((pmap_t, int, int));
/* local: */
void (*pmap_rmk_p) __P((struct pmap *, vm_offset_t, vm_offset_t,
int, int));
@ -1644,7 +1643,7 @@ region_free(pm, smeg)
* PTE not valid, or segment not loaded at all).
*/
int
mmu_pagein4_4c(pm, va, prot)
mmu_pagein(pm, va, prot)
register struct pmap *pm;
register int va, prot;
{
@ -1711,12 +1710,16 @@ printf("mmu_pagein: kernel wants map at va %x, vr %d, vs %d\n", va, vr, vs);
}
#endif /* defined SUN4 or SUN4C */
#if defined(SUN4M) /* sun4m version of mmu_pagein follows */
#if defined(DEBUG) && defined(SUN4M)
/*
* `Page in' (load or inspect) an MMU entry; called on page faults.
* Returns -1 if the desired page was marked valid (in which case the
* fault must be a bus error or something), or 0 (segment loaded but
* PTE not valid, or segment not loaded at all).
*
* The SRMMU does not have the concept of `loading PMEGs into the MMU'.
* For now, we use it to debug certain sporadic and strange memory
* fault traps.
*/
int
mmu_pagein4m(pm, va, prot)
@ -1747,12 +1750,12 @@ mmu_pagein4m(pm, va, prot)
/* if (getcontext() == 0)
bits |= PPROT_S;
*/
#if 0
if (bits && (pte & bits) == bits)
printf("pagein4m(%s[%d]): OOPS: prot=%x, va=%x, pte=%x, bits=%x\n",
curproc->p_comm, curproc->p_pid, prot, va, pte, bits);
#endif
return (bits && (pte & bits) == bits ? -1 : 0);
if (bits && (pte & bits) == bits) {
printf("pagein4m(%s[%d]): OOPS: prot=%x, va=%x, pte=%x, bits=%x\n",
curproc->p_comm, curproc->p_pid, prot, va, pte, bits);
return -1;
}
return 0;
}
#endif
@ -1915,13 +1918,13 @@ ctx_alloc(pm)
&pm->pm_regmap[VA_VREG(KERNBASE)],
NKREG * sizeof(struct regmap));
ctxbusyvector[pm->pm_ctxnum] = 1; /* mark context as busy */
ctxbusyvector[cnum] = 1; /* mark context as busy */
#ifdef DEBUG
if (pm->pm_reg_ptps_pa == 0)
panic("ctx_alloc: no region table in current pmap");
#endif
/*setcontext(0); * paranoia? can we modify curr. ctx? */
ctx_phys_tbl[pm->pm_ctxnum] =
ctx_phys_tbl[cnum] =
(pm->pm_reg_ptps_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD;
setcontext(cnum);
@ -2299,13 +2302,11 @@ pv_changepte4m(pv0, bis, bic)
register struct pvlist *pv0;
register int bis, bic;
{
register int *pte;
register struct pvlist *pv;
register struct pmap *pm;
register int va, vr, vs, flags;
register int va, vr, flags;
int ctx, s;
struct regmap *rp;
struct segmap *sp;
write_user_windows(); /* paranoid? */
@ -2319,18 +2320,14 @@ pv_changepte4m(pv0, bis, bic)
for (pv = pv0; pv != NULL; pv = pv->pv_next) {
register int tpte;
pm = pv->pv_pmap;
if (pm==NULL)
panic("pv_changepte 1");
if (pm == NULL)
panic("pv_changepte 1");
va = pv->pv_va;
vr = VA_VREG(va);
vs = VA_VSEG(va);
rp = &pm->pm_regmap[vr];
if (rp->rg_segmap == NULL)
panic("pv_changepte: no segments");
sp = &rp->rg_segmap[vs];
pte = sp->sg_pte;
if (CTX_USABLE(pm,rp)) {
extern vm_offset_t pager_sva, pager_eva;
@ -2689,7 +2686,6 @@ pmap_bootstrap4_4c(nctx, nregion, nsegment)
pmap_protect_p = pmap_protect4_4c;
pmap_zero_page_p = pmap_zero_page4_4c;
pmap_changeprot_p = pmap_changeprot4_4c;
mmu_pagein_p = mmu_pagein4_4c;
pmap_rmk_p = pmap_rmk4_4c;
pmap_rmu_p = pmap_rmu4_4c;
#endif /* defined SUN4M */
@ -3030,7 +3026,6 @@ pmap_bootstrap4m(void)
pmap_protect_p = pmap_protect4m;
pmap_zero_page_p = pmap_zero_page4m;
pmap_changeprot_p = pmap_changeprot4m;
mmu_pagein_p = mmu_pagein4m;
pmap_rmk_p = pmap_rmk4m;
pmap_rmu_p = pmap_rmu4m;
#endif /* defined Sun4/Sun4c */
@ -3337,13 +3332,13 @@ pmap_bootstrap4m(void)
getpte4m((vm_offset_t)q);
}
if (q >= (caddr_t) trapbase && q < etext)
((int *)kphyspagtbl)[VA_VPG(q)] =
(VA2PA(q) >> SRMMU_PPNPASHIFT) |
PPROT_N_RX | SRMMU_PG_C | SRMMU_TEPTE;
((int *)kphyspagtbl)[VA_VPG(q)] =
(VA2PA(q) >> SRMMU_PPNPASHIFT) |
PPROT_N_RX | SRMMU_PG_C | SRMMU_TEPTE;
else
((int *)kphyspagtbl)[VA_VPG(q)] =
(VA2PA(q) >> SRMMU_PPNPASHIFT) |
PPROT_N_RWX | SRMMU_PG_C | SRMMU_TEPTE;
((int *)kphyspagtbl)[VA_VPG(q)] =
(VA2PA(q) >> SRMMU_PPNPASHIFT) |
PPROT_N_RWX | SRMMU_PG_C | SRMMU_TEPTE;
}
/*
@ -3374,8 +3369,8 @@ pmap_bootstrap4m(void)
rmapp->rg_seg_ptps = (int *)kphyssegtbl;
if (rmapp->rg_segmap == NULL)
rmapp->rg_segmap = &kernel_segmap_store[(reg -
VA_VREG(KERNBASE)) * NSEGRG];
rmapp->rg_segmap = &kernel_segmap_store
[(reg - VA_VREG(KERNBASE)) * NSEGRG];
}
for (seg = 0; seg < NSEGRG; seg++) {
if (rmapp->rg_seg_ptps[seg] == NULL) {
@ -3422,22 +3417,23 @@ pmap_bootstrap4m(void)
i = lda(SRMMU_PCR, ASI_SRMMU);
switch(mmumod) {
/* case SUN4M_MMU_MS: */ /* These have the same model # as SS */
case SUN4M_MMU_SS:
/* case SUN4M_MMU_MS: */ /* These have the same model # as SS */
case SUN4M_MMU_SS:
if ((cpumod & 0xf0) == (SUN4M_SS & 0xf0))
sta(SRMMU_PCR, ASI_SRMMU, (i | SRMMU_PCR_TC));
sta(SRMMU_PCR, ASI_SRMMU, (i | SRMMU_PCR_TC));
else /* microsparc */
printf("(if this doesn't work, fix pmap_bootstrap4m in pmap.c)");
printf("(if this doesn't work, "
"fix pmap_bootstrap4m in pmap.c)");
break;
case SUN4M_MMU_HS:
case SUN4M_MMU_HS:
printf("(if this doesn't work, fix pmap_bootstrap4m in pmap.c)");
sta(SRMMU_PCR, ASI_SRMMU, (i | SRMMU_PCR_C) & ~SRMMU_PCR_CE);
/* above: CHECKFIX %%% below: add microsparc*/
break;
case SUN4M_MMU_MS1:
case SUN4M_MMU_MS1:
printf("(if this doesn't work, fix pmap_bootstrap4m in pmap.c)");
break;
default:
default:
panic("Unimplemented MMU architecture %d",mmumod);
}
@ -3497,39 +3493,6 @@ pmap_bootstrap4m(void)
/* All done! */
}
/*
* The following allows functions such as pmap_enk4m which are called before
* vm is available to allocate memory during bootstrap, but after it is
* possible to adjust avail_start, etc. It steals memory from a special pool
* of 2 pages (8k) statically reserved for this purpose. This should be more
* than enough to hold several page or segment tables, each of which is 256
* bytes. It will only return memory buffers aligned by size. We use no
* fancy scheme here; if the request doesn't fit between minipool_current
* and the end of the pool, it is rejected.
*/
#if 0 /* not needed anymore */
/* ARGSUSED */
/*
 * Bootstrap-time allocator: carve a size-aligned, zeroed buffer out of
 * the static minipool described above.  Returns NULL (after printing a
 * warning) when the rounded-up request does not fit in the space left
 * between minipool_current and the end of the pool.  The `type' and
 * `flags' arguments exist only to match the malloc() interface and are
 * ignored (hence ARGSUSED).
 */
static void *
pmap_bootstrap_malloc(size, type, flags)
register unsigned long size;
register int type, flags;
{
register void *retval;
/* Reject the request if, once aligned up to `size', it overflows the pool. */
if (MINIPOOL_SIZE - (roundup((u_int)minipool_current, size) -
(u_int)minipool_start) <= size) {
printf("WARNING: minipool overflow with size=%d, remaining="
"%d\n", size, minipool_current - minipool_start);
return (void *)NULL;
}
/* Align to the request's own size, zero the buffer, advance the pool. */
retval = (void *) roundup((u_int)minipool_current, size);
bzero((caddr_t) retval, size);
minipool_current = (caddr_t) ((u_int)retval + size);
return retval;
}
#endif /* 0 */
#endif /* defined sun4m */
void
@ -3705,11 +3668,12 @@ pmap_pinit(pm)
*/
urp = malloc(SRMMU_L1SIZE * sizeof(int), M_VMPMAP, M_WAITOK);
if (cant_cache_pagetables)
kvm_uncache(urp, ((SRMMU_L1SIZE*sizeof(int))+NBPG-1)/NBPG);
kvm_uncache(urp,
((SRMMU_L1SIZE*sizeof(int))+NBPG-1)/NBPG);
#ifdef DEBUG
if ((u_int) urp % (SRMMU_L1SIZE * sizeof(int)))
panic("pmap_pinit: malloc() not giving aligned memory");
panic("pmap_pinit: malloc() not giving aligned memory");
#endif
pm->pm_reg_ptps = urp;
pm->pm_reg_ptps_pa = VA2PA(urp);
@ -4065,7 +4029,7 @@ pmap_rmk4m(pm, va, endva, vr, vs)
while (va < endva) {
tpte = getpte4m(va);
if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
va += PAGE_SIZE;
va += NBPG;
continue;
}
if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
@ -5467,13 +5431,6 @@ pmap_enter4m(pm, va, pa, prot, wired)
if (pm == NULL)
return;
if (VA_INHOLE(va)) {
#ifdef DEBUG
printf("pmap_enter: pm %p, va %lx, pa %lx: in MMU hole\n",
pm, va, pa);
#endif
return;
}
#ifdef DEBUG
if (pmapdebug & PDB_ENTER)
printf("pmap_enter(%p, %lx, %lx, %x, %x)\n",
@ -5817,10 +5774,10 @@ pmap_extract4_4c(pm, va)
register int ctx = getcontext();
if (CTX_USABLE(pm,rp)) {
setcontext(pm->pm_ctxnum);
CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
tpte = getpte4(va);
} else {
setcontext(0);
CHANGE_CONTEXTS(ctx, 0);
#ifdef MMU_3L
if (mmu_3l)
setregmap(0, tregion);
@ -5866,9 +5823,9 @@ pmap_extract4m(pm, va)
return (0);
}
ctx = getcontext();
if (pm->pm_ctx) {
setcontext(pm->pm_ctxnum);
ctx = getcontext();
CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
tpte = getpte4m(va);
#ifdef DEBUG
if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
@ -5877,10 +5834,10 @@ pmap_extract4m(pm, va)
return (0);
}
#endif
setcontext(ctx);
} else
tpte = getptesw4m(pm, va);
setcontext(ctx);
return (ptoa((tpte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT) | VA_OFF(va));
}
@ -6258,7 +6215,7 @@ pmap_copy_page4m(src, dst)
setpte4m((vm_offset_t) dva, dpte);
qcopy(sva, dva, NBPG); /* loads cache, so we must ... */
if (vactype != VAC_NONE)
cache_flush_page((int)sva);
cache_flush_page((int)sva);
setpte4m((vm_offset_t) sva, SRMMU_TEINVALID);
setpte4m((vm_offset_t) dva, SRMMU_TEINVALID);
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: trap.c,v 1.42 1996/05/16 15:57:26 abrown Exp $ */
/* $NetBSD: trap.c,v 1.43 1996/05/27 01:12:44 pk Exp $ */
/*
* Copyright (c) 1996
@ -557,12 +557,12 @@ rwindow_save(p)
if (i == 0)
return (0);
#ifdef DEBUG
if(rwindow_debug)
if (rwindow_debug)
printf("%s[%d]: rwindow: pcb->stack:", p->p_comm, p->p_pid);
#endif
do {
#ifdef DEBUG
if(rwindow_debug)
if (rwindow_debug)
printf(" %x", rw[1].rw_in[6]);
#endif
if (copyout((caddr_t)rw, (caddr_t)rw[1].rw_in[6],
@ -571,7 +571,7 @@ rwindow_save(p)
rw++;
} while (--i > 0);
#ifdef DEBUG
if(rwindow_debug)
if (rwindow_debug)
printf("\n");
#endif
pcb->pcb_nsaved = 0;
@ -947,18 +947,22 @@ static int lastdouble;
} else
p->p_md.md_tf = tf;
vm = p->p_vmspace;
#ifdef DEBUG
/*
* mmu_pagein returns -1 if the page is already valid, in which
* case we have a hard fault; it returns 1 if it loads a segment
* that got bumped out via LRU replacement.
* case we have a hard fault.. now why would *that* happen?
* But it happens sporadically, and vm_fault() seems to clear it..
*/
vm = p->p_vmspace;
rv = mmu_pagein(&vm->vm_pmap, va,
rv = mmu_pagein4m(&vm->vm_pmap, va,
sfsr & SFSR_AT_STORE ? VM_PROT_WRITE : VM_PROT_READ);
if (rv < 0)
goto fault;
printf(" sfsr=%x(FT=%x,AT=%x,LVL=%x), sfva=%x, pc=%x, psr=%x\n",
sfsr, (sfsr >> 2) & 7, (sfsr >> 5) & 7, (sfsr >> 8) & 3,
sfva, pc, psr);
if (rv > 0)
goto out;
panic("mmu_pagein4m returns %d", rv);
#endif
/* alas! must call the horrible vm code */
rv = vm_fault(&vm->vm_map, (vm_offset_t)va, ftype, FALSE);