* Allow pv_link4_4c() to fail with ENOMEM on pool allocation failures (catching up with pv_link4m()).
* Fix return code checking for pv_link4m() calls.
* Add low water setting to pv pool.
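
In outline, the pattern these changes converge on looks like the sketch below. This is an illustrative reduction, not the pmap.c source: pv_link_sketch() and enter_sketch() are made-up names and the PV entry is left opaque; pool_get(9)/PR_NOWAIT, PMAP_CANFAIL, panic(9), and ENOMEM are the real kernel interfaces involved.

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <uvm/uvm_extern.h>

static struct pool pv_pool;

static int
pv_link_sketch(void **npvp)
{
	void *npv;

	/*
	 * Allocate the new PV entry *before* changing the cacheable
	 * state of the existing mappings, so a failure leaves no
	 * half-done state to unwind.
	 */
	npv = pool_get(&pv_pool, PR_NOWAIT);	/* may not sleep here */
	if (npv == NULL)
		return (ENOMEM);

	/* ... now safe to discache old mappings and link npv ... */
	*npvp = npv;
	return (0);
}

static int
enter_sketch(int flags)
{
	void *npv;
	int error;

	/* Note the parentheses: assign first, then compare. */
	if ((error = pv_link_sketch(&npv)) != 0) {
		/* ... undo any page-table allocations made above ... */
		if ((flags & PMAP_CANFAIL) != 0)
			return (error);	/* caller may retry later */
		panic("pmap_enter: cannot allocate PV entry");
	}
	return (0);
}

The parenthesization matters: the two pv_link4m() call sites fixed below had written (error = pv_link4m(...) != 0), which stores the comparison result rather than the errno.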
This commit is contained in:
pk 2004-04-04 18:34:35 +00:00
parent 486f4170eb
commit 8066aa3aa5


@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.277 2004/04/03 23:11:14 pk Exp $ */
+/* $NetBSD: pmap.c,v 1.278 2004/04/04 18:34:35 pk Exp $ */
/*
* Copyright (c) 1996
@@ -56,7 +56,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.277 2004/04/03 23:11:14 pk Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.278 2004/04/04 18:34:35 pk Exp $");
#include "opt_ddb.h"
#include "opt_kgdb.h"
@@ -556,7 +556,7 @@ static void mmu_setup4m_L3(int, struct segmap *);
int, struct vm_page *, int);
/*static*/ void pv_changepte4_4c(struct vm_page *, int, int);
/*static*/ int pv_syncflags4_4c(struct vm_page *);
-/*static*/ int pv_link4_4c(struct vm_page *, struct pmap *, vaddr_t, int);
+/*static*/ int pv_link4_4c(struct vm_page *, struct pmap *, vaddr_t, u_int *);
/*static*/ void pv_unlink4_4c(struct vm_page *, struct pmap *, vaddr_t);
#endif
@@ -2578,8 +2578,12 @@ pv_unlink4_4c(pg, pm, va)
} else {
struct pvlist *prev;
+pmap_stats.ps_unlink_pvsearch++;
for (prev = pv0;; prev = npv, npv = npv->pv_next) {
-pmap_stats.ps_unlink_pvsearch++;
+if (npv == NULL) {
+panic("pv_unlink: pm %p is missing on pg %p",
+pm, pg);
+}
if (npv->pv_pmap == pm && npv->pv_va == va)
break;
}
@@ -2606,17 +2610,16 @@
* be cached.
*/
/*static*/ int
-pv_link4_4c(pg, pm, va, nc)
+pv_link4_4c(pg, pm, va, pteprotop)
struct vm_page *pg;
struct pmap *pm;
vaddr_t va;
-int nc;
+unsigned int *pteprotop;
{
-struct pvlist *pv0, *npv;
-int ret;
+struct pvlist *pv0, *pv, *npv;
+int nc = (*pteprotop & PG_NC) != 0 ? PV_NC : 0;
pv0 = VM_MDPAGE_PVHEAD(pg);
-ret = nc ? PG_NC : 0;
if (pv0->pv_pmap == NULL) {
/* no pvlist entries yet */
@@ -2624,55 +2627,66 @@ pv_link4_4c(pg, pm, va, nc)
pv0->pv_next = NULL;
pv0->pv_pmap = pm;
pv0->pv_va = va;
-pv0->pv_flags |= nc ? PV_NC : 0;
-return (ret);
+pv0->pv_flags |= nc;
+return (0);
}
+/*
+* Allocate the new PV entry now, and, if that fails, bail out
+* before changing the cacheable state of the existing mappings.
+*/
+npv = pool_get(&pv_pool, PR_NOWAIT);
+if (npv == NULL)
+return (ENOMEM);
+pmap_stats.ps_enter_secondpv++;
/*
* Before entering the new mapping, see if
* it will cause old mappings to become aliased
* and thus need to be `discached'.
*/
-pmap_stats.ps_enter_secondpv++;
if (pv0->pv_flags & PV_ANC) {
/* already uncached, just stay that way */
-ret = PG_NC;
-} else {
-for (npv = pv0; npv != NULL; npv = npv->pv_next) {
-if (npv->pv_flags & PV_NC) {
-ret = PG_NC;
+*pteprotop |= PG_NC;
+goto link_npv;
+}
+for (pv = pv0; pv != NULL; pv = pv->pv_next) {
+if ((pv->pv_flags & PV_NC) != 0) {
+*pteprotop |= PG_NC;
#ifdef DEBUG
-/* Check currently illegal condition */
-if (nc == 0)
-printf("pv_link: proc %s, va=0x%lx: "
+/* Check currently illegal condition */
+if (nc == 0)
+printf("pv_link: proc %s, va=0x%lx: "
"unexpected uncached mapping at 0x%lx\n",
-curproc ? curproc->p_comm : "--",
-va, npv->pv_va);
+curproc ? curproc->p_comm : "--",
+va, pv->pv_va);
#endif
-}
-if (BADALIAS(va, npv->pv_va)) {
+}
+if (BADALIAS(va, pv->pv_va)) {
#ifdef DEBUG
-if (pmapdebug & PDB_CACHESTUFF)
-printf(
-"pv_link: badalias: proc %s, 0x%lx<=>0x%lx, pv %p\n",
-curproc ? curproc->p_comm : "--",
-va, npv->pv_va, pg);
+if (pmapdebug & PDB_CACHESTUFF)
+printf(
+"pv_link: badalias: proc %s, 0x%lx<=>0x%lx, pg %p\n",
+curproc ? curproc->p_comm : "--",
+va, pv->pv_va, pg);
#endif
-/* Mark list head `uncached due to aliases' */
-pv0->pv_flags |= PV_ANC;
-pv_changepte4_4c(pg, ret = PG_NC, 0);
-break;
-}
+/* Mark list head `uncached due to aliases' */
+pv0->pv_flags |= PV_ANC;
+pv_changepte4_4c(pg, PG_NC, 0);
+*pteprotop |= PG_NC;
+break;
}
}
-}
-npv = pool_get(&pv_pool, PR_NOWAIT);
-if (npv == NULL)
-panic("pv_link: pv_pool exhausted");
+link_npv:
npv->pv_next = pv0->pv_next;
npv->pv_pmap = pm;
npv->pv_va = va;
-npv->pv_flags = nc ? PV_NC : 0;
+npv->pv_flags = nc;
pv0->pv_next = npv;
-return (ret);
+return (0);
}
#endif /* SUN4 || SUN4C */
@@ -2867,13 +2881,13 @@ pv_unlink4m(pg, pm, va)
} else {
struct pvlist *prev;
+pmap_stats.ps_unlink_pvsearch++;
for (prev = pv0;; prev = npv, npv = npv->pv_next) {
if (npv == NULL) {
-printf("pm %p is missing ", pm);
-goto out;
+panic("pv_unlink: pm %p is missing on pg %p",
+pm, pg);
}
-pmap_stats.ps_unlink_pvsearch++;
if (npv->pv_pmap == pm && npv->pv_va == va)
break;
}
@@ -2935,8 +2949,6 @@ pv_link4m(pg, pm, va, pteprotop)
goto out;
}
-pmap_stats.ps_enter_secondpv++;
/*
* Allocate the new PV entry now, and, if that fails, bail out
* before changing the cacheable state of the existing mappings.
@@ -2947,6 +2959,8 @@ pv_link4m(pg, pm, va, pteprotop)
goto out;
}
+pmap_stats.ps_enter_secondpv++;
/*
* See if the new mapping will cause old mappings to
* become aliased and thus need to be `discached'.
@@ -2995,7 +3009,7 @@ link_npv:
out:
simple_unlock(&pg->mdpage.pv_slock);
-return (0);
+return (error);
}
#endif
@@ -4371,6 +4385,11 @@ pmap_pmap_pool_ctor(void *arg, void *object, int flags)
}
#endif /* SUN4M || SUN4D */
+/* XXX - a peculiar place to do this, but we can't do it in pmap_init
+* and here at least it's off the beaten code track.
+*/
+{static int x; if (x == 0) pool_setlowat(&pv_pool, 512), x = 1; }
return (0);
}
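
For reference, pool_setlowat(9) sets the minimum number of idle items the pool tries to keep in reserve, replenishing in the background; with 512 PV entries banked, the PR_NOWAIT pool_get() in pv_link4_4c()/pv_link4m() should rarely come up empty. A minimal sketch of the same knob from an init path, with a hypothetical function name (the commit instead uses the one-shot guard above):

#include <sys/pool.h>

static struct pool pv_pool;

static void
pv_pool_tune(void)	/* hypothetical placement of the tuning call */
{
	/* Keep at least 512 idle PV entries in reserve. */
	pool_setlowat(&pv_pool, 512);
}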
@@ -5698,6 +5717,7 @@ pmap_enk4_4c(pm, va, prot, flags, pg, pteproto)
int *ptep;
struct regmap *rp;
struct segmap *sp;
+int error = 0;
vr = VA_VREG(va);
vs = VA_VSEG(va);
@@ -5738,6 +5758,9 @@
cache_flush_page(va, 0);
}
}
+*ptep = 0;
+if (inmmu)
+setpte4(va, 0);
if (pte & PG_WIRED)
sp->sg_nwired--;
pm->pm_stats.resident_count--;
@@ -5759,8 +5782,13 @@ pmap_enk4_4c(pm, va, prot, flags, pg, pteproto)
/*
* If the new mapping is for a managed PA, enter into pvlist.
*/
-if (pg != NULL)
-pteproto |= pv_link4_4c(pg, pm, va, pteproto & PG_NC);
+if (pg != NULL && (error = pv_link4_4c(pg, pm, va, &pteproto)) != 0) {
+if (--sp->sg_npte == 0)
+pgt_lvl23_remove4_4c(pm, rp, sp, vr, vs);
+if ((flags & PMAP_CANFAIL) != 0)
+goto out;
+panic("pmap_enter: cannot allocate PV entry");
+}
/* Update S/W page table */
*ptep = pteproto;
@@ -5781,8 +5809,9 @@ pmap_enk4_4c(pm, va, prot, flags, pg, pteproto)
/* Update H/W page table */
setpte4(va, pteproto & ~PG_MBZ);
+out:
splx(s);
-return (0);
+return (error);
}
/* enter new (or change existing) user mapping */
@@ -5928,6 +5957,9 @@ pmap_enu4_4c(pm, va, prot, flags, pg, pteproto)
if (pte & PG_WIRED)
sp->sg_nwired--;
pm->pm_stats.resident_count--;
+ptep[VA_VPG(va)] = 0;
+if (sp->sg_pmeg != seginval)
+setpte4(va, 0);
} else {
/* adding new entry */
sp->sg_npte++;
@@ -5940,8 +5972,15 @@ pmap_enu4_4c(pm, va, prot, flags, pg, pteproto)
}
}
-if (pg != NULL)
-pteproto |= pv_link4_4c(pg, pm, va, pteproto & PG_NC);
+if (pg != NULL && (error = pv_link4_4c(pg, pm, va, &pteproto)) != 0) {
+if (--sp->sg_npte == 0)
+/* Sigh, undo pgt allocations */
+pgt_lvl23_remove4_4c(pm, rp, sp, vr, vs);
+if ((flags & PMAP_CANFAIL) != 0)
+goto out;
+panic("pmap_enter: cannot allocate PV entry");
+}
/* Update S/W page table */
ptep += VA_VPG(va);
@@ -6368,7 +6407,7 @@ printf("pmap_enk4m: changing existing va=>pa entry: va 0x%lx, pteproto 0x%x, "
/*
* If the new mapping is for a managed PA, enter into pvlist.
*/
-if (pg != NULL && (error = pv_link4m(pg, pm, va, &pteproto) != 0)) {
+if (pg != NULL && (error = pv_link4m(pg, pm, va, &pteproto)) != 0) {
sp->sg_npte--;
if ((flags & PMAP_CANFAIL) != 0)
goto out;
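
The change above is a pure parenthesization fix: != binds tighter than =, so the old form assigned the comparison's truth value (0 or 1) to error and lost the real errno. A standalone illustration:

#include <stdio.h>

static int fails(void) { return 12; }	/* stand-in for ENOMEM */

int
main(void)
{
	int error;

	if ((error = fails() != 0))	/* old form: error = (12 != 0), i.e. 1 */
		printf("buggy form:   error = %d\n", error);
	if ((error = fails()) != 0)	/* fixed form: error = 12 */
		printf("correct form: error = %d\n", error);
	return 0;
}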
@@ -6566,7 +6605,7 @@ pmap_enu4m(pm, va, prot, flags, pg, pteproto)
}
}
-if (pg != NULL && (error = pv_link4m(pg, pm, va, &pteproto) != 0)) {
+if (pg != NULL && (error = pv_link4m(pg, pm, va, &pteproto)) != 0) {
if (--sp->sg_npte == 0)
/* Sigh, undo pgt allocations */
pgt_lvl23_remove4m(pm, rp, sp, vr, vs);