Use a private pool of PV elements. This improves performance
and avoids reentrance into pmap code via malloc().
Author: gwr
Date:   1997-11-03 16:58:05 +00:00
Parent: 47c2d7502f
Commit: 62a4f5b044
1 changed file with 241 additions and 179 deletions
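The heart of the change, before reading the diff: PV entries were previously obtained with malloc(), so pmap_enter() could re-enter the pmap through the allocator. The new scheme carves a fixed-size pool out of a single boot-time allocation and serves entries from a free list. A minimal standalone sketch of that pool (types simplified; pv_alloc()/pv_free() appear only as comments in the real code, which open-codes them in pv_link() and pv_unlink()):

#include <stdlib.h>

/* Simplified stand-ins for the kernel types (hypothetical, for this sketch). */
struct pv_entry {
	struct pv_entry *pv_next;
	void *pv_pmap;
	unsigned long pv_va;
};

static struct pv_entry *pv_free_list;

/* One-time setup: link nvp preallocated entries into a free list. */
static void
pv_pool_init(struct pv_entry *pool, int nvp)
{
	int i;

	for (i = 0; i < nvp - 1; i++)
		pool[i].pv_next = &pool[i + 1];
	pool[nvp - 1].pv_next = NULL;
	pv_free_list = pool;
}

/* Constant-time allocate: pop the free list head. */
static struct pv_entry *
pv_alloc(void)
{
	struct pv_entry *pv = pv_free_list;

	if (pv == NULL)
		abort();	/* pool is sized so this cannot happen */
	pv_free_list = pv->pv_next;
	pv->pv_next = NULL;
	return (pv);
}

/* Constant-time free: push back onto the free list. */
static void
pv_free(struct pv_entry *pv)
{
	pv->pv_next = pv_free_list;
	pv_free_list = pv;
}

Both operations are constant-time list splices that never call into malloc(), which is what removes the reentrance path.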


@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.83 1997/11/03 16:08:23 gwr Exp $ */
+/* $NetBSD: pmap.c,v 1.84 1997/11/03 16:58:05 gwr Exp $ */
 /*-
  * Copyright (c) 1996 The NetBSD Foundation, Inc.
@@ -39,7 +39,7 @@
 /*
  * Some notes:
  *
- * sun3s have contexts (8).  In our mapping of the world, the kernel is mapped
+ * sun3s have contexts (8).  In this pmap design, the kernel is mapped
  * into all contexts.  Processes take up a known portion of the context,
  * and compete for the available contexts on a LRU basis.
  *
@@ -71,7 +71,7 @@
  */
 /*
- * Project: Use a "null" context for processes that have not
+ * Project1: Use a "null" context for processes that have not
  * touched any user-space address recently.  This is efficient
  * for things that stay in the kernel for a while, waking up
  * to handle some I/O then going back to sleep (i.e. nfsd).
@@ -80,6 +80,10 @@
  *
  * This also lets context switch be fast, because all we need
  * to do there for the MMU is slam the context register.
+ *
+ * Project2: Use a private pool of PV elements.  This pool can be
+ * fixed size because the total mapped virtual space supported by
+ * the MMU H/W (and this pmap) is fixed for all time.
  */
 #include <sys/param.h>
@@ -283,15 +287,22 @@ struct pmap_stats {
  */
 int pv_initialized = 0;

+/* One of these for each mapped virtual page. */
 struct pv_entry {
 	struct pv_entry *pv_next;
 	pmap_t pv_pmap;
 	vm_offset_t pv_va;
-	unsigned int pv_flags;
 };
 typedef struct pv_entry *pv_entry_t;

-pv_entry_t pv_head_table = NULL;
+/* Table of PV list heads (per physical page). */
+static struct pv_entry **pv_head_tbl;
+
+/* Free list of PV entries. */
+static struct pv_entry *pv_free_list;
+
+/* Table of flags (per physical page). */
+static u_char *pv_flags_tbl;

 /* These are as in the MMU but shifted by PV_SHIFT. */
 #define PV_SHIFT	24
@@ -381,11 +392,11 @@ static void pmeg_free __P((pmeg_t pmegp));
 static pmeg_t pmeg_cache __P((pmap_t pmap, vm_offset_t va));
 static void pmeg_set_wiring __P((pmeg_t pmegp, vm_offset_t va, int));

-static int pv_link __P((pmap_t pmap, vm_offset_t, vm_offset_t, u_int));
+static int pv_link __P((pmap_t pmap, vm_offset_t, vm_offset_t, int));
 static void pv_unlink __P((pmap_t, vm_offset_t, vm_offset_t));
 static void pv_remove_all __P((vm_offset_t pa));
-static void pv_changepte __P((pv_entry_t, int, int));
-static void pv_syncflags __P((pv_entry_t head));
+static void pv_changepte __P((vm_offset_t pa, int, int));
+static u_int pv_syncflags __P((pv_entry_t));
 static void pv_init __P((void));

 static void pmeg_clean __P((pmeg_t pmegp));
@@ -444,32 +455,41 @@ current_pmap __P((void))
 	return (pmap);
 }

-#ifdef	DIAGNOSTIC
-static struct pv_entry *
-pa_to_pvp(vm_offset_t pa)
-{
-	struct pv_entry *pvp;
-
-	if (pa < avail_start || pa >= avail_end) {
-		panic("pmap:pa_to_pvp: bad pa=0x%lx", pa);
-	}
-	pvp = &pv_head_table[PA_PGNUM(pa)];
-	return pvp;
-}
-#else
-#define pa_to_pvp(pa) &pv_head_table[PA_PGNUM(pa)]
-#endif
+static inline struct pv_entry **
+pa_to_pvhead(vm_offset_t pa)
+{
+	int idx;
+
+	idx = PA_PGNUM(pa);
+#ifdef	DIAGNOSTIC
+	if (idx > physmem)
+		panic("pmap:pa_to_pvhead: bad pa=0x%lx", pa);
+#endif
+	return (&pv_head_tbl[idx]);
+}

-#ifdef	DIAGNOSTIC
-static pmeg_t
+static inline u_char *
+pa_to_pvflags(vm_offset_t pa)
+{
+	int idx;
+
+	idx = PA_PGNUM(pa);
+#ifdef	DIAGNOSTIC
+	if (idx > physmem)
+		panic("pmap:pa_to_pvflags: idx=0x%x", idx);
+#endif
+	return (&pv_flags_tbl[idx]);
+}
+
+static inline pmeg_t
 pmeg_p(int sme)
 {
+#ifdef	DIAGNOSTIC
 	if (sme < 0 || sme >= SEGINV)
 		panic("pmeg_p: bad sme");
+#endif
 	return &pmeg_array[sme];
 }
-#else
-#define pmeg_p(x) &pmeg_array[x]
-#endif

 #define is_pmeg_wired(pmegp)	(pmegp->pmeg_wired != 0)
@@ -496,7 +516,7 @@ pmeg_set_wiring(pmegp, va, flag)
 static void
 save_modref_bits(int pte)
 {
-	pv_entry_t pvhead;
+	u_char *pv_flags;

 	if (pv_initialized == 0)
 		return;
@@ -507,8 +527,8 @@ save_modref_bits(int pte)
 	CHECK_SPL();

-	pvhead = pa_to_pvp(PG_PA(pte));
-	pvhead->pv_flags |= ((pte & PG_MODREF) >> PV_SHIFT);
+	pv_flags = pa_to_pvflags(PG_PA(pte));
+	*pv_flags |= ((pte & PG_MODREF) >> PV_SHIFT);
 }
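The >> PV_SHIFT in save_modref_bits() packs the MMU's mod/ref PTE bits into a one-byte soft flag. A sketch of the intent, with bit values assumed purely for illustration (the real PG_* and PV_* definitions live in the sun3 headers):

/* Hypothetical bit positions, chosen only to show the packing. */
#define PG_MOD		0x02000000	/* PTE: page modified (assumed position) */
#define PG_REF		0x01000000	/* PTE: page referenced (assumed position) */
#define PG_MODREF	(PG_MOD | PG_REF)
#define PV_SHIFT	24

#define PV_MOD		(PG_MOD >> PV_SHIFT)	/* 0x02, fits in a u_char */
#define PV_REF		(PG_REF >> PV_SHIFT)	/* 0x01 */

One mask-and-shift therefore moves both hardware bits into the low bits of the per-page flag byte at once.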
@@ -1056,22 +1076,58 @@ pmeg_verify_empty(va)

 /****************************************************************
  * Physical-to-virtual lookup support
+ *
+ * Need memory for the pv_alloc/pv_free list heads
+ * and elements.  We know how many to allocate since
+ * there is one list head for each physical page, and
+ * at most one element for each PMEG slot.
  */
 static void
 pv_init()
 {
-	int sz;
+	int npp, nvp, sz;
+	pv_entry_t pv;
+	char *p;

-	sz = PA_PGNUM(avail_end);
-	sz *= sizeof(struct pv_entry);
-	pv_head_table = (pv_entry_t) kmem_alloc(kernel_map, sz);
-	if (!pv_head_table) {
-		mon_printf("pmap: kmem_alloc() of pv table failed\n");
-		sunmon_abort();
-	}
-	bzero((caddr_t) pv_head_table, sz);
+	/* total allocation size */
+	sz = 0;
+
+	/*
+	 * Data for each physical page.
+	 * Each "mod/ref" flag is a char.
+	 * Each PV head is a pointer.
+	 * Note physmem is in pages.
+	 */
+	npp = ALIGN(physmem);
+	sz += (npp * sizeof(*pv_flags_tbl));
+	sz += (npp * sizeof(*pv_head_tbl));
+
+	/*
+	 * Data for each virtual page (all PMEGs).
+	 * One pv_entry for each page frame.
+	 */
+	nvp = NPMEG * NPAGSEG;
+	sz += (nvp * sizeof(*pv_free_list));
+
+	/* Now allocate the whole thing. */
+	sz = m68k_round_page(sz);
+	p = (char *) kmem_alloc(kernel_map, sz);
+	if (p == NULL)
+		panic("pmap:pv_init: alloc failed");
+	bzero(p, sz);
+
+	/* Now divide up the space. */
+	pv_flags_tbl = (void *) p;
+	p += (npp * sizeof(*pv_flags_tbl));
+	pv_head_tbl = (void *) p;
+	p += (npp * sizeof(*pv_head_tbl));
+	pv_free_list = (void *) p;
+	p += (nvp * sizeof(*pv_free_list));
+
+	/* Finally, make pv_free_list into a list. */
+	for (pv = pv_free_list; (char *) pv < p; pv++)
+		pv->pv_next = &pv[1];
+	pv[-1].pv_next = 0;

 	pv_initialized++;
 }
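The pool in pv_init() can be sized exactly because the sun3 MMU maps a fixed amount of virtual space: at most one PV entry per PMEG PTE slot. A back-of-the-envelope check, assuming the usual sun3 constants (NPMEG = 256 segment maps of NPAGSEG = 16 pages each; both values are assumptions here, see machine/param.h):

#include <stdio.h>

#define NPMEG	256	/* PMEGs provided by the MMU (assumed) */
#define NPAGSEG	16	/* pages mapped per PMEG (assumed) */

int
main(void)
{
	int npp = 2048;			/* e.g. 16 MB of RAM in 8 KB pages */
	int nvp = NPMEG * NPAGSEG;	/* 4096: hard cap on PV entries */
	unsigned long sz = 0;

	sz += npp * sizeof(unsigned char);	/* pv_flags_tbl */
	sz += npp * 4;				/* pv_head_tbl, 4-byte m68k pointers */
	sz += nvp * 12;				/* ~12-byte pv_entry each */
	printf("one-time pv allocation: %lu bytes\n", sz);
	return (0);
}

With those assumed numbers the whole thing is on the order of 58 KB on a 16 MB machine, allocated once at boot and never freed.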
@@ -1081,12 +1137,13 @@ pv_init()
  * Also does syncflags work while we are there...
  */
 static void
-pv_changepte(head, set_bits, clear_bits)
-	pv_entry_t head;
+pv_changepte(pa, set_bits, clear_bits)
+	vm_offset_t pa;
 	int set_bits;
 	int clear_bits;
 {
-	pv_entry_t pv;
+	pv_entry_t *head, pv;
+	u_char *pv_flags;
 	pmap_t pmap;
 	vm_offset_t va;
 	int pte, sme;
@@ -1101,8 +1158,11 @@ pv_changepte(head, set_bits, clear_bits)
 	if ((set_bits == 0) && (clear_bits == 0))
 		return;

+	pv_flags = pa_to_pvflags(pa);
+	head = pa_to_pvhead(pa);
+
 	/* If no mappings, no work to do. */
-	if (head->pv_pmap == NULL)
+	if (*head == NULL)
 		return;
#ifdef DIAGNOSTIC #ifdef DIAGNOSTIC
@ -1113,18 +1173,16 @@ pv_changepte(head, set_bits, clear_bits)
flags = 0; flags = 0;
saved_ctx = get_context(); saved_ctx = get_context();
for (pv = head; pv != NULL; pv = pv->pv_next) { for (pv = *head; pv != NULL; pv = pv->pv_next) {
pmap = pv->pv_pmap; pmap = pv->pv_pmap;
va = pv->pv_va; va = pv->pv_va;
sme = SEGINV; /* kill warning */ sme = SEGINV; /* kill warning */
#ifdef DIAGNOSTIC #ifdef DIAGNOSTIC
/*
* Only the head may have a null pmap, and
* we checked for that above.
*/
if (pmap == NULL) if (pmap == NULL)
panic("pv_changepte: null pmap"); panic("pv_changepte: null pmap");
if (pmap->pm_segmap == NULL)
panic("pv_changepte: null segmap");
#endif #endif
/* Is the PTE currently accessable in some context? */ /* Is the PTE currently accessable in some context? */
@@ -1153,8 +1211,6 @@ pv_changepte(head, set_bits, clear_bits)
 			/*
 			 * The PTE is not in any context.
 			 */
-			if (pmap->pm_segmap == NULL)
-				panic("pv_changepte: null segmap");
 			sme = pmap->pm_segmap[VA_SEGNUM(va)];
 			if (sme == SEGINV)
 				panic("pv_changepte: SEGINV");
@@ -1188,18 +1244,17 @@ pv_changepte(head, set_bits, clear_bits)
 	}
 	set_context(saved_ctx);

-	head->pv_flags |= (flags >> PV_SHIFT);
+	*pv_flags |= (flags >> PV_SHIFT);
 }

 /*
- * Sync ref and mod bits in pvlist
- * (turns off same in hardware PTEs).
+ * Return ref and mod bits from pvlist,
+ * and turn off the same in hardware PTEs.
  */
-static void
-pv_syncflags(head)
-	pv_entry_t head;
+static u_int
+pv_syncflags(pv)
+	pv_entry_t pv;
 {
-	pv_entry_t pv;
 	pmap_t pmap;
 	vm_offset_t va;
 	int pte, sme;
@@ -1210,15 +1265,15 @@ pv_syncflags(head)
 	CHECK_SPL();

 	if (!pv_initialized)
-		return;
+		return (0);

 	/* If no mappings, no work to do. */
-	if (head->pv_pmap == NULL)
-		return;
+	if (pv == NULL)
+		return (0);

 	flags = 0;
 	saved_ctx = get_context();
-	for (pv = head; pv != NULL; pv = pv->pv_next) {
+	for ( ; pv != NULL; pv = pv->pv_next) {
 		pmap = pv->pv_pmap;
 		va = pv->pv_va;
 		sme = SEGINV;	/* kill warning */
@@ -1230,6 +1285,8 @@ pv_syncflags(head)
 		 */
 		if (pmap == NULL)
 			panic("pv_syncflags: null pmap");
+		if (pmap->pm_segmap == NULL)
+			panic("pv_syncflags: null segmap");
 #endif

 		/* Is the PTE currently accessible in some context? */
@@ -1260,8 +1317,6 @@ pv_syncflags(head)
 			 * XXX - Consider syncing MODREF bits
 			 * when the PMEG loses its context?
 			 */
-			if (pmap->pm_segmap == NULL)
-				panic("pv_syncflags: null segmap");
 			sme = pmap->pm_segmap[VA_SEGNUM(va)];
 			if (sme == SEGINV)
 				panic("pv_syncflags: SEGINV");
@@ -1291,7 +1346,7 @@ pv_syncflags(head)
 	}
 	set_context(saved_ctx);

-	head->pv_flags |= (flags >> PV_SHIFT);
+	return (flags >> PV_SHIFT);
 }

 /* Remove all mappings for the physical page. */
@@ -1299,7 +1354,7 @@ static void
 pv_remove_all(pa)
 	vm_offset_t pa;
 {
-	pv_entry_t pv;
+	pv_entry_t *head, pv;
 	pmap_t pmap;
 	vm_offset_t va;
@@ -1313,16 +1368,14 @@ pv_remove_all(pa)
 	if (!pv_initialized)
 		return;

-	/* The PV head never moves. */
-	pv = pa_to_pvp(pa);
-	while (pv->pv_pmap != NULL) {
+	head = pa_to_pvhead(pa);
+	while ((pv = *head) != NULL) {
 		pmap = pv->pv_pmap;
 		va = pv->pv_va;
 		pmap_remove1(pmap, va, va + NBPG);
 #ifdef	PMAP_DEBUG
 		/* Make sure it went away. */
-		if ((pv->pv_pmap == pmap) && (pv->pv_va == va))
-		{
+		if (pv == *head) {
 			db_printf("pv_remove_all: head unchanged for pa=0x%lx\n", pa);
 			Debugger();
 		}
@@ -1341,9 +1394,10 @@ static int
 pv_link(pmap, pa, va, flags)
 	pmap_t pmap;
 	vm_offset_t pa, va;
-	u_int flags;
+	int flags;
 {
-	pv_entry_t head, pv;
+	pv_entry_t *head, pv;
+	u_char *pv_flags;

 	if (!pv_initialized)
 		return 0;
@@ -1360,49 +1414,46 @@ pv_link(pmap, pa, va, flags)
 	/* Only the non-cached bit is of interest here. */
 	flags = flags & PV_NC;

-	head = pa_to_pvp(pa);
-	if (head->pv_pmap == NULL) {
-		/* not currently mapped anywhere */
-		pmap_stats.ps_enter_firstpv++;
-		head->pv_va = va;
-		head->pv_pmap = pmap,
-		head->pv_next = NULL;
-		head->pv_flags = flags;
-		return (flags);
-	}
+	pv_flags = pa_to_pvflags(pa);
+	head = pa_to_pvhead(pa);

-#ifdef	PMAP_DEBUG
-	/* XXX - See if this mapping is already in the list? */
-	for (pv = head; pv != NULL; pv = pv->pv_next) {
+#ifdef	DIAGNOSTIC
+	/* See if this mapping is already in the list. */
+	for (pv = *head; pv != NULL; pv = pv->pv_next) {
 		if ((pv->pv_pmap == pmap) && (pv->pv_va == va))
 			panic("pv_link: duplicate entry for PA=0x%lx", pa);
 	}
 #endif

 	/*
-	 * Before entering the new mapping, see if it will cause
-	 * old mappings to become aliased (need cache inhibit).
+	 * Does this new mapping cause VAC alias problems?
 	 */
-	pmap_stats.ps_enter_secondpv++;
-
-	head->pv_flags |= flags;
-	if ((head->pv_flags & PV_NC) == 0) {
-		for (pv = head; pv != NULL; pv = pv->pv_next) {
+	*pv_flags |= flags;
+	if ((*pv_flags & PV_NC) == 0) {
+		for (pv = *head; pv != NULL; pv = pv->pv_next) {
 			if (BADALIAS(va, pv->pv_va)) {
-				head->pv_flags |= PV_NC;
-				pv_changepte(head, PG_NC, 0);
+				*pv_flags |= PV_NC;
+				pv_changepte(pa, PG_NC, 0);
 				pmap_stats.ps_vac_uncached++;
 				break;
 			}
 		}
 	}

-	pv = (pv_entry_t) malloc(sizeof(*pv), M_VMPVENT, M_WAITOK);
-	pv->pv_next = head->pv_next;
+	/* Allocate a PV element (pv_alloc()). */
+	pv = pv_free_list;
+	if (pv == NULL)
+		panic("pv_link: pv_alloc");
+	pv_free_list = pv->pv_next;
+	pv->pv_next = 0;
+
+	/* Insert new entry at the head. */
 	pv->pv_pmap = pmap;
 	pv->pv_va = va;
-	head->pv_next = pv;
+	pv->pv_next = *head;
+	*head = pv;

-	return (head->pv_flags & PV_NC);
+	return (*pv_flags & PV_NC);
 }

 /*
@@ -1420,7 +1471,8 @@ pv_unlink(pmap, pa, va)
 	pmap_t pmap;
 	vm_offset_t pa, va;
 {
-	pv_entry_t head, pv;
+	pv_entry_t *head, *ppv, pv;
+	u_char *pv_flags;

 	if (!pv_initialized)
 		return;
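The alias scan in pv_link() above exists because the sun3 cache is virtually indexed: two virtual mappings of the same physical page are only cache-safe when they land on the same cache index. The real BADALIAS macro is defined elsewhere in this file; a sketch of the kind of test involved, with an assumed cache-index mask:

/* Assumed 64 KB direct-mapped VAC; the real sun3 mask differs. */
#define VAC_INDEX_MASK	0xFFFFUL

#define BADALIAS(a1, a2) \
	((((unsigned long)(a1) ^ (unsigned long)(a2)) & VAC_INDEX_MASK) != 0)

When the test fires, pv_link() sets PV_NC and pv_changepte(pa, PG_NC, 0) marks every existing PTE for that page non-cacheable; pv_unlink() below re-enables caching once no conflicting mapping remains.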
@@ -1434,61 +1486,59 @@ pv_unlink(pmap, pa, va)
 	}
 #endif

-	head = pa_to_pvp(pa);
-
-#ifdef	DIAGNOSTIC
-	if (head->pv_pmap == NULL)
-		panic("pv_unlink: empty list");
-#endif
+	pv_flags = pa_to_pvflags(pa);
+	head = pa_to_pvhead(pa);

 	/*
-	 * First entry is special (sigh).
+	 * Find the entry.
 	 */
-	pv = head->pv_next;
-	if (head->pv_pmap == pmap && head->pv_va == va) {
-		pmap_stats.ps_unlink_pvfirst++;
-		if (pv != NULL) {
-			/* Copy next entry into (fixed) head. */
-			head->pv_next = pv->pv_next;
-			head->pv_pmap = pv->pv_pmap;
-			head->pv_va = pv->pv_va;
-			free((caddr_t)pv, M_VMPVENT);
-		} else {
-			/* No next entry, list is now empty. */
-			head->pv_pmap = NULL;
-		}
-	} else {
-		register pv_entry_t prev;
-
-		for (prev = head;; prev = pv, pv = pv->pv_next) {
-			pmap_stats.ps_unlink_pvsearch++;
-			if (pv == NULL) {
-#ifdef	PMAP_DEBUG
-				db_printf("pv_unlink: not on list (pa=0x%lx,va=0x%lx)\n",
-					pa, va);
-				Debugger();
-#endif
-				return;
-			}
-			if (pv->pv_pmap == pmap && pv->pv_va == va)
-				break;
-		}
-		prev->pv_next = pv->pv_next;
-		free((caddr_t)pv, M_VMPVENT);
-	}
-
-	if (head->pv_flags & PV_NC) {
-		/*
-		 * Not cached: check to see if we can fix that now.
-		 */
-		va = head->pv_va;
-		for (pv = head->pv_next; pv != NULL; pv = pv->pv_next)
-			if (BADALIAS(va, pv->pv_va))
-				return;
-		head->pv_flags &= ~PV_NC;
-		pv_changepte(head, 0, PG_NC);
-		pmap_stats.ps_vac_recached++;
-	}
+	ppv = head;
+	pv = *ppv;
+	while (pv) {
+		if ((pv->pv_pmap == pmap) && (pv->pv_va == va))
+			goto found;
+		ppv = &pv->pv_next;
+		pv = pv->pv_next;
+	}
+#ifdef	PMAP_DEBUG
+	db_printf("pv_unlink: not found (pa=0x%lx,va=0x%lx)\n", pa, va);
+	Debugger();
+#endif
+	return;

+found:
+	/* Unlink this entry from the list and clear it. */
+	*ppv = pv->pv_next;
+	pv->pv_pmap = NULL;
+	pv->pv_va = 0;
+
+	/* Insert it on the head of the free list. (pv_free()) */
+	pv->pv_next = pv_free_list;
+	pv_free_list = pv;
+	pv = NULL;
+
+	/* Do any non-cached mappings remain? */
+	if ((*pv_flags & PV_NC) == 0)
+		return;
+
+	if ((pv = *head) == NULL)
+		return;
+
+	/*
+	 * Have non-cached mappings.  See if we can fix that now.
+	 */
+	va = pv->pv_va;
+	for (pv = pv->pv_next; pv != NULL; pv = pv->pv_next) {
+		/* If there is a DVMA mapping, leave it NC. */
+		if (va >= DVMA_SPACE_START)
+			return;
+		/* If there are VAC alias problems, leave NC. */
+		if (BADALIAS(va, pv->pv_va))
+			return;
+	}
+	/* OK, there are no "problem" mappings. */
+	*pv_flags &= ~PV_NC;
+	pv_changepte(pa, 0, PG_NC);
+	pmap_stats.ps_vac_recached++;
 }
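Worth noting in the rewritten pv_unlink(): because a list head is now just a pointer slot in pv_head_tbl, the walk keeps a pointer-to-pointer (ppv) and the old "first entry is special (sigh)" branch disappears. The idiom in isolation, as a standalone sketch:

struct node {
	struct node *next;
	int key;
};

/* Remove the node matching key; one splice handles head,
 * middle, and tail positions alike. */
static void
unlink_node(struct node **headp, int key)
{
	struct node **pp;

	for (pp = headp; *pp != NULL; pp = &(*pp)->next) {
		if ((*pp)->key == key) {
			*pp = (*pp)->next;	/* splice out */
			return;
		}
	}
}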
@@ -2066,7 +2116,7 @@ pmap_page_protect(pa, prot)
 		break;
 	case VM_PROT_READ:
 	case VM_PROT_READ|VM_PROT_EXECUTE:
-		pv_changepte(pa_to_pvp(pa), 0, PG_WRITE);
+		pv_changepte(pa, 0, PG_WRITE);
 		break;
 	default:
 		/* remove mapping for all pmaps that have it */
@@ -2767,7 +2817,6 @@ pmap_enter(pmap, va, pa, prot, wired)
 	 */
 	s = splpmap();
 	if (pmap == kernel_pmap) {
-		/* This can be called recursively through malloc. */
 		pte_proto |= PG_SYSTEM;
 		pmap_enter_kernel(va, pa, prot, wired, pte_proto);
 	} else {
@@ -2913,18 +2962,19 @@ void
 pmap_clear_modify(pa)
 	register vm_offset_t pa;
 {
-	register pv_entry_t pvhead;
+	register pv_entry_t *head;
+	u_char *pv_flags;
 	int s;

 	if (!pv_initialized)
 		return;
-	if (!managed(pa))
-		return;
+
+	pv_flags = pa_to_pvflags(pa);
+	head = pa_to_pvhead(pa);

 	s = splpmap();
-	pvhead = pa_to_pvp(pa);
-	pv_syncflags(pvhead);
-	pvhead->pv_flags &= ~PV_MOD;
+	*pv_flags |= pv_syncflags(*head);
+	*pv_flags &= ~PV_MOD;
 	splx(s);
 }
@@ -2935,19 +2985,20 @@ int
 pmap_is_modified(pa)
 	vm_offset_t pa;
 {
-	pv_entry_t pvhead;
+	pv_entry_t *head;
+	u_char *pv_flags;
 	int rv, s;

 	if (!pv_initialized)
 		return (0);
-	if (!managed(pa))
-		return (0);
+
+	pv_flags = pa_to_pvflags(pa);
+	head = pa_to_pvhead(pa);

 	s = splpmap();
-	pvhead = pa_to_pvp(pa);
-	if ((pvhead->pv_flags & PV_MOD) == 0)
-		pv_syncflags(pvhead);
-	rv = pvhead->pv_flags & PV_MOD;
+	if ((*pv_flags & PV_MOD) == 0)
+		*pv_flags |= pv_syncflags(*head);
+	rv = (*pv_flags & PV_MOD);
 	splx(s);

 	return (rv);
@@ -2961,18 +3012,19 @@ void
 pmap_clear_reference(pa)
 	register vm_offset_t pa;
 {
-	register pv_entry_t pvhead;
+	register pv_entry_t *head;
+	u_char *pv_flags;
 	int s;

 	if (!pv_initialized)
 		return;
-	if (!managed(pa))
-		return;
+
+	pv_flags = pa_to_pvflags(pa);
+	head = pa_to_pvhead(pa);

 	s = splpmap();
-	pvhead = pa_to_pvp(pa);
-	pv_syncflags(pvhead);
-	pvhead->pv_flags &= ~PV_REF;
+	*pv_flags |= pv_syncflags(*head);
+	*pv_flags &= ~PV_REF;
 	splx(s);
 }
@@ -2984,19 +3036,20 @@ int
 pmap_is_referenced(pa)
 	vm_offset_t pa;
 {
-	register pv_entry_t pvhead;
+	register pv_entry_t *head;
+	u_char *pv_flags;
 	int rv, s;

 	if (!pv_initialized)
 		return (0);
-	if (!managed(pa))
-		return (0);
+
+	pv_flags = pa_to_pvflags(pa);
+	head = pa_to_pvhead(pa);

 	s = splpmap();
-	pvhead = pa_to_pvp(pa);
-	if ((pvhead->pv_flags & PV_REF) == 0)
-		pv_syncflags(pvhead);
-	rv = pvhead->pv_flags & PV_REF;
+	if ((*pv_flags & PV_REF) == 0)
+		*pv_flags |= pv_syncflags(*head);
+	rv = (*pv_flags & PV_REF);
 	splx(s);

 	return (rv);
@@ -3754,14 +3807,23 @@ pv_print(pa)
 	vm_offset_t pa;
 {
 	pv_entry_t pv;
+	int idx;

-	if (!pv_initialized)
+	if (!pv_initialized) {
+		db_printf("no pv_flags_tbl\n");
 		return;
+	}

+	idx = PA_PGNUM(pa);
+	if (idx > physmem) {
+		db_printf("bad address\n");
+		return;
+	}
+	db_printf("pa=0x%lx, flags=0x%x\n",
+		pa, pv_flags_tbl[idx]);

-	pv = pa_to_pvp(pa);
-	db_printf("pv_list for pa 0x%lx: flags=0x%x\n", pa, pv->pv_flags);
+	pv = pv_head_tbl[idx];
 	while (pv) {
-		db_printf("pv_entry %p pmap %p va 0x%lx next %p\n",
+		db_printf(" pv_entry %p pmap %p va 0x%lx next %p\n",
 			pv, pv->pv_pmap, pv->pv_va, pv->pv_next);
 		pv = pv->pv_next;
 	}