Change pv_entries to use SLIST.
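
(Illustrative sketch, not part of the commit: the <sys/queue.h> SLIST
idioms the converted code uses, including taking the address of
SLIST_FIRST/SLIST_NEXT to unlink from a singly-linked list the way
pmap_remove_pv does below.  The cut-down struct and the pv_remove
helper are hypothetical stand-ins, shown in user space.)

#include <stdio.h>
#include <sys/queue.h>

struct pv_entry {
	SLIST_ENTRY(pv_entry) pv_link;	/* replaces the hand-rolled pv_next */
	unsigned long pv_va;		/* stand-in payload */
};
SLIST_HEAD(pv_head, pv_entry);

/* Unlink the entry matching va; return it, or NULL if absent. */
static struct pv_entry *
pv_remove(struct pv_head *head, unsigned long va)
{
	/* prevptr points at whichever link currently points at pve,
	 * so unlinking is a single store -- pmap_remove_pv's idiom. */
	struct pv_entry **prevptr = &SLIST_FIRST(head);
	struct pv_entry *pve = *prevptr;

	while (pve != NULL) {
		if (pve->pv_va == va) {
			*prevptr = SLIST_NEXT(pve, pv_link);
			return pve;
		}
		prevptr = &SLIST_NEXT(pve, pv_link);
		pve = *prevptr;
	}
	return NULL;
}

int
main(void)
{
	struct pv_head head = SLIST_HEAD_INITIALIZER(head);
	struct pv_entry a = { .pv_va = 0x1000 }, b = { .pv_va = 0x2000 };
	struct pv_entry *pv;

	SLIST_INSERT_HEAD(&head, &a, pv_link);
	SLIST_INSERT_HEAD(&head, &b, pv_link);
	pv_remove(&head, 0x2000);

	SLIST_FOREACH(pv, &head, pv_link)	/* prints only va 0x1000 */
		printf("va 0x%lx\n", pv->pv_va);
	return 0;
}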

For VIPT caches, keep track of when pages are dirty so that their contents
can be flushed back to main memory.  This is done when the page is
mapped read-only by more than one color.  Pages become dirty when either
their modified bit gets set or an unmanaged writeable page is mapped.
When a page is unmapped or changed to read-only, run pmap_vac_me_harder
in case the page can be made cacheable again.
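
(A minimal sketch of the dirty-tracking rules just described.  The
PVF_* values below are placeholders, not the real pmap flag values,
and both helpers are hypothetical; only the invariant -- PVF_MOD
implies PVF_DIRTY or PVF_NC -- and the flush-time clearing rule are
taken from the diff.)

#include <assert.h>

#define PVF_MOD		0x01	/* page has been modified */
#define PVF_NC		0x02	/* mapped non-cacheable (alias conflict) */
#define PVF_DIRTY	0x04	/* cache may hold lines newer than RAM */

/* Setting the modified bit dirties the page unless it is uncached. */
static void
page_set_modified(unsigned *attrs)
{
	*attrs |= PVF_MOD;
	if ((*attrs & PVF_NC) == 0)
		*attrs |= PVF_DIRTY;
	/* the invariant the new KASSERTs check */
	assert((*attrs & PVF_MOD) == 0 || (*attrs & (PVF_DIRTY | PVF_NC)));
}

/* After a write-back (pmap_flush_page), the page is clean again only
 * if its modified bit has been cleared in the meantime. */
static void
page_flushed(unsigned *attrs)
{
	if ((*attrs & PVF_MOD) == 0)
		*attrs &= ~PVF_DIRTY;
}

int
main(void)
{
	unsigned attrs = 0;

	page_set_modified(&attrs);	/* MOD and DIRTY now set */
	page_flushed(&attrs);		/* still dirty: MOD still set */
	attrs &= ~PVF_MOD;		/* e.g. pmap_clearbit(pg, PVF_MOD) */
	page_flushed(&attrs);		/* now clean */
	assert((attrs & PVF_DIRTY) == 0);
	return 0;
}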

Thanks to Imre Deak for giving me the idea to assert for PVF_DIRTY.
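
(One more illustrative fragment, on what "color" means above: in a
VIPT cache, the VA bits that index the cache but lie above the page
offset select which cache set a mapping lands in, so two mappings of
one page can alias when those bits differ.  The diff tests this as
((va ^ pg->mdpage.pvh_attrs) & arm_cache_prefer_mask); the mask value
below is hypothetical.)

#include <assert.h>
#include <stdbool.h>

#define CACHE_PREFER_MASK 0x1000UL	/* stand-in for arm_cache_prefer_mask */

/* Same XOR-and-mask color test the diff uses. */
static bool
same_color(unsigned long va1, unsigned long va2)
{
	return ((va1 ^ va2) & CACHE_PREFER_MASK) == 0;
}

int
main(void)
{
	assert(same_color(0x10000, 0x20000));	/* color bit equal */
	assert(!same_color(0x10000, 0x21000));	/* colors differ */
	return 0;
}
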
matt 2008-08-06 19:13:45 +00:00
parent 04ddff9cc7
commit baab10840a
2 changed files with 229 additions and 110 deletions

pmap.c

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.182 2008/07/16 00:19:57 matt Exp $ */
/* $NetBSD: pmap.c,v 1.183 2008/08/06 19:13:45 matt Exp $ */
/*
* Copyright 2003 Wasabi Systems, Inc.
@@ -211,7 +211,7 @@
#include <machine/param.h>
#include <arm/arm32/katelib.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.182 2008/07/16 00:19:57 matt Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.183 2008/08/06 19:13:45 matt Exp $");
#ifdef PMAP_DEBUG
@@ -303,7 +303,7 @@ static struct pool_cache pmap_l2ptp_cache;
static vaddr_t pmap_kernel_l2ptp_kva;
static paddr_t pmap_kernel_l2ptp_phys;
#ifdef PMAPCOUNT
#ifdef PMAPCOUNTERS
#define PMAP_EVCNT_INITIALIZER(name) \
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", name)
@@ -439,7 +439,7 @@ EVCNT_ATTACH_STATIC(pmap_ev_activations);
*/
static pt_entry_t *csrc_pte, *cdst_pte;
static vaddr_t csrcp, cdstp;
vaddr_t memhook;
vaddr_t memhook; /* used by mem.c */
extern void *msgbufaddr;
/*
@@ -593,7 +593,7 @@ int pmap_needs_pte_sync;
* Real definition of pv_entry.
*/
struct pv_entry {
struct pv_entry *pv_next; /* next pv_entry */
SLIST_ENTRY(pv_entry) pv_link; /* next pv_entry */
pmap_t pv_pmap; /* pmap where mapping lies */
vaddr_t pv_va; /* virtual address for mapping */
u_int pv_flags; /* flags */
@@ -652,7 +652,7 @@ static int pmap_clean_page(struct pv_entry *, bool);
#endif
#ifdef PMAP_CACHE_VIPT
static void pmap_syncicache_page(struct vm_page *);
static void pmap_flush_page(struct vm_page *);
static void pmap_flush_page(struct vm_page *, bool);
#endif
static void pmap_page_remove(struct vm_page *);
@@ -851,19 +851,24 @@ pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
pve->pv_flags = flags;
simple_lock(&pg->mdpage.pvh_slock); /* lock vm_page */
pvp = &pg->mdpage.pvh_list;
pvp = &SLIST_FIRST(&pg->mdpage.pvh_list);
#ifdef PMAP_CACHE_VIPT
/*
* Insert unmapped entries at the head of the pv list.
*/
if (__predict_true((flags & PVF_KENTRY) == 0)) {
while (*pvp != NULL && (*pvp)->pv_flags & PVF_KENTRY)
pvp = &(*pvp)->pv_next;
pvp = &SLIST_NEXT(*pvp, pv_link);
}
#endif
pve->pv_next = *pvp; /* add to ... */
SLIST_NEXT(pve, pv_link) = *pvp; /* add to ... */
*pvp = pve; /* ... locked list */
#ifdef PMAP_CACHE_VIPT
pg->mdpage.pvh_attrs |= flags & (PVF_REF | PVF_MOD | PVF_KENTRY);
if ((flags & PVF_MOD) && (pg->mdpage.pvh_attrs & PVF_NC) == 0)
pg->mdpage.pvh_attrs |= PVF_DIRTY;
KASSERT((pg->mdpage.pvh_attrs & PVF_MOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
#endif
if (pm == pmap_kernel()) {
PMAPCOUNT(kernel_mappings);
if (flags & PVF_WRITE)
@@ -908,7 +913,7 @@ pmap_find_pv(struct vm_page *pg, pmap_t pm, vaddr_t va)
{
struct pv_entry *pv;
for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
if (pm == pv->pv_pmap && va == pv->pv_va)
break;
}
@@ -934,7 +939,7 @@ pmap_remove_pv(struct vm_page *pg, pmap_t pm, vaddr_t va, int skip_wired)
NPDEBUG(PDB_PVDUMP,
printf("pmap_remove_pv: pm %p, pg %p, va 0x%08lx\n", pm, pg, va));
prevptr = &pg->mdpage.pvh_list; /* previous pv_entry pointer */
prevptr = &SLIST_FIRST(&pg->mdpage.pvh_list); /* prev pv_entry ptr */
pve = *prevptr;
while (pve) {
@@ -952,13 +957,13 @@ pmap_remove_pv(struct vm_page *pg, pmap_t pm, vaddr_t va, int skip_wired)
* a KENTRY, if the next one isn't also a KENTRY,
* clear KENTRY from the page attributes.
*/
if (pg->mdpage.pvh_list == pve
if (SLIST_FIRST(&pg->mdpage.pvh_list) == pve
&& (pve->pv_flags & PVF_KENTRY)
&& (pve->pv_next == NULL
|| (pve->pv_next->pv_flags & PVF_KENTRY) == 0))
&& (SLIST_NEXT(pve, pv_link) == NULL
|| (SLIST_NEXT(pve, pv_link)->pv_flags & PVF_KENTRY) == 0))
pg->mdpage.pvh_attrs &= ~PVF_KENTRY;
#endif
*prevptr = pve->pv_next; /* remove it! */
*prevptr = SLIST_NEXT(pve, pv_link); /* remove it! */
if (pm == pmap_kernel()) {
PMAPCOUNT(kernel_unmappings);
if (pve->pv_flags & PVF_WRITE)
@@ -981,7 +986,7 @@ pmap_remove_pv(struct vm_page *pg, pmap_t pm, vaddr_t va, int skip_wired)
* otherwise sync the i-cache for this page.
*/
if (PV_IS_EXEC_P(pg->mdpage.pvh_attrs)) {
if (pg->mdpage.pvh_list == NULL) {
if (SLIST_EMPTY(&pg->mdpage.pvh_list)) {
pg->mdpage.pvh_attrs &= ~PVF_EXEC;
PMAPCOUNT(exec_discarded_unmap);
} else {
@@ -992,18 +997,24 @@ pmap_remove_pv(struct vm_page *pg, pmap_t pm, vaddr_t va, int skip_wired)
#endif /* PMAP_CACHE_VIPT */
break;
}
prevptr = &pve->pv_next; /* previous pointer */
pve = pve->pv_next; /* advance */
prevptr = &SLIST_NEXT(pve, pv_link); /* previous pointer */
pve = *prevptr; /* advance */
}
#ifdef PMAP_CACHE_VIPT
/*
* If this was a writeable page and there are no more writeable
* mappings (ignoring KMPAGE), clear the WRITE flag.
* mappings (ignoring KMPAGE), clear the WRITE flag and writeback
* the contents to memory.
*/
if ((pg->mdpage.pvh_attrs & PVF_WRITE)
&& pg->mdpage.krw_mappings + pg->mdpage.urw_mappings == 0)
&& pg->mdpage.krw_mappings + pg->mdpage.urw_mappings == 0) {
pg->mdpage.pvh_attrs &= ~PVF_WRITE;
#if 0 /* XYY */
if ((pg->mdpage.pvh_attrs & PVF_NC) == 0)
pmap_flush_page(pg, false);
#endif
}
#endif /* PMAP_CACHE_VIPT */
return(pve); /* return removed pve */
@@ -1038,8 +1049,14 @@ pmap_modify_pv(struct vm_page *pg, pmap_t pm, vaddr_t va,
* There is at least one VA mapping this page.
*/
if (clr_mask & (PVF_REF | PVF_MOD))
if (clr_mask & (PVF_REF | PVF_MOD)) {
pg->mdpage.pvh_attrs |= set_mask & (PVF_REF | PVF_MOD);
#ifdef PMAP_CACHE_VIPT
if ((set_mask & PVF_MOD) && !(pg->mdpage.pvh_attrs & PVF_NC))
pg->mdpage.pvh_attrs |= PVF_DIRTY;
KASSERT((pg->mdpage.pvh_attrs & PVF_MOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
#endif
}
oflags = npv->pv_flags;
npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask;
@@ -1587,7 +1604,7 @@ pmap_vac_me_kpmap(struct vm_page *pg, pmap_t pm, vaddr_t va)
* kernel-writable pages.
*/
u_cacheable = 0;
for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
if (pv->pv_pmap != pm && (pv->pv_flags & PVF_NC) == 0)
u_cacheable++;
}
@@ -1608,7 +1625,7 @@ pmap_vac_me_kpmap(struct vm_page *pg, pmap_t pm, vaddr_t va)
* might not be set correctly, call pmap_vac_me_user
* to recalculate the settings.
*/
for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
/*
* We know kernel mappings will get set
* correctly in other calls. We also know
@@ -1671,7 +1688,7 @@ pmap_vac_me_user(struct vm_page *pg, pmap_t pm, vaddr_t va)
* Include kernel mappings as part of our own.
* Keep a pointer to the first one.
*/
for (pv = npv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
/* Count mappings in the same pmap */
if (pm == pv->pv_pmap || kpmap == pv->pv_pmap) {
if (entries++ == 0)
@@ -1703,7 +1720,7 @@ pmap_vac_me_user(struct vm_page *pg, pmap_t pm, vaddr_t va)
if (cacheable_entries == 0)
return;
for (pv = npv; pv; pv = pv->pv_next) {
for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) {
if ((pm != pv->pv_pmap && kpmap != pv->pv_pmap) ||
(pv->pv_flags & PVF_NC))
continue;
@@ -1745,7 +1762,7 @@ pmap_vac_me_user(struct vm_page *pg, pmap_t pm, vaddr_t va)
* Turn caching back on for some pages. If it is a kernel
* page, only do so if there are no other writable pages.
*/
for (pv = npv; pv; pv = pv->pv_next) {
for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) {
if (!(pv->pv_flags & PVF_NC) || (pm != pv->pv_pmap &&
(kpmap != pv->pv_pmap || other_writable)))
continue;
@@ -1813,6 +1830,9 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
bool bad_alias;
struct l2_bucket *l2b;
pt_entry_t *ptep, pte, opte;
const u_int
rw_mappings = pg->mdpage.urw_mappings + pg->mdpage.krw_mappings,
ro_mappings = pg->mdpage.uro_mappings + pg->mdpage.kro_mappings;
/* do we need to do anything? */
if (arm_cache_prefer_mask == 0)
@@ -1833,130 +1853,177 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
/* Already a conflict? */
if (__predict_false(pg->mdpage.pvh_attrs & PVF_NC)) {
/* just an add, things are already non-cached */
KASSERT(!(pg->mdpage.pvh_attrs & PVF_DIRTY));
bad_alias = false;
if (va) {
PMAPCOUNT(vac_color_none);
bad_alias = true;
KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
goto fixup;
}
pv = pg->mdpage.pvh_list;
pv = SLIST_FIRST(&pg->mdpage.pvh_list);
/* the list can't be empty because it would otherwise be cacheable */
if (pg->mdpage.pvh_attrs & PVF_KMPAGE) {
tst_mask = pg->mdpage.pvh_attrs;
} else {
KASSERT(pv);
tst_mask = pv->pv_va;
pv = pv->pv_next;
pv = SLIST_NEXT(pv, pv_link);
}
/*
* Only check for a bad alias if we have writable mappings.
*/
if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings > 0) {
tst_mask &= arm_cache_prefer_mask;
for (; pv && !bad_alias; pv = pv->pv_next) {
tst_mask &= arm_cache_prefer_mask;
if (rw_mappings > 0 && arm_cache_prefer_mask) {
for (; pv && !bad_alias; pv = SLIST_NEXT(pv, pv_link)) {
/* if there's a bad alias, stop checking. */
if (tst_mask != (pv->pv_va & arm_cache_prefer_mask))
bad_alias = true;
}
pg->mdpage.pvh_attrs |= PVF_WRITE;
if (!bad_alias)
pg->mdpage.pvh_attrs |= PVF_DIRTY;
} else {
pg->mdpage.pvh_attrs &= ~PVF_WRITE;
}
/* If no conflicting colors, set everything back to cached */
if (!bad_alias) {
PMAPCOUNT(vac_color_restore);
#ifdef DEBUG
if ((pg->mdpage.pvh_attrs & PVF_WRITE)
|| ro_mappings < 2) {
SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link)
KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0);
}
#endif
if (ro_mappings > 1
&& (pg->mdpage.pvh_attrs & PVF_DIRTY))
pmap_flush_page(pg, false);
pg->mdpage.pvh_attrs &= (PAGE_SIZE - 1) & ~PVF_NC;
pg->mdpage.pvh_attrs |= tst_mask | PVF_COLORED;
PMAPCOUNT(vac_color_restore);
} else {
KASSERT(pg->mdpage.pvh_list != NULL);
KASSERT(pg->mdpage.pvh_list->pv_next != NULL);
KASSERT(SLIST_FIRST(&pg->mdpage.pvh_list) != NULL);
KASSERT(SLIST_NEXT(SLIST_FIRST(&pg->mdpage.pvh_list), pv_link) != NULL);
}
KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
} else if (!va) {
KASSERT(pmap_is_page_colored_p(pg));
pg->mdpage.pvh_attrs &= (PAGE_SIZE - 1) | arm_cache_prefer_mask;
if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0)
KASSERT(!(pg->mdpage.pvh_attrs & PVF_WRITE)
|| (pg->mdpage.pvh_attrs & PVF_DIRTY));
if (rw_mappings == 0)
pg->mdpage.pvh_attrs &= ~PVF_WRITE;
KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
return;
} else if (!pmap_is_page_colored_p(pg)) {
/* not colored so we just use its color */
KASSERT(pg->mdpage.pvh_attrs & (PVF_WRITE|PVF_DIRTY));
PMAPCOUNT(vac_color_new);
pg->mdpage.pvh_attrs &= PAGE_SIZE - 1;
pg->mdpage.pvh_attrs |= PVF_COLORED
| (va & arm_cache_prefer_mask);
if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings > 0)
pg->mdpage.pvh_attrs |= PVF_WRITE;
KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
| (va & arm_cache_prefer_mask)
| (rw_mappings > 0 ? PVF_WRITE : 0);
KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
return;
} else if (((pg->mdpage.pvh_attrs ^ va) & arm_cache_prefer_mask) == 0) {
bad_alias = false;
if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings > 0) {
if (rw_mappings > 0) {
/*
* We now have writeable mappings and more than one
* readonly mapping, verify the colors don't clash
* and mark the page as writeable.
*/
if (pg->mdpage.uro_mappings + pg->mdpage.kro_mappings > 1
&& (pg->mdpage.pvh_attrs & PVF_WRITE) == 0) {
if (ro_mappings > 1
&& (pg->mdpage.pvh_attrs & PVF_WRITE) == 0
&& arm_cache_prefer_mask) {
tst_mask = pg->mdpage.pvh_attrs & arm_cache_prefer_mask;
for (pv = pg->mdpage.pvh_list;
pv && !bad_alias;
pv = pv->pv_next) {
SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
/* if there's a bad alias, stop checking. */
if (tst_mask != (pv->pv_va & arm_cache_prefer_mask))
if (((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0) {
bad_alias = true;
break;
}
}
}
pg->mdpage.pvh_attrs |= PVF_WRITE;
}
/* If no conflicting colors, set everything back to cached */
if (!bad_alias) {
if (pg->mdpage.pvh_list)
#ifdef DEBUG
if (rw_mappings > 0
|| (pg->mdpage.pvh_attrs & PMAP_KMPAGE)) {
tst_mask = pg->mdpage.pvh_attrs & arm_cache_prefer_mask;
SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link)
KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0);
}
#endif
if (SLIST_EMPTY(&pg->mdpage.pvh_list))
PMAPCOUNT(vac_color_reuse);
else
PMAPCOUNT(vac_color_ok);
/* matching color, just return */
KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
return;
}
KASSERT(pg->mdpage.pvh_list != NULL);
KASSERT(pg->mdpage.pvh_list->pv_next != NULL);
KASSERT(SLIST_FIRST(&pg->mdpage.pvh_list) != NULL);
KASSERT(SLIST_NEXT(SLIST_FIRST(&pg->mdpage.pvh_list), pv_link) != NULL);
/* color conflict. evict from cache. */
pmap_flush_page(pg);
pmap_flush_page(pg, true);
pg->mdpage.pvh_attrs &= ~PVF_COLORED;
pg->mdpage.pvh_attrs |= PVF_NC;
} else if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0
PMAPCOUNT(vac_color_erase);
} else if (rw_mappings == 0
&& (pg->mdpage.pvh_attrs & PVF_KMPAGE) == 0) {
KASSERT((pg->mdpage.pvh_attrs & PVF_WRITE) == 0);
/*
* If all the mappings are read-only, don't do anything.
* If the page has dirty cache lines, clean it.
*/
PMAPCOUNT(vac_color_blind);
KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
if (pg->mdpage.pvh_attrs & PVF_DIRTY)
pmap_flush_page(pg, false);
/*
* If this is the first remapping (we know that there are no
* writeable mappings), then this is a simple color change.
* Otherwise this is a secondary r/o mapping, which means
* we don't have to do anything.
*/
if (ro_mappings == 1) {
KASSERT(((pg->mdpage.pvh_attrs ^ va) & arm_cache_prefer_mask) != 0);
pg->mdpage.pvh_attrs &= PAGE_SIZE - 1;
pg->mdpage.pvh_attrs |= (va & arm_cache_prefer_mask);
PMAPCOUNT(vac_color_change);
} else {
PMAPCOUNT(vac_color_blind);
}
KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
return;
} else {
if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings > 0)
if (rw_mappings > 0)
pg->mdpage.pvh_attrs |= PVF_WRITE;
/* color conflict. evict from cache. */
pmap_flush_page(pg);
pmap_flush_page(pg, true);
/* the list can't be empty because this was an enter/modify */
pv = pg->mdpage.pvh_list;
KASSERT(pv);
/*
* If there's only one mapped page, change color to the
* page's new color and return.
*/
if (pv->pv_next == NULL) {
PMAPCOUNT(vac_color_change);
pg->mdpage.pvh_attrs &= PAGE_SIZE - 1;
pg->mdpage.pvh_attrs |= (va & arm_cache_prefer_mask);
KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
return;
pv = SLIST_FIRST(&pg->mdpage.pvh_list);
if ((pg->mdpage.pvh_attrs & PVF_KMPAGE) == 0) {
KASSERT(pv);
/*
* If there's only one mapped page, change color to the
* page's new color and return.
*/
if (SLIST_NEXT(pv, pv_link) == NULL) {
pg->mdpage.pvh_attrs &= PAGE_SIZE - 1;
pg->mdpage.pvh_attrs |= (va & arm_cache_prefer_mask);
PMAPCOUNT(vac_color_change);
KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
return;
}
}
bad_alias = true;
pg->mdpage.pvh_attrs &= ~PVF_COLORED;
@@ -1965,12 +2032,12 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
}
fixup:
KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
/*
* Turn caching on/off for all pages.
*/
for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
opte = *ptep;
@@ -1981,6 +2048,7 @@ pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
pv->pv_flags &= ~PVF_NC;
pte |= pte_l2_s_cache_mode;
}
if (opte == pte) /* only update if there's a change */
continue;
@@ -2017,6 +2085,7 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
const bool want_syncicache = PV_IS_EXEC_P(pg->mdpage.pvh_attrs);
bool need_syncicache = false;
bool did_syncicache = false;
bool need_vac_me_harder = false;
#endif
NPDEBUG(PDB_BITS,
@@ -2039,7 +2108,7 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
*/
pg->mdpage.pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF));
if (pg->mdpage.pvh_list == NULL) {
if (SLIST_EMPTY(&pg->mdpage.pvh_list)) {
#ifdef PMAP_CACHE_VIPT
if (need_syncicache) {
/*
@@ -2058,7 +2127,7 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
/*
* Loop over all current mappings setting/clearing as appropriate
*/
for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
va = pv->pv_va;
pm = pv->pv_pmap;
oflags = pv->pv_flags;
@@ -2134,15 +2203,16 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
pg->mdpage.pvh_attrs &= ~PVF_WRITE;
if (want_syncicache)
need_syncicache = true;
need_vac_me_harder = true;
#endif
}
}
if (maskbits & PVF_REF) {
#ifdef PMAP_CACHE_VIVT
if ((pv->pv_flags & PVF_NC) == 0 &&
(maskbits & (PVF_WRITE|PVF_MOD)) == 0 &&
l2pte_valid(npte)) {
#ifdef PMAP_CACHE_VIVT
/*
* Check npte here; we may have already
* done the wbinv above, and the validity
@@ -2158,8 +2228,8 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
pmap_dcache_wb_range(pm,
pv->pv_va, PAGE_SIZE,
true, true);
}
#endif
}
/*
* Make the PTE invalid so that we will take a
@@ -2196,6 +2266,19 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
pmap_syncicache_page(pg);
PMAPCOUNT(exec_synced_clearbit);
}
/*
* If we are changing this to read-only, we need to call vac_me_harder
* so we can change all the read-only pages to cacheable. We pretend
* this is a page deletion.
*/
if (need_vac_me_harder) {
if (pg->mdpage.pvh_attrs & PVF_NC)
pmap_vac_me_harder(pg, NULL, 0);
#if 0 /* XYY */
else
pmap_flush_page(pg, false);
#endif
}
#endif
simple_unlock(&pg->mdpage.pvh_slock);
@@ -2244,7 +2327,7 @@ pmap_clean_page(struct pv_entry *pv, bool is_src)
*/
pm = curproc->p_vmspace->vm_map.pmap;
for (npv = pv; npv; npv = npv->pv_next) {
for (npv = pv; npv; npv = SLIST_NEXT(npv, pv_link)) {
if (npv->pv_pmap == pmap_kernel() || npv->pv_pmap == pm) {
flags |= npv->pv_flags;
/*
@@ -2335,7 +2418,7 @@ pmap_syncicache_page(struct vm_page *pg)
}
void
pmap_flush_page(struct vm_page *pg)
pmap_flush_page(struct vm_page *pg, bool flush)
{
const vsize_t va_offset = pg->mdpage.pvh_attrs & arm_cache_prefer_mask;
const size_t pte_offset = va_offset >> PGSHIFT;
@@ -2366,7 +2449,16 @@ pmap_flush_page(struct vm_page *pg)
/*
* Flush it.
*/
cpu_idcache_wbinv_range(cdstp + va_offset, PAGE_SIZE);
if (flush)
cpu_idcache_wbinv_range(cdstp + va_offset, PAGE_SIZE);
else
cpu_dcache_wb_range(cdstp + va_offset, PAGE_SIZE);
/*
* Mark that the page is no longer dirty.
*/
if ((pg->mdpage.pvh_attrs & PVF_MOD) == 0)
pg->mdpage.pvh_attrs &= ~PVF_DIRTY;
/*
* Restore the page table entry since we might have interrupted
@@ -2406,7 +2498,7 @@ pmap_page_remove(struct vm_page *pg)
PMAP_HEAD_TO_MAP_LOCK();
simple_lock(&pg->mdpage.pvh_slock);
pv = pg->mdpage.pvh_list;
pv = SLIST_FIRST(&pg->mdpage.pvh_list);
if (pv == NULL) {
#ifdef PMAP_CACHE_VIPT
/*
@@ -2417,6 +2509,9 @@ pmap_page_remove(struct vm_page *pg)
PMAPCOUNT(exec_discarded_page_protect);
pg->mdpage.pvh_attrs &= ~PVF_EXEC;
KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
#if 0 /* XYY */
pmap_flush_page(pg, true); /* wbinv the contents */
#endif
#endif
simple_unlock(&pg->mdpage.pvh_slock);
PMAP_HEAD_TO_MAP_UNLOCK();
@@ -2442,10 +2537,10 @@ pmap_page_remove(struct vm_page *pg)
pmap_clean_page(pv, false);
#endif
pvp = &pg->mdpage.pvh_list;
pvp = &SLIST_FIRST(&pg->mdpage.pvh_list);
while (pv) {
pm = pv->pv_pmap;
npv = pv->pv_next;
npv = SLIST_NEXT(pv, pv_link);
if (flush == false && (pm == curpm || pm == pmap_kernel()))
flush = true;
@@ -2458,7 +2553,7 @@ pmap_page_remove(struct vm_page *pg)
*/
if (pv->pv_flags & PVF_KENTRY) {
*pvp = pv;
pvp = &pv->pv_next;
pvp = &SLIST_NEXT(pv, pv_link);
pv = npv;
continue;
}
@@ -2505,7 +2600,7 @@ pmap_page_remove(struct vm_page *pg)
*/
if (pv == NULL) {
*pvp = NULL;
if (pg->mdpage.pvh_list != NULL)
if (!SLIST_EMPTY(&pg->mdpage.pvh_list))
pmap_vac_me_harder(pg, pm, 0);
}
pmap_release_pmap_lock(pm);
@@ -2517,7 +2612,19 @@ pmap_page_remove(struct vm_page *pg)
if (PV_IS_EXEC_P(pg->mdpage.pvh_attrs))
PMAPCOUNT(exec_discarded_page_protect);
pg->mdpage.pvh_attrs &= ~PVF_EXEC;
if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0)
KASSERT(pg->mdpage.urw_mappings == 0);
KASSERT(pg->mdpage.uro_mappings == 0);
#if 0 /* XYY */
if ((pg->mdpage.pvh_attrs & PMAP_KMPAGE) == 0) {
if (SLIST_EMPTY(&pg->mdpage.pvh_list)) {
pmap_flush_page(pg, true); /* wbinv the contents */
} else if ((pg->mdpage.pvh_attrs & PVF_WRITE)
&& pg->mdpage.krw_mappings == 0) {
pmap_flush_page(pg, false); /* wb the contents */
}
}
#endif
if (pg->mdpage.krw_mappings == 0)
pg->mdpage.pvh_attrs &= ~PVF_WRITE;
KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
#endif
@@ -2868,6 +2975,10 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
simple_unlock(&pg->mdpage.pvh_slock);
}
}
#ifdef PMAP_CACHE_VIPT
KASSERT((pg->mdpage.pvh_attrs & PVF_MOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
KASSERT(((pg->mdpage.pvh_attrs & PVF_WRITE) == 0) == (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0));
#endif
pmap_release_pmap_lock(pm);
PMAP_MAP_TO_HEAD_UNLOCK();
@@ -3116,7 +3227,7 @@ pmap_kremove_pg(struct vm_page *pg, vaddr_t va)
*/
if ((pg->mdpage.pvh_attrs & (PVF_NC|PVF_EXEC)) == PVF_EXEC
&& (pv->pv_flags & PVF_WRITE) != 0) {
if (pg->mdpage.pvh_list == NULL) {
if (SLIST_EMPTY(&pg->mdpage.pvh_list)) {
pg->mdpage.pvh_attrs &= ~PVF_EXEC;
PMAPCOUNT(exec_discarded_kremove);
} else {
@@ -3201,12 +3312,14 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
KASSERT((pg->mdpage.pvh_attrs & PVF_NC) == 0);
/* if there is a color conflict, evict from cache. */
if (pmap_is_page_colored_p(pg)
&& ((va ^ pg->mdpage.pvh_attrs) & arm_cache_prefer_mask))
pmap_flush_page(pg);
&& ((va ^ pg->mdpage.pvh_attrs) & arm_cache_prefer_mask)) {
PMAPCOUNT(vac_color_change);
pmap_flush_page(pg, true);
}
pg->mdpage.pvh_attrs &= PAGE_SIZE - 1;
pg->mdpage.pvh_attrs |= PVF_KMPAGE
| PVF_COLORED
| (va & arm_cache_prefer_mask);
| PVF_COLORED | PVF_DIRTY
| (va & arm_cache_prefer_mask);
simple_unlock(&pg->mdpage.pvh_slock);
} else {
if (pv == NULL) {
@@ -3215,7 +3328,11 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
}
pmap_enter_pv(pg, pv, pmap_kernel(), va,
PVF_WIRED | PVF_KENTRY
| (prot & VM_PROT_WRITE ? PVF_WRITE : 0));
| (prot & VM_PROT_WRITE ? PVF_WRITE : 0));
if ((prot & VM_PROT_WRITE)
&& !(pg->mdpage.pvh_attrs & PVF_NC))
pg->mdpage.pvh_attrs |= PVF_DIRTY;
KASSERT((prot & VM_PROT_WRITE) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
simple_lock(&pg->mdpage.pvh_slock);
pmap_vac_me_harder(pg, pmap_kernel(), va);
simple_unlock(&pg->mdpage.pvh_slock);
@@ -3673,7 +3790,7 @@ pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user)
printf("pmap_fault_fixup: mod emul. pm %p, va 0x%08lx, pa 0x%08lx\n",
pm, va, VM_PAGE_TO_PHYS(pg)));
pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD;
pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD | PVF_DIRTY;
pv->pv_flags |= PVF_REF | PVF_MOD;
simple_unlock(&pg->mdpage.pvh_slock);
@@ -3943,7 +4060,7 @@ pmap_activate(struct lwp *l)
}
/* No interrupts while we frob the TTB/DACR */
oldirqstate = disable_interrupts(I32_bit | F32_bit);
oldirqstate = disable_interrupts(IF32_bits);
/*
* For ARM_VECTORS_LOW, we MUST, I repeat, MUST fix up the L1
@@ -4195,7 +4312,7 @@ pmap_zero_page_generic(paddr_t phys)
pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT];
#ifdef DEBUG
if (pg->mdpage.pvh_list != NULL)
if (!SLIST_EMPTY(&pg->mdpage.pvh_list))
panic("pmap_zero_page: page has mappings");
#endif
@@ -4233,6 +4350,7 @@ pmap_zero_page_generic(paddr_t phys)
pg->mdpage.pvh_attrs &= ~PVF_EXEC;
PMAPCOUNT(exec_discarded_zero);
}
pg->mdpage.pvh_attrs |= PVF_DIRTY;
#endif
}
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0 */
@@ -4244,7 +4362,7 @@ pmap_zero_page_xscale(paddr_t phys)
#ifdef DEBUG
struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
if (pg->mdpage.pvh_list != NULL)
if (!SLIST_EMPTY(&pg->mdpage.pvh_list))
panic("pmap_zero_page: page has mappings");
#endif
@@ -4290,7 +4408,7 @@ pmap_pageidlezero(paddr_t phys)
#ifdef DEBUG
if (pg->mdpage.pvh_list != NULL)
if (!SLIST_EMPTY(&pg->mdpage.pvh_list))
panic("pmap_pageidlezero: page has mappings");
#endif
@@ -4378,7 +4496,7 @@ pmap_copy_page_generic(paddr_t src, paddr_t dst)
pt_entry_t * const dst_ptep = &cdst_pte[dst_va_offset >> PGSHIFT];
#ifdef DEBUG
if (dst_pg->mdpage.pvh_list != NULL)
if (!SLIST_EMPTY(&dst_pg->mdpage.pvh_list))
panic("pmap_copy_page: dst page has mappings");
#endif
@@ -4395,7 +4513,7 @@ pmap_copy_page_generic(paddr_t src, paddr_t dst)
*/
simple_lock(&src_pg->mdpage.pvh_slock);
#ifdef PMAP_CACHE_VIVT
(void) pmap_clean_page(src_pg->mdpage.pvh_list, true);
(void) pmap_clean_page(SLIST_FIRST(&src_pg->mdpage.pvh_list), true);
#endif
/*
@@ -4449,6 +4567,7 @@ pmap_copy_page_generic(paddr_t src, paddr_t dst)
dst_pg->mdpage.pvh_attrs &= ~PVF_EXEC;
PMAPCOUNT(exec_discarded_copy);
}
dst_pg->mdpage.pvh_attrs |= PVF_DIRTY;
#endif
}
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0 */
@@ -4461,7 +4580,7 @@ pmap_copy_page_xscale(paddr_t src, paddr_t dst)
#ifdef DEBUG
struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
if (dst_pg->mdpage.pvh_list != NULL)
if (!SLIST_EMPTY(&dst_pg->mdpage.pvh_list))
panic("pmap_copy_page: dst page has mappings");
#endif
@@ -4475,7 +4594,7 @@ pmap_copy_page_xscale(paddr_t src, paddr_t dst)
*/
simple_lock(&src_pg->mdpage.pvh_slock);
#ifdef PMAP_CACHE_VIVT
(void) pmap_clean_page(src_pg->mdpage.pvh_list, true);
(void) pmap_clean_page(SLIST_FIRST(&src_pg->mdpage.pvh_list), true);
#endif
/*
@@ -4551,7 +4670,7 @@ pmap_grow_map(vaddr_t va, pt_entry_t cache_mode, paddr_t *pap)
* This new page must not have any mappings. Enter it via
* pmap_kenter_pa and let that routine do the hard work.
*/
KASSERT(pg->mdpage.pvh_list == NULL);
KASSERT(SLIST_EMPTY(&pg->mdpage.pvh_list));
pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE|PMAP_KMPAGE);
#endif
}
@@ -4982,7 +5101,7 @@ pmap_bootstrap(vaddr_t vstart, vaddr_t vend)
pmap_set_pt_cache_mode(l1pt, (vaddr_t)csrc_pte);
pmap_alloc_specials(&virtual_avail, nptes, &cdstp, &cdst_pte);
pmap_set_pt_cache_mode(l1pt, (vaddr_t)cdst_pte);
pmap_alloc_specials(&virtual_avail, 1, (void *)&memhook, NULL);
pmap_alloc_specials(&virtual_avail, nptes, &memhook, NULL);
pmap_alloc_specials(&virtual_avail, round_page(MSGBUFSIZE) / PAGE_SIZE,
(void *)&msgbufaddr, NULL);
@@ -6154,7 +6273,7 @@ pmap_dump_ncpg(pmap_t pm)
pg->mdpage.krw_mappings, pg->mdpage.kro_mappings,
pg->mdpage.urw_mappings, pg->mdpage.uro_mappings);
for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
printf(" %c va 0x%08lx, flags 0x%x\n",
(pm == pv->pv_pmap) ? '*' : ' ',
pv->pv_va, pv->pv_flags);

vmparam.h

@@ -1,4 +1,4 @@
/* $NetBSD: vmparam.h,v 1.22 2008/04/27 18:58:44 matt Exp $ */
/* $NetBSD: vmparam.h,v 1.23 2008/08/06 19:13:45 matt Exp $ */
/*
* Copyright (c) 2001, 2002 Wasabi Systems, Inc.
@@ -109,7 +109,7 @@ extern vaddr_t virtual_end;
*/
#define __HAVE_VM_PAGE_MD
struct vm_page_md {
struct pv_entry *pvh_list; /* pv_entry list */
SLIST_HEAD(,pv_entry) pvh_list; /* pv_entry list */
struct simplelock pvh_slock; /* lock on this head */
int pvh_attrs; /* page attributes */
u_int uro_mappings;
@@ -137,7 +137,7 @@ struct vm_page_md {
#define VM_MDPAGE_INIT(pg) \
do { \
(pg)->mdpage.pvh_list = NULL; \
SLIST_INIT(&(pg)->mdpage.pvh_list); \
simple_lock_init(&(pg)->mdpage.pvh_slock); \
VM_MDPAGE_PVH_ATTRS_INIT(pg); \
(pg)->mdpage.uro_mappings = 0; \