Since we keep a list of all mappings for a given physical page (even
unmanaged mappings) so we can deal with cache aliases, make sure to
skip unmanaged/wired mappings (added via pmap_kenter_pa()) when doing
things like pmap_page_protect().
commit 3795c4e6f4
parent 6a3f09110b
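To illustrate the idea behind the change, here is a minimal, self-contained C sketch, not the kernel code: every mapping of a physical page sits on that page's pv list, including wired/unmanaged ones entered via pmap_kenter_pa(), so a pass such as pmap_page_protect() has to skip entries that are unmanaged or wired. The struct layout, flag values, list handling and the page_protect_readonly() helper are invented for illustration; only the PVO_ISMANAGED()/PVO_ISWIRED() check mirrors the test added in the diff below.

#include <stdio.h>
#include <stddef.h>

#define PVO_MANAGED	0x01	/* entered via pmap_enter(), attrs are cached */
#define PVO_WIRED	0x02	/* entered via pmap_kenter_pa(), never demoted */

struct pvo_entry {
	struct pvo_entry *pvo_next;	/* next mapping of the same page */
	unsigned long	  pvo_vaddr;	/* virtual address of the mapping */
	unsigned int	  pvo_flags;	/* PVO_MANAGED / PVO_WIRED */
	unsigned int	  pvo_prot;	/* simplified protection bits */
};

#define PVO_ISMANAGED(pvo)	((pvo)->pvo_flags & PVO_MANAGED)
#define PVO_ISWIRED(pvo)	((pvo)->pvo_flags & PVO_WIRED)

/* Downgrade every eligible mapping of a page to read-only. */
static void
page_protect_readonly(struct pvo_entry *pv_list)
{
	struct pvo_entry *pvo;

	for (pvo = pv_list; pvo != NULL; pvo = pvo->pvo_next) {
		/* Mappings entered via pmap_kenter_pa() are left alone. */
		if (!PVO_ISMANAGED(pvo) || PVO_ISWIRED(pvo)) {
			printf("ignoring wired VA 0x%lx\n", pvo->pvo_vaddr);
			continue;
		}
		pvo->pvo_prot &= ~0x2u;	/* clear the (simplified) write bit */
	}
}

int
main(void)
{
	struct pvo_entry wired = { NULL, 0xc0001000UL,
	    PVO_MANAGED | PVO_WIRED, 0x3 };
	struct pvo_entry user  = { &wired, 0x00401000UL, PVO_MANAGED, 0x3 };

	page_protect_readonly(&user);
	printf("user prot 0x%x, wired prot 0x%x\n",
	    user.pvo_prot, wired.pvo_prot);
	return 0;
}

The wired mapping keeps its write permission while the ordinary managed one is downgraded, which is the behaviour the commit enforces in the kernel paths touched below.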
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.23 2002/10/24 13:56:45 scw Exp $	*/
+/*	$NetBSD: pmap.c,v 1.24 2002/10/31 14:34:17 scw Exp $	*/
 
 /*
  * Copyright 2002 Wasabi Systems, Inc.
@@ -124,7 +124,7 @@
 #include <machine/cacheops.h>
 
 
-#define PMAP_DIAG
+#undef PMAP_DIAG
 #ifdef PMAP_DIAG
 #ifndef DDB
 #define pmap_debugger() panic("")
@@ -572,7 +572,7 @@ static __inline void
 pmap_pteg_synch(ptel_t ptel, struct pvo_entry *pvo)
 {
 
-	if (PVO_ISMANAGED(pvo))
+	if (PVO_ISMANAGED(pvo) && !PVO_ISWIRED(pvo))
 		pvo->pvo_ptel |= (ptel & SH5_PTEL_RM_MASK);
 }
 
@@ -1715,15 +1715,11 @@ pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
 		}
 	}
 
-	splx(s);
-
 	/*
 	 * If we aren't overwriting a mapping, try to allocate
 	 */
 	pvo = pool_get(pl, poolflags);
 
-	s = splvm();
-
 	if (pvo == NULL) {
 		if ((flags & PMAP_CANFAIL) == 0)
 			panic("pmap_pvo_enter: failed");
@@ -1913,7 +1909,7 @@ pmap_pvo_remove(struct pvo_entry *pvo, int idx)
 	 * Save the REF/CHG bits into their cache if the page is managed.
 	 */
 	pg = PHYS_TO_VM_PAGE((paddr_t)(pvo->pvo_ptel & SH5_PTEL_PPN_MASK));
-	if (pg && PVO_ISMANAGED(pvo))
+	if (pg && PVO_ISMANAGED(pvo) && !PVO_ISWIRED(pvo))
 		pmap_attr_save(pg, pvo->pvo_ptel & (SH5_PTEL_R | SH5_PTEL_M));
 
 	/*
@@ -2142,7 +2138,6 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
 	if (pg && (flags & PMAP_UNMANAGED) == 0)
 		pmap_attr_save(pg, ptel & (SH5_PTEL_R | SH5_PTEL_M));
 	error = pmap_pvo_enter(pm, pl, pvo_head, va, pa, ptel, flags);
-
 	splx(s);
 
 	return (error);
@@ -2343,6 +2338,8 @@ pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot)
 		PMPRINTF(("pmap_protect: protecting VA 0x%lx, PA 0x%lx\n",
 		    va, (paddr_t)(pvo->pvo_ptel & SH5_PTEL_PPN_MASK)));
 
+		KDASSERT(!PVO_ISWIRED(pvo));
+
 		pvo->pvo_ptel &= ~clrbits;
 		pvo->pvo_vaddr &= ~PVO_WRITABLE;
 
@@ -2446,6 +2443,16 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
 	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
 		next_pvo = LIST_NEXT(pvo, pvo_vlink);
 
+		/*
+		 * Mappings entered using pmap_kenter_pa() can be ignored
+		 */
+		if (!PVO_ISMANAGED(pvo) || PVO_ISWIRED(pvo)) {
+			PMPRINTF((
+			    "pmap_page_protect: ignoring wired VA 0x%lx\n",
+			    PVO_VADDR(pvo)));
+			continue;
+		}
+
 		/*
 		 * Downgrading to no mapping at all, we just remove the entry.
 		 */
@@ -2548,7 +2555,8 @@ pmap_query_bit(struct vm_page *pg, ptel_t ptebit)
 		 * See if we saved the bit off. If so cache, it and return
 		 * success.
 		 */
-		if (PVO_ISMANAGED(pvo) && pvo->pvo_ptel & ptebit) {
+		if (PVO_ISMANAGED(pvo) && !PVO_ISWIRED(pvo) &&
+		    pvo->pvo_ptel & ptebit) {
 			pmap_attr_save(pg, ptebit);
 			PMPRINTF(("yes. Cached in pvo for 0x%lx.\n",
 			    PVO_VADDR(pvo)));
@@ -2563,7 +2571,7 @@ pmap_query_bit(struct vm_page *pg, ptel_t ptebit)
 	 * to the PTEs.
 	 */
 	LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
-		if (!PVO_ISMANAGED(pvo))
+		if (!PVO_ISMANAGED(pvo) || PVO_ISWIRED(pvo))
 			continue;
 
 		if (PVO_VADDR(pvo) < SH5_KSEG0_BASE) {
@@ -2637,7 +2645,7 @@ pmap_clear_bit(struct vm_page *pg, ptel_t ptebit)
 	 * valid PTE. If so, clear the ptebit from the valid PTE.
 	 */
 	LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
-		if (!PVO_ISMANAGED(pvo))
+		if (!PVO_ISMANAGED(pvo) || PVO_ISWIRED(pvo))
 			continue;
 
 		if (PVO_VADDR(pvo) < SH5_KSEG0_BASE) {