Use #ifdef CACHE_HAVE_VAC rather than #ifdef M68K_MMU_HP where

the pmap code intends.
(though currently only MMU_HP machines have VAC in pmap_motorola ports)
This commit is contained in:
tsutsui 2011-01-06 13:49:16 +00:00
parent 964d204966
commit 1e4ed35788
2 changed files with 25 additions and 25 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap_motorola.h,v 1.31 2011/01/02 05:21:11 tsutsui Exp $ */
/* $NetBSD: pmap_motorola.h,v 1.32 2011/01/06 13:49:16 tsutsui Exp $ */
/*
* Copyright (c) 1991, 1993
@ -246,7 +246,7 @@ extern u_int protection_codes[];
#if defined(M68040) || defined(M68060)
extern u_int protostfree;
#endif
#ifdef M68K_MMU_HP
#ifdef CACHE_HAVE_VAC
extern u_int pmap_aliasmask;
#endif

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap_motorola.c,v 1.58 2011/01/02 05:21:11 tsutsui Exp $ */
/* $NetBSD: pmap_motorola.c,v 1.59 2011/01/06 13:49:17 tsutsui Exp $ */
/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
@ -119,7 +119,7 @@
#include "opt_m68k_arch.h"
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_motorola.c,v 1.58 2011/01/02 05:21:11 tsutsui Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap_motorola.c,v 1.59 2011/01/06 13:49:17 tsutsui Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -270,7 +270,7 @@ struct pv_header *pv_table;
TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
int pv_nfree;
#ifdef M68K_MMU_HP
#ifdef CACHE_HAVE_VAC
u_int pmap_aliasmask; /* seperation at which VA aliasing ok */
#endif
#if defined(M68040) || defined(M68060)
@ -913,7 +913,7 @@ pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
vaddr_t nssva;
pt_entry_t *pte;
int flags;
#ifdef M68K_MMU_HP
#ifdef CACHE_HAVE_VAC
bool firstpage = true, needcflush = false;
#endif
@ -944,7 +944,7 @@ pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
}
if (pmap_pte_v(pte)) {
#ifdef M68K_MMU_HP
#ifdef CACHE_HAVE_VAC
if (pmap_aliasmask) {
/*
@ -976,7 +976,7 @@ pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
}
}
#ifdef M68K_MMU_HP
#ifdef CACHE_HAVE_VAC
/*
* Didn't do anything, no need for cache flushes
@ -1111,7 +1111,7 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
pte = pmap_pte(pmap, sva);
while (sva < nssva) {
if (pmap_pte_v(pte) && pmap_pte_prot_chg(pte, isro)) {
#ifdef M68K_MMU_HP
#ifdef CACHE_HAVE_VAC
/*
* Purge kernel side of VAC to ensure we
@ -1313,7 +1313,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
npv->pv_ptpmap = NULL;
pv->pv_next = npv;
#ifdef M68K_MMU_HP
#ifdef CACHE_HAVE_VAC
/*
* Since there is another logical mapping for the
@ -1391,7 +1391,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
pmap->pm_stats.wired_count++;
validate:
#ifdef M68K_MMU_HP
#ifdef CACHE_HAVE_VAC
/*
* Purge kernel side of VAC to ensure we get correct state
* of HW bits so we don't clobber them.
@ -1448,7 +1448,7 @@ validate:
*pte = npte;
if (!wired && active_pmap(pmap))
TBIS(va);
#ifdef M68K_MMU_HP
#ifdef CACHE_HAVE_VAC
/*
* The following is executed if we are entering a second
* (or greater) mapping for a physical page and the mappings
@ -1537,14 +1537,14 @@ pmap_kremove(vaddr_t va, vsize_t size)
pt_entry_t *pte;
vaddr_t nssva;
vaddr_t eva = va + size;
#ifdef M68K_MMU_HP
#ifdef CACHE_HAVE_VAC
bool firstpage, needcflush;
#endif
PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
("pmap_kremove(%lx, %lx)\n", va, size));
#ifdef M68K_MMU_HP
#ifdef CACHE_HAVE_VAC
firstpage = true;
needcflush = false;
#endif
@ -1574,7 +1574,7 @@ pmap_kremove(vaddr_t va, vsize_t size)
va += PAGE_SIZE;
continue;
}
#ifdef M68K_MMU_HP
#ifdef CACHE_HAVE_VAC
if (pmap_aliasmask) {
/*
@ -1605,7 +1605,7 @@ pmap_kremove(vaddr_t va, vsize_t size)
}
}
#ifdef M68K_MMU_HP
#ifdef CACHE_HAVE_VAC
/*
* In a couple of cases, we don't need to worry about flushing
@ -1874,7 +1874,7 @@ pmap_zero_page(paddr_t phys)
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_zero_page(%lx)\n", phys));
npte = phys | PG_V;
#ifdef M68K_MMU_HP
#ifdef CACHE_HAVE_VAC
if (pmap_aliasmask) {
/*
@ -1932,7 +1932,7 @@ pmap_copy_page(paddr_t src, paddr_t dst)
npte1 = src | PG_RO | PG_V;
npte2 = dst | PG_V;
#ifdef M68K_MMU_HP
#ifdef CACHE_HAVE_VAC
if (pmap_aliasmask) {
/*
@ -2051,7 +2051,7 @@ pmap_phys_address(paddr_t ppn)
return m68k_ptob(ppn);
}
#ifdef M68K_MMU_HP
#ifdef CACHE_HAVE_VAC
/*
* pmap_prefer: [ INTERFACE ]
*
@ -2074,7 +2074,7 @@ pmap_prefer(vaddr_t foff, vaddr_t *vap)
*vap = va + d;
}
}
#endif /* M68K_MMU_HP */
#endif /* CACHE_HAVE_VAC */
/*
* Miscellaneous support routines follow
@ -2123,7 +2123,7 @@ pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte, int flags)
return;
}
#ifdef M68K_MMU_HP
#ifdef CACHE_HAVE_VAC
if (pmap_aliasmask && (flags & PRM_CFLUSH)) {
/*
@ -2281,7 +2281,7 @@ pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte, int flags)
pv = &pvh->pvh_first;
}
#ifdef M68K_MMU_HP
#ifdef CACHE_HAVE_VAC
/*
* If only one mapping left we no longer need to cache inhibit
@ -2411,7 +2411,7 @@ pmap_testbit(paddr_t pa, int bit)
return true;
}
#ifdef M68K_MMU_HP
#ifdef CACHE_HAVE_VAC
/*
* Flush VAC to get correct state of any hardware maintained bits.
@ -2455,7 +2455,7 @@ pmap_changebit(paddr_t pa, int set, int mask)
pt_entry_t *pte, npte;
vaddr_t va;
int s;
#if defined(M68K_MMU_HP) || defined(M68040) || defined(M68060)
#if defined(CACHE_HAVE_VAC) || defined(M68040) || defined(M68060)
bool firstpage = true;
#endif
bool r;
@ -2489,7 +2489,7 @@ pmap_changebit(paddr_t pa, int set, int mask)
#endif
va = pv->pv_va;
pte = pmap_pte(pv->pv_pmap, va);
#ifdef M68K_MMU_HP
#ifdef CACHE_HAVE_VAC
/*
* Flush VAC to ensure we get correct state of HW bits