Some ARM32_PMAP_NEW-related cleanup:

* Define a new "MMU type", ARM_MMU_SA1.  While the SA-1's MMU is basically
  compatible with the generic ARM MMU, the SA-1 cache has no write-through
  mode, and it is useful to have an indication of this.
* Add a new PMAP_NEEDS_PTE_SYNC indicator, and try to evaluate it at
  compile time.  We evaluate it like so:
  - If SA-1-style MMU is the only type configured -> 1
  - If SA-1-style MMU is not configured -> 0
  - Otherwise, defer to a run-time variable.
  If PMAP_NEEDS_PTE_SYNC might evaluate to true (SA-1-only or run-time
  check), then we also define PMAP_INCLUDE_PTE_SYNC so that e.g. assembly
  code can include the necessary run-time support; a condensed sketch of
  this logic follows below.  PMAP_INCLUDE_PTE_SYNC largely replaces the
  ARM32_PMAP_NEEDS_PTE_SYNC manual setting Steve included with the
  original new pmap.
* In the new pmap, make pmap_pte_init_generic() check to see if the CPU
  has a write-back cache.  If so, init the PT cache mode to C=1,B=0 to get
  write-through mode.  Otherwise, init the PT cache mode to C=1,B=1.
* Add a new pmap_pte_init_arm8().  Old pmap, same as generic.  New pmap,
  sets page table cacheability to 0 (ARM8 has a write-back cache, but
  flushing it is quite expensive).
* In the new pmap, make pmap_pte_init_arm9() reset the PT cache mode to
  C=1,B=0.  The write-back check in pmap_pte_init_generic() gets this
  wrong for ARM9, because we currently run the ARM9 cache in write-through
  mode all the time.  (What this really tells me is that the test for a
  write-through cache is less than perfect, but we can fix that later.)
* Add a new pmap_pte_init_sa1().  Old pmap, same as generic.  New pmap,
  does generic initialization, then resets page table cache mode to
  C=1,B=1, since C=1,B=0 does not produce write-through on the SA-1.
thorpej 2003-04-22 00:24:48 +00:00
parent 645dee56e4
commit bbef46a7e9
9 changed files with 199 additions and 82 deletions
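
To make the PMAP_NEEDS_PTE_SYNC item concrete, here is a condensed sketch of
the logic this commit adds, paraphrased from the <arm/arm32/pmap.h> hunks
below (macro and variable names are the real ones; the surrounding
scaffolding is trimmed for illustration):

/*
 * Condensed from the pmap.h diff below.  ARM_MMU_SA1 and ARM_NMMUS come
 * from <arm/cpuconf.h>; pmap_needs_pte_sync is the run-time fallback
 * variable defined in pmap_new.c.
 */
#if (ARM_MMU_SA1 == 1) && (ARM_NMMUS == 1)
#define PMAP_NEEDS_PTE_SYNC 1   /* SA-1 is the only MMU type: always sync */
#define PMAP_INCLUDE_PTE_SYNC
#elif (ARM_MMU_SA1 == 0)
#define PMAP_NEEDS_PTE_SYNC 0   /* no SA-1 configured: never sync */
#endif

#ifndef PMAP_NEEDS_PTE_SYNC     /* mixed configuration: decide at run time */
#define PMAP_NEEDS_PTE_SYNC pmap_needs_pte_sync
#define PMAP_INCLUDE_PTE_SYNC
#endif

/*
 * C code then syncs PTEs conditionally; the test constant-folds away
 * whenever PMAP_NEEDS_PTE_SYNC is a compile-time 0 or 1.
 */
#define PTE_SYNC(pte)                                                   \
do {                                                                    \
        if (PMAP_NEEDS_PTE_SYNC)                                        \
                cpu_dcache_wb_range((vaddr_t)(pte), sizeof(pt_entry_t));\
} while (/*CONSTCOND*/0)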

File: cpufunc.c

@ -1,4 +1,4 @@
/* $NetBSD: cpufunc.c,v 1.57 2003/04/21 04:33:30 thorpej Exp $ */
/* $NetBSD: cpufunc.c,v 1.58 2003/04/22 00:24:48 thorpej Exp $ */
/*
* arm7tdmi support code Copyright (c) 2001 John Fremlin
@ -882,7 +882,7 @@ set_cpufuncs()
cpufuncs = arm8_cpufuncs;
cpu_reset_needs_v4_MMU_disable = 0; /* XXX correct? */
get_cachetype_cp15();
pmap_pte_init_generic();
pmap_pte_init_arm8();
return 0;
}
#endif /* CPU_ARM8 */
@ -900,7 +900,7 @@ set_cpufuncs()
cpufuncs = sa110_cpufuncs;
cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
get_cachetype_table();
pmap_pte_init_generic();
pmap_pte_init_sa1();
return 0;
}
#endif /* CPU_SA110 */
@ -909,7 +909,7 @@ set_cpufuncs()
cpufuncs = sa11x0_cpufuncs;
cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
get_cachetype_table();
pmap_pte_init_generic();
pmap_pte_init_sa1();
/* Use powersave on this CPU. */
cpu_do_powersave = 1;
@ -922,7 +922,7 @@ set_cpufuncs()
cpufuncs = sa11x0_cpufuncs;
cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
get_cachetype_table();
pmap_pte_init_generic();
pmap_pte_init_sa1();
/* Use powersave on this CPU. */
cpu_do_powersave = 1;
@ -935,7 +935,7 @@ set_cpufuncs()
cpufuncs = ixp12x0_cpufuncs;
cpu_reset_needs_v4_MMU_disable = 1;
get_cachetype_table();
pmap_pte_init_generic();
pmap_pte_init_sa1();
return 0;
}
#endif /* CPU_IXP12X0 */

File: cpuswitch.S

@ -1,4 +1,4 @@
/* $NetBSD: cpuswitch.S,v 1.30 2003/04/18 11:08:25 scw Exp $ */
/* $NetBSD: cpuswitch.S,v 1.31 2003/04/22 00:24:49 thorpej Exp $ */
/*
* Copyright 2003 Wasabi Systems, Inc.
@ -662,7 +662,7 @@ ENTRY(cpu_switch)
ldrne r0, [r9, #(PCB_L1VEC)] /* Fetch new vector_page value */
mcr p15, 0, r1, c3, c0, 0 /* Update DACR for new context */
cmpne r2, r0 /* Stuffing the same value? */
#ifndef ARM32_PMAP_NEEDS_PTE_SYNC
#ifndef PMAP_INCLUDE_PTE_SYNC
strne r0, [r7] /* Nope, update it */
#else
beq .Lcs_same_vector
@ -1010,7 +1010,7 @@ ENTRY(switch_exit)
cmpne r3, r2 /* Stuffing the same value? */
strne r2, [r0] /* Store if not. */
#ifdef ARM32_PMAP_NEEDS_PTE_SYNC
#ifdef PMAP_INCLUDE_PTE_SYNC
/*
* Need to sync the cache to make sure that last store is
* visible to the MMU.

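In C terms, the pattern this assembly guards looks roughly like the sketch
below (an illustrative paraphrase only; "l1vec" and "newval" are made-up
names for the L1 vector_page slot and its new value, r7 and r0/r2 in the
assembly above):

/* Hypothetical C rendering of the guarded store above. */
if (*l1vec != newval) {
        *l1vec = newval;        /* update the L1 vector_page entry */
#ifdef PMAP_INCLUDE_PTE_SYNC
        /*
         * With a write-back cache the store may still sit in the
         * D-cache; clean the line so the MMU table walk sees it.
         */
        cpu_dcache_wb_range((vaddr_t)l1vec, sizeof(*l1vec));
#endif
}
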
File: genassym.cf

@ -1,4 +1,4 @@
# $NetBSD: genassym.cf,v 1.22 2003/04/18 11:08:25 scw Exp $
# $NetBSD: genassym.cf,v 1.23 2003/04/22 00:24:49 thorpej Exp $
# Copyright (c) 1982, 1990 The Regents of the University of California.
# All rights reserved.
@ -67,6 +67,10 @@ define DOMAIN_CLIENT DOMAIN_CLIENT
define PMAP_DOMAIN_KERNEL PMAP_DOMAIN_KERNEL
endif
ifdef PMAP_INCLUDE_PTE_SYNC
define PMAP_INCLUDE_PTE_SYNC 1
endif
define PAGE_SIZE PAGE_SIZE
define UPAGES UPAGES
define PGSHIFT PGSHIFT

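This genassym.cf entry is what makes the option visible to assembly:
genassym emits the define into the generated assym.h, and the .S files are
run through the C preprocessor, so their #ifdefs can test it.
Schematically (a sketch, not the verbatim generated file):

/* assym.h (generated by genassym) -- present only when
 * <arm/arm32/pmap.h> defined the option: */
#define PMAP_INCLUDE_PTE_SYNC 1

/* ...which lets cpuswitch.S conditionally assemble the sync code: */
#ifdef PMAP_INCLUDE_PTE_SYNC
        /* PTE sync support assembled in */
#endif
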
File: pmap.c

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.130 2003/04/01 23:19:09 thorpej Exp $ */
/* $NetBSD: pmap.c,v 1.131 2003/04/22 00:24:49 thorpej Exp $ */
/*
* Copyright (c) 2002 Wasabi Systems, Inc.
@ -144,7 +144,7 @@
#include <machine/param.h>
#include <arm/arm32/katelib.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.130 2003/04/01 23:19:09 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.131 2003/04/22 00:24:49 thorpej Exp $");
#ifdef PMAP_DEBUG
#define PDEBUG(_lev_,_stat_) \
@ -1789,7 +1789,7 @@ pmap_clean_page(struct pv_entry *pv, boolean_t is_src)
* StrongARM accesses to non-cached pages are non-burst making writing
* _any_ bulk data very slow.
*/
#if ARM_MMU_GENERIC == 1
#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
void
pmap_zero_page_generic(paddr_t phys)
{
@ -1814,7 +1814,7 @@ pmap_zero_page_generic(paddr_t phys)
bzero_page(cdstp);
cpu_dcache_wbinv_range(cdstp, PAGE_SIZE);
}
#endif /* ARM_MMU_GENERIC == 1 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
#if ARM_MMU_XSCALE == 1
void
@ -1907,7 +1907,7 @@ pmap_pageidlezero(paddr_t phys)
* hook points. The same comment regarding cachability as in
* pmap_zero_page also applies here.
*/
#if ARM_MMU_GENERIC == 1
#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
void
pmap_copy_page_generic(paddr_t src, paddr_t dst)
{
@ -1949,7 +1949,7 @@ pmap_copy_page_generic(paddr_t src, paddr_t dst)
simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */
cpu_dcache_wbinv_range(cdstp, PAGE_SIZE);
}
#endif /* ARM_MMU_GENERIC == 1 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
#if ARM_MMU_XSCALE == 1
void
@ -3950,7 +3950,7 @@ pt_entry_t pte_l2_s_proto;
void (*pmap_copy_page_func)(paddr_t, paddr_t);
void (*pmap_zero_page_func)(paddr_t);
#if ARM_MMU_GENERIC == 1
#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
void
pmap_pte_init_generic(void)
{
@ -3976,6 +3976,16 @@ pmap_pte_init_generic(void)
pmap_zero_page_func = pmap_zero_page_generic;
}
#if defined(CPU_ARM8)
void
pmap_pte_init_arm8(void)
{
/* ARM8 is the same as generic in this pmap. */
pmap_pte_init_generic();
}
#endif /* CPU_ARM8 */
#if defined(CPU_ARM9)
void
pmap_pte_init_arm9(void)
@ -3994,6 +4004,16 @@ pmap_pte_init_arm9(void)
#endif /* CPU_ARM9 */
#endif /* ARM_MMU_GENERIC == 1 */
#if ARM_MMU_SA1 == 1
void
pmap_pte_init_sa1(void)
{
/* SA-1 is the same as generic in this pmap. */
pmap_pte_init_generic();
}
#endif /* ARM_MMU_SA1 == 1 */
#if ARM_MMU_XSCALE == 1
void
pmap_pte_init_xscale(void)

File: pmap_new.c

@ -1,4 +1,4 @@
/* $NetBSD: pmap_new.c,v 1.3 2003/04/18 23:46:12 thorpej Exp $ */
/* $NetBSD: pmap_new.c,v 1.4 2003/04/22 00:24:49 thorpej Exp $ */
/*
* Copyright 2003 Wasabi Systems, Inc.
@ -210,7 +210,7 @@
#include <machine/param.h>
#include <arm/arm32/katelib.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_new.c,v 1.3 2003/04/18 23:46:12 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap_new.c,v 1.4 2003/04/22 00:24:49 thorpej Exp $");
#ifdef PMAP_DEBUG
#define PDEBUG(_lev_,_stat_) \
@ -442,12 +442,22 @@ struct l2_dtable {
pool_cache_put(&pmap_l2dtable_cache, (l2))
static pt_entry_t *pmap_alloc_l2_ptp(paddr_t *);
#ifndef ARM32_PMAP_NEEDS_PTE_SYNC
#ifndef PMAP_INCLUDE_PTE_SYNC
static void pmap_free_l2_ptp(pt_entry_t *, paddr_t);
#else
static void pmap_free_l2_ptp(boolean_t, pt_entry_t *, paddr_t);
#endif
/*
* We try to map the page tables write-through, if possible. However, not
* all CPUs have a write-through cache mode, so on those we have to sync
* the cache when we frob page tables.
*
* We try to evaluate this at compile time, if possible. However, it's
* not always possible to do that, hence this run-time var.
*/
int pmap_needs_pte_sync;
/*
* Real definition of pv_entry.
*/
@ -702,7 +712,7 @@ pmap_alloc_l2_ptp(paddr_t *pap)
* Free an L2 descriptor table.
*/
static __inline void
#ifndef ARM32_PMAP_NEEDS_PTE_SYNC
#ifndef PMAP_INCLUDE_PTE_SYNC
pmap_free_l2_ptp(pt_entry_t *l2, paddr_t pa)
#else
pmap_free_l2_ptp(boolean_t need_sync, pt_entry_t *l2, paddr_t pa)
@ -710,7 +720,7 @@ pmap_free_l2_ptp(boolean_t need_sync, pt_entry_t *l2, paddr_t pa)
{
if (__predict_true(uvm.page_init_done)) {
#ifdef ARM32_PMAP_NEEDS_PTE_SYNC
#ifdef PMAP_INCLUDE_PTE_SYNC
/*
* Note: With a write-back cache, we may need to sync this
* L2 table before re-using it.
@ -763,10 +773,11 @@ pmap_is_cached(pmap_t pm)
* - There is no pmap active in the cache/tlb.
* - The specified pmap is 'active' in the cache/tlb.
*/
#ifdef ARM32_PMAP_NEEDS_PTE_SYNC
#ifdef PMAP_INCLUDE_PTE_SYNC
#define PTE_SYNC_CURRENT(pm, ptep) \
do { \
if (pmap_is_cached(pm)) \
if (PMAP_NEEDS_PTE_SYNC && \
pmap_is_cached(pm)) \
PTE_SYNC(ptep); \
} while (/*CONSTCOND*/0)
#else
@ -1241,7 +1252,7 @@ pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count)
/*
* Release the L2 descriptor table back to the pool cache.
*/
#ifndef ARM32_PMAP_NEEDS_PTE_SYNC
#ifndef PMAP_INCLUDE_PTE_SYNC
pmap_free_l2_ptp(ptep, l2b->l2b_phys);
#else
pmap_free_l2_ptp(!pmap_is_cached(pm), ptep, l2b->l2b_phys);
@ -1270,7 +1281,7 @@ pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count)
static int
pmap_l2ptp_ctor(void *arg, void *v, int flags)
{
#ifndef ARM32_PMAP_NEEDS_PTE_SYNC
#ifndef PMAP_INCLUDE_PTE_SYNC
struct l2_bucket *l2b;
pt_entry_t *ptep, pte;
vaddr_t va = (vaddr_t)v & ~PGOFSET;
@ -3298,7 +3309,7 @@ pmap_reference(pmap_t pm)
* StrongARM accesses to non-cached pages are non-burst making writing
* _any_ bulk data very slow.
*/
#if ARM_MMU_GENERIC == 1
#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
void
pmap_zero_page_generic(paddr_t phys)
{
@ -3323,7 +3334,7 @@ pmap_zero_page_generic(paddr_t phys)
bzero_page(cdstp);
cpu_dcache_wbinv_range(cdstp, PAGE_SIZE);
}
#endif /* ARM_MMU_GENERIC == 1 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
#if ARM_MMU_XSCALE == 1
void
@ -3417,7 +3428,7 @@ pmap_pageidlezero(paddr_t phys)
* hook points. The same comment regarding cachability as in
* pmap_zero_page also applies here.
*/
#if ARM_MMU_GENERIC == 1
#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
void
pmap_copy_page_generic(paddr_t src, paddr_t dst)
{
@ -3459,7 +3470,7 @@ pmap_copy_page_generic(paddr_t src, paddr_t dst)
simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */
cpu_dcache_wbinv_range(cdstp, PAGE_SIZE);
}
#endif /* ARM_MMU_GENERIC == 1 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
#if ARM_MMU_XSCALE == 1
void
@ -4483,7 +4494,7 @@ pt_entry_t pte_l2_s_proto;
void (*pmap_copy_page_func)(paddr_t, paddr_t);
void (*pmap_zero_page_func)(paddr_t);
#if ARM_MMU_GENERIC == 1
#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
void
pmap_pte_init_generic(void)
{
@ -4497,15 +4508,20 @@ pmap_pte_init_generic(void)
pte_l2_s_cache_mode = L2_B|L2_C;
pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;
#ifdef ARM32_PMAP_NEEDS_PTE_SYNC
pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
pte_l2_l_cache_mode_pt = L2_B|L2_C;
pte_l2_s_cache_mode_pt = L2_B|L2_C;
#else
pte_l1_s_cache_mode_pt = L1_S_C;
pte_l2_l_cache_mode_pt = L2_C;
pte_l2_s_cache_mode_pt = L2_C;
#endif
/*
* If we have a write-through cache, set B and C. If
* we have a write-back cache, then we assume setting
* only C will make those pages write-through.
*/
if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop) {
pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
pte_l2_l_cache_mode_pt = L2_B|L2_C;
pte_l2_s_cache_mode_pt = L2_B|L2_C;
} else {
pte_l1_s_cache_mode_pt = L1_S_C;
pte_l2_l_cache_mode_pt = L2_C;
pte_l2_s_cache_mode_pt = L2_C;
}
pte_l2_s_prot_u = L2_S_PROT_U_generic;
pte_l2_s_prot_w = L2_S_PROT_W_generic;
@ -4519,6 +4535,23 @@ pmap_pte_init_generic(void)
pmap_zero_page_func = pmap_zero_page_generic;
}
#if defined(CPU_ARM8)
void
pmap_pte_init_arm8(void)
{
/*
* ARM8 is compatible with generic, but we need to use
* the page tables uncached.
*/
pmap_pte_init_generic();
pte_l1_s_cache_mode_pt = 0;
pte_l2_l_cache_mode_pt = 0;
pte_l2_s_cache_mode_pt = 0;
}
#endif /* CPU_ARM8 */
#if defined(CPU_ARM9)
void
pmap_pte_init_arm9(void)
@ -4533,9 +4566,34 @@ pmap_pte_init_arm9(void)
pte_l1_s_cache_mode = L1_S_C;
pte_l2_l_cache_mode = L2_C;
pte_l2_s_cache_mode = L2_C;
pte_l1_s_cache_mode_pt = L1_S_C;
pte_l2_l_cache_mode_pt = L2_C;
pte_l2_s_cache_mode_pt = L2_C;
}
#endif /* CPU_ARM9 */
#endif /* ARM_MMU_GENERIC == 1 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
#if ARM_MMU_SA1 == 1
void
pmap_pte_init_sa1(void)
{
/*
* The StrongARM SA-1 cache does not have a write-through
* mode. So, do the generic initialization, then reset
* the page table cache mode to B=1,C=1, and note that
* the PTEs need to be sync'd.
*/
pmap_pte_init_generic();
pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
pte_l2_l_cache_mode_pt = L2_B|L2_C;
pte_l2_s_cache_mode_pt = L2_B|L2_C;
pmap_needs_pte_sync = 1;
}
#endif /* ARM_MMU_SA1 == 1*/
#if ARM_MMU_XSCALE == 1
void
@ -4553,15 +4611,9 @@ pmap_pte_init_xscale(void)
pte_l2_s_cache_mode = L2_B|L2_C;
pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;
#ifdef ARM32_PMAP_NEEDS_PTE_SYNC
pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
pte_l2_l_cache_mode_pt = L2_B|L2_C;
pte_l2_s_cache_mode_pt = L2_B|L2_C;
#else
pte_l1_s_cache_mode_pt = L1_S_C;
pte_l2_l_cache_mode_pt = L2_C;
pte_l2_s_cache_mode_pt = L2_C;
#endif
#ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE
/*

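For context, a typical call site for the PTE_SYNC_CURRENT() macro changed
above has this shape (a hypothetical, simplified excerpt; the real callers
are pmap_enter() and friends):

/* Hypothetical call-site sketch; "npte" and "ptep" stand in for the
 * real locals. */
*ptep = npte;                   /* store the new/changed PTE */
PTE_SYNC_CURRENT(pm, ptep);     /* cleans the cache line only when
                                 * PMAP_NEEDS_PTE_SYNC holds (at compile
                                 * time or run time) and pmap_is_cached(pm)
                                 * says the tables may be in the cache */
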
File: vm_machdep.c

@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.26 2003/04/18 11:08:26 scw Exp $ */
/* $NetBSD: vm_machdep.c,v 1.27 2003/04/22 00:24:49 thorpej Exp $ */
/*
* Copyright (c) 1994-1998 Mark Brinicombe.
@ -344,9 +344,6 @@ pagemove(from, to, size)
*fpte++ = 0;
size -= PAGE_SIZE;
}
#ifndef ARM32_PMAP_NEEDS_PTE_SYNC
(void)ptecnt;
#endif
PTE_SYNC_RANGE(vtopte((vaddr_t)from), ptecnt);
PTE_SYNC_RANGE(vtopte((vaddr_t)to), ptecnt);
//cpu_tlb_flushD();

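The dropped (void)ptecnt cast existed only to silence "unused variable"
warnings when the old PTE_SYNC_RANGE() compiled to nothing; the new macro
always expands to a statement that references its arguments, even when the
sync itself constant-folds away.  Roughly (restating the pmap.h hunks
below):

/* old, with syncs compiled out -- arguments never referenced: */
#define PTE_SYNC_RANGE(pte, cnt)        /* nothing */

/* new -- always a statement, so "pte" and "cnt" are always used: */
#define PTE_SYNC_RANGE(pte, cnt)                                        \
do {                                                                    \
        if (PMAP_NEEDS_PTE_SYNC)                                        \
                cpu_dcache_wb_range((vaddr_t)(pte), (cnt) << 2);        \
} while (/*CONSTCOND*/0)
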
File: pmap.h

@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.68 2003/04/18 23:45:50 thorpej Exp $ */
/* $NetBSD: pmap.h,v 1.69 2003/04/22 00:24:50 thorpej Exp $ */
/*
* Copyright (c) 2002, 2003 Wasabi Systems, Inc.
@ -395,34 +395,56 @@ vtophys(vaddr_t va)
return (pa);
}
#endif /* ARM32_PMAP_NEW */
/*
* The new pmap ensures that page-tables are always mapping Write-Thru.
* Thus, on some platforms we can run fast and loose and avoid syncing PTEs
* on every change.
*
* Actually, this may not work out quite as well as I'd planned.
* According to some documentation, the cache-mode "write-thru, unbuffered",
* as used by the pmap for page tables, may not work correctly on all types
* of cache.
* Unfortunately, not all CPUs have a write-through cache mode. So we
* define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
* and if there is the chance for PTE syncs to be needed, we define
* PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
* the code.
*/
#if !defined(ARM32_PMAP_NEW) || defined(ARM32_PMAP_NEEDS_PTE_SYNC)
#define PTE_SYNC(pte) \
cpu_dcache_wb_range((vaddr_t)(pte), sizeof(pt_entry_t))
#define PTE_FLUSH(pte) \
cpu_dcache_wbinv_range((vaddr_t)(pte), sizeof(pt_entry_t))
#define PTE_SYNC_RANGE(pte, cnt) \
cpu_dcache_wb_range((vaddr_t)(pte), (cnt) << 2) /* * sizeof(...) */
#define PTE_FLUSH_RANGE(pte, cnt) \
cpu_dcache_wbinv_range((vaddr_t)(pte), (cnt) << 2) /* * sizeof(...) */
#else
#define PTE_SYNC(x) /* no-op */
#define PTE_FLUSH(x) /* no-op */
#define PTE_SYNC_RANGE(x,y) /* no-op */
#define PTE_FLUSH_RANGE(x,y) /* no-op */
extern int pmap_needs_pte_sync;
#if defined(_KERNEL_OPT)
/*
* StrongARM SA-1 caches do not have a write-through mode. So, on these,
* we need to do PTE syncs. If only SA-1 is configured, then evaluate
* this at compile time.
*/
#if (ARM_MMU_SA1 == 1) && (ARM_NMMUS == 1)
#define PMAP_NEEDS_PTE_SYNC 1
#define PMAP_INCLUDE_PTE_SYNC
#elif (ARM_MMU_SA1 == 0)
#define PMAP_NEEDS_PTE_SYNC 0
#endif
#endif /* _KERNEL_OPT */
/*
* Provide a fallback in case we were not able to determine it at
* compile-time.
*/
#ifndef PMAP_NEEDS_PTE_SYNC
#define PMAP_NEEDS_PTE_SYNC pmap_needs_pte_sync
#define PMAP_INCLUDE_PTE_SYNC
#endif
#define PTE_SYNC(pte) \
do { \
if (PMAP_NEEDS_PTE_SYNC) \
cpu_dcache_wb_range((vaddr_t)(pte), sizeof(pt_entry_t));\
} while (/*CONSTCOND*/0)
#define PTE_SYNC_RANGE(pte, cnt) \
do { \
if (PMAP_NEEDS_PTE_SYNC) { \
cpu_dcache_wb_range((vaddr_t)(pte), \
(cnt) << 2); /* * sizeof(pt_entry_t) */ \
} \
} while (/*CONSTCOND*/0)
#endif /* ARM32_PMAP_NEW */
#define l1pte_valid(pde) ((pde) != 0)
#define l1pte_section_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_S)
@ -455,15 +477,22 @@ vtophys(vaddr_t va)
/************************* ARM MMU configuration *****************************/
#if ARM_MMU_GENERIC == 1
#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
void pmap_copy_page_generic(paddr_t, paddr_t);
void pmap_zero_page_generic(paddr_t);
void pmap_pte_init_generic(void);
#if defined(CPU_ARM8)
void pmap_pte_init_arm8(void);
#endif
#if defined(CPU_ARM9)
void pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#endif /* ARM_MMU_GENERIC == 1 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
#if ARM_MMU_SA1 == 1
void pmap_pte_init_sa1(void);
#endif /* ARM_MMU_SA1 == 1 */
#if ARM_MMU_XSCALE == 1
void pmap_copy_page_xscale(paddr_t, paddr_t);
@ -577,7 +606,7 @@ extern void (*pmap_zero_page_func)(paddr_t);
#define pmap_copy_page(s, d) (*pmap_copy_page_func)((s), (d))
#define pmap_zero_page(d) (*pmap_zero_page_func)((d))
#elif ARM_MMU_GENERIC == 1
#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
#define L2_S_PROT_U L2_S_PROT_U_generic
#define L2_S_PROT_W L2_S_PROT_W_generic
#define L2_S_PROT_MASK L2_S_PROT_MASK_generic

File: cpuconf.h

@ -1,4 +1,4 @@
/* $NetBSD: cpuconf.h,v 1.5 2003/04/09 02:34:31 thorpej Exp $ */
/* $NetBSD: cpuconf.h,v 1.6 2003/04/22 00:24:49 thorpej Exp $ */
/*
* Copyright (c) 2002 Wasabi Systems, Inc.
@ -42,6 +42,12 @@
#include "opt_cputypes.h"
#endif /* _KERNEL_OPT */
/*
* IF YOU CHANGE THIS FILE, MAKE SURE TO UPDATE THE DEFINITION OF
* "PMAP_NEEDS_PTE_SYNC" IN <arm/arm32/pmap.h> FOR THE CPU TYPE
* YOU ARE ADDING SUPPORT FOR.
*/
/*
* Step 1: Count the number of CPU types configured into the kernel.
*/
@ -108,6 +114,9 @@
*
* ARM_MMU_GENERIC Generic ARM MMU, compatible with ARM6.
*
* ARM_MMU_SA1 StrongARM SA-1 MMU. Compatible with generic
* ARM MMU, but has no write-through cache mode.
*
* ARM_MMU_XSCALE XScale MMU. Compatible with generic ARM
* MMU, but also has several extensions which
* require different PTE layout to use.
@ -121,13 +130,20 @@
#if !defined(_KERNEL_OPT) || \
(defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
defined(CPU_ARM8) || defined(CPU_ARM9) || defined(CPU_SA110) || \
defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0))
defined(CPU_ARM8) || defined(CPU_ARM9))
#define ARM_MMU_GENERIC 1
#else
#define ARM_MMU_GENERIC 0
#endif
#if !defined(_KERNEL_OPT) || \
(defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) ||\
defined(CPU_IXP12X0))
#define ARM_MMU_SA1 1
#else
#define ARM_MMU_SA1 0
#endif
#if !defined(_KERNEL_OPT) || \
(defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
defined(CPU_XSCALE_PXA2X0))
@ -137,7 +153,7 @@
#endif
#define ARM_NMMUS (ARM_MMU_MEMC + ARM_MMU_GENERIC + \
ARM_MMU_XSCALE)
ARM_MMU_SA1 + ARM_MMU_XSCALE)
#if ARM_NMMUS == 0
#error ARM_NMMUS is 0
#endif

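A worked example of the new counting, for a hypothetical kernel configured
with both CPU_ARM9 and CPU_SA110:

/*
 * opt_cputypes.h (generated from the kernel config) defines:
 *
 *   CPU_ARM9   -> ARM_MMU_GENERIC = 1
 *   CPU_SA110  -> ARM_MMU_SA1     = 1
 *   ARM_NMMUS  =  0 + 1 + 1 + 0   = 2
 *
 * In <arm/arm32/pmap.h>, neither compile-time branch fires (SA-1 is
 * configured but is not the only MMU type), so PMAP_NEEDS_PTE_SYNC
 * falls back to the pmap_needs_pte_sync variable, which
 * pmap_pte_init_sa1() sets to 1 if an SA-110 is actually detected.
 * An SA-1-only kernel (e.g. just CPU_SA1100) instead gets ARM_NMMUS = 1
 * and a compile-time constant PMAP_NEEDS_PTE_SYNC of 1.
 */
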
File: GENERIC (Shark)

@ -1,4 +1,4 @@
# $NetBSD: GENERIC,v 1.14 2003/04/18 11:11:51 scw Exp $
# $NetBSD: GENERIC,v 1.15 2003/04/22 00:24:50 thorpej Exp $
#
# Generic Shark configuration.
#
@ -7,7 +7,7 @@ include "arch/shark/conf/std.shark"
options INCLUDE_CONFIG_FILE # embed config file in kernel binary
#ident "GENERIC-$Revision: 1.14 $"
#ident "GENERIC-$Revision: 1.15 $"
# estimated number of users
maxusers 32
@ -137,7 +137,6 @@ options USERCONF # userconf(4) support
# Note: These are not defined in std.shark at this time to allow people
# with Sharks to experiment with the new pmap.
options ARM32_PMAP_NEW
options ARM32_PMAP_NEEDS_PTE_SYNC
# Development and Debugging options