Define 2 classes of ARM MMUs:

1. Generic (compatible with ARM6)
2. XScale (can be used as generic, but also has certain nifty extensions).

Define abstract PTE bit definitions for each MMU class.  If only one MMU
class is configured into the kernel (based on CPU_* options), then we
get the constants for that MMU class.  Otherwise we indirect through
variables set up via set_cpufuncs().

XXX The XScale bits are currently the same as the generic bits.  Baby steps.
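
For reference, a condensed sketch of the selection pattern (the full definitions are in the pmap.h hunk below; only the L2_S_PROTO name is shown here):

#if ARM_NMMUS > 1
/*
 * More than one MMU class (or an LKM build): indirect through a
 * variable that the pmap_pte_init_*() routine called from
 * set_cpufuncs() fills in at boot.
 */
#define L2_S_PROTO		pte_l2_s_proto
#elif ARM_MMU_GENERIC == 1
/* Exactly one MMU class configured: collapse to a compile-time constant. */
#define L2_S_PROTO		L2_S_PROTO_generic
#elif ARM_MMU_XSCALE == 1
#define L2_S_PROTO		L2_S_PROTO_xscale
#endif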
thorpej 2002-04-09 21:00:42 +00:00
parent 30453d09bc
commit c535f4ffc4
3 changed files with 237 additions and 22 deletions

cpufunc.c

@@ -1,4 +1,4 @@
/* $NetBSD: cpufunc.c,v 1.39 2002/04/05 16:58:03 thorpej Exp $ */
/* $NetBSD: cpufunc.c,v 1.40 2002/04/09 21:00:42 thorpej Exp $ */
/*
* arm7tdmi support code Copyright (c) 2001 John Fremlin
@@ -719,6 +719,7 @@ set_cpufuncs()
cpufuncs = arm6_cpufuncs;
cpu_reset_needs_v4_MMU_disable = 0;
get_cachetype_table();
pmap_pte_init_generic();
return 0;
}
#endif /* CPU_ARM6 */
@@ -729,6 +730,7 @@ set_cpufuncs()
cpufuncs = arm7_cpufuncs;
cpu_reset_needs_v4_MMU_disable = 0;
get_cachetype_table();
pmap_pte_init_generic();
return 0;
}
#endif /* CPU_ARM7 */
@@ -739,6 +741,7 @@ set_cpufuncs()
cpufuncs = arm7tdmi_cpufuncs;
cpu_reset_needs_v4_MMU_disable = 0;
get_cachetype_cp15();
pmap_pte_init_generic();
return 0;
}
#endif
@@ -748,15 +751,16 @@ set_cpufuncs()
cpufuncs = arm8_cpufuncs;
cpu_reset_needs_v4_MMU_disable = 0; /* XXX correct? */
get_cachetype_cp15();
pmap_pte_init_generic();
return 0;
}
#endif /* CPU_ARM8 */
#ifdef CPU_ARM9
if (cputype == CPU_ID_ARM920T) {
pte_cache_mode = L2_C; /* Select write-through cacheing. */
cpufuncs = arm9_cpufuncs;
cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
get_cachetype_cp15();
pmap_pte_init_arm9();
return 0;
}
#endif /* CPU_ARM9 */
@@ -766,6 +770,7 @@ set_cpufuncs()
cpufuncs = sa110_cpufuncs;
cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
get_cachetype_table();
pmap_pte_init_generic();
/*
* Enable the right variant of sleeping.
*/
@@ -811,7 +816,6 @@ set_cpufuncs()
:
: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
pte_cache_mode = L2_C; /* Select write-through cacheing. */
cpufuncs = xscale_cpufuncs;
/*
@@ -826,6 +830,7 @@ set_cpufuncs()
cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
get_cachetype_cp15();
pmap_pte_init_i80200();
return 0;
}
#endif /* CPU_XSCALE_80200 */
@@ -849,6 +854,7 @@ set_cpufuncs()
cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
get_cachetype_cp15();
pmap_pte_init_xscale();
return 0;
}
#endif /* CPU_XSCALE_80321 */

pmap.c

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.84 2002/04/09 19:44:22 thorpej Exp $ */
/* $NetBSD: pmap.c,v 1.85 2002/04/09 21:00:43 thorpej Exp $ */
/*
* Copyright (c) 2002 Wasabi Systems, Inc.
@@ -143,7 +143,7 @@
#include <machine/param.h>
#include <arm/arm32/katelib.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.84 2002/04/09 19:44:22 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.85 2002/04/09 21:00:43 thorpej Exp $");
#ifdef PMAP_DEBUG
#define PDEBUG(_lev_,_stat_) \
if (pmap_debug_level >= (_lev_)) \
@@ -3607,3 +3607,87 @@ pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
#endif
return (size);
}
/********************** PTE initialization routines **************************/
/*
* These routines are called when the CPU type is identified to set up
* the PTE prototypes, cache modes, etc.
*
* The variables are always here, just in case LKMs need to reference
* them (though, they shouldn't).
*/
pt_entry_t pte_cache_mode;
pt_entry_t pte_cache_mask;
pt_entry_t pte_l2_s_prot_u;
pt_entry_t pte_l2_s_prot_w;
pt_entry_t pte_l2_s_prot_mask;
pt_entry_t pte_l1_s_proto;
pt_entry_t pte_l1_c_proto;
pt_entry_t pte_l2_s_proto;
#if ARM_MMU_GENERIC == 1
void
pmap_pte_init_generic(void)
{
pte_cache_mode = L2_B|L2_C;
pte_cache_mask = L2_CACHE_MASK_generic;
pte_l2_s_prot_u = L2_S_PROT_U_generic;
pte_l2_s_prot_w = L2_S_PROT_W_generic;
pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;
pte_l1_s_proto = L1_S_PROTO_generic;
pte_l1_c_proto = L1_C_PROTO_generic;
pte_l2_s_proto = L2_S_PROTO_generic;
}
#if defined(CPU_ARM9)
void
pmap_pte_init_arm9(void)
{
/*
* ARM9 is compatible with generic, but we want to use
* write-through caching for now.
*/
pmap_pte_init_generic();
pte_cache_mode = L2_C;
}
#endif /* CPU_ARM9 */
#endif /* ARM_MMU_GENERIC == 1 */
#if ARM_MMU_XSCALE == 1
void
pmap_pte_init_xscale(void)
{
pte_cache_mode = L2_B|L2_C;
pte_cache_mask = L2_CACHE_MASK_xscale;
pte_l2_s_prot_u = L2_S_PROT_U_xscale;
pte_l2_s_prot_w = L2_S_PROT_W_xscale;
pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;
pte_l1_s_proto = L1_S_PROTO_xscale;
pte_l1_c_proto = L1_C_PROTO_xscale;
pte_l2_s_proto = L2_S_PROTO_xscale;
}
#if defined(CPU_XSCALE_80200)
void
pmap_pte_init_i80200(void)
{
/*
* Use write-through caching on the i80200.
*/
pmap_pte_init_xscale();
pte_cache_mode = L2_C;
}
#endif /* CPU_XSCALE_80200 */
#endif /* ARM_MMU_XSCALE == 1 */

pmap.h

@@ -1,4 +1,39 @@
/* $NetBSD: pmap.h,v 1.45 2002/04/09 19:37:17 thorpej Exp $ */
/* $NetBSD: pmap.h,v 1.46 2002/04/09 21:00:44 thorpej Exp $ */
/*
* Copyright (c) 2002 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Jason R. Thorpe for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC.
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 1994,1995 Mark Brinicombe.
@@ -219,6 +254,60 @@ extern vaddr_t pmap_curmaxkvaddr;
#define KERNEL_PD_SIZE \
(L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
/************************* ARM MMU configuration *****************************/
/*
* We define several classes of ARM MMU, here:
*
* ARM_MMU_GENERIC Generic ARM MMU, compatible with ARM6.
*
* ARM_MMU_XSCALE XScale MMU. Compatible with generic ARM
* MMU, but also has several extensions which
* require different PTE layout to use.
*/
#if defined(_LKM) || defined(CPU_ARM6) || defined(CPU_ARM7) || \
defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110)
#define ARM_MMU_GENERIC 1
void pmap_pte_init_generic(void);
#if defined(CPU_ARM9)
void pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#else
#define ARM_MMU_GENERIC 0
#endif
#if defined(_LKM) || defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
#define ARM_MMU_XSCALE 1
void pmap_pte_init_xscale(void);
#if defined(CPU_XSCALE_80200)
void pmap_pte_init_i80200(void);
#endif /* CPU_XSCALE_80200 */
#else
#define ARM_MMU_XSCALE 0
#endif
#define ARM_NMMUS (ARM_MMU_GENERIC + ARM_MMU_XSCALE)
#if ARM_NMMUS == 0
#error ARM_NMMUS is 0
#endif
extern pt_entry_t pte_cache_mode;
extern pt_entry_t pte_cache_mask;
extern pt_entry_t pte_l2_s_prot_u;
extern pt_entry_t pte_l2_s_prot_w;
extern pt_entry_t pte_l2_s_prot_mask;
extern pt_entry_t pte_l1_s_proto;
extern pt_entry_t pte_l1_c_proto;
extern pt_entry_t pte_l2_s_proto;
/*****************************************************************************/
/*
* tell MI code that the cache is virtually-indexed *and* virtually-tagged.
*/
@@ -240,11 +329,59 @@ extern vaddr_t pmap_curmaxkvaddr;
#define L2_L_PROT_W (L2_AP(AP_W))
#define L2_L_PROT_MASK (L2_L_PROT_U|L2_L_PROT_W)
#define L2_S_PROT_U (L2_AP(AP_U))
#define L2_S_PROT_W (L2_AP(AP_W))
#define L2_S_PROT_MASK (L2_S_PROT_U|L2_S_PROT_W)
#define L2_S_PROT_U_generic (L2_AP(AP_U))
#define L2_S_PROT_W_generic (L2_AP(AP_W))
#define L2_S_PROT_MASK_generic (L2_S_PROT_U|L2_S_PROT_W)
#define L2_CACHE_MASK (L2_B|L2_C)
#define L2_S_PROT_U_xscale (L2_AP(AP_U))
#define L2_S_PROT_W_xscale (L2_AP(AP_W))
#define L2_S_PROT_MASK_xscale (L2_S_PROT_U|L2_S_PROT_W)
#define L2_CACHE_MASK_generic (L2_B|L2_C)
#define L2_CACHE_MASK_xscale (L2_B|L2_C)
#define L1_S_PROTO_generic (L1_TYPE_S | L1_S_IMP)
#define L1_S_PROTO_xscale (L1_TYPE_S | L1_S_IMP) /* XXX IMP */
#define L1_C_PROTO_generic (L1_TYPE_C | L1_C_IMP2)
#define L1_C_PROTO_xscale (L1_TYPE_C | L1_C_IMP2) /* XXX IMP */
#define L2_L_PROTO (L2_TYPE_L)
#define L2_S_PROTO_generic (L2_TYPE_S)
#define L2_S_PROTO_xscale (L2_TYPE_S)
/*
* User-visible names for the ones that vary with MMU class.
*/
#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define L2_S_PROT_U pte_l2_s_prot_u
#define L2_S_PROT_W pte_l2_s_prot_w
#define L2_S_PROT_MASK pte_l2_s_prot_mask
#define L1_S_PROTO pte_l1_s_proto
#define L1_C_PROTO pte_l1_c_proto
#define L2_S_PROTO pte_l2_s_proto
#elif ARM_MMU_GENERIC == 1
#define L2_S_PROT_U L2_S_PROT_U_generic
#define L2_S_PROT_W L2_S_PROT_W_generic
#define L2_S_PROT_MASK L2_S_PROT_MASK_generic
#define L1_S_PROTO L1_S_PROTO_generic
#define L1_C_PROTO L1_C_PROTO_generic
#define L2_S_PROTO L2_S_PROTO_generic
#elif ARM_MMU_XSCALE == 1
#define L2_S_PROT_U L2_S_PROT_U_xscale
#define L2_S_PROT_W L2_S_PROT_W_xscale
#define L2_S_PROT_MASK L2_S_PROT_MASK_xscale
#define L1_S_PROTO L1_S_PROTO_xscale
#define L1_C_PROTO L1_C_PROTO_xscale
#define L2_S_PROTO L2_S_PROTO_xscale
#endif /* ARM_NMMUS > 1 */
/*
* These macros return various bits based on kernel/user and protection.
@@ -259,18 +396,6 @@ extern vaddr_t pmap_curmaxkvaddr;
#define L2_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
(((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
extern pt_entry_t pte_cache_mode;
/*
* The following macros are used to construct prototype PTEs.
*/
#define L1_S_PROTO (L1_TYPE_S | L1_S_IMP) /* XXX IMP */
#define L1_C_PROTO (L1_TYPE_C | L1_C_IMP2) /* XXX IMP */
#define L2_L_PROTO (L2_TYPE_L)
#define L2_S_PROTO (L2_TYPE_S)
#endif /* _KERNEL */
#endif /* _ARM32_PMAP_H_ */
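
For illustration only, a sketch of how the abstract names compose into a small-page PTE. The helper name pmap_pte_sketch and its arguments are hypothetical and not part of this commit; the real construction happens in pmap_enter() and related routines in pmap.c.

/*
 * Illustrative only: build a small-page PTE for a user mapping from the
 * abstract names defined above.  Hypothetical helper, not from the commit.
 */
static pt_entry_t
pmap_pte_sketch(paddr_t pa, vm_prot_t prot)
{
	pt_entry_t npte;

	npte  = L2_S_PROTO | pa;		/* small-page type bits + frame */
	npte |= L2_S_PROT(PTE_USER, prot);	/* access-permission bits */
	npte |= pte_cache_mode;			/* cacheability chosen at boot */
	return (npte);
}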