Keep track of which kernel PTs are available during bootstrap,

and let pmap_map_chunk() lookup the correct one to use for the
current VA.  Eliminate the "l2table" argument to pmap_map_chunk().

Add a second L2 table for mapping kernel text/data/bss on the
IQ80310 (fixes booting kernels with ramdisks).
This commit is contained in:
thorpej 2002-02-21 21:58:00 +00:00
parent 89e46d8a99
commit 79738a99e9
9 changed files with 278 additions and 291 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: rpc_machdep.c,v 1.21 2002/02/21 05:25:23 thorpej Exp $ */
/* $NetBSD: rpc_machdep.c,v 1.22 2002/02/21 21:58:00 thorpej Exp $ */
/*
* Copyright (c) 2000-2001 Reinoud Zandijk.
@ -57,7 +57,7 @@
#include <sys/param.h>
__RCSID("$NetBSD: rpc_machdep.c,v 1.21 2002/02/21 05:25:23 thorpej Exp $");
__RCSID("$NetBSD: rpc_machdep.c,v 1.22 2002/02/21 21:58:00 thorpej Exp $");
#include <sys/systm.h>
#include <sys/kernel.h>
@ -173,7 +173,7 @@ extern int pmap_debug_level;
#define KERNEL_PT_VMDATA_NUM (KERNEL_VM_SIZE >> (PDSHIFT + 2))
#define NUM_KERNEL_PTS (KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)
pt_entry_t kernel_pt_table[NUM_KERNEL_PTS];
pv_addr_t kernel_pt_table[NUM_KERNEL_PTS];
struct user *proc0paddr;
@ -615,7 +615,10 @@ initarm(void *cookie)
&& kernel_l1pt.pv_pa == 0) {
valloc_pages(kernel_l1pt, PD_SIZE / NBPG);
} else {
alloc_pages(kernel_pt_table[loop1], PT_SIZE / NBPG);
alloc_pages(kernel_pt_table[loop1].pv_pa,
PT_SIZE / NBPG);
kernel_pt_table[loop1].pv_va =
kernel_pt_table[loop1].pv_pa;
++loop1;
}
}
@ -683,16 +686,16 @@ initarm(void *cookie)
/* Map the L2 pages tables in the L1 page table */
pmap_link_l2pt(l1pagetable, 0x00000000,
kernel_pt_table[KERNEL_PT_SYS]);
&kernel_pt_table[KERNEL_PT_SYS]);
pmap_link_l2pt(l1pagetable, KERNEL_BASE,
kernel_pt_table[KERNEL_PT_KERNEL]);
&kernel_pt_table[KERNEL_PT_KERNEL]);
for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
kernel_pt_table[KERNEL_PT_VMDATA + loop]);
&kernel_pt_table[KERNEL_PT_VMDATA + loop]);
pmap_link_l2pt(l1pagetable, PROCESS_PAGE_TBLS_BASE,
kernel_ptpt.pv_pa);
&kernel_ptpt);
pmap_link_l2pt(l1pagetable, VMEM_VBASE,
kernel_pt_table[KERNEL_PT_VMEM]);
&kernel_pt_table[KERNEL_PT_VMEM]);
#ifdef VERBOSE_INIT_ARM
@ -700,7 +703,7 @@ initarm(void *cookie)
#endif
/* Now we fill in the L2 pagetable for the kernel code/data */
l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL];
l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL].pv_pa;
/*
* The defines are a workaround for a recent problem that occurred
@ -709,15 +712,15 @@ initarm(void *cookie)
*/
if (N_GETMAGIC(kernexec[0]) == ZMAGIC) {
#if defined(CPU_ARM6) || defined(CPU_ARM7)
logical = pmap_map_chunk(l1pagetable, l2pagetable,
KERNEL_TEXT_BASE, physical_start, kernexec->a_text,
logical = pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
physical_start, kernexec->a_text,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#else /* CPU_ARM6 || CPU_ARM7 */
logical = pmap_map_chunk(l1pagetable, l2pagetable,
KERNEL_TEXT_BASE, physical_start, kernexec->a_text,
logical = pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
physical_start, kernexec->a_text,
VM_PROT_READ, PTE_CACHE);
#endif /* CPU_ARM6 || CPU_ARM7 */
logical += pmap_map_chunk(l1pagetable, l2pagetable,
logical += pmap_map_chunk(l1pagetable,
KERNEL_TEXT_BASE + logical, physical_start + logical,
kerneldatasize - kernexec->a_text,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
@ -726,7 +729,7 @@ initarm(void *cookie)
* Most likely an ELF kernel ...
* XXX no distinction yet between read only and read/write area's ...
*/
pmap_map_chunk(l1pagetable, l2pagetable, KERNEL_TEXT_BASE,
pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
physical_start, kerneldatasize,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
};
@ -737,20 +740,16 @@ initarm(void *cookie)
#endif
/* Map the stack pages */
pmap_map_chunk(l1pagetable, l2pagetable, irqstack.pv_va,
irqstack.pv_pa, IRQ_STACK_SIZE * NBPG,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, l2pagetable, abtstack.pv_va,
abtstack.pv_pa, ABT_STACK_SIZE * NBPG,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, l2pagetable, undstack.pv_va,
undstack.pv_pa, UND_STACK_SIZE * NBPG,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, l2pagetable, kernelstack.pv_va,
kernelstack.pv_pa, UPAGES * NBPG,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
IRQ_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
ABT_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
UND_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
UPAGES * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, l2pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
PD_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/* Map the page table that maps the kernel pages */
@ -767,14 +766,11 @@ initarm(void *cookie)
* it but we would need the page tables if DRAM was in use.
* XXX please map two adjacent virtual areas to ONE physical area
*/
l2pagetable = kernel_pt_table[KERNEL_PT_VMEM];
pmap_map_chunk(l1pagetable, l2pagetable, VMEM_VBASE,
pmap_map_chunk(l1pagetable, VMEM_VBASE, videomemory.vidm_pbase,
videomemory.vidm_size, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, VMEM_VBASE + videomemory.vidm_size,
videomemory.vidm_pbase, videomemory.vidm_size,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, l2pagetable,
VMEM_VBASE + videomemory.vidm_size, videomemory.vidm_pbase,
videomemory.vidm_size, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
/*
@ -784,20 +780,20 @@ initarm(void *cookie)
/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
l2pagetable = kernel_ptpt.pv_pa;
pmap_map_entry(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_KERNEL], VM_PROT_READ|VM_PROT_WRITE,
kernel_pt_table[KERNEL_PT_KERNEL].pv_pa, VM_PROT_READ|VM_PROT_WRITE,
PTE_NOCACHE);
pmap_map_entry(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
kernel_ptpt.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(l2pagetable, (VMEM_VBASE >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_VMEM], VM_PROT_READ|VM_PROT_WRITE,
kernel_pt_table[KERNEL_PT_VMEM].pv_pa, VM_PROT_READ|VM_PROT_WRITE,
PTE_NOCACHE);
pmap_map_entry(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_SYS], VM_PROT_READ|VM_PROT_WRITE,
kernel_pt_table[KERNEL_PT_SYS].pv_pa, VM_PROT_READ|VM_PROT_WRITE,
PTE_NOCACHE);
for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop) {
pmap_map_entry(l2pagetable, ((KERNEL_VM_BASE +
(loop * 0x00400000)) >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_VMDATA + loop],
kernel_pt_table[KERNEL_PT_VMDATA + loop].pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
}
@ -805,7 +801,7 @@ initarm(void *cookie)
* Map the system page in the kernel page table for the bottom 1Meg
* of the virtual memory map.
*/
l2pagetable = kernel_pt_table[KERNEL_PT_SYS];
l2pagetable = kernel_pt_table[KERNEL_PT_SYS].pv_pa;
pmap_map_entry(l2pagetable, 0x0000000, systempage.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.45 2002/02/21 06:36:11 thorpej Exp $ */
/* $NetBSD: pmap.c,v 1.46 2002/02/21 21:58:01 thorpej Exp $ */
/*
* Copyright (c) 2001 Richard Earnshaw
@ -142,7 +142,7 @@
#include <machine/param.h>
#include <arm/arm32/katelib.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.45 2002/02/21 06:36:11 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.46 2002/02/21 21:58:01 thorpej Exp $");
#ifdef PMAP_DEBUG
#define PDEBUG(_lev_,_stat_) \
if (pmap_debug_level >= (_lev_)) \
@ -3722,6 +3722,27 @@ pmap_alloc_ptp(struct pmap *pmap, vaddr_t va, boolean_t just_try)
/************************ Bootstrapping routines ****************************/
/*
* This list exists for the benefit of pmap_map_chunk(). It keeps track
* of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
* find them as necessary.
*
* Note that the data on this list is not valid after initarm() returns.
*/
SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
/*
 * kernel_pt_lookup:
 *
 *	Given the physical address of a bootstrap kernel L2 table,
 *	return its virtual address, or 0 if the table is not on the
 *	kernel_pt_list (i.e. was never linked via pmap_link_l2pt()).
 *
 *	Only valid during bootstrap; the list data is stale once
 *	initarm() returns.
 */
static vaddr_t
kernel_pt_lookup(paddr_t pa)
{
	pv_addr_t *cur;

	for (cur = SLIST_FIRST(&kernel_pt_list); cur != NULL;
	     cur = SLIST_NEXT(cur, pv_list)) {
		if (cur->pv_pa == pa)
			return (cur->pv_va);
	}

	/* No match found. */
	return (0);
}
/*
* pmap_map_section:
*
@ -3767,17 +3788,19 @@ pmap_map_entry(vaddr_t l2pt, vaddr_t va, paddr_t pa, int prot, int cache)
* page table at the slot for "va".
*/
void
pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, paddr_t l2pa)
pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv)
{
pd_entry_t *pde = (pd_entry_t *) l1pt;
u_int slot = va >> PDSHIFT;
KASSERT((l2pa & PGOFSET) == 0);
KASSERT((l2pv->pv_pa & PGOFSET) == 0);
pde[slot + 0] = L1_PTE(l2pa + 0x000);
pde[slot + 1] = L1_PTE(l2pa + 0x400);
pde[slot + 2] = L1_PTE(l2pa + 0x800);
pde[slot + 3] = L1_PTE(l2pa + 0xc00);
pde[slot + 0] = L1_PTE(l2pv->pv_pa + 0x000);
pde[slot + 1] = L1_PTE(l2pv->pv_pa + 0x400);
pde[slot + 2] = L1_PTE(l2pv->pv_pa + 0x800);
pde[slot + 3] = L1_PTE(l2pv->pv_pa + 0xc00);
SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
}
/*
@ -3788,13 +3811,13 @@ pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, paddr_t l2pa)
* provided L1 and L2 tables at the specified virtual address.
*/
vsize_t
pmap_map_chunk(vaddr_t l1pt, vaddr_t l2pt, vaddr_t va, paddr_t pa,
vsize_t size, int prot, int cache)
pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
int prot, int cache)
{
pd_entry_t *pde = (pd_entry_t *) l1pt;
pt_entry_t *pte = (pt_entry_t *) l2pt;
pt_entry_t ap = (prot & VM_PROT_WRITE) ? AP_KRW : AP_KR;
pt_entry_t fl = (cache == PTE_CACHE) ? pte_cache_mode : 0;
pt_entry_t *pte;
vsize_t resid;
int i;
@ -3830,8 +3853,13 @@ pmap_map_chunk(vaddr_t l1pt, vaddr_t l2pt, vaddr_t va, paddr_t pa,
* for the current VA.
*/
if ((pde[va >> PDSHIFT] & L1_MASK) != L1_PAGE)
panic("pmap_map_chunk: no L2 table for VA 0x%08lx\n",
va);
panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va);
pte = (pt_entry_t *)
kernel_pt_lookup(pde[va >> PDSHIFT] & PG_FRAME);
if (pte == NULL)
panic("pmap_map_chunk: can't find L2 table for VA "
    "0x%08lx", va);
/* See if we can use a L2 large page mapping. */
if (((pa | va) & (L2_LPAGE_SIZE - 1)) == 0 &&
@ -3840,13 +3868,8 @@ pmap_map_chunk(vaddr_t l1pt, vaddr_t l2pt, vaddr_t va, paddr_t pa,
printf("L");
#endif
for (i = 0; i < 16; i++) {
#ifdef cats /* XXXJRT */
pte[((va >> PGSHIFT) & 0x7f0) + i] =
L2_LPTE(pa, ap, fl);
#else
pte[((va >> PGSHIFT) & 0x3f0) + i] =
L2_LPTE(pa, ap, fl);
#endif
}
va += L2_LPAGE_SIZE;
pa += L2_LPAGE_SIZE;
@ -3858,11 +3881,7 @@ pmap_map_chunk(vaddr_t l1pt, vaddr_t l2pt, vaddr_t va, paddr_t pa,
#ifdef VERBOSE_INIT_ARM
printf("P");
#endif
#ifdef cats /* XXXJRT */
pte[(va >> PGSHIFT) & 0x7ff] = L2_SPTE(pa, ap, fl);
#else
pte[(va >> PGSHIFT) & 0x3ff] = L2_SPTE(pa, ap, fl);
#endif
va += NBPG;
pa += NBPG;
resid -= NBPG;

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.27 2002/02/21 02:52:21 thorpej Exp $ */
/* $NetBSD: pmap.h,v 1.28 2002/02/21 21:58:01 thorpej Exp $ */
/*
* Copyright (c) 1994,1995 Mark Brinicombe.
@ -123,8 +123,8 @@ struct pv_head {
* entry address for each page hook.
*/
typedef struct {
vaddr_t va;
pt_entry_t *pte;
vaddr_t va;
pt_entry_t *pte;
} pagehook_t;
/*
@ -132,7 +132,8 @@ typedef struct {
* during bootstrapping) we need to keep track of the physical and virtual
* addresses of various pages
*/
typedef struct {
typedef struct pv_addr {
SLIST_ENTRY(pv_addr) pv_list;
paddr_t pv_pa;
vaddr_t pv_va;
} pv_addr_t;
@ -188,9 +189,8 @@ pt_entry_t *pmap_pte __P((struct pmap *, vaddr_t));
/* Bootstrapping routines. */
void pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t pmap_map_chunk(vaddr_t, vaddr_t, vaddr_t, paddr_t, vsize_t,
int, int);
void pmap_link_l2pt(vaddr_t, vaddr_t, paddr_t);
vsize_t pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
/*
* Special page zero routine for use by the idle loop (no cache cleans).

View File

@ -1,4 +1,4 @@
/* $NetBSD: cats_machdep.c,v 1.20 2002/02/21 05:25:24 thorpej Exp $ */
/* $NetBSD: cats_machdep.c,v 1.21 2002/02/21 21:58:02 thorpej Exp $ */
/*
* Copyright (c) 1997,1998 Mark Brinicombe.
@ -134,16 +134,16 @@ extern u_int undefined_handler_address;
extern int pmap_debug_level;
#endif
#define KERNEL_PT_SYS 0 /* Page table for mapping proc0 zero page */
#define KERNEL_PT_KERNEL 1 /* Page table for mapping kernel */
#define KERNEL_PT_KERNEL2 2 /* 2nd page table for mapping kernel */
#define KERNEL_PT_VMDATA 3 /* Page tables for mapping kernel VM */
#ifndef KERNEL_PT_VMDATA_NUM
#define KERNEL_PT_SYS 0 /* L2 table for mapping zero page */
#define KERNEL_PT_KERNEL 1 /* L2 table for mapping kernel */
#define KERNEL_PT_KERNEL_NUM 2
/* L2 tables for mapping kernel VM */
#define KERNEL_PT_VMDATA (KERNEL_PT_KERNEL + KERNEL_PT_KERNEL_NUM)
#define KERNEL_PT_VMDATA_NUM (KERNEL_VM_SIZE >> (PDSHIFT + 2))
#endif
#define NUM_KERNEL_PTS (KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)
pt_entry_t kernel_pt_table[NUM_KERNEL_PTS];
pv_addr_t kernel_pt_table[NUM_KERNEL_PTS];
struct user *proc0paddr;
@ -482,27 +482,14 @@ initarm(bootargs)
&& kernel_l1pt.pv_pa == 0) {
valloc_pages(kernel_l1pt, PD_SIZE / NBPG);
} else {
alloc_pages(kernel_pt_table[loop1], PT_SIZE / NBPG);
alloc_pages(kernel_pt_table[loop1].pv_pa,
    PT_SIZE / NBPG);
kernel_pt_table[loop1].pv_va =
    kernel_pt_table[loop1].pv_pa;
++loop1;
}
}
/*
* we require the that the pt's for mapping the kernel be
* contiguous, otherwise pmap_map_chunk etc will fail. To achieve
* this we swap the last 2 pt's for the kernel mapping pt's
*/
{
paddr_t tmp;
tmp = kernel_pt_table[KERNEL_PT_KERNEL];
kernel_pt_table[KERNEL_PT_KERNEL] = kernel_pt_table[NUM_KERNEL_PTS-2];
kernel_pt_table[NUM_KERNEL_PTS-2] = tmp;
tmp = kernel_pt_table[KERNEL_PT_KERNEL2];
kernel_pt_table[KERNEL_PT_KERNEL2] = kernel_pt_table[NUM_KERNEL_PTS-1];
kernel_pt_table[NUM_KERNEL_PTS-1] = tmp;
}
#ifdef DIAGNOSTIC
/* This should never be able to happen but better confirm that. */
if (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (PD_SIZE-1)) != 0)
@ -552,43 +539,41 @@ initarm(bootargs)
/* Map the L2 pages tables in the L1 page table */
pmap_link_l2pt(l1pagetable, 0x00000000,
kernel_pt_table[KERNEL_PT_SYS]);
pmap_link_l2pt(l1pagetable, KERNEL_BASE,
kernel_pt_table[KERNEL_PT_KERNEL]);
pmap_link_l2pt(l1pagetable, KERNEL_BASE + 0x00400000,
kernel_pt_table[KERNEL_PT_KERNEL2]);
&kernel_pt_table[KERNEL_PT_SYS]);
for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++)
pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
&kernel_pt_table[KERNEL_PT_KERNEL + loop]);
for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
kernel_pt_table[KERNEL_PT_VMDATA + loop]);
pmap_link_l2pt(l1pagetable, PROCESS_PAGE_TBLS_BASE,
kernel_ptpt.pv_pa);
&kernel_pt_table[KERNEL_PT_VMDATA + loop]);
pmap_link_l2pt(l1pagetable, PROCESS_PAGE_TBLS_BASE, &kernel_ptpt);
#ifdef VERBOSE_INIT_ARM
printf("Mapping kernel\n");
#endif
/* Now we fill in the L2 pagetable for the kernel static code/data */
l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL];
l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL].pv_pa;
if (N_GETMAGIC(kernexec[0]) != ZMAGIC)
panic("Illegal kernel format\n");
else {
extern int end;
logical = pmap_map_chunk(l1pagetable, l2pagetable,
KERNEL_TEXT_BASE,
logical = pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
physical_start, kernexec->a_text,
VM_PROT_READ, PTE_CACHE);
logical += pmap_map_chunk(l1pagetable, l2pagetable,
logical += pmap_map_chunk(l1pagetable,
KERNEL_TEXT_BASE + logical,
physical_start + logical, kernexec->a_data,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
logical += pmap_map_chunk(l1pagetable, l2pagetable,
logical += pmap_map_chunk(l1pagetable,
KERNEL_TEXT_BASE + logical,
physical_start + logical, kernexec->a_bss,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
logical += pmap_map_chunk(l1pagetable, l2pagetable,
logical += pmap_map_chunk(l1pagetable,
KERNEL_TEXT_BASE + logical,
physical_start + logical, kernexec->a_syms + sizeof(int)
+ *(u_int *)((int)&end + kernexec->a_syms + sizeof(int)),
@ -615,22 +600,17 @@ initarm(bootargs)
ebsabootinfo.bt_pargp, VM_PROT_READ, PTE_CACHE);
/* Map the stack pages */
pmap_map_chunk(l1pagetable, l2pagetable, irqstack.pv_va,
irqstack.pv_pa, IRQ_STACK_SIZE * NBPG,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, l2pagetable, abtstack.pv_va,
abtstack.pv_pa, ABT_STACK_SIZE * NBPG,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, l2pagetable, undstack.pv_va,
undstack.pv_pa, UND_STACK_SIZE * NBPG,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, l2pagetable, kernelstack.pv_va,
kernelstack.pv_pa, UPAGES * NBPG,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
IRQ_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
ABT_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
UND_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
UPAGES * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, l2pagetable, kernel_l1pt.pv_va,
kernel_l1pt.pv_pa, PD_SIZE,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
PD_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/* Map the page table that maps the kernel pages */
pmap_map_entry(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa,
@ -642,30 +622,29 @@ initarm(bootargs)
*/
/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
l2pagetable = kernel_ptpt.pv_pa;
pmap_map_entry(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_KERNEL],
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(l2pagetable, ((KERNEL_BASE +0x00400000) >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_KERNEL2],
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++)
pmap_map_entry(l2pagetable, ((KERNEL_BASE +
(loop * 0x00400000)) >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_KERNEL + loop].pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
kernel_ptpt.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_SYS],
kernel_pt_table[KERNEL_PT_SYS].pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
pmap_map_entry(l2pagetable, ((KERNEL_VM_BASE +
(loop * 0x00400000)) >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_VMDATA + loop],
kernel_pt_table[KERNEL_PT_VMDATA + loop].pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/*
* Map the system page in the kernel page table for the bottom 1Meg
* of the virtual memory map.
*/
l2pagetable = kernel_pt_table[KERNEL_PT_SYS];
l2pagetable = kernel_pt_table[KERNEL_PT_SYS].pv_pa;
pmap_map_entry(l2pagetable, 0x00000000, systempage.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

View File

@ -1,4 +1,4 @@
/* $NetBSD: integrator_machdep.c,v 1.13 2002/02/21 05:25:24 thorpej Exp $ */
/* $NetBSD: integrator_machdep.c,v 1.14 2002/02/21 21:58:02 thorpej Exp $ */
/*
* Copyright (c) 2001 ARM Ltd
@ -157,7 +157,7 @@ extern int pmap_debug_level;
#define KERNEL_PT_VMDATA_NUM (KERNEL_VM_SIZE >> (PDSHIFT + 2))
#define NUM_KERNEL_PTS (KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)
pt_entry_t kernel_pt_table[NUM_KERNEL_PTS];
pv_addr_t kernel_pt_table[NUM_KERNEL_PTS];
struct user *proc0paddr;
@ -561,8 +561,11 @@ initarm(bootinfo)
&& kernel_l1pt.pv_pa == 0) {
valloc_pages(kernel_l1pt, PD_SIZE / NBPG);
} else {
alloc_pages(kernel_pt_table[loop1], PT_SIZE / NBPG);
alloc_pages(kernel_pt_table[loop1].pv_pa,
    PT_SIZE / NBPG);
kernel_pt_table[loop1].pv_va =
    kernel_pt_table[loop1].pv_pa;
++loop1;
}
}
@ -613,21 +616,21 @@ initarm(bootinfo)
/* Map the L2 pages tables in the L1 page table */
pmap_link_l2pt(l1pagetable, 0x00000000,
kernel_pt_table[KERNEL_PT_SYS]);
&kernel_pt_table[KERNEL_PT_SYS]);
pmap_link_l2pt(l1pagetable, KERNEL_BASE,
kernel_pt_table[KERNEL_PT_KERNEL]);
&kernel_pt_table[KERNEL_PT_KERNEL]);
for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
kernel_pt_table[KERNEL_PT_VMDATA + loop]);
&kernel_pt_table[KERNEL_PT_VMDATA + loop]);
pmap_link_l2pt(l1pagetable, PROCESS_PAGE_TBLS_BASE,
kernel_ptpt.pv_pa);
&kernel_ptpt);
#ifdef VERBOSE_INIT_ARM
printf("Mapping kernel\n");
#endif
/* Now we fill in the L2 pagetable for the kernel static code/data */
l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL];
l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL].pv_pa;
{
u_int logical;
@ -638,18 +641,18 @@ initarm(bootinfo)
*/
textsize = textsize & ~PGOFSET;
totalsize = (totalsize + PGOFSET) & ~PGOFSET;
/* logical = pmap_map_chunk(l1pagetable, l2pagetable,
/* logical = pmap_map_chunk(l1pagetable,
KERNEL_BASE, physical_start, KERNEL_TEXT_BASE - KERNEL_BASE,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); */
logical = pmap_map_chunk(l1pagetable, l2pagetable,
logical = pmap_map_chunk(l1pagetable,
KERNEL_TEXT_BASE, physical_start, textsize,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
logical += pmap_map_chunk(l1pagetable, l2pagetable,
logical += pmap_map_chunk(l1pagetable,
KERNEL_TEXT_BASE + logical, physical_start + logical,
totalsize - textsize, VM_PROT_READ|VM_PROT_WRITE,
PTE_CACHE);
#if 0
logical += pmap_map_chunk(l1pagetable, l2pagetable,
logical += pmap_map_chunk(l1pagetable,
KERNEL_BASE + logical,
physical_start + logical, kernexec->a_syms + sizeof(int)
+ *(u_int *)((int)end + kernexec->a_syms + sizeof(int)),
@ -668,22 +671,17 @@ initarm(bootinfo)
#endif
/* Map the stack pages */
pmap_map_chunk(l1pagetable, l2pagetable, irqstack.pv_va,
irqstack.pv_pa, IRQ_STACK_SIZE * NBPG,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, l2pagetable, abtstack.pv_va,
abtstack.pv_pa, ABT_STACK_SIZE * NBPG,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, l2pagetable, undstack.pv_va,
undstack.pv_pa, UND_STACK_SIZE * NBPG,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, l2pagetable, kernelstack.pv_va,
kernelstack.pv_pa, UPAGES * NBPG,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
IRQ_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
ABT_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
UND_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
UPAGES * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, l2pagetable, kernel_l1pt.pv_va,
kernel_l1pt.pv_pa, PD_SIZE,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
PD_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/* Map the page table that maps the kernel pages */
pmap_map_entry(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa,
@ -696,25 +694,25 @@ initarm(bootinfo)
/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
l2pagetable = kernel_ptpt.pv_pa;
pmap_map_entry(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_KERNEL],
kernel_pt_table[KERNEL_PT_KERNEL].pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
kernel_ptpt.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_SYS],
kernel_pt_table[KERNEL_PT_SYS].pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
pmap_map_entry(l2pagetable, ((KERNEL_VM_BASE +
(loop * 0x00400000)) >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_VMDATA + loop],
kernel_pt_table[KERNEL_PT_VMDATA + loop].pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/*
* Map the system page in the kernel page table for the bottom 1Meg
* of the virtual memory map.
*/
l2pagetable = kernel_pt_table[KERNEL_PT_SYS];
l2pagetable = kernel_pt_table[KERNEL_PT_SYS].pv_pa;
#if 1
/* MULTI-ICE requires that page 0 is NC/NB so that it can download
the cache-clean code there. */

View File

@ -1,4 +1,4 @@
/* $NetBSD: iq80310_machdep.c,v 1.26 2002/02/21 05:25:25 thorpej Exp $ */
/* $NetBSD: iq80310_machdep.c,v 1.27 2002/02/21 21:58:02 thorpej Exp $ */
/*
* Copyright (c) 2001, 2002 Wasabi Systems, Inc.
@ -162,14 +162,20 @@ extern u_int undefined_handler_address;
extern int pmap_debug_level;
#endif
#define KERNEL_PT_SYS 0 /* Page table for mapping proc0 zero page */
#define KERNEL_PT_KERNEL 1 /* Page table for mapping kernel */
#define KERNEL_PT_IOPXS 2 /* Page table for mapping i80312 */
#define KERNEL_PT_VMDATA 3 /* Page tables for mapping kernel VM */
#define KERNEL_PT_SYS 0 /* L2 table for mapping zero page */
#define KERNEL_PT_KERNEL 1 /* L2 table for mapping kernel */
#define KERNEL_PT_KERNEL_NUM 2
/* L2 table for mapping i80312 */
#define KERNEL_PT_IOPXS (KERNEL_PT_KERNEL + KERNEL_PT_KERNEL_NUM)
/* L2 tables for mapping kernel VM */
#define KERNEL_PT_VMDATA (KERNEL_PT_IOPXS + 1)
#define KERNEL_PT_VMDATA_NUM (KERNEL_VM_SIZE >> (PDSHIFT + 2))
#define NUM_KERNEL_PTS (KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)
pt_entry_t kernel_pt_table[NUM_KERNEL_PTS];
pv_addr_t kernel_pt_table[NUM_KERNEL_PTS];
struct user *proc0paddr;
@ -525,7 +531,10 @@ initarm(void *arg)
&& kernel_l1pt.pv_pa == 0) {
valloc_pages(kernel_l1pt, PD_SIZE / NBPG);
} else {
alloc_pages(kernel_pt_table[loop1], PT_SIZE / NBPG);
alloc_pages(kernel_pt_table[loop1].pv_pa,
PT_SIZE / NBPG);
kernel_pt_table[loop1].pv_va =
kernel_pt_table[loop1].pv_pa;
++loop1;
}
}
@ -590,23 +599,23 @@ initarm(void *arg)
/* Map the L2 pages tables in the L1 page table */
pmap_link_l2pt(l1pagetable, 0x00000000,
kernel_pt_table[KERNEL_PT_SYS]);
pmap_link_l2pt(l1pagetable, KERNEL_BASE,
kernel_pt_table[KERNEL_PT_KERNEL]);
&kernel_pt_table[KERNEL_PT_SYS]);
for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++)
pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
&kernel_pt_table[KERNEL_PT_KERNEL + loop]);
pmap_link_l2pt(l1pagetable, IQ80310_IOPXS_VBASE,
kernel_pt_table[KERNEL_PT_IOPXS]);
for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
&kernel_pt_table[KERNEL_PT_IOPXS]);
for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
kernel_pt_table[KERNEL_PT_VMDATA + loop]);
pmap_link_l2pt(l1pagetable, PROCESS_PAGE_TBLS_BASE,
kernel_ptpt.pv_pa);
&kernel_pt_table[KERNEL_PT_VMDATA + loop]);
pmap_link_l2pt(l1pagetable, PROCESS_PAGE_TBLS_BASE, &kernel_ptpt);
#ifdef VERBOSE_INIT_ARM
printf("Mapping kernel\n");
#endif
/* Now we fill in the L2 pagetable for the kernel static code/data */
l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL];
l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL].pv_va;
{
extern char etext[], _end[];
@ -622,18 +631,15 @@ initarm(void *arg)
/*
* This maps the kernel text/data/bss VA==PA.
*/
logical += pmap_map_chunk(l1pagetable, l2pagetable,
KERNEL_BASE + logical,
logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
physical_start + logical, textsize,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
logical += pmap_map_chunk(l1pagetable, l2pagetable,
KERNEL_BASE + logical,
logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
physical_start + logical, totalsize - textsize,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#if 0 /* XXX No symbols yet. */
logical += pmap_map_chunk(l1pagetable, l2pagetable,
KERNEL_BASE + logical,
logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
physical_start + logical, kernexec->a_syms + sizeof(int)
+ *(u_int *)((int)end + kernexec->a_syms + sizeof(int)),
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
@ -645,26 +651,21 @@ initarm(void *arg)
#endif
/* Map the stack pages */
pmap_map_chunk(l1pagetable, l2pagetable, irqstack.pv_va,
irqstack.pv_pa, IRQ_STACK_SIZE * NBPG,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, l2pagetable, abtstack.pv_va,
abtstack.pv_pa, ABT_STACK_SIZE * NBPG,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, l2pagetable, undstack.pv_va,
undstack.pv_pa, UND_STACK_SIZE * NBPG,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, l2pagetable, kernelstack.pv_va,
kernelstack.pv_pa, UPAGES * NBPG,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
IRQ_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
ABT_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
UND_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
UPAGES * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, l2pagetable, kernel_l1pt.pv_va,
kernel_l1pt.pv_pa, PD_SIZE,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
PD_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/* Map the Mini-Data cache clean area. */
pmap_map_chunk(l1pagetable, l2pagetable, minidataclean.pv_va,
minidataclean.pv_pa, NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, minidataclean.pv_va, minidataclean.pv_pa,
NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
/* Map the page table that maps the kernel pages */
pmap_map_entry(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa,
@ -676,26 +677,28 @@ initarm(void *arg)
*/
/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
l2pagetable = kernel_ptpt.pv_pa;
pmap_map_entry(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_KERNEL],
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++)
pmap_map_entry(l2pagetable, ((KERNEL_BASE +
(loop * 0x00400000)) >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_KERNEL + loop].pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
kernel_ptpt.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_SYS],
kernel_pt_table[KERNEL_PT_SYS].pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
pmap_map_entry(l2pagetable, ((KERNEL_VM_BASE +
(loop * 0x00400000)) >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_VMDATA + loop],
kernel_pt_table[KERNEL_PT_VMDATA + loop].pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/*
* Map the system page in the kernel page table for the bottom 1Meg
* of the virtual memory map.
*/
l2pagetable = kernel_pt_table[KERNEL_PT_SYS];
l2pagetable = kernel_pt_table[KERNEL_PT_SYS].pv_va;
pmap_map_entry(l2pagetable, 0x00000000, systempage.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
@ -724,14 +727,14 @@ initarm(void *arg)
* Map the PCI I/O spaces and i80312 registers. These are too
* small to be mapped w/ section mappings.
*/
l2pagetable = kernel_pt_table[KERNEL_PT_IOPXS];
l2pagetable = kernel_pt_table[KERNEL_PT_IOPXS].pv_va;
#ifdef VERBOSE_INIT_ARM
printf("Mapping PIOW 0x%08lx -> 0x%08lx @ 0x%08lx\n",
I80312_PCI_XLATE_PIOW_BASE,
I80312_PCI_XLATE_PIOW_BASE + I80312_PCI_XLATE_IOSIZE - 1,
IQ80310_PIOW_VBASE);
#endif
pmap_map_chunk(l1pagetable, l2pagetable, IQ80310_PIOW_VBASE,
pmap_map_chunk(l1pagetable, IQ80310_PIOW_VBASE,
I80312_PCI_XLATE_PIOW_BASE, I80312_PCI_XLATE_IOSIZE,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
@ -741,7 +744,7 @@ initarm(void *arg)
I80312_PCI_XLATE_SIOW_BASE + I80312_PCI_XLATE_IOSIZE - 1,
IQ80310_SIOW_VBASE);
#endif
pmap_map_chunk(l1pagetable, l2pagetable, IQ80310_SIOW_VBASE,
pmap_map_chunk(l1pagetable, IQ80310_SIOW_VBASE,
I80312_PCI_XLATE_SIOW_BASE, I80312_PCI_XLATE_IOSIZE,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
@ -751,7 +754,7 @@ initarm(void *arg)
I80312_PMMR_BASE + I80312_PMMR_SIZE - 1,
IQ80310_80312_VBASE);
#endif
pmap_map_chunk(l1pagetable, l2pagetable, IQ80310_80312_VBASE,
pmap_map_chunk(l1pagetable, IQ80310_80312_VBASE,
I80312_PMMR_BASE, I80312_PMMR_SIZE,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);

View File

@ -1,4 +1,4 @@
/* $NetBSD: hpc_machdep.c,v 1.32 2002/02/21 05:25:25 thorpej Exp $ */
/* $NetBSD: hpc_machdep.c,v 1.33 2002/02/21 21:58:02 thorpej Exp $ */
/*
* Copyright (c) 1994-1998 Mark Brinicombe.
@ -158,7 +158,7 @@ extern int pmap_debug_level;
#define KERNEL_PT_VMDATA_NUM (KERNEL_VM_SIZE >> (PDSHIFT + 2))
#define NUM_KERNEL_PTS (KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)
pt_entry_t kernel_pt_table[NUM_KERNEL_PTS];
pv_addr_t kernel_pt_table[NUM_KERNEL_PTS];
struct user *proc0paddr;
@ -426,7 +426,8 @@ initarm(argc, argv, bi)
valloc_pages(kernel_l1pt, PD_SIZE / NBPG);
for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
alloc_pages(kernel_pt_table[loop], PT_SIZE / NBPG);
alloc_pages(kernel_pt_table[loop].pv_pa, PT_SIZE / NBPG);
kernel_pt_table[loop].pv_va = kernel_pt_table[loop].pv_pa;
}
/*
@ -497,17 +498,17 @@ initarm(argc, argv, bi)
/* Map the L2 pages tables in the L1 page table */
pmap_link_l2pt(l1pagetable, 0x00000000,
kernel_pt_table[KERNEL_PT_SYS]);
&kernel_pt_table[KERNEL_PT_SYS]);
pmap_link_l2pt(l1pagetable, KERNEL_SPACE_START,
kernel_pt_table[KERNEL_PT_KERNEL]);
&kernel_pt_table[KERNEL_PT_KERNEL]);
for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
kernel_pt_table[KERNEL_PT_VMDATA + loop]);
&kernel_pt_table[KERNEL_PT_VMDATA + loop]);
pmap_link_l2pt(l1pagetable, PROCESS_PAGE_TBLS_BASE,
kernel_ptpt.pv_pa);
&kernel_ptpt);
#define SAIPIO_BASE 0xd0000000 /* XXX XXX */
pmap_link_l2pt(l1pagetable, SAIPIO_BASE,
kernel_pt_table[KERNEL_PT_IO]);
&kernel_pt_table[KERNEL_PT_IO]);
#ifdef VERBOSE_INIT_ARM
@ -515,7 +516,7 @@ initarm(argc, argv, bi)
#endif
/* Now we fill in the L2 pagetable for the kernel code/data */
l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL];
l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL].pv_pa;
/*
* XXX there is no ELF header to find RO region.
@ -523,17 +524,16 @@ initarm(argc, argv, bi)
*/
#if 0
if (N_GETMAGIC(kernexec[0]) == ZMAGIC) {
logical = pmap_map_chunk(l1pagetable, l2pagetable,
KERNEL_TEXT_BASE,
logical = pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
physical_start, kernexec->a_text,
VM_PROT_READ, PTE_CACHE);
logical += pmap_map_chunk(l1pagetable, l2pagetable,
logical += pmap_map_chunk(l1pagetable,
KERNEL_TEXT_BASE + logical, physical_start + logical,
kerneldatasize - kernexec->a_text,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
} else
#endif
pmap_map_chunk(l1pagetable, l2pagetable, KERNEL_TEXT_BASE,
pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
KERNEL_TEXT_BASE, kerneldatasize,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
@ -542,23 +542,18 @@ initarm(argc, argv, bi)
#endif
/* Map the stack pages */
l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL];
pmap_map_chunk(l1pagetable, l2pagetable, irqstack.pv_va,
irqstack.pv_pa, IRQ_STACK_SIZE * NBPG,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, l2pagetable, abtstack.pv_va,
abtstack.pv_pa, ABT_STACK_SIZE * NBPG,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, l2pagetable, undstack.pv_va,
undstack.pv_pa, UND_STACK_SIZE * NBPG,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, l2pagetable, kernelstack.pv_va,
kernelstack.pv_pa, UPAGES * NBPG,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL].pv_pa;
pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
IRQ_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
ABT_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
UND_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
UPAGES * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, l2pagetable, kernel_l1pt.pv_va,
kernel_l1pt.pv_pa, PD_SIZE,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
PD_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/* Map the page table that maps the kernel pages */
pmap_map_entry(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa,
@ -575,45 +570,44 @@ initarm(argc, argv, bi)
/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
l2pagetable = kernel_ptpt.pv_pa;
pmap_map_entry(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_SYS],
kernel_pt_table[KERNEL_PT_SYS].pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(l2pagetable, (KERNEL_SPACE_START >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_KERNEL],
kernel_pt_table[KERNEL_PT_KERNEL].pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_KERNEL],
kernel_pt_table[KERNEL_PT_KERNEL].pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop) {
pmap_map_entry(l2pagetable, ((KERNEL_VM_BASE +
(loop * 0x00400000)) >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_VMDATA + loop],
kernel_pt_table[KERNEL_PT_VMDATA + loop].pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
}
pmap_map_entry(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
kernel_ptpt.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(l2pagetable, (SAIPIO_BASE >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_IO], VM_PROT_READ|VM_PROT_WRITE,
kernel_pt_table[KERNEL_PT_IO].pv_pa, VM_PROT_READ|VM_PROT_WRITE,
PTE_NOCACHE);
/*
* Map the system page in the kernel page table for the bottom 1Meg
* of the virtual memory map.
*/
l2pagetable = kernel_pt_table[KERNEL_PT_SYS];
l2pagetable = kernel_pt_table[KERNEL_PT_SYS].pv_pa;
pmap_map_entry(l2pagetable, 0x0000000, systempage.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
/* Map any I/O modules here, as we don't have real bus_space_map() */
printf("mapping IO...");
l2pagetable = kernel_pt_table[KERNEL_PT_IO];
l2pagetable = kernel_pt_table[KERNEL_PT_IO].pv_pa;
pmap_map_entry(l2pagetable, SACOM3_BASE, SACOM3_HW_BASE,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
#ifdef CPU_SA110
l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL];
pmap_map_chunk(l1pagetable, l2pagetable, sa110_cache_clean_addr,
0xe0000000, CPU_SA110_CACHE_CLEAN_SIZE,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL].pv_pa;
pmap_map_chunk(l1pagetable, sa110_cache_clean_addr, 0xe0000000,
CPU_SA110_CACHE_CLEAN_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#endif
/*
* Now we have the real page tables in place so we can switch to them.

View File

@ -1,4 +1,4 @@
/* $NetBSD: netwinder_machdep.c,v 1.20 2002/02/21 05:25:25 thorpej Exp $ */
/* $NetBSD: netwinder_machdep.c,v 1.21 2002/02/21 21:58:03 thorpej Exp $ */
/*
* Copyright (c) 1997,1998 Mark Brinicombe.
@ -147,7 +147,7 @@ extern int pmap_debug_level;
#define KERNEL_PT_VMDATA_NUM (KERNEL_VM_SIZE >> (PDSHIFT + 2))
#define NUM_KERNEL_PTS (KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)
pt_entry_t kernel_pt_table[NUM_KERNEL_PTS];
pv_addr_t kernel_pt_table[NUM_KERNEL_PTS];
struct user *proc0paddr;
@ -519,7 +519,10 @@ initarm(bootinfo)
&& kernel_l1pt.pv_pa == 0) {
valloc_pages(kernel_l1pt, PD_SIZE / NBPG);
} else {
alloc_pages(kernel_pt_table[loop1], PT_SIZE / NBPG);
alloc_pages(kernel_pt_table[loop1].pv_pa,
PT_SIZE / NBPG);
kernel_pt_table[loop1].pv_va =
kernel_pt_table[loop1].pv_pa;
++loop1;
}
}
@ -571,21 +574,21 @@ initarm(bootinfo)
/* Map the L2 pages tables in the L1 page table */
pmap_link_l2pt(l1pagetable, 0x00000000,
kernel_pt_table[KERNEL_PT_SYS]);
&kernel_pt_table[KERNEL_PT_SYS]);
pmap_link_l2pt(l1pagetable, KERNEL_BASE,
kernel_pt_table[KERNEL_PT_KERNEL]);
&kernel_pt_table[KERNEL_PT_KERNEL]);
for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
kernel_pt_table[KERNEL_PT_VMDATA + loop]);
&kernel_pt_table[KERNEL_PT_VMDATA + loop]);
pmap_link_l2pt(l1pagetable, PROCESS_PAGE_TBLS_BASE,
kernel_ptpt.pv_pa);
&kernel_ptpt);
#ifdef VERBOSE_INIT_ARM
printf("Mapping kernel\n");
#endif
/* Now we fill in the L2 pagetable for the kernel static code/data */
l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL];
l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL].pv_pa;
#if 0
{
@ -598,19 +601,19 @@ initarm(bootinfo)
*/
textsize = textsize & ~PGOFSET;
totalsize = (totalsize + PGOFSET) & ~PGOFSET;
logical = pmap_map_chunk(l1pagetable, l2pagetable,
logical = pmap_map_chunk(l1pagetable,
KERNEL_BASE, physical_start,
KERNEL_TEXT_BASE - KERNEL_BASE,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
logical += pmap_map_chunk(l1pagetable, l2pagetable,
logical += pmap_map_chunk(l1pagetable,
KERNEL_BASE + logical, physical_start + logical,
textsize, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
logical += pmap_map_chunk(l1pagetable, l2pagetable,
logical += pmap_map_chunk(l1pagetable,
KERNEL_BASE + logical, physical_start + logical,
totalsize - textsize,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#if 0
logical += pmap_map_chunk(l1pagetable, l2pagetable,
logical += pmap_map_chunk(l1pagetable,
KERNEL_BASE + logical,
physical_start + logical, kernexec->a_syms + sizeof(int)
+ *(u_int *)((int)end + kernexec->a_syms + sizeof(int)),
@ -647,22 +650,17 @@ initarm(bootinfo)
#endif
/* Map the stack pages */
pmap_map_chunk(l1pagetable, l2pagetable, irqstack.pv_va,
irqstack.pv_pa, IRQ_STACK_SIZE * NBPG,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, l2pagetable, abtstack.pv_va,
abtstack.pv_pa, ABT_STACK_SIZE * NBPG,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, l2pagetable, undstack.pv_va,
undstack.pv_pa, UND_STACK_SIZE * NBPG,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, l2pagetable, kernelstack.pv_va,
kernelstack.pv_pa, UPAGES * NBPG,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
IRQ_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
ABT_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
UND_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
UPAGES * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, l2pagetable, kernel_l1pt.pv_va,
kernel_l1pt.pv_pa, PD_SIZE,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
PD_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/* Map the page table that maps the kernel pages */
pmap_map_entry(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa,
@ -675,25 +673,25 @@ initarm(bootinfo)
/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
l2pagetable = kernel_ptpt.pv_pa;
pmap_map_entry(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_KERNEL],
kernel_pt_table[KERNEL_PT_KERNEL].pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
kernel_ptpt.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_SYS],
kernel_pt_table[KERNEL_PT_SYS].pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
pmap_map_entry(l2pagetable, ((KERNEL_VM_BASE +
(loop * 0x00400000)) >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_VMDATA + loop],
kernel_pt_table[KERNEL_PT_VMDATA + loop].pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/*
* Map the system page in the kernel page table for the bottom 1Meg
* of the virtual memory map.
*/
l2pagetable = kernel_pt_table[KERNEL_PT_SYS];
l2pagetable = kernel_pt_table[KERNEL_PT_SYS].pv_pa;
pmap_map_entry(l2pagetable, 0x00000000, systempage.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

View File

@ -1,4 +1,4 @@
/* $NetBSD: ofw.c,v 1.5 2002/02/21 06:33:05 thorpej Exp $ */
/* $NetBSD: ofw.c,v 1.6 2002/02/21 21:58:03 thorpej Exp $ */
/*
* Copyright 1997
@ -1398,19 +1398,19 @@ ofw_construct_proc0_addrspace(proc0_ttbbase, proc0_ptpt)
/* Construct the proc0 L1 pagetable. */
L1pagetable = proc0_pagedir.pv_va;
pmap_link_l2pt(L1pagetable, 0x0, proc0_pt_sys.pv_pa);
pmap_link_l2pt(L1pagetable, KERNEL_BASE, proc0_pt_kernel.pv_pa);
pmap_link_l2pt(L1pagetable, 0x0, &proc0_pt_sys);
pmap_link_l2pt(L1pagetable, KERNEL_BASE, &proc0_pt_kernel);
pmap_link_l2pt(L1pagetable, PROCESS_PAGE_TBLS_BASE,
proc0_pt_pte.pv_pa);
&proc0_pt_pte);
for (i = 0; i < KERNEL_VMDATA_PTS; i++)
pmap_link_l2pt(L1pagetable, KERNEL_VM_BASE + i * 0x00400000,
proc0_pt_vmdata[i].pv_pa);
&proc0_pt_vmdata[i]);
for (i = 0; i < KERNEL_OFW_PTS; i++)
pmap_link_l2pt(L1pagetable, OFW_VIRT_BASE + i * 0x00400000,
proc0_pt_ofw[i].pv_pa);
&proc0_pt_ofw[i]);
for (i = 0; i < KERNEL_IO_PTS; i++)
pmap_link_l2pt(L1pagetable, IO_VIRT_BASE + i * 0x00400000,
proc0_pt_io[i].pv_pa);
&proc0_pt_io[i]);
/*
* gross hack for the sake of not thrashing the TLB and making