Collapse map_entry{,ro,nc}() into a single pmap_map_entry() that takes a prot and a "cacheable" indicator.
thorpej 2002-02-20 02:32:56 +00:00
parent 0c7bc63d83
commit c44b9117f0
11 changed files with 181 additions and 175 deletions
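In rough terms, each of the removed helpers corresponds to one protection/cacheability combination of the new call. A minimal sketch of the equivalences, with l2pt/va/pa standing in for the caller's page table and addresses; the combinations are the ones used by the conversions in this diff:

    pmap_map_entry(l2pt, va, pa, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);   /* was map_entry()    */
    pmap_map_entry(l2pt, va, pa, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE); /* was map_entry_nc() */
    pmap_map_entry(l2pt, va, pa, VM_PROT_READ, PTE_CACHE);                 /* was map_entry_ro() */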

rpc_machdep.c

@ -1,4 +1,4 @@
/* $NetBSD: rpc_machdep.c,v 1.17 2002/02/20 00:10:15 thorpej Exp $ */
/* $NetBSD: rpc_machdep.c,v 1.18 2002/02/20 02:32:56 thorpej Exp $ */
/*
* Copyright (c) 2000-2001 Reinoud Zandijk.
@ -57,7 +57,7 @@
#include <sys/param.h>
__RCSID("$NetBSD: rpc_machdep.c,v 1.17 2002/02/20 00:10:15 thorpej Exp $");
__RCSID("$NetBSD: rpc_machdep.c,v 1.18 2002/02/20 02:32:56 thorpej Exp $");
#include <sys/systm.h>
#include <sys/kernel.h>
@ -187,9 +187,6 @@ void physcon_display_base __P((u_int addr));
extern void consinit __P((void));
void map_pagetable __P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
void map_entry __P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
void map_entry_nc __P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
void map_entry_ro __P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
vm_size_t map_chunk __P((vm_offset_t pd, vm_offset_t pt, vm_offset_t va,
vm_offset_t pa, vm_size_t size, u_int acc,
u_int flg));
@ -756,8 +753,8 @@ initarm(void *cookie)
PD_SIZE, AP_KRW, 0);
/* Map the page table that maps the kernel pages */
map_entry_nc(l2pagetable, kernel_ptpt.pv_pa - physical_start,
kernel_ptpt.pv_pa);
pmap_map_entry(l2pagetable, kernel_ptpt.pv_pa - physical_start,
kernel_ptpt.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/* Now we fill in the L2 pagetable for the VRAM */
@ -784,18 +781,22 @@ initarm(void *cookie)
*/
/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
l2pagetable = kernel_ptpt.pv_pa;
map_entry_nc(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_KERNEL]);
map_entry_nc(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
kernel_ptpt.pv_pa);
map_entry_nc(l2pagetable, (VMEM_VBASE >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_VMEM]);
map_entry_nc(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_SYS]);
pmap_map_entry(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_KERNEL], VM_PROT_READ|VM_PROT_WRITE,
PTE_NOCACHE);
pmap_map_entry(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
kernel_ptpt.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(l2pagetable, (VMEM_VBASE >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_VMEM], VM_PROT_READ|VM_PROT_WRITE,
PTE_NOCACHE);
pmap_map_entry(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_SYS], VM_PROT_READ|VM_PROT_WRITE,
PTE_NOCACHE);
for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop) {
map_entry_nc(l2pagetable, ((KERNEL_VM_BASE +
pmap_map_entry(l2pagetable, ((KERNEL_VM_BASE +
(loop * 0x00400000)) >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_VMDATA + loop]);
kernel_pt_table[KERNEL_PT_VMDATA + loop],
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
}
/*
@ -803,7 +804,8 @@ initarm(void *cookie)
* of the virtual memory map.
*/
l2pagetable = kernel_pt_table[KERNEL_PT_SYS];
map_entry(l2pagetable, 0x0000000, systempage.pv_pa);
pmap_map_entry(l2pagetable, 0x0000000, systempage.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
/* Map the core memory needed before autoconfig */
loop = 0;

arm32_machdep.c

@ -1,4 +1,4 @@
/* $NetBSD: arm32_machdep.c,v 1.13 2002/02/20 00:10:17 thorpej Exp $ */
/* $NetBSD: arm32_machdep.c,v 1.14 2002/02/20 02:32:57 thorpej Exp $ */
/*
* Copyright (c) 1994-1998 Mark Brinicombe.
@ -251,55 +251,6 @@ map_chunk(pd, pt, va, pa, size, acc, flg)
return(size);
}
/* cats versions have larger 2 l2pt's next to each other */
void
map_entry(pagetable, va, pa)
vaddr_t pagetable;
vaddr_t va;
paddr_t pa;
{
#ifndef cats
((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000003ff)] =
L2_PTE((pa & PG_FRAME), AP_KRW);
#else
((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000007ff)] =
L2_PTE((pa & PG_FRAME), AP_KRW);
#endif
}
void
map_entry_nc(pagetable, va, pa)
vaddr_t pagetable;
vaddr_t va;
paddr_t pa;
{
#ifndef cats
((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000003ff)] =
L2_PTE_NC_NB((pa & PG_FRAME), AP_KRW);
#else
((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000007ff)] =
L2_PTE_NC_NB((pa & PG_FRAME), AP_KRW);
#endif
}
void
map_entry_ro(pagetable, va, pa)
vaddr_t pagetable;
vaddr_t va;
paddr_t pa;
{
#ifndef cats
((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000003ff)] =
L2_PTE((pa & PG_FRAME), AP_KR);
#else
((pt_entry_t *)pagetable)[((va >> PGSHIFT) & 0x000007ff)] =
L2_PTE((pa & PG_FRAME), AP_KR);
#endif
}
/*
* void cpu_startup(void)
*

pmap.c

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.40 2002/02/20 00:10:17 thorpej Exp $ */
/* $NetBSD: pmap.c,v 1.41 2002/02/20 02:32:57 thorpej Exp $ */
/*
* Copyright (c) 2001 Richard Earnshaw
@ -142,7 +142,7 @@
#include <machine/param.h>
#include <arm/arm32/katelib.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.40 2002/02/20 00:10:17 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.41 2002/02/20 02:32:57 thorpej Exp $");
#ifdef PMAP_DEBUG
#define PDEBUG(_lev_,_stat_) \
if (pmap_debug_level >= (_lev_)) \
@ -3739,3 +3739,24 @@ pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
pde[va >> PDSHIFT] = L1_SEC(pa & PD_MASK,
cache == PTE_CACHE ? pte_cache_mode : 0);
}
/*
* pmap_map_entry:
*
* Create a single page mapping.
*/
void
pmap_map_entry(vaddr_t l2pt, vaddr_t va, paddr_t pa, int prot, int cache)
{
pt_entry_t *pte = (pt_entry_t *) l2pt;
pt_entry_t ap = (prot & VM_PROT_WRITE) ? AP_KRW : AP_KR;
pt_entry_t fl = (cache == PTE_CACHE) ? pte_cache_mode : 0;
KASSERT(((va | pa) & PGOFSET) == 0);
#ifdef cats /* XXXJRT */
pte[(va >> PGSHIFT) & 0x7ff] = L2_SPTE(pa & PG_FRAME, ap, fl);
#else
pte[(va >> PGSHIFT) & 0x3ff] = L2_SPTE(pa & PG_FRAME, ap, fl);
#endif
}
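The cats mask (0x7ff versus 0x3ff) reflects that board's placement of two 1024-entry L2 tables back to back, as noted in the comment removed from arm32_machdep.c above. A minimal sketch of the slot selection, assuming PGSHIFT is 12 (4 KB pages); the address is hypothetical:

    vaddr_t va = 0xf0001000;                        /* hypothetical kernel VA */
    u_int slot      = (va >> PGSHIFT) & 0x3ff;      /* generic 1024-entry L2 table */
    u_int slot_cats = (va >> PGSHIFT) & 0x7ff;      /* cats: two contiguous L2 tables */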

machdep.h

@ -1,4 +1,4 @@
/* $NetBSD: machdep.h,v 1.4 2002/02/20 00:10:18 thorpej Exp $ */
/* $NetBSD: machdep.h,v 1.5 2002/02/20 02:32:57 thorpej Exp $ */
#ifndef _ARM32_BOOT_MACHDEP_H_
#define _ARM32_BOOT_MACHDEP_H_
@ -12,9 +12,6 @@ void undefinedinstruction_bounce __P((trapframe_t *));
void dumpsys __P((void));
void map_pagetable(vaddr_t, vaddr_t, paddr_t);
void map_entry(vaddr_t, vaddr_t, paddr_t);
void map_entry_nc(vaddr_t, vaddr_t, paddr_t);
void map_entry_ro(vaddr_t, vaddr_t, paddr_t);
vsize_t map_chunk(vaddr_t, vaddr_t, vaddr_t, paddr_t, vsize_t,
u_int, u_int);

pmap.h

@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.24 2002/02/20 00:10:18 thorpej Exp $ */
/* $NetBSD: pmap.h,v 1.25 2002/02/20 02:32:58 thorpej Exp $ */
/*
* Copyright (c) 1994,1995 Mark Brinicombe.
@ -187,6 +187,7 @@ pt_entry_t *pmap_pte __P((struct pmap *, vaddr_t));
/* Bootstrapping routines. */
void pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
/*
* Special page zero routine for use by the idle loop (no cache cleans).

cats_machdep.c

@ -1,4 +1,4 @@
/* $NetBSD: cats_machdep.c,v 1.16 2002/02/20 00:10:18 thorpej Exp $ */
/* $NetBSD: cats_machdep.c,v 1.17 2002/02/20 02:32:58 thorpej Exp $ */
/*
* Copyright (c) 1997,1998 Mark Brinicombe.
@ -611,7 +611,8 @@ initarm(bootargs)
#endif
/* Map the boot arguments page */
map_entry_ro(l2pagetable, ebsabootinfo.bt_vargp, ebsabootinfo.bt_pargp);
pmap_map_entry(l2pagetable, ebsabootinfo.bt_vargp,
ebsabootinfo.bt_pargp, VM_PROT_READ, PTE_CACHE);
/* Map the stack pages */
map_chunk(0, l2pagetable, irqstack.pv_va, irqstack.pv_pa,
@ -626,7 +627,8 @@ initarm(bootargs)
PD_SIZE, AP_KRW, 0);
/* Map the page table that maps the kernel pages */
map_entry_nc(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa);
pmap_map_entry(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/*
* Map entries in the page table used to map PTE's
@ -634,26 +636,32 @@ initarm(bootargs)
*/
/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
l2pagetable = kernel_ptpt.pv_pa;
map_entry_nc(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_KERNEL]);
map_entry_nc(l2pagetable, ((KERNEL_BASE +0x00400000) >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_KERNEL2]);
pmap_map_entry(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_KERNEL],
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(l2pagetable, ((KERNEL_BASE +0x00400000) >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_KERNEL2],
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
map_entry_nc(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
kernel_ptpt.pv_pa);
map_entry_nc(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_SYS]);
pmap_map_entry(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
kernel_ptpt.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_SYS],
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
map_entry_nc(l2pagetable, ((KERNEL_VM_BASE +
pmap_map_entry(l2pagetable, ((KERNEL_VM_BASE +
(loop * 0x00400000)) >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_VMDATA + loop]);
kernel_pt_table[KERNEL_PT_VMDATA + loop],
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/*
* Map the system page in the kernel page table for the bottom 1Meg
* of the virtual memory map.
*/
l2pagetable = kernel_pt_table[KERNEL_PT_SYS];
map_entry(l2pagetable, 0x00000000, systempage.pv_pa);
pmap_map_entry(l2pagetable, 0x00000000, systempage.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
/* Map the core memory needed before autoconfig */
loop = 0;

integrator_machdep.c

@ -1,4 +1,4 @@
/* $NetBSD: integrator_machdep.c,v 1.9 2002/02/20 00:10:18 thorpej Exp $ */
/* $NetBSD: integrator_machdep.c,v 1.10 2002/02/20 02:32:58 thorpej Exp $ */
/*
* Copyright (c) 2001 ARM Ltd
@ -166,9 +166,6 @@ struct user *proc0paddr;
void consinit __P((void));
void map_pagetable __P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
void map_entry __P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
void map_entry_nc __P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
void map_entry_ro __P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
vm_size_t map_chunk __P((vm_offset_t pd, vm_offset_t pt, vm_offset_t va,
vm_offset_t pa, vm_size_t size, u_int acc,
u_int flg));
@ -669,7 +666,8 @@ initarm(bootinfo)
/* Map the boot arguments page */
#if 0
map_entry_ro(l2pagetable, intbootinfo.bt_vargp, intbootinfo.bt_pargp);
pmap_map_entry(l2pagetable, intbootinfo.bt_vargp,
intbootinfo.bt_pargp, VM_PROT_READ, PTE_CACHE);
#endif
/* Map the stack pages */
@ -685,7 +683,8 @@ initarm(bootinfo)
PD_SIZE, AP_KRW, 0);
/* Map the page table that maps the kernel pages */
map_entry_nc(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa);
pmap_map_entry(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/*
* Map entries in the page table used to map PTE's
@ -693,16 +692,20 @@ initarm(bootinfo)
*/
/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
l2pagetable = kernel_ptpt.pv_pa;
map_entry_nc(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_KERNEL]);
map_entry_nc(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
kernel_ptpt.pv_pa);
map_entry_nc(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_SYS]);
pmap_map_entry(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_KERNEL],
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
kernel_ptpt.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_SYS],
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
map_entry_nc(l2pagetable, ((KERNEL_VM_BASE +
pmap_map_entry(l2pagetable, ((KERNEL_VM_BASE +
(loop * 0x00400000)) >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_VMDATA + loop]);
kernel_pt_table[KERNEL_PT_VMDATA + loop],
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/*
* Map the system page in the kernel page table for the bottom 1Meg
@ -712,9 +715,11 @@ initarm(bootinfo)
#if 1
/* MULTI-ICE requires that page 0 is NC/NB so that it can download
the cache-clean code there. */
map_entry_nc(l2pagetable, 0x00000000, systempage.pv_pa);
pmap_map_entry(l2pagetable, 0x00000000, systempage.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
#else
map_entry_nc(l2pagetable, 0x00000000, systempage.pv_pa);
pmap_map_entry(l2pagetable, 0x00000000, systempage.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
#endif
/* Map the core memory needed before autoconfig */
loop = 0;

iq80310_machdep.c

@ -1,4 +1,4 @@
/* $NetBSD: iq80310_machdep.c,v 1.21 2002/02/20 00:10:19 thorpej Exp $ */
/* $NetBSD: iq80310_machdep.c,v 1.22 2002/02/20 02:32:58 thorpej Exp $ */
/*
* Copyright (c) 2001, 2002 Wasabi Systems, Inc.
@ -661,7 +661,8 @@ initarm(void *arg)
NBPG, AP_KRW, PT_CACHEABLE);
/* Map the page table that maps the kernel pages */
map_entry_nc(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa);
pmap_map_entry(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/*
* Map entries in the page table used to map PTE's
@ -669,23 +670,28 @@ initarm(void *arg)
*/
/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
l2pagetable = kernel_ptpt.pv_pa;
map_entry_nc(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_KERNEL]);
map_entry_nc(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
kernel_ptpt.pv_pa);
map_entry_nc(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_SYS]);
pmap_map_entry(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_KERNEL],
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
kernel_ptpt.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_SYS],
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
map_entry_nc(l2pagetable, ((KERNEL_VM_BASE +
pmap_map_entry(l2pagetable, ((KERNEL_VM_BASE +
(loop * 0x00400000)) >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_VMDATA + loop]);
kernel_pt_table[KERNEL_PT_VMDATA + loop],
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/*
* Map the system page in the kernel page table for the bottom 1Meg
* of the virtual memory map.
*/
l2pagetable = kernel_pt_table[KERNEL_PT_SYS];
map_entry(l2pagetable, 0x00000000, systempage.pv_pa);
pmap_map_entry(l2pagetable, 0x00000000, systempage.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
/*
* Map devices we can map w/ section mappings.

hpc_machdep.c

@ -1,4 +1,4 @@
/* $NetBSD: hpc_machdep.c,v 1.28 2002/02/20 00:10:19 thorpej Exp $ */
/* $NetBSD: hpc_machdep.c,v 1.29 2002/02/20 02:32:58 thorpej Exp $ */
/*
* Copyright (c) 1994-1998 Mark Brinicombe.
@ -178,9 +178,6 @@ void physcon_display_base __P((u_int addr));
void consinit __P((void));
void map_pagetable __P((vaddr_t pt, vaddr_t va, vaddr_t pa));
void map_entry __P((vaddr_t pt, vaddr_t va, vaddr_t pa));
void map_entry_nc __P((vaddr_t pt, vaddr_t va, vaddr_t pa));
void map_entry_ro __P((vaddr_t pt, vaddr_t va, vaddr_t pa));
vm_size_t map_chunk __P((vaddr_t pd, vaddr_t pt, vaddr_t va,
vaddr_t pa, vm_size_t size, u_int acc,
u_int flg));
@ -561,10 +558,12 @@ initarm(argc, argv, bi)
PD_SIZE, AP_KRW, 0);
/* Map the page table that maps the kernel pages */
map_entry_nc(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa);
pmap_map_entry(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/* Map a page for entering idle mode */
map_entry_nc(l2pagetable, sa11x0_idle_mem, sa11x0_idle_mem);
pmap_map_entry(l2pagetable, sa11x0_idle_mem, sa11x0_idle_mem,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/*
* Map entries in the page table used to map PTE's
@ -572,33 +571,40 @@ initarm(argc, argv, bi)
*/
/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
l2pagetable = kernel_ptpt.pv_pa;
map_entry_nc(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_SYS]);
map_entry_nc(l2pagetable, (KERNEL_SPACE_START >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_KERNEL]);
map_entry_nc(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_KERNEL]);
pmap_map_entry(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_SYS],
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(l2pagetable, (KERNEL_SPACE_START >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_KERNEL],
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_KERNEL],
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop) {
map_entry_nc(l2pagetable, ((KERNEL_VM_BASE +
pmap_map_entry(l2pagetable, ((KERNEL_VM_BASE +
(loop * 0x00400000)) >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_VMDATA + loop]);
kernel_pt_table[KERNEL_PT_VMDATA + loop],
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
}
map_entry_nc(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
kernel_ptpt.pv_pa);
map_entry_nc(l2pagetable, (SAIPIO_BASE >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_IO]);
pmap_map_entry(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
kernel_ptpt.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(l2pagetable, (SAIPIO_BASE >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_IO], VM_PROT_READ|VM_PROT_WRITE,
PTE_NOCACHE);
/*
* Map the system page in the kernel page table for the bottom 1Meg
* of the virtual memory map.
*/
l2pagetable = kernel_pt_table[KERNEL_PT_SYS];
map_entry(l2pagetable, 0x0000000, systempage.pv_pa);
pmap_map_entry(l2pagetable, 0x0000000, systempage.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
/* Map any I/O modules here, as we don't have real bus_space_map() */
printf("mapping IO...");
l2pagetable = kernel_pt_table[KERNEL_PT_IO];
map_entry_nc(l2pagetable, SACOM3_BASE, SACOM3_HW_BASE);
pmap_map_entry(l2pagetable, SACOM3_BASE, SACOM3_HW_BASE,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
#ifdef CPU_SA110
l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL];

netwinder_machdep.c

@ -1,4 +1,4 @@
/* $NetBSD: netwinder_machdep.c,v 1.16 2002/02/20 00:10:19 thorpej Exp $ */
/* $NetBSD: netwinder_machdep.c,v 1.17 2002/02/20 02:32:59 thorpej Exp $ */
/*
* Copyright (c) 1997,1998 Mark Brinicombe.
@ -161,9 +161,6 @@ int fcomcndetach __P((void));
void isa_netwinder_init __P((u_int iobase, u_int membase));
void map_pagetable __P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
void map_entry __P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
void map_entry_nc __P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
void map_entry_ro __P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
vm_size_t map_chunk __P((vm_offset_t pd, vm_offset_t pt, vm_offset_t va,
vm_offset_t pa, vm_size_t size, u_int acc,
u_int flg));
@ -647,7 +644,8 @@ initarm(bootinfo)
/* Map the boot arguments page */
#if 0
map_entry_ro(l2pagetable, nwbootinfo.bt_vargp, nwbootinfo.bt_pargp);
pmap_map_entry(l2pagetable, nwbootinfo.bt_vargp, nwbootinfo.bt_pargp,
VM_PROT_READ, PTE_CACHE);
#endif
/* Map the stack pages */
@ -663,7 +661,8 @@ initarm(bootinfo)
PD_SIZE, AP_KRW, 0);
/* Map the page table that maps the kernel pages */
map_entry_nc(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa);
pmap_map_entry(l2pagetable, kernel_ptpt.pv_pa, kernel_ptpt.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/*
* Map entries in the page table used to map PTE's
@ -671,23 +670,28 @@ initarm(bootinfo)
*/
/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
l2pagetable = kernel_ptpt.pv_pa;
map_entry_nc(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_KERNEL]);
map_entry_nc(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
kernel_ptpt.pv_pa);
map_entry_nc(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_SYS]);
pmap_map_entry(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_KERNEL],
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
kernel_ptpt.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_SYS],
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
map_entry_nc(l2pagetable, ((KERNEL_VM_BASE +
pmap_map_entry(l2pagetable, ((KERNEL_VM_BASE +
(loop * 0x00400000)) >> (PGSHIFT-2)),
kernel_pt_table[KERNEL_PT_VMDATA + loop]);
kernel_pt_table[KERNEL_PT_VMDATA + loop],
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/*
* Map the system page in the kernel page table for the bottom 1Meg
* of the virtual memory map.
*/
l2pagetable = kernel_pt_table[KERNEL_PT_SYS];
map_entry(l2pagetable, 0x00000000, systempage.pv_pa);
pmap_map_entry(l2pagetable, 0x00000000, systempage.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
/* Map the core memory needed before autoconfig */
loop = 0;

ofw.c

@ -1,4 +1,4 @@
/* $NetBSD: ofw.c,v 1.2 2002/02/20 00:10:20 thorpej Exp $ */
/* $NetBSD: ofw.c,v 1.3 2002/02/20 02:32:59 thorpej Exp $ */
/*
* Copyright 1997
@ -94,9 +94,6 @@ extern int ofw_handleticks;
* Imported routines
*/
extern void map_pagetable __P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
extern void map_entry __P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
extern void map_entry_nc __P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
extern void map_entry_ro __P((vm_offset_t pt, vm_offset_t va, vm_offset_t pa));
extern void dump_spl_masks __P((void));
extern void dumpsys __P((void));
extern void dotickgrovelling __P((vm_offset_t));
@ -1327,10 +1324,10 @@ ofw_construct_proc0_addrspace(proc0_ttbbase, proc0_ptpt)
}
/* Make the entry. */
if ((tp->mode & 0xC) == 0xC)
map_entry(L2pagetable, va, pa);
else
map_entry_nc(L2pagetable, va, pa);
pmap_map_entry(L2pagetable, va, pa,
VM_PROT_READ|VM_PROT_WRITE,
(tp->mode & 0xC) == 0xC ? PTE_CACHE
: PTE_NOCACHE);
}
}
@ -1362,12 +1359,14 @@ ofw_construct_proc0_addrspace(proc0_ttbbase, proc0_ptpt)
* cached ...
* Really these should be uncached when allocated.
*/
map_entry_nc(proc0_pt_kernel.pv_va, proc0_pt_pte.pv_va,
proc0_pt_pte.pv_pa);
pmap_map_entry(proc0_pt_kernel.pv_va, proc0_pt_pte.pv_va,
proc0_pt_pte.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
for (i = 0; i < (PD_SIZE / NBPG); ++i)
map_entry_nc(proc0_pt_kernel.pv_va,
pmap_map_entry(proc0_pt_kernel.pv_va,
proc0_pagedir.pv_va + NBPG * i,
proc0_pagedir.pv_pa + NBPG * i);
proc0_pagedir.pv_pa + NBPG * i,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/*
* Construct the proc0 L2 pagetables that map page tables.
@ -1375,21 +1374,27 @@ ofw_construct_proc0_addrspace(proc0_ttbbase, proc0_ptpt)
/* Map entries in the L2pagetable used to map L2PTs. */
L2pagetable = proc0_pt_pte.pv_va;
map_entry_nc(L2pagetable, (0x00000000 >> (PGSHIFT-2)),
proc0_pt_sys.pv_pa);
map_entry_nc(L2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
proc0_pt_kernel.pv_pa);
map_entry_nc(L2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
proc0_pt_pte.pv_pa);
pmap_map_entry(L2pagetable, (0x00000000 >> (PGSHIFT-2)),
proc0_pt_sys.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(L2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
proc0_pt_kernel.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(L2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
proc0_pt_pte.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
for (i = 0; i < KERNEL_VMDATA_PTS; i++)
map_entry_nc(L2pagetable, ((KERNEL_VM_BASE + i * 0x00400000)
>> (PGSHIFT-2)), proc0_pt_vmdata[i].pv_pa);
pmap_map_entry(L2pagetable, ((KERNEL_VM_BASE + i * 0x00400000)
>> (PGSHIFT-2)), proc0_pt_vmdata[i].pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
for (i = 0; i < KERNEL_OFW_PTS; i++)
map_entry_nc(L2pagetable, ((OFW_VIRT_BASE + i * 0x00400000)
>> (PGSHIFT-2)), proc0_pt_ofw[i].pv_pa);
pmap_map_entry(L2pagetable, ((OFW_VIRT_BASE + i * 0x00400000)
>> (PGSHIFT-2)), proc0_pt_ofw[i].pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
for (i = 0; i < KERNEL_IO_PTS; i++)
map_entry_nc(L2pagetable, ((IO_VIRT_BASE + i * 0x00400000)
>> (PGSHIFT-2)), proc0_pt_io[i].pv_pa);
pmap_map_entry(L2pagetable, ((IO_VIRT_BASE + i * 0x00400000)
>> (PGSHIFT-2)), proc0_pt_io[i].pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/* Construct the proc0 L1 pagetable. */
L1pagetable = proc0_pagedir.pv_va;