diff --git a/sys/arch/acorn32/acorn32/rpc_machdep.c b/sys/arch/acorn32/acorn32/rpc_machdep.c
index 5f9ff61185a0..a14145c82a67 100644
--- a/sys/arch/acorn32/acorn32/rpc_machdep.c
+++ b/sys/arch/acorn32/acorn32/rpc_machdep.c
@@ -1,4 +1,4 @@
-/* $NetBSD: rpc_machdep.c,v 1.50 2003/04/26 19:35:03 chris Exp $ */
+/* $NetBSD: rpc_machdep.c,v 1.51 2003/05/02 23:22:33 thorpej Exp $ */
 
 /*
  * Copyright (c) 2000-2002 Reinoud Zandijk.
@@ -56,7 +56,7 @@
 
 #include 
 
-__KERNEL_RCSID(0, "$NetBSD: rpc_machdep.c,v 1.50 2003/04/26 19:35:03 chris Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rpc_machdep.c,v 1.51 2003/05/02 23:22:33 thorpej Exp $");
 
 #include 
 #include 
@@ -432,7 +432,9 @@ initarm(void *cookie)
 	u_int l1pagetable;
 	struct exec *kernexec = (struct exec *)KERNEL_TEXT_BASE;
 	pv_addr_t kernel_l1pt;
+#ifndef ARM32_PMAP_NEW
 	pv_addr_t kernel_ptpt;
+#endif
 
 	/*
 	 * Heads up ... Setup the CPU / MMU / TLB functions
@@ -633,8 +635,10 @@ initarm(void *cookie)
 	 */
 	alloc_pages(systempage.pv_pa, 1);
 
+#ifndef ARM32_PMAP_NEW
 	/* Allocate a page for the page table to map kernel page tables */
 	valloc_pages(kernel_ptpt, L2_TABLE_SIZE / PAGE_SIZE);
+#endif
 
 	/* Allocate stacks for all modes */
 	valloc_pages(irqstack, IRQ_STACK_SIZE);
@@ -692,7 +696,9 @@ initarm(void *cookie)
 	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
 		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
 		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);
+#ifndef ARM32_PMAP_NEW
 	pmap_link_l2pt(l1pagetable, PTE_BASE, &kernel_ptpt);
+#endif
 	pmap_link_l2pt(l1pagetable, VMEM_VBASE,
 	    &kernel_pt_table[KERNEL_PT_VMEM]);
@@ -765,13 +771,10 @@ initarm(void *cookie)
 	}
 #endif
 
-	/* Map the page table that maps the kernel pages */
 #ifndef ARM32_PMAP_NEW
+	/* Map the page table that maps the kernel pages */
 	pmap_map_entry(l1pagetable, kernel_ptpt.pv_va, kernel_ptpt.pv_pa,
 	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
-#else
-	pmap_map_entry(l1pagetable, kernel_ptpt.pv_va, kernel_ptpt.pv_pa,
-	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 #endif
 
 	/* Now we fill in the L2 pagetable for the VRAM */
@@ -790,7 +793,7 @@ initarm(void *cookie)
 	    videomemory.vidm_pbase, videomemory.vidm_size,
 	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 
-
+#ifndef ARM32_PMAP_NEW
 	/*
 	 * Map entries in the page table used to map PTE's
 	 * Basically every kernel page table gets mapped here
@@ -799,23 +802,11 @@ initarm(void *cookie)
 	 */
 	pmap_map_entry(l1pagetable,
 	    PTE_BASE + (KERNEL_BASE >> (PGSHIFT-2)),
 	    kernel_pt_table[KERNEL_PT_KERNEL].pv_pa,
-#ifndef ARM32_PMAP_NEW
-	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE
-#else
-	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE
-#endif
-	    );
-#ifndef ARM32_PMAP_NEW
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 	pmap_map_entry(l1pagetable,
 	    PTE_BASE + (PTE_BASE >> (PGSHIFT-2)), kernel_ptpt.pv_pa,
 	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
-#else
-	pmap_map_entry(l1pagetable,
-	    PTE_BASE + (PTE_BASE >> (PGSHIFT-2)),
-	    kernel_ptpt.pv_pa,
-	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
-#endif
 	pmap_map_entry(l1pagetable,
 	    PTE_BASE + (VMEM_VBASE >> (PGSHIFT-2)),
 	    kernel_pt_table[KERNEL_PT_VMEM].pv_pa, VM_PROT_READ|VM_PROT_WRITE,
@@ -831,6 +822,7 @@ initarm(void *cookie)
 		    kernel_pt_table[KERNEL_PT_VMDATA + loop].pv_pa,
 		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 	}
+#endif
 
 	/* Map the vector page. */
 	pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
diff --git a/sys/arch/arm/include/arm32/vmparam.h b/sys/arch/arm/include/arm32/vmparam.h
index c1f6b43f0028..0f78e999e28f 100644
--- a/sys/arch/arm/include/arm32/vmparam.h
+++ b/sys/arch/arm/include/arm32/vmparam.h
@@ -1,4 +1,4 @@
-/* $NetBSD: vmparam.h,v 1.13 2003/04/18 11:08:28 scw Exp $ */
+/* $NetBSD: vmparam.h,v 1.14 2003/05/02 23:22:34 thorpej Exp $ */
 
 /*
  * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
@@ -83,6 +83,16 @@
 #define PAGE_MASK	(PAGE_SIZE - 1)
 
 #ifndef ARM32_NEW_VM_LAYOUT
+#ifdef ARM32_PMAP_NEW
+/*
+ * Mach derived constants
+ */
+#define	VM_MIN_ADDRESS		((vaddr_t) 0x00001000)
+#define	VM_MAXUSER_ADDRESS	((vaddr_t) KERNEL_BASE)
+#define	VM_MAX_ADDRESS		VM_MAXUSER_ADDRESS
+#define	VM_MIN_KERNEL_ADDRESS	((vaddr_t) KERNEL_TEXT_BASE)
+#define	VM_MAX_KERNEL_ADDRESS	((vaddr_t) 0xffffffff)
+#else /* ! ARM32_PMAP_NEW */
 /*
  * Linear page table space: number of PTEs required to map the 4G address
  * space * size of each PTE.
@@ -102,6 +112,7 @@
 	sizeof(pt_entry_t)))
 #define VM_MIN_KERNEL_ADDRESS	((vaddr_t) KERNEL_TEXT_BASE)
 #define VM_MAX_KERNEL_ADDRESS	((vaddr_t) 0xffffffff)
+#endif /* ARM32_PMAP_NEW */
 
 #else /* ARM32_NEW_VM_LAYOUT */
 
diff --git a/sys/arch/cats/cats/cats_machdep.c b/sys/arch/cats/cats/cats_machdep.c
index bce7779b345a..5b770f4b7cdd 100644
--- a/sys/arch/cats/cats/cats_machdep.c
+++ b/sys/arch/cats/cats/cats_machdep.c
@@ -1,4 +1,4 @@
-/* $NetBSD: cats_machdep.c,v 1.41 2003/04/26 17:35:57 chris Exp $ */
+/* $NetBSD: cats_machdep.c,v 1.42 2003/05/02 23:22:34 thorpej Exp $ */
 
 /*
  * Copyright (c) 1997,1998 Mark Brinicombe.
@@ -346,8 +346,10 @@ initarm(bootargs)
 	u_int l1pagetable;
 	struct exec *kernexec = (struct exec *)KERNEL_TEXT_BASE;
 	pv_addr_t kernel_l1pt;
+#ifndef ARM32_PMAP_NEW
 	pv_addr_t kernel_ptpt;
-
+#endif
+
 	/*
 	 * Heads up ... Setup the CPU / MMU / TLB functions
 	 */
@@ -512,8 +514,10 @@ initarm(bootargs)
 	 */
 	alloc_pages(systempage.pv_pa, 1);
 
+#ifndef ARM32_PMAP_NEW
 	/* Allocate a page for the page table to map kernel page tables*/
 	valloc_pages(kernel_ptpt, L2_TABLE_SIZE / PAGE_SIZE);
+#endif
 
 	/* Allocate stacks for all modes */
 	valloc_pages(irqstack, IRQ_STACK_SIZE);
@@ -562,7 +566,9 @@ initarm(bootargs)
 	pmap_curmaxkvaddr =
 	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);
 
+#ifndef ARM32_PMAP_NEW
 	pmap_link_l2pt(l1pagetable, PTE_BASE, &kernel_ptpt);
+#endif
 
 #ifdef VERBOSE_INIT_ARM
 	printf("Mapping kernel\n");
@@ -636,14 +642,11 @@ initarm(bootargs)
 	}
 #endif
 
-	/* Map the page table that maps the kernel pages */
 #ifndef ARM32_PMAP_NEW
+	/* Map the page table that maps the kernel pages */
 	pmap_map_entry(l1pagetable, kernel_ptpt.pv_va, kernel_ptpt.pv_pa,
 	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
-#else
-	pmap_map_entry(l1pagetable, kernel_ptpt.pv_va, kernel_ptpt.pv_pa,
-	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
-#endif
+
 	/*
 	 * Map entries in the page table used to map PTE's
 	 * Basically every kernel page table gets mapped here
@@ -654,23 +657,11 @@ initarm(bootargs)
 		    PTE_BASE + ((KERNEL_BASE +
 		    (loop * 0x00400000)) >> (PGSHIFT-2)),
 		    kernel_pt_table[KERNEL_PT_KERNEL + loop].pv_pa,
-#ifndef ARM32_PMAP_NEW
-		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE
-#else
-		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE
-#endif
-		    );
-#ifndef ARM32_PMAP_NEW
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 	pmap_map_entry(l1pagetable,
 	    PTE_BASE + (PTE_BASE >> (PGSHIFT-2)), kernel_ptpt.pv_pa,
 	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
-#else
-	pmap_map_entry(l1pagetable,
-	    PTE_BASE + (PTE_BASE >> (PGSHIFT-2)),
-	    kernel_ptpt.pv_pa,
-	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
-#endif
 	pmap_map_entry(l1pagetable,
 	    PTE_BASE + (0x00000000 >> (PGSHIFT-2)),
@@ -682,6 +673,7 @@ initarm(bootargs)
 		    (loop * 0x00400000)) >> (PGSHIFT-2)),
 		    kernel_pt_table[KERNEL_PT_VMDATA + loop].pv_pa,
 		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+#endif
 
 	/* Map the vector page. */
 	pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
diff --git a/sys/arch/evbarm/adi_brh/brh_machdep.c b/sys/arch/evbarm/adi_brh/brh_machdep.c
index 072b9e5cfd6a..996cba6a4d42 100644
--- a/sys/arch/evbarm/adi_brh/brh_machdep.c
+++ b/sys/arch/evbarm/adi_brh/brh_machdep.c
@@ -1,4 +1,4 @@
-/* $NetBSD: brh_machdep.c,v 1.6 2003/04/26 11:05:09 ragge Exp $ */
+/* $NetBSD: brh_machdep.c,v 1.7 2003/05/02 23:22:34 thorpej Exp $ */
 
 /*
  * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
@@ -388,7 +388,9 @@ initarm(void *arg)
 	int loop1;
 	u_int l1pagetable;
 	pv_addr_t kernel_l1pt;
+#ifndef ARM32_PMAP_NEW
 	pv_addr_t kernel_ptpt;
+#endif
 	paddr_t memstart;
 	psize_t memsize;
@@ -539,8 +541,10 @@ initarm(void *arg)
 	 */
 	alloc_pages(systempage.pv_pa, 1);
 
+#ifndef ARM32_PMAP_NEW
 	/* Allocate a page for the page table to map kernel page tables. */
 	valloc_pages(kernel_ptpt, L2_TABLE_SIZE / PAGE_SIZE);
+#endif
 
 	/* Allocate stacks for all modes */
 	valloc_pages(irqstack, IRQ_STACK_SIZE);
@@ -594,7 +598,9 @@ initarm(void *arg)
 	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
 		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
 		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);
+#ifndef ARM32_PMAP_NEW
 	pmap_link_l2pt(l1pagetable, PTE_BASE, &kernel_ptpt);
+#endif
 
 	/* update the top of the kernel VM */
 	pmap_curmaxkvaddr =
@@ -656,13 +662,10 @@ initarm(void *arg)
 	xscale_setup_minidata(l1pagetable, minidataclean.pv_va,
 	    minidataclean.pv_pa);
 
+#ifndef ARM32_PMAP_NEW
 	/* Map the page table that maps the kernel pages */
 	pmap_map_entry(l1pagetable, kernel_ptpt.pv_va, kernel_ptpt.pv_pa,
-#ifndef ARM32_PMAP_NEW
 	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
-#else
-	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
-#endif
 
 	/*
 	 * Map entries in the page table used to map PTE's
@@ -674,19 +677,11 @@ initarm(void *arg)
 		    PTE_BASE + ((KERNEL_BASE +
 		    (loop * 0x00400000)) >> (PGSHIFT-2)),
 		    kernel_pt_table[KERNEL_PT_KERNEL + loop].pv_pa,
-#ifndef ARM32_PMAP_NEW
 		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
-#else
-		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
-#endif
 	}
 	pmap_map_entry(l1pagetable, PTE_BASE + (PTE_BASE >> (PGSHIFT-2)),
-#ifndef ARM32_PMAP_NEW
 	    kernel_ptpt.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
-#else
-	    kernel_ptpt.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
-#endif
 	pmap_map_entry(l1pagetable,
 	    trunc_page(PTE_BASE + (ARM_VECTORS_HIGH >> (PGSHIFT-2))),
 	    kernel_pt_table[KERNEL_PT_SYS].pv_pa,
@@ -697,6 +692,7 @@ initarm(void *arg)
 		    (loop * 0x00400000)) >> (PGSHIFT-2)),
 		    kernel_pt_table[KERNEL_PT_VMDATA + loop].pv_pa,
 		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+#endif
 
 	/* Map the vector page. */
 	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
diff --git a/sys/arch/evbarm/iq80310/iq80310_machdep.c b/sys/arch/evbarm/iq80310/iq80310_machdep.c
index 08de591ec29c..096e296f83c7 100644
--- a/sys/arch/evbarm/iq80310/iq80310_machdep.c
+++ b/sys/arch/evbarm/iq80310/iq80310_machdep.c
@@ -1,4 +1,4 @@
-/* $NetBSD: iq80310_machdep.c,v 1.50 2003/04/26 11:05:10 ragge Exp $ */
+/* $NetBSD: iq80310_machdep.c,v 1.51 2003/05/02 23:22:34 thorpej Exp $ */
 
 /*
  * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
@@ -336,7 +336,9 @@ initarm(void *arg)
 	int loop1;
 	u_int l1pagetable;
 	pv_addr_t kernel_l1pt;
+#ifndef ARM32_PMAP_NEW
 	pv_addr_t kernel_ptpt;
+#endif
 	paddr_t memstart;
 	psize_t memsize;
@@ -510,8 +512,10 @@ initarm(void *arg)
 	 */
 	alloc_pages(systempage.pv_pa, 1);
 
+#ifndef ARM32_PMAP_NEW
 	/* Allocate a page for the page table to map kernel page tables. */
 	valloc_pages(kernel_ptpt, L2_TABLE_SIZE / PAGE_SIZE);
+#endif
 
 	/* Allocate stacks for all modes */
 	valloc_pages(irqstack, IRQ_STACK_SIZE);
@@ -567,7 +571,9 @@ initarm(void *arg)
 	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
 		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
 		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);
+#ifndef ARM32_PMAP_NEW
 	pmap_link_l2pt(l1pagetable, PTE_BASE, &kernel_ptpt);
+#endif
 
 	/* update the top of the kernel VM */
 	pmap_curmaxkvaddr =
@@ -629,13 +635,10 @@ initarm(void *arg)
 	xscale_setup_minidata(l1pagetable, minidataclean.pv_va,
 	    minidataclean.pv_pa);
 
+#ifndef ARM32_PMAP_NEW
 	/* Map the page table that maps the kernel pages */
 	pmap_map_entry(l1pagetable, kernel_ptpt.pv_va, kernel_ptpt.pv_pa,
-#ifndef ARM32_PMAP_NEW
 	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
-#else
-	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
-#endif
 
 	/*
 	 * Map entries in the page table used to map PTE's
@@ -647,19 +650,11 @@ initarm(void *arg)
 		    PTE_BASE + ((KERNEL_BASE +
 		    (loop * 0x00400000)) >> (PGSHIFT-2)),
 		    kernel_pt_table[KERNEL_PT_KERNEL + loop].pv_pa,
-#ifndef ARM32_PMAP_NEW
 		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
-#else
-		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
-#endif
 	}
 	pmap_map_entry(l1pagetable, PTE_BASE + (PTE_BASE >> (PGSHIFT-2)),
-#ifndef ARM32_PMAP_NEW
 	    kernel_ptpt.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
-#else
-	    kernel_ptpt.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
-#endif
 	pmap_map_entry(l1pagetable,
 	    trunc_page(PTE_BASE + (ARM_VECTORS_HIGH >> (PGSHIFT-2))),
 	    kernel_pt_table[KERNEL_PT_SYS].pv_pa,
@@ -670,6 +665,7 @@ initarm(void *arg)
 		    (loop * 0x00400000)) >> (PGSHIFT-2)),
 		    kernel_pt_table[KERNEL_PT_VMDATA + loop].pv_pa,
 		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+#endif
 
 	/* Map the vector page. */
 	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
diff --git a/sys/arch/evbarm/iq80321/iq80321_machdep.c b/sys/arch/evbarm/iq80321/iq80321_machdep.c
index fe89052b4461..a3d14ca2e9de 100644
--- a/sys/arch/evbarm/iq80321/iq80321_machdep.c
+++ b/sys/arch/evbarm/iq80321/iq80321_machdep.c
@@ -1,4 +1,4 @@
-/* $NetBSD: iq80321_machdep.c,v 1.19 2003/04/30 18:12:03 thorpej Exp $ */
+/* $NetBSD: iq80321_machdep.c,v 1.20 2003/05/02 23:22:35 thorpej Exp $ */
 
 /*
  * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
@@ -380,7 +380,9 @@ initarm(void *arg)
 	int loop1;
 	u_int l1pagetable;
 	pv_addr_t kernel_l1pt;
+#ifndef ARM32_PMAP_NEW
 	pv_addr_t kernel_ptpt;
+#endif
 	paddr_t memstart;
 	psize_t memsize;
@@ -530,8 +532,10 @@ initarm(void *arg)
 	 */
 	alloc_pages(systempage.pv_pa, 1);
 
+#ifndef ARM32_PMAP_NEW
 	/* Allocate a page for the page table to map kernel page tables. */
 	valloc_pages(kernel_ptpt, L2_TABLE_SIZE / PAGE_SIZE);
+#endif
 
 	/* Allocate stacks for all modes */
 	valloc_pages(irqstack, IRQ_STACK_SIZE);
@@ -587,7 +591,9 @@ initarm(void *arg)
 	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
 		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
 		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);
+#ifndef ARM32_PMAP_NEW
 	pmap_link_l2pt(l1pagetable, PTE_BASE, &kernel_ptpt);
+#endif
 
 	/* update the top of the kernel VM */
 	pmap_curmaxkvaddr =
@@ -649,13 +655,10 @@ initarm(void *arg)
 	xscale_setup_minidata(l1pagetable, minidataclean.pv_va,
 	    minidataclean.pv_pa);
 
+#ifndef ARM32_PMAP_NEW
 	/* Map the page table that maps the kernel pages */
 	pmap_map_entry(l1pagetable, kernel_ptpt.pv_va, kernel_ptpt.pv_pa,
-#ifndef ARM32_PMAP_NEW
 	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
-#else
-	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
-#endif
 
 	/*
 	 * Map entries in the page table used to map PTE's
@@ -667,19 +670,11 @@ initarm(void *arg)
 		    PTE_BASE + ((KERNEL_BASE +
 		    (loop * 0x00400000)) >> (PGSHIFT-2)),
 		    kernel_pt_table[KERNEL_PT_KERNEL + loop].pv_pa,
-#ifndef ARM32_PMAP_NEW
 		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
-#else
-		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
-#endif
 	}
 	pmap_map_entry(l1pagetable, PTE_BASE + (PTE_BASE >> (PGSHIFT-2)),
-#ifndef ARM32_PMAP_NEW
 	    kernel_ptpt.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
-#else
-	    kernel_ptpt.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
-#endif
 	pmap_map_entry(l1pagetable,
 	    trunc_page(PTE_BASE + (ARM_VECTORS_HIGH >> (PGSHIFT-2))),
 	    kernel_pt_table[KERNEL_PT_SYS].pv_pa,
@@ -690,6 +685,7 @@ initarm(void *arg)
 		    (loop * 0x00400000)) >> (PGSHIFT-2)),
 		    kernel_pt_table[KERNEL_PT_VMDATA + loop].pv_pa,
 		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+#endif
 
 	/* Map the vector page. */
 	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
diff --git a/sys/arch/hpcarm/hpcarm/hpc_machdep.c b/sys/arch/hpcarm/hpcarm/hpc_machdep.c
index 2204552e56ed..bc855ab62612 100644
--- a/sys/arch/hpcarm/hpcarm/hpc_machdep.c
+++ b/sys/arch/hpcarm/hpcarm/hpc_machdep.c
@@ -1,4 +1,4 @@
-/* $NetBSD: hpc_machdep.c,v 1.62 2003/05/02 14:42:48 toshii Exp $ */
+/* $NetBSD: hpc_machdep.c,v 1.63 2003/05/02 23:22:35 thorpej Exp $ */
 
 /*
  * Copyright (c) 1994-1998 Mark Brinicombe.
@@ -297,7 +297,9 @@ initarm(argc, argv, bi)
 	u_int l1pagetable;
 	vaddr_t freemempos;
 	pv_addr_t kernel_l1pt;
+#ifndef ARM32_PMAP_NEW
 	pv_addr_t kernel_ptpt;
+#endif
 	vsize_t pt_size;
 #if NKSYMS || defined(DDB) || defined(LKM)
 	Elf_Shdr *sh;
@@ -460,8 +462,10 @@ initarm(argc, argv, bi)
 
 	pt_size = round_page(freemempos) - KERNEL_BASE;
 
+#ifndef ARM32_PMAP_NEW
 	/* Allocate a page for the page table to map kernel page tables*/
 	valloc_pages(kernel_ptpt, L2_TABLE_SIZE / PAGE_SIZE);
+#endif
 
 	/* Allocate stacks for all modes */
 	valloc_pages(irqstack, IRQ_STACK_SIZE);
@@ -529,7 +533,9 @@ initarm(argc, argv, bi)
 	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
 		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
 		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);
+#ifndef ARM32_PMAP_NEW
 	pmap_link_l2pt(l1pagetable, PTE_BASE, &kernel_ptpt);
+#endif
 
 	/* update the top of the kernel VM */
 	pmap_curmaxkvaddr =
@@ -592,14 +598,17 @@ initarm(argc, argv, bi)
 	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
 #endif
 
+#ifndef ARM32_PMAP_NEW
 	/* Map the page table that maps the kernel pages */
 	pmap_map_entry(l1pagetable, kernel_ptpt.pv_va, kernel_ptpt.pv_pa,
 	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
+#endif
 
 	/* Map a page for entering idle mode */
 	pmap_map_entry(l1pagetable, sa11x0_idle_mem, sa11x0_idle_mem,
 	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
 
+#ifndef ARM32_PMAP_NEW
 	/*
 	 * Map entries in the page table used to map PTE's
 	 * Basically every kernel page table gets mapped here
@@ -622,15 +631,12 @@ initarm(argc, argv, bi)
 	}
 	pmap_map_entry(l1pagetable, PTE_BASE + (PTE_BASE >> (PGSHIFT-2)),
-#ifdef ARM32_PMAP_NEW
-	    kernel_ptpt.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
-#else
 	    kernel_ptpt.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
-#endif
 	pmap_map_entry(l1pagetable,
 	    PTE_BASE + (SAIPIO_BASE >> (PGSHIFT-2)),
 	    kernel_pt_table[KERNEL_PT_IO].pv_pa,
 	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+#endif
 
 	/* Map the vector page. */
 	pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
diff --git a/sys/arch/netwinder/netwinder/netwinder_machdep.c b/sys/arch/netwinder/netwinder/netwinder_machdep.c
index 57a8cca47979..52faf7a4623e 100644
--- a/sys/arch/netwinder/netwinder/netwinder_machdep.c
+++ b/sys/arch/netwinder/netwinder/netwinder_machdep.c
@@ -1,4 +1,4 @@
-/* $NetBSD: netwinder_machdep.c,v 1.46 2003/04/26 11:05:16 ragge Exp $ */
+/* $NetBSD: netwinder_machdep.c,v 1.47 2003/05/02 23:22:35 thorpej Exp $ */
 
 /*
  * Copyright (c) 1997,1998 Mark Brinicombe.
@@ -366,7 +366,9 @@ initarm(void *arg)
 	u_int l1pagetable;
 	extern char _end[];
 	pv_addr_t kernel_l1pt;
+#ifndef ARM32_PMAP_NEW
 	pv_addr_t kernel_ptpt;
+#endif
 
 	/*
 	 * Set up a diagnostic console so we can see what's going
@@ -513,8 +515,10 @@ initarm(void *arg)
 	 */
 	alloc_pages(systempage.pv_pa, 1);
 
+#ifndef ARM32_PMAP_NEW
 	/* Allocate a page for the page table to map kernel page tables*/
 	valloc_pages(kernel_ptpt, L2_TABLE_SIZE / PAGE_SIZE);
+#endif
 
 	/* Allocate stacks for all modes */
 	valloc_pages(irqstack, IRQ_STACK_SIZE);
@@ -559,7 +563,9 @@ initarm(void *arg)
 	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
 		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
 		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);
+#ifndef ARM32_PMAP_NEW
 	pmap_link_l2pt(l1pagetable, PTE_BASE, &kernel_ptpt);
+#endif
 
 	/* update the top of the kernel VM */
 	pmap_curmaxkvaddr =
@@ -626,13 +632,10 @@ initarm(void *arg)
 	}
 #endif
 
+#ifndef ARM32_PMAP_NEW
 	/* Map the page table that maps the kernel pages */
 	pmap_map_entry(l1pagetable, kernel_ptpt.pv_va, kernel_ptpt.pv_pa,
-#ifndef ARM32_PMAP_NEW
 	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
-#else
-	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
-#endif
 
 	/*
 	 * Map entries in the page table used to map PTE's
@@ -642,19 +645,11 @@ initarm(void *arg)
 	pmap_map_entry(l1pagetable,
 	    PTE_BASE + (KERNEL_BASE >> (PGSHIFT-2)),
 	    kernel_pt_table[KERNEL_PT_KERNEL].pv_pa,
-#ifndef ARM32_PMAP_NEW
 	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
-#else
-	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
-#endif
 	pmap_map_entry(l1pagetable,
 	    PTE_BASE + (PTE_BASE >> (PGSHIFT-2)),
 	    kernel_ptpt.pv_pa,
-#ifndef ARM32_PMAP_NEW
 	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
-#else
-	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
-#endif
 	pmap_map_entry(l1pagetable,
 	    PTE_BASE + (0x00000000 >> (PGSHIFT-2)),
 	    kernel_pt_table[KERNEL_PT_SYS].pv_pa,
@@ -665,6 +660,7 @@ initarm(void *arg)
 		    (loop * 0x00400000)) >> (PGSHIFT-2)),
 		    kernel_pt_table[KERNEL_PT_VMDATA + loop].pv_pa,
 		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+#endif
 
 	/* Map the vector page. */
 	pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
diff --git a/sys/arch/shark/ofw/ofw.c b/sys/arch/shark/ofw/ofw.c
index b4ace1da56fe..6045d37b8741 100644
--- a/sys/arch/shark/ofw/ofw.c
+++ b/sys/arch/shark/ofw/ofw.c
@@ -1,4 +1,4 @@
-/* $NetBSD: ofw.c,v 1.24 2003/04/18 11:11:52 scw Exp $ */
+/* $NetBSD: ofw.c,v 1.25 2003/05/02 23:22:36 thorpej Exp $ */
 
 /*
  * Copyright 1997
@@ -209,7 +209,11 @@ static ofw_handle_t ofw_client_services_handle;
 
 static void ofw_callbackhandler __P((void *));
+#ifndef ARM32_PMAP_NEW
 static void ofw_construct_proc0_addrspace __P((pv_addr_t *, pv_addr_t *));
+#else
+static void ofw_construct_proc0_addrspace __P((pv_addr_t *));
+#endif
 static void ofw_getphysmeminfo __P((void));
 static void ofw_getvirttranslations __P((void));
 static void *ofw_malloc(vm_size_t size);
@@ -755,11 +759,17 @@ void
 ofw_configmem(void)
 {
 	pv_addr_t proc0_ttbbase;
+#ifndef ARM32_PMAP_NEW
 	pv_addr_t proc0_ptpt;
+#endif
 	int i;
 
 	/* Set-up proc0 address space. */
+#ifndef ARM32_PMAP_NEW
 	ofw_construct_proc0_addrspace(&proc0_ttbbase, &proc0_ptpt);
+#else
+	ofw_construct_proc0_addrspace(&proc0_ttbbase);
+#endif
 
 	/*
 	 * Get a dump of OFW's picture of physical memory.
@@ -1288,9 +1298,11 @@ ofw_callbackhandler(v)
 }
 
 static void
-ofw_construct_proc0_addrspace(proc0_ttbbase, proc0_ptpt)
-	pv_addr_t *proc0_ttbbase;
-	pv_addr_t *proc0_ptpt;
+#ifndef ARM32_PMAP_NEW
+ofw_construct_proc0_addrspace(pv_addr_t *proc0_ttbbase, pv_addr_t *proc0_ptpt)
+#else
+ofw_construct_proc0_addrspace(pv_addr_t *proc0_ttbbase)
+#endif
 {
 	int i, oft;
 #ifndef ARM32_PMAP_NEW
@@ -1304,7 +1316,6 @@ ofw_construct_proc0_addrspace(proc0_ttbbase, proc0_ptpt)
 	pv_addr_t msgbuf;
 #else
 	static pv_addr_t proc0_pagedir;
-	static pv_addr_t proc0_pt_pte;
 	static pv_addr_t proc0_pt_sys;
 	static pv_addr_t proc0_pt_kernel[KERNEL_IMG_PTS];
 	static pv_addr_t proc0_pt_vmdata[KERNEL_VMDATA_PTS];
@@ -1343,7 +1354,9 @@ ofw_construct_proc0_addrspace(proc0_ttbbase, proc0_ptpt)
 	/* Allocate/initialize space for the proc0, NetBSD-managed */
 	/* page tables that we will be switching to soon. */
 	ofw_claimpages(&virt_freeptr, &proc0_pagedir, L1_TABLE_SIZE);
+#ifndef ARM32_PMAP_NEW
 	ofw_claimpages(&virt_freeptr, &proc0_pt_pte, L2_TABLE_SIZE);
+#endif
 	ofw_claimpages(&virt_freeptr, &proc0_pt_sys, L2_TABLE_SIZE);
 	for (i = 0; i < KERNEL_IMG_PTS; i++)
 		ofw_claimpages(&virt_freeptr, &proc0_pt_kernel[i], L2_TABLE_SIZE);
@@ -1373,8 +1386,9 @@ ofw_construct_proc0_addrspace(proc0_ttbbase, proc0_ptpt)
 	for (i = 0; i < KERNEL_IMG_PTS; i++)
 		pmap_link_l2pt(L1pagetable, KERNEL_BASE + i * 0x00400000,
 		    &proc0_pt_kernel[i]);
-	pmap_link_l2pt(L1pagetable, PTE_BASE,
-	    &proc0_pt_pte);
+#ifndef ARM32_PMAP_NEW
+	pmap_link_l2pt(L1pagetable, PTE_BASE, &proc0_pt_pte);
+#endif
 	for (i = 0; i < KERNEL_VMDATA_PTS; i++)
 		pmap_link_l2pt(L1pagetable, KERNEL_VM_BASE + i * 0x00400000,
 		    &proc0_pt_vmdata[i]);
@@ -1458,6 +1472,7 @@ ofw_construct_proc0_addrspace(proc0_ttbbase, proc0_ptpt)
 	 * we don't want aliases to physical addresses that the kernel
 	 * has-mapped/will-map elsewhere.
 	 */
+#ifndef ARM32_PMAP_NEW
 	ofw_discardmappings(proc0_pt_kernel[KERNEL_IMG_PTS - 1].pv_va,
 	    proc0_pt_sys.pv_va, L2_TABLE_SIZE);
 	for (i = 0; i < KERNEL_IMG_PTS; i++)
@@ -1472,31 +1487,24 @@ ofw_construct_proc0_addrspace(proc0_ttbbase, proc0_ptpt)
 	for (i = 0; i < KERNEL_IO_PTS; i++)
 		ofw_discardmappings(proc0_pt_kernel[KERNEL_IMG_PTS - 1].pv_va,
 		    proc0_pt_io[i].pv_va, L2_TABLE_SIZE);
+#endif
 	ofw_discardmappings(proc0_pt_kernel[KERNEL_IMG_PTS - 1].pv_va,
 	    msgbuf.pv_va, MSGBUFSIZE);
 
+#ifndef ARM32_PMAP_NEW
 	/*
 	 * We did not throw away the proc0_pt_pte and proc0_pagedir
 	 * mappings as well still want them. However we don't want them
 	 * cached ...
 	 * Really these should be uncached when allocated.
 	 */
-#ifndef ARM32_PMAP_NEW
 	pmap_map_entry(L1pagetable, proc0_pt_pte.pv_va,
 	    proc0_pt_pte.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
-#else
-	pmap_map_entry(L1pagetable, proc0_pt_pte.pv_va,
-	    proc0_pt_pte.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
-#endif
 	for (i = 0; i < (L1_TABLE_SIZE / PAGE_SIZE); ++i)
 		pmap_map_entry(L1pagetable,
 		    proc0_pagedir.pv_va + PAGE_SIZE * i,
 		    proc0_pagedir.pv_pa + PAGE_SIZE * i,
-#ifndef ARM32_PMAP_NEW
 		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
-#else
-		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
-#endif
 
 	/*
 	 * Construct the proc0 L2 pagetables that map page tables.
@@ -1512,17 +1520,10 @@ ofw_construct_proc0_addrspace(proc0_ttbbase, proc0_ptpt)
 	    PTE_BASE + ((KERNEL_BASE + i * 0x00400000) >> (PGSHIFT-2)),
 	    proc0_pt_kernel[i].pv_pa,
 	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
-#ifndef ARM32_PMAP_NEW
 	pmap_map_entry(L1pagetable,
 	    PTE_BASE + (PTE_BASE >> (PGSHIFT-2)),
 	    proc0_pt_pte.pv_pa,
 	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
-#else
-	pmap_map_entry(L1pagetable,
-	    PTE_BASE + (PTE_BASE >> (PGSHIFT-2)),
-	    proc0_pt_pte.pv_pa,
-	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
-#endif
 	for (i = 0; i < KERNEL_VMDATA_PTS; i++)
 		pmap_map_entry(L1pagetable,
 		    PTE_BASE + ((KERNEL_VM_BASE + i * 0x00400000)
@@ -1538,6 +1539,7 @@ ofw_construct_proc0_addrspace(proc0_ttbbase, proc0_ptpt)
 		    PTE_BASE + ((IO_VIRT_BASE + i * 0x00400000)
 		    >> (PGSHIFT-2)),
 		    proc0_pt_io[i].pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+#endif
 
 	/* update the top of the kernel VM */
 	pmap_curmaxkvaddr =
@@ -1566,33 +1568,11 @@ ofw_construct_proc0_addrspace(proc0_ttbbase, proc0_ptpt)
 		}
 	}
 
-#ifdef ARM32_PMAP_NEW
-	/*
-	 * This is not a pretty sight, but it works.
-	 */
-	proc0_pt_sys.pv_va = PTE_BASE + (0x00000000 >> (PGSHIFT-2));
-	proc0_pt_pte.pv_va = PTE_BASE + (PTE_BASE >> (PGSHIFT-2));
-	for (i = 0; i < KERNEL_IMG_PTS; i++) {
-		proc0_pt_kernel[i].pv_va = PTE_BASE +
-		    ((KERNEL_BASE + i * 0x00400000) >> (PGSHIFT-2));
-	}
-	for (i = 0; i < KERNEL_VMDATA_PTS; i++) {
-		proc0_pt_vmdata[i].pv_va = PTE_BASE +
-		    ((KERNEL_VM_BASE + i * 0x00400000) >> (PGSHIFT-2));
-	}
-	for (i = 0; i < KERNEL_OFW_PTS; i++) {
-		proc0_pt_ofw[i].pv_va = PTE_BASE +
-		    ((OFW_VIRT_BASE + i * 0x00400000) >> (PGSHIFT-2));
-	}
-	for (i = 0; i < KERNEL_IO_PTS; i++) {
-		proc0_pt_io[i].pv_va = PTE_BASE +
-		    ((IO_VIRT_BASE + i * 0x00400000) >> (PGSHIFT-2));
-	}
-#endif
-
 	/* OUT parameters are the new ttbbase and the pt which maps pts. */
 	*proc0_ttbbase = proc0_pagedir;
+#ifndef ARM32_PMAP_NEW
 	*proc0_ptpt = proc0_pt_pte;
+#endif
 }