Replace the ad-hoc "section mapping table" for static device mappings
with a more generic "devmap" structure that can also handle mappings
made with large and small pages.  Add new pmap routines to enter these
mappings during bootstrap (and "remember" the devmap), and routines to
look up the static mappings once the kernel is running.
This commit is contained in:
thorpej 2003-06-15 17:45:21 +00:00
parent d35db0b328
commit 87d5bba5b3
6 changed files with 147 additions and 187 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.134 2003/05/21 18:07:07 thorpej Exp $ */
/* $NetBSD: pmap.c,v 1.135 2003/06/15 17:45:21 thorpej Exp $ */
/*
* Copyright 2003 Wasabi Systems, Inc.
@ -210,7 +210,7 @@
#include <machine/param.h>
#include <arm/arm32/katelib.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.134 2003/05/21 18:07:07 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.135 2003/06/15 17:45:21 thorpej Exp $");
#ifdef PMAP_DEBUG
#define PDEBUG(_lev_,_stat_) \
@ -4494,6 +4494,75 @@ pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
return (size);
}
/********************** Static device map routines ***************************/
/* Devmap table registered at bootstrap; NULL until pmap_devmap_bootstrap(). */
static const struct pmap_devmap *pmap_devmap_table;
/*
* Map all of the static regions in the devmap table, and remember
* the devmap table so other parts of the kernel can look up entries
* later.
*/
/*
 * pmap_devmap_bootstrap:
 *
 *	Enter every static device mapping described by "table" into the
 *	bootstrap L1 page table rooted at "l1pt", and remember the table
 *	so the pmap_devmap_find_*() routines can consult it later.
 *	Must be called exactly once, before any lookups.
 */
void
pmap_devmap_bootstrap(vaddr_t l1pt, const struct pmap_devmap *table)
{
	const struct pmap_devmap *pd;

	KASSERT(pmap_devmap_table == NULL);
	pmap_devmap_table = table;

	/* The table is terminated by an entry with pd_size == 0. */
	for (pd = pmap_devmap_table; pd->pd_size != 0; pd++) {
#ifdef VERBOSE_INIT_ARM
		printf("devmap: %08lx -> %08lx @ %08lx\n",
		    pd->pd_pa,
		    pd->pd_pa + pd->pd_size - 1,
		    pd->pd_va);
#endif
		pmap_map_chunk(l1pt, pd->pd_va, pd->pd_pa,
		    pd->pd_size, pd->pd_prot, pd->pd_cache);
	}
}
/*
 * pmap_devmap_find_pa:
 *
 *	Return the devmap entry whose physical range wholly contains
 *	[pa, pa + size), or NULL if there is none (or no table has
 *	been registered yet).
 *
 *	The containment test is written without computing "pa + size"
 *	or "pd_pa + pd_size", so it cannot falsely succeed if those
 *	sums would wrap around the top of the address space.
 */
const struct pmap_devmap *
pmap_devmap_find_pa(paddr_t pa, psize_t size)
{
	const struct pmap_devmap *pd;

	if (pmap_devmap_table == NULL)
		return (NULL);

	for (pd = pmap_devmap_table; pd->pd_size != 0; pd++) {
		/*
		 * pa in [pd_pa, pd_pa + pd_size) and the requested
		 * length fits in the remainder of the region.
		 */
		if (pa >= pd->pd_pa &&
		    size <= pd->pd_size &&
		    pa - pd->pd_pa <= pd->pd_size - size)
			return (pd);
	}

	return (NULL);
}
/*
 * pmap_devmap_find_va:
 *
 *	Return the devmap entry whose virtual range wholly contains
 *	[va, va + size), or NULL if there is none (or no table has
 *	been registered yet).
 *
 *	As with pmap_devmap_find_pa(), the test avoids computing
 *	"va + size", which could wrap for ranges near the top of
 *	the virtual address space and make the check falsely succeed.
 */
const struct pmap_devmap *
pmap_devmap_find_va(vaddr_t va, vsize_t size)
{
	const struct pmap_devmap *pd;

	if (pmap_devmap_table == NULL)
		return (NULL);

	for (pd = pmap_devmap_table; pd->pd_size != 0; pd++) {
		/*
		 * va in [pd_va, pd_va + pd_size) and the requested
		 * length fits in the remainder of the region.
		 */
		if (va >= pd->pd_va &&
		    size <= pd->pd_size &&
		    va - pd->pd_va <= pd->pd_size - size)
			return (pd);
	}

	return (NULL);
}
/********************** PTE initialization routines **************************/
/*

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.72 2003/05/21 18:04:43 thorpej Exp $ */
/* $NetBSD: pmap.h,v 1.73 2003/06/15 17:45:23 thorpej Exp $ */
/*
* Copyright (c) 2002, 2003 Wasabi Systems, Inc.
@ -149,6 +149,18 @@ union pmap_cache_state {
*/
#define PMAP_CACHE_STATE_ALL 0xffffffffu
/*
* This structure is used by machine-dependent code to describe
* static mappings of devices, created at bootstrap time.
*/
/* An array of these is terminated by a sentinel entry with pd_size == 0. */
struct pmap_devmap {
vaddr_t pd_va; /* virtual address */
paddr_t pd_pa; /* physical address */
psize_t pd_size; /* size of region */
vm_prot_t pd_prot; /* protection code */
int pd_cache; /* cache attributes */
};
/*
* The pmap structure itself
*/
@ -252,11 +264,15 @@ void pmap_postinit(void);
void vector_page_setprot(int);
const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);
/* Bootstrapping routines. */
void pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
void pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
/*
* Special page zero routine for use by the idle loop (no cache cleans).

View File

@ -1,4 +1,4 @@
/* $NetBSD: brh_machdep.c,v 1.15 2003/06/14 17:01:09 thorpej Exp $ */
/* $NetBSD: brh_machdep.c,v 1.16 2003/06/15 17:45:24 thorpej Exp $ */
/*
* Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
@ -285,17 +285,8 @@ cpu_reboot(int howto, char *bootstr)
cpu_reset();
}
/*
* Mapping table for core kernel memory. This memory is mapped at init
* time with section mappings.
*/
struct l1_sec_map {
vaddr_t va;
vaddr_t pa;
vsize_t size;
vm_prot_t prot;
int cache;
} l1_sec_table[] = {
/* Static device mappings. */
static const struct pmap_devmap brh_devmap[] = {
{
BRH_PCI_CONF_VBASE,
BECC_PCI_CONF_BASE,
@ -654,26 +645,8 @@ initarm(void *arg)
pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
/*
* Map devices we can map w/ section mappings.
*/
loop = 0;
while (l1_sec_table[loop].size) {
vm_size_t sz;
#ifdef VERBOSE_INIT_ARM
printf("%08lx -> %08lx @ %08lx\n", l1_sec_table[loop].pa,
l1_sec_table[loop].pa + l1_sec_table[loop].size - 1,
l1_sec_table[loop].va);
#endif
for (sz = 0; sz < l1_sec_table[loop].size; sz += L1_S_SIZE)
pmap_map_section(l1pagetable,
l1_sec_table[loop].va + sz,
l1_sec_table[loop].pa + sz,
l1_sec_table[loop].prot,
l1_sec_table[loop].cache);
++loop;
}
/* Map the statically mapped devices. */
pmap_devmap_bootstrap(l1pagetable, brh_devmap);
/*
* Give the XScale global cache clean code an appropriately

View File

@ -1,4 +1,4 @@
/* $NetBSD: integrator_machdep.c,v 1.41 2003/06/14 17:01:10 thorpej Exp $ */
/* $NetBSD: integrator_machdep.c,v 1.42 2003/06/15 17:45:24 thorpej Exp $ */
/*
* Copyright (c) 2001,2002 ARM Ltd
@ -319,17 +319,8 @@ cpu_reboot(int howto, char *bootstr)
/*NOTREACHED*/
}
/*
* Mapping table for core kernel memory. This memory is mapped at init
* time with section mappings.
*/
struct l1_sec_map {
vaddr_t va;
vaddr_t pa;
vsize_t size;
vm_prot_t prot;
int cache;
} l1_sec_table[] = {
/* Statically mapped devices. */
static const struct pmap_devmap integrator_devmap[] = {
#if NPLCOM > 0 && defined(PLCONSOLE)
{
UART0_BOOT_BASE,
@ -361,7 +352,8 @@ struct l1_sec_map {
IFPGA_PCI_CONF_BASE,
IFPGA_PCI_CONF_VSIZE,
VM_PROT_READ|VM_PROT_WRITE,
PTE_NOCACHE },
PTE_NOCACHE
},
#endif
{
@ -672,24 +664,9 @@ initarm(void *arg)
pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#endif
/* Map the core memory needed before autoconfig */
loop = 0;
while (l1_sec_table[loop].size) {
vm_size_t sz;
#ifdef VERBOSE_INIT_ARM
printf("%08lx -> %08lx @ %08lx\n", l1_sec_table[loop].pa,
l1_sec_table[loop].pa + l1_sec_table[loop].size - 1,
l1_sec_table[loop].va);
#endif
for (sz = 0; sz < l1_sec_table[loop].size; sz += L1_S_SIZE)
pmap_map_section(l1pagetable,
l1_sec_table[loop].va + sz,
l1_sec_table[loop].pa + sz,
l1_sec_table[loop].prot,
l1_sec_table[loop].cache);
++loop;
}
/* Map the statically mapped devices. */
pmap_devmap_bootstrap(l1pagetable, integrator_devmap);
/*
* Now we have the real page tables in place so we can switch to them.

View File

@ -1,4 +1,4 @@
/* $NetBSD: iq80310_machdep.c,v 1.58 2003/06/14 17:01:10 thorpej Exp $ */
/* $NetBSD: iq80310_machdep.c,v 1.59 2003/06/15 17:45:25 thorpej Exp $ */
/*
* Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
@ -286,17 +286,8 @@ cpu_reboot(int howto, char *bootstr)
/*NOTREACHED*/
}
/*
* Mapping table for core kernel memory. This memory is mapped at init
* time with section mappings.
*/
struct l1_sec_map {
vaddr_t va;
vaddr_t pa;
vsize_t size;
vm_prot_t prot;
int cache;
} l1_sec_table[] = {
/* Static device mappings. */
static const struct pmap_devmap iq80310_devmap[] = {
/*
* Map the on-board devices VA == PA so that we can access them
* with the MMU on or off.
@ -308,6 +299,27 @@ struct l1_sec_map {
VM_PROT_READ|VM_PROT_WRITE,
PTE_NOCACHE,
},
{
IQ80310_PIOW_VBASE,
I80312_PCI_XLATE_PIOW_BASE,
I80312_PCI_XLATE_IOSIZE,
VM_PROT_READ|VM_PROT_WRITE,
PTE_NOCACHE,
},
{
IQ80310_SIOW_VBASE,
I80312_PCI_XLATE_SIOW_BASE,
I80312_PCI_XLATE_IOSIZE,
VM_PROT_READ|VM_PROT_WRITE,
PTE_NOCACHE,
},
{
IQ80310_80312_VBASE,
I80312_PMMR_BASE,
I80312_PMMR_SIZE,
VM_PROT_READ|VM_PROT_WRITE,
PTE_NOCACHE,
},
{
0,
@ -630,60 +642,8 @@ initarm(void *arg)
pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
/*
* Map devices we can map w/ section mappings.
*/
loop = 0;
while (l1_sec_table[loop].size) {
vm_size_t sz;
#ifdef VERBOSE_INIT_ARM
printf("%08lx -> %08lx @ %08lx\n", l1_sec_table[loop].pa,
l1_sec_table[loop].pa + l1_sec_table[loop].size - 1,
l1_sec_table[loop].va);
#endif
for (sz = 0; sz < l1_sec_table[loop].size; sz += L1_S_SIZE)
pmap_map_section(l1pagetable,
l1_sec_table[loop].va + sz,
l1_sec_table[loop].pa + sz,
l1_sec_table[loop].prot,
l1_sec_table[loop].cache);
++loop;
}
/*
* Map the PCI I/O spaces and i80312 registers. These are too
* small to be mapped w/ section mappings.
*/
#ifdef VERBOSE_INIT_ARM
printf("Mapping PIOW 0x%08lx -> 0x%08lx @ 0x%08lx\n",
I80312_PCI_XLATE_PIOW_BASE,
I80312_PCI_XLATE_PIOW_BASE + I80312_PCI_XLATE_IOSIZE - 1,
IQ80310_PIOW_VBASE);
#endif
pmap_map_chunk(l1pagetable, IQ80310_PIOW_VBASE,
I80312_PCI_XLATE_PIOW_BASE, I80312_PCI_XLATE_IOSIZE,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
#ifdef VERBOSE_INIT_ARM
printf("Mapping SIOW 0x%08lx -> 0x%08lx @ 0x%08lx\n",
I80312_PCI_XLATE_SIOW_BASE,
I80312_PCI_XLATE_SIOW_BASE + I80312_PCI_XLATE_IOSIZE - 1,
IQ80310_SIOW_VBASE);
#endif
pmap_map_chunk(l1pagetable, IQ80310_SIOW_VBASE,
I80312_PCI_XLATE_SIOW_BASE, I80312_PCI_XLATE_IOSIZE,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
#ifdef VERBOSE_INIT_ARM
printf("Mapping 80312 0x%08lx -> 0x%08lx @ 0x%08lx\n",
I80312_PMMR_BASE,
I80312_PMMR_BASE + I80312_PMMR_SIZE - 1,
IQ80310_80312_VBASE);
#endif
pmap_map_chunk(l1pagetable, IQ80310_80312_VBASE,
I80312_PMMR_BASE, I80312_PMMR_SIZE,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/* Map the statically mapped devices. */
pmap_devmap_bootstrap(l1pagetable, iq80310_devmap);
/*
* Give the XScale global cache clean code an appropriately

View File

@ -1,4 +1,4 @@
/* $NetBSD: iq80321_machdep.c,v 1.27 2003/06/14 17:01:10 thorpej Exp $ */
/* $NetBSD: iq80321_machdep.c,v 1.28 2003/06/15 17:45:25 thorpej Exp $ */
/*
* Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
@ -321,17 +321,8 @@ cpu_reboot(int howto, char *bootstr)
for (;;);
}
/*
* Mapping table for core kernel memory. This memory is mapped at init
* time with section mappings.
*/
struct l1_sec_map {
vaddr_t va;
vaddr_t pa;
vsize_t size;
vm_prot_t prot;
int cache;
} l1_sec_table[] = {
/* Static device mappings. */
static const struct pmap_devmap iq80321_devmap[] = {
/*
* Map the on-board devices VA == PA so that we can access them
* with the MMU on or off.
@ -345,6 +336,22 @@ struct l1_sec_map {
},
{
IQ80321_IOW_VBASE,
VERDE_OUT_XLATE_IO_WIN0_BASE,
VERDE_OUT_XLATE_IO_WIN_SIZE,
VM_PROT_READ|VM_PROT_WRITE,
PTE_NOCACHE,
},
{
IQ80321_80321_VBASE,
VERDE_PMMR_BASE,
VERDE_PMMR_SIZE,
VM_PROT_READ|VM_PROT_WRITE,
PTE_NOCACHE,
},
{
0,
0,
0,
@ -648,50 +655,8 @@ initarm(void *arg)
pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
/*
* Map devices we can map w/ section mappings.
*/
loop = 0;
while (l1_sec_table[loop].size) {
vm_size_t sz;
#ifdef VERBOSE_INIT_ARM
printf("%08lx -> %08lx @ %08lx\n", l1_sec_table[loop].pa,
l1_sec_table[loop].pa + l1_sec_table[loop].size - 1,
l1_sec_table[loop].va);
#endif
for (sz = 0; sz < l1_sec_table[loop].size; sz += L1_S_SIZE)
pmap_map_section(l1pagetable,
l1_sec_table[loop].va + sz,
l1_sec_table[loop].pa + sz,
l1_sec_table[loop].prot,
l1_sec_table[loop].cache);
++loop;
}
/*
* Map the PCI I/O spaces and i80321 registers. These are too
* small to be mapped w/ section mappings.
*/
#ifdef VERBOSE_INIT_ARM
printf("Mapping IOW 0x%08lx -> 0x%08lx @ 0x%08lx\n",
VERDE_OUT_XLATE_IO_WIN0_BASE,
VERDE_OUT_XLATE_IO_WIN0_BASE + VERDE_OUT_XLATE_IO_WIN_SIZE - 1,
IQ80321_IOW_VBASE);
#endif
pmap_map_chunk(l1pagetable, IQ80321_IOW_VBASE,
VERDE_OUT_XLATE_IO_WIN0_BASE, VERDE_OUT_XLATE_IO_WIN_SIZE,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
#ifdef VERBOSE_INIT_ARM
printf("Mapping 80321 0x%08lx -> 0x%08lx @ 0x%08lx\n",
VERDE_PMMR_BASE,
VERDE_PMMR_BASE + VERDE_PMMR_SIZE - 1,
IQ80321_80321_VBASE);
#endif
pmap_map_chunk(l1pagetable, IQ80321_80321_VBASE,
VERDE_PMMR_BASE, VERDE_PMMR_SIZE,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/* Map the statically mapped devices. */
pmap_devmap_bootstrap(l1pagetable, iq80321_devmap);
/*
* Give the XScale global cache clean code an appropriately