A couple of tidy-ups to pmap:

pmap_t -> struct pmap * in pmap.c and pmap.h
kernel_pmap -> pmap_kernel() everywhere.

Compiled and booted on riscpc and cats.
chris 2001-07-28 18:12:43 +00:00
parent e2de9310c3
commit 9f04d8d670
21 changed files with 131 additions and 107 deletions
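For readers unfamiliar with the idiom: pmap_kernel() on NetBSD is conventionally a macro that takes the address of the statically allocated kernel pmap, which makes a separately initialized kernel_pmap pointer redundant; hence its removal from pmap.c and pmap.h below. A minimal sketch of the assumed shape (field names taken from the diff below, not the verbatim arm32 definition):

    /*
     * Sketch of the pmap_kernel() idiom this commit adopts (assumed
     * shape, not the verbatim NetBSD/arm32 definition).  The kernel
     * pmap lives in static storage; pmap_kernel() simply takes its
     * address, so pmap_bootstrap() no longer has to set up a global
     * pointer by hand.
     */
    struct pmap {
            int pm_count;           /* reference count */
            /* ... pm_pdir, pm_pptpt, pm_vptpt, pm_lock, etc. ... */
    };

    struct pmap kernel_pmap_store;  /* as in pmap.c */
    #define pmap_kernel()   (&kernel_pmap_store)

    /* old style: pmap_remove(kernel_pmap, va, va + size);   */
    /* new style: pmap_remove(pmap_kernel(), va, va + size); */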

@@ -1,4 +1,4 @@
-/* $NetBSD: arm32_machdep.c,v 1.1 2001/07/28 13:28:03 chris Exp $ */
+/* $NetBSD: arm32_machdep.c,v 1.2 2001/07/28 18:12:43 chris Exp $ */
 /*
  * Copyright (c) 1994-1998 Mark Brinicombe.
@@ -448,7 +448,7 @@ cpu_startup()
 curpcb->pcb_flags = 0;
 curpcb->pcb_und_sp = (u_int)proc0.p_addr + USPACE_UNDEF_STACK_TOP;
 curpcb->pcb_sp = (u_int)proc0.p_addr + USPACE_SVC_STACK_TOP;
-(void) pmap_extract(kernel_pmap, (vaddr_t)(kernel_pmap)->pm_pdir,
+(void) pmap_extract(pmap_kernel(), (vaddr_t)(pmap_kernel())->pm_pdir,
 (paddr_t *)&curpcb->pcb_pagedir);
 curpcb->pcb_tf = (struct trapframe *)curpcb->pcb_sp - 1;

@@ -1,4 +1,4 @@
-/* $NetBSD: db_interface.c,v 1.6 2001/06/24 07:32:20 chs Exp $ */
+/* $NetBSD: db_interface.c,v 1.7 2001/07/28 18:12:43 chris Exp $ */
 /*
  * Copyright (c) 1996 Scott K. Stevens
@@ -190,7 +190,7 @@ db_validate_address(addr)
  * otherwise use the kernel pmap's page directory.
  */
 if (!p || !p->p_vmspace || !p->p_vmspace->vm_map.pmap)
-pdep = kernel_pmap->pm_pdir;
+pdep = pmap_kernel()->pm_pdir;
 else
 pdep = p->p_vmspace->vm_map.pmap->pm_pdir;

@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.14 2001/07/08 19:44:43 chs Exp $ */
+/* $NetBSD: pmap.c,v 1.15 2001/07/28 18:12:43 chris Exp $ */
 /*
  * Copyright (c) 2001 Richard Earnshaw
@@ -151,7 +151,6 @@ int pmap_debug_level = -2;
 #endif /* PMAP_DEBUG */
 struct pmap kernel_pmap_store;
-pmap_t kernel_pmap;
 /*
  * pool that pmap structures are allocated from
@@ -192,7 +191,7 @@ extern pv_addr_t systempage;
 #define ALLOC_PAGE_HOOK(x, s) \
 x.va = virtual_start; \
-x.pte = (pt_entry_t *)pmap_pte(kernel_pmap, virtual_start); \
+x.pte = (pt_entry_t *)pmap_pte(pmap_kernel(), virtual_start); \
 virtual_start += s;
 /* Variables used by the L1 page table queue code */
@@ -206,20 +205,20 @@ int l1pt_create_count; /* stat - L1's create count */
 int l1pt_reuse_count; /* stat - L1's reused count */
 /* Local function prototypes (not used outside this file) */
-pt_entry_t *pmap_pte __P((pmap_t pmap, vaddr_t va));
+pt_entry_t *pmap_pte __P((struct pmap *pmap, vaddr_t va));
 void map_pagetable __P((vaddr_t pagetable, vaddr_t va,
 paddr_t pa, unsigned int flags));
 void pmap_copy_on_write __P((paddr_t pa));
-void pmap_pinit __P((pmap_t));
-void pmap_freepagedir __P((pmap_t));
-void pmap_release __P((pmap_t));
+void pmap_pinit __P((struct pmap *));
+void pmap_freepagedir __P((struct pmap *));
+void pmap_release __P((struct pmap *));
 /* Other function prototypes */
 extern void bzero_page __P((vaddr_t));
 extern void bcopy_page __P((vaddr_t, vaddr_t));
 struct l1pt *pmap_alloc_l1pt __P((void));
-static __inline void pmap_map_in_l1 __P((pmap_t pmap, vaddr_t va,
+static __inline void pmap_map_in_l1 __P((struct pmap *pmap, vaddr_t va,
 vaddr_t l2pa));
 static pt_entry_t *pmap_map_ptes __P((struct pmap *));
@@ -450,7 +449,7 @@ pmap_collect_pv()
 /*__inline*/ void
 pmap_enter_pv(pmap, va, pv, flags)
-pmap_t pmap;
+struct pmap *pmap;
 vaddr_t va;
 struct pv_entry *pv;
 u_int flags;
@@ -514,7 +513,7 @@ pmap_enter_pv(pmap, va, pv, flags)
 /*__inline*/ void
 pmap_remove_pv(pmap, va, pv)
-pmap_t pmap;
+struct pmap *pmap;
 vaddr_t va;
 struct pv_entry *pv;
 {
@@ -571,7 +570,7 @@ pmap_remove_pv(pmap, va, pv)
 /*__inline */ u_int
 pmap_modify_pv(pmap, va, pv, bic_mask, eor_mask)
-pmap_t pmap;
+struct pmap *pmap;
 vaddr_t va;
 struct pv_entry *pv;
 u_int bic_mask;
@@ -628,7 +627,7 @@ pmap_modify_pv(pmap, va, pv, bic_mask, eor_mask)
  */
 static /*__inline*/ void
 pmap_map_in_l1(pmap, va, l2pa)
-pmap_t pmap;
+struct pmap *pmap;
 vaddr_t va, l2pa;
 {
 vaddr_t ptva;
@@ -658,7 +657,7 @@ pmap_map_in_l1(pmap, va, l2pa)
 #if 0
 static /*__inline*/ void
 pmap_unmap_in_l1(pmap, va)
-pmap_t pmap;
+struct pmap *pmap;
 vaddr_t va;
 {
 vaddr_t ptva;
@@ -734,13 +733,11 @@ pmap_bootstrap(kernel_l1pt, kernel_ptpt)
 #endif
 vsize_t size;
-kernel_pmap = &kernel_pmap_store;
-kernel_pmap->pm_pdir = kernel_l1pt;
-kernel_pmap->pm_pptpt = kernel_ptpt.pv_pa;
-kernel_pmap->pm_vptpt = kernel_ptpt.pv_va;
-simple_lock_init(&kernel_pmap->pm_lock);
-kernel_pmap->pm_count = 1;
+pmap_kernel()->pm_pdir = kernel_l1pt;
+pmap_kernel()->pm_pptpt = kernel_ptpt.pv_pa;
+pmap_kernel()->pm_vptpt = kernel_ptpt.pv_va;
+simple_lock_init(&pmap_kernel()->pm_lock);
+pmap_kernel()->pm_count = 1;
 /*
  * Initialize PAGE_SIZE-dependent variables.
@@ -837,7 +834,7 @@ pmap_bootstrap(kernel_l1pt, kernel_ptpt)
 virtual_start += NBPG;
 msgbufaddr = (caddr_t)virtual_start;
-msgbufpte = (pt_entry_t)pmap_pte(kernel_pmap, virtual_start);
+msgbufpte = (pt_entry_t)pmap_pte(pmap_kernel(), virtual_start);
 virtual_start += round_page(MSGBUFSIZE);
 size = npages * sizeof(struct pv_entry);
@@ -966,7 +963,7 @@ pmap_postinit()
 pmap_t
 pmap_create()
 {
-pmap_t pmap;
+struct pmap *pmap;
 /*
  * Fetch pmap entry from the pool
@@ -1067,7 +1064,7 @@ pmap_free_l1pt(pt)
 struct l1pt *pt;
 {
 /* Separate the physical memory for the virtual space */
-pmap_remove(kernel_pmap, pt->pt_va, pt->pt_va + PD_SIZE);
+pmap_remove(pmap_kernel(), pt->pt_va, pt->pt_va + PD_SIZE);
 pmap_update();
 /* Return the physical memory */
@@ -1131,7 +1128,7 @@ pmap_allocpagedir(pmap)
 if (!(pt->pt_flags & PTFLAG_KPT)) {
 /* Duplicate the kernel mapping i.e. all mappings 0xf0000000+ */
-bcopy((char *)kernel_pmap->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
+bcopy((char *)pmap_kernel()->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
 (char *)pmap->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
 KERNEL_PD_SIZE);
 pt->pt_flags |= PTFLAG_KPT;
@@ -1151,11 +1148,11 @@ pmap_allocpagedir(pmap)
 return(ENOMEM);
 }
-(void) pmap_extract(kernel_pmap, pmap->pm_vptpt, &pmap->pm_pptpt);
+(void) pmap_extract(pmap_kernel(), pmap->pm_vptpt, &pmap->pm_pptpt);
 pmap->pm_pptpt &= PG_FRAME;
 /* Revoke cacheability and bufferability */
 /* XXX should be done better than this */
-pte = pmap_pte(kernel_pmap, pmap->pm_vptpt);
+pte = pmap_pte(pmap_kernel(), pmap->pm_vptpt);
 *pte = *pte & ~(PT_C | PT_B);
 /* Wire in this page table */
@@ -1224,7 +1221,7 @@ pmap_pinit(pmap)
 void
 pmap_freepagedir(pmap)
-pmap_t pmap;
+struct pmap *pmap;
 {
 /* Free the memory used for the page table mapping */
 if (pmap->pm_vptpt != 0)
@@ -1255,7 +1252,7 @@ pmap_freepagedir(pmap)
 void
 pmap_destroy(pmap)
-pmap_t pmap;
+struct pmap *pmap;
 {
 int count;
@@ -1281,7 +1278,7 @@ pmap_destroy(pmap)
 void
 pmap_release(pmap)
-pmap_t pmap;
+struct pmap *pmap;
 {
 struct vm_page *page;
 pt_entry_t *pte;
@@ -1321,14 +1318,14 @@ pmap_release(pmap)
 /*
- * void pmap_reference(pmap_t pmap)
+ * void pmap_reference(struct pmap *pmap)
  *
  * Add a reference to the specified pmap.
  */
 void
 pmap_reference(pmap)
-pmap_t pmap;
+struct pmap *pmap;
 {
 if (pmap == NULL)
 return;
@@ -1364,10 +1361,10 @@ void
 pmap_activate(p)
 struct proc *p;
 {
-pmap_t pmap = p->p_vmspace->vm_map.pmap;
+struct pmap *pmap = p->p_vmspace->vm_map.pmap;
 struct pcb *pcb = &p->p_addr->u_pcb;
-(void) pmap_extract(kernel_pmap, (vaddr_t)pmap->pm_pdir,
+(void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_pdir,
 (paddr_t *)&pcb->pcb_pagedir);
 PDEBUG(0, printf("pmap_activate: p=%p pmap=%p pcb=%p pdir=%p l1=%p\n",
@@ -1566,7 +1563,7 @@ pmap_next_phys_page(addr)
 #if 0
 void
 pmap_pte_addref(pmap, va)
-pmap_t pmap;
+struct pmap *pmap;
 vaddr_t va;
 {
 pd_entry_t *pde;
@@ -1588,7 +1585,7 @@ pmap_pte_addref(pmap, va)
 void
 pmap_pte_delref(pmap, va)
-pmap_t pmap;
+struct pmap *pmap;
 vaddr_t va;
 {
 pd_entry_t *pde;
@@ -1748,7 +1745,7 @@ pmap_vac_me_harder(struct pmap *pmap, struct pv_entry *pv, pt_entry_t *ptes,
 void
 pmap_remove(pmap, sva, eva)
-pmap_t pmap;
+struct pmap *pmap;
 vaddr_t sva;
 vaddr_t eva;
 {
@@ -1782,7 +1779,7 @@ pmap_remove(pmap, sva, eva)
 pte = &ptes[arm_byte_to_page(sva)];
 /* Note if the pmap is active thus require cache and tlb cleans */
 if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap)
-|| (pmap == kernel_pmap))
+|| (pmap == pmap_kernel()))
 pmap_active = 1;
 else
 pmap_active = 0;
@@ -1900,7 +1897,7 @@ pmap_remove_all(pa)
 paddr_t pa;
 {
 struct pv_entry *ph, *pv, *npv;
-pmap_t pmap;
+struct pmap *pmap;
 pt_entry_t *pte, *ptes;
 int s;
@@ -1976,7 +1973,7 @@ reduce wiring count on page table pages as references drop
 void
 pmap_protect(pmap, sva, eva, prot)
-pmap_t pmap;
+struct pmap *pmap;
 vaddr_t sva;
 vaddr_t eva;
 vm_prot_t prot;
@@ -2069,7 +2066,7 @@ next:
 }
 /*
- * void pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
+ * void pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
  * int flags)
  *
  * Insert the given physical page (p) at
@@ -2086,7 +2083,7 @@ next:
 int
 pmap_enter(pmap, va, pa, prot, flags)
-pmap_t pmap;
+struct pmap *pmap;
 vaddr_t va;
 paddr_t pa;
 vm_prot_t prot;
@@ -2283,7 +2280,7 @@ pmap_enter(pmap, va, pa, prot, flags)
  */
 ptes = pmap_map_ptes(pmap);
 if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap)
-|| (pmap == kernel_pmap))
+|| (pmap == pmap_kernel()))
 pmap_active = TRUE;
 pmap_vac_me_harder(pmap, pv, ptes, pmap_active);
 pmap_unmap_ptes(pmap);
@@ -2393,7 +2390,7 @@ pmap_page_protect(pg, prot)
 void
 pmap_unwire(pmap, va)
-pmap_t pmap;
+struct pmap *pmap;
 vaddr_t va;
 {
 pt_entry_t *pte;
@@ -2423,7 +2420,7 @@ pmap_unwire(pmap, va)
 }
 /*
- * pt_entry_t *pmap_pte(pmap_t pmap, vaddr_t va)
+ * pt_entry_t *pmap_pte(struct pmap *pmap, vaddr_t va)
  *
  * Return the pointer to a page table entry corresponding to the supplied
  * virtual address.
@@ -2438,7 +2435,7 @@ pmap_unwire(pmap, va)
  */
 pt_entry_t *
 pmap_pte(pmap, va)
-pmap_t pmap;
+struct pmap *pmap;
 vaddr_t va;
 {
 pt_entry_t *ptp;
@@ -2471,7 +2468,7 @@ pmap_pte(pmap, va)
  * Otherwise we need to map the page tables to an alternative
  * address and reference them there.
  */
-if (pmap == kernel_pmap || pmap->pm_pptpt
+if (pmap == pmap_kernel() || pmap->pm_pptpt
 == (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
 + ((PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) &
 ~3) + (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) {
@@ -2530,7 +2527,7 @@ pmap_pte(pmap, va)
  */
 boolean_t
 pmap_extract(pmap, va, pap)
-pmap_t pmap;
+struct pmap *pmap;
 vaddr_t va;
 paddr_t *pap;
 {
@@ -2589,8 +2586,8 @@ pmap_extract(pmap, va, pap)
 void
 pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
-pmap_t dst_pmap;
-pmap_t src_pmap;
+struct pmap *dst_pmap;
+struct pmap *src_pmap;
 vaddr_t dst_addr;
 vsize_t len;
 vaddr_t src_addr;
@@ -2803,7 +2800,7 @@ pmap_is_referenced(pg)
 int
 pmap_modified_emulation(pmap, va)
-pmap_t pmap;
+struct pmap *pmap;
 vaddr_t va;
 {
 pt_entry_t *pte;
@@ -2865,7 +2862,7 @@ pmap_modified_emulation(pmap, va)
 int
 pmap_handled_emulation(pmap, va)
-pmap_t pmap;
+struct pmap *pmap;
 vaddr_t va;
 {
 pt_entry_t *pte;
@@ -2919,7 +2916,7 @@ pmap_handled_emulation(pmap, va)
 void
 pmap_collect(pmap)
-pmap_t pmap;
+struct pmap *pmap;
 {
 }

@@ -1,4 +1,4 @@
-/* $NetBSD: footbridge_io.c,v 1.1 2001/06/09 10:29:12 chris Exp $ */
+/* $NetBSD: footbridge_io.c,v 1.2 2001/07/28 18:12:44 chris Exp $ */
 /*
  * Copyright (c) 1997 Causality Limited
@@ -209,7 +209,7 @@ footbridge_mem_bs_map(t, bpa, size, cacheable, bshp)
 /* Now map the pages */
 /* The cookie is the physical base address for the I/O area */
 while (startpa < endpa) {
-pmap_enter(kernel_pmap, va, (bus_addr_t)t + startpa,
+pmap_enter(pmap_kernel(), va, (bus_addr_t)t + startpa,
 VM_PROT_READ | VM_PROT_WRITE, 0);
 va += NBPG;
 startpa += NBPG;

@@ -1,4 +1,4 @@
-/* $NetBSD: footbridge_machdep.c,v 1.2 2001/06/09 10:44:11 chris Exp $ */
+/* $NetBSD: footbridge_machdep.c,v 1.3 2001/07/28 18:12:44 chris Exp $ */
 /*
  * Copyright (c) 1997 Mark Brinicombe.
@@ -76,7 +76,7 @@ footbridge_sa110_cc_setup(void)
 /* printf("vaddr=%x addr=%x\n", vaddr, addr);*/
 for (loop = 0; loop < cleanarea; loop += NBPG) {
-pte = pmap_pte(kernel_pmap, (addr + loop));
+pte = pmap_pte(pmap_kernel(), (addr + loop));
 *pte = L2_PTE(DC21285_SA_CACHE_FLUSH_BASE + loop, AP_KR);
 }
 sa110_cache_clean_addr = addr;

@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.10 2001/06/22 09:12:11 chris Exp $ */
+/* $NetBSD: pmap.h,v 1.11 2001/07/28 18:12:44 chris Exp $ */
 /*
  * Copyright (c) 1994,1995 Mark Brinicombe.
@@ -36,6 +36,34 @@
 #include <machine/cpufunc.h>
 #include <machine/pte.h>
+/*
+ * a pmap describes a processes' 4GB virtual address space. this
+ * virtual address space can be broken up into 4096 1MB regions which
+ * are described by PDEs in the PDP. the PDEs are defined as follows:
+ *
+ * (ranges are inclusive -> exclusive, just like vm_map_entry start/end)
+ * (the following assumes that KERNBASE is 0xf0000000)
+ *
+ * PDE#s       VA range                usage
+ * 0->3835     0x0 -> 0xefc00000       user address space
+ * 3836->3839  0xefc00000->            recursive mapping of PDP (used for
+ *             0xf0000000              linear mapping of PTPs)
+ * 3840->3851  0xf0000000->            kernel text address space (constant
+ *             0xf0c00000              across all pmap's/processes)
+ * 3852->3855  0xf0c00000->            "alternate" recursive PDP mapping
+ *             0xf1000000              (for other pmaps)
+ * 3856->4095  0xf1000000->            KVM and device mappings, constant
+ *             0x00000000              across all pmaps
+ *
+ * The maths works out that to then map each 1MB block into 4k pages requires
+ * 256 entries, of 4 bytes each, totaling 1k per 1MB. However as we use 4k
+ * pages we allocate 4 PDE's at a time, allocating the same access permissions
+ * to them all. This means we only need 1024 entries in the page table page
+ * table, IE we use 1 4k page to linearly map all the other page tables used.
+ */
 /*
  * Data structures used by pmap
  */
@@ -75,7 +103,7 @@ typedef struct pmap *pmap_t;
  */
 typedef struct pv_entry {
 struct pv_entry *pv_next; /* next pv_entry */
-pmap_t pv_pmap; /* pmap where mapping lies */
+struct pmap *pv_pmap; /* pmap where mapping lies */
 vaddr_t pv_va; /* virtual address for mapping */
 int pv_flags; /* flags */
 } *pv_entry_t;
@@ -137,8 +165,7 @@ typedef struct {
  * Commonly referenced structures
  */
 extern pv_entry_t pv_table; /* Phys to virt mappings, per page. */
-extern pmap_t kernel_pmap; /* pmap pointer used for the kernel */
-extern struct pmap kernel_pmap_store; /* kernel_pmap points to this */
+extern struct pmap kernel_pmap_store;
 extern int pmap_debug_level; /* Only exists if PMAP_DEBUG */
 /*
@@ -165,10 +192,10 @@ extern void pmap_procwr __P((struct proc *, vaddr_t, int));
  */
 extern void pmap_bootstrap __P((pd_entry_t *, pv_addr_t));
 extern void pmap_debug __P((int));
-extern int pmap_handled_emulation __P((pmap_t, vaddr_t));
-extern int pmap_modified_emulation __P((pmap_t, vaddr_t));
+extern int pmap_handled_emulation __P((struct pmap *, vaddr_t));
+extern int pmap_modified_emulation __P((struct pmap *, vaddr_t));
 extern void pmap_postinit __P((void));
-extern pt_entry_t *pmap_pte __P((pmap_t, vaddr_t));
+extern pt_entry_t *pmap_pte __P((struct pmap *, vaddr_t));
 #endif /* _KERNEL */
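The PDE arithmetic in that new pmap.h comment checks out; a small hypothetical helper (not part of the commit) makes the numbers concrete:

    /*
     * Hypothetical helper, not part of this commit: with 1MB sections,
     * the PDE index of a virtual address is just va >> 20.  That
     * reproduces the boundaries quoted in the comment above:
     *   0xefc00000 >> 20 == 3836,  0xf0000000 >> 20 == 3840,
     *   0xf0c00000 >> 20 == 3852,  0xf1000000 >> 20 == 3856.
     * Each 1MB region needs 256 PTEs of 4 bytes (1KB of L2 table), so
     * four L2 tables share one 4KB page, which is why PDEs are set up
     * four at a time with identical permissions.
     */
    static __inline unsigned int
    pde_index(unsigned int va)
    {
            return va >> 20;        /* 0..4095 */
    }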

@@ -1,4 +1,4 @@
-/* $NetBSD: mainbus_io.c,v 1.2 2001/04/24 04:30:53 thorpej Exp $ */
+/* $NetBSD: mainbus_io.c,v 1.3 2001/07/28 18:12:44 chris Exp $ */
 /*
  * Copyright (c) 1997 Mark Brinicombe.
@@ -161,7 +161,7 @@ mainbus_bs_map(t, bpa, size, cacheable, bshp)
 for(pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
 pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
-pte = pmap_pte(kernel_pmap, va);
+pte = pmap_pte(pmap_kernel(), va);
 if (cacheable)
 *pte |= PT_CACHEABLE;
 else

@@ -1,4 +1,4 @@
-/* $NetBSD: sa11x0_io.c,v 1.1 2001/07/08 23:37:53 rjs Exp $ */
+/* $NetBSD: sa11x0_io.c,v 1.2 2001/07/28 18:12:44 chris Exp $ */
 /*
  * Copyright (c) 1997 Mark Brinicombe.
@@ -164,7 +164,7 @@ sa11x0_bs_map(t, bpa, size, cacheable, bshp)
 for(pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
 pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
-pte = pmap_pte(kernel_pmap, va);
+pte = pmap_pte(pmap_kernel(), va);
 if (cacheable)
 *pte |= PT_CACHEABLE;
 else

@@ -1,4 +1,4 @@
-/* $NetBSD: iomd_dma.c,v 1.10 2001/07/09 21:46:20 reinoud Exp $ */
+/* $NetBSD: iomd_dma.c,v 1.11 2001/07/28 18:12:44 chris Exp $ */
 /*
  * Copyright (c) 1995 Scott Stevens
@@ -198,7 +198,7 @@ dma_intr(dp)
 /* return(0);*/
 /* XXX */
-#define PHYS(x, y) pmap_extract(kernel_pmap, (vaddr_t)x, (paddr_t *)(y))
+#define PHYS(x, y) pmap_extract(pmap_kernel(), (vaddr_t)x, (paddr_t *)(y))
 fill:
 #ifdef DMA_DEBUG
 printf("fill:\n");

@@ -1,4 +1,4 @@
-/* $NetBSD: esc.c,v 1.12 2001/06/12 15:17:17 wiz Exp $ */
+/* $NetBSD: esc.c,v 1.13 2001/07/28 18:12:45 chris Exp $ */
 /*
  * Copyright (c) 1995 Scott Stevens
@@ -198,14 +198,14 @@ escinitialize(dev)
  * Setup bump buffer.
  */
 dev->sc_bump_va = (u_char *)uvm_km_zalloc(kernel_map, dev->sc_bump_sz);
-(void) pmap_extract(kernel_pmap, (vaddr_t)dev->sc_bump_va,
+(void) pmap_extract(pmap_kernel(), (vaddr_t)dev->sc_bump_va,
 (paddr_t *)&dev->sc_bump_pa);
 /*
  * Setup pages to noncachable, that way we don't have to flush the cache
  * every time we need "bumped" transfer.
  */
-pte = pmap_pte(kernel_pmap, (vm_offset_t)dev->sc_bump_va);
+pte = pmap_pte(pmap_kernel(), (vm_offset_t)dev->sc_bump_va);
 *pte &= ~PT_C;
 cpu_tlb_flushD();
 cpu_cache_purgeD_rng((vm_offset_t)dev->sc_bump_va, NBPG);

@@ -1,4 +1,4 @@
-/* $NetBSD: podulebus.c,v 1.48 2001/07/10 20:10:49 bjh21 Exp $ */
+/* $NetBSD: podulebus.c,v 1.49 2001/07/28 18:12:45 chris Exp $ */
 /*
  * Copyright (c) 1994-1996 Mark Brinicombe.
@@ -437,7 +437,7 @@ podulebusattach(parent, self, aux)
 /* Map the FAST and SYNC simple podules */
-map_section((vm_offset_t)kernel_pmap->pm_pdir,
+map_section((vm_offset_t)pmap_kernel()->pm_pdir,
 SYNC_PODULE_BASE & 0xfff00000, SYNC_PODULE_HW_BASE & 0xfff00000, 0);
 cpu_tlb_flushD();
@@ -448,7 +448,7 @@ podulebusattach(parent, self, aux)
 for (loop1 = loop * EASI_SIZE; loop1 < ((loop + 1) * EASI_SIZE);
 loop1 += L1_SEC_SIZE)
-map_section((vm_offset_t)kernel_pmap->pm_pdir, EASI_BASE + loop1,
+map_section((vm_offset_t)pmap_kernel()->pm_pdir, EASI_BASE + loop1,
 EASI_HW_BASE + loop1, 0);
 }
 cpu_tlb_flushD();

@@ -1,4 +1,4 @@
-/* $NetBSD: sfas.c,v 1.20 2001/06/12 15:17:17 wiz Exp $ */
+/* $NetBSD: sfas.c,v 1.21 2001/07/28 18:12:45 chris Exp $ */
 /*
  * Copyright (c) 1995 Scott Stevens
@@ -199,14 +199,14 @@ sfasinitialize(dev)
  * Setup bump buffer.
  */
 dev->sc_bump_va = (u_char *)uvm_km_zalloc(kernel_map, dev->sc_bump_sz);
-(void) pmap_extract(kernel_pmap, (vaddr_t)dev->sc_bump_va,
+(void) pmap_extract(pmap_kernel(), (vaddr_t)dev->sc_bump_va,
 (paddr_t *)&dev->sc_bump_pa);
 /*
  * Setup pages to noncachable, that way we don't have to flush the cache
  * every time we need "bumped" transfer.
  */
-pte = pmap_pte(kernel_pmap, (vm_offset_t)dev->sc_bump_va);
+pte = pmap_pte(pmap_kernel(), (vm_offset_t)dev->sc_bump_va);
 *pte &= ~(PT_C | PT_B);
 cpu_tlb_flushD();
 cpu_cache_purgeD_rng((vm_offset_t)dev->sc_bump_va, NBPG);

@@ -1,4 +1,4 @@
-/* $NetBSD: rpc_machdep.c,v 1.50 2001/07/10 20:44:00 bjh21 Exp $ */
+/* $NetBSD: rpc_machdep.c,v 1.51 2001/07/28 18:12:45 chris Exp $ */
 /*
  * Copyright (c) 2000-2001 Reinoud Zandijk.
@@ -1045,9 +1045,9 @@ rpc_sa110_cc_setup(void)
 paddr_t kaddr;
 pt_entry_t *pte;
-(void) pmap_extract(kernel_pmap, KERNEL_TEXT_BASE, &kaddr);
+(void) pmap_extract(pmap_kernel(), KERNEL_TEXT_BASE, &kaddr);
 for (loop = 0; loop < CPU_SA110_CACHE_CLEAN_SIZE; loop += NBPG) {
-pte = pmap_pte(kernel_pmap, (sa110_cc_base + loop));
+pte = pmap_pte(pmap_kernel(), (sa110_cc_base + loop));
 *pte = L2_PTE(kaddr, AP_KR);
 }
 sa110_cache_clean_addr = sa110_cc_base;

@@ -1,4 +1,4 @@
-/* $NetBSD: beep.c,v 1.25 2001/07/09 23:35:58 bjh21 Exp $ */
+/* $NetBSD: beep.c,v 1.26 2001/07/28 18:12:44 chris Exp $ */
 /*
  * Copyright (c) 1995 Mark Brinicombe
@@ -147,10 +147,10 @@ beepattach(parent, self, aux)
 panic("beep: Cannot allocate page aligned buffer\n");
 sc->sc_buffer1 = sc->sc_buffer0;
-(void) pmap_extract(kernel_pmap, (vaddr_t)sc->sc_buffer0 & PG_FRAME,
+(void) pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_buffer0 & PG_FRAME,
 (paddr_t *)&sc->sc_sound_cur0);
 sc->sc_sound_end0 = (sc->sc_sound_cur0 + NBPG - 16) | 0x00000000;
-(void) pmap_extract(kernel_pmap, (vaddr_t)sc->sc_buffer1 & PG_FRAME,
+(void) pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_buffer1 & PG_FRAME,
 (paddr_t *)&sc->sc_sound_cur1);
 sc->sc_sound_end1 = (sc->sc_sound_cur1 + NBPG - 16) | 0x00000000;

@@ -1,4 +1,4 @@
-/* $NetBSD: vidcrender.c,v 1.6 2001/07/10 22:09:05 chris Exp $ */
+/* $NetBSD: vidcrender.c,v 1.7 2001/07/28 18:12:45 chris Exp $ */
 /*
  * Copyright (c) 1996 Mark Brinicombe
@@ -1325,7 +1325,7 @@ vidc_cursor_init(vc)
 cursor_data = (char *)uvm_km_zalloc(kernel_map, NBPG);
 if (!cursor_data)
 panic("Cannot allocate memory for hardware cursor\n");
-(void) pmap_extract(kernel_pmap, (vaddr_t)cursor_data, &pa);
+(void) pmap_extract(pmap_kernel(), (vaddr_t)cursor_data, &pa);
 IOMD_WRITE_WORD(IOMD_CURSINIT, pa);
 }
@@ -1359,9 +1359,9 @@ vidc_cursor_init(vc)
 }
-(void) pmap_extract(kernel_pmap, (vaddr_t)cursor_normal,
+(void) pmap_extract(pmap_kernel(), (vaddr_t)cursor_normal,
 (paddr_t *)&p_cursor_normal);
-(void) pmap_extract(kernel_pmap, (vaddr_t)cursor_transparent,
+(void) pmap_extract(pmap_kernel(), (vaddr_t)cursor_transparent,
 (paddr_t *)&p_cursor_transparent);
 /*

@@ -1,4 +1,4 @@
-/* $NetBSD: lmcaudio.c,v 1.30 2001/02/27 20:23:12 reinoud Exp $ */
+/* $NetBSD: lmcaudio.c,v 1.31 2001/07/28 18:12:45 chris Exp $ */
 /*
  * Copyright (c) 1996, Danny C Tsen.
@@ -532,7 +532,7 @@ lmcaudio_rate(rate)
 return(0);
 }
-#define PHYS(x, y) pmap_extract(kernel_pmap, ((x)&PG_FRAME), (paddr_t *)(y))
+#define PHYS(x, y) pmap_extract(pmap_kernel(), ((x)&PG_FRAME), (paddr_t *)(y))
 /*
  * Program the next buffer to be used

@@ -1,4 +1,4 @@
-/* $NetBSD: vidc20config.c,v 1.4 2001/07/10 22:09:04 chris Exp $ */
+/* $NetBSD: vidc20config.c,v 1.5 2001/07/28 18:12:45 chris Exp $ */
 /*
  * Copyright (c) 2001 Reinoud Zandijk
@@ -727,7 +727,7 @@ vidcvideo_cursor_init(int width, int height)
 cursor_data = (char *)uvm_km_zalloc(kernel_map, NBPG);
 if (!cursor_data)
 panic("Cannot allocate memory for hardware cursor\n");
-(void) pmap_extract(kernel_pmap, (vaddr_t)cursor_data, &pa);
+(void) pmap_extract(pmap_kernel(), (vaddr_t)cursor_data, &pa);
 IOMD_WRITE_WORD(IOMD_CURSINIT, pa);
 }
@@ -760,9 +760,9 @@ vidcvideo_cursor_init(int width, int height)
 }
-(void) pmap_extract(kernel_pmap, (vaddr_t)cursor_normal,
+(void) pmap_extract(pmap_kernel(), (vaddr_t)cursor_normal,
 (paddr_t *)&p_cursor_normal);
-(void) pmap_extract(kernel_pmap, (vaddr_t)cursor_transparent,
+(void) pmap_extract(pmap_kernel(), (vaddr_t)cursor_transparent,
 (paddr_t *)&p_cursor_transparent);
 /*

@@ -1,4 +1,4 @@
-/* $NetBSD: vidcaudio.c,v 1.38 2001/07/10 20:10:48 bjh21 Exp $ */
+/* $NetBSD: vidcaudio.c,v 1.39 2001/07/28 18:12:45 chris Exp $ */
 /*
  * Copyright (c) 1995 Melvin Tang-Richardson
@@ -507,7 +507,7 @@ vidcaudio_stereo(channel, position)
 return 0;
 }
-#define PHYS(x, y) pmap_extract(kernel_pmap, ((x)&PG_FRAME), (paddr_t *)(y))
+#define PHYS(x, y) pmap_extract(pmap_kernel(), ((x)&PG_FRAME), (paddr_t *)(y))
 /*
  * Program the next buffer to be used

@@ -1,4 +1,4 @@
-/* $NetBSD: hpc_machdep.c,v 1.15 2001/06/29 02:40:28 toshii Exp $ */
+/* $NetBSD: hpc_machdep.c,v 1.16 2001/07/28 18:12:46 chris Exp $ */
 /*
  * Copyright (c) 1994-1998 Mark Brinicombe.
@@ -765,9 +765,9 @@ rpc_sa110_cc_setup(void)
 paddr_t kaddr;
 pt_entry_t *pte;
-(void) pmap_extract(kernel_pmap, KERNEL_TEXT_BASE, &kaddr);
+(void) pmap_extract(pmap_kernel(), KERNEL_TEXT_BASE, &kaddr);
 for (loop = 0; loop < CPU_SA110_CACHE_CLEAN_SIZE; loop += NBPG) {
-pte = pmap_pte(kernel_pmap, (sa110_cc_base + loop));
+pte = pmap_pte(pmap_kernel(), (sa110_cc_base + loop));
 *pte = L2_PTE(kaddr, AP_KR);
 }
 sa110_cache_clean_addr = sa110_cc_base;

@@ -1,4 +1,4 @@
-/* $NetBSD: machdep.c,v 1.11 2001/06/29 02:40:28 toshii Exp $ */
+/* $NetBSD: machdep.c,v 1.12 2001/07/28 18:12:46 chris Exp $ */
 /*
  * Copyright (c) 1994-1998 Mark Brinicombe.
@@ -461,7 +461,7 @@ cpu_startup()
 curpcb->pcb_flags = 0;
 curpcb->pcb_und_sp = (u_int)proc0.p_addr + USPACE_UNDEF_STACK_TOP;
 curpcb->pcb_sp = (u_int)proc0.p_addr + USPACE_SVC_STACK_TOP;
-(void) pmap_extract(kernel_pmap, (vaddr_t)(kernel_pmap)->pm_pdir,
+(void) pmap_extract(pmap_kernel(), (vaddr_t)(pmap_kernel())->pm_pdir,
 (paddr_t *)&curpcb->pcb_pagedir);
 curpcb->pcb_tf = (struct trapframe *)curpcb->pcb_sp - 1;

@@ -1,4 +1,4 @@
-/* $NetBSD: sa11x0_io.c,v 1.3 2001/04/24 04:30:57 thorpej Exp $ */
+/* $NetBSD: sa11x0_io.c,v 1.4 2001/07/28 18:12:46 chris Exp $ */
 /*
  * Copyright (c) 1997 Mark Brinicombe.
@@ -164,7 +164,7 @@ sa11x0_bs_map(t, bpa, size, cacheable, bshp)
 for(pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
 pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
-pte = pmap_pte(kernel_pmap, va);
+pte = pmap_pte(pmap_kernel(), va);
 if (cacheable)
 *pte |= PT_CACHEABLE;
 else