PMAP_NEW is no longer optional on the i386; the old pmap's page table
allocation strategy no longer works at all.  Move pmap.new.* to pmap.*.

To read the revision history of PMAP_NEW up until this merge, use cvs
rlog of the old pmap.new.* files.
This commit is contained in:
thorpej 1999-06-17 00:12:10 +00:00
parent 01d1ebf115
commit e2442268e9
15 changed files with 3991 additions and 6406 deletions

View File

@ -1,4 +1,4 @@
# $NetBSD: files.i386,v 1.133 1999/06/07 20:31:30 thorpej Exp $
# $NetBSD: files.i386,v 1.134 1999/06/17 00:12:10 thorpej Exp $
#
# new style config file for i386 architecture
#
@ -49,8 +49,7 @@ file arch/i386/i386/math_emulate.c math_emulate
file arch/i386/i386/mem.c
file arch/i386/i386/microtime.s
file arch/i386/i386/ns_cksum.c ns
file arch/i386/i386/pmap.c !pmap_new
file arch/i386/i386/pmap.new.c pmap_new
file arch/i386/i386/pmap.c
file arch/i386/i386/process_machdep.c
file arch/i386/i386/sys_machdep.c
file arch/i386/i386/trap.c

View File

@ -1,9 +1,11 @@
# $NetBSD: std.i386,v 1.13 1999/03/24 06:06:10 mrg Exp $
# $NetBSD: std.i386,v 1.14 1999/06/17 00:12:10 thorpej Exp $
#
# standard, required NetBSD/i386 'options'
machine i386
# NOTE: PMAP_NEW is no longer optional on the i386 -- the old pmap
# no longer works, and has been deleted from the source tree.
options PMAP_NEW # UVM's new pmap interface
options EXEC_AOUT # exec a.out binaries

View File

@ -1,4 +1,4 @@
/* $NetBSD: db_memrw.c,v 1.6 1999/04/12 20:38:19 pk Exp $ */
/* $NetBSD: db_memrw.c,v 1.7 1999/06/17 00:12:11 thorpej Exp $ */
/*
* Mach Operating System
@ -33,8 +33,6 @@
* by DDB and KGDB.
*/
#include "opt_pmap_new.h"
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
@ -61,10 +59,6 @@ db_read_bytes(addr, size, data)
*data++ = *src++;
}
#if !defined(PMAP_NEW)
pt_entry_t *pmap_pte __P((pmap_t, vaddr_t));
#endif
/*
* Write bytes to kernel address space for debugger.
*/
@ -85,22 +79,14 @@ db_write_bytes(addr, size, data)
if (addr >= VM_MIN_KERNEL_ADDRESS &&
addr < (vaddr_t)&etext) {
#if defined(PMAP_NEW)
ptep0 = PTE_BASE + i386_btop(addr);
#else
ptep0 = pmap_pte(pmap_kernel(), addr);
#endif
oldmap0 = *ptep0;
*(int *)ptep0 |= /* INTEL_PTE_WRITE */ PG_RW;
addr1 = i386_trunc_page(addr + size - 1);
if (i386_trunc_page(addr) != addr1) {
/* data crosses a page boundary */
#if defined(PMAP_NEW)
ptep1 = PTE_BASE + i386_btop(addr1);
#else
ptep1 = pmap_pte(pmap_kernel(), addr1);
#endif
oldmap1 = *ptep1;
*(int *)ptep1 |= /* INTEL_PTE_WRITE */ PG_RW;
}

View File

@ -1,4 +1,4 @@
# $NetBSD: genassym.cf,v 1.16 1999/03/24 11:23:45 tron Exp $
# $NetBSD: genassym.cf,v 1.17 1999/06/17 00:12:11 thorpej Exp $
#
# Copyright (c) 1998 The NetBSD Foundation, Inc.
@ -74,7 +74,6 @@
# @(#)genassym.c 5.11 (Berkeley) 5/10/91
#
include "opt_pmap_new.h"
include "opt_compat_svr4.h"
include "opt_compat_freebsd.h"
include "opt_compat_linux.h"
@ -120,20 +119,11 @@ endif
define SRUN SRUN
ifdef PMAP_NEW
define PDSLOT_PTE PDSLOT_PTE
define PDSLOT_APTE PDSLOT_APTE
define PDSLOT_KERN PDSLOT_KERN
define NKPTP_MIN NKPTP_MIN
define NKPTP_MAX NKPTP_MAX
else
define PTDPTDI PTDPTDI
define KPTDI KPTDI
define NKPDE_BASE NKPDE_BASE
define NKPDE_MAX NKPDE_MAX
define NKPDE_SCALE NKPDE_SCALE
define APTDPTDI APTDPTDI
endif
define VM_MAXUSER_ADDRESS (int)VM_MAXUSER_ADDRESS

View File

@ -1,4 +1,4 @@
/* $NetBSD: locore.s,v 1.208 1999/03/24 05:51:00 mrg Exp $ */
/* $NetBSD: locore.s,v 1.209 1999/06/17 00:12:11 thorpej Exp $ */
/*-
* Copyright (c) 1998 The NetBSD Foundation, Inc.
@ -79,7 +79,6 @@
#include "opt_vm86.h"
#include "opt_user_ldt.h"
#include "opt_dummy_nops.h"
#include "opt_pmap_new.h"
#include "opt_compat_freebsd.h"
#include "opt_compat_linux.h"
#include "opt_compat_ibcs2.h"
@ -187,16 +186,9 @@
*
* XXX 4 == sizeof pde
*/
#ifdef PMAP_NEW
.set _C_LABEL(PTmap),(PDSLOT_PTE << PDSHIFT)
.set _C_LABEL(PTD),(_C_LABEL(PTmap) + PDSLOT_PTE * NBPG)
.set _C_LABEL(PTDpde),(_C_LABEL(PTD) + PDSLOT_PTE * 4)
#else
.globl _C_LABEL(PTmap),_C_LABEL(PTD),_C_LABEL(PTDpde)
.set _C_LABEL(PTmap),(PTDPTDI << PDSHIFT)
.set _C_LABEL(PTD),(_C_LABEL(PTmap) + PTDPTDI * NBPG)
.set _C_LABEL(PTDpde),(_C_LABEL(PTD) + PTDPTDI * 4)
#endif
/*
* APTmap, APTD is the alternate recursive pagemap.
@ -204,16 +196,9 @@
*
* XXX 4 == sizeof pde
*/
#ifdef PMAP_NEW
.set _C_LABEL(APTmap),(PDSLOT_APTE << PDSHIFT)
.set _C_LABEL(APTD),(_C_LABEL(APTmap) + PDSLOT_APTE * NBPG)
.set _C_LABEL(APTDpde),(_C_LABEL(PTD) + PDSLOT_APTE * 4)
#else
.globl _C_LABEL(APTmap),_C_LABEL(APTD),_C_LABEL(APTDpde)
.set _C_LABEL(APTmap),(APTDPTDI << PDSHIFT)
.set _C_LABEL(APTD),(_C_LABEL(APTmap) + APTDPTDI * NBPG)
.set _C_LABEL(APTDpde),(_C_LABEL(PTD) + APTDPTDI * 4)
#endif
/*
@ -550,7 +535,6 @@ try586: /* Use the `cpuid' instruction. */
* Calculate the size of the kernel page table directory, and
* how many entries it will have.
*/
#if defined(PMAP_NEW)
movl RELOC(nkpde),%ecx # get nkpde
cmpl $NKPTP_MIN,%ecx # larger than min?
jge 1f
@ -560,21 +544,6 @@ try586: /* Use the `cpuid' instruction. */
jle 2f
movl $NKPTP_MAX,%ecx
2:
#else
movl RELOC(nkpde),%ecx # get nkpde
testl %ecx,%ecx # if it's non-zero, use as-is
jnz 2f
movl RELOC(biosextmem),%ecx
shrl $10,%ecx # cvt. # of KB to # of MB
imull $NKPDE_SCALE,%ecx # scale to # of KPDEs
addl $NKPDE_BASE,%ecx # and add the base.
cmpl $NKPDE_MAX,%ecx # clip to max.
jle 1f
movl $NKPDE_MAX,%ecx
1: movl %ecx,RELOC(nkpde)
2:
#endif
/* Clear memory for bootstrap tables. */
shll $PGSHIFT,%ecx
@ -649,21 +618,13 @@ try586: /* Use the `cpuid' instruction. */
/* Map kernel PDEs. */
movl RELOC(nkpde),%ecx # for this many pde s,
#if defined(PMAP_NEW)
leal (PROC0PDIR+PDSLOT_KERN*4)(%esi),%ebx # kernel pde offset
#else
leal (PROC0PDIR+KPTDI*4)(%esi),%ebx # offset of pde for kernel
#endif
leal (SYSMAP+PG_V|PG_KW)(%esi),%eax # pte for KPT in proc 0,
fillkpt
/* Install a PDE recursively mapping page directory as a page table! */
leal (PROC0PDIR+PG_V|PG_KW)(%esi),%eax # pte for ptd
#ifdef PMAP_NEW
movl %eax,(PROC0PDIR+PDSLOT_PTE*4)(%esi) # recursive PD slot
#else
movl %eax,(PROC0PDIR+PTDPTDI*4)(%esi) # which is where PTmap maps!
#endif
/* Save phys. addr of PTD, for libkvm. */
movl %esi,RELOC(PTDpaddr)
@ -1150,10 +1111,8 @@ ENTRY(copyout)
/* Compute PTE offset for start address. */
shrl $PGSHIFT,%edi
#if defined(PMAP_NEW)
movl _C_LABEL(curpcb),%edx
movl $2f,PCB_ONFAULT(%edx)
#endif
1: /* Check PTE for each page. */
testb $PG_RW,_C_LABEL(PTmap)(,%edi,4)
@ -1280,10 +1239,8 @@ ENTRY(copyoutstr)
movl $NBPG,%ecx
subl %eax,%ecx # ecx = NBPG - (src % NBPG)
#if defined(PMAP_NEW)
movl _C_LABEL(curpcb),%eax
movl $6f,PCB_ONFAULT(%eax)
#endif
1: /*
* Once per page, check that we are still within the bounds of user
@ -1577,10 +1534,8 @@ ENTRY(suword)
jne 2f
#endif /* I486_CPU || I586_CPU || I686_CPU */
#ifdef PMAP_NEW
movl _C_LABEL(curpcb),%eax
movl $3f,PCB_ONFAULT(%eax)
#endif
movl %edx,%eax
shrl $PGSHIFT,%eax # calculate pte address
@ -1624,10 +1579,8 @@ ENTRY(susword)
jne 2f
#endif /* I486_CPU || I586_CPU || I686_CPU */
#ifdef PMAP_NEW
movl _C_LABEL(curpcb),%eax
movl $3f,PCB_ONFAULT(%eax)
#endif
movl %edx,%eax
shrl $PGSHIFT,%eax # calculate pte address
@ -1706,10 +1659,8 @@ ENTRY(subyte)
jne 2f
#endif /* I486_CPU || I586_CPU || I686_CPU */
#ifdef PMAP_NEW
movl _C_LABEL(curpcb),%eax
movl $3f,PCB_ONFAULT(%eax)
#endif
movl %edx,%eax
shrl $PGSHIFT,%eax # calculate pte address

View File

@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.358 1999/05/26 19:16:31 thorpej Exp $ */
/* $NetBSD: machdep.c,v 1.359 1999/06/17 00:12:11 thorpej Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@ -79,7 +79,6 @@
#include "opt_ddb.h"
#include "opt_vm86.h"
#include "opt_user_ldt.h"
#include "opt_pmap_new.h"
#include "opt_compat_netbsd.h"
#include "opt_cpureset_delay.h"
#include "opt_compat_svr4.h"
@ -371,22 +370,15 @@ cpu_startup()
/*
* Initialize error message buffer (et end of core).
*/
#if defined(PMAP_NEW)
msgbuf_vaddr = uvm_km_valloc(kernel_map, i386_round_page(MSGBUFSIZE));
if (msgbuf_vaddr == NULL)
panic("failed to valloc msgbuf_vaddr");
#endif
/* msgbuf_paddr was init'd in pmap */
#if defined(PMAP_NEW)
for (x = 0; x < btoc(MSGBUFSIZE); x++)
pmap_kenter_pa((vaddr_t)msgbuf_vaddr + x * NBPG,
msgbuf_paddr + x * NBPG, VM_PROT_READ|VM_PROT_WRITE);
#else
for (x = 0; x < btoc(MSGBUFSIZE); x++)
pmap_enter(pmap_kernel(), (vaddr_t)msgbuf_vaddr + x * NBPG,
msgbuf_paddr + x * NBPG, VM_PROT_READ|VM_PROT_WRITE, TRUE,
VM_PROT_READ|VM_PROT_WRITE);
#endif
initmsgbuf((caddr_t)msgbuf_vaddr, round_page(MSGBUFSIZE));
printf(version);
@ -482,18 +474,9 @@ cpu_startup()
panic("biostramp_image_size too big: %x vs. %x\n",
biostramp_image_size, NBPG);
#endif
#if defined(PMAP_NEW)
pmap_kenter_pa((vaddr_t)BIOSTRAMP_BASE, /* virtual */
(paddr_t)BIOSTRAMP_BASE, /* physical */
VM_PROT_ALL); /* protection */
#else
pmap_enter(pmap_kernel(),
(vaddr_t)BIOSTRAMP_BASE, /* virtual */
(paddr_t)BIOSTRAMP_BASE, /* physical */
VM_PROT_ALL, /* protection */
TRUE, /* wired down */
VM_PROT_READ|VM_PROT_WRITE);
#endif
memcpy((caddr_t)BIOSTRAMP_BASE, biostramp_image, biostramp_image_size);
#ifdef DEBUG
printf("biostramp installed @ %x\n", BIOSTRAMP_BASE);
@ -568,13 +551,7 @@ i386_bufinit()
if (pg == NULL)
panic("cpu_startup: not enough memory for "
"buffer cache");
#if defined(PMAP_NEW)
pmap_kenter_pgs(curbuf, &pg, 1);
#else
pmap_enter(kernel_map->pmap, curbuf,
VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE,
TRUE, VM_PROT_READ|VM_PROT_WRITE);
#endif
curbuf += PAGE_SIZE;
curbufsize -= PAGE_SIZE;
}
@ -1650,10 +1627,8 @@ init386(first_avail)
extern void consinit __P((void));
proc0.p_addr = proc0paddr;
#if defined(PMAP_NEW)
/* XXX: PMAP_NEW requires valid curpcb. also init'd in cpu_startup */
curpcb = &proc0.p_addr->u_pcb;
#endif
/*
@ -2306,12 +2281,7 @@ i386_mem_add_mapping(bpa, size, cacheable, bshp)
*bshp = (bus_space_handle_t)(va + (bpa & PGOFSET));
for (; pa < endpa; pa += NBPG, va += NBPG) {
#if defined(PMAP_NEW)
pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
#else
pmap_enter(pmap_kernel(), va, pa,
VM_PROT_READ | VM_PROT_WRITE, TRUE, 0);
#endif
/*
* PG_N doesn't exist on 386's, so we assume that
@ -2324,11 +2294,7 @@ i386_mem_add_mapping(bpa, size, cacheable, bshp)
*pte &= ~PG_N;
else
*pte |= PG_N;
#if defined(PMAP_NEW)
pmap_update_pg(va);
#else
pmap_update();
#endif
}
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: mem.c,v 1.41 1999/03/27 00:30:07 mycroft Exp $ */
/* $NetBSD: mem.c,v 1.42 1999/06/17 00:12:11 thorpej Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -40,7 +40,6 @@
* @(#)mem.c 8.3 (Berkeley) 1/12/94
*/
#include "opt_pmap_new.h"
#include "opt_compat_netbsd.h"
/*

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,4 +1,4 @@
/* $NetBSD: trap.c,v 1.131 1999/03/24 05:51:02 mrg Exp $ */
/* $NetBSD: trap.c,v 1.132 1999/06/17 00:12:12 thorpej Exp $ */
/*-
* Copyright (c) 1998 The NetBSD Foundation, Inc.
@ -83,7 +83,6 @@
#include "opt_math_emulate.h"
#include "opt_vm86.h"
#include "opt_ktrace.h"
#include "opt_pmap_new.h"
#include "opt_cputype.h"
#include "opt_compat_freebsd.h"
#include "opt_compat_linux.h"
@ -478,40 +477,12 @@ trap(frame)
}
}
/*
* PMAP_NEW allocates PTPs at pmap_enter time, not here.
*/
#if !defined(PMAP_NEW)
/* Create a page table page if necessary, and wire it. */
if ((PTD[pdei(va)] & PG_V) == 0) {
unsigned v;
v = trunc_page(vtopte(va));
rv = uvm_map_pageable(map, v, v + NBPG, FALSE);
if (rv != KERN_SUCCESS)
goto nogo;
}
#endif /* PMAP_NEW */
/* Fault the original page in. */
rv = uvm_fault(map, va, 0, ftype);
if (rv == KERN_SUCCESS) {
if (nss > vm->vm_ssize)
vm->vm_ssize = nss;
#if !defined(PMAP_NEW)
/*
* If this is a pagefault for a PT page,
* wire it. Normally we fault them in
* ourselves, but this can still happen on
* a 386 in copyout & friends.
*/
if (map != kernel_map && va >= UPT_MIN_ADDRESS &&
va < UPT_MAX_ADDRESS) {
va = trunc_page(va);
uvm_map_pageable(map, va, va + NBPG, FALSE);
}
#endif
if (type == T_PAGEFLT)
return;
goto out;

View File

@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.80 1999/05/26 22:19:35 thorpej Exp $ */
/* $NetBSD: vm_machdep.c,v 1.81 1999/06/17 00:12:12 thorpej Exp $ */
/*-
* Copyright (c) 1995 Charles M. Hannum. All rights reserved.
@ -46,7 +46,6 @@
*/
#include "opt_user_ldt.h"
#include "opt_pmap_new.h"
#include <sys/param.h>
#include <sys/systm.h>
@ -289,7 +288,6 @@ setredzone(pte, vaddr)
}
#endif
#if defined(PMAP_NEW)
/*
* Move pages from one kernel virtual address to another.
* Both addresses are assumed to reside in the Sysmap,
@ -329,33 +327,6 @@ pagemove(from, to, size)
pmap_update();
#endif
}
#else /* PMAP_NEW */
/*
* Move pages from one kernel virtual address to another.
* Both addresses are assumed to reside in the Sysmap,
* and size must be a multiple of CLSIZE.
*/
void
pagemove(from, to, size)
register caddr_t from, to;
size_t size;
{
register pt_entry_t *fpte, *tpte;
if (size % CLBYTES)
panic("pagemove");
fpte = kvtopte(from);
tpte = kvtopte(to);
while (size > 0) {
*tpte++ = *fpte;
*fpte++ = 0;
from += NBPG;
to += NBPG;
size -= NBPG;
}
pmap_update();
}
#endif /* PMAP_NEW */
/*
* Convert kernel VA to physical address
@ -379,7 +350,6 @@ extern vm_map_t phys_map;
* Note: the pages are already locked by uvm_vslock(), so we
* do not need to pass an access_type to pmap_enter().
*/
#if defined(PMAP_NEW)
void
vmapbuf(bp, len)
struct buf *bp;
@ -417,35 +387,6 @@ vmapbuf(bp, len)
len -= PAGE_SIZE;
}
}
#else /* PMAP_NEW */
void
vmapbuf(bp, len)
struct buf *bp;
vsize_t len;
{
vaddr_t faddr, taddr, off;
pt_entry_t *fpte, *tpte;
pt_entry_t *pmap_pte __P((pmap_t, vaddr_t));
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
faddr = trunc_page(bp->b_saveaddr = bp->b_data);
off = (vaddr_t)bp->b_data - faddr;
len = round_page(off + len);
taddr = uvm_km_valloc_wait(phys_map, len);
bp->b_data = (caddr_t)(taddr + off);
/*
* The region is locked, so we expect that pmap_pte() will return
* non-NULL.
*/
fpte = pmap_pte(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map), faddr);
tpte = pmap_pte(vm_map_pmap(phys_map), taddr);
do {
*tpte++ = *fpte++;
len -= PAGE_SIZE;
} while (len);
}
#endif /* PMAP_NEW */
/*
* Unmap a previously-mapped user I/O request.

View File

@ -1,4 +1,4 @@
# $NetBSD: Makefile,v 1.7 1999/04/28 09:10:07 christos Exp $
# $NetBSD: Makefile,v 1.8 1999/06/17 00:12:12 thorpej Exp $
KDIR= /sys/arch/i386/include
INCSDIR= /usr/include/i386
@ -9,7 +9,7 @@ INCS= ansi.h aout_machdep.h apmvar.h asm.h bioscall.h bootinfo.h bswap.h \
freebsd_machdep.h gdt.h ibcs2_machdep.h ieee.h ieeefp.h intr.h \
joystick.h kcore.h loadfile_machdep.h limits.h mouse.h npx.h \
param.h pcb.h pccons.h pio.h pmap.h \
pmap.new.h proc.h profile.h psl.h pte.h ptrace.h reg.h segments.h \
proc.h profile.h psl.h pte.h ptrace.h reg.h segments.h \
setjmp.h signal.h specialreg.h spkr.h stdarg.h svr4_machdep.h \
sysarch.h trap.h tss.h types.h varargs.h vm86.h vmparam.h

View File

@ -1,11 +1,9 @@
/* $NetBSD: pmap.h,v 1.39 1999/05/12 19:28:30 thorpej Exp $ */
/* $NetBSD: pmap.h,v 1.40 1999/06/17 00:12:12 thorpej Exp $ */
/*-
* Copyright (c) 1998 The NetBSD Foundation, Inc.
* All rights reserved.
/*
*
* This code is derived from software contributed to The NetBSD Foundation
* by Charles M. Hannum.
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -16,250 +14,521 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
* must display the following acknowledgment:
* This product includes software developed by Charles D. Cranor and
* Washington University.
* 4. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 1991 Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* the Systems Programming Group of the University of Utah Computer
* Science Department and William Jolitz of UUNET Technologies Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)pmap.h 7.4 (Berkeley) 5/12/91
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Derived from hp300 version by Mike Hibler, this version by William
* Jolitz uses a recursive map [a pde points to the page directory] to
* map the page tables using the pagetables themselves. This is done to
* reduce the impact on kernel virtual memory for lots of sparse address
* space, and to reduce the cost of memory to each process.
*
* from hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
* pmap.h: see pmap.c for the history of this pmap module.
*/
#if defined(_KERNEL) && !defined(_LKM)
#include "opt_pmap_new.h"
#include "opt_user_ldt.h"
#endif
#ifdef PMAP_NEW /* redirect */
#include <machine/pmap.new.h> /* defines _I386_PMAP_H_ */
#endif
#ifndef _I386_PMAP_H_
#define _I386_PMAP_H_
#if defined(_KERNEL) && !defined(_LKM)
#include "opt_user_ldt.h"
#endif
#include <machine/cpufunc.h>
#include <machine/pte.h>
#include <machine/segments.h>
#include <uvm/uvm_object.h>
/*
* 386 page table entry and page table directory
* W.Jolitz, 8/89
* see pte.h for a description of i386 MMU terminology and hardware
* interface.
*
* a pmap describes a processes' 4GB virtual address space. this
* virtual address space can be broken up into 1024 4MB regions which
* are described by PDEs in the PDP. the PDEs are defined as follows:
*
* (ranges are inclusive -> exclusive, just like vm_map_entry start/end)
* (the following assumes that KERNBASE is 0xf0000000)
*
* PDE#s VA range usage
* 0->959 0x0 -> 0xefc00000 user address space, note that the
* max user address is 0xefbfe000
* the final two pages in the last 4MB
* used to be reserved for the UAREA
* but now are no longer used
* 959 0xefc00000-> recursive mapping of PDP (used for
* 0xf0000000 linear mapping of PTPs)
* 960->1023 0xf0000000-> kernel address space (constant
* 0xffc00000 across all pmap's/processes)
* 1023 0xffc00000-> "alternate" recursive PDP mapping
* <end> (for other pmaps)
*
*
* note: a recursive PDP mapping provides a way to map all the PTEs for
* a 4GB address space into a linear chunk of virtual memory. in other
* words, the PTE for page 0 is the first int mapped into the 4MB recursive
* area. the PTE for page 1 is the second int. the very last int in the
* 4MB range is the PTE that maps VA 0xffffe000 (the last page in a 4GB
* address).
*
* all pmap's PD's must have the same values in slots 960->1023 so that
* the kernel is always mapped in every process. these values are loaded
* into the PD at pmap creation time.
*
* at any one time only one pmap can be active on a processor. this is
* the pmap whose PDP is pointed to by processor register %cr3. this pmap
* will have all its PTEs mapped into memory at the recursive mapping
* point (slot #959 as show above). when the pmap code wants to find the
* PTE for a virtual address, all it has to do is the following:
*
* address of PTE = (959 * 4MB) + (VA / NBPG) * sizeof(pt_entry_t)
* = 0xefc00000 + (VA /4096) * 4
*
* what happens if the pmap layer is asked to perform an operation
* on a pmap that is not the one which is currently active? in that
* case we take the PA of the PDP of non-active pmap and put it in
* slot 1023 of the active pmap. this causes the non-active pmap's
* PTEs to get mapped in the final 4MB of the 4GB address space
* (e.g. starting at 0xffc00000).
*
* the following figure shows the effects of the recursive PDP mapping:
*
* PDP (%cr3)
* +----+
* | 0| -> PTP#0 that maps VA 0x0 -> 0x400000
* | |
* | |
* | 959| -> points back to PDP (%cr3) mapping VA 0xefc00000 -> 0xf0000000
* | 960| -> first kernel PTP (maps 0xf0000000 -> 0xf0400000)
* | |
* |1023| -> points to alternate pmap's PDP (maps 0xffc00000 -> end)
* +----+
*
* note that the PDE#959 VA (0xefc00000) is defined as "PTE_BASE"
* note that the PDE#1023 VA (0xffc00000) is defined as "APTE_BASE"
*
* starting at VA 0xefc00000 the current active PDP (%cr3) acts as a
* PTP:
*
* PTP#959 == PDP(%cr3) => maps VA 0xefc00000 -> 0xf0000000
* +----+
* | 0| -> maps the contents of PTP#0 at VA 0xefc00000->0xefc01000
* | |
* | |
* | 959| -> maps contents of PTP#959 (the PDP) at VA 0xeffbf000
* | 960| -> maps contents of first kernel PTP
* | |
* |1023|
* +----+
*
* note that mapping of the PDP at PTP#959's VA (0xeffbf000) is
* defined as "PDP_BASE".... within that mapping there are two
* defines:
* "PDP_PDE" (0xeffbfefc) is the VA of the PDE in the PDP
* which points back to itself.
* "APDP_PDE" (0xeffbfffc) is the VA of the PDE in the PDP which
* establishes the recursive mapping of the alternate pmap.
* to set the alternate PDP, one just has to put the correct
* PA info in *APDP_PDE.
*
* note that in the APTE_BASE space, the APDP appears at VA
* "APDP_BASE" (0xfffff000).
*/
/*
* PG_AVAIL usage ...
* the following defines identify the slots used as described above.
*/
#define PG_W PG_AVAIL1 /* "wired" mapping */
#define PDSLOT_PTE ((KERNBASE/NBPD)-1) /* 959: for recursive PDP map */
#define PDSLOT_KERN (KERNBASE/NBPD) /* 960: start of kernel space */
#define PDSLOT_APTE ((unsigned)1023) /* 1023: alternative recursive slot */
/*
* One page directory, shared between
* kernel and user modes.
* the following defines give the virtual addresses of various MMU
* data structures:
* PTE_BASE and APTE_BASE: the base VA of the linear PTE mappings
* PTD_BASE and APTD_BASE: the base VA of the recursive mapping of the PTD
* PDP_PDE and APDP_PDE: the VA of the PDE that points back to the PDP/APDP
*/
#define PTDPTDI 0x3bf /* ptd entry that points to ptd! */
#define KPTDI 0x3c0 /* start of kernel virtual pde's */
#define NKPDE_BASE 4 /* min. # of kernel PDEs */
#define NKPDE_MAX 63 /* max. # of kernel PDEs */
#define NKPDE_SCALE 1 /* # of kernel PDEs to add per meg. */
#define APTDPTDI 0x3ff /* start of alternate page directory */
#define UPT_MIN_ADDRESS (PTDPTDI<<PDSHIFT)
#define UPT_MAX_ADDRESS (UPT_MIN_ADDRESS + (PTDPTDI<<PGSHIFT))
#define PTE_BASE ((pt_entry_t *) (PDSLOT_PTE * NBPD) )
#define APTE_BASE ((pt_entry_t *) (PDSLOT_APTE * NBPD) )
#define PDP_BASE ((pd_entry_t *) (((char *)PTE_BASE) + (PDSLOT_PTE * NBPG)) )
#define APDP_BASE ((pd_entry_t *) (((char *)APTE_BASE) + (PDSLOT_APTE * NBPG)) )
#define PDP_PDE (PDP_BASE + PDSLOT_PTE)
#define APDP_PDE (PDP_BASE + PDSLOT_APTE)
/*
* Address of current and alternate address space page table maps
* and directories.
* XXXCDC: tmp xlate from old names:
* PTDPTDI -> PDSLOT_PTE
* KPTDI -> PDSLOT_KERN
* APTDPTDI -> PDSLOT_APTE
*/
#ifdef _KERNEL
extern pt_entry_t PTmap[], APTmap[], Upte;
extern pd_entry_t PTD[], APTD[], PTDpde, APTDpde, Upde;
extern pt_entry_t *Sysmap;
extern u_long PTDpaddr; /* physical address of kernel PTD */
/*
* the follow define determines how many PTPs should be set up for the
* kernel by locore.s at boot time. this should be large enough to
* get the VM system running. once the VM system is running, the
* pmap module can add more PTPs to the kernel area on demand.
*/
void pmap_bootstrap __P((vaddr_t start));
boolean_t pmap_testbit __P((paddr_t, int));
void pmap_changebit __P((paddr_t, int, int));
#ifndef NKPTP
#define NKPTP 4 /* 16MB to start */
#endif
#define NKPTP_MIN 4 /* smallest value we allow */
#define NKPTP_MAX (1024 - (KERNBASE/NBPD) - 1)
/* largest value (-1 for APTP space) */
/*
* virtual address to page table entry and
* to physical address. Likewise for alternate address space.
* Note: these work recursively, thus vtopte of a pte will give
* the corresponding pde that in turn maps it.
* various address macros
*
* vtopte: return a pointer to the PTE mapping a VA
* kvtopte: same as above (takes a KVA, but doesn't matter with this pmap)
* ptetov: given a pointer to a PTE, return the VA that it maps
* vtophys: translate a VA to the PA mapped to it
*
* plus alternative versions of the above
*/
#define vtopte(va) (PTmap + i386_btop(va))
#define kvtopte(va) vtopte(va)
#define ptetov(pt) (i386_ptob(pt - PTmap))
#define vtophys(va) \
((*vtopte(va) & PG_FRAME) | ((unsigned)(va) & ~PG_FRAME))
#define avtopte(va) (APTmap + i386_btop(va))
#define ptetoav(pt) (i386_ptob(pt - APTmap))
#define avtophys(va) \
((*avtopte(va) & PG_FRAME) | ((unsigned)(va) & ~PG_FRAME))
#define vtopte(VA) (PTE_BASE + i386_btop(VA))
#define kvtopte(VA) vtopte(VA)
#define ptetov(PT) (i386_ptob(PT - PTE_BASE))
#define vtophys(VA) ((*vtopte(VA) & PG_FRAME) | ((unsigned)(VA) & ~PG_FRAME))
#define avtopte(VA) (APTE_BASE + i386_btop(VA))
#define ptetoav(PT) (i386_ptob(PT - APTE_BASE))
#define avtophys(VA) ((*avtopte(VA) & PG_FRAME) | ((unsigned)(VA) & ~PG_FRAME))
/*
* macros to generate page directory/table indicies
* pdei/ptei: generate index into PDP/PTP from a VA
*/
#define pdei(va) (((va) & PD_MASK) >> PDSHIFT)
#define ptei(va) (((va) & PT_MASK) >> PGSHIFT)
#define pdei(VA) (((VA) & PD_MASK) >> PDSHIFT)
#define ptei(VA) (((VA) & PT_MASK) >> PGSHIFT)
/*
* Pmap stuff
* PTP macros:
* a PTP's index is the PD index of the PDE that points to it
* a PTP's offset is the byte-offset in the PTE space that this PTP is at
* a PTP's VA is the first VA mapped by that PTP
*
* note that NBPG == number of bytes in a PTP (4096 bytes == 1024 entries)
* NBPD == number of bytes a PTP can map (4MB)
*/
typedef struct pmap {
pd_entry_t *pm_pdir; /* KVA of page directory */
short pm_dref; /* page directory ref count */
short pm_count; /* pmap reference count */
simple_lock_data_t pm_lock; /* lock on pmap */
struct pmap_statistics pm_stats; /* pmap statistics */
long pm_ptpages; /* more stats: PT pages */
int pm_flags; /* see below */
#define ptp_i2o(I) ((I) * NBPG) /* index => offset */
#define ptp_o2i(O) ((O) / NBPG) /* offset => index */
#define ptp_i2v(I) ((I) * NBPD) /* index => VA */
#define ptp_v2i(V) ((V) / NBPD) /* VA => index (same as pdei) */
union descriptor *pm_ldt; /* user-set LDT entries */
int pm_ldt_len; /* number of LDT entries */
int pm_ldt_sel; /* LDT selector */
} *pmap_t;
/*
* PG_AVAIL usage: we make use of the ignored bits of the PTE
*/
#define PG_W PG_AVAIL1 /* "wired" mapping */
#define PG_PVLIST PG_AVAIL2 /* mapping has entry on pvlist */
/* PG_AVAIL3 not used */
#ifdef _KERNEL
/*
* pmap data structures: see pmap.c for details of locking.
*/
struct pmap;
typedef struct pmap *pmap_t;
/*
* we maintain a list of all non-kernel pmaps
*/
LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */
/*
* the pmap structure
*
* note that the pm_obj contains the simple_lock, the reference count,
* page list, and number of PTPs within the pmap.
*/
struct pmap {
struct uvm_object pm_obj; /* object (lck by object lock) */
#define pm_lock pm_obj.vmobjlock
LIST_ENTRY(pmap) pm_list; /* list (lck by pm_list lock) */
pd_entry_t *pm_pdir; /* VA of PD (lck by object lock) */
u_int32_t pm_pdirpa; /* PA of PD (read-only after create) */
struct vm_page *pm_ptphint; /* pointer to a random PTP in our pmap */
struct pmap_statistics pm_stats; /* pmap stats (lck by object lock) */
int pm_flags; /* see below */
union descriptor *pm_ldt; /* user-set LDT */
int pm_ldt_len; /* number of LDT entries */
int pm_ldt_sel; /* LDT selector */
};
/* pm_flags */
#define PMF_USER_LDT 0x01 /* pmap has user-set LDT */
/*
* For each vm_page_t, there is a list of all currently valid virtual
* mappings of that page. An entry is a pv_entry, the list is pv_table.
* for each managed physical page we maintain a list of <PMAP,VA>'s
* which it is mapped at. the list is headed by a pv_head structure.
* there is one pv_head per managed phys page (allocated at boot time).
* the pv_head structure points to a list of pv_entry structures (each
* describes one mapping).
*/
struct pv_entry {
struct pv_entry *pv_next; /* next pv_entry */
pmap_t pv_pmap; /* pmap where mapping lies */
vaddr_t pv_va; /* virtual address for mapping */
struct pv_entry;
struct pv_head {
simple_lock_data_t pvh_lock; /* locks every pv on this list */
struct pv_entry *pvh_list; /* head of list (locked by pvh_lock) */
};
struct pv_page;
struct pv_page_info {
TAILQ_ENTRY(pv_page) pgi_list;
struct pv_entry *pgi_freelist;
int pgi_nfree;
struct pv_entry { /* all fields locked by their pvh_lock */
struct pv_entry *pv_next; /* next entry */
struct pmap *pv_pmap; /* the pmap */
vaddr_t pv_va; /* the virtual address */
struct vm_page *pv_ptp; /* the vm_page of the PTP */
};
/*
* This is basically:
* ((NBPG - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
* pv_entrys are dynamically allocated in chunks from a single page.
* we keep track of how many pv_entrys are in use for each page and
* we can free pv_entry pages if needed. there is one lock for the
* entire allocation system.
*/
#define NPVPPG 340
struct pv_page {
struct pv_page_info pvp_pgi;
struct pv_entry pvp_pv[NPVPPG];
struct pv_page_info {
TAILQ_ENTRY(pv_page) pvpi_list;
struct pv_entry *pvpi_pvfree;
int pvpi_nfree;
};
#ifdef _KERNEL
extern int nkpde; /* number of kernel page dir. ents */
extern struct pmap kernel_pmap_store;
/*
* number of pv_entry's in a pv_page
 * (note: won't work on systems where NBPG isn't a constant)
*/
#define PVE_PER_PVPAGE ( (NBPG - sizeof(struct pv_page_info)) / \
sizeof(struct pv_entry) )
/*
* a pv_page: where pv_entrys are allocated from
*/
struct pv_page {
struct pv_page_info pvinfo;
struct pv_entry pvents[PVE_PER_PVPAGE];
};
/*
* pmap_remove_record: a record of VAs that have been unmapped, used to
* flush TLB. if we have more than PMAP_RR_MAX then we stop recording.
*/
#define PMAP_RR_MAX 16 /* max of 16 pages (64K) */
struct pmap_remove_record {
int prr_npages;
vaddr_t prr_vas[PMAP_RR_MAX];
};
/*
* pmap_transfer_location: used to pass the current location in the
* pmap between pmap_transfer and pmap_transfer_ptes [e.g. during
* a pmap_copy].
*/
struct pmap_transfer_location {
vaddr_t addr; /* the address (page-aligned) */
pt_entry_t *pte; /* the PTE that maps address */
struct vm_page *ptp; /* the PTP that the PTE lives in */
};
/*
* global kernel variables
*/
/* PTDpaddr: is the physical address of the kernel's PDP */
extern u_long PTDpaddr;
extern struct pmap kernel_pmap_store; /* kernel pmap */
extern int nkpde; /* current # of PDEs for kernel */
extern int pmap_pg_g; /* do we support PG_G? */
/*
* macros
*/
#define pmap_kernel() (&kernel_pmap_store)
#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
#define pmap_update() tlbflush()
vaddr_t reserve_dumppages __P((vaddr_t));
#define pmap_clear_modify(pg) pmap_change_attrs(pg, 0, PG_M)
#define pmap_clear_reference(pg) pmap_change_attrs(pg, 0, PG_U)
#define pmap_copy(DP,SP,D,L,S) pmap_transfer(DP,SP,D,L,S, FALSE)
#define pmap_is_modified(pg) pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg) pmap_test_attrs(pg, PG_U)
#define pmap_move(DP,SP,D,L,S) pmap_transfer(DP,SP,D,L,S, TRUE)
#define pmap_phys_address(ppn) i386_ptob(ppn)
#define pmap_valid_entry(E) ((E) & PG_V) /* is PDE or PTE valid? */
/*
* prototypes
*/
void pmap_activate __P((struct proc *));
void pmap_bootstrap __P((vaddr_t));
boolean_t pmap_change_attrs __P((struct vm_page *, int, int));
void pmap_deactivate __P((struct proc *));
static void pmap_kenter_pa __P((vaddr_t, paddr_t, vm_prot_t));
static void pmap_page_protect __P((struct vm_page *, vm_prot_t));
void pmap_page_remove __P((struct vm_page *));
static void pmap_protect __P((struct pmap *, vaddr_t,
vaddr_t, vm_prot_t));
void pmap_remove __P((struct pmap *, vaddr_t, vaddr_t));
boolean_t pmap_test_attrs __P((struct vm_page *, int));
void pmap_transfer __P((struct pmap *, struct pmap *, vaddr_t,
vsize_t, vaddr_t, boolean_t));
static void pmap_update_pg __P((vaddr_t));
static void pmap_update_2pg __P((vaddr_t,vaddr_t));
void pmap_write_protect __P((struct pmap *, vaddr_t,
vaddr_t, vm_prot_t));
vaddr_t reserve_dumppages __P((vaddr_t)); /* XXX: not a pmap fn */
#define PMAP_GROWKERNEL /* turn on pmap_growkernel interface */
/*
* inline functions
*/
/*
* pmap_update_pg: flush one page from the TLB (or flush the whole thing
* if hardware doesn't support one-page flushing)
*/
__inline static void pmap_update_pg(va)
vaddr_t va;
static __inline void
pmap_clear_modify(paddr_t pa)
{
pmap_changebit(pa, 0, ~PG_M);
#if defined(I386_CPU)
if (cpu_class == CPUCLASS_386)
pmap_update();
else
#endif
invlpg((u_int) va);
}
static __inline void
pmap_clear_reference(paddr_t pa)
/*
* pmap_update_2pg: flush two pages from the TLB
*/
__inline static void pmap_update_2pg(va, vb)
vaddr_t va, vb;
{
pmap_changebit(pa, 0, ~PG_U);
#if defined(I386_CPU)
if (cpu_class == CPUCLASS_386)
pmap_update();
else
#endif
{
invlpg((u_int) va);
invlpg((u_int) vb);
}
}
static __inline void
pmap_copy_on_write(paddr_t pa)
/*
* pmap_page_protect: change the protection of all recorded mappings
* of a managed page
*
* => this function is a frontend for pmap_page_remove/pmap_change_attrs
* => we only have to worry about making the page more protected.
* unprotecting a page is done on-demand at fault time.
*/
__inline static void pmap_page_protect(pg, prot)
struct vm_page *pg;
vm_prot_t prot;
{
pmap_changebit(pa, PG_RO, ~PG_RW);
if ((prot & VM_PROT_WRITE) == 0) {
if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
(void) pmap_change_attrs(pg, PG_RO, PG_RW);
} else {
pmap_page_remove(pg);
}
}
}
static __inline boolean_t
pmap_is_modified(paddr_t pa)
/*
* pmap_protect: change the protection of pages in a pmap
*
* => this function is a frontend for pmap_remove/pmap_write_protect
* => we only have to worry about making the page more protected.
* unprotecting a page is done on-demand at fault time.
*/
__inline static void pmap_protect(pmap, sva, eva, prot)
struct pmap *pmap;
vaddr_t sva, eva;
vm_prot_t prot;
{
return pmap_testbit(pa, PG_M);
if ((prot & VM_PROT_WRITE) == 0) {
if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
pmap_write_protect(pmap, sva, eva, prot);
} else {
pmap_remove(pmap, sva, eva);
}
}
}
static __inline boolean_t
pmap_is_referenced(paddr_t pa)
/*
* pmap_kenter_pa: enter a kernel mapping without R/M (pv_entry) tracking
*
* => no need to lock anything, assume va is already allocated
* => should be faster than normal pmap enter function
*/
__inline static void pmap_kenter_pa(va, pa, prot)
vaddr_t va;
paddr_t pa;
vm_prot_t prot;
{
return pmap_testbit(pa, PG_U);
}
struct pmap *pm = pmap_kernel();
pt_entry_t *pte, opte;
int s;
static __inline paddr_t
pmap_phys_address(int ppn)
{
return i386_ptob(ppn);
}
s = splimp();
simple_lock(&pm->pm_obj.vmobjlock);
pm->pm_stats.resident_count++;
pm->pm_stats.wired_count++;
simple_unlock(&pm->pm_obj.vmobjlock);
splx(s);
pte = vtopte(va);
opte = *pte;
*pte = pa | ((prot & VM_PROT_WRITE)? PG_RW : PG_RO) |
PG_V | pmap_pg_g; /* zap! */
if (pmap_valid_entry(opte))
pmap_update_pg(va);
}
vaddr_t pmap_map __P((vaddr_t, paddr_t, paddr_t, int));
@ -268,6 +537,5 @@ void pmap_ldt_cleanup __P((struct proc *));
#define PMAP_FORK
#endif /* USER_LDT */
#endif /* _KERNEL */
#endif /* _I386_PMAP_H_ */
#endif /* _KERNEL */
#endif /* _I386_PMAP_H_ */

View File

@ -1,537 +0,0 @@
/* $NetBSD: pmap.new.h,v 1.9 1999/05/12 19:28:30 thorpej Exp $ */
/*
*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgment:
* This product includes software developed by Charles D. Cranor and
* Washington University.
* 4. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmap.h: see pmap.c for the history of this pmap module.
*/
#ifndef _I386_PMAP_H_
#define _I386_PMAP_H_
#include <machine/cpufunc.h>
#include <machine/pte.h>
#include <machine/segments.h>
#include <uvm/uvm_object.h>
/*
* see pte.h for a description of i386 MMU terminology and hardware
* interface.
*
* a pmap describes a processes' 4GB virtual address space. this
* virtual address space can be broken up into 1024 4MB regions which
* are described by PDEs in the PDP. the PDEs are defined as follows:
*
* (ranges are inclusive -> exclusive, just like vm_map_entry start/end)
* (the following assumes that KERNBASE is 0xf0000000)
*
* PDE#s VA range usage
* 0->959 0x0 -> 0xefc00000 user address space, note that the
* max user address is 0xefbfe000
* the final two pages in the last 4MB
* used to be reserved for the UAREA
* but now are no longer used
* 959 0xefc00000-> recursive mapping of PDP (used for
* 0xf0000000 linear mapping of PTPs)
* 960->1023 0xf0000000-> kernel address space (constant
* 0xffc00000 across all pmap's/processes)
* 1023 0xffc00000-> "alternate" recursive PDP mapping
* <end> (for other pmaps)
*
*
* note: a recursive PDP mapping provides a way to map all the PTEs for
* a 4GB address space into a linear chunk of virtual memory. in other
* words, the PTE for page 0 is the first int mapped into the 4MB recursive
* area. the PTE for page 1 is the second int. the very last int in the
* 4MB range is the PTE that maps VA 0xffffe000 (the last page in a 4GB
* address).
*
* all pmap's PD's must have the same values in slots 960->1023 so that
* the kernel is always mapped in every process. these values are loaded
* into the PD at pmap creation time.
*
* at any one time only one pmap can be active on a processor. this is
* the pmap whose PDP is pointed to by processor register %cr3. this pmap
* will have all its PTEs mapped into memory at the recursive mapping
 * point (slot #959 as shown above). when the pmap code wants to find the
* PTE for a virtual address, all it has to do is the following:
*
* address of PTE = (959 * 4MB) + (VA / NBPG) * sizeof(pt_entry_t)
* = 0xefc00000 + (VA /4096) * 4
*
* what happens if the pmap layer is asked to perform an operation
* on a pmap that is not the one which is currently active? in that
* case we take the PA of the PDP of non-active pmap and put it in
* slot 1023 of the active pmap. this causes the non-active pmap's
* PTEs to get mapped in the final 4MB of the 4GB address space
* (e.g. starting at 0xffc00000).
*
* the following figure shows the effects of the recursive PDP mapping:
*
* PDP (%cr3)
* +----+
* | 0| -> PTP#0 that maps VA 0x0 -> 0x400000
* | |
* | |
* | 959| -> points back to PDP (%cr3) mapping VA 0xefc00000 -> 0xf0000000
* | 960| -> first kernel PTP (maps 0xf0000000 -> 0xf0400000)
* | |
* |1023| -> points to alternate pmap's PDP (maps 0xffc00000 -> end)
* +----+
*
* note that the PDE#959 VA (0xefc00000) is defined as "PTE_BASE"
* note that the PDE#1023 VA (0xffc00000) is defined as "APTE_BASE"
*
* starting at VA 0xefc00000 the current active PDP (%cr3) acts as a
* PTP:
*
* PTP#959 == PDP(%cr3) => maps VA 0xefc00000 -> 0xf0000000
* +----+
* | 0| -> maps the contents of PTP#0 at VA 0xefc00000->0xefc01000
* | |
* | |
* | 959| -> maps contents of PTP#959 (the PDP) at VA 0xeffbf000
* | 960| -> maps contents of first kernel PTP
* | |
* |1023|
* +----+
*
* note that mapping of the PDP at PTP#959's VA (0xeffbf000) is
* defined as "PDP_BASE".... within that mapping there are two
* defines:
* "PDP_PDE" (0xeffbfefc) is the VA of the PDE in the PDP
* which points back to itself.
* "APDP_PDE" (0xeffbfffc) is the VA of the PDE in the PDP which
* establishes the recursive mapping of the alternate pmap.
* to set the alternate PDP, one just has to put the correct
* PA info in *APDP_PDE.
*
* note that in the APTE_BASE space, the APDP appears at VA
* "APDP_BASE" (0xfffff000).
*/
/*
* the following defines identify the slots used as described above.
*/
#define PDSLOT_PTE ((KERNBASE/NBPD)-1) /* 959: for recursive PDP map */
#define PDSLOT_KERN (KERNBASE/NBPD) /* 960: start of kernel space */
#define PDSLOT_APTE ((unsigned)1023) /* 1023: alternative recursive slot */
/*
* the following defines give the virtual addresses of various MMU
* data structures:
* PTE_BASE and APTE_BASE: the base VA of the linear PTE mappings
* PTD_BASE and APTD_BASE: the base VA of the recursive mapping of the PTD
* PDP_PDE and APDP_PDE: the VA of the PDE that points back to the PDP/APDP
*/
#define PTE_BASE ((pt_entry_t *) (PDSLOT_PTE * NBPD) )
#define APTE_BASE ((pt_entry_t *) (PDSLOT_APTE * NBPD) )
#define PDP_BASE ((pd_entry_t *) (((char *)PTE_BASE) + (PDSLOT_PTE * NBPG)) )
#define APDP_BASE ((pd_entry_t *) (((char *)APTE_BASE) + (PDSLOT_APTE * NBPG)) )
#define PDP_PDE (PDP_BASE + PDSLOT_PTE)
#define APDP_PDE (PDP_BASE + PDSLOT_APTE)
/*
* XXXCDC: tmp xlate from old names:
* PTDPTDI -> PDSLOT_PTE
* KPTDI -> PDSLOT_KERN
* APTDPTDI -> PDSLOT_APTE
*/
/*
 * the following define determines how many PTPs should be set up for the
* kernel by locore.s at boot time. this should be large enough to
* get the VM system running. once the VM system is running, the
* pmap module can add more PTPs to the kernel area on demand.
*/
#ifndef NKPTP
#define NKPTP 4 /* 16MB to start */
#endif
#define NKPTP_MIN 4 /* smallest value we allow */
#define NKPTP_MAX (1024 - (KERNBASE/NBPD) - 1)
/* largest value (-1 for APTP space) */
/*
* various address macros
*
* vtopte: return a pointer to the PTE mapping a VA
* kvtopte: same as above (takes a KVA, but doesn't matter with this pmap)
* ptetov: given a pointer to a PTE, return the VA that it maps
* vtophys: translate a VA to the PA mapped to it
*
* plus alternative versions of the above
*/
#define vtopte(VA) (PTE_BASE + i386_btop(VA))
#define kvtopte(VA) vtopte(VA)
#define ptetov(PT) (i386_ptob(PT - PTE_BASE))
#define vtophys(VA) ((*vtopte(VA) & PG_FRAME) | ((unsigned)(VA) & ~PG_FRAME))
#define avtopte(VA) (APTE_BASE + i386_btop(VA))
#define ptetoav(PT) (i386_ptob(PT - APTE_BASE))
#define avtophys(VA) ((*avtopte(VA) & PG_FRAME) | ((unsigned)(VA) & ~PG_FRAME))
/*
* pdei/ptei: generate index into PDP/PTP from a VA
*/
#define pdei(VA) (((VA) & PD_MASK) >> PDSHIFT)
#define ptei(VA) (((VA) & PT_MASK) >> PGSHIFT)
/*
* PTP macros:
* a PTP's index is the PD index of the PDE that points to it
* a PTP's offset is the byte-offset in the PTE space that this PTP is at
* a PTP's VA is the first VA mapped by that PTP
*
* note that NBPG == number of bytes in a PTP (4096 bytes == 1024 entries)
* NBPD == number of bytes a PTP can map (4MB)
*/
#define ptp_i2o(I) ((I) * NBPG) /* index => offset */
#define ptp_o2i(O) ((O) / NBPG) /* offset => index */
#define ptp_i2v(I) ((I) * NBPD) /* index => VA */
#define ptp_v2i(V) ((V) / NBPD) /* VA => index (same as pdei) */
/*
* PG_AVAIL usage: we make use of the ignored bits of the PTE
*/
#define PG_W PG_AVAIL1 /* "wired" mapping */
#define PG_PVLIST PG_AVAIL2 /* mapping has entry on pvlist */
/* PG_AVAIL3 not used */
#ifdef _KERNEL
/*
* pmap data structures: see pmap.c for details of locking.
*/
struct pmap;
typedef struct pmap *pmap_t;
/*
* we maintain a list of all non-kernel pmaps
*/
LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */
/*
 * the pmap structure
 *
 * note that the pm_obj contains the simple_lock, the reference count,
 * page list, and number of PTPs within the pmap.
 */
struct pmap {
	struct uvm_object pm_obj;	/* object (lck by object lock) */
#define pm_lock pm_obj.vmobjlock
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
	u_int32_t pm_pdirpa;		/* PA of PD (read-only after create) */
	struct vm_page *pm_ptphint;	/* pointer to a random PTP in our pmap */
	struct pmap_statistics pm_stats;  /* pmap stats (lck by object lock) */
	int pm_flags;			/* see PMF_* flags below */
	union descriptor *pm_ldt;	/* user-set LDT */
	int pm_ldt_len;			/* number of LDT entries */
	int pm_ldt_sel;			/* LDT selector */
};
/* pm_flags */
#define PMF_USER_LDT 0x01 /* pmap has user-set LDT */
/*
 * for each managed physical page we maintain a list of <PMAP,VA>'s
 * which it is mapped at. the list is headed by a pv_head structure.
 * there is one pv_head per managed phys page (allocated at boot time).
 * the pv_head structure points to a list of pv_entry structures (each
 * describes one mapping).
 */
struct pv_entry;			/* forward declaration; defined below */
struct pv_head {
	simple_lock_data_t pvh_lock;	/* locks every pv on this list */
	struct pv_entry *pvh_list;	/* head of list (locked by pvh_lock) */
};
struct pv_entry {			/* all fields locked by their pvh_lock */
	struct pv_entry *pv_next;	/* next entry on this page's list */
	struct pmap *pv_pmap;		/* the pmap the mapping lives in */
	vaddr_t pv_va;			/* the virtual address of the mapping */
	struct vm_page *pv_ptp;		/* the vm_page of the PTP */
};
/*
 * pv_entrys are dynamically allocated in chunks from a single page.
 * we keep track of how many pv_entrys are in use for each page and
 * we can free pv_entry pages if needed. there is one lock for the
 * entire allocation system.
 */
struct pv_page_info {
	TAILQ_ENTRY(pv_page) pvpi_list;	/* link on the global pv_page queue */
	struct pv_entry *pvpi_pvfree;	/* free list of pv_entrys in this page */
	int pvpi_nfree;			/* number of free pv_entrys in this page */
};
/*
 * number of pv_entry's in a pv_page
 * (note: won't work on systems where NBPG isn't a constant)
 */
#define PVE_PER_PVPAGE ( (NBPG - sizeof(struct pv_page_info)) / \
			sizeof(struct pv_entry) )
/*
 * a pv_page: where pv_entrys are allocated from
 */
struct pv_page {
	struct pv_page_info pvinfo;	/* allocator bookkeeping (see above) */
	struct pv_entry pvents[PVE_PER_PVPAGE];	/* the pv_entrys themselves */
};
/*
 * pmap_remove_record: a record of VAs that have been unmapped, used to
 * flush TLB. if we have more than PMAP_RR_MAX then we stop recording.
 */
#define PMAP_RR_MAX 16 /* max of 16 pages (64K) */
struct pmap_remove_record {
	int prr_npages;			/* number of valid entries in prr_vas */
	vaddr_t prr_vas[PMAP_RR_MAX];	/* the unmapped VAs, one per page */
};
/*
 * pmap_transfer_location: used to pass the current location in the
 * pmap between pmap_transfer and pmap_transfer_ptes [e.g. during
 * a pmap_copy].
 */
struct pmap_transfer_location {
	vaddr_t addr;			/* the address (page-aligned) */
	pt_entry_t *pte;		/* the PTE that maps address */
	struct vm_page *ptp;		/* the PTP that the PTE lives in */
};
/*
* global kernel variables
*/
/* PTDpaddr: is the physical address of the kernel's PDP */
extern u_long PTDpaddr;
extern struct pmap kernel_pmap_store; /* kernel pmap */
extern int nkpde; /* current # of PDEs for kernel */
extern int pmap_pg_g; /* do we support PG_G? */
/*
* macros
*/
#define pmap_kernel() (&kernel_pmap_store)
#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
#define pmap_update() tlbflush()
#define pmap_clear_modify(pg) pmap_change_attrs(pg, 0, PG_M)
#define pmap_clear_reference(pg) pmap_change_attrs(pg, 0, PG_U)
#define pmap_copy(DP,SP,D,L,S) pmap_transfer(DP,SP,D,L,S, FALSE)
#define pmap_is_modified(pg) pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg) pmap_test_attrs(pg, PG_U)
#define pmap_move(DP,SP,D,L,S) pmap_transfer(DP,SP,D,L,S, TRUE)
#define pmap_phys_address(ppn) i386_ptob(ppn)
#define pmap_valid_entry(E) ((E) & PG_V) /* is PDE or PTE valid? */
/*
* prototypes
*/
void pmap_activate __P((struct proc *));
void pmap_bootstrap __P((vaddr_t));
boolean_t pmap_change_attrs __P((struct vm_page *, int, int));
void pmap_deactivate __P((struct proc *));
static void pmap_kenter_pa __P((vaddr_t, paddr_t, vm_prot_t));
static void pmap_page_protect __P((struct vm_page *, vm_prot_t));
void pmap_page_remove __P((struct vm_page *));
static void pmap_protect __P((struct pmap *, vaddr_t,
vaddr_t, vm_prot_t));
void pmap_remove __P((struct pmap *, vaddr_t, vaddr_t));
boolean_t pmap_test_attrs __P((struct vm_page *, int));
void pmap_transfer __P((struct pmap *, struct pmap *, vaddr_t,
vsize_t, vaddr_t, boolean_t));
static void pmap_update_pg __P((vaddr_t));
static void pmap_update_2pg __P((vaddr_t,vaddr_t));
void pmap_write_protect __P((struct pmap *, vaddr_t,
vaddr_t, vm_prot_t));
vaddr_t reserve_dumppages __P((vaddr_t)); /* XXX: not a pmap fn */
#define PMAP_GROWKERNEL /* turn on pmap_growkernel interface */
/*
* inline functions
*/
/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole TLB
 * if the hardware doesn't support one-page flushing)
 */
__inline static void pmap_update_pg(va)
	vaddr_t va;
{
#if defined(I386_CPU)
	if (cpu_class == CPUCLASS_386)
		pmap_update();		/* 386 has no invlpg: flush entire TLB */
	else
#endif
		invlpg((u_int) va);	/* flush only this page's translation */
}
/*
 * pmap_update_2pg: flush two pages from the TLB
 */
__inline static void pmap_update_2pg(va, vb)
	vaddr_t va, vb;
{
#if defined(I386_CPU)
	if (cpu_class == CPUCLASS_386)
		pmap_update();		/* 386 has no invlpg: flush entire TLB */
	else
#endif
	{
		invlpg((u_int) va);	/* flush each page individually */
		invlpg((u_int) vb);
	}
}
/*
 * pmap_page_protect: reduce the protection of all recorded mappings
 * of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_change_attrs
 * => we only ever make a page more protected here; unprotecting a page
 *    is done on-demand at fault time.
 */
__inline static void pmap_page_protect(pg, prot)
	struct vm_page *pg;
	vm_prot_t prot;
{
	if (prot & VM_PROT_WRITE)
		return;			/* write still allowed: nothing to revoke */

	if ((prot & (VM_PROT_READ|VM_PROT_EXECUTE)) != 0)
		(void) pmap_change_attrs(pg, PG_RO, PG_RW);	/* downgrade to R/O */
	else
		pmap_page_remove(pg);	/* no access left: drop all mappings */
}
/*
 * pmap_protect: reduce the protection of a range of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only ever make pages more protected here; unprotecting a page
 *    is done on-demand at fault time.
 */
__inline static void pmap_protect(pmap, sva, eva, prot)
	struct pmap *pmap;
	vaddr_t sva, eva;
	vm_prot_t prot;
{
	if (prot & VM_PROT_WRITE)
		return;			/* write still allowed: nothing to do */

	if ((prot & (VM_PROT_READ|VM_PROT_EXECUTE)) != 0)
		pmap_write_protect(pmap, sva, eva, prot);	/* downgrade to R/O */
	else
		pmap_remove(pmap, sva, eva);	/* no access left: unmap range */
}
/*
 * pmap_kenter_pa: enter a kernel mapping without R/M (pv_entry) tracking
 *
 * => no need to lock anything, assume va is already allocated
 * => should be faster than normal pmap enter function
 */
__inline static void pmap_kenter_pa(va, pa, prot)
	vaddr_t va;
	paddr_t pa;
	vm_prot_t prot;
{
	struct pmap *pm = pmap_kernel();
	pt_entry_t *pte, opte;
	int s;
	s = splimp();			/* raise spl while touching pmap stats */
	simple_lock(&pm->pm_obj.vmobjlock);
	pm->pm_stats.resident_count++;
	pm->pm_stats.wired_count++;	/* kenter'd mappings are counted as wired */
	simple_unlock(&pm->pm_obj.vmobjlock);
	splx(s);
	pte = vtopte(va);
	opte = *pte;			/* save old PTE to decide on TLB flush below */
	*pte = pa | ((prot & VM_PROT_WRITE)? PG_RW : PG_RO) |
		PG_V | pmap_pg_g; /* zap! */
	if (pmap_valid_entry(opte))
		pmap_update_pg(va);	/* old mapping was live: flush stale TLB entry */
}
vaddr_t pmap_map __P((vaddr_t, paddr_t, paddr_t, int));
#if defined(USER_LDT)
void pmap_ldt_cleanup __P((struct proc *));
#define PMAP_FORK
#endif /* USER_LDT */
#endif /* _KERNEL */
#endif /* _I386_PMAP_H_ */

View File

@ -1,4 +1,4 @@
/* $NetBSD: vmparam.h,v 1.31 1999/04/26 22:46:46 thorpej Exp $ */
/* $NetBSD: vmparam.h,v 1.32 1999/06/17 00:12:12 thorpej Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
@ -41,10 +41,6 @@
#ifndef _VMPARAM_H_
#define _VMPARAM_H_
#if defined(_KERNEL) && !defined(_LKM)
#include "opt_pmap_new.h"
#endif
/*
* Machine dependent constants for 386.
*/
@ -161,16 +157,9 @@
/*
* pmap specific data stored in the vm_physmem[] array
*/
#if defined(PMAP_NEW)
struct pmap_physseg {
struct pv_head *pvhead; /* pv_head array */
char *attrs; /* attrs array */
};
#else
struct pmap_physseg {
struct pv_entry *pvent; /* pv_entry array */
char *attrs; /* attrs array */
};
#endif
#endif /* _VMPARAM_H_ */