Switch the pc532 to MACHINE_NEW_NONCONTIG and add machine specific bits
for UVM. All of this was mostly done by stealing code from the i386 port.
Prepare for stealing pmap.new.c as well.
matthias 1998-03-18 21:59:38 +00:00
parent cfdf9a95ad
commit e14a1c1120
11 changed files with 912 additions and 152 deletions
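
For orientation (this note and the sketch below are editorial, not part of the patch): the heart of the MACHINE_NEW_NONCONTIG switch is that the pmap stops indexing one global pv_table[] through pmap_page_index() and instead asks the VM system which vm_physmem[] segment a physical page belongs to, keeping the pv entries and attribute bits in the per-segment struct pmap_physseg added to vmparam.h below. A minimal sketch of that lookup pattern, assuming the MACHINE_NEW_NONCONTIG declarations from <vm/vm_page.h>; the helper name pa_to_pvent is made up for illustration:

	#include <vm/vm.h>
	#include <vm/vm_page.h>

	/* Illustrative only -- not part of this commit. */
	static __inline struct pv_entry *
	pa_to_pvent(vm_offset_t pa)
	{
		int bank, off;

		/* vm_physseg_find() maps a page frame number to a segment index. */
		bank = vm_physseg_find(atop(pa), &off);
		if (bank == -1)
			return NULL;		/* unmanaged page */
		/* pv entries and attribute bits live in the per-segment pmap_physseg. */
		return &vm_physmem[bank].pmseg.pvent[off];
	}

The PMAP_NEW variant of struct pmap_physseg replaces pmseg.pvent with pmseg.pvhead, as the two definitions in the vmparam.h hunk show; the pmap.c changes below apply exactly this pattern in pmap_remove(), pmap_enter(), pmap_testbit() and friends.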

View File

@ -1,4 +1,4 @@
/* $NetBSD: pcb.h,v 1.8 1996/04/04 06:36:47 phil Exp $ */
/* $NetBSD: pcb.h,v 1.9 1998/03/18 21:59:39 matthias Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
@ -41,6 +41,10 @@
#ifndef _MACHINE_PCB_H_
#define _MACHINE_PCB_H_
#if defined(_KERNEL) && !defined(_LKM)
#include "opt_pmap_new.h"
#endif
/*
* PC 532 process control block
*
@ -66,6 +70,9 @@ struct pcb {
* Software pcb (extension)
*/
caddr_t pcb_onfault; /* copyin/out fault recovery */
#if defined(PMAP_NEW)
struct pmap *pcb_pmap; /* back pointer to our pmap */
#endif
};
/*

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.19 1998/02/18 02:05:35 cgd Exp $ */
/* $NetBSD: pmap.h,v 1.20 1998/03/18 21:59:39 matthias Exp $ */
/*
* Copyright (c) 1995 Charles M. Hannum. All rights reserved.
@ -50,6 +50,14 @@
* from hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
*/
#if defined(_KERNEL) && !defined(_LKM)
#include "opt_pmap_new.h"
#endif
#ifdef PMAP_NEW /* redirect */
#include <machine/pmap.new.h> /* defines _NS532_PMAP_H_ */
#endif
#ifndef _NS532_PMAP_H_
#define _NS532_PMAP_H_
@ -61,6 +69,12 @@
* W.Jolitz, 8/89
*/
/*
* PG_AVAIL usage ...
*/
#define PG_W PG_AVAIL1 /* "wired" mapping */
/*
* One page directory, shared between
* kernel and user modes.
@ -72,6 +86,9 @@
#define NKPDE_SCALE 1 /* # of kernel PDEs to add per meg. */
#define APTDPTDI 0x3fe /* start of alternate page directory */
#define UPT_MIN_ADDRESS (PTDPTDI<<PDSHIFT)
#define UPT_MAX_ADDRESS (UPT_MIN_ADDRESS + (PTDPTDI<<PGSHIFT))
/*
* Address of current and alternate address space page table maps
* and directories.
@ -80,6 +97,10 @@
extern pt_entry_t PTmap[], APTmap[];
extern pd_entry_t PTD[], APTD[], PTDpde, APTDpde;
extern pt_entry_t *Sysmap;
void pmap_bootstrap __P((vm_offset_t start));
boolean_t pmap_testbit __P((vm_offset_t, int));
void pmap_changebit __P((vm_offset_t, int, int));
#endif
/*
@ -110,7 +131,6 @@ extern pt_entry_t *Sysmap;
*/
typedef struct pmap {
pd_entry_t *pm_pdir; /* KVA of page directory */
boolean_t pm_pdchanged; /* pdir changed */
short pm_dref; /* page directory ref count */
short pm_count; /* pmap reference count */
simple_lock_data_t pm_lock; /* lock on pmap */
@ -148,18 +168,14 @@ struct pv_page {
};
#ifdef _KERNEL
extern int nkpde; /* number of kernel page dir. ents */
extern struct pmap kernel_pmap_store;
struct pv_entry *pv_table; /* array of entries, one per page */
pt_entry_t *pmap_pte __P((pmap_t, vm_offset_t));
#define pmap_kernel() (&kernel_pmap_store)
#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
#define pmap_update() tlbflush()
void pmap_bootstrap __P((vm_offset_t start));
pt_entry_t *pmap_pte __P((pmap_t, vm_offset_t));
boolean_t pmap_testbit __P((vm_offset_t, int));
void pmap_changebit __P((vm_offset_t, int, int));
vm_offset_t reserve_dumppages __P((vm_offset_t));
static __inline void

View File

@ -1,4 +1,4 @@
/* $NetBSD: vmparam.h,v 1.11 1997/07/12 16:19:52 perry Exp $ */
/* $NetBSD: vmparam.h,v 1.12 1998/03/18 21:59:39 matthias Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
@ -41,6 +41,10 @@
#ifndef _NS532_VMPARAM_H_
#define _NS532_VMPARAM_H_
#if defined(_KERNEL) && !defined(_LKM)
#include "opt_pmap_new.h"
#endif
/*
* Machine dependent constants for 532.
*/
@ -81,6 +85,13 @@
#define MAXSSIZ (8*1024*1024) /* max stack size */
#endif
/*
* Size of shared memory map
*/
#ifndef SHMMAXPGS
#define SHMMAXPGS 1024
#endif
/*
* PTEs for mapping user space into the kernel for phyio operations.
* One page is enough to handle 4Mb of simultaneous raw IO operations.
@ -89,13 +100,6 @@
#define USRIOSIZE (1 * NPTEPG) /* 4mb */
#endif
/*
* Size of shared memory map
*/
#ifndef SHMMAXPGS
#define SHMMAXPGS 1024
#endif
/*
* The time for a process to be blocked before being very swappable.
* This is a number of seconds which the system takes as being a non-trivial
@ -148,4 +152,26 @@
#define VM_MBUF_SIZE (NMBCLUSTERS*MCLBYTES)
#define VM_KMEM_SIZE (NKMEMCLUSTERS*CLBYTES)
#define VM_PHYS_SIZE (USRIOSIZE*CLBYTES)
#define MACHINE_NEW_NONCONTIG /* VM <=> pmap interface modifier */
#define VM_PHYSSEG_MAX 1 /* we have contiguous memory */
#define VM_PHYSSEG_STRAT VM_PSTRAT_RANDOM
#define VM_PHYSSEG_NOADD /* can't add RAM after vm_mem_init */
/*
* pmap specific data stored in the vm_physmem[] array
*/
#if defined(PMAP_NEW)
struct pmap_physseg {
struct pv_head *pvhead; /* pv_head array */
short *attrs; /* attrs array */
};
#else
struct pmap_physseg {
struct pv_entry *pvent; /* pv_entry array */
short *attrs; /* attrs array */
};
#endif
#endif

View File

@ -1,4 +1,4 @@
# $NetBSD: genassym.cf,v 1.2 1997/02/08 09:33:52 matthias Exp $
# $NetBSD: genassym.cf,v 1.3 1998/03/18 21:59:38 matthias Exp $
#
# Copyright (c) 1982, 1990 The Regents of the University of California.
@ -41,12 +41,21 @@
#type long
#asmtype i
include "opt_uvm.h"
include "opt_pmap_new.h"
include <sys/param.h>
include <sys/proc.h>
include <sys/resourcevar.h>
include <sys/device.h>
include <sys/user.h>
include <vm/vm.h>
ifdef UVM
include <uvm/uvm_extern.h>
endif
include <machine/trap.h>
include <machine/pmap.h>
include <machine/vmparam.h>
@ -58,9 +67,14 @@ define PGSHIFT PGSHIFT
define PGOFSET PGOFSET
define NBPG NBPG
ifdef PMAP_NEW
else
define PTDPTDI PTDPTDI
define KPTDI KPTDI
define APTDPTDI APTDPTDI
endif
define PDE_SIZE sizeof(pd_entry_t)
define KERNBASE KERNBASE
define VM_MAXUSER_ADDRESS VM_MAXUSER_ADDRESS
@ -75,7 +89,11 @@ define P_VMSPACE offsetof(struct proc, p_vmspace)
define P_FLAG offsetof(struct proc, p_flag)
define P_PID offsetof(struct proc, p_pid)
ifdef UVM
define V_INTR offsetof(struct uvmexp, intrs)
else
define V_INTR offsetof(struct vmmeter, v_intr)
endif
define PCB_ONSTACK offsetof(struct pcb, pcb_onstack)
define PCB_FSR offsetof(struct pcb, pcb_fsr)

View File

@ -1,4 +1,4 @@
/* $NetBSD: intr.c,v 1.19 1997/04/01 16:32:25 matthias Exp $ */
/* $NetBSD: intr.c,v 1.20 1998/03/18 21:59:38 matthias Exp $ */
/*
* Copyright (c) 1994 Matthias Pfaller.
@ -30,11 +30,17 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "opt_uvm.h"
#define DEFINE_SPLX
#include <sys/param.h>
#include <sys/vmmeter.h>
#include <sys/systm.h>
#include <vm/vm.h>
#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif
#include <machine/psl.h>
#define INTS 32
@ -125,7 +131,11 @@ check_sir(arg)
cirpending & -mask; mask <<= 1, iv++) {
if ((cirpending & mask) != 0) {
register int s;
#if defined(UVM)
uvmexp.softs++;
#else
cnt.v_soft++;
#endif
iv->iv_cnt++;
s = splraise(iv->iv_mask);
iv->iv_vec(iv->iv_arg);

View File

@ -1,4 +1,4 @@
/* $NetBSD: locore.s,v 1.48 1997/11/10 01:50:37 phil Exp $ */
/* $NetBSD: locore.s,v 1.49 1998/03/18 21:59:38 matthias Exp $ */
/*
* Copyright (c) 1993 Philip A. Nelson.
@ -40,6 +40,9 @@
*
*/
#include "opt_uvm.h"
#include "opt_pmap_new.h"
#include "assym.h"
#include <sys/errno.h>
@ -58,19 +61,29 @@
* PTmap is recursive pagemap at top of virtual address space.
* Within PTmap, the page directory can be found (third indirection).
*/
#ifdef PMAP_NEW
SET(PTmap, (PDSLOT_PTE << PDSHIFT))
SET(PTD, (_C_LABEL(PTmap) + PDSLOT_PTE * NBPG))
SET(PTDpde, (_C_LABEL(PTD) + PDSLOT_PTE * PDE_SIZE))
#else
SET(PTmap, (PTDPTDI << PDSHIFT))
SET(PTD, (_C_LABEL(PTmap) + PTDPTDI * NBPG))
SET(PTDpde, (_C_LABEL(PTD) + PTDPTDI * 4)) /* XXX 4 == sizeof pde */
SET(Sysmap, (_C_LABEL(PTmap) + KPTDI * NBPG))
SET(PTDpde, (_C_LABEL(PTD) + PTDPTDI * PDE_SIZE))
#endif
/*
* APTmap, APTD is the alternate recursive pagemap.
* It's used when modifying another process's page tables.
*/
#ifdef PMAP_NEW
SET(APTmap, (PDSLOT_APTE << PDSHIFT))
SET(APTD, (_C_LABEL(APTmap) + PDSLOT_APTE * NBPG))
SET(APTDpde, (_C_LABEL(PTD) + PDSLOT_APTE * PDE_SIZE))
#else
SET(APTmap, (APTDPTDI << PDSHIFT))
SET(APTD, (_C_LABEL(APTmap) + APTDPTDI * NBPG))
SET(APTDpde, (_C_LABEL(PTD) + APTDPTDI * 4)) /* XXX 4 == sizeof pde */
SET(APTDpde, (_C_LABEL(PTD) + APTDPTDI * PDE_SIZE))
#endif
/*
* kernel_text is used by libkvm.
@ -178,13 +191,14 @@ GLOBAL(esigcode)
*/
/*
* copyout(caddr_t from, caddr_t to, size_t len);
* int copyout(caddr_t from, caddr_t to, size_t len);
*
* Copy len bytes into the user's address space.
*/
ENTRY(copyout)
enter [r3,r4],0
movd _C_LABEL(curpcb)(pc),r4
addr _C_LABEL(copy_fault)(pc),PCB_ONFAULT(r4)
addr _ASM_LABEL(copy_fault)(pc),PCB_ONFAULT(r4)
movd B_ARG0,r1 /* from */
movd B_ARG1,r2 /* to */
@ -233,13 +247,14 @@ ENTRY(copyout)
ret 0
/*
* copyin(caddr_t from, caddr_t to, size_t len);
* int copyin(caddr_t from, caddr_t to, size_t len);
*
* Copy len bytes from the user's address space.
*/
ENTRY(copyin)
enter [r3,r4],0
movd _C_LABEL(curpcb)(pc),r4
addr _C_LABEL(copy_fault)(pc),PCB_ONFAULT(r4)
addr _ASM_LABEL(copy_fault)(pc),PCB_ONFAULT(r4)
movd B_ARG0,r1 /* from */
movd B_ARG1,r2 /* to */
@ -255,9 +270,9 @@ ENTRY(copyin)
movd r1,r3
addd r0,r3
cmpd r3,VM_MAXUSER_ADDRESS
bhi _C_LABEL(copy_fault)
bhi _ASM_LABEL(copy_fault)
cmpd r1,r3
bhs _C_LABEL(copy_fault) /* check for overflow. */
bhs _ASM_LABEL(copy_fault) /* check for overflow. */
/* And now do the copy. */
lshd -2,r0
@ -269,14 +284,15 @@ ENTRY(copyin)
exit [r3,r4]
ret 0
ENTRY(copy_fault)
ASLOCAL(copy_fault)
movqd 0,PCB_ONFAULT(r4)
movd EFAULT,r0
exit [r3,r4]
ret 0
/*
* copyoutstr(caddr_t from, caddr_t to, size_t maxlen, size_t *lencopied);
* int copyoutstr(caddr_t from, caddr_t to, size_t maxlen, size_t *lencopied);
*
* Copy a NUL-terminated string, at most maxlen characters long, into the
* user's address space. Return the number of characters copied (including
* the NUL) in *lencopied. If the string is too long, return ENAMETOOLONG;
@ -285,7 +301,7 @@ ENTRY(copy_fault)
ENTRY(copyoutstr)
enter [r3],0
movd _C_LABEL(curpcb)(pc),r3
addr _C_LABEL(copystr_fault)(pc),PCB_ONFAULT(r3)
addr _ASM_LABEL(copystr_fault)(pc),PCB_ONFAULT(r3)
movd B_ARG0,r0 /* from */
movd B_ARG1,r1 /* to */
movd B_ARG2,r2 /* maxlen */
@ -305,7 +321,8 @@ ENTRY(copyoutstr)
br _ASM_LABEL(copystr_return)
/*
* copyinstr(caddr_t from, caddr_t to, size_t maxlen, size_t *lencopied);
* int copyinstr(caddr_t from, caddr_t to, size_t maxlen, size_t *lencopied);
*
* Copy a NUL-terminated string, at most maxlen characters long, from the
* user's address space. Return the number of characters copied (including
* the NUL) in *lencopied. If the string is too long, return ENAMETOOLONG;
@ -314,7 +331,7 @@ ENTRY(copyoutstr)
ENTRY(copyinstr)
enter [r3],0
movd _C_LABEL(curpcb)(pc),r3
addr _C_LABEL(copystr_fault)(pc),PCB_ONFAULT(r3)
addr _ASM_LABEL(copystr_fault)(pc),PCB_ONFAULT(r3)
movd B_ARG0,r0 /* from */
movd B_ARG1,r1 /* to */
movd B_ARG2,r2 /* maxlen */
@ -333,7 +350,7 @@ ENTRY(copyinstr)
movqd 0,r0
br _ASM_LABEL(copystr_return)
ENTRY(copystr_fault)
ASLOCAL(copystr_fault)
movd EFAULT,r0
ASLOCAL(copystr_return)
@ -349,7 +366,8 @@ ASLOCAL(copystr_return)
ret 0
/*
* copystr(caddr_t from, caddr_t to, size_t maxlen, size_t *lencopied);
* int copystr(caddr_t from, caddr_t to, size_t maxlen, size_t *lencopied);
*
* Copy a NUL-terminated string, at most maxlen characters long. Return the
* number of characters copied (including the NUL) in *lencopied. If the
* string is too long, return ENAMETOOLONG; else return 0.
@ -392,6 +410,103 @@ ENTRY(copystr)
3: exit [r4]
ret 0
#if defined(UVM)
/*
* int kcopy(const void *src, void *dst, size_t len);
*
* Copy len bytes from src to dst, aborting if we encounter a fatal
* page fault.
*
* kcopy() _must_ save and restore the old fault handler since it is
* called by uiomove(), which may be in the path of servicing a non-fatal
* page fault.
*
* We can't use bcopy because the state of the stack at fault time must
* be known. So we duplicate the bcopy code here. Sigh.
*/
0: movd _C_LABEL(curpcb)(pc),r4
movd tos,PCB_ONFAULT(r4)
movd EFAULT,r0
exit [r3,r4]
ret 0
ENTRY(kcopy)
enter [r3,r4],0
movd _C_LABEL(curpcb)(pc),r4
movd PCB_ONFAULT(r4),tos
addr 0b(pc),PCB_ONFAULT(r4)
movd B_ARG2,r0
movd B_ARG0,r1
movd B_ARG1,r2
cmpd r2,r1
bls 0f
movd r1,r3
addd r0,r3
cmpd r2,r3
bls 2f
0: cmpqd 4,r0
bhi 1f
/*
* Align destination address.
*/
movd 3,r3
andd r2,r3
movd 0(r1),0(r2)
negd r3,r3
addqd 4,r3
addd r3,r1
addd r3,r2
subd r3,r0
movqd 3,r3
andd r0,r3
lshd -2,r0
movsd
movd r3,r0
1: movsb
movd tos,PCB_ONFAULT(r4)
movqd 0,r0
exit [r3,r4]
ret 0
2: addd r0,r1
addd r0,r2
addqd -1,r1
addqd -1,r2
cmpqd 4,r0
bhi 0f
/*
* Align destination address.
*/
movd r0,r3
movqd 1,r0
addd r2,r0
andd 3,r0
subd r0,r3
movsb b
movd r3,r0
andd 3,r3
addqd -3,r1
addqd -3,r2
lshd -2,r0
movsd b
movd r3,r0
addqd 3,r1
addqd 3,r2
0: movsb b
movd tos,PCB_ONFAULT(r4)
movqd 0,r0
exit [r3,r4]
ret 0
#endif /* UVM */
/*
* fuword(caddr_t uaddr);
* Fetch an int from the user's address space.
@ -998,7 +1113,11 @@ ASENTRY_NOPROFILE(interrupt)
* Increment interrupt counters.
*/
addqd 1,_C_LABEL(intrcnt)(pc)[r1:d]
#ifdef UVM
addqd 1,_C_LABEL(uvmexp)+V_INTR(pc)
#else
addqd 1,_C_LABEL(cnt)+V_INTR(pc)
#endif
addqd 1,_C_LABEL(ivt)+IV_CNT(r0)
movd _C_LABEL(ivt)+IV_ARG(r0),r1 /* Get argument */

View File

@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.72 1998/02/19 04:18:33 thorpej Exp $ */
/* $NetBSD: machdep.c,v 1.73 1998/03/18 21:59:38 matthias Exp $ */
/*-
* Copyright (c) 1996 Matthias Pfaller.
@ -42,6 +42,9 @@
* @(#)machdep.c 7.4 (Berkeley) 6/3/91
*/
#include "opt_uvm.h"
#include "opt_pmap_new.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
@ -61,14 +64,12 @@
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/device.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/core.h>
#include <sys/kcore.h>
#include <vm/vm.h>
#include <sys/sysctl.h>
#include <sys/syscallargs.h>
#ifdef SYSVMSG
#include <sys/msg.h>
#endif
@ -85,6 +86,12 @@
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif
#include <sys/sysctl.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/psl.h>
@ -171,9 +178,15 @@ int maxphysmem = 0;
int physmem;
int boothowto;
caddr_t msgbufaddr;
vm_offset_t msgbuf_vaddr, msgbuf_paddr;
#if defined(UVM)
vm_map_t exec_map = NULL;
vm_map_t mb_map = NULL;
vm_map_t phys_map = NULL;
#else
vm_map_t buffer_map;
#endif
extern vm_offset_t avail_start, avail_end;
extern int nkpde;
@ -205,11 +218,22 @@ cpu_startup()
/*
* Initialize error message buffer (at end of core).
*/
/* avail_end was pre-decremented in pmap_bootstrap to compensate */
#if defined(UVM) && defined(PMAP_NEW)
msgbuf_vaddr = uvm_km_valloc(kernel_map, ns532_round_page(MSGBUFSIZE));
if (msgbuf_vaddr == NULL)
panic("failed to valloc msgbuf_vaddr");
#endif
/* msgbuf_paddr was init'd in pmap */
#if defined(PMAP_NEW)
for (i = 0; i < btoc(MSGBUFSIZE); i++)
pmap_enter(pmap_kernel(), (vm_offset_t)(msgbufaddr + i * NBPG),
avail_end + i * NBPG, VM_PROT_ALL, TRUE);
initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));
pmap_kenter_pa((vm_offset_t)msgbuf_vaddr + i * NBPG,
msgbuf_paddr + i * NBPG, VM_PROT_ALL);
#else
for (i = 0; i < btoc(MSGBUFSIZE); i++)
pmap_enter(pmap_kernel(), (vm_offset_t)msgbuf_vaddr + i * NBPG,
msgbuf_paddr + i * NBPG, VM_PROT_ALL, TRUE);
#endif
initmsgbuf((caddr_t)msgbuf_vaddr, round_page(MSGBUFSIZE));
printf(version);
printf("real mem = %d\n", ctob(physmem));
@ -219,8 +243,13 @@ cpu_startup()
* and then give everything true virtual addresses.
*/
sz = (int)allocsys((caddr_t)0);
#if defined(UVM)
if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
panic("startup: no room for tables");
#else
if ((v = (caddr_t)kmem_alloc(kernel_map, round_page(sz))) == 0)
panic("startup: no room for tables");
#endif
if (allocsys(v) - v != sz)
panic("startup: table size inconsistency");
@ -229,12 +258,21 @@ cpu_startup()
* in that they usually occupy more virtual memory than physical.
*/
size = MAXBSIZE * nbuf;
#if defined(UVM)
if (uvm_map(kernel_map, (vm_offset_t *) &buffers, round_page(size),
NULL, UVM_UNKNOWN_OFFSET,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
UVM_ADV_NORMAL, 0)) != KERN_SUCCESS)
panic("cpu_startup: cannot allocate VM for buffers");
minaddr = (vm_offset_t)buffers;
#else
buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
&maxaddr, size, TRUE);
minaddr = (vm_offset_t)buffers;
if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
&minaddr, size, FALSE) != KERN_SUCCESS)
panic("startup: cannot allocate buffers");
#endif
if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
/* don't want to alloc more physical mem than needed */
bufpages = btoc(MAXBSIZE) * nbuf;
@ -242,6 +280,35 @@ cpu_startup()
base = bufpages / nbuf;
residual = bufpages % nbuf;
for (i = 0; i < nbuf; i++) {
#if defined(UVM)
vm_size_t curbufsize;
vm_offset_t curbuf;
struct vm_page *pg;
/*
* Each buffer has MAXBSIZE bytes of VM space allocated. Of
* that MAXBSIZE space, we allocate and map (base+1) pages
* for the first "residual" buffers, and then we allocate
* "base" pages for the rest.
*/
curbuf = (vm_offset_t) buffers + (i * MAXBSIZE);
curbufsize = CLBYTES * ((i < residual) ? (base+1) : base);
while (curbufsize) {
pg = uvm_pagealloc(NULL, 0, NULL);
if (pg == NULL)
panic("cpu_startup: not enough memory for "
"buffer cache");
#if defined(PMAP_NEW)
pmap_kenter_pgs(curbuf, &pg, 1);
#else
pmap_enter(kernel_map->pmap, curbuf,
VM_PAGE_TO_PHYS(pg), VM_PROT_ALL, TRUE);
#endif
curbuf += PAGE_SIZE;
curbufsize -= PAGE_SIZE;
}
#else
vm_size_t curbufsize;
vm_offset_t curbuf;
@ -256,36 +323,60 @@ cpu_startup()
curbufsize = CLBYTES * (i < residual ? base+1 : base);
vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
vm_map_simplify(buffer_map, curbuf);
#endif
}
/*
* Allocate a submap for exec arguments. This map effectively
* limits the number of processes exec'ing at any time.
*/
#if defined(UVM)
exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
16*NCARGS, TRUE, FALSE, NULL);
#else
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
16*NCARGS, TRUE);
#endif
/*
* Allocate a submap for physio
*/
#if defined(UVM)
phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
VM_PHYS_SIZE, TRUE, FALSE, NULL);
#else
phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
VM_PHYS_SIZE, TRUE);
#endif
/*
* Finally, allocate mbuf cluster submap.
*/
#if defined(UVM)
mb_map = uvm_km_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
VM_MBUF_SIZE, FALSE, FALSE, NULL);
#else
mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
VM_MBUF_SIZE, FALSE);
#endif
/*
* Tell the VM system that writing to kernel text isn't allowed.
* If we don't, we might end up COW'ing the text segment!
*/
#if defined(UVM)
if (uvm_map_protect(kernel_map,
ns532_round_page(&kernel_text),
ns532_round_page(&etext),
UVM_PROT_READ|UVM_PROT_EXEC, TRUE) != KERN_SUCCESS)
panic("can't protect kernel text");
#else
if (vm_map_protect(kernel_map,
ns532_round_page(&kernel_text),
ns532_round_page(&etext),
VM_PROT_READ|VM_PROT_EXECUTE, TRUE) != KERN_SUCCESS)
panic("can't protect kernel text");
#endif
/*
* Initialize callouts
@ -294,7 +385,11 @@ cpu_startup()
for (i = 1; i < ncallout; i++)
callout[i-1].c_next = &callout[i];
#if defined(UVM)
printf("avail mem = %ld\n", ptoa(uvmexp.free));
#else
printf("avail mem = %ld\n", ptoa(cnt.v_free_count));
#endif
printf("using %d buffers containing %d bytes of memory\n",
nbuf, bufpages * CLBYTES);
@ -367,7 +462,9 @@ allocsys(v)
if (nswbuf > 256)
nswbuf = 256; /* sanity */
}
#if !defined(UVM)
valloc(swbuf, struct buf, nswbuf);
#endif
valloc(buf, struct buf, nbuf);
return v;
}
@ -451,11 +548,18 @@ sendsig(catcher, sig, mask, code)
fp = (struct sigframe *)regs->r_sp - 1;
}
if ((unsigned)fp <= (unsigned)p->p_vmspace->vm_maxsaddr + MAXSSIZ - ctob(p->p_vmspace->vm_ssize))
(void)grow(p, (unsigned)fp);
if ((unsigned)fp <= (unsigned)p->p_vmspace->vm_maxsaddr + MAXSSIZ - ctob(p->p_vmspace->vm_ssize))
#if defined(UVM)
(void) uvm_grow(p, (unsigned)fp);
#else
(void) grow(p, (unsigned)fp);
#endif
#if defined(UVM)
if (uvm_useracc((caddr_t)fp, sizeof (struct sigframe), B_WRITE) == 0) {
#else
if (useracc((caddr_t)fp, sizeof (struct sigframe), B_WRITE) == 0) {
/*
#endif /*
* Process has trashed its stack; give it an illegal
* instruction to halt it in its tracks.
*/
@ -525,7 +629,11 @@ sys_sigreturn(p, v, retval)
* program jumps out of a signal handler.
*/
scp = SCARG(uap, sigcntxp);
#if defined(UVM)
if (uvm_useracc((caddr_t)scp, sizeof (*scp), B_READ) == 0)
#else
if (useracc((caddr_t)scp, sizeof (*scp), B_READ) == 0)
#endif
return(EINVAL);
/*
@ -926,10 +1034,10 @@ map(pd, virtual, physical, protection, size)
physical += NBPG;
size -= NBPG;
} else {
size -= (NPTEPD - ix2) * NBPG;
ix2 = NPTEPD - 1;
size -= (PTES_PER_PTP - ix2) * NBPG;
ix2 = PTES_PER_PTP - 1;
}
if (++ix2 == NPTEPD) {
if (++ix2 == PTES_PER_PTP) {
ix1++;
ix2 = 0;
pt = (pt_entry_t *) (pd[ix1] & PG_FRAME);

View File

@ -1,4 +1,4 @@
/* $NetBSD: mem.c,v 1.15 1997/04/01 16:32:52 matthias Exp $ */
/* $NetBSD: mem.c,v 1.16 1998/03/18 21:59:39 matthias Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -40,6 +40,9 @@
* @(#)mem.c 8.3 (Berkeley) 1/12/94
*/
#include "opt_uvm.h"
#include "opt_pmap_new.h"
/*
* Memory special file
*/
@ -56,6 +59,9 @@
#include <machine/conf.h>
#include <vm/vm.h>
#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif
extern char *vmmap; /* poor name! */
caddr_t zeropage;
@ -117,6 +123,16 @@ mmrw(dev, uio, flags)
/* minor device 0 is physical memory */
case 0:
#if defined(PMAP_NEW)
v = uio->uio_offset;
pmap_kenter_pa((vm_offset_t)vmmap, trunc_page(v),
(uio->uio_rw == UIO_READ) ? VM_PROT_READ :
VM_PROT_ALL);
o = uio->uio_offset & PGOFSET;
c = min(uio->uio_resid, (int)(NBPG - o));
error = uiomove((caddr_t)vmmap + o, c, uio);
pmap_kremove((vm_offset_t)vmmap, NBPG);
#else /* PMAP_NEW */
v = uio->uio_offset;
pmap_enter(pmap_kernel(), (vm_offset_t)vmmap,
trunc_page(v), uio->uio_rw == UIO_READ ?
@ -126,15 +142,22 @@ mmrw(dev, uio, flags)
error = uiomove((caddr_t)vmmap + o, c, uio);
pmap_remove(pmap_kernel(), (vm_offset_t)vmmap,
(vm_offset_t)vmmap + NBPG);
#endif /* PMAP_NEW */
break;
/* minor device 1 is kernel memory */
case 1:
v = uio->uio_offset;
c = min(iov->iov_len, MAXPHYS);
#if defined(UVM)
if (!uvm_kernacc((caddr_t)v, c,
uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
return (EFAULT);
#else
if (!kernacc((caddr_t)v, c,
uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
return (EFAULT);
#endif
error = uiomove((caddr_t)v, c, uio);
break;
@ -176,5 +199,29 @@ mmmmap(dev, off, prot)
dev_t dev;
int off, prot;
{
return (EOPNOTSUPP);
struct proc *p = curproc; /* XXX */
switch (minor(dev)) {
/* minor device 0 is physical memory */
case 0:
if (off > ctob(physmem) &&
suser(p->p_ucred, &p->p_acflag) != 0)
return -1;
return ns532_btop(off);
/* minor device 1 is kernel memory */
case 1:
/* XXX - writability, executability checks? */
#if defined(UVM)
if (!uvm_kernacc((caddr_t)off, NBPG, B_READ))
return -1;
#else
if (!kernacc((caddr_t)off, NBPG, B_READ))
return -1;
#endif
return ns532_btop(vtophys(off));
default:
return -1;
}
}

View File

@ -1,7 +1,7 @@
/* $NetBSD: pmap.c,v 1.23 1998/01/28 18:26:09 thorpej Exp $ */
/* $NetBSD: pmap.c,v 1.24 1998/03/18 21:59:39 matthias Exp $ */
/*
* Copyright (c) 1993, 1994, 1995 Charles M. Hannum. All rights reserved.
* Copyright (c) 1993, 1994, 1995, 1997 Charles M. Hannum. All rights reserved.
* Copyright (c) 1991 Regents of the University of California.
* All rights reserved.
*
@ -78,6 +78,8 @@
* and to when physical maps must be made correct.
*/
#include "opt_uvm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
@ -88,15 +90,13 @@
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#if defined(UVM)
#include <uvm/uvm.h>
#endif
#include <machine/cpu.h>
#include <machine/cpufunc.h>
/*
* Allocate various and sundry SYSMAPs used in the days of old VM
* and not yet converted. XXX.
*/
#define BSDVM_COMPAT 1
#ifdef DEBUG
void pmap_pvdump __P((vm_offset_t pa));
void pads __P((pmap_t pm)); /* print address space of pmap*/
@ -172,12 +172,17 @@ vm_offset_t avail_start; /* PA of first available physical page */
vm_offset_t avail_end; /* PA of last available physical page */
vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss)*/
vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
#if !defined(MACHINE_NEW_NONCONTIG)
vm_offset_t vm_first_phys; /* PA of first managed page */
vm_offset_t vm_last_phys; /* PA just past last managed page */
#endif
int npages;
boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
#if !defined(MACHINE_NEW_NONCONTIG)
short *pmap_attributes; /* reference and modify bits */
struct pv_entry *pv_table; /* array of entries, one per page */
#endif
TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
int pv_nfree;
@ -185,9 +190,11 @@ pt_entry_t *pmap_pte __P((pmap_t, vm_offset_t));
struct pv_entry * pmap_alloc_pv __P((void));
void pmap_free_pv __P((struct pv_entry *));
void ns532_protection_init __P((void));
#if 0
void pmap_collect_pv __P((void));
__inline void pmap_remove_pv __P((pmap_t, vm_offset_t, u_int));
__inline void pmap_enter_pv __P((pmap_t, vm_offset_t, u_int));
#endif
__inline void pmap_remove_pv __P((pmap_t, vm_offset_t, struct pv_entry *));
__inline void pmap_enter_pv __P((pmap_t, vm_offset_t, struct pv_entry *));
void pmap_remove_all __P((vm_offset_t));
#ifdef NKPDE
@ -196,48 +203,14 @@ int nkpde = NKPDE;
int nkpde = 0;
#endif
#if BSDVM_COMPAT
extern caddr_t msgbufaddr;
/*
* All those kernel PT submaps that BSD is so fond of
*/
pt_entry_t *CMAP1, *CMAP2, *XXX_mmap;
pt_entry_t *CMAP1, *CMAP2;
caddr_t CADDR1, CADDR2, vmmap;
pt_entry_t *msgbufmap;
#endif /* BSDVM_COMPAT */
/*
* Bootstrap memory allocator. This function allows for early dynamic
* memory allocation until the virtual memory system has been bootstrapped.
* After that point, either kmem_alloc or malloc should be used. This
* function works by stealing pages from the (to be) managed page pool,
* stealing virtual address space, then mapping the pages and zeroing them.
*
* It should be used from pmap_bootstrap till vm_page_startup, afterwards
* it cannot be used, and will generate a panic if tried. Note that this
* memory will never be freed, and in essence it is wired down.
*/
void *
pmap_bootstrap_alloc(size)
int size;
{
extern boolean_t vm_page_startup_initialized;
vm_offset_t val;
if (vm_page_startup_initialized)
panic("pmap_bootstrap_alloc: called after startup initialized");
size = round_page(size);
val = virtual_avail;
virtual_avail = pmap_map(virtual_avail, avail_start,
avail_start + size, VM_PROT_READ|VM_PROT_WRITE);
avail_start += size;
bzero((caddr_t) val, size);
return ((void *) val);
}
extern vm_offset_t msgbuf_vaddr, msgbuf_paddr;
extern vm_offset_t idt_vaddr, idt_paddr;
/*
* Bootstrap the system enough to run with virtual memory.
@ -255,12 +228,17 @@ void
pmap_bootstrap(virtual_start)
vm_offset_t virtual_start;
{
#if BSDVM_COMPAT
vm_offset_t va;
pt_entry_t *pte;
pt_entry_t *pte, *junk;
/*
* set the VM page size.
*/
#if defined(UVM)
uvm_setpagesize();
#else
vm_set_page_size();
#endif
/* XXX: allow for msgbuf */
avail_end -= ns532_round_page(MSGBUFSIZE);
virtual_avail = virtual_start;
virtual_end = VM_MAX_KERNEL_ADDRESS;
@ -276,31 +254,119 @@ pmap_bootstrap(virtual_start)
simple_lock_init(&pmap_kernel()->pm_lock);
pmap_kernel()->pm_count = 1;
#if BSDVM_COMPAT
/*
* Allocate all the submaps we need
*/
#define SYSMAP(c, p, v, n) \
v = (c)va; va += ((n)*NBPG); p = pte; pte += (n);
va = virtual_avail;
pte = pmap_pte(pmap_kernel(), va);
SYSMAP(caddr_t ,CMAP1 ,CADDR1 ,1 )
SYSMAP(caddr_t ,CMAP2 ,CADDR2 ,1 )
SYSMAP(caddr_t ,XXX_mmap ,vmmap ,1 )
SYSMAP(caddr_t ,msgbufmap ,msgbufaddr ,btoc(MSGBUFSIZE))
#define SYSMAP(c, p, v, n) \
do { \
v = (c)va; \
va += ctob(n); \
p = pte; \
pte += (n); \
} while (0)
SYSMAP(caddr_t, CMAP1, CADDR1, 1);
SYSMAP(caddr_t, CMAP2, CADDR2, 1);
SYSMAP(caddr_t, junk, vmmap, 1);
SYSMAP(vm_offset_t, junk, msgbuf_vaddr, btoc(MSGBUFSIZE));
avail_end -= round_page(MSGBUFSIZE);
msgbuf_paddr = avail_end;
virtual_avail = va;
#endif
/*
* Reserve pmap space for mapping physical pages during dump.
*/
virtual_avail = reserve_dumppages(virtual_avail);
#if defined(MACHINE_NEW_NONCONTIG)
/*
* we must call vm_page_physload() after we are done playing
* with virtual_avail but before we call pmap_steal_memory.
* [i.e. here]
*/
#if defined(UVM)
uvm_page_physload(atop(avail_start), atop(avail_end),
atop(avail_start), atop(avail_end));
#else
vm_page_physload(atop(avail_start), atop(avail_end),
atop(avail_start), atop(avail_end));
#endif
#endif
pmap_update();
}
void
pmap_virtual_space(startp, endp)
vm_offset_t *startp;
vm_offset_t *endp;
{
*startp = virtual_avail;
*endp = virtual_end;
}
#if defined(MACHINE_NEW_NONCONTIG)
/*
* Initialize the pmap module.
* Called by vm_init, to initialize any structures that the pmap
* system needs to map virtual memory.
*/
void
pmap_init()
{
vm_offset_t addr;
vm_size_t s;
int lcv;
if (PAGE_SIZE != NBPG)
panic("pmap_init: CLSIZE != 1");
npages = 0;
for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
npages += (vm_physmem[lcv].end - vm_physmem[lcv].start);
s = (vm_size_t) (sizeof(struct pv_entry) * npages +
sizeof(*(vm_physmem[0].pmseg.attrs)) * npages);
s = round_page(s);
#if defined(UVM)
addr = (vm_offset_t) uvm_km_zalloc(kernel_map, s);
if (addr == NULL)
panic("pmap_init");
#else
addr = (vm_offset_t) kmem_alloc(kernel_map, s);
#endif
/* allocate pv_entry stuff first */
for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
vm_physmem[lcv].pmseg.pvent = (struct pv_entry *) addr;
addr = (vm_offset_t)(vm_physmem[lcv].pmseg.pvent +
(vm_physmem[lcv].end - vm_physmem[lcv].start));
}
/* allocate attrs next */
for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
vm_physmem[lcv].pmseg.attrs = (short *) addr;
addr = (vm_offset_t)(vm_physmem[lcv].pmseg.attrs +
(vm_physmem[lcv].end - vm_physmem[lcv].start));
}
TAILQ_INIT(&pv_page_freelist);
#ifdef DEBUG
if (pmapdebug & PDB_INIT)
printf("pmap_init: %lx bytes (%x pgs)\n",
s, npages);
#endif
/*
* Now it is safe to enable pv_entry recording.
*/
pmap_initialized = TRUE;
}
#else /* MACHINE_NEW_NONCONTIG */
/*
* Initialize the pmap module.
* Called by vm_init, to initialize any structures that the pmap
@ -321,7 +387,13 @@ pmap_init(phys_start, phys_end)
s = (vm_size_t) (sizeof(struct pv_entry) * npages +
sizeof(*pmap_attributes) * npages);
s = round_page(s);
#if defined(UVM)
addr = (vm_offset_t) uvm_km_zalloc(kernel_map, s);
if (addr == NULL)
panic("pmap_init");
#else
addr = (vm_offset_t) kmem_alloc(kernel_map, s);
#endif
pv_table = (struct pv_entry *) addr;
addr += sizeof(struct pv_entry) * npages;
pmap_attributes = (short *) addr;
@ -340,6 +412,7 @@ pmap_init(phys_start, phys_end)
vm_last_phys = phys_end;
pmap_initialized = TRUE;
}
#endif
struct pv_entry *
pmap_alloc_pv()
@ -349,9 +422,14 @@ pmap_alloc_pv()
int i;
if (pv_nfree == 0) {
#if defined(UVM)
/* NOTE: can't lock kernel_map here */
MALLOC(pvp, struct pv_page *, NBPG, M_VMPVENT, M_WAITOK);
#else
pvp = (struct pv_page *)kmem_alloc(kernel_map, NBPG);
#endif
if (pvp == 0)
panic("pmap_alloc_pv: kmem_alloc() failed");
panic("pmap_alloc_pv: alloc failed");
pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
for (i = NPVPPG - 2; i; i--, pv++)
pv->pv_next = pv + 1;
@ -393,11 +471,16 @@ pmap_free_pv(pv)
case NPVPPG:
pv_nfree -= NPVPPG - 1;
TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
#if defined(UVM)
FREE((vm_offset_t) pvp, M_VMPVENT);
#else
kmem_free(kernel_map, (vm_offset_t)pvp, NBPG);
#endif
break;
}
}
#if 0
void
pmap_collect_pv()
{
@ -451,11 +534,19 @@ pmap_collect_pv()
for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) {
npvp = pvp->pvp_pgi.pgi_list.tqe_next;
#if defined(UVM)
FREE((vm_offset_t) pvp, M_VMPVENT);
#else
kmem_free(kernel_map, (vm_offset_t)pvp, NBPG);
#endif
}
}
#endif
int
#if !defined(MACHINE_NEW_NONCONTIG)
int pmap_page_index(vm_offset_t);
static __inline int
pmap_page_index(pa)
vm_offset_t pa;
{
@ -463,20 +554,20 @@ pmap_page_index(pa)
return ns532_btop(pa - vm_first_phys);
return -1;
}
#endif
__inline void
pmap_enter_pv(pmap, va, pind)
pmap_enter_pv(pmap, va, pv)
register pmap_t pmap;
vm_offset_t va;
u_int pind;
struct pv_entry *pv;
{
register struct pv_entry *pv, *npv;
register struct pv_entry *npv;
int s;
if (!pmap_initialized)
return;
pv = &pv_table[pind];
s = splimp();
#ifdef DEBUG
@ -519,19 +610,18 @@ pmap_enter_pv(pmap, va, pind)
}
__inline void
pmap_remove_pv(pmap, va, pind)
pmap_remove_pv(pmap, va, pv)
register pmap_t pmap;
vm_offset_t va;
u_int pind;
{
register struct pv_entry *pv, *npv;
struct pv_entry *pv;
{
register struct pv_entry *npv;
int s;
/*
* Remove from the PV table (raise IPL since we
* may be called at interrupt time).
*/
pv = &pv_table[pind];
s = splimp();
/*
@ -642,7 +732,11 @@ pmap_pinit(pmap)
* No need to allocate page table space yet but we do need a
* valid page directory table.
*/
#if defined(UVM)
pmap->pm_pdir = (pd_entry_t *) uvm_km_zalloc(kernel_map, NBPG);
#else
pmap->pm_pdir = (pd_entry_t *) kmem_alloc(kernel_map, NBPG);
#endif
/* wire in kernel global address entries */
movsdnu(&PTD[KPTDI], &pmap->pm_pdir[KPTDI],
@ -702,13 +796,17 @@ pmap_release(pmap)
printf("pmap_release(%p)\n", pmap);
#endif
#ifdef DIAGNOSTIC
#ifdef notdef /* DIAGNOSTIC */
/* sometimes 1, sometimes 0; could rearrange pmap_destroy */
if (pmap->pm_count != 1)
panic("pmap_release count");
#endif
#if defined(UVM)
uvm_km_free(kernel_map, (vm_offset_t)pmap->pm_pdir, NBPG);
#else
kmem_free(kernel_map, (vm_offset_t)pmap->pm_pdir, NBPG);
#endif
}
/*
@ -733,24 +831,26 @@ pmap_reference(pmap)
}
/*
* Activate the specified address space of the specified process.
* If the process is the current process, load the MMU context.
* pmap_activate:
*
* Mark that a processor is about to be used by a given pmap.
*/
void
pmap_activate(p)
struct proc *p;
{
struct pcb *pcb = &p->p_addr->u_pcb;
pmap_t pmap = p->p_vmspace->vm_map.pmap;
pmap_t pmap = p->p_vmspace->vm_map.pmap;
pcb->pcb_ptb = pmap_extract(pmap_kernel(), (vm_offset_t)pmap->pm_pdir);
if (p == curproc)
load_ptb(pcb->pcb_ptb);
pmap->pm_pdchanged = FALSE;
}
/*
* Deactivate the address space of the specified process.
* pmap_deactivate:
*
* Mark that a processor is no longer used by a given pmap.
*/
void
pmap_deactivate(p)
@ -771,7 +871,11 @@ pmap_remove(pmap, sva, eva)
{
register pt_entry_t *pte;
vm_offset_t pa;
#if defined(MACHINE_NEW_NONCONTIG)
int bank, off;
#else
u_int pind;
#endif
int flush = 0;
sva &= PG_FRAME;
@ -853,10 +957,19 @@ pmap_remove(pmap, sva, eva)
reduce wiring count on page table pages as references drop
#endif
#if defined(MACHINE_NEW_NONCONTIG)
if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
vm_physmem[bank].pmseg.attrs[off] |=
*pte & (PG_M | PG_U);
pmap_remove_pv(pmap, sva,
&vm_physmem[bank].pmseg.pvent[off]);
}
#else
if ((pind = pmap_page_index(pa)) != -1) {
pmap_attributes[pind] |= *pte & (PG_M | PG_U);
pmap_remove_pv(pmap, sva, pind);
pmap_remove_pv(pmap, sva, &pv_table[pind]);
}
#endif /* MACHINE_NEW_NONCONTIG */
*pte = 0;
@ -885,7 +998,11 @@ pmap_remove_all(pa)
struct pv_entry *ph, *pv, *npv;
register pmap_t pmap;
register pt_entry_t *pte;
#if defined(MACHINE_NEW_NONCONTIG)
int bank, off;
#else
u_int pind;
#endif
int s;
#ifdef DEBUG
@ -894,10 +1011,22 @@ pmap_remove_all(pa)
/*pmap_pvdump(pa);*/
#endif
#if defined(MACHINE_NEW_NONCONTIG)
bank = vm_physseg_find(atop(pa), &off);
if (bank == -1)
return;
pv = ph = &vm_physmem[bank].pmseg.pvent[off];
#else
if ((pind = pmap_page_index(pa)) == -1)
return;
pv = ph = &pv_table[pind];
#endif
s = splimp();
if (ph->pv_pmap == NULL) {
@ -938,7 +1067,11 @@ reduce wiring count on page table pages as references drop
/*
* Update saved attributes for managed page
*/
#if defined(MACHINE_NEW_NONCONTIG)
vm_physmem[bank].pmseg.attrs[off] |= *pte & (PG_M | PG_U);
#else
pmap_attributes[pind] |= *pte & (PG_M | PG_U);
#endif
*pte = 0;
@ -1078,7 +1211,11 @@ pmap_enter(pmap, va, pa, prot, wired)
{
register pt_entry_t *pte;
register pt_entry_t npte;
#if defined(MACHINE_NEW_NONCONTIG)
int bank, off;
#else
u_int pind;
#endif
int flush = 0;
boolean_t cacheable;
@ -1106,8 +1243,33 @@ pmap_enter(pmap, va, pa, prot, wired)
* Page Directory table entry not valid, we need a new PT page
*/
pte = pmap_pte(pmap, va);
if (!pte)
panic("ptdi %x", pmap->pm_pdir[PTDPTDI]);
if (!pte) {
/* XXX
* Normally, PT pages are faulted in when a page fault
* is taken. But we might come in here to wire pages
* down, without going through trap(), which means we
* need to allocate a PT page here if we can.
*/
if (pmap != pmap_kernel() && curproc != NULL &&
pmap == curproc->p_vmspace->vm_map.pmap) {
unsigned v;
int rv;
v = trunc_page(vtopte(va));
#if defined(UVM)
rv = uvm_map_pageable(&curproc->p_vmspace->vm_map,
v, v + NBPG, FALSE);
#else
rv = vm_map_pageable(&curproc->p_vmspace->vm_map,
v, v + NBPG, FALSE);
#endif
if (rv != KERN_SUCCESS)
goto die;
} else {
die:
panic("ptdi %x", pmap->pm_pdir[PTDPTDI]);
}
}
#ifdef DEBUG
if (pmapdebug & PDB_ENTER)
@ -1162,10 +1324,20 @@ pmap_enter(pmap, va, pa, prot, wired)
printf("enter: removing old mapping %lx pa %lx ",
va, opa);
#endif
#if defined(MACHINE_NEW_NONCONTIG)
if ((bank = vm_physseg_find(atop(opa), &off)) != -1) {
vm_physmem[bank].pmseg.attrs[off] |=
*pte & (PG_M | PG_U);
pmap_remove_pv(pmap, va,
&vm_physmem[bank].pmseg.pvent[off]);
}
#else
if ((pind = pmap_page_index(opa)) != -1) {
pmap_attributes[pind] |= *pte & (PG_M | PG_U);
pmap_remove_pv(pmap, va, pind);
pmap_remove_pv(pmap, va, &pv_table[pind]);
}
#endif
#ifdef DEBUG
enter_stats.mchange++;
#endif
@ -1181,13 +1353,24 @@ pmap_enter(pmap, va, pa, prot, wired)
/*
* Enter on the PV list if part of our managed memory
*/
#if defined(MACHINE_NEW_NONCONTIG)
if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
#ifdef DEBUG
enter_stats.managed++;
#endif
pmap_enter_pv(pmap, va, &vm_physmem[bank].pmseg.pvent[off]);
cacheable = TRUE;
}
#else
if ((pind = pmap_page_index(pa)) != -1) {
#ifdef DEBUG
enter_stats.managed++;
#endif
pmap_enter_pv(pmap, va, pind);
pmap_enter_pv(pmap, va, &pv_table[pind]);
cacheable = TRUE;
} else if (pmap_initialized) {
}
#endif
else if (pmap_initialized) {
#ifdef DEBUG
enter_stats.unmanaged++;
#endif
@ -1533,7 +1716,11 @@ pmap_pageable(pmap, sva, eva, pageable)
register pt_entry_t *pte;
#ifdef DEBUG
#if defined(MACHINE_NEW_NONCONTIG)
int bank, off;
#else
u_int pind;
#endif
register struct pv_entry *pv;
if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE)
@ -1550,10 +1737,18 @@ pmap_pageable(pmap, sva, eva, pageable)
pa = pmap_pte_pa(pte);
#ifdef DEBUG
#if defined(MACHINE_NEW_NONCONTIG)
if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
return;
pv = &vm_physmem[bank].pmseg.pvent[off];
#else
if ((pind = pmap_page_index(pa)) == -1)
return;
pv = &pv_table[pind];
#endif
if (pv->pv_va != sva || pv->pv_next) {
printf("pmap_pageable: bad PT page va %lx next %p\n",
pv->pv_va, pv->pv_next);
@ -1601,19 +1796,34 @@ pmap_testbit(pa, setbits)
{
register struct pv_entry *pv;
register pt_entry_t *pte;
#if defined(MACHINE_NEW_NONCONTIG)
int bank, off;
#else
u_int pind;
#endif
int s;
#if defined(MACHINE_NEW_NONCONTIG)
if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
return FALSE;
pv = &vm_physmem[bank].pmseg.pvent[off];
#else
if ((pind = pmap_page_index(pa)) == -1)
return FALSE;
pv = &pv_table[pind];
#endif
s = splimp();
/*
* Check saved info first
*/
if (pmap_attributes[pind] & setbits) {
#if defined(MACHINE_NEW_NONCONTIG)
if (vm_physmem[bank].pmseg.attrs[off] & setbits)
#else
if (pmap_attributes[pind] & setbits)
#endif
{
splx(s);
return TRUE;
}
@ -1648,7 +1858,11 @@ pmap_changebit(pa, setbits, maskbits)
register struct pv_entry *pv;
register pt_entry_t *pte;
vm_offset_t va;
#if defined(MACHINE_NEW_NONCONTIG)
int bank, off;
#else
u_int pind;
#endif
int s;
#ifdef DEBUG
@ -1657,17 +1871,27 @@ pmap_changebit(pa, setbits, maskbits)
pa, setbits, ~maskbits);
#endif
#if defined(MACHINE_NEW_NONCONTIG)
if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
return;
pv = &vm_physmem[bank].pmseg.pvent[off];
#else
if ((pind = pmap_page_index(pa)) == -1)
return;
pv = &pv_table[pind];
#endif
s = splimp();
/*
* Clear saved attributes (modify, reference)
*/
#if defined(MACHINE_NEW_NONCONTIG)
if (~maskbits)
vm_physmem[bank].pmseg.attrs[off] &= maskbits;
#else
if (~maskbits)
pmap_attributes[pind] &= maskbits;
#endif
/*
* Loop over all current mappings setting/clearing as appropos
@ -1682,10 +1906,15 @@ pmap_changebit(pa, setbits, maskbits)
*/
if ((PG_RO && setbits == PG_RO) ||
(PG_RW && maskbits == ~PG_RW)) {
#if defined(UVM)
if (va >= uvm.pager_sva && va < uvm.pager_eva)
continue;
#else
extern vm_offset_t pager_sva, pager_eva;
if (va >= pager_sva && va < pager_eva)
continue;
#endif
}
pte = pmap_pte(pv->pv_pmap, va);
@ -1702,12 +1931,27 @@ pmap_pvdump(pa)
vm_offset_t pa;
{
register struct pv_entry *pv;
#if defined(MACHINE_NEW_NONCONTIG)
int bank, off;
#endif
printf("pa %lx", pa);
#if defined(MACHINE_NEW_NONCONTIG)
if ((bank = vm_physseg_find(atop(pa), &off)) == -1) {
printf("INVALID PA!");
} else {
for (pv = &vm_physmem[bank].pmseg.pvent[off] ; pv ;
pv = pv->pv_next) {
printf(" -> pmap %p, va %lx", pv->pv_pmap, pv->pv_va);
pads(pv->pv_pmap);
}
}
#else
for (pv = &pv_table[pmap_page_index(pa)]; pv; pv = pv->pv_next) {
printf(" -> pmap %p, va %lx", pv->pv_pmap, pv->pv_va);
pads(pv->pv_pmap);
}
#endif
printf(" ");
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: trap.c,v 1.27 1997/10/22 03:40:24 phil Exp $ */
/* $NetBSD: trap.c,v 1.28 1998/03/18 21:59:39 matthias Exp $ */
/*-
* Copyright (c) 1996 Matthias Pfaller. All rights reserved.
@ -44,6 +44,9 @@
* 532 Trap and System call handling
*/
#include "opt_uvm.h"
#include "opt_pmap_new.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
@ -60,9 +63,9 @@
#endif
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif
#include <machine/cpu.h>
#include <machine/cpufunc.h>
@ -178,7 +181,11 @@ trap(frame)
extern char cinvstart[], cinvend[];
#endif
#if defined(UVM)
uvmexp.traps++;
#else
cnt.v_trap++;
#endif
#ifdef DEBUG
if (trapdebug) {
@ -291,7 +298,11 @@ trap(frame)
goto out;
case T_AST | T_USER: /* Allow process switch */
#if defined(UVM)
uvmexp.softs++;
#else
cnt.v_soft++;
#endif
if (p->p_flag & P_OWEUPC) {
p->p_flag &= ~P_OWEUPC;
ADDUPROF(p);
@ -361,7 +372,7 @@ trap(frame)
int rv;
vm_prot_t ftype;
extern vm_map_t kernel_map;
unsigned nss, v;
unsigned nss;
va = trunc_page((vm_offset_t)frame.tf_tear);
/*
@ -400,19 +411,51 @@ trap(frame)
}
}
/*
* PMAP_NEW allocates PTPs at pmap_enter time, not here.
*/
#if !defined(PMAP_NEW)
/* Create a page table page if necessary, and wire it. */
if ((PTD[pdei(va)] & PG_V) == 0) {
unsigned v;
v = trunc_page(vtopte(va));
#if defined(UVM)
rv = uvm_map_pageable(map, v, v + NBPG, FALSE);
#else
rv = vm_map_pageable(map, v, v + NBPG, FALSE);
#endif
if (rv != KERN_SUCCESS)
goto nogo;
}
#endif /* PMAP_NEW */
/* Fault the original page in. */
#if defined(UVM)
rv = uvm_fault(map, va, 0, ftype);
#else
rv = vm_fault(map, va, ftype, FALSE);
#endif
if (rv == KERN_SUCCESS) {
if (nss > vm->vm_ssize)
vm->vm_ssize = nss;
#if !defined(PMAP_NEW)
/*
* If this is a pagefault for a PT page,
* wire it. Normally we fault them in
* ourselves, but this can still happen on
* a ns32532 in copyout & friends.
*/
if (map != kernel_map && va >= UPT_MIN_ADDRESS &&
va < UPT_MAX_ADDRESS) {
va = trunc_page(va);
#if defined(UVM)
uvm_map_pageable(map, va, va + NBPG, FALSE);
#else
vm_map_pageable(map, va, va + NBPG, FALSE);
#endif
}
#endif
if (type == T_ABT)
return;
goto out;
@ -425,11 +468,22 @@ trap(frame)
frame.tf_regs.r_pc = (int)curpcb->pcb_onfault;
return;
}
#if defined(UVM)
printf("uvm_fault(%p, 0x%lx, 0, %d) -> %x\n",
map, va, ftype, rv);
#else
printf("vm_fault(%p, %lx, %x, 0) -> %x\n",
map, va, ftype, rv);
#endif
goto we_re_toast;
}
trapsignal(p, SIGSEGV, T_ABT);
if (rv == KERN_RESOURCE_SHORTAGE) {
printf("UVM: process %d killed: out of swap space\n",
p->p_pid);
trapsignal(p, SIGKILL, T_ABT);
} else {
trapsignal(p, SIGSEGV, T_ABT);
}
break;
}
@ -481,7 +535,11 @@ syscall(frame)
register_t code, args[8], rval[2];
u_quad_t sticks;
#if defined(UVM)
uvmexp.syscalls++;
#else
cnt.v_syscall++;
#endif
if (!USERMODE(frame.sf_regs.r_psr))
panic("syscall");
p = curproc;
@ -535,7 +593,7 @@ syscall(frame)
if (error)
goto bad;
rval[0] = 0;
rval[1] = frame.sf_regs.r_r1;
rval[1] = frame.sf_regs.r_r1;
error = (*callp->sy_call)(p, args, rval);
switch (error) {
case 0:

View File

@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.21 1998/01/02 22:43:30 thorpej Exp $ */
/* $NetBSD: vm_machdep.c,v 1.22 1998/03/18 21:59:39 matthias Exp $ */
/*-
* Copyright (c) 1996 Matthias Pfaller.
@ -43,6 +43,9 @@
* @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
*/
#include "opt_uvm.h"
#include "opt_pmap_new.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
@ -57,6 +60,10 @@
#include <vm/vm.h>
#include <vm/vm_kern.h>
#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif
#include <machine/cpu.h>
#include <machine/cpufunc.h>
@ -163,7 +170,11 @@ cpu_exit(arg)
{
extern struct user *proc0paddr;
register struct proc *p __asm("r3");
#if defined(UVM)
uvmexp.swtch++;
#else
cnt.v_swtch++;
#endif
/* Copy arg into a register. */
movd(arg, p);
@ -177,9 +188,15 @@ cpu_exit(arg)
load_ptb(proc0paddr->u_pcb.pcb_ptb);
/* Free resources. */
#if defined(UVM)
uvmspace_free(p->p_vmspace);
(void) splhigh();
uvm_km_free(kernel_map, (vm_offset_t)p->p_addr, USPACE);
#else
vmspace_free(p->p_vmspace);
(void) splhigh();
kmem_free(kernel_map, (vm_offset_t)p->p_addr, USPACE);
#endif
/* Don't update pcb in cpu_switch. */
curproc = NULL;
@ -265,12 +282,46 @@ setredzone(pte, vaddr)
* Both addresses are assumed to reside in the Sysmap,
* and size must be a multiple of CLSIZE.
*/
#if defined(PMAP_NEW)
void
pagemove(from, to, size)
register caddr_t from, to;
size_t size;
{
int *fpte, *tpte;
register pt_entry_t *fpte, *tpte, ofpte, otpte;
if (size % CLBYTES)
panic("pagemove");
fpte = kvtopte(from);
tpte = kvtopte(to);
while (size > 0) {
otpte = *tpte;
ofpte = *fpte;
*tpte++ = *fpte;
*fpte++ = 0;
if (otpte & PG_V)
pmap_update_pg((vm_offset_t) to);
if (ofpte & PG_V)
pmap_update_pg((vm_offset_t) from);
from += NBPG;
to += NBPG;
size -= NBPG;
}
}
#else /* PMAP_NEW */
/*
* Move pages from one kernel virtual address to another.
* Both addresses are assumed to reside in the Sysmap,
* and size must be a multiple of CLSIZE.
*/
void
pagemove(from, to, size)
register caddr_t from, to;
size_t size;
{
register pt_entry_t *fpte, *tpte;
if (size % CLBYTES)
panic("pagemove");
@ -278,13 +329,14 @@ pagemove(from, to, size)
tpte = kvtopte(to);
while (size > 0) {
*tpte++ = *fpte;
*(int *)fpte++ = 0;
*fpte++ = 0;
from += NBPG;
to += NBPG;
size -= NBPG;
}
pmap_update();
}
#endif /* PMAP_NEW */
/*
* Convert kernel VA to physical address
@ -321,6 +373,51 @@ extern vm_map_t phys_map;
* All requests are (re)mapped into kernel VA space via the useriomap
* (a name with only slightly more meaning than "kernelmap")
*/
#if defined(PMAP_NEW)
void
vmapbuf(bp, len)
struct buf *bp;
vm_size_t len;
{
vm_offset_t faddr, taddr, off, fpa;
pt_entry_t *tpte;
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
faddr = trunc_page(bp->b_saveaddr = bp->b_data);
off = (vm_offset_t)bp->b_data - faddr;
len = round_page(off + len);
#if defined(UVM)
taddr= uvm_km_valloc_wait(phys_map, len);
#else
taddr = kmem_alloc_wait(phys_map, len);
#endif
bp->b_data = (caddr_t)(taddr + off);
/*
* The region is locked, so we expect that pmap_pte() will return
* non-NULL.
* XXX: unwise to expect this in a multithreaded environment.
* anything can happen to a pmap between the time we lock a
* region, release the pmap lock, and then relock it for
* the pmap_extract().
*
* no need to flush TLB since we expect nothing to be mapped
* where we just allocated (TLB will be flushed when our
* mapping is removed).
*/
tpte = PTE_BASE + i386_btop(taddr);
while (len) {
fpa = pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
faddr);
*tpte = fpa | PG_RW | PG_V | pmap_pg_g;
tpte++;
faddr += PAGE_SIZE;
len -= PAGE_SIZE;
}
}
#else /* PMAP_NEW */
void
vmapbuf(bp, len)
struct buf *bp;
@ -328,13 +425,18 @@ vmapbuf(bp, len)
{
vm_offset_t faddr, taddr, off;
pt_entry_t *fpte, *tpte;
pt_entry_t *pmap_pte __P((pmap_t, vm_offset_t));
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
faddr = trunc_page(bp->b_saveaddr = bp->b_data);
off = (vm_offset_t)bp->b_data - faddr;
len = round_page(off + len);
#if defined(UVM)
taddr= uvm_km_valloc_wait(phys_map, len);
#else
taddr = kmem_alloc_wait(phys_map, len);
#endif
bp->b_data = (caddr_t)(taddr + off);
/*
* The region is locked, so we expect that pmap_pte() will return
@ -347,6 +449,7 @@ vmapbuf(bp, len)
len -= PAGE_SIZE;
} while (len);
}
#endif
/*
* Free the io map PTEs associated with this IO operation.
@ -364,7 +467,11 @@ vunmapbuf(bp, len)
addr = trunc_page(bp->b_data);
off = (vm_offset_t)bp->b_data - addr;
len = round_page(off + len);
#if defined(UVM)
uvm_km_free_wakeup(phys_map, addr, len);
#else
kmem_free_wakeup(phys_map, addr, len);
#endif
bp->b_data = bp->b_saveaddr;
bp->b_saveaddr = 0;
}