add the i386 MD portions for UVM.

This commit is contained in:
mrg 1998-02-06 07:21:42 +00:00
parent e134619946
commit 29e93b8bea
29 changed files with 873 additions and 127 deletions

View File

@ -1,4 +1,4 @@
# $NetBSD: BOAT_ANCHOR,v 1.75 1997/11/25 20:29:24 kleink Exp $
# $NetBSD: BOAT_ANCHOR,v 1.76 1998/02/06 07:21:42 mrg Exp $
#
# BOAT_ANCHOR -- kernel for the 386-20 the gang uses for testing
#
@ -24,6 +24,13 @@ options COMPAT_12
options COMPAT_13
options COMPAT_43 # and 4.3BSD
#options UVM # Use UVM instead of Mach VM.
# These are required for Mach VM
options VNODEPAGER # paging to vnodes
options SWAPPAGER # pageout
options DEVPAGER # paging to devices
file-system FFS # UFS and quotas
file-system NFS # Network File System client
file-system MSDOSFS # MS-DOS file system

View File

@ -1,4 +1,4 @@
# $NetBSD: DISKLESS,v 1.57 1998/01/15 02:26:34 cgd Exp $
# $NetBSD: DISKLESS,v 1.58 1998/02/06 07:21:43 mrg Exp $
#
# DISKLESS -- Generic machine setup for diskless boot.
# This kernel can be loaded from a bootable floppy (i.e. kernel-copy)
@ -40,6 +40,13 @@ options DIAGNOSTIC # internal consistency checks
options KTRACE # system call tracing via ktrace(1)
#options KMEMSTATS # kernel memory statistics (vmstat -m)
#options UVM # Use UVM instead of Mach VM.
# These are required for Mach VM
options VNODEPAGER # paging to vnodes
options SWAPPAGER # pageout
options DEVPAGER # paging to devices
options SYSVMSG # System V-like message queues
options SYSVSEM # System V-like semaphores
options SYSVSHM # System V-like memory sharing

View File

@ -1,4 +1,4 @@
# $NetBSD: GENERIC,v 1.133 1998/01/29 01:53:03 tron Exp $
# $NetBSD: GENERIC,v 1.134 1998/02/06 07:21:44 mrg Exp $
#
# GENERIC -- everything that's currently supported
#
@ -45,6 +45,13 @@ options RTC_OFFSET=0 # hardware clock is this many mins. west of GMT
options KTRACE # system call tracing via ktrace(1)
#options UVM # Use UVM instead of Mach VM.
# These are required for Mach VM
options VNODEPAGER # paging to vnodes
options SWAPPAGER # pageout
options DEVPAGER # paging to devices
options SYSVMSG # System V-like message queues
options SYSVSEM # System V-like semaphores
options SYSVSHM # System V-like memory sharing

View File

@ -1,4 +1,4 @@
# $NetBSD: HOMEWORLD,v 1.26 1997/11/27 09:58:38 fvdl Exp $
# $NetBSD: HOMEWORLD,v 1.27 1998/02/06 07:21:45 mrg Exp $
#
# HOMEWORLD - Mail, CVS, and GNATS server
#
@ -35,6 +35,13 @@ options RTC_OFFSET=0 # hardware clock is this many mins. west of GMT
#options DIAGNOSTIC # internal consistency checks
options KTRACE # system call tracing, a la ktrace(1)
#options UVM # Use UVM instead of Mach VM.
# These are required for Mach VM
options VNODEPAGER # paging to vnodes
options SWAPPAGER # pageout
options DEVPAGER # paging to devices
#options SYSVMSG # System V-like message queues
#options SYSVSEM # System V-like semaphores
#options SYSVSHM # System V-like memory sharing

View File

@ -1,4 +1,4 @@
# $NetBSD: INSTALL,v 1.73 1998/01/15 02:26:37 cgd Exp $
# $NetBSD: INSTALL,v 1.74 1998/02/06 07:21:45 mrg Exp $
#
# INSTALL - Installation kernel.
#
@ -58,6 +58,13 @@ options RTC_OFFSET=0 # hardware clock is this many mins. west of GMT
#options KTRACE # system call tracing via ktrace(1)
#options UVM # Use UVM instead of Mach VM.
# These are required for Mach VM
options VNODEPAGER # paging to vnodes
options SWAPPAGER # pageout
options DEVPAGER # paging to devices
#options SYSVMSG # System V-like message queues
#options SYSVSEM # System V-like semaphores
#options SYSVSHM # System V-like memory sharing

View File

@ -1,4 +1,4 @@
# $NetBSD: INSTALL_SMALL,v 1.11 1998/01/15 02:26:38 cgd Exp $
# $NetBSD: INSTALL_SMALL,v 1.12 1998/02/06 07:21:46 mrg Exp $
#
# INSTALL_SMALL - Small Installation kernel.
#
@ -59,6 +59,13 @@ options RTC_OFFSET=0 # hardware clock is this many mins. west of GMT
#options KTRACE # system call tracing via ktrace(1)
#options UVM # Use UVM instead of Mach VM.
# These are required for Mach VM
options VNODEPAGER # paging to vnodes
options SWAPPAGER # pageout
options DEVPAGER # paging to devices
#options SYSVMSG # System V-like message queues
#options SYSVSEM # System V-like semaphores
#options SYSVSHM # System V-like memory sharing

View File

@ -1,4 +1,4 @@
# $NetBSD: KICKME,v 1.47 1997/11/27 09:58:51 fvdl Exp $
# $NetBSD: KICKME,v 1.48 1998/02/06 07:21:47 mrg Exp $
#
# KICKME -- 486Cx-33 development machine
#
@ -20,6 +20,13 @@ options INSECURE # insecure; allow /dev/mem writing for X
options DIAGNOSTIC # internal consistency checks
options KTRACE # system call tracing, a la ktrace(1)
#options UVM # Use UVM instead of Mach VM.
# These are required for Mach VM
options VNODEPAGER # paging to vnodes
options SWAPPAGER # pageout
options DEVPAGER # paging to devices
options SYSVMSG # System V-like message queues
options SYSVSEM # System V-like semaphores
options SYSVSHM # System V-like memory sharing

View File

@ -1,4 +1,4 @@
# $NetBSD: Makefile.i386,v 1.88 1997/11/12 23:12:09 thorpej Exp $
# $NetBSD: Makefile.i386,v 1.89 1998/02/06 07:21:48 mrg Exp $
# Makefile for NetBSD
#
@ -99,7 +99,7 @@ SYSTEM_LD_TAIL+=; \
echo cp $@ $@.gdb; rm -f $@.gdb; cp $@ $@.gdb; \
echo ${STRIP} ${STRIPFLAGS} $@; ${STRIP} ${STRIPFLAGS} $@
.else
LINKFLAGS+= -S
LINKFLAGS+= -X
.endif
%LOAD

View File

@ -1,4 +1,4 @@
# $NetBSD: SUN_LAMP,v 1.95 1997/11/27 09:58:57 fvdl Exp $
# $NetBSD: SUN_LAMP,v 1.96 1998/02/06 07:21:48 mrg Exp $
#
# SUN_LAMP -- kernel for one of cgd's 486/50 EISA boxes...
#
@ -31,6 +31,13 @@ options RTC_OFFSET=480 # hardware clock is this many mins. west of GMT
options DIAGNOSTIC # internal consistency checks
options KTRACE # system call tracing, a la ktrace(1)
#options UVM # Use UVM instead of Mach VM.
# These are required for Mach VM
options VNODEPAGER # paging to vnodes
options SWAPPAGER # pageout
options DEVPAGER # paging to devices
#options SYSVMSG # System V-like message queues
#options SYSVSEM # System V-like semaphores
#options SYSVSHM # System V-like memory sharing

View File

@ -1,4 +1,4 @@
# $NetBSD: WARPED,v 1.30 1997/11/27 09:59:02 fvdl Exp $
# $NetBSD: WARPED,v 1.31 1998/02/06 07:21:49 mrg Exp $
#
# WARPED - ftp/sup.netbsd.org server machine
#
@ -39,6 +39,13 @@ options RTC_OFFSET=480 # hardware clock is this many mins. west of GMT
#options DIAGNOSTIC # internal consistency checks
options KTRACE # system call tracing, a la ktrace(1)
#options UVM # Use UVM instead of Mach VM.
# These are required for Mach VM
options VNODEPAGER # paging to vnodes
options SWAPPAGER # pageout
options DEVPAGER # paging to devices
#options SYSVMSG # System V-like message queues
#options SYSVSEM # System V-like semaphores
#options SYSVSHM # System V-like memory sharing

View File

@ -1,4 +1,4 @@
# $NetBSD: files.i386,v 1.105 1998/01/22 01:16:23 thorpej Exp $
# $NetBSD: files.i386,v 1.106 1998/02/06 07:21:50 mrg Exp $
#
# new style config file for i386 architecture
#
@ -41,7 +41,8 @@ file arch/i386/i386/math_emulate.c math_emulate
file arch/i386/i386/mem.c
file arch/i386/i386/microtime.s
file arch/i386/i386/ns_cksum.c ns
file arch/i386/i386/pmap.c
file arch/i386/i386/pmap.c !pmap_new
file arch/i386/i386/pmap.new.c pmap_new
file arch/i386/i386/process_machdep.c
file arch/i386/i386/random.s
file arch/i386/i386/sys_machdep.c

View File

@ -1,12 +1,8 @@
# $NetBSD: std.i386,v 1.8 1997/11/17 01:57:55 lukem Exp $
# $NetBSD: std.i386,v 1.9 1998/02/06 07:21:51 mrg Exp $
#
# standard, required NetBSD/i386 'options'
machine i386
options SWAPPAGER # paging
options VNODEPAGER # mmap() of files
options DEVPAGER # mmap() of devices
options EXEC_AOUT # exec a.out binaries
options EXEC_SCRIPT # exec #! scripts

View File

@ -1,4 +1,4 @@
/* $NetBSD: db_memrw.c,v 1.1 1997/07/05 20:46:38 thorpej Exp $ */
/* $NetBSD: db_memrw.c,v 1.2 1998/02/06 07:21:52 mrg Exp $ */
/*
* Mach Operating System
@ -59,7 +59,9 @@ db_read_bytes(addr, size, data)
*data++ = *src++;
}
#if !defined(PMAP_NEW)
pt_entry_t *pmap_pte __P((pmap_t, vm_offset_t));
#endif
/*
* Write bytes to kernel address space for debugger.
@ -81,14 +83,22 @@ db_write_bytes(addr, size, data)
if (addr >= VM_MIN_KERNEL_ADDRESS &&
addr < (vm_offset_t)&etext) {
#if defined(PMAP_NEW)
ptep0 = PTE_BASE + i386_btop(addr);
#else
ptep0 = pmap_pte(pmap_kernel(), addr);
#endif
oldmap0 = *ptep0;
*(int *)ptep0 |= /* INTEL_PTE_WRITE */ PG_RW;
addr1 = i386_trunc_page(addr + size - 1);
if (i386_trunc_page(addr) != addr1) {
/* data crosses a page boundary */
#if defined(PMAP_NEW)
ptep1 = PTE_BASE + i386_btop(addr1);
#else
ptep1 = pmap_pte(pmap_kernel(), addr1);
#endif
oldmap1 = *ptep1;
*(int *)ptep1 |= /* INTEL_PTE_WRITE */ PG_RW;
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: gdt.c,v 1.11 1998/01/23 00:44:02 mycroft Exp $ */
/* $NetBSD: gdt.c,v 1.12 1998/02/06 07:21:52 mrg Exp $ */
/*-
* Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
@ -44,6 +44,10 @@
#include <vm/vm.h>
#include <vm/vm_kern.h>
#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif
#include <machine/gdt.h>
#define MINGDTSIZ 512
@ -169,9 +173,15 @@ gdt_init()
gdt_free = GNULL_SEL;
old_gdt = gdt;
#if defined(UVM)
gdt = (union descriptor *)uvm_km_valloc(kernel_map, max_len);
uvm_map_pageable(kernel_map, (vm_offset_t)gdt,
(vm_offset_t)gdt + min_len, FALSE);
#else
gdt = (union descriptor *)kmem_alloc_pageable(kernel_map, max_len);
vm_map_pageable(kernel_map, (vm_offset_t)gdt,
(vm_offset_t)gdt + min_len, FALSE);
#endif
bcopy(old_gdt, gdt, NGDT * sizeof(gdt[0]));
setregion(&region, gdt, max_len - 1);
@ -187,8 +197,13 @@ gdt_grow()
gdt_size <<= 1;
new_len = old_len << 1;
#if defined(UVM)
uvm_map_pageable(kernel_map, (vm_offset_t)gdt + old_len,
(vm_offset_t)gdt + new_len, FALSE);
#else
vm_map_pageable(kernel_map, (vm_offset_t)gdt + old_len,
(vm_offset_t)gdt + new_len, FALSE);
#endif
}
void
@ -200,8 +215,13 @@ gdt_shrink()
gdt_size >>= 1;
new_len = old_len >> 1;
#if defined(UVM)
uvm_map_pageable(kernel_map, (vm_offset_t)gdt + new_len,
(vm_offset_t)gdt + old_len, TRUE);
#else
vm_map_pageable(kernel_map, (vm_offset_t)gdt + new_len,
(vm_offset_t)gdt + old_len, TRUE);
#endif
}
/*

View File

@ -1,4 +1,4 @@
# $NetBSD: genassym.cf,v 1.5 1997/11/13 03:25:27 mycroft Exp $
# $NetBSD: genassym.cf,v 1.6 1998/02/06 07:21:53 mrg Exp $
#
# Copyright (c) 1995, 1997 Charles M. Hannum. All rights reserved.
@ -48,6 +48,10 @@ include <sys/mbuf.h>
include <vm/vm.h>
ifdef UVM
include <uvm/uvm_extern.h>
endif
include <machine/trap.h>
include <machine/pmap.h>
include <machine/vmparam.h>
@ -77,12 +81,20 @@ endif
define SRUN SRUN
if PMAP_NEW
define PDSLOT_PTE PDSLOT_PTE
define PDSLOT_APTE PDSLOT_APTE
define PDSLOT_KERN PDSLOT_KERN
define NKPTP_MIN NKPTP_MIN
define NKPTP_MAX NKPTP_MAX
else
define PTDPTDI PTDPTDI
define KPTDI KPTDI
define NKPDE_BASE NKPDE_BASE
define NKPDE_MAX NKPDE_MAX
define NKPDE_SCALE NKPDE_SCALE
define APTDPTDI APTDPTDI
endif
define VM_MAXUSER_ADDRESS (int)VM_MAXUSER_ADDRESS
@ -101,8 +113,13 @@ define M_DATA offsetof(struct mbuf, m_data)
define M_LEN offsetof(struct mbuf, m_len)
define M_NEXT offsetof(struct mbuf, m_next)
ifdef UVM
define V_TRAP offsetof(struct uvmexp, traps)
define V_INTR offsetof(struct uvmexp, intrs)
else
define V_TRAP offsetof(struct vmmeter, v_trap)
define V_INTR offsetof(struct vmmeter, v_intr)
endif
define PCB_CR3 offsetof(struct pcb, pcb_cr3)
define PCB_EBP offsetof(struct pcb, pcb_ebp)

View File

@ -1,4 +1,4 @@
/* $NetBSD: locore.s,v 1.185 1998/01/24 15:50:42 mycroft Exp $ */
/* $NetBSD: locore.s,v 1.186 1998/02/06 07:21:54 mrg Exp $ */
/*-
* Copyright (c) 1993, 1994, 1995, 1997
@ -143,20 +143,31 @@
* PTmap is recursive pagemap at top of virtual address space.
* Within PTmap, the page directory can be found (third indirection).
*/
.globl _PTmap,_PTD,_PTDpde,_Sysmap
#ifdef PMAP_NEW
.set _PTmap,(PDSLOT_PTE << PDSHIFT)
.set _PTD,(_PTmap + PDSLOT_PTE * NBPG)
.set _PTDpde,(_PTD + PDSLOT_PTE * 4) # XXX 4 == sizeof pde
#else
.globl _PTmap,_PTD,_PTDpde
.set _PTmap,(PTDPTDI << PDSHIFT)
.set _PTD,(_PTmap + PTDPTDI * NBPG)
.set _PTDpde,(_PTD + PTDPTDI * 4) # XXX 4 == sizeof pde
.set _Sysmap,(_PTmap + KPTDI * NBPG)
#endif
/*
* APTmap, APTD is the alternate recursive pagemap.
* It's used when modifying another process's page tables.
*/
#ifdef PMAP_NEW
.set _APTmap,(PDSLOT_APTE << PDSHIFT)
.set _APTD,(_APTmap + PDSLOT_APTE * NBPG)
.set _APTDpde,(_PTD + PDSLOT_APTE * 4) # XXX 4 == sizeof pde
#else
.globl _APTmap,_APTD,_APTDpde
.set _APTmap,(APTDPTDI << PDSHIFT)
.set _APTD,(_APTmap + APTDPTDI * NBPG)
.set _APTDpde,(_PTD + APTDPTDI * 4) # XXX 4 == sizeof pde
#endif
/*
@ -455,7 +466,7 @@ try586: /* Use the `cpuid' instruction. */
/*
* Virtual address space of kernel:
*
* text | data | bss | [syms] | page dir | proc0 kstack | Sysmap
* text | data | bss | [syms] | page dir | proc0 kstack
* 0 1 2 3
*/
#define PROC0PDIR ((0) * NBPG)
@ -489,6 +500,17 @@ try586: /* Use the `cpuid' instruction. */
* Calculate the size of the kernel page table directory, and
* how many entries it will have.
*/
#if defined(PMAP_NEW)
movl RELOC(_nkpde),%ecx # get nkpde
cmpl $NKPTP_MIN,%ecx # larger than min?
jge 1f
movl $NKPTP_MIN,%ecx # set at min
jmp 2f
1: cmpl $NKPTP_MAX,%ecx # larger than max?
jle 2f
movl $NKPTP_MAX,%ecx
2:
#else
movl RELOC(_nkpde),%ecx # get nkpde
testl %ecx,%ecx # if it's non-zero, use as-is
jnz 2f
@ -502,6 +524,7 @@ try586: /* Use the `cpuid' instruction. */
movl $NKPDE_MAX,%ecx
1: movl %ecx,RELOC(_nkpde)
2:
#endif
/* Clear memory for bootstrap tables. */
shll $PGSHIFT,%ecx
@ -573,7 +596,11 @@ try586: /* Use the `cpuid' instruction. */
movl %eax,(PROC0PDIR+0*4)(%esi) # which is where temp maps!
/* Map kernel PDEs. */
movl RELOC(_nkpde),%ecx # for this many pde s,
#if defined(PMAP_NEW)
leal (PROC0PDIR+PDSLOT_KERN*4)(%esi),%ebx # kernel pde offset
#else
leal (PROC0PDIR+KPTDI*4)(%esi),%ebx # offset of pde for kernel
#endif
fillkpt
#if NBIOSCALL > 0
@ -588,7 +615,11 @@ try586: /* Use the `cpuid' instruction. */
/* Install a PDE recursively mapping page directory as a page table! */
leal (PROC0PDIR+PG_V|PG_KW)(%esi),%eax # pte for ptd
#ifdef PMAP_NEW
movl %eax,(PROC0PDIR+PDSLOT_PTE*4)(%esi) # recursive PD slot
#else
movl %eax,(PROC0PDIR+PTDPTDI*4)(%esi) # which is where PTmap maps!
#endif
/* Save phys. addr of PTD, for libkvm. */
movl %esi,RELOC(_PTDpaddr)
@ -919,6 +950,67 @@ ENTRY(bcopy)
cld
ret
#if defined(UVM)
/*
* kcopy(caddr_t from, caddr_t to, size_t len);
* Copy len bytes, abort on fault.
*/
ENTRY(kcopy)
pushl %esi
pushl %edi
movl _curpcb,%eax # load curpcb into eax and set on-fault
movl $_copy_fault, PCB_ONFAULT(%eax)
movl 12(%esp),%esi
movl 16(%esp),%edi
movl 20(%esp),%ecx
movl %edi,%eax
subl %esi,%eax
cmpl %ecx,%eax # overlapping?
jb 1f
cld # nope, copy forward
shrl $2,%ecx # copy by 32-bit words
rep
movsl
movl 20(%esp),%ecx
andl $3,%ecx # any bytes left?
rep
movsb
xorl %eax,%eax
popl %edi
popl %esi
movl _curpcb,%edx
movl %eax,PCB_ONFAULT(%edx)
ret
ALIGN_TEXT
1: addl %ecx,%edi # copy backward
addl %ecx,%esi
std
andl $3,%ecx # any fractional bytes?
decl %edi
decl %esi
rep
movsb
movl 20(%esp),%ecx # copy remainder by 32-bit words
shrl $2,%ecx
subl $3,%esi
subl $3,%edi
rep
movsl
xorl %eax,%eax
popl %edi
popl %esi
movl _curpcb,%edx
movl %eax,PCB_ONFAULT(%edx)
cld
ret
#endif
/*****************************************************************************/
/*
@ -1601,7 +1693,11 @@ ENTRY(longjmp)
* actually to shrink the 0-127 range of priorities into the 32 available
* queues.
*/
#ifdef UVM
.globl _whichqs,_qs,_uvmexp,_panic
#else
.globl _whichqs,_qs,_cnt,_panic
#endif
/*
* setrunqueue(struct proc *p);
@ -1890,7 +1986,11 @@ switch_return:
* Switch to proc0's saved context and deallocate the address space and kernel
* stack for p. Then jump into cpu_switch(), as if we were in proc0 all along.
*/
#if defined(UVM)
.globl _proc0,_uvmspace_free,_kernel_map,_uvm_km_free,_tss_free
#else
.globl _proc0,_vmspace_free,_kernel_map,_kmem_free,_tss_free
#endif
ENTRY(switch_exit)
movl 4(%esp),%edi # old process
movl $_proc0,%ebx
@ -1939,11 +2039,19 @@ ENTRY(switch_exit)
pushl P_ADDR(%edi)
call _tss_free
pushl P_VMSPACE(%edi)
#if defined(UVM)
call _uvmspace_free
#else
call _vmspace_free
#endif
pushl $USPACE
pushl P_ADDR(%edi)
pushl _kernel_map
#if defined(UVM)
call _uvm_km_free
#else
call _kmem_free
#endif
addl $20,%esp
/* Jump into cpu_switch() with the right state. */
@ -2064,7 +2172,11 @@ IDTVEC(trap10)
INTRENTRY
pushl _cpl
pushl %esp
#if defined(UVM)
incl _uvmexp+V_TRAP
#else
incl _cnt+V_TRAP
#endif
call _npxintr
addl $8,%esp
INTRFASTEXIT

View File

@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.283 1998/02/06 05:35:16 thorpej Exp $ */
/* $NetBSD: machdep.c,v 1.284 1998/02/06 07:21:55 mrg Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@ -123,6 +123,10 @@
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif
#include <sys/sysctl.h>
#define _I386_BUS_DMA_PRIVATE
@ -223,7 +227,13 @@ vm_offset_t idt_vaddr, idt_paddr;
vm_offset_t pentium_idt_vaddr;
#endif
#if defined(UVM)
vm_map_t exec_map = NULL;
vm_map_t mb_map = NULL;
vm_map_t phys_map = NULL;
#else
vm_map_t buffer_map;
#endif
extern int biosbasemem, biosextmem;
extern vm_offset_t avail_start, avail_end;
@ -348,6 +358,26 @@ cpu_startup()
extern u_char biostramp_image[];
#endif
/*
* Initialize error message buffer (at end of core).
*/
#if defined(UVM) && defined(PMAP_NEW)
msgbuf_vaddr = uvm_km_valloc(kernel_map, i386_round_page(MSGBUFSIZE));
if (msgbuf_vaddr == NULL)
panic("failed to valloc msgbuf_vaddr");
#endif
/* msgbuf_paddr was init'd in pmap */
#if defined(PMAP_NEW)
for (x = 0; x < btoc(MSGBUFSIZE); x++)
pmap_kenter_pa((vm_offset_t)msgbuf_vaddr + x * NBPG,
msgbuf_paddr + x * NBPG, VM_PROT_ALL);
#else
for (x = 0; x < btoc(MSGBUFSIZE); x++)
pmap_enter(pmap_kernel(), (vm_offset_t)msgbuf_vaddr + x * NBPG,
msgbuf_paddr + x * NBPG, VM_PROT_ALL, TRUE);
#endif
initmsgbuf((caddr_t)msgbuf_vaddr, round_page(MSGBUFSIZE));
printf(version);
identifycpu();
@ -358,22 +388,36 @@ cpu_startup()
* and then give everything true virtual addresses.
*/
sz = (int)allocsys((caddr_t)0);
#if defined(UVM)
if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
panic("startup: no room for tables");
#else
if ((v = (caddr_t)kmem_alloc(kernel_map, round_page(sz))) == 0)
panic("startup: no room for tables");
#endif
if (allocsys(v) - v != sz)
panic("startup: table size inconsistency");
/*
* Now allocate buffers proper. They are different than the above
* in that they usually occupy more virtual memory than physical.
* Allocate virtual address space for the buffers. The area
* is not managed by the VM system.
*/
size = MAXBSIZE * nbuf;
#if defined(UVM)
if (uvm_map(kernel_map, (vm_offset_t *) &buffers, round_page(size),
NULL, UVM_UNKNOWN_OFFSET,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
UVM_ADV_NORMAL, 0)) != KERN_SUCCESS)
panic("cpu_startup: cannot allocate VM for buffers");
minaddr = (vm_offset_t)buffers;
#else
buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
&maxaddr, size, TRUE);
minaddr = (vm_offset_t)buffers;
if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
&minaddr, size, FALSE) != KERN_SUCCESS)
panic("startup: cannot allocate buffers");
#endif
if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
/* don't want to alloc more physical mem than needed */
bufpages = btoc(MAXBSIZE) * nbuf;
@ -398,20 +442,35 @@ cpu_startup()
* Allocate a submap for exec arguments. This map effectively
* limits the number of processes exec'ing at any time.
*/
#if defined(UVM)
exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
16*NCARGS, TRUE, NULL);
#else
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
16*NCARGS, TRUE);
#endif
/*
* Allocate a submap for physio
*/
#if defined(UVM)
phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
VM_PHYS_SIZE, TRUE, NULL);
#else
phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
VM_PHYS_SIZE, TRUE);
#endif
/*
* Finally, allocate mbuf cluster submap.
*/
#if defined(UVM)
mb_map = uvm_km_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
VM_MBUF_SIZE, FALSE, NULL);
#else
mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
VM_MBUF_SIZE, FALSE);
#endif
/*
* Initialize callouts
@ -420,7 +479,16 @@ cpu_startup()
for (i = 1; i < ncallout; i++)
callout[i-1].c_next = &callout[i];
printf("avail mem = %ld\n", ptoa(cnt.v_free_count));
/*
* XXX Buffer cache pages haven't yet been allocated, so
* XXX we need to account for those pages when printing
* XXX the amount of free memory.
*/
#if defined(UVM)
printf("avail mem = %ld\n", ptoa(uvmexp.free - bufpages));
#else
printf("avail mem = %ld\n", ptoa(cnt.v_free_count - bufpages));
#endif
printf("using %d buffers containing %d bytes of memory\n",
nbuf, bufpages * CLBYTES);
@ -434,11 +502,17 @@ cpu_startup()
panic("biostramp_image_size too big: %x vs. %x\n",
biostramp_image_size, NBPG);
#endif
#if defined(PMAP_NEW)
pmap_kenter_pa((vm_offset_t)BIOSTRAMP_BASE, /* virtual */
(vm_offset_t)BIOSTRAMP_BASE, /* physical */
VM_PROT_ALL); /* protection */
#else
pmap_enter(pmap_kernel(),
(vm_offset_t)BIOSTRAMP_BASE, /* virtual */
(vm_offset_t)BIOSTRAMP_BASE, /* physical */
VM_PROT_ALL, /* protection */
TRUE); /* wired down */
#endif
bcopy(biostramp_image, (caddr_t)BIOSTRAMP_BASE, biostramp_image_size);
#ifdef DEBUG
printf("biostramp installed @ %x\n", BIOSTRAMP_BASE);
@ -457,6 +531,35 @@ cpu_startup()
base = bufpages / nbuf;
residual = bufpages % nbuf;
for (i = 0; i < nbuf; i++) {
#if defined(UVM)
vm_size_t curbufsize;
vm_offset_t curbuf;
struct vm_page *pg;
/*
* Each buffer has MAXBSIZE bytes of VM space allocated. Of
* that MAXBSIZE space, we allocate and map (base+1) pages
* for the first "residual" buffers, and then we allocate
* "base" pages for the rest.
*/
curbuf = (vm_offset_t) buffers + (i * MAXBSIZE);
curbufsize = CLBYTES * ((i < residual) ? (base+1) : base);
while (curbufsize) {
pg = uvm_pagealloc(NULL, 0, NULL);
if (pg == NULL)
panic("cpu_startup: not enough memory for "
"buffer cache");
#if defined(PMAP_NEW)
pmap_kenter_pgs(curbuf, &pg, 1);
#else
pmap_enter(kernel_map->pmap, curbuf,
VM_PAGE_TO_PHYS(pg), VM_PROT_ALL, TRUE);
#endif
curbuf += PAGE_SIZE;
curbufsize -= PAGE_SIZE;
}
#else
vm_size_t curbufsize;
vm_offset_t curbuf;
@ -471,6 +574,7 @@ cpu_startup()
curbufsize = CLBYTES * (i < residual ? base+1 : base);
vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
vm_map_simplify(buffer_map, curbuf);
#endif
}
/*
@ -568,7 +672,9 @@ allocsys(v)
if (nswbuf > 256)
nswbuf = 256; /* sanity */
}
#if !defined(UVM)
valloc(swbuf, struct buf, nswbuf);
#endif
valloc(buf, struct buf, nbuf);
return v;
}
@ -1501,6 +1607,10 @@ init386(first_avail)
extern void consinit __P((void));
proc0.p_addr = proc0paddr;
#if defined(PMAP_NEW)
/* XXX: PMAP_NEW requires valid curpcb. also init'd in cpu_startup */
curpcb = &proc0.p_addr->u_pcb;
#endif
/*
@ -1666,14 +1776,6 @@ init386(first_avail)
ctob(physmem), 2*1024*1024);
cngetc();
}
/*
* Initialize error message buffer (at end of core).
*/
for (x = 0; x < btoc(MSGBUFSIZE); x++)
pmap_enter(pmap_kernel(), msgbuf_vaddr + x * NBPG,
msgbuf_paddr + x * NBPG, VM_PROT_ALL, TRUE);
initmsgbuf((caddr_t)msgbuf_vaddr, round_page(MSGBUFSIZE));
}
struct queue {
@ -2128,7 +2230,11 @@ i386_mem_add_mapping(bpa, size, cacheable, bshp)
panic("i386_mem_add_mapping: overflow");
#endif
#if defined(UVM)
va = uvm_km_valloc(kernel_map, endpa - pa);
#else
va = kmem_alloc_pageable(kernel_map, endpa - pa);
#endif
if (va == 0)
return (ENOMEM);
@ -2149,7 +2255,11 @@ i386_mem_add_mapping(bpa, size, cacheable, bshp)
*pte &= ~PG_N;
else
*pte |= PG_N;
#if defined(PMAP_NEW)
pmap_update_pg(va);
#else
pmap_update();
#endif
}
}
@ -2187,7 +2297,11 @@ i386_memio_unmap(t, bsh, size)
/*
* Free the kernel virtual mapping.
*/
#if defined(UVM)
uvm_km_free(kernel_map, va, endva - va);
#else
kmem_free(kernel_map, va, endva - va);
#endif
} else
panic("i386_memio_unmap: bad bus space tag");
@ -2474,7 +2588,11 @@ _bus_dmamem_free(t, segs, nsegs)
}
}
#if defined(UVM)
uvm_pglistfree(&mlist);
#else
vm_page_free_memory(&mlist);
#endif
}
/*
@ -2496,9 +2614,21 @@ _bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
size = round_page(size);
#if defined(UVM)
/*
* VALLOC some unmapped VAs from kmem_map
*
* NOTE: all access to kmem_map/kmem_object must be at splimp
*/
s = splimp();
va = uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object, size,
UVM_KMF_VALLOC);
splx(s);
#else
s = splimp();
va = kmem_alloc_pageable(kmem_map, size);
splx(s);
#endif
if (va == 0)
return (ENOMEM);
@ -2511,17 +2641,11 @@ _bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
addr += NBPG, va += NBPG, size -= NBPG) {
if (size == 0)
panic("_bus_dmamem_map: size botch");
#if defined(PMAP_NEW)
pmap_kenter_pa(va, addr, VM_PROT_READ | VM_PROT_WRITE);
#else
pmap_enter(pmap_kernel(), va, addr,
VM_PROT_READ | VM_PROT_WRITE, TRUE);
#if 0
/*
* This is not necessary on x86-family
* processors.
*/
if (flags & BUS_DMA_COHERENT)
pmap_changebit(addr, PG_N, ~0);
else
pmap_changebit(addr, 0, ~PG_N);
#endif
}
}
@ -2547,9 +2671,19 @@ _bus_dmamem_unmap(t, kva, size)
#endif
size = round_page(size);
#if defined(UVM)
/*
* NOTE: all accesses through kmem_map must be at splimp
*/
s = splimp();
uvm_km_free(kmem_map, (vm_offset_t)kva, size);
splx(s);
#else
s = splimp();
kmem_free(kmem_map, (vm_offset_t)kva, size);
splx(s);
#endif
}
/*
@ -2697,8 +2831,13 @@ _bus_dmamem_alloc_range(t, size, alignment, boundary, segs, nsegs, rsegs,
* Allocate pages from the VM system.
*/
TAILQ_INIT(&mlist);
#if defined(UVM)
error = uvm_pglistalloc(size, low, high, alignment, boundary,
&mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
#else
error = vm_page_alloc_memory(size, low, high,
alignment, boundary, &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
#endif
if (error)
return (error);

View File

@ -1,4 +1,4 @@
/* $NetBSD: mem.c,v 1.32 1997/03/24 21:16:59 mycroft Exp $ */
/* $NetBSD: mem.c,v 1.33 1998/02/06 07:21:57 mrg Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -56,6 +56,9 @@
#include <machine/conf.h>
#include <vm/vm.h>
#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif
extern char *vmmap; /* poor name! */
caddr_t zeropage;
@ -134,6 +137,16 @@ mmrw(dev, uio, flags)
/* minor device 0 is physical memory */
case 0:
#if defined(PMAP_NEW)
v = uio->uio_offset;
pmap_kenter_pa((vm_offset_t)vmmap, trunc_page(v),
(uio->uio_rw == UIO_READ) ? VM_PROT_READ :
VM_PROT_ALL);
o = uio->uio_offset & PGOFSET;
c = min(uio->uio_resid, (int)(NBPG - o));
error = uiomove((caddr_t)vmmap + o, c, uio);
pmap_kremove((vm_offset_t)vmmap, NBPG);
#else /* PMAP_NEW */
v = uio->uio_offset;
pmap_enter(pmap_kernel(), (vm_offset_t)vmmap,
trunc_page(v), uio->uio_rw == UIO_READ ?
@ -143,15 +156,22 @@ mmrw(dev, uio, flags)
error = uiomove((caddr_t)vmmap + o, c, uio);
pmap_remove(pmap_kernel(), (vm_offset_t)vmmap,
(vm_offset_t)vmmap + NBPG);
#endif /* PMAP_NEW */
break;
/* minor device 1 is kernel memory */
case 1:
v = uio->uio_offset;
c = min(iov->iov_len, MAXPHYS);
#if defined(UVM)
if (!uvm_kernacc((caddr_t)v, c,
uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
return (EFAULT);
#else
if (!kernacc((caddr_t)v, c,
uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
return (EFAULT);
#endif
error = uiomove((caddr_t)v, c, uio);
break;
@ -206,8 +226,13 @@ mmmmap(dev, off, prot)
/* minor device 1 is kernel memory */
case 1:
/* XXX - writability, executability checks? */
#if defined(UVM)
if (!uvm_kernacc((caddr_t)off, NBPG, B_READ))
return -1;
#else
if (!kernacc((caddr_t)off, NBPG, B_READ))
return -1;
#endif
return i386_btop(vtophys(off));
default:

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.54 1998/01/23 00:44:08 mycroft Exp $ */
/* $NetBSD: pmap.c,v 1.55 1998/02/06 07:21:58 mrg Exp $ */
/*
* Copyright (c) 1993, 1994, 1995, 1997 Charles M. Hannum. All rights reserved.
@ -90,6 +90,10 @@
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#if defined(UVM)
#include <uvm/uvm.h>
#endif
#include <machine/cpu.h>
#include <dev/isa/isareg.h>
@ -233,7 +237,11 @@ pmap_bootstrap(virtual_start)
/*
* set the VM page size.
*/
#if defined(UVM)
uvm_setpagesize();
#else
vm_set_page_size();
#endif
virtual_avail = virtual_start;
virtual_end = VM_MAX_KERNEL_ADDRESS;
@ -307,11 +315,19 @@ pmap_bootstrap(virtual_start)
* with virtual_avail but before we call pmap_steal_memory.
* [i.e. here]
*/
#if defined(UVM)
if (avail_start < hole_start)
uvm_page_physload(atop(avail_start), atop(hole_start),
atop(avail_start), atop(hole_start));
uvm_page_physload(atop(hole_end), atop(avail_end),
atop(hole_end), atop(avail_end));
#else
if (avail_start < hole_start)
vm_page_physload(atop(avail_start), atop(hole_start),
atop(avail_start), atop(hole_start));
vm_page_physload(atop(hole_end), atop(avail_end),
atop(hole_end), atop(avail_end));
#endif
#endif
pmap_update();
@ -348,7 +364,13 @@ pmap_init()
npages += (vm_physmem[lcv].end - vm_physmem[lcv].start);
s = (vm_size_t) (sizeof(struct pv_entry) * npages + npages);
s = round_page(s);
#if defined(UVM)
addr = (vm_offset_t) uvm_km_zalloc(kernel_map, s);
if (addr == NULL)
panic("pmap_init");
#else
addr = (vm_offset_t) kmem_alloc(kernel_map, s);
#endif
/* allocate pv_entry stuff first */
for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
@ -394,7 +416,13 @@ pmap_init()
npages = pmap_page_index(avail_end - 1) + 1;
s = (vm_size_t) (sizeof(struct pv_entry) * npages + npages);
s = round_page(s);
#if defined(UVM)
addr = (vm_offset_t) uvm_km_zalloc(kernel_map, s);
if (addr == NULL)
panic("pmap_init");
#else
addr = (vm_offset_t) kmem_alloc(kernel_map, s);
#endif
pv_table = (struct pv_entry *) addr;
addr += sizeof(struct pv_entry) * npages;
pmap_attributes = (char *) addr;
@ -421,9 +449,14 @@ pmap_alloc_pv()
int i;
if (pv_nfree == 0) {
#if defined(UVM)
/* NOTE: can't lock kernel_map here */
MALLOC(pvp, struct pv_page *, NBPG, M_VMPVENT, M_WAITOK);
#else
pvp = (struct pv_page *)kmem_alloc(kernel_map, NBPG);
#endif
if (pvp == 0)
panic("pmap_alloc_pv: kmem_alloc() failed");
panic("pmap_alloc_pv: alloc failed");
pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
for (i = NPVPPG - 2; i; i--, pv++)
pv->pv_next = pv + 1;
@ -465,7 +498,11 @@ pmap_free_pv(pv)
case NPVPPG:
pv_nfree -= NPVPPG - 1;
TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
#if defined(UVM)
FREE((vm_offset_t) pvp, M_VMPVENT);
#else
kmem_free(kernel_map, (vm_offset_t)pvp, NBPG);
#endif
break;
}
}
@ -524,7 +561,11 @@ pmap_collect_pv()
for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) {
npvp = pvp->pvp_pgi.pgi_list.tqe_next;
#if defined(UVM)
FREE((vm_offset_t) pvp, M_VMPVENT);
#else
kmem_free(kernel_map, (vm_offset_t)pvp, NBPG);
#endif
}
}
#endif
@ -705,7 +746,11 @@ pmap_pinit(pmap)
* No need to allocate page table space yet but we do need a
* valid page directory table.
*/
#if defined(UVM)
pmap->pm_pdir = (pd_entry_t *) uvm_km_zalloc(kernel_map, NBPG);
#else
pmap->pm_pdir = (pd_entry_t *) kmem_alloc(kernel_map, NBPG);
#endif
/* wire in kernel global address entries */
bcopy(&PTD[KPTDI], &pmap->pm_pdir[KPTDI], nkpde * sizeof(pd_entry_t));
@ -767,7 +812,11 @@ pmap_release(pmap)
panic("pmap_release count");
#endif
#if defined(UVM)
uvm_km_free(kernel_map, (vm_offset_t)pmap->pm_pdir, NBPG);
#else
kmem_free(kernel_map, (vm_offset_t)pmap->pm_pdir, NBPG);
#endif
}
/*
@ -1821,10 +1870,15 @@ pmap_changebit(pa, setbits, maskbits)
*/
if ((PG_RO && setbits == PG_RO) ||
(PG_RW && maskbits == ~PG_RW)) {
#if defined(UVM)
if (va >= uvm.pager_sva && va < uvm.pager_eva)
continue;
#else
extern vm_offset_t pager_sva, pager_eva;
if (va >= pager_sva && va < pager_eva)
continue;
#endif
}
pte = pmap_pte(pv->pv_pmap, va);

View File

@ -1,4 +1,4 @@
/* $NetBSD: sys_machdep.c,v 1.38 1998/01/23 00:44:09 mycroft Exp $ */
/* $NetBSD: sys_machdep.c,v 1.39 1998/02/06 07:21:59 mrg Exp $ */
/*-
* Copyright (c) 1995, 1997
@ -62,6 +62,9 @@
#include <vm/vm.h>
#include <vm/vm_kern.h>
#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif
#include <machine/cpu.h>
#include <machine/cpufunc.h>
@ -156,8 +159,13 @@ i386_user_cleanup(pcb)
pcb->pcb_ldt_sel = GSEL(GLDT_SEL, SEL_KPL);
if (pcb == curpcb)
lldt(pcb->pcb_ldt_sel);
#if defined(UVM)
uvm_km_free(kernel_map, (vm_offset_t)pcb->pcb_ldt,
(pcb->pcb_ldt_len * sizeof(union descriptor)));
#else
kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ldt,
(pcb->pcb_ldt_len * sizeof(union descriptor)));
#endif
pcb->pcb_ldt = 0;
}
@ -247,7 +255,11 @@ i386_set_ldt(p, args, retval)
while ((ua.start + ua.num) > pcb->pcb_ldt_len)
pcb->pcb_ldt_len *= 2;
new_len = pcb->pcb_ldt_len * sizeof(union descriptor);
#if defined(UVM)
new_ldt = (union descriptor *)uvm_km_alloc(kernel_map, new_len);
#else
new_ldt = (union descriptor *)kmem_alloc(kernel_map, new_len);
#endif
bcopy(old_ldt, new_ldt, old_len);
bzero((caddr_t)new_ldt + old_len, new_len - old_len);
pcb->pcb_ldt = new_ldt;
@ -260,8 +272,13 @@ i386_set_ldt(p, args, retval)
if (pcb == curpcb)
lldt(pcb->pcb_ldt_sel);
#if defined(UVM)
if (old_ldt != ldt)
uvm_km_free(kernel_map, (vm_offset_t)old_ldt, old_len);
#else
if (old_ldt != ldt)
kmem_free(kernel_map, (vm_offset_t)old_ldt, old_len);
#endif
#ifdef DEBUG
printf("i386_set_ldt(%d): new_ldt=%p\n", p->p_pid, new_ldt);
#endif

View File

@ -1,4 +1,4 @@
/* $NetBSD: trap.c,v 1.108 1998/01/21 23:29:09 thorpej Exp $ */
/* $NetBSD: trap.c,v 1.109 1998/02/06 07:22:00 mrg Exp $ */
/*-
* Copyright (c) 1995 Charles M. Hannum. All rights reserved.
@ -59,6 +59,9 @@
#include <sys/syscall.h>
#include <vm/vm.h>
#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif
#include <machine/cpu.h>
#include <machine/cpufunc.h>
@ -189,7 +192,11 @@ trap(frame)
struct trapframe *vframe;
int resume;
#if defined(UVM)
uvmexp.traps++;
#else
cnt.v_trap++;
#endif
#ifdef DEBUG
if (trapdebug) {
@ -310,7 +317,11 @@ trap(frame)
goto out;
case T_ASTFLT|T_USER: /* Allow process switch */
#if defined(UVM)
uvmexp.softs++;
#else
cnt.v_soft++;
#endif
if (p->p_flag & P_OWEUPC) {
p->p_flag &= ~P_OWEUPC;
ADDUPROF(p);
@ -369,7 +380,7 @@ trap(frame)
int rv;
vm_prot_t ftype;
extern vm_map_t kernel_map;
unsigned nss, v;
unsigned nss;
va = trunc_page((vm_offset_t)rcr2());
/*
@ -407,20 +418,35 @@ trap(frame)
}
}
/*
* PMAP_NEW allocates PTPs at pmap_enter time, not here.
*/
#if !defined(PMAP_NEW)
/* Create a page table page if necessary, and wire it. */
if ((PTD[pdei(va)] & PG_V) == 0) {
unsigned v;
v = trunc_page(vtopte(va));
#if defined(UVM)
rv = uvm_map_pageable(map, v, v + NBPG, FALSE);
#else
rv = vm_map_pageable(map, v, v + NBPG, FALSE);
#endif
if (rv != KERN_SUCCESS)
goto nogo;
}
#endif /* PMAP_NEW */
/* Fault the original page in. */
#if defined(UVM)
rv = uvm_fault(map, va, 0, ftype);
#else
rv = vm_fault(map, va, ftype, FALSE);
#endif
if (rv == KERN_SUCCESS) {
if (nss > vm->vm_ssize)
vm->vm_ssize = nss;
#if !defined(PMAP_NEW)
/*
* If this is a pagefault for a PT page,
* wire it. Normally we fault them in
@ -430,8 +456,13 @@ trap(frame)
if (map != kernel_map && va >= UPT_MIN_ADDRESS &&
va < UPT_MAX_ADDRESS) {
va = trunc_page(va);
#if defined(UVM)
uvm_map_pageable(map, va, va + NBPG, FALSE);
#else
vm_map_pageable(map, va, va + NBPG, FALSE);
#endif
}
#endif
if (type == T_PAGEFLT)
return;
@ -442,11 +473,22 @@ trap(frame)
if (type == T_PAGEFLT) {
if (pcb->pcb_onfault != 0)
goto copyfault;
#if defined(UVM)
printf("uvm_fault(%p, 0x%lx, 0, %d) -> %x\n",
map, va, ftype, rv);
#else
printf("vm_fault(%p, %lx, %x, 0) -> %x\n",
map, va, ftype, rv);
#endif
goto we_re_toast;
}
trapsignal(p, SIGSEGV, T_PAGEFLT);
if (rv == KERN_RESOURCE_SHORTAGE) {
printf("UVM: process %d killed: out of swap space\n",
p->p_pid);
trapsignal(p, SIGKILL, T_PAGEFLT);
} else {
trapsignal(p, SIGSEGV, T_PAGEFLT);
}
break;
}
@ -472,6 +514,7 @@ trap(frame)
/* NMI can be hooked up to a pushbutton for debugging */
printf ("NMI ... going to debugger\n");
#ifdef KGDB
if (kgdb_trap(type, &frame))
return;
#endif
@ -519,9 +562,15 @@ trapwrite(addr)
return 1;
}
#if defined(UVM)
if (uvm_fault(&vm->vm_map, va, 0, VM_PROT_READ | VM_PROT_WRITE)
!= KERN_SUCCESS)
return 1;
#else
if (vm_fault(&vm->vm_map, va, VM_PROT_READ | VM_PROT_WRITE, FALSE)
!= KERN_SUCCESS)
return 1;
#endif
if (nss > vm->vm_ssize)
vm->vm_ssize = nss;
@ -547,7 +596,11 @@ syscall(frame)
register_t code, args[8], rval[2];
u_quad_t sticks;
#if defined(UVM)
uvmexp.syscalls++;
#else
cnt.v_syscall++;
#endif
if (!USERMODE(frame.tf_cs, frame.tf_eflags))
panic("syscall");
p = curproc;

View File

@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.66 1998/01/22 00:39:31 thorpej Exp $ */
/* $NetBSD: vm_machdep.c,v 1.67 1998/02/06 07:22:01 mrg Exp $ */
/*-
* Copyright (c) 1995 Charles M. Hannum. All rights reserved.
@ -61,6 +61,10 @@
#include <vm/vm.h>
#include <vm/vm_kern.h>
#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif
#include <machine/cpu.h>
#include <machine/gdt.h>
#include <machine/reg.h>
@ -129,7 +133,11 @@ cpu_fork(p1, p2)
union descriptor *new_ldt;
len = pcb->pcb_ldt_len * sizeof(union descriptor);
#if defined(UVM)
new_ldt = (union descriptor *)uvm_km_alloc(kernel_map, len);
#else
new_ldt = (union descriptor *)kmem_alloc(kernel_map, len);
#endif
bcopy(pcb->pcb_ldt, new_ldt, len);
pcb->pcb_ldt = new_ldt;
ldt_alloc(pcb, new_ldt, len);
@ -179,7 +187,7 @@ cpu_swapout(p)
*
* We clean up a little and then call switch_exit() with the old proc as an
* argument. switch_exit() first switches to proc0's context, then does the
* vmspace_free() and kmem_free() that we don't do here, and finally jumps
* vmspace_free() and uvm_km_free() that we don't do here, and finally jumps
* into switch() to wait for another process to wake up.
*/
void
@ -204,10 +212,16 @@ cpu_exit(p)
#endif
vm = p->p_vmspace;
#if !defined(UVM)
if (vm->vm_refcnt == 1)
vm_map_remove(&vm->vm_map, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
#endif
#if defined(UVM)
uvmexp.swtch++;
#else
cnt.v_swtch++;
#endif
switch_exit(p);
}
@ -284,6 +298,47 @@ setredzone(pte, vaddr)
}
#endif
#if defined(PMAP_NEW)
/*
 * pagemove:
 *
 * Move pages from one kernel virtual address to another by moving
 * their PTEs (no data is copied).  Both addresses are assumed to
 * reside in the Sysmap, and size must be a multiple of CLBYTES.
 */
void
pagemove(from, to, size)
	register caddr_t from, to;
	size_t size;
{
	register pt_entry_t *fpte, *tpte, ofpte, otpte;

	/* enforce the cluster-size granularity requirement */
	if (size % CLBYTES)
		panic("pagemove");
	fpte = kvtopte(from);
	tpte = kvtopte(to);
	while (size > 0) {
		/* remember old PTEs so we know which TLB entries to flush */
		otpte = *tpte;
		ofpte = *fpte;
		/* move the mapping: install at "to", invalidate at "from" */
		*tpte++ = *fpte;
		*fpte++ = 0;
#if defined(I386_CPU)
		/*
		 * the 386 has no single-page TLB invalidate, so for that
		 * CPU class we defer to one full pmap_update() below.
		 */
		if (cpu_class != CPUCLASS_386)
#endif
		{
			/* flush stale TLB entries for previously valid PTEs */
			if (otpte & PG_V)
				pmap_update_pg((vm_offset_t) to);
			if (ofpte & PG_V)
				pmap_update_pg((vm_offset_t) from);
		}
		from += NBPG;
		to += NBPG;
		size -= NBPG;
	}
#if defined(I386_CPU)
	/* 386 only: single global TLB flush covers all moved pages */
	if (cpu_class == CPUCLASS_386)
		pmap_update();
#endif
}
#else /* PMAP_NEW */
/*
* Move pages from one kernel virtual address to another.
* Both addresses are assumed to reside in the Sysmap,
@ -309,6 +364,7 @@ pagemove(from, to, size)
}
pmap_update();
}
#endif /* PMAP_NEW */
/*
* Convert kernel VA to physical address
@ -345,6 +401,53 @@ extern vm_map_t phys_map;
* All requests are (re)mapped into kernel VA space via the phys_map
* (a name with only slightly more meaning than "kernel_map")
*/
#if defined(PMAP_NEW)
/*
 * vmapbuf:
 *
 * Map a physical-I/O buffer's user pages into kernel VA space (via
 * phys_map) so the kernel can address the data directly.  This
 * PMAP_NEW variant enters the kernel PTEs by hand.  The original
 * user address is saved in bp->b_saveaddr for vunmapbuf().
 */
void
vmapbuf(bp, len)
	struct buf *bp;
	vm_size_t len;
{
	vm_offset_t faddr, taddr, off, fpa;
	pt_entry_t *tpte;

	/* only physical-I/O buffers may be remapped */
	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	/*
	 * save the user address and compute the page-aligned source
	 * range; "off" is the data's offset within its first page.
	 */
	faddr = trunc_page(bp->b_saveaddr = bp->b_data);
	off = (vm_offset_t)bp->b_data - faddr;
	len = round_page(off + len);
	/* allocate kernel VA in phys_map, sleeping until space is free */
#if defined(UVM)
	taddr= uvm_km_valloc_wait(phys_map, len);
#else
	taddr = kmem_alloc_wait(phys_map, len);
#endif
	bp->b_data = (caddr_t)(taddr + off);

	/*
	 * The region is locked, so we expect that pmap_pte() will return
	 * non-NULL.
	 * XXX: unwise to expect this in a multithreaded environment.
	 * anything can happen to a pmap between the time we lock a
	 * region, release the pmap lock, and then relock it for
	 * the pmap_extract().
	 *
	 * no need to flush TLB since we expect nothing to be mapped
	 * where we just allocated (TLB will be flushed when our
	 * mapping is removed).
	 */
	tpte = PTE_BASE + i386_btop(taddr);
	while (len) {
		/* look up the physical page backing the user address... */
		fpa = pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
		    faddr);
		/* ...and enter it directly into the kernel page table */
		*tpte = fpa | PG_RW | PG_V | pmap_pg_g;
		tpte++;
		faddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
}
#else /* PMAP_NEW */
void
vmapbuf(bp, len)
struct buf *bp;
@ -359,7 +462,11 @@ vmapbuf(bp, len)
faddr = trunc_page(bp->b_saveaddr = bp->b_data);
off = (vm_offset_t)bp->b_data - faddr;
len = round_page(off + len);
#if defined(UVM)
taddr= uvm_km_valloc_wait(phys_map, len);
#else
taddr = kmem_alloc_wait(phys_map, len);
#endif
bp->b_data = (caddr_t)(taddr + off);
/*
* The region is locked, so we expect that pmap_pte() will return
@ -373,6 +480,8 @@ vmapbuf(bp, len)
} while (len);
}
#endif
/*
* Free the io map PTEs associated with this IO operation.
* We also invalidate the TLB entries and restore the original b_addr.
@ -389,7 +498,11 @@ vunmapbuf(bp, len)
addr = trunc_page(bp->b_data);
off = (vm_offset_t)bp->b_data - addr;
len = round_page(off + len);
#if defined(UVM)
uvm_km_free_wakeup(phys_map, addr, len);
#else
kmem_free_wakeup(phys_map, addr, len);
#endif
bp->b_data = bp->b_saveaddr;
bp->b_saveaddr = 0;
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: pcb.h,v 1.21 1996/01/08 13:51:42 mycroft Exp $ */
/* $NetBSD: pcb.h,v 1.22 1998/02/06 07:22:02 mrg Exp $ */
/*-
* Copyright (c) 1995 Charles M. Hannum. All rights reserved.
@ -79,6 +79,9 @@ struct pcb {
int vm86_flagmask; /* flag mask for vm86 mode */
void *vm86_userp; /* XXX performance hack */
u_long pcb_iomap[NIOPORTS/32]; /* I/O bitmap */
#if defined(PMAP_NEW)
struct pmap *pcb_pmap; /* back pointer to our pmap */
#endif
};
/*

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.32 1998/01/13 12:52:28 mrg Exp $ */
/* $NetBSD: pmap.h,v 1.33 1998/02/06 07:22:02 mrg Exp $ */
/*
* Copyright (c) 1995 Charles M. Hannum. All rights reserved.
@ -50,6 +50,10 @@
* from hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
*/
#ifdef PMAP_NEW /* redirect */
#include <machine/pmap.new.h> /* defines _I386_PMAP_H_ */
#endif
#ifndef _I386_PMAP_H_
#define _I386_PMAP_H_
@ -61,6 +65,12 @@
* W.Jolitz, 8/89
*/
/*
* PG_AVAIL usage ...
*/
#define PG_W PG_AVAIL1 /* "wired" mapping */
/*
* One page directory, shared between
* kernel and user modes.

View File

@ -1,11 +1,7 @@
/* $NetBSD: pte.h,v 1.9 1996/02/01 22:30:59 mycroft Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
/*
*
* This code is derived from software contributed to Berkeley by
* William Jolitz.
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -16,89 +12,179 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
* must display the following acknowledgment:
* This product includes software developed by Charles D. Cranor and
* Washington University.
* 4. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)pte.h 5.5 (Berkeley) 5/9/91
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* 386 page table entry and page table directory
* W.Jolitz, 8/89
*
* There are two major kinds of pte's: those which have ever existed (and are
* thus either now in core or on the swap device), and those which have
* never existed, but which will be filled on demand at first reference.
* There is a structure describing each. There is also an ancillary
* structure used in page clustering.
* pte.h rewritten by chuck based on the jolitz version, plus random
* info on the pentium and other processors found on the net. the
* goal of this rewrite is to provide enough documentation on the MMU
* hardware that the reader will be able to understand it without having
* to refer to a hardware manual.
*/
#ifndef _I386_PTE_H_
#define _I386_PTE_H_
#define PDSHIFT 22 /* LOG2(NBPDR) */
#define NBPD (1 << PDSHIFT) /* bytes/page dir */
#define PDOFSET (NBPD-1) /* byte offset into page dir */
#define NPTEPD (NBPD / NBPG)
/*
* i386 MMU hardware structure:
*
* the i386 MMU is a two-level MMU which maps 4GB of virtual memory.
* the pagesize is 4K (4096 [0x1000] bytes), although newer pentium
* processors can support a 4MB pagesize as well.
*
* the first level table (segment table?) is called a "page directory"
* and it contains 1024 page directory entries (PDEs). each PDE is
* 4 bytes (an int), so a PD fits in a single 4K page. this page is
* the page directory page (PDP). each PDE in a PDP maps 4MB of space
* (1024 * 4MB = 4GB). a PDE contains the physical address of the
* second level table: the page table. or, if 4MB pages are being used,
* then the PDE contains the PA of the 4MB page being mapped.
*
* a page table consists of 1024 page table entries (PTEs). each PTE is
* 4 bytes (an int), so a page table also fits in a single 4K page. a
* 4K page being used as a page table is called a page table page (PTP).
* each PTE in a PTP maps one 4K page (1024 * 4K = 4MB). a PTE contains
* the physical address of the page it maps and some flag bits (described
* below).
*
* the processor has a special register, "cr3", which points to the
* the PDP which is currently controlling the mappings of the virtual
* address space.
*
* the following picture shows the translation process for a 4K page:
*
* %cr3 register [PA of PDP]
* |
* |
* | bits <31-22> of VA bits <21-12> of VA bits <11-0>
* | index the PDP (0 - 1023) index the PTP are the page offset
* | | | |
* | v | |
* +--->+----------+ | |
* | PD Page | PA of v |
* | |---PTP-------->+------------+ |
* | 1024 PDE | | page table |--PTE--+ |
* | entries | | (aka PTP) | | |
* +----------+ | 1024 PTE | | |
* | entries | | |
* +------------+ | |
* | |
* bits <31-12> bits <11-0>
* p h y s i c a l a d d r
*
 * the i386 caches PTEs in a TLB. it is important to flush out old
 * TLB mappings when making a change to a mapping. writing to the
 * %cr3 will flush the entire TLB. newer processors also have an
 * instruction that will invalidate the mapping of a single page (which
 * is useful if you are changing a single mapping because it preserves
 * all the cached TLB entries).
*
 * as shown above, bits 31-12 of the PTE contain the PA of the page being mapped.
* the rest of the PTE is defined as follows:
* bit# name use
* 11 n/a available for OS use, hardware ignores it
* 10 n/a available for OS use, hardware ignores it
* 9 n/a available for OS use, hardware ignores it
* 8 G global bit (see discussion below)
* 7 PS page size [for PDEs] (0=4k, 1=4M <if supported>)
* 6 D dirty (modified) page
* 5 A accessed (referenced) page
* 4 PCD cache disable
* 3 PWT prevent write through (cache)
* 2 U/S user/supervisor bit (0=supervisor only, 1=both u&s)
* 1 R/W read/write bit (0=read only, 1=read-write)
* 0 P present (valid)
*
* notes:
* - on the i386 the R/W bit is ignored if processor is in supervisor
* state (bug!)
* - PS is only supported on newer processors
* - PTEs with the G bit are global in the sense that they are not
* flushed from the TLB when %cr3 is written (to flush, use the
* "flush single page" instruction). this is only supported on
* newer processors. this bit can be used to keep the kernel's
* TLB entries around while context switching. since the kernel
* is mapped into all processes at the same place it does not make
* sense to flush these entries when switching from one process'
* pmap to another.
*/
#if defined(_KERNEL) && !defined(_LOCORE)
/*
* here we define the data types for PDEs and PTEs
*/
typedef u_int32_t pd_entry_t; /* PDE */
typedef u_int32_t pt_entry_t; /* PTE */
#ifndef _LOCORE
typedef int pd_entry_t; /* page directory entry */
typedef int pt_entry_t; /* Mach page table entry */
#endif
/*
 * now we define various macros for playing with virtual addresses
*/
#define PDSHIFT 22 /* offset of PD index in VA */
#define NBPD (1 << PDSHIFT) /* # bytes mapped by PD (4MB) */
#define PDOFSET (NBPD-1) /* mask for non-PD part of VA */
#if 0 /* not used? */
#define NPTEPD (NBPD / NBPG) /* # of PTEs in a PD */
#else
#define PTES_PER_PTP (NBPD / NBPG) /* # of PTEs in a PTP */
#endif
#define PD_MASK 0xffc00000 /* page directory address bits */
#define PT_MASK 0x003ff000 /* page table address bits */
#define PG_V 0x00000001 /* present */
#define PG_RO 0x00000000 /* read-only by user (and kernel if 486) */
#define PG_RW 0x00000002 /* read-write by user */
#define PG_u 0x00000004 /* accessible by user */
/*
* here we define the bits of the PDE/PTE, as described above:
*
* XXXCDC: need to rename these (PG_u == ugly).
*/
#define PG_V 0x00000001 /* valid entry */
#define PG_RO 0x00000000 /* read-only page */
#define PG_RW 0x00000002 /* read-write page */
#define PG_u 0x00000004 /* user accessible page */
#define PG_PROT 0x00000006 /* all protection bits */
#define PG_N 0x00000018 /* non-cacheable */
#define PG_U 0x00000020 /* has been used */
#define PG_M 0x00000040 /* has been modified */
#define PG_W 0x00000200 /* page is wired */
#define PG_PS 0x00000080 /* 4MB page size */
#define PG_G 0x00000100 /* global, don't TLB flush */
#define PG_AVAIL1 0x00000200 /* ignored by hardware */
#define PG_AVAIL2 0x00000400 /* ignored by hardware */
#define PG_AVAIL3 0x00000800 /* ignored by hardware */
#define PG_FRAME 0xfffff000 /* page frame mask */
#define PG_FZERO 0
#define PG_FTEXT 1
#define PG_FMAX (PG_FTEXT)
/*
* various short-hand protection codes
*/
#define PG_NOACC 0
#define PG_KR 0x00000000
#define PG_KW 0x00000002
#define PG_URKR 0x00000004
#define PG_URKW 0x00000004
#define PG_UW 0x00000006
#define PG_KR 0x00000000 /* kernel read-only */
#define PG_KW 0x00000002 /* kernel read-write */
/*
* Page Protection Exception bits
* page protection exception bits
*/
#define PGEX_P 0x01 /* Protection violation vs. not present */
#define PGEX_W 0x02 /* during a Write cycle */
#define PGEX_U 0x04 /* access from User mode (UPL) */
#ifndef _LOCORE
#ifdef _KERNEL
/* utilities defined in pmap.c */
extern pt_entry_t *Sysmap;
#endif
#endif
#define PGEX_P 0x01 /* protection violation (vs. no mapping) */
#define PGEX_W 0x02 /* exception during a write cycle */
#define PGEX_U 0x04 /* exception while in user mode (upl) */
#endif /* _I386_PTE_H_ */

View File

@ -1,4 +1,4 @@
/* $NetBSD: vmparam.h,v 1.24 1998/01/15 22:20:15 thorpej Exp $ */
/* $NetBSD: vmparam.h,v 1.25 1998/02/06 07:22:03 mrg Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
@ -157,9 +157,16 @@
/*
* pmap specific data stored in the vm_physmem[] array
*/
#if defined(PMAP_NEW)
/* PMAP_NEW pmap: per-segment arrays of pv_head structures and attributes */
struct pmap_physseg {
	struct pv_head *pvhead;		/* pv_head array */
	char *attrs;			/* attrs array */
};
#else
/* original Mach VM pmap: per-segment arrays of pv_entry chains and attributes */
struct pmap_physseg {
	struct pv_entry *pvent;		/* pv_entry array */
	char *attrs;			/* attrs array */
};
#endif
#endif /* _VMPARAM_H_ */

View File

@ -1,4 +1,4 @@
/* $NetBSD: fd.c,v 1.116 1998/01/15 06:11:55 thorpej Exp $ */
/* $NetBSD: fd.c,v 1.117 1998/02/06 07:22:05 mrg Exp $ */
/*-
* Copyright (c) 1993, 1994, 1995, 1996
@ -79,6 +79,11 @@
#include <sys/rnd.h>
#endif
#if defined(UVM)
#include <vm/vm.h>
#include <uvm/uvm_extern.h>
#endif
#include <dev/cons.h>
#include <machine/cpu.h>

View File

@ -1,4 +1,4 @@
/* $NetBSD: npx.c,v 1.63 1998/01/24 13:19:59 mycroft Exp $ */
/* $NetBSD: npx.c,v 1.64 1998/02/06 07:22:06 mrg Exp $ */
#if 0
#define IPRINTF(x) printf x
@ -53,6 +53,11 @@
#include <sys/device.h>
#include <sys/vmmeter.h>
#if defined(UVM)
#include <vm/vm.h>
#include <uvm/uvm_extern.h>
#endif
#include <machine/cpu.h>
#include <machine/intr.h>
#include <machine/pio.h>
@ -371,7 +376,11 @@ npxintr(arg)
struct intrframe *frame = arg;
int code;
#if defined(UVM)
uvmexp.traps++;
#else
cnt.v_trap++;
#endif
IPRINTF(("Intr"));
if (p == 0 || npx_type == NPX_NONE) {

View File

@ -1,4 +1,4 @@
/* $NetBSD: vector.s,v 1.38 1997/11/16 12:31:00 mycroft Exp $ */
/* $NetBSD: vector.s,v 1.39 1998/02/06 07:22:07 mrg Exp $ */
/*
* Copyright (c) 1993, 1994, 1995, 1997 Charles M. Hannum. All rights reserved.
@ -145,6 +145,12 @@
*
* On exit, we jump to Xdoreti(), to process soft interrupts and ASTs.
*/
#if defined(UVM)
#define MY_COUNT _uvmexp
#else
#define MY_COUNT _cnt
#endif
#define INTR(irq_num, icu, ack) \
IDTVEC(resume/**/irq_num) ;\
cli ;\
@ -161,7 +167,7 @@ _Xintr/**/irq_num/**/: ;\
MAKE_FRAME ;\
MASK(irq_num, icu) /* mask it in hardware */ ;\
ack(irq_num) /* and allow other intrs */ ;\
incl _cnt+V_INTR /* statistical info */ ;\
incl MY_COUNT+V_INTR /* statistical info */ ;\
testb $IRQ_BIT(irq_num),_cpl + IRQ_BYTE(irq_num) ;\
jnz _Xhold/**/irq_num /* currently masked; hold it */ ;\
1: movl _cpl,%eax /* cpl to restore on exit */ ;\