initial import of the new virtual memory system, UVM, into -current.

UVM was written by chuck cranor <chuck@maria.wustl.edu>, with some
minor portions derived from the old Mach code.  i provided some help
getting swap and paging working, and other bug fixes/ideas.  chuck
silvers <chuq@chuq.com> also provided some other fixes.

this is the sparc portion.

this will be KNF'd shortly.  :-)
mrg 1998-02-05 07:57:48 +00:00
parent 0392bc60a1
commit 5e91ce6ef1
15 changed files with 591 additions and 33 deletions
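
nearly every machine-dependent change below follows the same compile-time pattern: both VM systems stay in the tree, selected by the UVM kernel option. condensed from the bt_subr.c change below:

#if defined(UVM)
	if (!uvm_useracc(p->red, count, B_WRITE))	/* new UVM interface */
		return (EFAULT);
#else
	if (!useracc(p->red, count, B_WRITE))		/* old Mach VM interface */
		return (EFAULT);
#endif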


@ -1,4 +1,4 @@
# $NetBSD: GENERIC,v 1.59 1998/02/04 21:49:12 pk Exp $
# $NetBSD: GENERIC,v 1.60 1998/02/05 07:57:48 mrg Exp $
include "arch/sparc/conf/std.sparc"
@ -34,6 +34,15 @@ options RASTERCONSOLE # fast rasterop console
config netbsd root on ? type ?
## Virtual memory configuration. There are two choices: the old
## Mach-based VM system, or the new UVM system. Old VM requires the
## pager options.
options OLDVM # MACH VM
options SWAPPAGER # swap paging
options DEVPAGER # paging to devices
options VNODEPAGER # paging to vnodes
#options UVM # UVM VM
## System call tracing (see ktrace(1)).
options KTRACE
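
to build a UVM kernel instead, the choice flips: OLDVM and the pager options are commented out and UVM enabled. a sketch of the alternate configuration:

#options OLDVM # MACH VM
#options SWAPPAGER # swap paging
#options DEVPAGER # paging to devices
#options VNODEPAGER # paging to vnodes
options UVM # UVM VM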


@ -1,4 +1,4 @@
# $NetBSD: std.sparc,v 1.9 1997/05/07 01:55:24 lukem Exp $
# $NetBSD: std.sparc,v 1.10 1998/02/05 07:57:49 mrg Exp $
#
# Mandatory NetBSD/sparc kernel options.
@ -6,9 +6,5 @@
machine sparc # Machine architecture; required by config(8)
options SWAPPAGER # swap and anonymous memory
options VNODEPAGER # mapped files
options DEVPAGER # mapped devices
options EXEC_AOUT # execve(2) support for a.out binaries
options EXEC_SCRIPT # execve(2) support for scripts


@ -1,4 +1,4 @@
/* $NetBSD: bt_subr.c,v 1.5 1996/03/14 19:44:32 christos Exp $ */
/* $NetBSD: bt_subr.c,v 1.6 1998/02/05 07:57:50 mrg Exp $ */
/*
* Copyright (c) 1993
@ -78,10 +78,17 @@ bt_getcmap(p, cm, cmsize)
count = p->count;
if (start >= cmsize || start + count > cmsize)
return (EINVAL);
#if defined(UVM)
if (!uvm_useracc(p->red, count, B_WRITE) ||
!uvm_useracc(p->green, count, B_WRITE) ||
!uvm_useracc(p->blue, count, B_WRITE))
return (EFAULT);
#else
if (!useracc(p->red, count, B_WRITE) ||
!useracc(p->green, count, B_WRITE) ||
!useracc(p->blue, count, B_WRITE))
return (EFAULT);
#endif
for (cp = &cm->cm_map[start][0], i = 0; i < count; cp += 3, i++) {
p->red[i] = cp[0];
p->green[i] = cp[1];
@ -106,10 +113,17 @@ bt_putcmap(p, cm, cmsize)
count = p->count;
if (start >= cmsize || start + count > cmsize)
return (EINVAL);
#if defined(UVM)
if (!uvm_useracc(p->red, count, B_READ) ||
!uvm_useracc(p->green, count, B_READ) ||
!uvm_useracc(p->blue, count, B_READ))
return (EFAULT);
#else
if (!useracc(p->red, count, B_READ) ||
!useracc(p->green, count, B_READ) ||
!useracc(p->blue, count, B_READ))
return (EFAULT);
#endif
for (cp = &cm->cm_map[start][0], i = 0; i < count; cp += 3, i++) {
cp[0] = p->red[i];
cp[1] = p->green[i];


@ -1,4 +1,4 @@
/* $NetBSD: cgfourteen.c,v 1.8 1998/01/12 20:23:43 thorpej Exp $ */
/* $NetBSD: cgfourteen.c,v 1.9 1998/02/05 07:57:51 mrg Exp $ */
/*
* Copyright (c) 1996
@ -480,9 +480,15 @@ cgfourteenioctl(dev, cmd, data, flags, p)
if ((u_int)p->size.x > 32 || (u_int)p->size.y > 32)
return (EINVAL);
count = p->size.y * 32 / NBBY;
#if defined(UVM)
if (!uvm_useracc(p->image, count, B_READ) ||
!uvm_useracc(p->mask, count, B_READ))
return (EFAULT);
#else
if (!useracc(p->image, count, B_READ) ||
!useracc(p->mask, count, B_READ))
return (EFAULT);
#endif
}
/* parameters are OK; do it */
@ -765,10 +771,17 @@ cg14_get_cmap(p, cm, cmsize)
}
#endif
#if defined(UVM)
if (!uvm_useracc(p->red, count, B_WRITE) ||
!uvm_useracc(p->green, count, B_WRITE) ||
!uvm_useracc(p->blue, count, B_WRITE))
return (EFAULT);
#else
if (!useracc(p->red, count, B_WRITE) ||
!useracc(p->green, count, B_WRITE) ||
!useracc(p->blue, count, B_WRITE))
return (EFAULT);
#endif
for (cp = &cm->cm_map[start][0], i = 0; i < count; cp += 4, i++) {
p->red[i] = cp[3];
p->green[i] = cp[2];
@ -800,10 +813,17 @@ cg14_put_cmap(p, cm, cmsize)
}
#endif
#if defined(UVM)
if (!uvm_useracc(p->red, count, B_READ) ||
!uvm_useracc(p->green, count, B_READ) ||
!uvm_useracc(p->blue, count, B_READ))
return (EFAULT);
#else
if (!useracc(p->red, count, B_READ) ||
!useracc(p->green, count, B_READ) ||
!useracc(p->blue, count, B_READ))
return (EFAULT);
#endif
for (cp = &cm->cm_map[start][0], i = 0; i < count; cp += 4, i++) {
cp[3] = p->red[i];
cp[2] = p->green[i];


@ -1,4 +1,4 @@
/* $NetBSD: cgsix.c,v 1.35 1998/01/25 16:49:33 pk Exp $ */
/* $NetBSD: cgsix.c,v 1.36 1998/02/05 07:57:52 mrg Exp $ */
/*
* Copyright (c) 1993
@ -485,9 +485,15 @@ cgsixioctl(dev, cmd, data, flags, p)
if ((u_int)p->size.x > 32 || (u_int)p->size.y > 32)
return (EINVAL);
count = p->size.y * 32 / NBBY;
#if defined(UVM)
if (!uvm_useracc(p->image, count, B_READ) ||
!uvm_useracc(p->mask, count, B_READ))
return (EFAULT);
#else
if (!useracc(p->image, count, B_READ) ||
!useracc(p->mask, count, B_READ))
return (EFAULT);
#endif
}
/* parameters are OK; do it */


@ -1,4 +1,4 @@
/* $NetBSD: fd.c,v 1.55 1998/01/12 20:23:49 thorpej Exp $ */
/* $NetBSD: fd.c,v 1.56 1998/02/05 07:57:53 mrg Exp $ */
/*-
* Copyright (c) 1993, 1994, 1995 Charles Hannum.
@ -61,6 +61,11 @@
#include <dev/cons.h>
#if defined(UVM)
#include <vm/vm.h>
#include <uvm/uvm_extern.h>
#endif
#include <machine/cpu.h>
#include <machine/autoconf.h>
#include <machine/conf.h>


@ -1,4 +1,4 @@
/* $NetBSD: amd7930intr.s,v 1.10 1997/03/11 01:03:07 pk Exp $ */
/* $NetBSD: amd7930intr.s,v 1.11 1998/02/05 07:57:54 mrg Exp $ */
/*
* Copyright (c) 1995 Rolf Grossmann.
* Copyright (c) 1992, 1993
@ -99,10 +99,17 @@ _amd7930_trap:
st %l2, [%l7 + %lo(savepc)]
! tally interrupt
#if defined(UVM)
sethi %hi(_uvmexp+V_INTR), %l7
ld [%l7 + %lo(_uvmexp+V_INTR)], %l6
inc %l6
st %l6, [%l7 + %lo(_uvmexp+V_INTR)]
#else
sethi %hi(_cnt+V_INTR), %l7
ld [%l7 + %lo(_cnt+V_INTR)], %l6
inc %l6
st %l6, [%l7 + %lo(_cnt+V_INTR)]
#endif
sethi %hi(_auiop), %l7
ld [%l7 + %lo(_auiop)], %l7
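
in C terms both branches of this tally are a one-line counter bump; the sethi/ld/inc/st sequence exists only because this is the low-level interrupt path, addressing the counter through offsets generated from genassym.cf (the same applies to bsd_fdintr.s below):

#if defined(UVM)
	uvmexp.intrs++;		/* V_INTR is offsetof(struct uvmexp, intrs) */
#else
	cnt.v_intr++;		/* V_INTR is offsetof(struct vmmeter, v_intr) */
#endif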


@ -1,4 +1,4 @@
/* $NetBSD: bsd_fdintr.s,v 1.11 1997/04/07 21:00:36 pk Exp $ */
/* $NetBSD: bsd_fdintr.s,v 1.12 1998/02/05 07:57:55 mrg Exp $ */
/*
* Copyright (c) 1995 Paul Kranenburg
@ -167,10 +167,17 @@ _fdchwintr:
st %l2, [%l7 + 8]
! tally interrupt
#if defined(UVM)
sethi %hi(_uvmexp+V_INTR), %l7
ld [%l7 + %lo(_uvmexp+V_INTR)], %l6
inc %l6
st %l6, [%l7 + %lo(_uvmexp+V_INTR)]
#else
sethi %hi(_cnt+V_INTR), %l7
ld [%l7 + %lo(_cnt+V_INTR)], %l6
inc %l6
st %l6, [%l7 + %lo(_cnt+V_INTR)]
#endif
! load fdc, if it's NULL there's nothing to do: schedule soft interrupt
sethi %hi(_fdciop), %l7


@ -1,4 +1,4 @@
# $NetBSD: genassym.cf,v 1.3 1997/10/16 10:40:48 mycroft Exp $
# $NetBSD: genassym.cf,v 1.4 1998/02/05 07:57:55 mrg Exp $
#
# Copyright (c) 1997 Christos Zoulas. All rights reserved.
@ -59,6 +59,9 @@ include <sys/disklabel.h>
include <sys/disk.h>
include <vm/vm.h>
ifdef UVM
include <uvm/uvm_extern.h>
endif
include <machine/pmap.h>
include <machine/cpu.h>
@ -98,9 +101,15 @@ define PMAP_CTX offsetof(struct pmap, pm_ctx)
define PMAP_CTXNUM offsetof(struct pmap, pm_ctxnum)
# interrupt/fault metering
ifdef UVM
define V_SWTCH offsetof(struct uvmexp, swtch)
define V_INTR offsetof(struct uvmexp, intrs)
define V_FAULTS offsetof(struct uvmexp, faults)
else
define V_SWTCH offsetof(struct vmmeter, v_swtch)
define V_INTR offsetof(struct vmmeter, v_intr)
define V_FAULTS offsetof(struct vmmeter, v_faults)
endif
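
genassym boils each define down to a plain numeric constant in the generated assym.h, which is how locore.s and the interrupt stubs can use the same V_* names against either structure. roughly what comes out in the UVM case (offset values illustrative, not computed from the real struct):

/* assym.h, UVM configured -- offset values illustrative */
#define	V_SWTCH		0	/* offsetof(struct uvmexp, swtch) */
#define	V_INTR		8	/* offsetof(struct uvmexp, intrs) */
#define	V_FAULTS	12	/* offsetof(struct uvmexp, faults) */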
# CPU info structure
define CPUINFO_FAULTSTATUS offsetof(struct cpu_softc, get_faultstatus)


@ -1,4 +1,4 @@
/* $NetBSD: locore.s,v 1.78 1998/01/13 20:51:01 pk Exp $ */
/* $NetBSD: locore.s,v 1.79 1998/02/05 07:57:57 mrg Exp $ */
/*
* Copyright (c) 1996 Paul Kranenburg
@ -1700,7 +1700,11 @@ ctw_invalid:
#if defined(SUN4)
memfault_sun4:
TRAP_SETUP(-CCFSZ-80)
#if defined(UVM)
INCR(_uvmexp+V_FAULTS) ! uvmexp.faults++ (clobbers %o0,%o1)
#else
INCR(_cnt+V_FAULTS) ! cnt.v_faults++ (clobbers %o0,%o1)
#endif
st %g1, [%sp + CCFSZ + 20] ! save g1
rd %y, %l4 ! save y
@ -1765,7 +1769,11 @@ memfault_sun4:
memfault_sun4c:
#if defined(SUN4C)
TRAP_SETUP(-CCFSZ-80)
#if defined(UVM)
INCR(_uvmexp+V_FAULTS) ! uvmexp.faults++ (clobbers %o0,%o1)
#else
INCR(_cnt+V_FAULTS) ! cnt.v_faults++ (clobbers %o0,%o1)
#endif
st %g1, [%sp + CCFSZ + 20] ! save g1
rd %y, %l4 ! save y
@ -1863,7 +1871,11 @@ memfault_sun4c:
#if defined(SUN4M)
memfault_sun4m:
TRAP_SETUP(-CCFSZ-80)
#if defined(UVM)
INCR(_uvmexp+V_FAULTS) ! uvmexp.faults++ (clobbers %o0,%o1)
#else
INCR(_cnt+V_FAULTS) ! cnt.v_faults++ (clobbers %o0,%o1)
#endif
st %g1, [%sp + CCFSZ + 20] ! save g1
rd %y, %l4 ! save y
@ -2310,7 +2322,11 @@ softintr_sun44c:
softintr_common:
INTR_SETUP(-CCFSZ-80)
std %g2, [%sp + CCFSZ + 24] ! save registers
#if defined(UVM)
INCR(_uvmexp+V_INTR) ! uvmexp.intrs++; (clobbers %o0,%o1)
#else
INCR(_cnt+V_INTR) ! cnt.v_intr++; (clobbers %o0,%o1)
#endif
mov %g1, %l7
rd %y, %l6
std %g4, [%sp + CCFSZ + 32]
@ -2378,7 +2394,11 @@ _sparc_interrupt44c:
_sparc_interrupt_common:
INTR_SETUP(-CCFSZ-80)
std %g2, [%sp + CCFSZ + 24] ! save registers
#if defined(UVM)
INCR(_uvmexp+V_INTR) ! uvmexp.intrs++; (clobbers %o0,%o1)
#else
INCR(_cnt+V_INTR) ! cnt.v_intr++; (clobbers %o0,%o1)
#endif
mov %g1, %l7
rd %y, %l6
std %g4, [%sp + CCFSZ + 32]
@ -2474,7 +2494,11 @@ zshard:
#if defined(SUN4)
nmi_sun4:
INTR_SETUP(-CCFSZ-80)
#if defined(UVM)
INCR(_uvmexp+V_INTR) ! uvmexp.intrs++; (clobbers %o0,%o1)
#else
INCR(_cnt+V_INTR) ! cnt.v_intr++; (clobbers %o0,%o1)
#endif
/*
* Level 15 interrupts are nonmaskable, so with traps off,
* disable all interrupts to prevent recursion.
@ -2500,7 +2524,11 @@ nmi_sun4:
#if defined(SUN4C)
nmi_sun4c:
INTR_SETUP(-CCFSZ-80)
#if defined(UVM)
INCR(_uvmexp+V_INTR) ! uvmexp.intrs++; (clobbers %o0,%o1)
#else
INCR(_cnt+V_INTR) ! cnt.v_intr++; (clobbers %o0,%o1)
#endif
/*
* Level 15 interrupts are nonmaskable, so with traps off,
* disable all interrupts to prevent recursion.
@ -2555,7 +2583,11 @@ nmi_common:
#if defined(SUN4M)
nmi_sun4m:
INTR_SETUP(-CCFSZ-80)
#if defined(UVM)
INCR(_uvmexp+V_INTR) ! uvmexp.intrs++; (clobbers %o0,%o1)
#else
INCR(_cnt+V_INTR) ! cnt.v_intr++; (clobbers %o0,%o1)
#endif
/*
* Level 15 interrupts are nonmaskable, so with traps off,
* disable all interrupts to prevent recursion.
@ -4315,7 +4347,11 @@ ENTRY(switchexit)
wr %g0, PSR_S|PSR_ET, %psr ! and then enable traps
mov %g2, %o0 ! now ready to call kmem_free
mov %g3, %o1
#if defined(UVM)
call _uvm_km_free
#else
call _kmem_free
#endif
mov %g4, %o2
/*
@ -4335,7 +4371,11 @@ ENTRY(switchexit)
*/
INCR(_nswitchexit) ! nswitchexit++;
#if defined(UVM)
INCR(_uvmexp+V_SWTCH) ! uvmexp.swtch++;
#else
INCR(_cnt+V_SWTCH) ! cnt.v_switch++;
#endif
mov PSR_S|PSR_ET, %g1 ! oldpsr = PSR_S | PSR_ET;
sethi %hi(_whichqs), %g2
@ -5348,6 +5388,195 @@ Lback_mopb:
retl ! dst[-1] = b;
stb %o4, [%o1 - 1] ! }
/*
* kcopy() is exactly like bcopy() except that it sets pcb_onfault such
* that when a fault occurs, it can return -1 to indicate this to the
* caller.
*/
ENTRY(kcopy)
sethi %hi(_cpcb), %o5 ! cpcb->pcb_onfault = Lkcerr;
ld [%o5 + %lo(_cpcb)], %o5
set Lkcerr, %o3
st %o3, [%o5 + PCB_ONFAULT]
cmp %o2, BCOPY_SMALL
Lkcopy_start:
bge,a Lkcopy_fancy ! if >= this many, go be fancy.
btst 7, %o0 ! (part of being fancy)
/*
* Not much to copy, just do it a byte at a time.
*/
deccc %o2 ! while (--len >= 0)
bl 1f
EMPTY
0:
inc %o0
ldsb [%o0 - 1], %o4 ! (++dst)[-1] = *src++;
stb %o4, [%o1]
deccc %o2
bge 0b
inc %o1
1:
st %g0, [%o5 + PCB_ONFAULT]! clear onfault
retl
mov 0, %o0 ! delay slot: return success
/* NOTREACHED */
/*
* Plenty of data to copy, so try to do it optimally.
*/
Lkcopy_fancy:
! check for common case first: everything lines up.
! btst 7, %o0 ! done already
bne 1f
EMPTY
btst 7, %o1
be,a Lkcopy_doubles
dec 8, %o2 ! if all lined up, len -= 8, goto kcopy_doubles
! If the low bits match, we can make these line up.
1:
xor %o0, %o1, %o3 ! t = src ^ dst;
btst 1, %o3 ! if (t & 1) {
be,a 1f
btst 1, %o0 ! [delay slot: if (src & 1)]
! low bits do not match, must copy by bytes.
0:
ldsb [%o0], %o4 ! do {
inc %o0 ! (++dst)[-1] = *src++;
inc %o1
deccc %o2
bnz 0b ! } while (--len != 0);
stb %o4, [%o1 - 1]
st %g0, [%o5 + PCB_ONFAULT]! clear onfault
retl
mov 0, %o0 ! delay slot: return success
/* NOTREACHED */
! lowest bit matches, so we can copy by words, if nothing else
1:
be,a 1f ! if (src & 1) {
btst 2, %o3 ! [delay slot: if (t & 2)]
! although low bits match, both are 1: must copy 1 byte to align
ldsb [%o0], %o4 ! *dst++ = *src++;
stb %o4, [%o1]
inc %o0
inc %o1
dec %o2 ! len--;
btst 2, %o3 ! } [if (t & 2)]
1:
be,a 1f ! if (t & 2) {
btst 2, %o0 ! [delay slot: if (src & 2)]
dec 2, %o2 ! len -= 2;
0:
ldsh [%o0], %o4 ! do {
sth %o4, [%o1] ! *(short *)dst = *(short *)src;
inc 2, %o0 ! dst += 2, src += 2;
deccc 2, %o2 ! } while ((len -= 2) >= 0);
bge 0b
inc 2, %o1
b Lkcopy_mopb ! goto mop_up_byte;
btst 1, %o2 ! } [delay slot: if (len & 1)]
/* NOTREACHED */
! low two bits match, so we can copy by longwords
1:
be,a 1f ! if (src & 2) {
btst 4, %o3 ! [delay slot: if (t & 4)]
! although low 2 bits match, they are 10: must copy one short to align
ldsh [%o0], %o4 ! *(short *)dst = *(short *)src;
sth %o4, [%o1]
inc 2, %o0 ! dst += 2;
inc 2, %o1 ! src += 2;
dec 2, %o2 ! len -= 2;
btst 4, %o3 ! } [if (t & 4)]
1:
be,a 1f ! if (t & 4) {
btst 4, %o0 ! [delay slot: if (src & 4)]
dec 4, %o2 ! len -= 4;
0:
ld [%o0], %o4 ! do {
st %o4, [%o1] ! *(int *)dst = *(int *)src;
inc 4, %o0 ! dst += 4, src += 4;
deccc 4, %o2 ! } while ((len -= 4) >= 0);
bge 0b
inc 4, %o1
b Lkcopy_mopw ! goto mop_up_word_and_byte;
btst 2, %o2 ! } [delay slot: if (len & 2)]
/* NOTREACHED */
! low three bits match, so we can copy by doublewords
1:
be 1f ! if (src & 4) {
dec 8, %o2 ! [delay slot: len -= 8]
ld [%o0], %o4 ! *(int *)dst = *(int *)src;
st %o4, [%o1]
inc 4, %o0 ! dst += 4, src += 4, len -= 4;
inc 4, %o1
dec 4, %o2 ! }
1:
Lkcopy_doubles:
! swap %o4 with %o2 during doubles copy, since %o5 is verboten
mov %o2, %o4
Lkcopy_doubles2:
ldd [%o0], %o2 ! do {
std %o2, [%o1] ! *(double *)dst = *(double *)src;
inc 8, %o0 ! dst += 8, src += 8;
deccc 8, %o4 ! } while ((len -= 8) >= 0);
bge Lkcopy_doubles2
inc 8, %o1
mov %o4, %o2 ! restore len
! check for a usual case again (save work)
btst 7, %o2 ! if ((len & 7) == 0)
be Lkcopy_done ! goto bcopy_done;
btst 4, %o2 ! if ((len & 4) == 0)
be,a Lkcopy_mopw ! goto mop_up_word_and_byte;
btst 2, %o2 ! [delay slot: if (len & 2)]
ld [%o0], %o4 ! *(int *)dst = *(int *)src;
st %o4, [%o1]
inc 4, %o0 ! dst += 4;
inc 4, %o1 ! src += 4;
btst 2, %o2 ! } [if (len & 2)]
1:
! mop up trailing word (if present) and byte (if present).
Lkcopy_mopw:
be Lkcopy_mopb ! no word, go mop up byte
btst 1, %o2 ! [delay slot: if (len & 1)]
ldsh [%o0], %o4 ! *(short *)dst = *(short *)src;
be Lkcopy_done ! if ((len & 1) == 0) goto done;
sth %o4, [%o1]
ldsb [%o0 + 2], %o4 ! dst[2] = src[2];
stb %o4, [%o1 + 2]
st %g0, [%o5 + PCB_ONFAULT]! clear onfault
retl
mov 0, %o0 ! delay slot: return success
/* NOTREACHED */
! mop up trailing byte (if present).
Lkcopy_mopb:
bne,a 1f
ldsb [%o0], %o4
Lkcopy_done:
st %g0, [%o5 + PCB_ONFAULT]! clear onfault
retl
mov 0, %o0 ! delay slot: return success
1:
stb %o4, [%o1]
st %g0, [%o5 + PCB_ONFAULT]! clear onfault
retl
mov 0, %o0 ! delay slot: return success
Lkcerr:
st %g0, [%o5 + PCB_ONFAULT]! clear onfault
retl ! and return error indicator
mov -1, %o0
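a hypothetical caller, just to show the contract (this helper is illustrative, not part of the commit): kcopy takes bcopy-style (src, dst, len) arguments and returns 0 on success, or -1 if a fault was taken and caught through pcb_onfault:

/* sketch: copy from a possibly-unmapped kernel address */
int
fetch_kernel_bytes(void *src, void *dst, size_t len)
{

	if (kcopy(src, dst, len) == -1)
		return (EFAULT);	/* faulted; pcb_onfault unwound us */
	return (0);
}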
/*
* savefpstate(f) struct fpstate *f;


@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.100 1998/02/04 05:13:00 thorpej Exp $ */
/* $NetBSD: machdep.c,v 1.101 1998/02/05 07:57:58 mrg Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@ -115,6 +115,10 @@
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#if defined(UVM)
#include <uvm/uvm.h> /* XXX: not _extern ... need vm_map_create */
#endif
#include <sys/sysctl.h>
#define _SPARC_BUS_DMA_PRIVATE
@ -132,7 +136,13 @@
#include "fb.h"
vm_map_t buffer_map;
#if defined(UVM)
vm_map_t exec_map = NULL;
vm_map_t mb_map = NULL;
vm_map_t phys_map = NULL;
#else
vm_map_t buffer_map;
#endif
extern vm_offset_t avail_end;
/*
@ -226,12 +236,64 @@ cpu_startup()
*/
sz = (int)allocsys((caddr_t)0);
#if defined(UVM)
if ((v = (caddr_t)uvm_km_alloc(kernel_map, round_page(sz))) == 0)
panic("startup: no room for tables");
#else
if ((v = (caddr_t)kmem_alloc(kernel_map, round_page(sz))) == 0)
panic("startup: no room for tables");
#endif
if (allocsys(v) - v != sz)
panic("startup: table size inconsistency");
#if defined(UVM)
/*
* allocate virtual and physical memory for the buffers.
*/
size = MAXBSIZE * nbuf; /* # bytes for buffers */
/* allocate VM for buffers... area is not managed by VM system */
if (uvm_map(kernel_map, (vm_offset_t *) &buffers, round_page(size),
NULL, UVM_UNKNOWN_OFFSET,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
UVM_ADV_NORMAL, 0)) != KERN_SUCCESS)
panic("cpu_startup: cannot allocate VM for buffers");
minaddr = (vm_offset_t) buffers;
if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
bufpages = btoc(MAXBSIZE) * nbuf; /* do not overallocate RAM */
}
base = bufpages / nbuf;
residual = bufpages % nbuf;
/* now allocate RAM for buffers */
for (i = 0 ; i < nbuf ; i++) {
vm_offset_t curbuf;
vm_size_t curbufsize;
struct vm_page *pg;
/*
* each buffer has MAXBSIZE bytes of VM space allocated. of
* that MAXBSIZE space we allocate and map (base+1) pages
* for the first "residual" buffers, and then we allocate
* "base" pages for the rest.
*/
curbuf = (vm_offset_t) buffers + (i * MAXBSIZE);
curbufsize = CLBYTES * ((i < residual) ? (base+1) : base);
while (curbufsize) {
pg = uvm_pagealloc(NULL, 0, NULL);
if (pg == NULL)
panic("cpu_startup: "
"not enough RAM for buffer cache");
pmap_enter(kernel_map->pmap, curbuf,
VM_PAGE_TO_PHYS(pg), VM_PROT_ALL, TRUE);
curbuf += PAGE_SIZE;
curbufsize -= PAGE_SIZE;
}
}
#else
/*
* Now allocate buffers proper. They are different than the above
* in that they usually occupy more virtual memory than physical.
@ -270,13 +332,19 @@ cpu_startup()
vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
vm_map_simplify(buffer_map, curbuf);
}
#endif
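the base/residual split above just spreads bufpages as evenly as possible across the buffers: with illustrative numbers, bufpages = 100 and nbuf = 16 give base = 6 and residual = 4, so the first 4 buffers get 7 pages of RAM backing and the remaining 12 get 6 pages, while every buffer still owns MAXBSIZE bytes of virtual space.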
/*
* Allocate a submap for exec arguments. This map effectively
* limits the number of processes exec'ing at any time.
*/
#if defined(UVM)
exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
16*NCARGS, TRUE, NULL);
#else
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
16*NCARGS, TRUE);
#endif
/*
* Allocate a map for physio. Others use a submap of the kernel
@ -285,7 +353,11 @@ cpu_startup()
*/
dvma_base = CPU_ISSUN4M ? DVMA4M_BASE : DVMA_BASE;
dvma_end = CPU_ISSUN4M ? DVMA4M_END : DVMA_END;
#if defined(UVM)
phys_map = uvm_map_create(pmap_kernel(), dvma_base, dvma_end, 1);
#else
phys_map = vm_map_create(pmap_kernel(), dvma_base, dvma_end, 1);
#endif
if (phys_map == NULL)
panic("unable to create DVMA map");
/*
@ -293,16 +365,26 @@ cpu_startup()
* resource map for double mappings which is usable from
* interrupt contexts.
*/
#if defined(UVM)
if (uvm_km_valloc_wait(phys_map, (dvma_end-dvma_base)) != dvma_base)
panic("unable to allocate from DVMA map");
#else
if (kmem_alloc_wait(phys_map, (dvma_end-dvma_base)) != dvma_base)
panic("unable to allocate from DVMA map");
#endif
rminit(dvmamap, btoc((dvma_end-dvma_base)),
vtorc(dvma_base), "dvmamap", ndvmamap);
/*
* Finally, allocate mbuf cluster submap.
*/
#if defined(UVM)
mb_map = uvm_km_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
VM_MBUF_SIZE, FALSE, NULL);
#else
mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
VM_MBUF_SIZE, FALSE);
#endif
/*
* Initialize callouts
*/
@ -314,7 +396,11 @@ cpu_startup()
#ifdef DEBUG
pmapdebug = opmapdebug;
#endif
#if defined(UVM)
printf("avail mem = %ld\n", ptoa(uvmexp.free));
#else
printf("avail mem = %ld\n", ptoa(cnt.v_free_count));
#endif
printf("using %d buffers containing %d bytes of memory\n",
nbuf, bufpages * CLBYTES);
@ -402,7 +488,9 @@ allocsys(v)
if (nswbuf > 256)
nswbuf = 256; /* sanity */
}
#if !defined(UVM)
valloc(swbuf, struct buf, nswbuf);
#endif
valloc(buf, struct buf, nbuf);
/*
* Allocate DVMA slots for 1/4 of the number of i/o buffers
@ -648,8 +736,13 @@ sys_sigreturn(p, v, retval)
p->p_comm, p->p_pid, SCARG(uap, sigcntxp));
#endif
scp = SCARG(uap, sigcntxp);
#if defined(UVM)
if ((int)scp & 3 || uvm_useracc((caddr_t)scp,sizeof *scp, B_WRITE) == 0)
return (EINVAL);
#else
if ((int)scp & 3 || useracc((caddr_t)scp, sizeof *scp, B_WRITE) == 0)
return (EINVAL);
#endif
tf = p->p_md.md_tf;
/*
* Only the icc bits in the psr are used, so it need not be
@ -987,9 +1080,15 @@ oldmon_w_trace(va)
else
printf("no curproc\n");
#if defined(UVM)
printf("uvm: swtch %d, trap %d, sys %d, intr %d, soft %d, faults %d\n",
uvmexp.swtch, uvmexp.traps, uvmexp.syscalls, uvmexp.intrs,
uvmexp.softs, uvmexp.faults);
#else
printf("cnt: swtch %d, trap %d, sys %d, intr %d, soft %d, faults %d\n",
cnt.v_swtch, cnt.v_trap, cnt.v_syscall, cnt.v_intr, cnt.v_soft,
cnt.v_faults);
#endif
write_user_windows();
#define round_up(x) (( (x) + (NBPG-1) ) & (~(NBPG-1)) )
@ -1456,8 +1555,13 @@ _bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
* Allocate pages from the VM system.
*/
TAILQ_INIT(mlist);
#if defined(UVM)
error = uvm_pglistalloc(size, low, high,
alignment, boundary, mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
#else
error = vm_page_alloc_memory(size, low, high,
alignment, boundary, mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
#endif
if (error)
return (error);
@ -1540,7 +1644,11 @@ _bus_dmamem_free(t, segs, nsegs)
/*
* Return the list of pages back to the VM system.
*/
#if defined(UVM)
uvm_pglistfree(segs[0]._ds_mlist);
#else
vm_page_free_memory(segs[0]._ds_mlist);
#endif
free(segs[0]._ds_mlist, M_DEVBUF);
}
@ -1584,7 +1692,13 @@ _bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
* our aligment requirements.
*/
oversize = size + align - PAGE_SIZE;
#if defined(UVM)
r = uvm_map(kmem_map, &sva, oversize, NULL, UVM_UNKNOWN_OFFSET,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
UVM_ADV_NORMAL, 0));
#else
r = vm_map_find(kmem_map, NULL, (vm_offset_t)0, &sva, oversize, TRUE);
#endif
if (r != KERN_SUCCESS)
return (ENOMEM);
@ -1594,9 +1708,17 @@ _bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
/* Return excess virtual addresses */
if (va != sva)
#if defined(UVM)
(void)uvm_unmap(kmem_map, sva, va, 0);
#else
vm_map_remove(kmem_map, sva, va);
#endif
if (va + size != sva + oversize)
#if defined(UVM)
(void)uvm_unmap(kmem_map, va + size, sva + oversize, 0);
#else
vm_map_remove(kmem_map, va + size, sva + oversize);
#endif
*kvap = (caddr_t)va;
@ -1638,7 +1760,11 @@ _bus_dmamem_unmap(t, kva, size)
#endif
size = round_page(size);
#if defined(UVM)
uvm_unmap(kmem_map, (vm_offset_t)kva, (vm_offset_t)kva + size, 0);
#else
vm_map_remove(kmem_map, (vm_offset_t)kva, (vm_offset_t)kva + size);
#endif
#if 0
kmem_free(kmem_map, (vm_offset_t)kva, size);
#endif


@ -1,4 +1,4 @@
/* $NetBSD: mem.c,v 1.14 1997/04/19 21:28:53 pk Exp $ */
/* $NetBSD: mem.c,v 1.15 1998/02/05 07:57:59 mrg Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -148,9 +148,15 @@ mmrw(dev, uio, flags)
c = min(iov->iov_len, prom_vend - prom_vstart);
} else {
c = min(iov->iov_len, MAXPHYS);
#if defined(UVM)
if (!uvm_kernacc((caddr_t)v, c,
uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
return (EFAULT);
#else
if (!kernacc((caddr_t)v, c,
uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
return (EFAULT);
#endif
}
error = uiomove((caddr_t)v, c, uio);
break;


@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.109 1998/01/17 15:02:17 pk Exp $ */
/* $NetBSD: pmap.c,v 1.110 1998/02/05 07:58:01 mrg Exp $ */
/*
* Copyright (c) 1996
@ -72,6 +72,10 @@
#include <vm/vm_prot.h>
#include <vm/vm_page.h>
#if defined(UVM)
#include <uvm/uvm.h>
#endif
#include <machine/autoconf.h>
#include <machine/bsd_openprom.h>
#include <machine/oldmon.h>
@ -878,11 +882,19 @@ pmap_page_upload()
/* First, the gap we created in pmap_bootstrap() */
if (avail_next != unavail_gap_start)
/* Avoid empty ranges */
#if defined(UVM)
uvm_page_physload(
atop(avail_next),
atop(unavail_gap_start),
atop(avail_next),
atop(unavail_gap_start));
#else
vm_page_physload(
atop(avail_next),
atop(unavail_gap_start),
atop(avail_next),
atop(unavail_gap_start));
#endif
avail_next = unavail_gap_end;
}
@ -896,11 +908,19 @@ pmap_page_upload()
if (start == end)
continue;
#if defined(UVM)
uvm_page_physload(
atop(start),
atop(end),
atop(start),
atop(end));
#else
vm_page_physload(
atop(start),
atop(end),
atop(start),
atop(end));
#endif
}
}
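both page-loading calls here take the same four page-frame arguments, (start, end, avail_start, avail_end); since each uploaded range is wholly available, the two pairs are identical.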
@ -2051,6 +2071,15 @@ pv_changepte4_4c(pv0, bis, bic)
/* in hardware: fix hardware copy */
if (CTX_USABLE(pm,rp)) {
#if defined(UVM)
/*
* Bizarreness: we never clear PG_W on
* pager pages, nor PG_NC on DVMA pages.
*/
if (bic == PG_W &&
va >= uvm.pager_sva && va < uvm.pager_eva)
continue;
#else
extern vm_offset_t pager_sva, pager_eva;
/*
@ -2060,6 +2089,7 @@ pv_changepte4_4c(pv0, bis, bic)
if (bic == PG_W &&
va >= pager_sva && va < pager_eva)
continue;
#endif
if (bic == PG_NC &&
va >= DVMA_BASE && va < DVMA_END)
continue;
@ -2337,6 +2367,16 @@ pv_changepte4m(pv0, bis, bic)
sp = &rp->rg_segmap[VA_VSEG(va)];
if (pm->pm_ctx) {
#if defined(UVM)
/*
* Bizarreness: we never clear PG_W on
* pager pages, nor set PG_C on DVMA pages.
*/
if ((bic & PPROT_WRITE) &&
va >= uvm.pager_sva && va < uvm.pager_eva)
continue;
#else
extern vm_offset_t pager_sva, pager_eva;
/*
@ -2346,6 +2386,7 @@ pv_changepte4m(pv0, bis, bic)
if ((bic & PPROT_WRITE) &&
va >= pager_sva && va < pager_eva)
continue;
#endif
if ((bis & SRMMU_PG_C) &&
va >= DVMA_BASE && va < DVMA_END)
continue;
@ -2626,8 +2667,13 @@ pmap_bootstrap(nctx, nregion, nsegment)
int nsegment, nctx, nregion;
{
#if defined(UVM)
uvmexp.pagesize = NBPG;
uvm_setpagesize();
#else
cnt.v_page_size = NBPG;
vm_set_page_size();
#endif
#if defined(SUN4) && (defined(SUN4C) || defined(SUN4M))
/* In this case NPTESG is not a #define */
@ -2686,8 +2732,13 @@ pmap_bootstrap4_4c(nctx, nregion, nsegment)
}
}
#if defined(UVM)
uvmexp.pagesize = NBPG;
uvm_setpagesize();
#else
cnt.v_page_size = NBPG;
vm_set_page_size();
#endif
#if defined(SUN4)
/*
@ -3548,7 +3599,13 @@ pass2:
}
if (pass1) {
#if defined(UVM)
/* XXXCDC: ABSOLUTELY WRONG! uvm_km_alloc() _CAN_
return 0 if out of VM */
pa = pmap_extract(pmap_kernel(), uvm_km_alloc(kernel_map, s));
#else
pa = pmap_extract(pmap_kernel(), kmem_alloc(kernel_map, s));
#endif
pass1 = 0;
goto pass2;
}


@ -1,4 +1,4 @@
/* $NetBSD: trap.c,v 1.61 1998/01/28 02:27:36 thorpej Exp $ */
/* $NetBSD: trap.c,v 1.62 1998/02/05 07:58:02 mrg Exp $ */
/*
* Copyright (c) 1996
@ -274,7 +274,11 @@ trap(type, psr, pc, tf)
/* This steps the PC over the trap. */
#define ADVANCE (n = tf->tf_npc, tf->tf_pc = n, tf->tf_npc = n + 4)
#if defined(UVM)
uvmexp.traps++;
#else
cnt.v_trap++;
#endif
/*
* Generally, kernel traps cause a panic. Any exceptions are
* handled early here.
@ -646,12 +650,16 @@ mem_access_fault(type, ser, v, pc, psr, tf)
register struct vmspace *vm;
register vm_offset_t va;
register int rv;
vm_prot_t ftype;
vm_prot_t atype;
int onfault;
u_quad_t sticks;
char bits[64];
#if defined(UVM)
uvmexp.traps++;
#else
cnt.v_trap++;
#endif
if ((p = curproc) == NULL) /* safety check */
p = &proc0;
sticks = p->p_sticks;
@ -667,9 +675,11 @@ mem_access_fault(type, ser, v, pc, psr, tf)
*/
if (type == T_TEXTFAULT)
v = pc;
if (VA_INHOLE(v))
if (VA_INHOLE(v)) {
rv = KERN_PROTECTION_FAILURE;
goto fault;
ftype = ser & SER_WRITE ? VM_PROT_READ|VM_PROT_WRITE : VM_PROT_READ;
}
atype = ser & SER_WRITE ? VM_PROT_READ|VM_PROT_WRITE : VM_PROT_READ;
va = trunc_page(v);
if (psr & PSR_PS) {
extern char Lfsbail[];
@ -695,8 +705,13 @@ mem_access_fault(type, ser, v, pc, psr, tf)
if (cold)
goto kfault;
if (va >= KERNBASE) {
if (vm_fault(kernel_map, va, ftype, 0) == KERN_SUCCESS)
#if defined(UVM)
if (uvm_fault(kernel_map, va, 0, atype) == KERN_SUCCESS)
return;
#else
if (vm_fault(kernel_map, va, atype, 0) == KERN_SUCCESS)
return;
#endif
goto kfault;
}
} else
@ -710,13 +725,19 @@ mem_access_fault(type, ser, v, pc, psr, tf)
vm = p->p_vmspace;
rv = mmu_pagein(vm->vm_map.pmap, va,
ser & SER_WRITE ? VM_PROT_WRITE : VM_PROT_READ);
if (rv < 0)
if (rv < 0) {
rv = KERN_PROTECTION_FAILURE;
goto fault;
}
if (rv > 0)
goto out;
/* alas! must call the horrible vm code */
rv = vm_fault(&vm->vm_map, (vm_offset_t)va, ftype, FALSE);
#if defined(UVM)
rv = uvm_fault(&vm->vm_map, (vm_offset_t)va, 0, atype);
#else
rv = vm_fault(&vm->vm_map, (vm_offset_t)va, atype, FALSE);
#endif
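the two fault routines take their arguments in different orders, which is why each call site needs the #if rather than a simple rename. approximate signatures (a sketch, not quoted from the headers):

/* old Mach VM */
int vm_fault(vm_map_t map, vm_offset_t va, vm_prot_t fault_type, boolean_t change_wiring);

/* new UVM -- the 0 passed above is the fault-type argument */
int uvm_fault(vm_map_t map, vm_offset_t va, vm_fault_t fault_type, vm_prot_t access_type);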
/*
* If this was a stack access we keep track of the maximum
@ -764,7 +785,12 @@ kfault:
tf->tf_npc = onfault + 4;
return;
}
trapsignal(p, SIGSEGV, (u_int)v);
if (rv == KERN_RESOURCE_SHORTAGE) {
printf("UVM: process %d killed: out of swap space\n",
p->p_pid);
trapsignal(p, SIGKILL, (u_int)v);
} else
trapsignal(p, SIGSEGV, (u_int)v);
}
out:
if ((psr & PSR_PS) == 0) {
@ -796,12 +822,16 @@ mem_access_fault4m(type, sfsr, sfva, afsr, afva, tf)
register struct vmspace *vm;
register vm_offset_t va;
register int rv;
vm_prot_t ftype;
vm_prot_t atype;
int onfault;
u_quad_t sticks;
char bits[64];
#if defined(UVM)
uvmexp.traps++;
#else
cnt.v_trap++;
#endif
if ((p = curproc) == NULL) /* safety check */
p = &proc0;
sticks = p->p_sticks;
@ -864,8 +894,10 @@ mem_access_fault4m(type, sfsr, sfva, afsr, afva, tf)
if ((sfsr & SFSR_FAV) == 0) {
if (type == T_TEXTFAULT)
sfva = pc;
else
else {
rv = KERN_PROTECTION_FAILURE;
goto fault;
}
}
if ((sfsr & SFSR_FT) == SFSR_FT_TRANSERR) {
@ -895,8 +927,13 @@ mem_access_fault4m(type, sfsr, sfva, afsr, afva, tf)
* XXX: Is this really necessary?
*/
if (mmumod == SUN4M_MMU_HS) { /* On HS, we have va for both */
#if defined(UVM)
if (uvm_fault(kernel_map, trunc_page(pc),
0, VM_PROT_READ) != KERN_SUCCESS)
#else
if (vm_fault(kernel_map, trunc_page(pc),
VM_PROT_READ, 0) != KERN_SUCCESS)
#endif
#ifdef DEBUG
printf("mem_access_fault: "
"can't pagein 1st text fault.\n")
@ -908,7 +945,7 @@ mem_access_fault4m(type, sfsr, sfva, afsr, afva, tf)
/* Now munch on protections... */
ftype = sfsr & SFSR_AT_STORE ? VM_PROT_READ|VM_PROT_WRITE:VM_PROT_READ;
atype = sfsr & SFSR_AT_STORE ? VM_PROT_READ|VM_PROT_WRITE:VM_PROT_READ;
if (psr & PSR_PS) {
extern char Lfsbail[];
if (sfsr & SFSR_AT_TEXT || type == T_TEXTFAULT) {
@ -934,8 +971,13 @@ mem_access_fault4m(type, sfsr, sfva, afsr, afva, tf)
if (cold)
goto kfault;
if (va >= KERNBASE) {
if (vm_fault(kernel_map, va, ftype, 0) == KERN_SUCCESS)
#if defined(UVM)
if (uvm_fault(kernel_map, va, 0, atype) == KERN_SUCCESS)
return;
#else
if (vm_fault(kernel_map, va, atype, 0) == KERN_SUCCESS)
return;
#endif
goto kfault;
}
} else
@ -944,7 +986,11 @@ mem_access_fault4m(type, sfsr, sfva, afsr, afva, tf)
vm = p->p_vmspace;
/* alas! must call the horrible vm code */
rv = vm_fault(&vm->vm_map, (vm_offset_t)va, ftype, FALSE);
#if defined(UVM)
rv = uvm_fault(&vm->vm_map, (vm_offset_t)va, 0, atype);
#else
rv = vm_fault(&vm->vm_map, (vm_offset_t)va, atype, FALSE);
#endif
/*
* If this was a stack access we keep track of the maximum
@ -984,7 +1030,12 @@ kfault:
tf->tf_npc = onfault + 4;
return;
}
trapsignal(p, SIGSEGV, (u_int)sfva);
if (rv == KERN_RESOURCE_SHORTAGE) {
printf("UVM: process %d killed: out of swap space\n",
p->p_pid);
trapsignal(p, SIGKILL, (u_int)sfva);
} else
trapsignal(p, SIGSEGV, (u_int)sfva);
}
out:
if ((psr & PSR_PS) == 0) {
@ -1021,7 +1072,11 @@ syscall(code, tf, pc)
extern struct pcb *cpcb;
#endif
#if defined(UVM)
uvmexp.syscalls++;
#else
cnt.v_syscall++;
#endif
p = curproc;
#ifdef DIAGNOSTIC
if (tf->tf_psr & PSR_PS)


@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.32 1997/10/18 00:17:21 gwr Exp $ */
/* $NetBSD: vm_machdep.c,v 1.33 1998/02/05 07:58:03 mrg Exp $ */
/*
* Copyright (c) 1996
@ -316,7 +316,11 @@ vmapbuf(bp, len)
uva = trunc_page(bp->b_data);
off = (vm_offset_t)bp->b_data - uva;
len = round_page(off + len);
#if defined(UVM)
kva = uvm_km_valloc_wait(kernel_map, len);
#else
kva = kmem_alloc_wait(kernel_map, len);
#endif
bp->b_data = (caddr_t)(kva + off);
/*
@ -361,7 +365,11 @@ vunmapbuf(bp, len)
len = round_page(off + len);
/* This will call pmap_remove() for us. */
#if defined(UVM)
uvm_km_free_wakeup(kernel_map, kva, len);
#else
kmem_free_wakeup(kernel_map, kva, len);
#endif
bp->b_data = bp->b_saveaddr;
bp->b_saveaddr = NULL;
@ -516,7 +524,11 @@ cpu_exit(p)
}
free((void *)fs, M_SUBPROC);
}
#if defined(UVM)
uvmspace_free(p->p_vmspace);
#else
vmspace_free(p->p_vmspace);
#endif
switchexit(kernel_map, p->p_addr, USPACE);
/* NOTREACHED */
}