Add UVM option.

Author: leo
Date:   1998-05-11 07:46:15 +00:00
Commit: e63efc87df
Parent: d5157b2b05
10 changed files with 405 additions and 29 deletions
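The change follows a single pattern in every touched file: pull in the generated opt_uvm.h header, then choose between the new UVM interfaces and the old Mach-VM ones at compile time. A minimal sketch of that pattern, modelled on the interrupt-accounting hunks in intr.c and locore.s below (illustrative only, not part of the commit; count_interrupt is a made-up helper):

/*
 * opt_uvm.h is generated by config(8) and defines UVM only when the
 * kernel configuration contains "options UVM".
 */
#include "opt_uvm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#if defined(UVM)
#include <uvm/uvm_extern.h>	/* uvmexp, uvm_km_*(), uvm_map(), ... */
#endif

static void
count_interrupt(void)
{
#if defined(UVM)
	uvmexp.intrs++;		/* UVM keeps its counters in struct uvmexp */
#else
	cnt.v_intr++;		/* Mach VM keeps them in struct vmmeter */
#endif
}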

File: bus.c

@ -1,4 +1,4 @@
/* $NetBSD: bus.c,v 1.2 1998/04/10 08:19:53 leo Exp $ */
/* $NetBSD: bus.c,v 1.3 1998/05/11 07:46:15 leo Exp $ */
/*-
* Copyright (c) 1998 The NetBSD Foundation, Inc.
@ -37,6 +37,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "opt_uvm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
@ -78,7 +80,11 @@ bus_space_handle_t *mhp;
panic("bus_mem_map: overflow");
#endif
#if defined(UVM)
va = uvm_km_valloc(kernel_map, endpa - pa);
#else
va = kmem_alloc_pageable(kernel_map, endpa - pa);
#endif
if (va == 0)
return 1;
*mhp = (caddr_t)(va + (bpa & PGOFSET));
@ -108,7 +114,11 @@ bus_size_t size;
panic("unmap_iospace: overflow");
#endif
#if defined(UVM)
uvm_km_free(kernel_map, (vm_offset_t)va, endva - va);
#else
kmem_free(kernel_map, va, endva - va);
#endif
}
/*

File: genassym.cf

@ -1,4 +1,4 @@
# $NetBSD: genassym.cf,v 1.6 1998/01/06 08:46:12 thorpej Exp $
# $NetBSD: genassym.cf,v 1.7 1998/05/11 07:46:16 leo Exp $
#-
# Copyright (c) 1997 The NetBSD Foundation, Inc.
@ -45,6 +45,9 @@ include <sys/syscall.h>
include <sys/user.h>
include <vm/vm.h>
ifdef UVM
include <uvm/uvm_extern.h>
endif
include <machine/cpu.h>
include <machine/trap.h>
@ -85,7 +88,12 @@ define MD_REGS offsetof(struct mdproc, md_regs)
define SRUN SRUN
# interrupt/fault metering
ifdef UVM
define UVMEXP_INTRS offsetof(struct uvmexp, intrs)
else
define V_INTR offsetof(struct vmmeter, v_intr)
endif
# general constants
define UPAGES UPAGES

File: intr.c

@ -1,4 +1,4 @@
/* $NetBSD: intr.c,v 1.4 1997/10/04 09:46:07 thorpej Exp $ */
/* $NetBSD: intr.c,v 1.5 1998/05/11 07:46:16 leo Exp $ */
/*-
* Copyright (c) 1996 The NetBSD Foundation, Inc.
@ -36,12 +36,19 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "opt_uvm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/queue.h>
#include <sys/device.h>
#if defined(UVM)
#include <vm/vm.h>
#include <uvm/uvm_extern.h>
#endif
#include <machine/cpu.h>
#include <atari/atari/intr.h>
@ -287,8 +294,11 @@ struct clockframe frame;
ih_list_t *vec_list;
struct intrhand *ih;
#if defined(UVM)
uvmexp.intrs++;
#else
cnt.v_intr++;
#endif
vector = (frame.cf_vo & 0xfff) >> 2;
if (vector < (AVEC_LOC+AVEC_MAX) && vector >= AVEC_LOC)
vec_list = &autovec_list[vector - AVEC_LOC];

File: locore.s

@ -1,4 +1,4 @@
/* $NetBSD: locore.s,v 1.48 1998/01/05 23:16:25 thorpej Exp $ */
/* $NetBSD: locore.s,v 1.49 1998/05/11 07:46:17 leo Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -49,6 +49,8 @@
* Atari Modifications: Leo Weppelman
*/
#include "opt_uvm.h"
#include "assym.h"
#include <machine/asm.h>
@ -551,7 +553,11 @@ Lbrkpt3:
_spurintr:
addql #1,_intrcnt+0
addql #1,_cnt+V_INTR
#if defined(UVM)
addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS
#else
addql #1,_C_LABEL(cnt)+V_INTR
#endif
jra rei
/* MFP timer A handler --- System clock --- */
@ -562,7 +568,11 @@ mfp_tima:
addql #4,sp | pop params
addql #1,_intrcnt_user+52 | add another system clock interrupt
moveml sp@+,d0-d1/a0-a1 | restore scratch regs
addql #1,_cnt+V_INTR | chalk up another interrupt
#if defined(UVM)
addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS
#else
addql #1,_C_LABEL(cnt)+V_INTR
#endif
jra rei | all done
#ifdef STATCLOCK
@ -572,7 +582,11 @@ mfp_timc:
jbsr _statintr | call statistics clock handler
addql #1,_intrcnt+36 | add another stat clock interrupt
moveml sp@+,d0-d1/a0-a1 | restore scratch regs
addql #1,_cnt+V_INTR | chalk up another interrupt
#if defined(UVM)
addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS
#else
addql #1,_C_LABEL(cnt)+V_INTR
#endif
jra rei | all done
#endif /* STATCLOCK */
@ -586,7 +600,11 @@ mfp_kbd:
jbsr _kbdintr | handle interrupt
addql #4,sp | pop SR
moveml sp@+,d0-d1/a0-a1
addql #1,_cnt+V_INTR | chalk up another interrupt
#if defined(UVM)
addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS
#else
addql #1,_C_LABEL(cnt)+V_INTR
#endif
jra rei
/* MFP2 SCSI DMA handler --- NCR5380 --- */
@ -599,7 +617,11 @@ mfp2_5380dm:
jbsr _scsi_dma | handle interrupt
addql #4,sp | pop SR
moveml sp@+,d0-d1/a0-a1
addql #1,_cnt+V_INTR | chalk up another interrupt
#if defined(UVM)
addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS
#else
addql #1,_C_LABEL(cnt)+V_INTR
#endif
jra rei
/* MFP2 SCSI handler --- NCR5380 --- */
@ -612,7 +634,11 @@ mfp2_5380:
jbsr _scsi_ctrl | handle interrupt
addql #4,sp | pop SR
moveml sp@+,d0-d1/a0-a1
addql #1,_cnt+V_INTR | chalk up another interrupt
#if defined(UVM)
addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS
#else
addql #1,_C_LABEL(cnt)+V_INTR
#endif
jra rei
/* SCC Interrupt --- modem2/serial2 --- */
@ -625,7 +651,11 @@ sccint:
jbsr _zshard | handle interrupt
addql #4,sp | pop SR
moveml sp@+,d0-d1/a0-a1
addql #1,_cnt+V_INTR | chalk up another interrupt
#if defined(UVM)
addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS
#else
addql #1,_C_LABEL(cnt)+V_INTR
#endif
jra rei
/* Level 1 (Software) interrupt handler */
@ -636,7 +666,11 @@ _lev1intr:
addql #1,_intrcnt+16 | add another software interrupt
jbsr _softint | handle software interrupts
moveml sp@+,d0-d1/a0-a1
addql #1,_cnt+V_INTR | chalk up another interrupt
#if defined(UVM)
addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS
#else
addql #1,_C_LABEL(cnt)+V_INTR
#endif
jra rei
/*
@ -1038,7 +1072,10 @@ ENTRY(qsetjmp)
moveq #0,d0 | return 0
rts
.globl _whichqs,_qs,_cnt,_panic
#if !defined(UVM)
.globl _cnt
#endif
.globl _whichqs,_qs,_panic
.globl _curproc
.comm _want_resched,4
@ -1076,7 +1113,11 @@ ENTRY(switch_exit)
movl #USPACE,sp@- | size of u-area
movl a0@(P_ADDR),sp@- | address of process's u-area
movl _kernel_map,sp@- | map it was allocated in
jbsr _kmem_free | deallocate it
#if defined(UVM)
jbsr _C_LABEL(uvm_km_free) | deallocate it
#else
jbsr _C_LABEL(kmem_free) | deallocate it
#endif
lea sp@(12),sp | pop args
jra _cpu_switch

File: machdep.c

@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.61 1998/05/07 07:25:51 leo Exp $ */
/* $NetBSD: machdep.c,v 1.62 1998/05/11 07:46:17 leo Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -42,6 +42,8 @@
* @(#)machdep.c 7.16 (Berkeley) 6/3/91
*/
#include "opt_uvm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
@ -77,6 +79,10 @@
#include <vm/vm.h>
#include <vm/vm_kern.h>
#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif
#include <sys/sysctl.h>
#include <machine/db_machdep.h>
@ -97,6 +103,14 @@ static void netintr __P((void));
void straymfpint __P((int, u_short));
void straytrap __P((int, u_short));
#if defined(UVM)
vm_map_t exec_map = NULL;
vm_map_t mb_map = NULL;
vm_map_t phys_map = NULL;
#else
vm_map_t buffer_map;
#endif
/*
* Declare these as initialized data so we can patch them.
*/
@ -156,6 +170,7 @@ cpu_startup()
register unsigned i;
register caddr_t v, firstaddr;
int base, residual;
u_long avail_mem;
#ifdef DEBUG
extern int pmapdebug;
@ -252,14 +267,21 @@ again:
if (nswbuf > 256)
nswbuf = 256; /* sanity */
}
#if !defined(UVM)
valloc(swbuf, struct buf, nswbuf);
#endif
valloc(buf, struct buf, nbuf);
/*
* End of first pass, size has been calculated so allocate memory
*/
if (firstaddr == 0) {
size = (vm_size_t)(v - firstaddr);
#if defined(UVM)
firstaddr = (caddr_t) uvm_km_zalloc(kernel_map,
round_page(size));
#else
firstaddr = (caddr_t) kmem_alloc(kernel_map, round_page(size));
#endif
if (firstaddr == 0)
panic("startup: no room for tables");
goto again;
@ -275,12 +297,21 @@ again:
* in that they usually occupy more virtual memory than physical.
*/
size = MAXBSIZE * nbuf;
#if defined(UVM)
if (uvm_map(kernel_map, (vm_offset_t *) &buffers, round_page(size),
NULL, UVM_UNKNOWN_OFFSET,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
UVM_ADV_NORMAL, 0)) != KERN_SUCCESS)
panic("startup: cannot allocate VM for buffers");
minaddr = (vm_offset_t)buffers;
#else
buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
&maxaddr, size, TRUE);
minaddr = (vm_offset_t)buffers;
if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
&minaddr, size, FALSE) != KERN_SUCCESS)
panic("startup: cannot allocate buffers");
#endif
if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
/* don't want to alloc more physical mem than needed */
bufpages = btoc(MAXBSIZE) * nbuf;
@ -288,6 +319,31 @@ again:
base = bufpages / nbuf;
residual = bufpages % nbuf;
for (i = 0; i < nbuf; i++) {
#if defined(UVM)
vm_size_t curbufsize;
vm_offset_t curbuf;
struct vm_page *pg;
/*
* Each buffer has MAXBSIZE bytes of VM space allocated. Of
* that MAXBSIZE space, we allocate and map (base+1) pages
* for the first "residual" buffers, and then we allocate
* "base" pages for the rest.
*/
curbuf = (vm_offset_t) buffers + (i * MAXBSIZE);
curbufsize = CLBYTES * ((i < residual) ? (base+1) : base);
while (curbufsize) {
pg = uvm_pagealloc(NULL, 0, NULL);
if (pg == NULL)
panic("cpu_startup: not enough memory for "
"buffer cache");
pmap_enter(kernel_map->pmap, curbuf,
VM_PAGE_TO_PHYS(pg), VM_PROT_ALL, TRUE);
curbuf += PAGE_SIZE;
curbufsize -= PAGE_SIZE;
}
#else /* ! UVM */
vm_size_t curbufsize;
vm_offset_t curbuf;
@ -302,24 +358,42 @@ again:
curbufsize = CLBYTES * (i < residual ? base+1 : base);
vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
vm_map_simplify(buffer_map, curbuf);
#endif /* UVM */
}
/*
* Allocate a submap for exec arguments. This map effectively
* limits the number of processes exec'ing at any time.
*/
exec_map = kmem_suballoc(kernel_map,&minaddr,&maxaddr, 16*NCARGS, TRUE);
#if defined(UVM)
exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
16*NCARGS, TRUE, FALSE, NULL);
#else
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
16*NCARGS, TRUE);
#endif
/*
* Allocate a submap for physio
*/
phys_map= kmem_suballoc(kernel_map,&minaddr,&maxaddr,VM_PHYS_SIZE,TRUE);
#if defined(UVM)
phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
VM_PHYS_SIZE, TRUE, FALSE, NULL);
#else
phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
VM_PHYS_SIZE, TRUE);
#endif
/*
* Finally, allocate mbuf cluster submap.
*/
#if defined(UVM)
mb_map = uvm_km_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
VM_MBUF_SIZE, FALSE, FALSE, NULL);
#else
mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
VM_MBUF_SIZE, FALSE);
#endif
/*
* Tell the VM system that page 0 isn't mapped.
@ -327,9 +401,15 @@ again:
* XXX This is bogus; should just fix KERNBASE and
* XXX VM_MIN_KERNEL_ADDRESS, but not right now.
*/
#if defined(UVM)
if (uvm_map_protect(kernel_map, 0, NBPG, UVM_PROT_NONE, TRUE)
!= KERN_SUCCESS)
panic("can't mark page 0 off-limits");
#else
if (vm_map_protect(kernel_map, 0, NBPG, VM_PROT_NONE, TRUE)
!= KERN_SUCCESS)
panic("can't mark page 0 off-limits");
#endif
/*
* Tell the VM system that writing to kernel text isn't allowed.
@ -338,10 +418,15 @@ again:
* XXX Should be m68k_trunc_page(&kernel_text) instead
* XXX of NBPG.
*/
#if defined(UVM)
if (uvm_map_protect(kernel_map, NBPG, m68k_round_page(&etext),
UVM_PROT_READ|UVM_PROT_EXEC, TRUE) != KERN_SUCCESS)
panic("can't protect kernel text");
#else
if (vm_map_protect(kernel_map, NBPG, m68k_round_page(&etext),
VM_PROT_READ|VM_PROT_EXECUTE, TRUE) != KERN_SUCCESS)
panic("can't protect kernel text");
#endif
/*
* Initialize callouts
*/
@ -352,8 +437,12 @@ again:
#ifdef DEBUG
pmapdebug = opmapdebug;
#endif
printf("avail mem = %ld (%ld pages)\n", ptoa(cnt.v_free_count),
ptoa(cnt.v_free_count)/NBPG);
#if defined(UVM)
avail_mem = ptoa(uvmexp.free);
#else
avail_mem = ptoa(cnt.v_free_count);
#endif
printf("avail mem = %ld (%ld pages)\n", avail_mem, avail_mem/NBPG);
printf("using %d buffers containing %d bytes of memory\n",
nbuf, bufpages * CLBYTES);
@ -782,18 +871,30 @@ softint()
{
if(ssir & SIR_NET) {
siroff(SIR_NET);
#if defined(UVM)
uvmexp.softs++;
#else
cnt.v_soft++;
#endif
netintr();
}
if(ssir & SIR_CLOCK) {
siroff(SIR_CLOCK);
#if defined(UVM)
uvmexp.softs++;
#else
cnt.v_soft++;
#endif
/* XXXX softclock(&frame.f_stackadj); */
softclock();
}
if (ssir & SIR_CBACK) {
siroff(SIR_CBACK);
#if defined(UVM)
uvmexp.softs++;
#else
cnt.v_soft++;
#endif
call_sicallbacks();
}
}

File: mem.c

@ -1,4 +1,4 @@
/* $NetBSD: mem.c,v 1.9 1997/04/25 19:07:45 leo Exp $ */
/* $NetBSD: mem.c,v 1.10 1998/05/11 07:46:18 leo Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -44,6 +44,8 @@
* Memory special file
*/
#include "opt_uvm.h"
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/proc.h>
@ -53,6 +55,9 @@
#include <sys/malloc.h>
#include <vm/vm.h>
#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif
#include <machine/cpu.h>
#include <machine/pte.h>
@ -153,9 +158,15 @@ mmrw(dev, uio, flags)
case 1: /* minor device 1 is kernel memory */
v = uio->uio_offset;
c = min(iov->iov_len, MAXPHYS);
#if defined(UVM)
if (!uvm_kernacc((caddr_t)v, c,
uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
return (EFAULT);
#else
if (!kernacc((caddr_t)v, c,
uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
return (EFAULT);
#endif
error = uiomove((caddr_t)v, c, uio);
break;

File: pmap.c

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.25 1998/05/07 07:25:52 leo Exp $ */
/* $NetBSD: pmap.c,v 1.26 1998/05/11 07:46:18 leo Exp $ */
/*
* Copyright (c) 1991 Regents of the University of California.
@ -74,6 +74,7 @@
* to which processors are currently using which maps,
* and to when physical maps must be made correct.
*/
#include "opt_uvm.h"
#include <sys/param.h>
#include <sys/systm.h>
@ -83,11 +84,17 @@
#include <sys/user.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#if defined(UVM)
#include <uvm/uvm.h>
#endif
#include <m68k/cpu.h>
#include <m68k/cacheops.h>
#include <machine/pte.h>
#include <machine/cpu.h>
#include <machine/vmparam.h>
/*
* Allocate various and sundry SYSMAPs used in the days of old VM
* and not yet converted. XXX.
@ -235,6 +242,9 @@ vm_size_t Sysptsize = VM_KERNEL_PT_PAGES + 4 / NPTEPG;
struct pmap kernel_pmap_store;
vm_map_t pt_map;
#if defined(UVM)
struct vm_map pt_map_store;
#endif
vm_size_t mem_size; /* memory size in bytes */
vm_offset_t avail_end; /* PA of last available physical page */
@ -340,10 +350,17 @@ u_int hw_addr, hw_pages;
* Announce available memory to the VM-system
*/
for (i = 0; usable_segs[i].start; i++)
#if defined(UVM)
uvm_page_physload(atop(usable_segs[i].start),
atop(usable_segs[i].end),
atop(usable_segs[i].start),
atop(usable_segs[i].end));
#else
vm_page_physload(atop(usable_segs[i].start),
atop(usable_segs[i].end),
atop(usable_segs[i].start),
atop(usable_segs[i].end));
#endif
virtual_avail = VM_MIN_KERNEL_ADDRESS + kernel_size;
@ -408,6 +425,30 @@ pmap_init()
* Now that kernel map has been allocated, we can mark as
* unavailable regions which we have mapped in atari_init.c.
*/
#if defined(UVM)
addr = atarihwaddr;
if (uvm_map(kernel_map, &addr,
ptoa(atarihwpg),
NULL, UVM_UNKNOWN_OFFSET,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
UVM_INH_NONE, UVM_ADV_RANDOM,
UVM_FLAG_FIXED)) != KERN_SUCCESS)
goto bogons;
addr = (vm_offset_t) Sysmap;
if (uvm_map(kernel_map, &addr, ATARI_KPTSIZE,
NULL, UVM_UNKNOWN_OFFSET,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
UVM_INH_NONE, UVM_ADV_RANDOM,
UVM_FLAG_FIXED)) != KERN_SUCCESS) {
/*
* If this fails, it is probably because the static
* portion of the kernel page table isn't big enough
* and we overran the page table map.
*/
bogons:
panic("pmap_init: bogons in the VM system!\n");
}
#else
addr = atarihwaddr;
(void)vm_map_find(kernel_map, NULL, 0, &addr, ptoa(atarihwpg), FALSE);
if (addr != atarihwaddr)
@ -424,6 +465,8 @@ pmap_init()
*/
if (addr != (vm_offset_t)Sysmap)
panic("pmap_init: bogons in the VM system!\n");
#endif /* UVM */
#ifdef DEBUG
if (pmapdebug & PDB_INIT) {
printf("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n",
@ -449,7 +492,13 @@ pmap_init()
s += page_cnt * sizeof(char); /* attribute table */
s = round_page(s);
#if defined(UVM)
addr = (vm_offset_t) uvm_km_zalloc(kernel_map, s);
if (addr == 0)
panic("pmap_init: can't allocate data structures");
#else
addr = (vm_offset_t) kmem_alloc(kernel_map, s);
#endif
Segtabzero = (u_int *) addr;
Segtabzeropa = (u_int *) pmap_extract(pmap_kernel(), addr);
@ -501,17 +550,35 @@ pmap_init()
* Verify that space will be allocated in region for which
* we already have kernel PT pages.
*/
#if defined(UVM)
addr = 0;
rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
if (rv != KERN_SUCCESS || (addr + s) >= (vm_offset_t)Sysmap)
panic("pmap_init: kernel PT too small");
rv = uvm_unmap(kernel_map, addr, addr + s, FALSE);
if (rv != KERN_SUCCESS)
panic("pmap_init: uvm_unmap failed");
#else
addr = 0;
rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
if (rv != KERN_SUCCESS || addr + s >= (vm_offset_t)Sysmap)
panic("pmap_init: kernel PT too small");
vm_map_remove(kernel_map, addr, addr + s);
#endif
/*
* Now allocate the space and link the pages together to
* form the KPT free list.
*/
#if defined(UVM)
addr = (vm_offset_t) uvm_km_zalloc(kernel_map, s);
if (addr == 0)
panic("pmap_init: cannot allocate KPT free list");
#else
addr = (vm_offset_t) kmem_alloc(kernel_map, s);
#endif
s = ptoa(npg);
addr2 = addr + s;
kpt_pages = &((struct kpt_page *)addr2)[npg];
@ -548,6 +615,11 @@ pmap_init()
maxproc = ATARI_UPTMAXSIZE / ATARI_UPTSIZE;
}
else s = maxproc * ATARI_UPTSIZE;
#if defined(UVM)
pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, TRUE,
TRUE, &pt_map_store);
#else
addr2 = addr + s;
rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
if (rv != KERN_SUCCESS)
@ -559,6 +631,8 @@ pmap_init()
rv = vm_map_submap(kernel_map, addr, addr2, pt_map);
if (rv != KERN_SUCCESS)
panic("pmap_init: cannot map range to pt_map");
#endif /* UVM */
#ifdef DEBUG
if (pmapdebug & PDB_INIT)
printf("pmap_init: pt_map [%lx - %lx)\n", addr, addr2);
@ -609,9 +683,15 @@ pmap_alloc_pv()
int i;
if (pv_nfree == 0) {
#if defined(UVM)
pvp = (struct pv_page *)uvm_km_zalloc(kernel_map, NBPG);
if (pvp == 0)
panic("pmap_alloc_pv: uvm_km_zalloc() failed");
#else
pvp = (struct pv_page *)kmem_alloc(kernel_map, NBPG);
if (pvp == 0)
panic("pmap_alloc_pv: kmem_alloc() failed");
#endif
pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
for (i = NPVPPG - 2; i; i--, pv++)
pv->pv_next = pv + 1;
@ -653,7 +733,11 @@ pmap_free_pv(pv)
case NPVPPG:
pv_nfree -= NPVPPG - 1;
TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
#if defined(UVM)
uvm_km_free(kernel_map, (vm_offset_t)pvp, NBPG);
#else
kmem_free(kernel_map, (vm_offset_t)pvp, NBPG);
#endif
break;
}
}
@ -800,11 +884,21 @@ pmap_release(pmap)
panic("pmap_release count");
#endif
if (pmap->pm_ptab)
#if defined(UVM)
uvm_km_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
ATARI_UPTSIZE);
#else
kmem_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
ATARI_UPTSIZE);
#endif
if (pmap->pm_stab != Segtabzero)
#if defined(UVM)
uvm_km_free_wakeup(kernel_map, (vm_offset_t)pmap->pm_stab,
ATARI_STSIZE);
#else
kmem_free_wakeup(kernel_map, (vm_offset_t)pmap->pm_stab,
ATARI_STSIZE);
#endif
}
/*
@ -908,8 +1002,13 @@ pmap_remove(pmap, sva, eva)
*/
if (pmap != pmap_kernel()) {
pte = pmap_pte(pmap, va);
vm_map_pageable(pt_map, trunc_page(pte),
#if defined(UVM)
(void) uvm_map_pageable(pt_map, trunc_page(pte),
round_page(pte+1), TRUE);
#else
(void) vm_map_pageable(pt_map, trunc_page(pte),
round_page(pte+1), TRUE);
#endif
#ifdef DEBUG
if (pmapdebug & PDB_WIRING)
pmap_check_wiring("remove", trunc_page(pte));
@ -1019,9 +1118,15 @@ pmap_remove(pmap, sva, eva)
printf("remove: free stab %p\n",
ptpmap->pm_stab);
#endif
#if defined(UVM)
uvm_km_free_wakeup(kernel_map,
(vm_offset_t)ptpmap->pm_stab,
ATARI_STSIZE);
#else
kmem_free_wakeup(kernel_map,
(vm_offset_t)ptpmap->pm_stab,
ATARI_STSIZE);
#endif
ptpmap->pm_stab = Segtabzero;
ptpmap->pm_stpa = Segtabzeropa;
#if defined(M68040) || defined(M68060)
@ -1235,8 +1340,13 @@ pmap_enter(pmap, va, pa, prot, wired)
* For user mapping, allocate kernel VM resources if necessary.
*/
if (pmap->pm_ptab == NULL)
#if defined(UVM)
pmap->pm_ptab = (u_int *)
uvm_km_valloc_wait(pt_map, ATARI_UPTSIZE);
#else
pmap->pm_ptab = (u_int *)
kmem_alloc_wait(pt_map, ATARI_UPTSIZE);
#endif
/*
* Segment table entry not valid, we need a new PT page
@ -1307,8 +1417,13 @@ pmap_enter(pmap, va, pa, prot, wired)
* is a valid mapping in the page.
*/
if (pmap != pmap_kernel())
vm_map_pageable(pt_map, trunc_page(pte),
round_page(pte+1), FALSE);
#if defined(UVM)
(void) uvm_map_pageable(pt_map, trunc_page(pte),
round_page(pte+1), FALSE);
#else
(void) vm_map_pageable(pt_map, trunc_page(pte),
round_page(pte+1), FALSE);
#endif
/*
* Enter on the PV list if part of our managed memory
@ -2059,10 +2174,15 @@ pmap_changebit(pa, bit, setem)
* XXX don't write protect pager mappings
*/
if (bit == PG_RO) {
#if defined(UVM)
if (va >= uvm.pager_sva && va < uvm.pager_eva)
continue;
#else
extern vm_offset_t pager_sva, pager_eva;
if (va >= pager_sva && va < pager_eva)
continue;
#endif
}
pte = (int *) pmap_pte(pv->pv_pmap, va);
@ -2118,8 +2238,13 @@ pmap_enter_ptpage(pmap, va)
* reference count drops to zero.
*/
if (pmap->pm_stab == Segtabzero) {
#if defined(UVM)
pmap->pm_stab = (u_int *)
uvm_km_zalloc(kernel_map, ATARI_STSIZE);
#else
pmap->pm_stab = (u_int *)
kmem_alloc(kernel_map, ATARI_STSIZE);
#endif
pmap->pm_stpa = (u_int *) pmap_extract(
pmap_kernel(), (vm_offset_t)pmap->pm_stab);
#if defined(M68040) || defined(M68060)
@ -2258,9 +2383,18 @@ pmap_enter_ptpage(pmap, va)
if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
printf("enter_pt: about to fault UPT pg at %lx\n", va);
#endif
#if defined(UVM)
if ((s = uvm_fault(pt_map, va, 0, VM_PROT_READ|VM_PROT_WRITE))
!= KERN_SUCCESS) {
printf("uvm_fault(pt_map, 0x%lx, 0, RW) -> %d\n",
va, s);
panic("pmap_enter: uvm_fault failed");
}
#else
if (vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE)
!= KERN_SUCCESS)
panic("pmap_enter: vm_fault failed");
#endif
ptpa = pmap_extract(pmap_kernel(), va);
#ifdef DEBUG
PHYS_TO_VM_PAGE(ptpa)->flags |= PG_PTPAGE;

File: trap.c

@ -1,4 +1,4 @@
/* $NetBSD: trap.c,v 1.28 1997/07/08 16:56:34 kleink Exp $ */
/* $NetBSD: trap.c,v 1.29 1998/05/11 07:46:19 leo Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -42,6 +42,8 @@
* @(#)trap.c 7.15 (Berkeley) 8/2/91
*/
#include "opt_uvm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
@ -58,6 +60,9 @@
#include <vm/vm.h>
#include <sys/user.h>
#include <vm/pmap.h>
#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif
#include <m68k/cpu.h>
#include <m68k/cacheops.h>
@ -337,7 +342,12 @@ trap(type, code, v, frame)
p = curproc;
sticks = ucode = 0;
#if defined(UVM)
uvmexp.traps++;
#else
cnt.v_trap++;
#endif
/* I have verified that this DOES happen! -gwr */
if (p == NULL)
@ -361,6 +371,9 @@ trap(type, code, v, frame)
#endif
switch (type) {
#ifdef DEBUG
dopanic:
#endif /* DEBUG */
default:
panictrap(type, code, v, &frame);
/*
@ -544,7 +557,11 @@ trap(type, code, v, frame)
* If this was not an AST trap, we are all done.
*/
if (type != (T_ASTFLT|T_USER)) {
#if defined(UVM)
uvmexp.traps--;
#else
cnt.v_trap--;
#endif
return;
}
spl0();
@ -605,7 +622,11 @@ trap(type, code, v, frame)
panictrap(type, code, v, &frame);
}
#endif
#if defined(UVM)
rv = uvm_fault(map, va, 0, ftype);
#else
rv = vm_fault(map, va, ftype, FALSE);
#endif
#ifdef DEBUG
if (rv && MDB_ISPID(p->p_pid))
printf("vm_fault(%x, %x, %x, 0) -> %x\n",
@ -908,6 +929,7 @@ writeback(fp, docachepush)
}
}
p->p_addr->u_pcb.pcb_onfault = oonfault;
/*
* Determine the cause of the failure if any translating to
* a signal. If the corresponding VA is valid and RO it is
@ -915,6 +937,17 @@ writeback(fp, docachepush)
* illegal reference (SIGSEGV).
*/
if (err) {
#if defined(UVM)
if (uvm_map_checkprot(&p->p_vmspace->vm_map,
trunc_page(fa), round_page(fa),
VM_PROT_READ) &&
!uvm_map_checkprot(&p->p_vmspace->vm_map,
trunc_page(fa), round_page(fa),
VM_PROT_WRITE))
err = SIGBUS;
else
err = SIGSEGV;
#else /* ! UVM */
if (vm_map_check_protection(&p->p_vmspace->vm_map,
trunc_page(fa), round_page(fa),
VM_PROT_READ) &&
@ -924,6 +957,7 @@ writeback(fp, docachepush)
err = SIGBUS;
else
err = SIGSEGV;
#endif /* UVM */
}
return(err);
}
@ -995,7 +1029,11 @@ syscall(code, frame)
register_t args[8], rval[2];
u_quad_t sticks;
#if defined(UVM)
uvmexp.syscalls++;
#else
cnt.v_syscall++;
#endif
if (!USERMODE(frame.f_sr))
panic("syscall");
p = curproc;

File: vm_machdep.c

@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.14 1998/01/06 07:49:39 thorpej Exp $ */
/* $NetBSD: vm_machdep.c,v 1.15 1998/05/11 07:46:20 leo Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -42,6 +42,8 @@
* @(#)vm_machdep.c 7.10 (Berkeley) 5/7/91
*/
#include "opt_uvm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
@ -57,6 +59,10 @@
#include <vm/vm.h>
#include <sys/user.h>
#include <vm/vm_kern.h>
#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif
#include <machine/pte.h>
#include <machine/cpu.h>
@ -134,10 +140,18 @@ void
cpu_exit(p)
struct proc *p;
{
#if defined(UVM)
uvmspace_free(p->p_vmspace);
#else
vmspace_free(p->p_vmspace);
#endif
(void)splhigh();
#if defined(UVM)
uvmexp.swtch++;
#else
cnt.v_swtch++;
#endif
switch_exit(p);
/* NOTREACHED */
}
@ -349,7 +363,11 @@ vmapbuf(bp, len)
uva = m68k_trunc_page(bp->b_saveaddr = bp->b_data);
off = (vm_offset_t)bp->b_data - uva;
len = m68k_round_page(off + len);
#if defined(UVM)
kva = uvm_km_valloc_wait(phys_map, len);
#else
kva = kmem_alloc_wait(phys_map, len);
#endif
bp->b_data = (caddr_t)(kva + off);
upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
@ -387,7 +405,11 @@ vunmapbuf(bp, len)
* pmap_remove() is unnecessary here, as kmem_free_wakeup()
* will do it for us.
*/
#if defined(UVM)
uvm_km_free_wakeup(phys_map, kva, len);
#else
kmem_free_wakeup(phys_map, kva, len);
#endif
bp->b_data = bp->b_saveaddr;
bp->b_saveaddr = 0;
}

File: GENERIC

@ -1,5 +1,5 @@
#
# $NetBSD: GENERIC,v 1.41 1998/04/23 09:26:25 leo Exp $
# $NetBSD: GENERIC,v 1.42 1998/05/11 07:46:28 leo Exp $
#
# Generic atari
#
@ -105,6 +105,7 @@ options MEMORY_DISK_HOOKS # Boot RAM-disk
options DISKLABEL_NBDA # NetBSD disklabels (required)
options DISKLABEL_AHDI # NetBSD/AHDI disklabels
options SERCONSOLE # modem1 console support
options UVM # New UVM VM system
# Try linked commands on all targets
options TRY_SCSI_LINKED_COMMANDS=0x7f
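The GENERIC hunk above is what actually enables the new code: config(8) turns "options UVM" into a definition in the generated opt_uvm.h that the C and assembly sources test with #if defined(UVM). A hedged sketch of what that generated header amounts to (its exact contents come from config(8) and are not part of this commit):

/*
 * opt_uvm.h, as generated by config(8) for a configuration that
 * contains "options UVM".  Without the option the generated header
 * defines nothing, so all of the #if defined(UVM) blocks above fall
 * back to the old Mach-VM code paths.
 */
#define	UVM	1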