diff --git a/sys/arch/mvme68k/conf/VME147 b/sys/arch/mvme68k/conf/VME147 index 2a8ce6a93cfe..bc7c77856ce7 100644 --- a/sys/arch/mvme68k/conf/VME147 +++ b/sys/arch/mvme68k/conf/VME147 @@ -1,4 +1,4 @@ -# $NetBSD: VME147,v 1.20 1997/12/17 21:20:06 scw Exp $ +# $NetBSD: VME147,v 1.21 1998/02/21 19:03:25 scw Exp $ include "arch/mvme68k/conf/std.mvme68k" @@ -7,6 +7,9 @@ options MVME147 maxusers 2 +options UVM +#options DDB + options DIAGNOSTIC, DEBUG #options SYSVSHM diff --git a/sys/arch/mvme68k/include/pmap.h b/sys/arch/mvme68k/include/pmap.h index d055abe91e81..c08b8233e070 100644 --- a/sys/arch/mvme68k/include/pmap.h +++ b/sys/arch/mvme68k/include/pmap.h @@ -1,4 +1,4 @@ -/* $NetBSD: pmap.h,v 1.9 1998/02/18 02:05:34 cgd Exp $ */ +/* $NetBSD: pmap.h,v 1.10 1998/02/21 19:03:25 scw Exp $ */ /* * Copyright (c) 1987 Carnegie-Mellon University @@ -142,19 +142,15 @@ extern vm_offset_t vm_first_phys, vm_num_phys; extern struct pv_entry *pv_table; /* array of entries, one per page */ -#ifndef MACHINE_NONCONTIG +#ifndef MACHINE_NEW_NONCONTIG #define pmap_page_index(pa) atop(pa - vm_first_phys) #endif -#define pa_to_pvh(pa) (&pv_table[pmap_page_index(pa)]) #define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count) #define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count) extern pt_entry_t *Sysmap; extern char *vmmap; /* map for mem, dumps, etc. */ - -vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int)); - #endif /* _KERNEL */ #endif /* !_MACHINE_PMAP_H_ */ diff --git a/sys/arch/mvme68k/include/vmparam.h b/sys/arch/mvme68k/include/vmparam.h index e4cd1ecf0474..88a9556a97c5 100644 --- a/sys/arch/mvme68k/include/vmparam.h +++ b/sys/arch/mvme68k/include/vmparam.h @@ -1,4 +1,4 @@ -/* $NetBSD: vmparam.h,v 1.5 1997/10/12 15:39:35 scw Exp $ */ +/* $NetBSD: vmparam.h,v 1.6 1998/02/21 19:03:25 scw Exp $ */ /* * Copyright (c) 1988 University of Utah. @@ -42,9 +42,13 @@ * @(#)vmparam.h 8.2 (Berkeley) 4/19/94 */ +#ifndef _MVME68K_VMPARAM_H_ +#define _MVME68K_VMPARAM_H_ + /* * Machine dependent constants for MVME68K */ + /* * USRTEXT is the start of the user text/data space, while USRSTACK * is the top (end) of the user stack. LOWPAGES and HIGHPAGES are @@ -232,4 +236,26 @@ /* pcb base */ #define pcbb(p) ((u_int)(p)->p_addr) -#define MACHINE_NONCONTIG /* VM <=> pmap interface modifier */ +/* Use new VM page bootstrap interface. */ +#define MACHINE_NEW_NONCONTIG + +#ifdef MACHINE_NEW_NONCONTIG +/* + * Constants which control the way the VM system deals with memory segments. + * The mvme68k port has two physical memory segments: 1 for onboard RAM + * and another for contiguous VMEbus RAM. + */ +#define VM_PHYSSEG_MAX 2 +#define VM_PHYSSEG_STRAT VM_PSTRAT_RANDOM +#define VM_PHYSSEG_NOADD + +/* + * pmap-specific data stored in the vm_physmem[] array. 
+ */ +struct pmap_physseg { + struct pv_entry *pvent; /* pv table for this seg */ + char *attrs; /* page attributes for this seg */ +}; +#endif + +#endif /* _MVME68K_VMPARAM_H_ */ diff --git a/sys/arch/mvme68k/mvme68k/genassym.cf b/sys/arch/mvme68k/mvme68k/genassym.cf index fbd9f63db8f8..a7de5e671fd8 100644 --- a/sys/arch/mvme68k/mvme68k/genassym.cf +++ b/sys/arch/mvme68k/mvme68k/genassym.cf @@ -1,4 +1,4 @@ -# $NetBSD: genassym.cf,v 1.7 1998/01/06 08:46:17 thorpej Exp $ +# $NetBSD: genassym.cf,v 1.8 1998/02/21 19:03:26 scw Exp $ # # Copyright (c) 1982, 1990, 1993 @@ -46,6 +46,10 @@ include include +ifdef UVM +include +endif + include include include @@ -106,7 +110,7 @@ define UPAGES UPAGES define USPACE USPACE define NBPG NBPG define PGSHIFT PGSHIFT -#define USRSTACK USRSTACK +define USRSTACK USRSTACK # boot stuff define RB_SBOOT RB_SBOOT @@ -134,7 +138,11 @@ define SSLEEP SSLEEP define SRUN SRUN # interrupt/fault metering +ifdef UVM +define UVMEXP_INTRS offsetof(struct uvmexp, intrs) +else define V_INTR offsetof(struct vmmeter, v_intr) +endif # PSL values (should just include psl.h?) define PSL_S PSL_S diff --git a/sys/arch/mvme68k/mvme68k/isr.c b/sys/arch/mvme68k/mvme68k/isr.c index cc660ed22e31..fa22656005b2 100644 --- a/sys/arch/mvme68k/mvme68k/isr.c +++ b/sys/arch/mvme68k/mvme68k/isr.c @@ -1,4 +1,4 @@ -/* $NetBSD: isr.c,v 1.7 1997/11/13 10:43:11 veego Exp $ */ +/* $NetBSD: isr.c,v 1.8 1998/02/21 19:03:26 scw Exp $ */ /*- * Copyright (c) 1996 The NetBSD Foundation, Inc. @@ -40,13 +40,22 @@ * Link and dispatch interrupts. */ +#include "opt_uvm.h" + #include #include #include #include -#include + +#ifdef UVM +#include +#include +#endif + #include +#include + #include isr_autovec_list_t isr_autovec[NISRAUTOVEC]; @@ -213,7 +222,11 @@ isrdispatch_autovec(evec) ipl = vec - ISRAUTOVEC; intrcnt[ipl]++; +#ifdef UVM + uvmexp.intrs++; +#else cnt.v_intr++; +#endif list = &isr_autovec[ipl]; if (list->lh_first == NULL) { @@ -251,7 +264,11 @@ isrdispatch_vectored(pc, evec, frame) ipl = (getsr() >> 8) & 7; intrcnt[ipl]++; +#ifdef UVM + uvmexp.intrs++; +#else cnt.v_intr++; +#endif if ((vec < ISRVECTORED) || (vec >= (ISRVECTORED + NISRVECTORED))) panic("isrdispatch_vectored: bad vec 0x%x\n", vec); diff --git a/sys/arch/mvme68k/mvme68k/locore.s b/sys/arch/mvme68k/mvme68k/locore.s index d71467be7ba0..0439222049c4 100644 --- a/sys/arch/mvme68k/mvme68k/locore.s +++ b/sys/arch/mvme68k/mvme68k/locore.s @@ -1,4 +1,4 @@ -/* $NetBSD: locore.s,v 1.32 1998/01/05 23:16:32 thorpej Exp $ */ +/* $NetBSD: locore.s,v 1.33 1998/02/21 19:03:26 scw Exp $ */ /* * Copyright (c) 1988 University of Utah. @@ -42,10 +42,11 @@ * @(#)locore.s 8.6 (Berkeley) 5/27/94 */ +#include "opt_uvm.h" + #include "assym.h" #include #include -#include /* * Temporary stack for a variety of purposes. @@ -164,20 +165,7 @@ start: | start of kernel and .text! 
RELOC(_myea, a0) movl 0xfffe0778,a0@ | XXXCDC -- HARDWIRED HEX - /* initialize memory sizes (for pmap_bootstrap) */ -#ifndef MACHINE_NONCONTIG - movl 0xfffe0774,d1 | XXXCDC -- hardwired HEX - moveq #PGSHIFT,d2 - lsrl d2,d1 | convert to page (click) number - RELOC(_maxmem, a0) - movl d1,a0@ | save as maxmem - movl a5,d0 | lowram value from ROM via boot - lsrl d2,d0 | convert to page number - subl d0,d1 | compute amount of RAM present - RELOC(_physmem, a0) - movl d1,a0@ | and physmem -#else - /* initialise list of physical memory segments */ + /* initialise list of physical memory segments for pmap_bootstrap */ RELOC(_phys_seg_list, a0) movl a5,a0@ | phys_seg_list[0].ps_start movl 0xfffe0774,d1 | End + 1 of onboard memory @@ -218,8 +206,6 @@ Lsavmaxmem: lsrl d2,d1 | convert to page (click) number RELOC(_maxmem, a0) movl d1,a0@ | save as maxmem -#endif - jra Lstart1 Lnot147: #endif @@ -393,7 +379,11 @@ Lmotommu2: Lenab1: /* select the software page size now */ lea tmpstk,sp | temporary stack +#ifdef UVM + jbsr _uvm_setpagesize | select software page size +#else jbsr _vm_set_page_size | select software page size +#endif /* set kernel stack, user SP, and initial pcb */ movl _proc0paddr,a1 | get proc0 pcb addr lea a1@(USPACE-4),sp | set kernel stack to end of area @@ -840,7 +830,11 @@ Lsigr1: _spurintr: /* Level 0 */ addql #1,_intrcnt+0 +#ifdef UVM + addql #1,_uvmexp+UVMEXP_INTRS +#else addql #1,_cnt+V_INTR +#endif jra rei _intrhand_autovec: /* Levels 1 through 6 */ @@ -973,7 +967,10 @@ Ldorte: */ #include - .globl _whichqs,_qs,_cnt,_panic +#ifndef UVM + .globl _cnt +#endif + .globl _whichqs,_qs,_panic .globl _curproc,_want_resched /* @@ -1010,7 +1007,11 @@ ENTRY(switch_exit) movl #USPACE,sp@- | size of u-area movl a0@(P_ADDR),sp@- | address of process's u-area movl _kernel_map,sp@- | map it was allocated in +#ifdef UVM + jbsr _uvm_km_free | deallocate it +#else jbsr _kmem_free | deallocate it +#endif lea sp@(12),sp | pop args jra _cpu_switch diff --git a/sys/arch/mvme68k/mvme68k/machdep.c b/sys/arch/mvme68k/mvme68k/machdep.c index db21ae1bcd1e..5bb9f30fbbde 100644 --- a/sys/arch/mvme68k/mvme68k/machdep.c +++ b/sys/arch/mvme68k/mvme68k/machdep.c @@ -1,4 +1,4 @@ -/* $NetBSD: machdep.c,v 1.33 1998/02/19 04:18:33 thorpej Exp $ */ +/* $NetBSD: machdep.c,v 1.34 1998/02/21 19:03:26 scw Exp $ */ /* * Copyright (c) 1988 University of Utah. @@ -42,6 +42,8 @@ * @(#)machdep.c 8.10 (Berkeley) 4/20/94 */ +#include "opt_uvm.h" + #include #include #include @@ -80,6 +82,10 @@ #include #include +#if defined(UVM) +#include +#endif + #include #include @@ -92,7 +98,7 @@ #include /* XXX should be pulled in by sys/kcore.h */ -#ifdef MACHINE_NONCONTIG +#ifdef MACHINE_NEW_NONCONTIG #include #endif @@ -101,7 +107,13 @@ /* the following is used externally (sysctl_hw) */ char machine[] = MACHINE; /* from */ +#if defined(UVM) +vm_map_t exec_map = NULL; +vm_map_t mb_map = NULL; +vm_map_t phys_map = NULL; +#else vm_map_t buffer_map; +#endif extern vm_offset_t avail_end; /* @@ -191,6 +203,24 @@ mvme68k_init() { int i; + /* + * Tell the VM system about available physical memory. 
+ */ +#ifdef MACHINE_NEW_NONCONTIG + for (i = 0; i < MAX_PHYS_SEGS && phys_seg_list[i].ps_start; i++) +#if defined(UVM) + uvm_page_physload(atop(phys_seg_list[i].ps_start), + atop(phys_seg_list[i].ps_end), + atop(phys_seg_list[i].ps_start), + atop(phys_seg_list[i].ps_end)); +#else + vm_page_physload(atop(phys_seg_list[i].ps_start), + atop(phys_seg_list[i].ps_end), + atop(phys_seg_list[i].ps_start), + atop(phys_seg_list[i].ps_end)); +#endif +#endif + /* Initialize interrupt handlers. */ isrinit(); @@ -218,7 +248,7 @@ mvme68k_init() * Initialize error message buffer (at end of core). * avail_end was pre-decremented in pmap_bootstrap to compensate. */ -#ifdef MACHINE_NONCONTIG +#ifdef MACHINE_NEW_NONCONTIG #define MVME_MSG_BUF_START phys_seg_list[0].ps_end #else #define MVME_MSG_BUF_START avail_end @@ -342,8 +372,8 @@ cpu_startup() identifycpu(); printf("real mem = %d", ctob(physmem)); -#ifdef MACHINE_NONCONTIG - maxaddr = 0; +#ifdef MACHINE_NEW_NONCONTIG + maxaddr = 0; for (i = 1; i < MAX_PHYS_SEGS && phys_seg_list[i].ps_start; i++) maxaddr += phys_seg_list[i].ps_end - phys_seg_list[i].ps_start; @@ -354,12 +384,17 @@ cpu_startup() printf("\n"); /* - * Fine out how much space we need, allocate it, + * Find out how much space we need, allocate it, * and then give everything true virtual addresses. */ size = (vm_size_t)allocsys((caddr_t)0); +#if defined(UVM) + if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(size))) == 0) + panic("startup: no room for tables"); +#else if ((v = (caddr_t)kmem_alloc(kernel_map, round_page(size))) == 0) panic("startup: no room for tables"); +#endif if ((allocsys(v) - v) != size) panic("startup: talbe size inconsistency"); @@ -369,15 +404,53 @@ cpu_startup() * in that they usually occupy more virtual memory than physical. */ size = MAXBSIZE * nbuf; +#if defined(UVM) + if (uvm_map(kernel_map, (vm_offset_t *) &buffers, round_page(size), + NULL, UVM_UNKNOWN_OFFSET, + UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, + UVM_ADV_NORMAL, 0)) != KERN_SUCCESS) + panic("startup: cannot allocate VM for buffers"); + minaddr = (vm_offset_t)buffers; +#else buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers, &maxaddr, size, TRUE); minaddr = (vm_offset_t)buffers; if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0, &minaddr, size, FALSE) != KERN_SUCCESS) panic("startup: cannot allocate buffers"); +#endif base = bufpages / nbuf; residual = bufpages % nbuf; for (i = 0; i < nbuf; i++) { +#if defined(UVM) + vm_size_t curbufsize; + vm_offset_t curbuf; + struct vm_page *pg; + + /* + * Each buffer has MAXBSIZE bytes of VM space allocated. Of + * that MAXBSIZE space, we allocate and map (base+1) pages + * for the first "residual" buffers, and then we allocate + * "base" pages for the rest. + */ + curbuf = (vm_offset_t) buffers + (i * MAXBSIZE); + curbufsize = CLBYTES * ((i < residual) ? (base+1) : base); + + while (curbufsize) { + pg = uvm_pagealloc(NULL, 0, NULL); + if (pg == NULL) + panic("cpu_startup: not enough memory for " + "buffer cache"); +#ifdef PMAP_NEW + pmap_kenter_pgs(curbuf, &pg, 1); +#else + pmap_enter(kernel_map->pmap, curbuf, + VM_PAGE_TO_PHYS(pg), VM_PROT_ALL, TRUE); +#endif + curbuf += PAGE_SIZE; + curbufsize -= PAGE_SIZE; + } +#else /* ! UVM */ vm_size_t curbufsize; vm_offset_t curbuf; @@ -392,24 +465,42 @@ cpu_startup() curbufsize = CLBYTES * (i < residual ? 
base+1 : base); vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE); vm_map_simplify(buffer_map, curbuf); +#endif /* UVM */ } + /* * Allocate a submap for exec arguments. This map effectively * limits the number of processes exec'ing at any time. */ +#if defined(UVM) + exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, + 16*NCARGS, TRUE, FALSE, NULL); +#else exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, 16*NCARGS, TRUE); +#endif /* * Allocate a submap for physio */ +#if defined(UVM) + phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, + VM_PHYS_SIZE, TRUE, FALSE, NULL); +#else phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, VM_PHYS_SIZE, TRUE); +#endif /* * Finally, allocate mbuf cluster submap. */ +#if defined(UVM) + mb_map = uvm_km_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr, + VM_MBUF_SIZE, FALSE, FALSE, NULL); +#else mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr, VM_MBUF_SIZE, FALSE); +#endif + /* * Initialize callouts */ @@ -421,7 +512,11 @@ cpu_startup() #ifdef DEBUG pmapdebug = opmapdebug; #endif - printf("avail mem = %d\n", ptoa(cnt.v_free_count)); +#if defined(UVM) + printf("avail mem = %ld\n", ptoa(uvmexp.free)); +#else + printf("avail mem = %ld\n", ptoa(cnt.v_free_count)); +#endif printf("using %d buffers containing %d bytes of memory\n", nbuf, bufpages * CLBYTES); @@ -432,18 +527,31 @@ cpu_startup() * XXX Should just change KERNBASE and VM_MIN_KERNEL_ADDRESS, * XXX but not right now. */ +#if defined(UVM) + if (uvm_map_protect(kernel_map, 0, round_page(&kernel_text), + UVM_PROT_NONE, TRUE) != KERN_SUCCESS) + panic("can't mark pre-text pages off-limits"); +#else if (vm_map_protect(kernel_map, 0, round_page(&kernel_text), VM_PROT_NONE, TRUE) != KERN_SUCCESS) panic("can't mark pre-text pages off-limits"); +#endif /* * Tell the VM system that writing to the kernel text isn't allowed. * If we don't, we might end up COW'ing the text segment! */ +#if defined(UVM) + if (uvm_map_protect(kernel_map, trunc_page(&kernel_text), + round_page(&etext), UVM_PROT_READ|UVM_PROT_EXEC, TRUE) + != KERN_SUCCESS) + panic("can't protect kernel text"); +#else if (vm_map_protect(kernel_map, trunc_page(&kernel_text), round_page(&etext), VM_PROT_READ|VM_PROT_EXECUTE, TRUE) != KERN_SUCCESS) panic("can't protect kernel text"); +#endif /* * Set up CPU-specific registers, cache, etc. @@ -517,7 +625,9 @@ allocsys(v) if (nswbuf > 256) nswbuf = 256; /* sanity */ } +#if !defined(UVM) valloc(swbuf, struct buf, nswbuf); +#endif valloc(buf, struct buf, nbuf); return (v); } diff --git a/sys/arch/mvme68k/mvme68k/mem.c b/sys/arch/mvme68k/mvme68k/mem.c index 163dd84b5679..ccb2504aa364 100644 --- a/sys/arch/mvme68k/mvme68k/mem.c +++ b/sys/arch/mvme68k/mvme68k/mem.c @@ -1,4 +1,4 @@ -/* $NetBSD: mem.c,v 1.2 1997/02/02 08:27:15 thorpej Exp $ */ +/* $NetBSD: mem.c,v 1.3 1998/02/21 19:03:26 scw Exp $ */ /* * Copyright (c) 1988 University of Utah. 
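
[Reviewer note, not part of the patch: the mvme68k_init() hunk in machdep.c above registers each physical RAM segment with the VM system through uvm_page_physload() (or vm_page_physload()), and the new pa_to_pvh()/PAGE_IS_MANAGED() macros in pmap.c further down resolve a physical address back to its segment with vm_physseg_find(). The self-contained C sketch below illustrates that register-then-look-up scheme; physload(), physseg_find(), the two example memory ranges and the 4KB page size are invented stand-ins for the real UVM interfaces, for the demo only.]

#include <stdio.h>

#define NSEG_MAX 2			/* mirrors VM_PHYSSEG_MAX */
#define PGSHIFT	 12			/* assume 4KB pages */
#define atop(pa) ((pa) >> PGSHIFT)	/* address to page number */

struct physseg {
	unsigned long start, end;	/* page frame numbers */
};

static struct physseg seg[NSEG_MAX];
static int nseg;

/* analogue of uvm_page_physload(): record one contiguous segment */
static void
physload(unsigned long spfn, unsigned long epfn)
{
	seg[nseg].start = spfn;
	seg[nseg].end = epfn;
	nseg++;
}

/* analogue of vm_physseg_find(): map a page to (bank, offset), or -1 */
static int
physseg_find(unsigned long pfn, int *offp)
{
	int bank;

	for (bank = 0; bank < nseg; bank++) {
		if (pfn >= seg[bank].start && pfn < seg[bank].end) {
			if (offp)
				*offp = (int)(pfn - seg[bank].start);
			return bank;
		}
	}
	return -1;			/* unmanaged page */
}

int
main(void)
{
	int bank, off;

	physload(atop(0x00000000UL), atop(0x00800000UL));	/* onboard RAM */
	physload(atop(0x10000000UL), atop(0x10400000UL));	/* VMEbus RAM */

	bank = physseg_find(atop(0x10003000UL), &off);
	printf("bank %d, page offset %d\n", bank, off);		/* bank 1, 3 */
	return 0;
}
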
@@ -44,16 +44,22 @@ * Memory special file */ +#include "opt_uvm.h" + #include -#include -#include #include -#include +#include +#include #include +#include +#include #include #include +#ifdef UVM +#include +#endif extern u_int lowram; static caddr_t devzeropage; @@ -85,9 +91,9 @@ mmrw(dev, uio, flags) struct uio *uio; int flags; { - register vm_offset_t o, v; - register int c; - register struct iovec *iov; + vm_offset_t o, v; + int c; + struct iovec *iov; int error = 0; static int physlock; @@ -137,9 +143,15 @@ mmrw(dev, uio, flags) case 1: v = uio->uio_offset; c = min(iov->iov_len, MAXPHYS); +#ifdef UVM + if (!uvm_kernacc((caddr_t)v, c, + uio->uio_rw == UIO_READ ? B_READ : B_WRITE)) + return (EFAULT); +#else if (!kernacc((caddr_t)v, c, uio->uio_rw == UIO_READ ? B_READ : B_WRITE)) return (EFAULT); +#endif error = uiomove((caddr_t)v, c, uio); continue; diff --git a/sys/arch/mvme68k/mvme68k/pmap.c b/sys/arch/mvme68k/mvme68k/pmap.c index d997e660fd4e..34f1f3a155b4 100644 --- a/sys/arch/mvme68k/mvme68k/pmap.c +++ b/sys/arch/mvme68k/mvme68k/pmap.c @@ -1,4 +1,4 @@ -/* $NetBSD: pmap.c,v 1.12 1998/01/31 01:33:04 ross Exp $ */ +/* $NetBSD: pmap.c,v 1.13 1998/02/21 19:03:26 scw Exp $ */ /* * Copyright (c) 1991, 1993 @@ -94,6 +94,8 @@ * and to when physical maps must be made correct. */ +#include "opt_uvm.h" + #include #include #include @@ -107,9 +109,13 @@ #include #include +#if defined(UVM) +#include +#endif + #include -#ifdef MACHINE_NONCONTIG +#if defined(MACHINE_NEW_NONCONTIG) #include #endif @@ -270,20 +276,21 @@ vm_size_t Sysptsize = VM_KERNEL_PT_PAGES; struct pmap kernel_pmap_store; vm_map_t st_map, pt_map; +#if defined(UVM) +struct vm_map st_map_store, pt_map_store; +#endif vm_offset_t avail_start; /* PA of first available physical page */ vm_offset_t avail_end; /* PA of last available physical page */ vm_size_t mem_size; /* memory size in bytes */ -#ifdef MACHINE_NONCONTIG +#if defined(MACHINE_NEW_NONCONTIG) vm_size_t avail_remaining; vm_offset_t avail_next; struct phys_seg_list_t phys_seg_list[MAX_PHYS_SEGS]; #endif vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss)*/ vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ -vm_offset_t vm_first_phys; /* PA of first managed page */ -vm_offset_t vm_last_phys; /* PA just past last managed page */ -int npages; +int page_cnt; boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? 
*/ struct pv_entry *pv_table; @@ -298,6 +305,36 @@ int pmap_aliasmask; /* seperation at which VA aliasing ok */ int protostfree; /* prototype (default) free ST map */ #endif +#if !defined(MACHINE_NEW_NONCONTIG) +vm_offset_t vm_first_phys; /* PA of first managed page */ +vm_offset_t vm_last_phys; /* PA just past last managed page */ + +#define PAGE_IS_MANAGED(pa) (pmap_initialized && \ + (pa) >= vm_first_phys && (pa) < vm_last_phys) + +#define pa_to_pvh(pa) (&pv_table[pmap_page_index((pa))]) +#define pa_to_attribute(pa) (&pmap_attributes[pmap_page_index((pa))]) +#else +#define PAGE_IS_MANAGED(pa) (pmap_initialized && \ + vm_physseg_find(atop((pa)), NULL) != -1) + +#define pa_to_pvh(pa) \ +({ \ + int bank_, pg_; \ + \ + bank_ = vm_physseg_find(atop((pa)), &pg_); \ + &vm_physmem[bank_].pmseg.pvent[pg_]; \ +}) + +#define pa_to_attribute(pa) \ +({ \ + int bank_, pg_; \ + \ + bank_ = vm_physseg_find(atop((pa)), &pg_); \ + &vm_physmem[bank_].pmseg.attrs[pg_]; \ +}) +#endif /* MACHINE_NEW_NONCONTIG */ + /* * Internal routines */ @@ -305,6 +342,7 @@ void pmap_remove_mapping __P((pmap_t, vm_offset_t, pt_entry_t *, int)); boolean_t pmap_testbit __P((vm_offset_t, int)); void pmap_changebit __P((vm_offset_t, int, boolean_t)); void pmap_enter_ptpage __P((pmap_t, vm_offset_t)); +void pmap_collect1 __P((pmap_t, vm_offset_t, vm_offset_t)); #ifdef DEBUG void pmap_pvdump __P((vm_offset_t)); void pmap_check_wiring __P((char *, vm_offset_t)); @@ -314,6 +352,7 @@ void pmap_check_wiring __P((char *, vm_offset_t)); #define PRM_TFLUSH 1 #define PRM_CFLUSH 2 +#if !defined(MACHINE_NEW_NONCONTIG) /* * Bootstrap memory allocator. This function allows for early dynamic * memory allocation until the virtual memory system has been bootstrapped. @@ -344,6 +383,26 @@ pmap_bootstrap_alloc(size) bzero ((caddr_t) val, size); return ((void *) val); } +#endif /* !MACHINE_NEW_NONCONTIG */ + +#if defined(MACHINE_NEW_NONCONTIG) +/* + * Routine: pmap_virtual_space + * + * Function: + * Report the range of available kernel virtual address + * space to the VM system during bootstrap. Called by + * vm_bootstrap_steal_memory(). + */ +void +pmap_virtual_space(vstartp, vendp) + vm_offset_t *vstartp, *vendp; +{ + + *vstartp = virtual_avail; + *vendp = virtual_end; +} +#endif /* MACHINE_NEW_NONCONTIG */ /* * Initialize the pmap module. @@ -351,7 +410,7 @@ pmap_bootstrap_alloc(size) * system needs to map virtual memory. */ void -#ifdef MACHINE_NONCONTIG +#if defined(MACHINE_NEW_NONCONTIG) pmap_init() #else pmap_init(phys_start, phys_end) @@ -361,11 +420,17 @@ pmap_init(phys_start, phys_end) vm_offset_t addr, addr2; vm_size_t s; int rv; + int npages; +#if defined(MACHINE_NEW_NONCONTIG) + struct pv_entry *pv; + char *attr; + int bank; +#endif #ifdef DEBUG if (pmapdebug & PDB_INIT) -#ifdef MACHINE_NONCONTIG - printf("pmap_init(%x, %x)\n", avail_start, avail_end); +#if defined(MACHINE_NEW_NONCONTIG) + printf("pmap_init()\n"); #else printf("pmap_init(%x, %x)\n", phys_start, phys_end); #endif @@ -374,6 +439,29 @@ pmap_init(phys_start, phys_end) * Now that kernel map has been allocated, we can mark as * unavailable regions which we have mapped in locore. 
*/ +#if defined(UVM) + addr = (vm_offset_t) intiobase; + if (uvm_map(kernel_map, &addr, m68k_ptob(IIOMAPSIZE), + NULL, UVM_UNKNOWN_OFFSET, + UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, + UVM_INH_NONE, UVM_ADV_RANDOM, + UVM_FLAG_FIXED)) != KERN_SUCCESS) + goto bogons; + addr = (vm_offset_t) Sysmap; + if (uvm_map(kernel_map, &addr, HP_MAX_PTSIZE, + NULL, UVM_UNKNOWN_OFFSET, + UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, + UVM_INH_NONE, UVM_ADV_RANDOM, + UVM_FLAG_FIXED)) != KERN_SUCCESS) { + /* + * If this fails, it is probably because the static + * portion of the kernel page table isn't big enough + * and we overran the page table map. + */ +bogons: + panic("pmap_init: bogons in the VM system!\n"); + } +#else addr = (vm_offset_t) intiobase; (void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0, &addr, m68k_ptob(IIOMAPSIZE), FALSE); @@ -391,6 +479,7 @@ if (addr != (vm_offset_t)Sysmap) bogons: panic("pmap_init: bogons in the VM system!\n"); +#endif /* ! UVM */ #ifdef DEBUG if (pmapdebug & PDB_INIT) { @@ -405,32 +494,57 @@ * Allocate memory for random pmap data structures. Includes the * initial segment table, pv_head_table and pmap_attributes. */ -#ifdef MACHINE_NONCONTIG - { - int i; - for (npages = 0, i = 0; phys_seg_list[i].ps_start; ++i) - npages += atop(phys_seg_list[i].ps_end - - phys_seg_list[i].ps_start); - } +#if defined(MACHINE_NEW_NONCONTIG) + for (page_cnt = 0, bank = 0; bank < vm_nphysseg; bank++) + page_cnt += (vm_physmem[bank].end - vm_physmem[bank].start); #else - npages = atop(phys_end - phys_start); + page_cnt = atop(phys_end - phys_start); #endif - s = (vm_size_t) (HP_STSIZE + sizeof(struct pv_entry) * npages + npages); + s = HP_STSIZE; /* Segtabzero */ + s += page_cnt * sizeof(struct pv_entry); /* pv table */ + s += page_cnt * sizeof(char); /* attribute table */ s = round_page(s); +#if defined(UVM) + addr = (vm_offset_t) uvm_km_zalloc(kernel_map, s); + if (addr == 0) + panic("pmap_init: can't allocate data structures"); +#else addr = (vm_offset_t) kmem_alloc(kernel_map, s); +#endif + Segtabzero = (st_entry_t *) addr; Segtabzeropa = (st_entry_t *) pmap_extract(pmap_kernel(), addr); addr += HP_STSIZE; + pv_table = (struct pv_entry *) addr; - addr += sizeof(struct pv_entry) * npages; + addr += page_cnt * sizeof(struct pv_entry); + pmap_attributes = (char *) addr; + #ifdef DEBUG if (pmapdebug & PDB_INIT) - printf("pmap_init: %x bytes: npages %x s0 %x(%x) tbl %x atr %x\n", - s, npages, Segtabzero, Segtabzeropa, + printf("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) " + "tbl %p atr %p\n", + s, page_cnt, Segtabzero, Segtabzeropa, pv_table, pmap_attributes); #endif +#if defined(MACHINE_NEW_NONCONTIG) + /* + * Now that the pv and attribute tables have been allocated, + * assign them to the memory segments. + */ + pv = pv_table; + attr = pmap_attributes; + for (bank = 0; bank < vm_nphysseg; bank++) { + npages = vm_physmem[bank].end - vm_physmem[bank].start; + vm_physmem[bank].pmseg.pvent = pv; + vm_physmem[bank].pmseg.attrs = attr; + pv += npages; + attr += npages; + } +#endif + /* * Allocate physical memory for kernel PT pages and their management. * We need 1 PT page per possible task plus some slop. @@ -442,17 +556,35 @@ * Verify that space will be allocated in region for which * we already have kernel PT pages.
*/ +#if defined(UVM) + addr = 0; + rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, + UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, + UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)); + if (rv != KERN_SUCCESS || (addr + s) >= (vm_offset_t)Sysmap) + panic("pmap_init: kernel PT too small"); + rv = uvm_unmap(kernel_map, addr, addr + s, FALSE); + if (rv != KERN_SUCCESS) + panic("pmap_init: uvm_unmap failed"); +#else addr = 0; rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE); if (rv != KERN_SUCCESS || addr + s >= (vm_offset_t)Sysmap) panic("pmap_init: kernel PT too small"); vm_map_remove(kernel_map, addr, addr + s); +#endif /* * Now allocate the space and link the pages together to * form the KPT free list. */ +#if defined(UVM) + addr = (vm_offset_t) uvm_km_zalloc(kernel_map, s); + if (addr == 0) + panic("pmap_init: cannot allocate KPT free list"); +#else addr = (vm_offset_t) kmem_alloc(kernel_map, s); +#endif s = ptoa(npages); addr2 = addr + s; kpt_pages = &((struct kpt_page *)addr2)[npages]; @@ -469,10 +601,33 @@ bogons: #endif #ifdef DEBUG if (pmapdebug & PDB_INIT) - printf("pmap_init: KPT: %d pages from %x to %x\n", + printf("pmap_init: KPT: %ld pages from %lx to %lx\n", atop(s), addr, addr + s); #endif +#if defined(UVM) + /* + * Allocate the segment table map and the page table map + */ + s = maxproc + HP_STSIZE; + st_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, TRUE, + FALSE, &st_map_store); + + addr = HP_PTBASE; + if ((HP_PTMAXSIZE / HP_MAX_PTSIZE) < maxproc) { + s = HP_PTMAXSIZE; + /* + * XXX We don't want to hang when we run out of + * page tables, so we lower maxproc so that fork() + * will fail instead. Note that root could still raise + * this value via sysctl(3). + */ + maxproc = (HP_PTMAXSIZE / HP_MAX_PTSIZE); + } else + s = (maxproc * HP_MAX_PTSIZE); + pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, TRUE, + TRUE, &pt_map_store); +#else /* * Allocate the segment table map */ @@ -508,8 +663,9 @@ bogons: panic("pmap_init: cannot map range to pt_map"); #ifdef DEBUG if (pmapdebug & PDB_INIT) - printf("pmap_init: pt_map [%x - %x)\n", addr, addr2); + printf("pmap_init: pt_map [%lx - %lx)\n", addr, addr2); #endif +#endif /* ! UVM */ #if defined(M68040) if (mmutype == MMU_68040) { @@ -522,89 +678,13 @@ bogons: /* * Now it is safe to enable pv_table recording. 
*/ -#ifdef MACHINE_NONCONTIG - vm_first_phys = avail_start; - vm_last_phys = avail_end; -#else +#if !defined(MACHINE_NEW_NONCONTIG) vm_first_phys = phys_start; vm_last_phys = phys_end; #endif pmap_initialized = TRUE; } -#ifdef MACHINE_NONCONTIG -unsigned int -pmap_free_pages() -{ - return avail_remaining; -} - -int -pmap_next_page(addrp) - vm_offset_t *addrp; -{ - static int cur_seg = 0; -#ifdef DEBUG - static int foo = 0; - if ( foo == 0 && pmapdebug & PDB_INIT ) { - int i; - for (i = 0; phys_seg_list[i].ps_start; i++) { - printf("pmap_next_page: Seg%d.start 0x%08lx, end 0x%08lx, page %d\n", - i, phys_seg_list[i].ps_start, phys_seg_list[i].ps_end, - phys_seg_list[i].ps_startpage); - } - foo = 1; - } -#endif - - if (phys_seg_list[cur_seg].ps_start == 0) - return FALSE; - - if (avail_next == phys_seg_list[cur_seg].ps_end) { - if ( ++cur_seg >= MAX_PHYS_SEGS ) - return FALSE; - avail_next = phys_seg_list[cur_seg].ps_start; -#ifdef DEBUG - if (pmapdebug & PDB_INIT) - printf("pmap_next_page: next %lx remain %ld\n", - avail_next, avail_remaining); -#endif - } - - if (avail_next == 0) - return FALSE; - *addrp = avail_next; - avail_next += NBPG; - avail_remaining--; - return TRUE; -} - -int -pmap_page_index(pa) - vm_offset_t pa; -{ - struct phys_seg_list_t *s = &phys_seg_list[0]; - - while (s->ps_start) { - if (pa >= s->ps_start && pa < s->ps_end) - return (m68k_btop(pa - s->ps_start) + s->ps_startpage); - ++s; - } - return -1; -} - -void -pmap_virtual_space(startp, endp) - vm_offset_t *startp; - vm_offset_t *endp; -{ - *startp = virtual_avail; - *endp = virtual_end; -} -#else -#define pmap_page_index(pa) (pa_index(pa)) -#endif /* MACHINE_NONCONTIG */ - struct pv_entry * pmap_alloc_pv() { @@ -613,9 +693,15 @@ pmap_alloc_pv() int i; if (pv_nfree == 0) { +#if defined(UVM) + pvp = (struct pv_page *)uvm_km_zalloc(kernel_map, NBPG); + if (pvp == 0) + panic("pmap_alloc_pv: uvm_km_zalloc() failed"); +#else pvp = (struct pv_page *)kmem_alloc(kernel_map, NBPG); if (pvp == 0) panic("pmap_alloc_pv: kmem_alloc() failed"); +#endif pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1]; for (i = NPVPPG - 2; i; i--, pv++) pv->pv_next = pv + 1; @@ -643,8 +729,7 @@ void pmap_free_pv(pv) struct pv_entry *pv; { - register struct pv_page *pvp; - register int i; + struct pv_page *pvp; pvp = (struct pv_page *) trunc_page(pv); switch (++pvp->pvp_pgi.pgi_nfree) { @@ -658,7 +743,11 @@ pmap_free_pv(pv) case NPVPPG: pv_nfree -= NPVPPG - 1; TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list); +#if defined(UVM) + uvm_km_free(kernel_map, (vm_offset_t)pvp, NBPG); +#else kmem_free(kernel_map, (vm_offset_t)pvp, NBPG); +#endif break; } } @@ -688,7 +777,7 @@ pmap_collect_pv() if (pv_page_collectlist.tqh_first == 0) return; - for (ph = &pv_table[npages - 1]; ph >= &pv_table[0]; ph--) { + for (ph = &pv_table[page_cnt - 1]; ph >= &pv_table[0]; ph--) { if (ph->pv_pmap == 0) continue; s = splimp(); @@ -716,7 +805,11 @@ pmap_collect_pv() for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) { npvp = pvp->pvp_pgi.pgi_list.tqe_next; +#if defined(UVM) + uvm_km_free(kernel_map, (vm_offset_t)pvp, NBPG); +#else kmem_free(kernel_map, (vm_offset_t)pvp, NBPG); +#endif } } @@ -735,7 +828,7 @@ pmap_map(va, spa, epa, prot) #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) - printf("pmap_map(%x, %x, %x, %x)\n", va, spa, epa, prot); + printf("pmap_map(%lx, %lx, %lx, %x)\n", va, spa, epa, prot); #endif while (spa < epa) { @@ -762,11 +855,11 @@ pmap_t pmap_create(size) vm_size_t size; { - register pmap_t pmap; + pmap_t pmap; #ifdef DEBUG if (pmapdebug & 
(PDB_FOLLOW|PDB_CREATE)) - printf("pmap_create(%x)\n", size); + printf("pmap_create(%lx)\n", size); #endif /* @@ -792,12 +885,12 @@ pmap_create(size) */ void pmap_pinit(pmap) - register struct pmap *pmap; + struct pmap *pmap; { #ifdef DEBUG if (pmapdebug & (PDB_FOLLOW|PDB_CREATE)) - printf("pmap_pinit(%x)\n", pmap); + printf("pmap_pinit(%p)\n", pmap); #endif /* @@ -823,7 +916,7 @@ pmap_pinit(pmap) */ void pmap_destroy(pmap) - register pmap_t pmap; + pmap_t pmap; { int count; @@ -832,7 +925,7 @@ pmap_destroy(pmap) #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) - printf("pmap_destroy(%x)\n", pmap); + printf("pmap_destroy(%p)\n", pmap); #endif simple_lock(&pmap->pm_lock); @@ -851,12 +944,12 @@ pmap_destroy(pmap) */ void pmap_release(pmap) - register struct pmap *pmap; + struct pmap *pmap; { #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) - printf("pmap_release(%x)\n", pmap); + printf("pmap_release(%p)\n", pmap); #endif #ifdef notdef /* DIAGNOSTIC */ @@ -867,11 +960,21 @@ pmap_release(pmap) #endif if (pmap->pm_ptab) +#if defined(UVM) + uvm_km_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab, + HP_MAX_PTSIZE); +#else kmem_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab, HP_MAX_PTSIZE); +#endif if (pmap->pm_stab != Segtabzero) +#if defined(UVM) + uvm_km_free_wakeup(st_map, (vm_offset_t)pmap->pm_stab, + HP_STSIZE); +#else kmem_free_wakeup(st_map, (vm_offset_t)pmap->pm_stab, HP_STSIZE); +#endif } /* @@ -887,7 +990,7 @@ pmap_reference(pmap) #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) - printf("pmap_reference(%x)\n", pmap); + printf("pmap_reference(%p)\n", pmap); #endif simple_lock(&pmap->pm_lock); @@ -929,17 +1032,17 @@ pmap_deactivate(p) */ void pmap_remove(pmap, sva, eva) - register pmap_t pmap; - register vm_offset_t sva, eva; + pmap_t pmap; + vm_offset_t sva, eva; { - register vm_offset_t nssva; - register pt_entry_t *pte; + vm_offset_t nssva; + pt_entry_t *pte; boolean_t firstpage, needcflush; int flags; #ifdef DEBUG if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT)) - printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva); + printf("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva); #endif if (pmap == NULL) @@ -1013,8 +1116,7 @@ pmap_remove(pmap, sva, eva) * 2. 
if it is a user mapping not for the current process, * it won't be there */ - if (pmap_aliasmask && - (pmap == pmap_kernel() || pmap != curproc->p_vmspace->vm_map.pmap)) + if (pmap_aliasmask && !active_user_pmap(pmap)) needcflush = FALSE; #ifdef DEBUG if (pmap_aliasmask && (pmapvacflush & PVF_REMOVE)) { @@ -1052,15 +1154,15 @@ pmap_page_protect(pa, prot) vm_offset_t pa; vm_prot_t prot; { - register struct pv_entry *pv; + struct pv_entry *pv; int s; #ifdef DEBUG if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) || - prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)) - printf("pmap_page_protect(%x, %x)\n", pa, prot); + (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))) + printf("pmap_page_protect(%lx, %x)\n", pa, prot); #endif - if (pa < vm_first_phys || pa >= vm_last_phys) + if (PAGE_IS_MANAGED(pa) == 0) return; switch (prot) { @@ -1079,7 +1181,7 @@ pmap_page_protect(pa, prot) pv = pa_to_pvh(pa); s = splimp(); while (pv->pv_pmap != NULL) { - register pt_entry_t *pte; + pt_entry_t *pte; pte = pmap_pte(pv->pv_pmap, pv->pv_va); #ifdef DEBUG @@ -1094,7 +1196,7 @@ pmap_page_protect(pa, prot) pv = pv->pv_next; #ifdef DEBUG if (pmapdebug & PDB_PARANOIA) - printf("%s wired mapping for %x not removed\n", + printf("%s wired mapping for %lx not removed\n", "pmap_page_protect:", pa); #endif if (pv == NULL) @@ -1110,18 +1212,18 @@ pmap_page_protect(pa, prot) */ void pmap_protect(pmap, sva, eva, prot) - register pmap_t pmap; - register vm_offset_t sva, eva; + pmap_t pmap; + vm_offset_t sva, eva; vm_prot_t prot; { - register vm_offset_t nssva; - register pt_entry_t *pte; + vm_offset_t nssva; + pt_entry_t *pte; boolean_t firstpage, needtflush; int isro; #ifdef DEBUG if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) - printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot); + printf("pmap_protect(%p, %lx, %lx, %x)\n", pmap, sva, eva, prot); #endif if (pmap == NULL) @@ -1229,21 +1331,21 @@ pmap_protect(pmap, sva, eva, prot) */ void pmap_enter(pmap, va, pa, prot, wired) - register pmap_t pmap; + pmap_t pmap; vm_offset_t va; - register vm_offset_t pa; + vm_offset_t pa; vm_prot_t prot; boolean_t wired; { - register pt_entry_t *pte; - register int npte; + pt_entry_t *pte; + int npte; vm_offset_t opa; boolean_t cacheable = TRUE; boolean_t checkpv = TRUE; #ifdef DEBUG if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) - printf("pmap_enter(%x, %x, %x, %x, %x)\n", + printf("pmap_enter(%p, %lx, %lx, %x, %x)\n", pmap, va, pa, prot, wired); #endif if (pmap == NULL) @@ -1259,8 +1361,13 @@ pmap_enter(pmap, va, pa, prot, wired) * For user mapping, allocate kernel VM resources if necessary. */ if (pmap->pm_ptab == NULL) +#if defined(UVM) + pmap->pm_ptab = (pt_entry_t *) + uvm_km_zalloc(pt_map, HP_MAX_PTSIZE); +#else pmap->pm_ptab = (pt_entry_t *) kmem_alloc_wait(pt_map, HP_MAX_PTSIZE); +#endif /* * Segment table entry not valid, we need a new PT page @@ -1273,7 +1380,7 @@ pmap_enter(pmap, va, pa, prot, wired) opa = pmap_pte_pa(pte); #ifdef DEBUG if (pmapdebug & PDB_ENTER) - printf("enter: pte %x, *pte %x\n", pte, *pte); + printf("enter: pte %p, *pte %x\n", pte, *pte); #endif /* @@ -1325,7 +1432,7 @@ pmap_enter(pmap, va, pa, prot, wired) if (opa) { #ifdef DEBUG if (pmapdebug & PDB_ENTER) - printf("enter: removing old mapping %x\n", va); + printf("enter: removing old mapping %lx\n", va); #endif pmap_remove_mapping(pmap, va, pte, PRM_TFLUSH|PRM_CFLUSH); #ifdef PMAPSTATS @@ -1339,16 +1446,21 @@ pmap_enter(pmap, va, pa, prot, wired) * is a valid mapping in the page. 
*/ if (pmap != pmap_kernel()) +#if defined(UVM) + (void) uvm_map_pageable(pt_map, trunc_page(pte), + round_page(pte+1), FALSE); +#else (void) vm_map_pageable(pt_map, trunc_page(pte), round_page(pte+1), FALSE); +#endif /* * Enter on the PV list if part of our managed memory * Note that we raise IPL while manipulating pv_table * since pmap_enter can be called at interrupt time. */ - if (pa >= vm_first_phys && pa < vm_last_phys) { - register struct pv_entry *pv, *npv; + if (PAGE_IS_MANAGED(pa)) { + struct pv_entry *pv, *npv; int s; #ifdef PMAPSTATS @@ -1358,7 +1470,7 @@ pmap_enter(pmap, va, pa, prot, wired) s = splimp(); #ifdef DEBUG if (pmapdebug & PDB_ENTER) - printf("enter: pv at %x: %x/%x/%x\n", + printf("enter: pv at %p: %lx/%p/%p\n", pv, pv->pv_va, pv->pv_pmap, pv->pv_next); #endif /* @@ -1424,7 +1536,7 @@ pmap_enter(pmap, va, pa, prot, wired) if (pv->pv_flags & PV_CI) { #ifdef DEBUG if (pmapdebug & PDB_CACHE) - printf("enter: pa %x already CI'ed\n", + printf("enter: pa %lx already CI'ed\n", pa); #endif checkpv = cacheable = FALSE; @@ -1436,7 +1548,7 @@ pmap_enter(pmap, va, pa, prot, wired) (va & pmap_aliasmask)))) { #ifdef DEBUG if (pmapdebug & PDB_CACHE) - printf("enter: pa %x CI'ing all\n", + printf("enter: pa %lx CI'ing all\n", pa); #endif cacheable = FALSE; @@ -1556,15 +1668,15 @@ validate: */ void pmap_change_wiring(pmap, va, wired) - register pmap_t pmap; + pmap_t pmap; vm_offset_t va; boolean_t wired; { - register pt_entry_t *pte; + pt_entry_t *pte; #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) - printf("pmap_change_wiring(%x, %x, %x)\n", pmap, va, wired); + printf("pmap_change_wiring(%p, %lx, %x)\n", pmap, va, wired); #endif if (pmap == NULL) return; @@ -1578,7 +1690,7 @@ pmap_change_wiring(pmap, va, wired) */ if (!pmap_ste_v(pmap, va)) { if (pmapdebug & PDB_PARANOIA) - printf("pmap_change_wiring: invalid STE for %x\n", va); + printf("pmap_change_wiring: invalid STE for %lx\n", va); return; } /* @@ -1587,7 +1699,7 @@ pmap_change_wiring(pmap, va, wired) */ if (!pmap_pte_v(pte)) { if (pmapdebug & PDB_PARANOIA) - printf("pmap_change_wiring: invalid PTE for %x\n", va); + printf("pmap_change_wiring: invalid PTE for %lx\n", va); } #endif /* @@ -1613,14 +1725,14 @@ pmap_change_wiring(pmap, va, wired) vm_offset_t pmap_extract(pmap, va) - register pmap_t pmap; + pmap_t pmap; vm_offset_t va; { - register vm_offset_t pa; + vm_offset_t pa; #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) - printf("pmap_extract(%x, %x) -> ", pmap, va); + printf("pmap_extract(%p, %lx) -> ", pmap, va); #endif pa = 0; if (pmap && pmap_ste_v(pmap, va)) @@ -1629,7 +1741,7 @@ pmap_extract(pmap, va) pa = (pa & PG_FRAME) | (va & ~PG_FRAME); #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) - printf("%x\n", pa); + printf("%lx\n", pa); #endif return(pa); } @@ -1650,7 +1762,7 @@ void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr) { #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) - printf("pmap_copy(%x, %x, %x, %x, %x)\n", + printf("pmap_copy(%p, %p, %lx, %lx, %lx)\n", dst_pmap, src_pmap, dst_addr, len, src_addr); #endif } @@ -1687,29 +1799,61 @@ void pmap_collect(pmap) pmap_t pmap; { - register vm_offset_t pa; - register struct pv_entry *pv; - register pt_entry_t *pte; - vm_offset_t kpa; +#if defined(MACHINE_NEW_NONCONTIG) + int bank, s; +#else int s; +#endif /* MACHINE_NEW_NONCONTIG */ -#ifdef DEBUG - st_entry_t *ste; - int opmapdebug; -#endif if (pmap != pmap_kernel()) return; #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) - printf("pmap_collect(%x)\n", pmap); + printf("pmap_collect(%p)\n", pmap); #endif #ifdef PMAPSTATS 
kpt_stats.collectscans++; #endif s = splimp(); - for (pa = vm_first_phys; pa < vm_last_phys; pa += NBPG) { - register struct kpt_page *kpt, **pkpt; +#if defined(MACHINE_NEW_NONCONTIG) + for (bank = 0; bank < vm_nphysseg; bank++) + pmap_collect1(pmap, ptoa(vm_physmem[bank].start), + ptoa(vm_physmem[bank].end)); +#else + pmap_collect1(pmap, vm_first_phys, vm_last_phys); +#endif + splx(s); + +#ifdef notyet + /* Go compact and garbage-collect the pv_table */ + pmap_collect_pv(); +#endif +} + +/* + * Routine: pmap_collect1() + * + * Function: + * Helper function for pmap_collect(). Do the actual + * garbage-collection of range of physical addresses. + */ +void +pmap_collect1(pmap, startpa, endpa) + pmap_t pmap; + vm_offset_t startpa, endpa; +{ + vm_offset_t pa; + struct pv_entry *pv; + pt_entry_t *pte; + vm_offset_t kpa; +#ifdef DEBUG + st_entry_t *ste; + int opmapdebug = 0 /* XXX initialize to quiet gcc -Wall */; +#endif + + for (pa = startpa; pa < endpa; pa += NBPG) { + struct kpt_page *kpt, **pkpt; /* * Locate physical pages which are being used as kernel @@ -1721,7 +1865,7 @@ pmap_collect(pmap) do { if (pv->pv_ptste && pv->pv_ptpmap == pmap_kernel()) break; - } while (pv = pv->pv_next); + } while ((pv = pv->pv_next)); if (pv == NULL) continue; #ifdef DEBUG @@ -1742,7 +1886,7 @@ ok: #ifdef DEBUG if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) { - printf("collect: freeing KPT page at %x (ste %x@%x)\n", + printf("collect: freeing KPT page at %lx (ste %x@%p)\n", pv->pv_va, *pv->pv_ptste, pv->pv_ptste); opmapdebug = pmapdebug; pmapdebug |= PDB_PTPAGE; @@ -1772,7 +1916,7 @@ ok: if (kpt == (struct kpt_page *)0) panic("pmap_collect: lost a KPT page"); if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) - printf("collect: %x (%x) to free list\n", + printf("collect: %lx (%lx) to free list\n", kpt->kpt_va, kpa); #endif *pkpt = kpt->kpt_next; @@ -1787,15 +1931,14 @@ ok: pmapdebug = opmapdebug; if (*ste != SG_NV) - printf("collect: kernel STE at %x still valid (%x)\n", + printf("collect: kernel STE at %p still valid (%x)\n", ste, *ste); ste = &Sysptmap[ste - pmap_ste(pmap_kernel(), 0)]; if (*ste != SG_NV) - printf("collect: kernel PTmap at %x still valid (%x)\n", + printf("collect: kernel PTmap at %p still valid (%x)\n", ste, *ste); #endif } - splx(s); } /* @@ -1814,12 +1957,12 @@ void pmap_zero_page(phys) vm_offset_t phys; { - register vm_offset_t kva; + vm_offset_t kva; extern caddr_t CADDR1; #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) - printf("pmap_zero_page(%x)\n", phys); + printf("pmap_zero_page(%lx)\n", phys); #endif kva = (vm_offset_t) CADDR1; pmap_enter(pmap_kernel(), kva, phys, VM_PROT_READ|VM_PROT_WRITE, TRUE); @@ -1845,12 +1988,12 @@ void pmap_copy_page(src, dst) vm_offset_t src, dst; { - register vm_offset_t skva, dkva; + vm_offset_t skva, dkva; extern caddr_t CADDR1, CADDR2; #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) - printf("pmap_copy_page(%x, %x)\n", src, dst); + printf("pmap_copy_page(%lx, %lx)\n", src, dst); #endif skva = (vm_offset_t) CADDR1; dkva = (vm_offset_t) CADDR2; @@ -1883,7 +2026,7 @@ pmap_pageable(pmap, sva, eva, pageable) { #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) - printf("pmap_pageable(%x, %x, %x, %x)\n", + printf("pmap_pageable(%p, %lx, %lx, %lx)\n", pmap, sva, eva, pageable); #endif /* @@ -1895,25 +2038,25 @@ pmap_pageable(pmap, sva, eva, pageable) * - PT pages have only one pv_table entry */ if (pmap == pmap_kernel() && pageable && sva + NBPG == eva) { - register struct pv_entry *pv; - register vm_offset_t pa; + struct pv_entry *pv; + vm_offset_t pa; #ifdef DEBUG if ((pmapdebug & 
(PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE) - printf("pmap_pageable(%x, %x, %x, %x)\n", + printf("pmap_pageable(%p, %lx, %lx, %x)\n", pmap, sva, eva, pageable); #endif if (!pmap_ste_v(pmap, sva)) return; pa = pmap_pte_pa(pmap_pte(pmap, sva)); - if (pa < vm_first_phys || pa >= vm_last_phys) + if (PAGE_IS_MANAGED(pa) == 0) return; pv = pa_to_pvh(pa); if (pv->pv_ptste == NULL) return; #ifdef DEBUG if (pv->pv_va != sva || pv->pv_next) { - printf("pmap_pageable: bad PT page va %x next %x\n", + printf("pmap_pageable: bad PT page va %lx next %p\n", pv->pv_va, pv->pv_next); return; } @@ -1924,12 +2067,12 @@ pmap_pageable(pmap, sva, eva, pageable) pmap_changebit(pa, PG_M, FALSE); #ifdef DEBUG if ((PHYS_TO_VM_PAGE(pa)->flags & PG_CLEAN) == 0) { - printf("pa %x: flags=%x: not clean\n", + printf("pa %lx: flags=%x: not clean\n", pa, PHYS_TO_VM_PAGE(pa)->flags); PHYS_TO_VM_PAGE(pa)->flags |= PG_CLEAN; } if (pmapdebug & PDB_PTPAGE) - printf("pmap_pageable: PT page %x(%x) unmodified\n", + printf("pmap_pageable: PT page %lx(%x) unmodified\n", sva, *pmap_pte(pmap, sva)); if (pmapdebug & PDB_WIRING) pmap_check_wiring("pageable", sva); @@ -1947,7 +2090,7 @@ pmap_clear_modify(pa) { #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) - printf("pmap_clear_modify(%x)\n", pa); + printf("pmap_clear_modify(%lx)\n", pa); #endif pmap_changebit(pa, PG_M, FALSE); } @@ -1963,7 +2106,7 @@ void pmap_clear_reference(pa) { #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) - printf("pmap_clear_reference(%x)\n", pa); + printf("pmap_clear_reference(%lx)\n", pa); #endif pmap_changebit(pa, PG_U, FALSE); } @@ -1982,7 +2125,7 @@ pmap_is_referenced(pa) #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) { boolean_t rv = pmap_testbit(pa, PG_U); - printf("pmap_is_referenced(%x) -> %c\n", pa, "FT"[rv]); + printf("pmap_is_referenced(%lx) -> %c\n", pa, "FT"[rv]); return(rv); } #endif @@ -2003,7 +2146,7 @@ pmap_is_modified(pa) #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) { boolean_t rv = pmap_testbit(pa, PG_M); - printf("pmap_is_modified(%x) -> %c\n", pa, "FT"[rv]); + printf("pmap_is_modified(%lx) -> %c\n", pa, "FT"[rv]); return(rv); } #endif @@ -2026,6 +2169,7 @@ pmap_phys_address(ppn) * We implement this at the segment table level, the machine independent * VM knows nothing about it. */ +int pmap_mapmulti(pmap, va) pmap_t pmap; vm_offset_t va; @@ -2035,10 +2179,10 @@ pmap_mapmulti(pmap, va) #ifdef DEBUG if (pmapdebug & PDB_MULTIMAP) { ste = pmap_ste(pmap, HPMMBASEADDR(va)); - printf("pmap_mapmulti(%x, %x): bste %x(%x)", + printf("pmap_mapmulti(%p, %lx): bste %p(%x)", pmap, va, ste, *ste); ste = pmap_ste(pmap, va); - printf(" ste %x(%x)\n", ste, *ste); + printf(" ste %p(%x)\n", ste, *ste); } #endif bste = pmap_ste(pmap, HPMMBASEADDR(va)); @@ -2065,13 +2209,13 @@ pmap_mapmulti(pmap, va) /* static */ void pmap_remove_mapping(pmap, va, pte, flags) - register pmap_t pmap; - register vm_offset_t va; - register pt_entry_t *pte; + pmap_t pmap; + vm_offset_t va; + pt_entry_t *pte; int flags; { - register vm_offset_t pa; - register struct pv_entry *pv, *npv; + vm_offset_t pa; + struct pv_entry *pv, *npv; pmap_t ptpmap; st_entry_t *ste; int s, bits; @@ -2079,7 +2223,7 @@ pmap_remove_mapping(pmap, va, pte, flags) pt_entry_t opte; if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT)) - printf("pmap_remove_mapping(%x, %x, %x, %x)\n", + printf("pmap_remove_mapping(%p, %lx, %p, %x)\n", pmap, va, pte, flags); #endif @@ -2106,8 +2250,7 @@ pmap_remove_mapping(pmap, va, pte, flags) * flush the VAC. Note that the kernel side was flushed * above so we don't worry about non-CI kernel mappings. 
*/ - if (pmap == curproc->p_vmspace->vm_map.pmap && - !pmap_pte_ci(pte)) { + if (active_user_pmap(pmap) && !pmap_pte_ci(pte)) { DCIU(); #ifdef PMAPSTATS remove_stats.uflushes++; @@ -2134,7 +2277,7 @@ pmap_remove_mapping(pmap, va, pte, flags) */ #ifdef DEBUG if (pmapdebug & PDB_REMOVE) - printf("remove: invalidating pte at %x\n", pte); + printf("remove: invalidating pte at %p\n", pte); #endif bits = *pte & (PG_U|PG_M); *pte = PG_NV; @@ -2148,8 +2291,13 @@ pmap_remove_mapping(pmap, va, pte, flags) * PT page. */ if (pmap != pmap_kernel()) { +#if defined(UVM) + (void) uvm_map_pageable(pt_map, trunc_page(pte), + round_page(pte+1), TRUE); +#else (void) vm_map_pageable(pt_map, trunc_page(pte), round_page(pte+1), TRUE); +#endif #ifdef DEBUG if (pmapdebug & PDB_WIRING) pmap_check_wiring("remove", trunc_page(pte)); @@ -2158,7 +2306,7 @@ pmap_remove_mapping(pmap, va, pte, flags) /* * If this isn't a managed page, we are all done. */ - if (pa < vm_first_phys || pa >= vm_last_phys) + if (PAGE_IS_MANAGED(pa) == 0) return; /* * Otherwise remove it from the PV table @@ -2213,7 +2361,7 @@ pmap_remove_mapping(pmap, va, pte, flags) pv->pv_pmap && pv->pv_next == NULL && (pv->pv_flags & PV_CI)) { #ifdef DEBUG if (pmapdebug & PDB_CACHE) - printf("remove: clearing CI for pa %x\n", pa); + printf("remove: clearing CI for pa %lx\n", pa); #endif pv->pv_flags &= ~PV_CI; pmap_changebit(pa, PG_CI, FALSE); @@ -2234,7 +2382,7 @@ pmap_remove_mapping(pmap, va, pte, flags) #endif #ifdef DEBUG if (pmapdebug & (PDB_REMOVE|PDB_PTPAGE)) - printf("remove: ste was %x@%x pte was %x@%x\n", + printf("remove: ste was %x@%p pte was %x@%p\n", *ste, ste, opte, pmap_pte(pmap, va)); #endif #if defined(M68040) @@ -2257,7 +2405,7 @@ pmap_remove_mapping(pmap, va, pte, flags) if (ptpmap != pmap_kernel()) { #ifdef DEBUG if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB)) - printf("remove: stab %x, refcnt %d\n", + printf("remove: stab %p, refcnt %d\n", ptpmap->pm_stab, ptpmap->pm_sref - 1); if ((pmapdebug & PDB_PARANOIA) && ptpmap->pm_stab != (st_entry_t *)trunc_page(ste)) @@ -2266,12 +2414,18 @@ pmap_remove_mapping(pmap, va, pte, flags) if (--(ptpmap->pm_sref) == 0) { #ifdef DEBUG if (pmapdebug&(PDB_REMOVE|PDB_SEGTAB)) - printf("remove: free stab %x\n", + printf("remove: free stab %p\n", ptpmap->pm_stab); #endif +#if defined(UVM) + uvm_km_free_wakeup(st_map, + (vm_offset_t)ptpmap->pm_stab, + HP_STSIZE); +#else kmem_free_wakeup(st_map, (vm_offset_t)ptpmap->pm_stab, HP_STSIZE); +#endif ptpmap->pm_stab = Segtabzero; ptpmap->pm_stpa = Segtabzeropa; #if defined(M68040) @@ -2307,21 +2461,21 @@ pmap_remove_mapping(pmap, va, pte, flags) /* * Update saved attributes for managed page */ - pmap_attributes[pmap_page_index(pa)] |= bits; + *pa_to_attribute(pa) |= bits; splx(s); } /* static */ boolean_t pmap_testbit(pa, bit) - register vm_offset_t pa; + vm_offset_t pa; int bit; { - register struct pv_entry *pv; - register pt_entry_t *pte; + struct pv_entry *pv; + pt_entry_t *pte; int s; - if (pa < vm_first_phys || pa >= vm_last_phys) + if (PAGE_IS_MANAGED(pa) == 0) return(FALSE); pv = pa_to_pvh(pa); @@ -2329,7 +2483,7 @@ pmap_testbit(pa, bit) /* * Check saved info first */ - if (pmap_attributes[pmap_page_index(pa)] & bit) { + if (*pa_to_attribute(pa) & bit) { splx(s); return(TRUE); } @@ -2360,25 +2514,27 @@ pmap_testbit(pa, bit) /* static */ void pmap_changebit(pa, bit, setem) - register vm_offset_t pa; + vm_offset_t pa; int bit; boolean_t setem; { - register struct pv_entry *pv; - register pt_entry_t *pte, npte; + struct pv_entry *pv; + pt_entry_t *pte, npte; 
vm_offset_t va; int s; +#if defined(HAVEVAC) || defined(M68040) boolean_t firstpage = TRUE; +#endif #ifdef PMAPSTATS struct chgstats *chgp; #endif #ifdef DEBUG if (pmapdebug & PDB_BITS) - printf("pmap_changebit(%x, %x, %s)\n", + printf("pmap_changebit(%lx, %x, %s)\n", pa, bit, setem ? "set" : "clear"); #endif - if (pa < vm_first_phys || pa >= vm_last_phys) + if (PAGE_IS_MANAGED(pa) == 0) return; #ifdef PMAPSTATS @@ -2394,7 +2550,7 @@ pmap_changebit(pa, bit, setem) * Clear saved attributes (modify, reference) */ if (!setem) - pmap_attributes[pmap_page_index(pa)] &= ~bit; + *pa_to_attribute(pa) &= ~bit; /* * Loop over all current mappings setting/clearing as appropos * If setting RO do we need to clear the VAC? @@ -2413,10 +2569,15 @@ pmap_changebit(pa, bit, setem) * XXX don't write protect pager mappings */ if (bit == PG_RO) { +#if defined(UVM) + if (va >= uvm.pager_sva && va < uvm.pager_eva) + continue; +#else extern vm_offset_t pager_sva, pager_eva; if (va >= pager_sva && va < pager_eva) continue; +#endif } pte = pmap_pte(pv->pv_pmap, va); @@ -2485,17 +2646,17 @@ pmap_changebit(pa, bit, setem) /* static */ void pmap_enter_ptpage(pmap, va) - register pmap_t pmap; - register vm_offset_t va; + pmap_t pmap; + vm_offset_t va; { - register vm_offset_t ptpa; - register struct pv_entry *pv; + vm_offset_t ptpa; + struct pv_entry *pv; st_entry_t *ste; int s; #ifdef DEBUG if (pmapdebug & (PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE)) - printf("pmap_enter_ptpage: pmap %x, va %x\n", pmap, va); + printf("pmap_enter_ptpage: pmap %p, va %lx\n", pmap, va); #endif #ifdef PMAPSTATS enter_stats.ptpneeded++; @@ -2508,8 +2669,13 @@ pmap_enter_ptpage(pmap, va) * reference count drops to zero. */ if (pmap->pm_stab == Segtabzero) { +#if defined(UVM) + pmap->pm_stab = (st_entry_t *) + uvm_km_zalloc(st_map, HP_STSIZE); +#else pmap->pm_stab = (st_entry_t *) kmem_alloc(st_map, HP_STSIZE); +#endif pmap->pm_stpa = (st_entry_t *) pmap_extract(pmap_kernel(), (vm_offset_t)pmap->pm_stab); #if defined(M68040) @@ -2529,7 +2695,7 @@ pmap_enter_ptpage(pmap, va) PMAP_ACTIVATE(pmap, 1); #ifdef DEBUG if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB)) - printf("enter: pmap %x stab %x(%x)\n", + printf("enter: pmap %p stab %p(%p)\n", pmap, pmap->pm_stab, pmap->pm_stpa); #endif } @@ -2554,7 +2720,7 @@ pmap_enter_ptpage(pmap, va) *ste = (u_int)addr | SG_RW | SG_U | SG_V; #ifdef DEBUG if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB)) - printf("enter: alloc ste2 %d(%x)\n", ix, addr); + printf("enter: alloc ste2 %d(%p)\n", ix, addr); #endif } ste = pmap_ste2(pmap, va); @@ -2569,7 +2735,7 @@ pmap_enter_ptpage(pmap, va) ste = (st_entry_t *)((int)ste & ~(NBPG/SG4_LEV3SIZE-1)); #ifdef DEBUG if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB)) - printf("enter: ste2 %x (%x)\n", + printf("enter: ste2 %p (%p)\n", pmap_ste2(pmap, va), ste); #endif } @@ -2582,7 +2748,7 @@ pmap_enter_ptpage(pmap, va) * pmap_enter). */ if (pmap == pmap_kernel()) { - register struct kpt_page *kpt; + struct kpt_page *kpt; s = splimp(); if ((kpt = kpt_free_list) == (struct kpt_page *)0) { @@ -2612,7 +2778,7 @@ pmap_enter_ptpage(pmap, va) if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) { int ix = pmap_ste(pmap, va) - pmap_ste(pmap, 0); - printf("enter: add &Sysptmap[%d]: %x (KPT page %x)\n", + printf("enter: add &Sysptmap[%d]: %x (KPT page %lx)\n", ix, Sysptmap[ix], kpt->kpt_va); } #endif @@ -2623,15 +2789,29 @@ pmap_enter_ptpage(pmap, va) * letting the VM system allocate a zero-filled page. 
*/ else { + /* + * Count the segment table reference now so that we won't + * lose the segment table when low on memory. + */ + pmap->pm_sref++; #ifdef DEBUG if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) - printf("enter: about to fault UPT pg at %x\n", va); + printf("enter: about to fault UPT pg at %lx\n", va); #endif +#if defined(UVM) + s = uvm_fault(pt_map, va, 0, VM_PROT_READ|VM_PROT_WRITE); + if (s != KERN_SUCCESS) { + printf("uvm_fault(pt_map, 0x%lx, 0, RW) -> %d\n", + va, s); + panic("pmap_enter: uvm_fault failed"); + } +#else s = vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE); if (s != KERN_SUCCESS) { - printf("vm_fault(pt_map, %x, RW, 0) -> %d\n", va, s); + printf("vm_fault(pt_map, %lx, RW, 0) -> %d\n", va, s); panic("pmap_enter: vm_fault failed"); } +#endif ptpa = pmap_extract(pmap_kernel(), va); /* * Mark the page clean now to avoid its pageout (and @@ -2639,7 +2819,7 @@ pmap_enter_ptpage(pmap, va) * is wired; i.e. while it is on a paging queue. */ PHYS_TO_VM_PAGE(ptpa)->flags |= PG_CLEAN; -#ifdef DEBUG +#if defined(DEBUG) && !defined(UVM) PHYS_TO_VM_PAGE(ptpa)->flags |= PG_PTPAGE; #endif } @@ -2655,7 +2835,7 @@ pmap_enter_ptpage(pmap, va) pt_entry_t *pte = pmap_pte(pmap_kernel(), va); #ifdef DEBUG if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0) - printf("%s PT no CCB: kva=%x ptpa=%x pte@%x=%x\n", + printf("%s PT no CCB: kva=%lx ptpa=%lx pte@%p=%x\n", pmap == pmap_kernel() ? "Kernel" : "User", va, ptpa, pte, *pte); #endif @@ -2684,7 +2864,7 @@ pmap_enter_ptpage(pmap, va) pv->pv_ptpmap = pmap; #ifdef DEBUG if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) - printf("enter: new PT page at PA %x, ste at %x\n", ptpa, ste); + printf("enter: new PT page at PA %lx, ste at %p\n", ptpa, ste); #endif /* @@ -2707,10 +2887,9 @@ pmap_enter_ptpage(pmap, va) #endif *ste = (ptpa & SG_FRAME) | SG_RW | SG_V; if (pmap != pmap_kernel()) { - pmap->pm_sref++; #ifdef DEBUG if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB)) - printf("enter: stab %x refcnt %d\n", + printf("enter: stab %p refcnt %d\n", pmap->pm_stab, pmap->pm_sref); #endif } @@ -2733,11 +2912,11 @@ void pmap_pvdump(pa) vm_offset_t pa; { - register struct pv_entry *pv; + struct pv_entry *pv; - printf("pa %x", pa); + printf("pa %lx", pa); for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next) - printf(" -> pmap %x, va %x, ptste %x, ptpmap %x, flags %x", + printf(" -> pmap %p, va %lx, ptste %p, ptpmap %p, flags %x", pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap, pv->pv_flags); printf("\n"); @@ -2750,24 +2929,31 @@ pmap_check_wiring(str, va) vm_offset_t va; { vm_map_entry_t entry; - register int count; - register pt_entry_t *pte; + int count; + pt_entry_t *pte; va = trunc_page(va); if (!pmap_ste_v(pmap_kernel(), va) || !pmap_pte_v(pmap_pte(pmap_kernel(), va))) return; - if (!vm_map_lookup_entry(pt_map, va, &entry)) { - printf("wired_check: entry for %x not found\n", va); +#if defined(UVM) + if (!uvm_map_lookup_entry(pt_map, va, &entry)) { + printf("wired_check: entry for %lx not found\n", va); return; } +#else + if (!vm_map_lookup_entry(pt_map, va, &entry)) { + printf("wired_check: entry for %lx not found\n", va); + return; + } +#endif count = 0; for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + NBPG); pte++) if (*pte) count++; if (entry->wired_count != count) - printf("*%s*: %x: w%d/a%d\n", + printf("*%s*: %lx: w%d/a%d\n", str, va, entry->wired_count, count); } #endif diff --git a/sys/arch/mvme68k/mvme68k/pmap_bootstrap.c b/sys/arch/mvme68k/mvme68k/pmap_bootstrap.c index 9b34d2f4e6c8..996e5efb1e35 100644 --- 
a/sys/arch/mvme68k/mvme68k/pmap_bootstrap.c +++ b/sys/arch/mvme68k/mvme68k/pmap_bootstrap.c @@ -1,4 +1,4 @@ -/* $NetBSD: pmap_bootstrap.c,v 1.7 1997/12/06 20:29:59 scw Exp $ */ +/* $NetBSD: pmap_bootstrap.c,v 1.8 1998/02/21 19:03:27 scw Exp $ */ /* * Copyright (c) 1991, 1993 @@ -45,7 +45,7 @@ #include #include -#ifdef MACHINE_NONCONTIG +#ifdef MACHINE_NEW_NONCONTIG #include #endif @@ -62,7 +62,7 @@ extern pt_entry_t *Sysptmap, *Sysmap; extern int maxmem, physmem; extern vm_offset_t avail_start, avail_end, virtual_avail, virtual_end; extern vm_size_t mem_size; -#ifdef MACHINE_NONCONTIG +#ifdef MACHINE_NEW_NONCONTIG extern vm_size_t avail_remaining; extern vm_offset_t avail_next; #endif @@ -104,7 +104,7 @@ pmap_bootstrap(nextpa, firstpa) u_int nptpages, kstsize; register st_entry_t protoste, *ste; register pt_entry_t protopte, *pte, *epte; -#ifdef MACHINE_NONCONTIG +#ifdef MACHINE_NEW_NONCONTIG int i; #endif @@ -411,7 +411,7 @@ pmap_bootstrap(nextpa, firstpa) */ RELOC(avail_start, vm_offset_t) = nextpa; -#ifndef MACHINE_NONCONTIG +#ifndef MACHINE_NEW_NONCONTIG RELOC(avail_end, vm_offset_t) = m68k_ptob(RELOC(maxmem, int)) /* XXX allow for msgbuf */ diff --git a/sys/arch/mvme68k/mvme68k/seglist.h b/sys/arch/mvme68k/mvme68k/seglist.h index 47a3ae3fda5c..11f9b60afcf8 100644 --- a/sys/arch/mvme68k/mvme68k/seglist.h +++ b/sys/arch/mvme68k/mvme68k/seglist.h @@ -1,4 +1,4 @@ -/* $NetBSD: seglist.h,v 1.1 1997/10/09 21:45:05 scw Exp $ */ +/* $NetBSD: seglist.h,v 1.2 1998/02/21 19:03:27 scw Exp $ */ /* * Copyright (c) 1997 The Steve Woodford @@ -48,7 +48,7 @@ struct phys_seg_list_t { }; /* Space for onboard RAM, and one contiguous offboard segment */ -#define MAX_PHYS_SEGS 2 +#define MAX_PHYS_SEGS VM_PHYSSEG_MAX /* Instantiated in pmap.c */ extern struct phys_seg_list_t phys_seg_list[MAX_PHYS_SEGS]; diff --git a/sys/arch/mvme68k/mvme68k/trap.c b/sys/arch/mvme68k/mvme68k/trap.c index 1842afa19acb..bb5dc78b637c 100644 --- a/sys/arch/mvme68k/mvme68k/trap.c +++ b/sys/arch/mvme68k/mvme68k/trap.c @@ -1,4 +1,4 @@ -/* $NetBSD: trap.c,v 1.14 1998/02/13 07:41:56 scottr Exp $ */ +/* $NetBSD: trap.c,v 1.15 1998/02/21 19:03:27 scw Exp $ */ /* * Copyright (c) 1988 University of Utah. @@ -42,6 +42,8 @@ * @(#)trap.c 8.5 (Berkeley) 1/4/94 */ +#include "opt_uvm.h" + #include #include #include @@ -64,6 +66,10 @@ #include #include +#if defined(UVM) +#include +#endif + #ifdef COMPAT_HPUX #include #endif @@ -239,7 +245,11 @@ trap(type, code, v, frame) #endif int bit; +#if defined(UVM) + uvmexp.traps++; +#else cnt.v_trap++; +#endif p = curproc; ucode = 0; if (USERMODE(frame.f_sr)) { @@ -458,7 +468,11 @@ copyfault: while (bit = ffs(ssir)) { --bit; ssir &= ~(1 << bit); +#if defined(UVM) + uvmexp.softs++; +#else cnt.v_soft++; +#endif if (sir_routines[bit]) sir_routines[bit](sir_args[bit]); } @@ -467,7 +481,11 @@ copyfault: * If this was not an AST trap, we are all done. */ if (type != (T_ASTFLT|T_USER)) { +#if defined(UVM) + uvmexp.traps--; +#else cnt.v_trap--; +#endif return; } spl0(); @@ -532,18 +550,31 @@ copyfault: rv = pmap_mapmulti(map->pmap, va); if (rv != KERN_SUCCESS) { bva = HPMMBASEADDR(va); +#if defined(UVM) + rv = uvm_fault(map, bva, 0, ftype); +#else rv = vm_fault(map, bva, ftype, FALSE); +#endif if (rv == KERN_SUCCESS) (void) pmap_mapmulti(map->pmap, va); } } else #endif +#if defined(UVM) + rv = uvm_fault(map, va, 0, ftype); +#ifdef DEBUG + if (rv && MDB_ISPID(p->p_pid)) + printf("uvm_fault(%p, 0x%lx, 0, 0x%x) -> 0x%x\n", + map, va, ftype, rv); +#endif +#else /* ! 
UVM */ rv = vm_fault(map, va, ftype, FALSE); #ifdef DEBUG if (rv && MDB_ISPID(p->p_pid)) printf("vm_fault(%x, %x, %x, 0) -> %x\n", map, va, ftype, rv); #endif +#endif /* UVM */ /* * If this was a stack access we keep track of the maximum * accessed stack size. Also, if vm_fault gets a protection @@ -574,8 +605,13 @@ copyfault: if (type == T_MMUFLT) { if (p->p_addr->u_pcb.pcb_onfault) goto copyfault; +#if defined(UVM) + printf("uvm_fault(%p, 0x%lx, 0, 0x%x) -> 0x%x\n", + map, va, ftype, rv); +#else printf("vm_fault(%x, %x, %x, 0) -> %x\n", map, va, ftype, rv); +#endif printf(" type %x, code [mmu,,ssw]: %x\n", type, code); goto dopanic; @@ -904,7 +940,11 @@ syscall(code, frame) register_t args[8], rval[2]; u_quad_t sticks; +#if defined(UVM) + uvmexp.syscalls++; +#else cnt.v_syscall++; +#endif if (!USERMODE(frame.f_sr)) panic("syscall"); p = curproc; diff --git a/sys/arch/mvme68k/mvme68k/vm_machdep.c b/sys/arch/mvme68k/mvme68k/vm_machdep.c index 710c6fefb737..c2e45266ba7c 100644 --- a/sys/arch/mvme68k/mvme68k/vm_machdep.c +++ b/sys/arch/mvme68k/mvme68k/vm_machdep.c @@ -1,4 +1,4 @@ -/* $NetBSD: vm_machdep.c,v 1.10 1998/01/06 07:49:46 thorpej Exp $ */ +/* $NetBSD: vm_machdep.c,v 1.11 1998/02/21 19:03:27 scw Exp $ */ /* * Copyright (c) 1988 University of Utah. @@ -42,6 +42,8 @@ * @(#)vm_machdep.c 8.6 (Berkeley) 1/12/94 */ +#include "opt_uvm.h" + #include #include #include @@ -55,6 +57,10 @@ #include #include +#ifdef UVM +#include +#endif + #include #include #include @@ -120,10 +126,18 @@ cpu_exit(p) struct proc *p; { +#ifdef UVM + uvmspace_free(p->p_vmspace); +#else vmspace_free(p->p_vmspace); +#endif (void) splhigh(); +#ifdef UVM + uvmexp.swtch++; +#else cnt.v_swtch++; +#endif switch_exit(p); /* NOTREACHED */ } @@ -280,7 +294,11 @@ iomap(paddr, size) size = m68k_round_page(size); /* Get some kernel virtual space. */ +#ifdef UVM + va = uvm_km_alloc(kernel_map, size); +#else va = kmem_alloc_pageable(kernel_map, size); +#endif if (va == 0) return (NULL); rval = va + off; @@ -302,7 +320,11 @@ iounmap(kva, size) size = m68k_round_page(size); physunaccess((caddr_t)va, size); +#ifdef UVM + uvm_km_free(kernel_map, va, size); +#else kmem_free(kernel_map, va, size); +#endif } /* @@ -364,7 +386,11 @@ vmapbuf(bp, len) uva = m68k_trunc_page(bp->b_saveaddr = bp->b_data); off = (vm_offset_t)bp->b_data - uva; len = m68k_round_page(off + len); +#ifdef UVM + kva = uvm_km_valloc_wait(phys_map, len); +#else kva = kmem_alloc_wait(phys_map, len); +#endif bp->b_data = (caddr_t)(kva + off); upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map); @@ -402,7 +428,11 @@ vunmapbuf(bp, len) * pmap_remove() is unnecessary here, as kmem_free_wakeup() * will do it for us. */ +#ifdef UVM + uvm_km_free_wakeup(phys_map, kva, len); +#else kmem_free_wakeup(phys_map, kva, len); +#endif bp->b_data = bp->b_saveaddr; bp->b_saveaddr = 0; }
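
[Reviewer note, not part of the patch: the MACHINE_NEW_NONCONTIG part of pmap_init() above sizes a single zeroed allocation to hold Segtabzero, the pv table and the page attribute table, then walks vm_physmem[] handing each bank its pvent/attrs slice. The self-contained C sketch below shows just that carving step; STSIZE, the trimmed-down pv_entry and the two bank sizes are invented for the demo.]

#include <stdio.h>
#include <stdlib.h>

#define STSIZE 256			/* stands in for HP_STSIZE */

struct pv_entry {			/* trimmed-down pv entry */
	void *pv_pmap;
};

struct bank {				/* like vm_physmem[] + pmseg */
	int npages;
	struct pv_entry *pvent;
	char *attrs;
};

int
main(void)
{
	struct bank banks[2] = { { 2048, NULL, NULL }, { 1024, NULL, NULL } };
	int b, page_cnt = banks[0].npages + banks[1].npages;
	size_t s;
	char *base, *addr;
	struct pv_entry *pv;
	char *attr;

	/* one allocation, laid out in the same order as pmap_init() */
	s = STSIZE;				 /* Segtabzero */
	s += page_cnt * sizeof(struct pv_entry); /* pv table */
	s += page_cnt * sizeof(char);		 /* attribute table */
	base = addr = calloc(1, s);
	if (base == NULL)
		return 1;			 /* pmap_init() panics here */

	addr += STSIZE;				 /* skip the null seg table */
	pv = (struct pv_entry *)addr;
	attr = addr + page_cnt * sizeof(struct pv_entry);

	/* hand each bank its slice of the pv and attribute tables */
	for (b = 0; b < 2; b++) {
		banks[b].pvent = pv;
		banks[b].attrs = attr;
		pv += banks[b].npages;
		attr += banks[b].npages;
	}

	printf("bank 1 pv entries start %d entries after bank 0's\n",
	    (int)(banks[1].pvent - banks[0].pvent));
	free(base);
	return 0;
}
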