initial import of the new virtual memory system, UVM, into -current.

UVM was written by chuck cranor <chuck@maria.wustl.edu>, with some
minor portions derived from the old Mach code.  i provided some help
getting swap and paging working, and other bug fixes/ideas.  chuck
silvers <chuq@chuq.com> also provided some other fixes.

these are the changes to the old Mach VM system to allow both to be
available in the kernel.
commit 8f7ee94e13 (parent 40b61fb276)
mrg, 1998-02-06 00:14:43 +00:00
13 changed files with 295 additions and 136 deletions

sys/vm/pmap.h

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.20 1998/01/08 23:28:04 thorpej Exp $ */
/* $NetBSD: pmap.h,v 1.21 1998/02/06 00:14:43 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -88,6 +88,26 @@ typedef struct pmap_statistics *pmap_statistics_t;
#include <machine/pmap.h>
/*
* PMAP_PGARG hack
*
* operations that take place on managed pages used to take PAs.
* this caused us to translate the PA back to a page (or pv_head).
* PMAP_NEW avoids this by passing the vm_page in (pv_head should be
* pointed to by vm_page (or be a part of it)).
*
* applies to: pmap_page_protect, pmap_is_referenced, pmap_is_modified,
* pmap_clear_reference, pmap_clear_modify.
*
* the latter two functions are boolean_t in PMAP_NEW. they return
* TRUE if something was cleared.
*/
#if defined(PMAP_NEW)
#define PMAP_PGARG(PG) (PG)
#else
#define PMAP_PGARG(PG) (VM_PAGE_TO_PHYS(PG))
#endif
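[a hedged illustration: the following call site is hypothetical and not part
of this commit, but shows how PMAP_PGARG lets one spelling of a call compile
against both the old PA-based and the new vm_page-based pmap interfaces.]

/*
 * illustrative only: revoke all mappings of a managed page.
 * under PMAP_NEW, PMAP_PGARG(pg) passes the vm_page pointer through;
 * otherwise it expands to VM_PAGE_TO_PHYS(pg) and passes the PA.
 */
void
revoke_mappings(pg)
	struct vm_page *pg;
{
	pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);
	/* under PMAP_NEW these return TRUE if a bit was actually cleared */
	pmap_clear_modify(PMAP_PGARG(pg));
	pmap_clear_reference(PMAP_PGARG(pg));
}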
#ifndef PMAP_EXCLUDE_DECLS /* Used in Sparc port to virtualize pmap mod */
#ifdef _KERNEL
__BEGIN_DECLS
@@ -99,19 +119,34 @@ void pmap_bootstrap( /* machine dependent */ );
void pmap_activate __P((struct proc *));
void pmap_deactivate __P((struct proc *));
void pmap_change_wiring __P((pmap_t, vm_offset_t, boolean_t));
#if defined(PMAP_NEW)
#if !defined(pmap_clear_modify)
boolean_t pmap_clear_modify __P((struct vm_page *));
#endif
#if !defined(pmap_clear_reference)
boolean_t pmap_clear_reference __P((struct vm_page *));
#endif
#else /* PMAP_NEW */
void pmap_clear_modify __P((vm_offset_t pa));
void pmap_clear_reference __P((vm_offset_t pa));
#endif /* PMAP_NEW */
void pmap_collect __P((pmap_t));
void pmap_copy __P((pmap_t,
pmap_t, vm_offset_t, vm_size_t, vm_offset_t));
void pmap_copy_page __P((vm_offset_t, vm_offset_t));
#if defined(PMAP_NEW)
struct pmap *pmap_create __P((void));
#else
pmap_t pmap_create __P((vm_size_t));
#endif
void pmap_destroy __P((pmap_t));
void pmap_enter __P((pmap_t,
vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
vm_offset_t pmap_extract __P((pmap_t, vm_offset_t));
#ifndef pmap_page_index
int pmap_page_index __P((vm_offset_t));
#if defined(PMAP_NEW) && defined(PMAP_GROWKERNEL)
void pmap_growkernel __P((vm_offset_t));
#endif
#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG)
@@ -119,13 +154,38 @@ void pmap_init __P((vm_offset_t, vm_offset_t));
#else
void pmap_init __P((void));
#endif
#if defined(PMAP_NEW)
void pmap_kenter_pa __P((vm_offset_t, vm_offset_t, vm_prot_t));
void pmap_kenter_pgs __P((vm_offset_t, struct vm_page **, int));
void pmap_kremove __P((vm_offset_t, vm_size_t));
#if !defined(pmap_is_modified)
boolean_t pmap_is_modified __P((struct vm_page *));
#endif
#if !defined(pmap_is_referenced)
boolean_t pmap_is_referenced __P((struct vm_page *));
#endif
#else /* PMAP_NEW */
boolean_t pmap_is_modified __P((vm_offset_t pa));
boolean_t pmap_is_referenced __P((vm_offset_t pa));
#endif /* PMAP_NEW */
vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
#ifndef pmap_page_index
int pmap_page_index __P((vm_offset_t));
#endif
#if defined(PMAP_NEW)
void pmap_page_protect __P((struct vm_page *, vm_prot_t));
#else
void pmap_page_protect __P((vm_offset_t, vm_prot_t));
#endif
void pmap_pageable __P((pmap_t,
vm_offset_t, vm_offset_t, boolean_t));
#if !defined(pmap_phys_address)
vm_offset_t pmap_phys_address __P((int));
#endif
void pmap_pinit __P((pmap_t));
void pmap_protect __P((pmap_t,
vm_offset_t, vm_offset_t, vm_prot_t));

sys/vm/swap_pager.c

@@ -1,4 +1,4 @@
/* $NetBSD: swap_pager.c,v 1.33 1997/06/12 14:51:24 mrg Exp $ */
/* $NetBSD: swap_pager.c,v 1.34 1998/02/06 00:14:45 mrg Exp $ */
/*
* Copyright (c) 1990 University of Utah.
@@ -129,6 +129,8 @@ struct swpclean swap_pager_inuse; /* list of pending page cleans */
struct swpclean swap_pager_free; /* list of free pager clean structs */
struct pagerlst swap_pager_list; /* list of "named" anon regions */
extern struct buf bswlist; /* import from vm_swap.c */
static void swap_pager_init __P((void));
static vm_pager_t swap_pager_alloc
__P((caddr_t, vm_size_t, vm_prot_t, vm_offset_t));

sys/vm/vm.h

@@ -1,4 +1,4 @@
/* $NetBSD: vm.h,v 1.16 1998/01/06 08:36:23 thorpej Exp $ */
/* $NetBSD: vm.h,v 1.17 1998/02/06 00:14:47 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -61,10 +61,12 @@ typedef struct pager_struct *vm_pager_t;
/*
* MACH VM locking type mappings to kernel types
*/
#if !defined(UVM)
typedef struct simplelock simple_lock_data_t;
typedef struct simplelock *simple_lock_t;
typedef struct lock lock_data_t;
typedef struct lock *lock_t;
#endif
#include <sys/vmmeter.h>
#include <sys/queue.h>

sys/vm/vm_extern.h

@@ -1,4 +1,4 @@
/* $NetBSD: vm_extern.h,v 1.30 1998/01/31 04:02:39 ross Exp $ */
/* $NetBSD: vm_extern.h,v 1.31 1998/02/06 00:14:48 mrg Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -44,7 +44,7 @@ struct mount;
struct vnode;
struct core;
#ifdef KGDB
#if defined(KGDB) && !defined(UVM)
void chgkprot __P((caddr_t, int, int));
#endif
@@ -63,9 +63,13 @@ int sstk __P((struct proc *, void *, int *));
#endif
void assert_wait __P((void *, boolean_t));
#if !defined(UVM)
int grow __P((struct proc *, vm_offset_t));
#endif
void iprintf __P((void (*)(const char *, ...), const char *, ...));
#if !defined(UVM)
int kernacc __P((caddr_t, int, int));
#endif
int kinfo_loadavg __P((int, char *, int *, int, int *));
int kinfo_meter __P((int, caddr_t, int *, int, int *));
vm_offset_t kmem_alloc __P((vm_map_t, vm_size_t));
@@ -78,17 +82,23 @@ vm_offset_t kmem_malloc __P((vm_map_t, vm_size_t, boolean_t));
vm_map_t kmem_suballoc __P((vm_map_t, vm_offset_t *, vm_offset_t *,
vm_size_t, boolean_t));
void loadav __P((struct loadavg *));
#if !defined(UVM)
void munmapfd __P((struct proc *, int));
#endif
int pager_cache __P((vm_object_t, boolean_t));
void sched __P((void));
#if !defined(UVM)
void scheduler __P((void));
#endif
int svm_allocate __P((struct proc *, void *, int *));
int svm_deallocate __P((struct proc *, void *, int *));
int svm_inherit __P((struct proc *, void *, int *));
int svm_protect __P((struct proc *, void *, int *));
void swapinit __P((void));
#if !defined(UVM)
void swapout __P((struct proc *));
void swapout_threads __P((void));
#endif
int swfree __P((struct proc *, int));
void swstrategy __P((struct buf *));
void thread_block __P((char *));
@@ -102,6 +112,7 @@ void thread_sleep_msg __P((void *, simple_lock_t,
* void thread_wakeup __P((void *));
*/
#define thread_wakeup wakeup
#if !defined(UVM)
int useracc __P((caddr_t, int, int));
int vm_allocate __P((vm_map_t, vm_offset_t *, vm_size_t,
boolean_t));
@@ -110,35 +121,46 @@ int vm_allocate_with_pager __P((vm_map_t, vm_offset_t *,
int vm_coredump __P((struct proc *, struct vnode *, struct ucred *,
struct core *));
int vm_deallocate __P((vm_map_t, vm_offset_t, vm_size_t));
#endif
int vm_fault __P((vm_map_t, vm_offset_t, vm_prot_t, boolean_t));
void vm_fault_copy_entry __P((vm_map_t,
vm_map_t, vm_map_entry_t, vm_map_entry_t));
void vm_fault_unwire __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_fault_wire __P((vm_map_t, vm_offset_t, vm_offset_t));
#if !defined(UVM)
void vm_fork __P((struct proc *, struct proc *, boolean_t));
#endif
int vm_inherit __P((vm_map_t,
vm_offset_t, vm_size_t, vm_inherit_t));
#if !defined(UVM)
void vm_init_limits __P((struct proc *));
#endif
void vm_mem_init __P((void));
#if !defined(UVM)
int vm_mmap __P((vm_map_t, vm_offset_t *, vm_size_t,
vm_prot_t, vm_prot_t, int, caddr_t, vm_offset_t));
#endif
int vm_protect __P((vm_map_t,
vm_offset_t, vm_size_t, boolean_t, vm_prot_t));
void vm_set_page_size __P((void));
void vmmeter __P((void));
#if !defined(UVM)
struct vmspace *vmspace_alloc __P((vm_offset_t, vm_offset_t, int));
struct vmspace *vmspace_fork __P((struct vmspace *));
void vmspace_exec __P((struct proc *));
void vmspace_free __P((struct vmspace *));
void vmspace_share __P((struct proc *, struct proc *));
void vmspace_unshare __P((struct proc *));
#endif
void vmtotal __P((struct vmtotal *));
void vnode_pager_setsize __P((struct vnode *, u_quad_t));
void vnode_pager_sync __P((struct mount *));
void vnode_pager_umount __P((struct mount *));
boolean_t vnode_pager_uncache __P((struct vnode *));
#if !defined(UVM)
void vslock __P((caddr_t, u_int));
void vsunlock __P((caddr_t, u_int));
#endif
/* Machine dependent portion */
void vmapbuf __P((struct buf *, vm_size_t));

sys/vm/vm_glue.c

@@ -1,4 +1,4 @@
/* $NetBSD: vm_glue.c,v 1.70 1998/01/31 04:02:40 ross Exp $ */
/* $NetBSD: vm_glue.c,v 1.71 1998/02/06 00:14:49 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -508,90 +508,3 @@ swapout(p)
p->p_swtime = 0;
++cnt.v_swpout;
}
/*
* The rest of these routines fake thread handling
*/
void
assert_wait(event, ruptible)
void *event;
boolean_t ruptible;
{
#ifdef lint
ruptible++;
#endif
curproc->p_thread = event;
}
void
thread_block(msg)
char *msg;
{
int s = splhigh();
if (curproc->p_thread)
tsleep(curproc->p_thread, PVM, msg, 0);
splx(s);
}
void
thread_sleep_msg(event, lock, ruptible, msg, timo)
void *event;
simple_lock_t lock;
boolean_t ruptible;
char *msg;
{
int s = splhigh();
#ifdef lint
ruptible++;
#endif
curproc->p_thread = event;
simple_unlock(lock);
if (curproc->p_thread)
tsleep(event, PVM, msg, timo);
splx(s);
}
/*
* DEBUG stuff
*/
int indent = 0;
/*
* Note that stdarg.h and the ANSI style va_start macro are used for both
* ANSI and traditional C compilers. (Same as subr_prf.c does.)
* XXX: This requires that stdarg.h defines: va_alist, va_dcl
*/
#include <machine/stdarg.h>
/*ARGSUSED2*/
void
#ifdef __STDC__
iprintf(void (*pr)(const char *, ...), const char *fmt, ...)
#else
iprintf(pr, fmt, va_alist)
void (*pr)();
const char *fmt;
va_dcl
#endif
{
register int i;
va_list ap;
va_start(ap, fmt);
for (i = indent; i >= 8; i -= 8)
(*pr)("\t");
while (--i >= 0)
(*pr)(" ");
#ifdef __powerpc__ /* XXX */
if (pr != printf) /* XXX */
panic("iprintf"); /* XXX */
vprintf(fmt, ap); /* XXX */
#else /* XXX */
(*pr)("%:", fmt, ap); /* XXX */
#endif /* __powerpc__ */ /* XXX */
va_end(ap);
}

sys/vm/vm_kern.h

@@ -1,4 +1,4 @@
/* $NetBSD: vm_kern.h,v 1.9 1994/06/29 06:48:03 cgd Exp $ */
/* $NetBSD: vm_kern.h,v 1.10 1998/02/06 00:14:51 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -66,9 +66,18 @@
/* Kernel memory management definitions. */
#if defined(UVM)
extern vm_map_t buffer_map;
extern vm_map_t exec_map;
extern vm_map_t kernel_map;
extern vm_map_t kmem_map;
extern vm_map_t mb_map;
extern vm_map_t phys_map;
#else
vm_map_t buffer_map;
vm_map_t exec_map;
vm_map_t kernel_map;
vm_map_t kmem_map;
vm_map_t mb_map;
vm_map_t phys_map;
#endif

sys/vm/vm_map.h

@@ -1,4 +1,4 @@
/* $NetBSD: vm_map.h,v 1.14 1997/07/20 23:23:45 fvdl Exp $ */
/* $NetBSD: vm_map.h,v 1.15 1998/02/06 00:14:52 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -71,6 +71,10 @@
#ifndef _VM_MAP_
#define _VM_MAP_
#ifdef UVM
#include <uvm/uvm_anon.h>
#endif
/*
* Types defined:
*
@@ -83,12 +87,17 @@
* Objects which live in maps may be either VM objects, or
* another map (called a "sharing map") which denotes read-write
* sharing with other maps.
*
* XXXCDC: private pager data goes here now
*/
union vm_map_object {
struct vm_object *vm_object; /* object object */
struct vm_map *share_map; /* share map */
struct vm_map *sub_map; /* belongs to another map */
#ifdef UVM
struct uvm_object *uvm_obj; /* UVM OBJECT */
#endif /* UVM */
};
/*
@@ -104,16 +113,30 @@ struct vm_map_entry {
vm_offset_t end; /* end address */
union vm_map_object object; /* object I point to */
vm_offset_t offset; /* offset into object */
#if defined(UVM)
/* etype is a bitmap that replaces the following 4 items */
int etype; /* entry type */
#else
boolean_t is_a_map; /* Is "object" a map? */
boolean_t is_sub_map; /* Is "object" a submap? */
/* Only in sharing maps: */
boolean_t copy_on_write; /* is data copy-on-write */
boolean_t needs_copy; /* does object need to be copied */
#endif
/* Only in task maps: */
vm_prot_t protection; /* protection code */
vm_prot_t max_protection; /* maximum protection */
vm_inherit_t inheritance; /* inheritance */
int wired_count; /* can be paged if = 0 */
#ifdef UVM
struct vm_aref aref; /* anonymous overlay */
int advice; /* madvise advice */
#define uvm_map_entry_stop_copy flags
u_int8_t flags; /* flags */
#define UVM_MAP_STATIC 0x01 /* static map entry */
#endif /* UVM */
};
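[the etype bit assignments are not spelled out in this header; as a sketch
only, the four old boolean_t fields collapse into single bits of the etype
word. the UVM_ET_* names and values below are assumptions for illustration,
not quoted from this commit.]

/* hypothetical etype bits replacing the four boolean_t fields */
#define UVM_ET_OBJ		0x01	/* object is a uvm_object */
#define UVM_ET_SUBMAP		0x02	/* was is_sub_map */
#define UVM_ET_COPYONWRITE	0x04	/* was copy_on_write */
#define UVM_ET_NEEDSCOPY	0x08	/* was needs_copy */

/* a whole boolean_t of state now tests as one mask */
#define UVM_ET_ISSUBMAP(e)	(((e)->etype & UVM_ET_SUBMAP) != 0)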
/*
@@ -199,6 +222,21 @@ typedef struct {
(map)->lk_flags &= ~LK_CANRECURSE; \
simple_unlock(&(map)->lk_interlock); \
}
#if defined(UVM) && defined(_KERNEL)
/* XXX: clean up later */
static boolean_t vm_map_lock_try __P((vm_map_t));
static __inline boolean_t vm_map_lock_try(map)
vm_map_t map;
{
if (lockmgr(&(map)->lock, LK_EXCLUSIVE|LK_NOWAIT, (void *)0, curproc) != 0)
return(FALSE);
map->timestamp++;
return(TRUE);
}
#endif
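[a sketch of the intended try-lock idiom; the caller shown is hypothetical.
on success the map lock is held exclusively and the version timestamp has
been bumped; on failure the caller backs off instead of sleeping.]

	/* illustrative only: poll for the map lock, don't sleep */
	if (vm_map_lock_try(map) == FALSE)
		return (KERN_FAILURE);	/* busy: caller retries later */
	/* ... inspect or modify the map ... */
	vm_map_unlock(map);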
/*
* Functions implemented as macros
@@ -209,7 +247,11 @@ typedef struct {
/* XXX: number of kernel maps and entries to statically allocate */
#define MAX_KMAP 10
#if 0
#define MAX_KMAPENT 500
#else
#define MAX_KMAPENT 1000 /* XXXCDC: no crash */
#endif
#ifdef _KERNEL
boolean_t vm_map_check_protection __P((vm_map_t,

sys/vm/vm_page.c

@@ -1,4 +1,4 @@
/* $NetBSD: vm_page.c,v 1.39 1998/01/31 04:02:44 ross Exp $ */
/* $NetBSD: vm_page.c,v 1.40 1998/02/06 00:14:54 mrg Exp $ */
#define VM_PAGE_ALLOC_MEMORY_STATS
@@ -173,7 +173,6 @@ simple_lock_data_t vm_page_queue_free_lock;
boolean_t vm_page_startup_initialized;
vm_page_t vm_page_array;
int vm_page_count;
#if defined(MACHINE_NEW_NONCONTIG)
/* NOTHING NEEDED HERE */
#elif defined(MACHINE_NONCONTIG)

sys/vm/vm_page.h

@@ -1,4 +1,4 @@
/* $NetBSD: vm_page.h,v 1.22 1998/01/08 23:03:27 thorpej Exp $ */
/* $NetBSD: vm_page.h,v 1.23 1998/02/06 00:14:57 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -92,23 +92,60 @@
*
* Fields in this structure are locked either by the lock on the
* object that the page belongs to (O) or by the lock on the page
* queues (P).
* queues (P) [or both].
*/
#if defined(UVM)
/*
* locking note: the mach version of this data structure had bit
* fields for the flags, and the bit fields were divided into two
* items (depending on who locked what). some time, in BSD, the bit
* fields were dumped and all the flags were lumped into one short.
* that is fine for a single threaded uniprocessor OS, but bad if you
* want to actually make use of locking (simple_lock's). so, we've
* separated things back out again.
*
* note the page structure has no lock of its own.
*/
#include <uvm/uvm_extern.h>
#include <vm/pglist.h>
#else
TAILQ_HEAD(pglist, vm_page);
#endif /* UVM */
struct vm_page {
TAILQ_ENTRY(vm_page) pageq; /* queue info for FIFO
* queue or free list (P) */
TAILQ_ENTRY(vm_page) hashq; /* hash table links (O)*/
TAILQ_ENTRY(vm_page) listq; /* pages in same object (O)*/
TAILQ_ENTRY(vm_page) pageq; /* queue info for FIFO
* queue or free list (P) */
TAILQ_ENTRY(vm_page) hashq; /* hash table links (O)*/
TAILQ_ENTRY(vm_page) listq; /* pages in same object (O)*/
vm_object_t object; /* which object am I in (O,P)*/
vm_offset_t offset; /* offset into object (O,P) */
#if !defined(UVM) /* uvm uses obju */
vm_object_t object; /* which object am I in (O,P)*/
#endif
vm_offset_t offset; /* offset into object (O,P) */
u_short wire_count; /* wired down maps refs (P) */
u_short flags; /* see below */
#if defined(UVM)
struct uvm_object *uobject; /* object (O,P) */
struct vm_anon *uanon; /* anon (O,P) */
u_short flags; /* object flags [O] */
u_short version; /* version count [O] */
u_short wire_count; /* wired down map refs [P] */
u_short pqflags; /* page queue flags [P] */
u_int loan_count; /* number of active loans
* to read: [O or P]
* to modify: [O _and_ P] */
#else
u_short wire_count; /* wired down maps refs (P) */
u_short flags; /* see below */
#endif
vm_offset_t phys_addr; /* physical address of page */
vm_offset_t phys_addr; /* physical address of page */
#if defined(UVM) && defined(UVM_PAGE_TRKOWN)
/* debugging fields to track page ownership */
pid_t owner; /* proc that set PG_BUSY */
char *owner_tag; /* why it was set busy */
#endif
};
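[a hedged sketch of the split-lock discipline the note above describes. the
lock names (uobj->vmobjlock, uvm.pageqlock) are assumptions for
illustration: flags is covered by the owning object's lock, pqflags by the
page queue lock, and each word is only touched under its own lock.]

	/* illustrative only */
	simple_lock(&uobj->vmobjlock);	/* object lock covers pg->flags */
	if ((pg->flags & PG_BUSY) == 0)
		pg->flags |= PG_BUSY;	/* claim the page */
	simple_unlock(&uobj->vmobjlock);

	simple_lock(&uvm.pageqlock);	/* queue lock covers pg->pqflags */
	if (pg->pqflags & PQ_SWAPBACKED)	/* PQ_ANON or PQ_AOBJ */
		;			/* page can be paged out to swap */
	simple_unlock(&uvm.pageqlock);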
/*
@@ -116,6 +153,38 @@ struct vm_page {
*
* Note: PG_FILLED and PG_DIRTY are added for the filesystems.
*/
#if defined(UVM)
/*
* locking rules:
* PG_ ==> locked by object lock
* PQ_ ==> locked by page queue lock
* PQ_FREE is locked by the free queue lock and is mutually exclusive with all other PQs
*
* possible deadwood: PG_FAULTING, PQ_LAUNDRY
*/
#define PG_CLEAN 0x0008 /* page has not been modified */
#define PG_BUSY 0x0010 /* page is in transit */
#define PG_WANTED 0x0020 /* someone is waiting for page */
#define PG_TABLED 0x0040 /* page is in VP table */
#define PG_FAKE 0x0200 /* page is placeholder for pagein */
#define PG_FILLED 0x0400 /* client flag to set when filled */
#define PG_DIRTY 0x0800 /* client flag to set when dirty */
#define PG_RELEASED 0x1000 /* page released while paging */
#define PG_FAULTING 0x2000 /* page is being faulted in */
#define PG_CLEANCHK 0x4000 /* clean bit has been checked */
#define PQ_FREE 0x0001 /* page is on free list */
#define PQ_INACTIVE 0x0002 /* page is in inactive list */
#define PQ_ACTIVE 0x0004 /* page is in active list */
#define PQ_LAUNDRY 0x0008 /* page is being cleaned now */
#define PQ_ANON 0x0010 /* page is part of an anon, rather
than an uvm_object */
#define PQ_AOBJ 0x0020 /* page is part of an anonymous
uvm_object */
#define PQ_SWAPBACKED (PQ_ANON|PQ_AOBJ)
#else
#define PG_INACTIVE 0x0001 /* page is in inactive list (P) */
#define PG_ACTIVE 0x0002 /* page is in active list (P) */
#define PG_LAUNDRY 0x0004 /* page is being cleaned now (P)*/
@@ -141,6 +210,7 @@ struct vm_page {
#define PG_FAULTING 0x2000 /* page is being faulted in */
#define PG_PAGEROWNED 0x4000 /* DEBUG: async paging op in progress */
#define PG_PTPAGE 0x8000 /* DEBUG: is a user page table page */
#endif
#if defined(MACHINE_NEW_NONCONTIG)
/*
@@ -201,17 +271,15 @@ struct pglist vm_page_queue_active; /* active memory queue */
extern
struct pglist vm_page_queue_inactive; /* inactive memory queue */
extern
vm_page_t vm_page_array; /* First resident page in table */
#if defined(MACHINE_NEW_NONCONTIG)
/*
* physical memory config is stored in vm_physmem.
*/
extern struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];
extern int vm_nphysseg;
extern struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];
extern int vm_nphysseg;
#else
#if defined(MACHINE_NONCONTIG)
@@ -220,6 +288,8 @@ extern
u_long first_page; /* first physical page number */
extern
int vm_page_count; /* How many pages do we manage? */
extern
vm_page_t vm_page_array; /* First resident page in table */
#define VM_PAGE_INDEX(pa) \
(pmap_page_index((pa)) - first_page)
@@ -236,11 +306,13 @@ extern
vm_offset_t first_phys_addr; /* physical address for first_page */
extern
vm_offset_t last_phys_addr; /* physical address for last_page */
extern
vm_page_t vm_page_array; /* First resident page in table */
#define VM_PAGE_INDEX(pa) \
(atop((pa)) - first_page)
#endif /* MACHINE_NONCONTIG */
#endif /* MACHINE_NONCONTIG */
#endif /* MACHINE_NEW_NONCONTIG */
/*
@@ -410,7 +482,15 @@ PHYS_TO_VM_PAGE(pa)
#endif /* (OLD) MACHINE_NONCONTIG */
#define VM_PAGE_IS_FREE(entry) ((entry)->flags & PG_FREE)
#if defined(UVM)
#define VM_PAGE_IS_FREE(entry) ((entry)->pqflags & PQ_FREE)
#else /* UVM */
#define VM_PAGE_IS_FREE(entry) ((entry)->flags & PG_FREE)
#endif /* UVM */
extern
simple_lock_data_t vm_page_queue_lock; /* lock on active and inactive

sys/vm/vm_pageout.h

@@ -1,4 +1,4 @@
/* $NetBSD: vm_pageout.h,v 1.12 1998/01/31 04:02:46 ross Exp $ */
/* $NetBSD: vm_pageout.h,v 1.13 1998/02/06 00:14:59 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -87,6 +87,7 @@ u_int32_t vm_pages_reserved; /* i.e., reserved for pageout_daemon */
* Signal pageout-daemon and wait for it.
*/
#if !defined(UVM)
#ifdef _KERNEL
void vm_wait __P((char *));
void vm_pageout __P((void));
@@ -94,3 +95,4 @@ void vm_pageout_scan __P((void));
void vm_pageout_page __P((vm_page_t, vm_object_t));
void vm_pageout_cluster __P((vm_page_t, vm_object_t));
#endif
#endif

View File

@@ -1,4 +1,4 @@
/* $NetBSD: vm_pager.h,v 1.11 1997/01/03 18:03:41 mrg Exp $ */
/* $NetBSD: vm_pager.h,v 1.12 1998/02/06 00:15:00 mrg Exp $ */
/*
* Copyright (c) 1990 University of Utah.
@@ -104,19 +104,24 @@ struct pagerops {
/*
* get/put return values
* OK operation was successful
* BAD specified data was out of the accepted range
* FAIL specified data was in range, but doesn't exist
* PEND operation was initiated but not completed
* ERROR error while accessing data that is in range and exists
* AGAIN temporary resource shortage prevented operation from happening
* OK operation was successful
* BAD specified data was out of the accepted range
* FAIL specified data was in range, but doesn't exist
* PEND operation was initiated but not completed
* ERROR error while accessing data that is in range and exists
* AGAIN temporary resource shortage prevented operation from happening
* UNLOCK unlock the map and try again
* REFAULT [uvm_fault internal use only!] unable to relock data structures,
* thus the mapping needs to be reverified before we can proceed
*/
#define VM_PAGER_OK 0
#define VM_PAGER_BAD 1
#define VM_PAGER_FAIL 2
#define VM_PAGER_PEND 3
#define VM_PAGER_ERROR 4
#define VM_PAGER_AGAIN 5
#define VM_PAGER_OK 0
#define VM_PAGER_BAD 1
#define VM_PAGER_FAIL 2
#define VM_PAGER_PEND 3
#define VM_PAGER_ERROR 4
#define VM_PAGER_AGAIN 5
#define VM_PAGER_UNLOCK 6
#define VM_PAGER_REFAULT 7
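[a hypothetical caller dispatching on these codes; per the comment above,
VM_PAGER_REFAULT is internal to uvm_fault, so ordinary callers would treat
it like a retry. the error handling here is a sketch, not definitive.]

	switch (rv) {
	case VM_PAGER_OK:
	case VM_PAGER_PEND:		/* async i/o started; done later */
		break;
	case VM_PAGER_AGAIN:		/* transient shortage: retry */
	case VM_PAGER_UNLOCK:		/* drop the map lock, then retry */
		goto retry;
	default:			/* BAD, FAIL, ERROR */
		return (error);
	}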
#ifdef _KERNEL
extern struct pagerops *dfltpagerops;

View File

@@ -1,4 +1,4 @@
/* $NetBSD: vm_param.h,v 1.13 1997/10/16 23:29:31 christos Exp $ */
/* $NetBSD: vm_param.h,v 1.14 1998/02/06 00:15:01 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -91,10 +91,17 @@ typedef int boolean_t;
* or PAGE_SHIFT. The fact they are variables is hidden here so that
* we can easily make them constant if we so desire.
*/
#if defined(UVM)
#define PAGE_SIZE uvmexp.pagesize /* size of page */
#define PAGE_MASK uvmexp.pagemask /* size of page - 1 */
#define PAGE_SHIFT uvmexp.pageshift /* bits to shift for pages */
#else
#define PAGE_SIZE cnt.v_page_size /* size of page */
#define PAGE_MASK page_mask /* size of page - 1 */
#define PAGE_SHIFT page_shift /* bits to shift for pages */
#ifdef _KERNEL
#endif
#if defined(_KERNEL) && !defined(UVM)
extern vm_size_t page_mask;
extern int page_shift;
#endif
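[since the page size is a run-time variable behind these macros, size
arithmetic goes through PAGE_MASK and PAGE_SHIFT instead of compile-time
constants. a minimal sketch of the usual idiom; the my_* names are
hypothetical, to avoid claiming the real macros defined elsewhere in this
header.]

#define my_trunc_page(x)	((vm_offset_t)(x) & ~PAGE_MASK)
#define my_round_page(x)	(((vm_offset_t)(x) + PAGE_MASK) & ~PAGE_MASK)
#define my_btop(x)		((unsigned long)(x) >> PAGE_SHIFT)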
@@ -104,6 +111,7 @@ extern int page_shift;
*/
#define VM_METER 1 /* struct vmmeter */
#define VM_LOADAVG 2 /* struct loadavg */
#if !defined(UVM)
#define VM_MAXID 3 /* number of valid vm ids */
#define CTL_VM_NAMES { \
@@ -112,6 +120,21 @@ extern int page_shift;
{ "loadavg", CTLTYPE_STRUCT }, \
}
#else
#define VM_UVMEXP 3 /* struct uvmexp */
#define VM_MAXID 4 /* number of valid vm ids */
#define CTL_VM_NAMES { \
{ 0, 0 }, \
{ "vmmeter", CTLTYPE_STRUCT }, \
{ "loadavg", CTLTYPE_STRUCT }, \
{ "uvmexp", CTLTYPE_STRUCT }, \
}
#endif
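[with the UVM branch of this table, userland can fetch struct uvmexp
through sysctl(3). a minimal hedged sketch, assuming <uvm/uvm_extern.h>
exports struct uvmexp with pagesize and free fields as in this era:]

#include <sys/param.h>
#include <sys/sysctl.h>
#include <uvm/uvm_extern.h>
#include <stdio.h>

int
main(void)
{
	struct uvmexp ue;
	int mib[2] = { CTL_VM, VM_UVMEXP };
	size_t len = sizeof(ue);

	/* mib selects the uvmexp slot added to CTL_VM_NAMES above */
	if (sysctl(mib, 2, &ue, &len, NULL, 0) == -1)
		return (1);
	(void)printf("pagesize %d, free pages %d\n", ue.pagesize, ue.free);
	return (0);
}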
/*
* Return values from the VM routines.
*/

sys/vm/vm_swap.h

@@ -1,4 +1,4 @@
/* $NetBSD: vm_swap.h,v 1.6 1997/12/02 13:47:41 pk Exp $ */
/* $NetBSD: vm_swap.h,v 1.7 1998/02/06 00:15:03 mrg Exp $ */
/*
* Copyright (c) 1995, 1996 Matthew R. Green
@@ -49,9 +49,9 @@ struct swapent {
#define SWF_INUSE 0x00000001
#define SWF_ENABLE 0x00000002
#define SWF_BUSY 0x00000004
#define SWF_FAKE 0x00000008
#ifdef _KERNEL
int sys_swapctl __P((struct proc *, void *, register_t *));
#if defined(_KERNEL) && !defined(UVM)
daddr_t swap_alloc __P((int size));
void swap_free __P((int size, daddr_t addr));
void swapinit __P((void));