don't be so aggressive w/renaming, clean for consistency, types in one place.

cgd 1994-03-17 02:51:57 +00:00
parent a148e5bcbe
commit 70dabb7d43
10 changed files with 239 additions and 218 deletions
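In rough terms, the hunks below back out the earlier upper-case macro renames (VM_PAGE_LOCK_QUEUES back to vm_page_lock_queues, PG_COW back to PG_COPYONWRITE) and gather the VM pointer typedefs into vm.h. The lock names stay macros over the same simple lock, as the vm_page.h hunk later in this commit shows:

#define vm_page_lock_queues()	simple_lock(&vm_page_queue_lock)
#define vm_page_unlock_queues()	simple_unlock(&vm_page_queue_lock)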

device_pager.c

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* from: @(#)device_pager.c 8.1 (Berkeley) 6/11/93
* $Id: device_pager.c,v 1.12 1994/01/07 17:12:51 mycroft Exp $
* $Id: device_pager.c,v 1.13 1994/03/17 02:51:57 cgd Exp $
*/
/*
@ -287,9 +287,9 @@ dev_pager_getpage(pager, m, sync)
queue_enter(&((dev_pager_t)pager->pg_data)->devp_pglist,
page, vm_page_t, pageq);
vm_object_lock(object);
VM_PAGE_LOCK_QUEUES();
vm_page_lock_queues();
vm_page_free(m);
VM_PAGE_UNLOCK_QUEUES();
vm_page_unlock_queues();
vm_page_insert(page, object, offset);
PAGE_WAKEUP(m);
if (offset + PAGE_SIZE > object->size)

vm.h

@ -31,14 +31,31 @@
* SUCH DAMAGE.
*
* from: @(#)vm.h 7.1 (Berkeley) 5/5/91
* $Id: vm.h,v 1.8 1994/01/08 01:11:21 mycroft Exp $
* $Id: vm.h,v 1.9 1994/03/17 02:52:02 cgd Exp $
*/
#ifndef _VM_VM_H_
#define _VM_VM_H_
#ifndef _VM_H_
#define _VM_H_
typedef struct pager_struct *vm_pager_t;
typedef struct vm_page *vm_page_t;
typedef int vm_inherit_t; /* XXX: inheritance codes */
union vm_map_object;
typedef union vm_map_object vm_map_object_t;
struct vm_map_entry;
typedef struct vm_map_entry *vm_map_entry_t;
struct vm_map;
typedef struct vm_map *vm_map_t;
struct vm_object;
typedef struct vm_object *vm_object_t;
struct vm_page;
typedef struct vm_page *vm_page_t;
struct pager_struct;
typedef struct pager_struct *vm_pager_t;
#include <sys/vmmeter.h>
#include <vm/queue.h> /* sys/queue.h in 4.4 */
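Gathering the typedefs here works because a pointer typedef only needs an incomplete type: the struct tag is declared first, and the full definition can live in its own header. A hypothetical illustration (struct widget and its field are made up):

struct widget;				/* incomplete type: fine */
typedef struct widget *widget_t;	/* legal: pointer size is known */
/*
 * widget_t w; w->field;  -- the dereference would not compile
 * until the full definition of struct widget is in scope.
 */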

vm_fault.c

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_fault.c 7.6 (Berkeley) 5/7/91
* $Id: vm_fault.c,v 1.10 1993/12/20 12:40:03 cgd Exp $
* $Id: vm_fault.c,v 1.11 1994/03/17 02:52:04 cgd Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@ -120,16 +120,16 @@ vm_fault(map, vaddr, fault_type, change_wiring)
*/
#define FREE_PAGE(m) { \
PAGE_WAKEUP(m); \
VM_PAGE_LOCK_QUEUES(); \
vm_page_lock_queues(); \
vm_page_free(m); \
VM_PAGE_UNLOCK_QUEUES(); \
vm_page_unlock_queues(); \
}
#define RELEASE_PAGE(m) { \
PAGE_WAKEUP(m); \
VM_PAGE_LOCK_QUEUES(); \
vm_page_lock_queues(); \
vm_page_activate(m); \
VM_PAGE_UNLOCK_QUEUES(); \
vm_page_unlock_queues(); \
}
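FREE_PAGE() and RELEASE_PAGE() above are statement macros: a brace block behind a function-like name. A common hardening, not used in this code, wraps the body in do { ... } while (0) so the macro also composes safely with a dangling if/else; a sketch of FREE_PAGE in that style:

#define FREE_PAGE(m) do { \
	PAGE_WAKEUP(m); \
	vm_page_lock_queues(); \
	vm_page_free(m); \
	vm_page_unlock_queues(); \
} while (0)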
#define UNLOCK_MAP { \
@ -267,7 +267,7 @@ thread_wakeup(&vm_pages_needed); /* XXX! */
* reach while we play with it.
*/
VM_PAGE_LOCK_QUEUES();
vm_page_lock_queues();
if (m->flags & PG_INACTIVE) {
queue_remove(&vm_page_queue_inactive, m,
vm_page_t, pageq);
@ -282,7 +282,7 @@ thread_wakeup(&vm_pages_needed); /* XXX! */
m->flags &= ~PG_ACTIVE;
vm_page_active_count--;
}
VM_PAGE_UNLOCK_QUEUES();
vm_page_unlock_queues();
/*
* Mark page busy for other threads.
@ -488,11 +488,11 @@ thread_wakeup(&vm_pages_needed); /* XXX! */
* avoid the pmap_page_protect() call.
*/
VM_PAGE_LOCK_QUEUES();
vm_page_lock_queues();
vm_page_activate(m);
vm_page_deactivate(m);
pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
VM_PAGE_UNLOCK_QUEUES();
vm_page_unlock_queues();
/*
* We no longer need the old page or object.
@ -525,7 +525,7 @@ thread_wakeup(&vm_pages_needed); /* XXX! */
}
else {
prot &= (~VM_PROT_WRITE);
m->flags |= PG_COW;
m->flags |= PG_COPYONWRITE;
}
}
@ -547,7 +547,7 @@ thread_wakeup(&vm_pages_needed); /* XXX! */
*/
if ((fault_type & VM_PROT_WRITE) == 0) {
prot &= ~VM_PROT_WRITE;
m->flags |= PG_COW;
m->flags |= PG_COPYONWRITE;
}
else {
/*
@ -703,12 +703,12 @@ thread_wakeup(&vm_pages_needed); /* XXX */
* from all pmaps. (We can't know which
* pmaps use it.)
*/
VM_PAGE_LOCK_QUEUES();
vm_page_lock_queues();
pmap_page_protect(VM_PAGE_TO_PHYS(old_m),
VM_PROT_NONE);
copy_m->flags &= ~PG_CLEAN;
vm_page_activate(copy_m); /* XXX */
VM_PAGE_UNLOCK_QUEUES();
vm_page_unlock_queues();
PAGE_WAKEUP(copy_m);
}
@ -721,7 +721,7 @@ thread_wakeup(&vm_pages_needed); /* XXX */
*/
copy_object->ref_count--;
vm_object_unlock(copy_object);
m->flags &= ~PG_COW;
m->flags &= ~PG_COPYONWRITE;
}
}
@ -790,7 +790,7 @@ thread_wakeup(&vm_pages_needed); /* XXX */
* can't mark the page write-enabled after all.
*/
prot &= retry_prot;
if (m->flags & PG_COW)
if (m->flags & PG_COPYONWRITE)
prot &= ~VM_PROT_WRITE;
}
@ -802,7 +802,7 @@ thread_wakeup(&vm_pages_needed); /* XXX */
/* XXX This distorts the meaning of the copy_on_write bit */
if (prot & VM_PROT_WRITE)
m->flags &= ~PG_COW;
m->flags &= ~PG_COPYONWRITE;
/*
* It's critically important that a wired-down page be faulted
@ -829,7 +829,7 @@ thread_wakeup(&vm_pages_needed); /* XXX */
* pageout daemon can find it.
*/
vm_object_lock(object);
VM_PAGE_LOCK_QUEUES();
vm_page_lock_queues();
if (change_wiring) {
if (wired)
vm_page_wire(m);
@ -838,7 +838,7 @@ thread_wakeup(&vm_pages_needed); /* XXX */
}
else
vm_page_activate(m);
VM_PAGE_UNLOCK_QUEUES();
vm_page_unlock_queues();
/*
* Unlock everything, and return
@ -906,7 +906,7 @@ vm_fault_unwire(map, start, end)
* get their mappings from the physical map system.
*/
VM_PAGE_LOCK_QUEUES();
vm_page_lock_queues();
for (va = start; va < end; va += PAGE_SIZE) {
pa = pmap_extract(pmap, va);
@ -916,7 +916,7 @@ vm_fault_unwire(map, start, end)
pmap_change_wiring(pmap, va, FALSE);
vm_page_unwire(PHYS_TO_VM_PAGE(pa));
}
VM_PAGE_UNLOCK_QUEUES();
vm_page_unlock_queues();
/*
* Inform the physical mapping system that the range
@ -1024,9 +1024,9 @@ vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
* Mark it no longer busy, and put it on the active list.
*/
vm_object_lock(dst_object);
VM_PAGE_LOCK_QUEUES();
vm_page_lock_queues();
vm_page_activate(dst_m);
VM_PAGE_UNLOCK_QUEUES();
vm_page_unlock_queues();
PAGE_WAKEUP(dst_m);
vm_object_unlock(dst_object);
}
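The PG_COPYONWRITE transitions in this file follow one rule: a page mapped without write permission gets marked, and the mark is cleared only once a write is actually granted. Condensed from the hunks above, with comments added:

if ((fault_type & VM_PROT_WRITE) == 0) {
	prot &= ~VM_PROT_WRITE;		/* map read-only for now */
	m->flags |= PG_COPYONWRITE;	/* next write fault must copy */
}
/* ... and later, once the handler decides a write is allowed: */
if (prot & VM_PROT_WRITE)
	m->flags &= ~PG_COPYONWRITE;	/* mark no longer needed */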

vm_inherit.h

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_inherit.h 7.2 (Berkeley) 4/21/91
* $Id: vm_inherit.h,v 1.3 1993/05/20 03:59:23 cgd Exp $
* $Id: vm_inherit.h,v 1.4 1994/03/17 02:52:09 cgd Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@ -76,8 +76,6 @@
* vm_inherit_t inheritance codes.
*/
typedef int vm_inherit_t; /* might want to change this */
/*
* Enumeration of valid values for vm_inherit_t.
*/

vm_map.h

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_map.h 7.3 (Berkeley) 4/21/91
* $Id: vm_map.h,v 1.6 1994/01/08 05:26:14 mycroft Exp $
* $Id: vm_map.h,v 1.7 1994/03/17 02:52:11 cgd Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@ -63,13 +63,13 @@
* rights to redistribute these changes.
*/
#ifndef _VM_VM_MAP_H_
#define _VM_VM_MAP_H_
/*
* Virtual memory map module definitions.
*/
#ifndef _VM_MAP_
#define _VM_MAP_
/*
* Types defined:
*
@ -90,8 +90,6 @@ union vm_map_object {
struct vm_map *sub_map; /* belongs to another map */
};
typedef union vm_map_object vm_map_object_t;
/*
* Address map entries consist of start and end addresses,
* a VM object (or sharing map) and offset into that object,
@ -117,8 +115,6 @@ struct vm_map_entry {
int wired_count; /* can be paged if = 0 */
};
typedef struct vm_map_entry *vm_map_entry_t;
/*
* Maps are doubly-linked lists of map entries, kept sorted
* by address. A single hint is provided to start
@ -143,8 +139,6 @@ struct vm_map {
#define max_offset header.end
};
typedef struct vm_map *vm_map_t;
/*
* Map versions are used to validate a previous lookup attempt.
*
@ -161,66 +155,71 @@ typedef struct {
} vm_map_version_t;
/*
* Exported procedures that operate on vm_map_t.
* Macros: vm_map_lock, etc.
* Function:
* Perform locking on the data portion of a map.
*/
boolean_t vm_map_check_protection
__P((vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t));
int vm_map_copy
__P((vm_map_t, vm_map_t, vm_offset_t, vm_size_t,
vm_offset_t, boolean_t, boolean_t));
vm_map_t vm_map_create
__P((pmap_t, vm_offset_t, vm_offset_t, boolean_t));
void vm_map_deallocate __P((vm_map_t));
int vm_map_delete __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_map_find
__P((vm_map_t, vm_object_t, vm_offset_t, vm_offset_t *,
vm_size_t, boolean_t));
int vm_map_inherit
__P((vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t));
void vm_map_init
__P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t));
int vm_map_insert
__P((vm_map_t, vm_object_t, vm_offset_t, vm_offset_t,
vm_offset_t));
int vm_map_lookup
__P((vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *,
vm_object_t *, vm_offset_t *, vm_prot_t *,
boolean_t *, boolean_t *));
void vm_map_lookup_done __P((vm_map_t, vm_map_entry_t));
boolean_t vm_map_lookup_entry
__P((vm_map_t, vm_offset_t, vm_map_entry_t *));
int vm_map_pageable
__P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t));
void vm_map_print __P((vm_map_t, boolean_t));
void _vm_map_print __P((vm_map_t, boolean_t, int (*)()));
int vm_map_protect
__P((vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t,
boolean_t));
int vm_map_remove __P((vm_map_t, vm_offset_t, vm_offset_t));
void vm_map_simplify __P((vm_map_t, vm_offset_t));
void vm_map_startup __P((void));
int vm_map_submap
__P((vm_map_t, vm_offset_t, vm_offset_t, vm_map_t));
#define vm_map_lock(map) { \
lock_write(&(map)->lock); \
(map)->timestamp++; \
}
#define vm_map_unlock(map) lock_write_done(&(map)->lock)
#define vm_map_lock_read(map) lock_read(&(map)->lock)
#define vm_map_unlock_read(map) lock_read_done(&(map)->lock)
/*
* Functions implemented as macros
*/
#define vm_map_lock(map) { lock_write(&(map)->lock); (map)->timestamp++; }
#define vm_map_unlock(map) lock_write_done(&(map)->lock)
#define vm_map_lock_read(map) lock_read(&(map)->lock)
#define vm_map_unlock_read(map) lock_read_done(&(map)->lock)
#define vm_map_min(map) ((map)->min_offset)
#define vm_map_max(map) ((map)->max_offset)
#define vm_map_pmap(map) ((map)->pmap)
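Note that vm_map_lock() does more than take the write lock: it bumps map->timestamp, which is what a saved vm_map_version_t compares against to tell whether the map changed since a lookup. A hypothetical caller, assuming map is a valid vm_map_t:

vm_map_lock(map);	/* write lock; increments map->timestamp */
/* ... insert, delete, or adjust entries ... */
vm_map_unlock(map);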
/* XXX: number of kernel maps and entries to statically allocate */
#define MAX_KMAP 10
#define MAX_KMAP 10
#define MAX_KMAPENT 1000 /* XXX 250 */
#ifdef OMIT
#define MAX_KMAPENT 500
#else /* !OMIT*/
#define MAX_KMAPENT 1000 /* 15 Aug 92*/
#endif /* !OMIT*/
#endif /* !_VM_VM_MAP_H_ */
#ifdef KERNEL
boolean_t vm_map_check_protection __P((vm_map_t,
vm_offset_t, vm_offset_t, vm_prot_t));
int vm_map_copy __P((vm_map_t, vm_map_t, vm_offset_t,
vm_size_t, vm_offset_t, boolean_t, boolean_t));
/* XXX vm_map_copy_entry */
struct pmap;
vm_map_t vm_map_create __P((struct pmap *,
vm_offset_t, vm_offset_t, boolean_t));
void vm_map_deallocate __P((vm_map_t));
int vm_map_delete __P((vm_map_t, vm_offset_t, vm_offset_t));
/* XXX vm_map_entry_* */
int vm_map_find __P((vm_map_t, vm_object_t,
vm_offset_t, vm_offset_t *, vm_size_t, boolean_t));
/* XXX vm_map_findspace */
int vm_map_inherit __P((vm_map_t,
vm_offset_t, vm_offset_t, vm_inherit_t));
void vm_map_init __P((struct vm_map *,
vm_offset_t, vm_offset_t, boolean_t));
int vm_map_insert __P((vm_map_t,
vm_object_t, vm_offset_t, vm_offset_t, vm_offset_t));
int vm_map_lookup __P((vm_map_t *, vm_offset_t, vm_prot_t,
vm_map_entry_t *, vm_object_t *, vm_offset_t *, vm_prot_t *,
boolean_t *, boolean_t *));
void vm_map_lookup_done __P((vm_map_t, vm_map_entry_t));
boolean_t vm_map_lookup_entry __P((vm_map_t,
vm_offset_t, vm_map_entry_t *));
int vm_map_pageable __P((vm_map_t,
vm_offset_t, vm_offset_t, boolean_t));
/* XXX vm_map_clean */
void vm_map_print __P((vm_map_t, boolean_t));
/* XXX what the hell is this? */
void _vm_map_print __P((vm_map_t, boolean_t, int (*)()));
int vm_map_protect __P((vm_map_t,
vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
/* XXX vm_map_reference */
int vm_map_remove __P((vm_map_t, vm_offset_t, vm_offset_t));
void vm_map_simplify __P((vm_map_t, vm_offset_t));
/* XXX vm_map_simplify_entry */
void vm_map_startup __P((void));
int vm_map_submap __P((vm_map_t,
vm_offset_t, vm_offset_t, vm_map_t));
#endif
#endif /* _VM_MAP_ */
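Every prototype above goes through __P(), the BSD wrapper from <sys/cdefs.h> that keeps headers usable with pre-ANSI compilers. Simplified from its historical definition:

#if defined(__STDC__) || defined(__cplusplus)
#define	__P(protos)	protos		/* ANSI: keep the prototype */
#else
#define	__P(protos)	()		/* K&R: drop the argument list */
#endif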

vm_object.c

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_object.c 7.4 (Berkeley) 5/7/91
* $Id: vm_object.c,v 1.16 1994/01/15 02:39:58 cgd Exp $
* $Id: vm_object.c,v 1.17 1994/03/17 02:52:19 cgd Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@ -389,7 +389,7 @@ vm_object_terminate(object)
while (!queue_end(&object->memq, (queue_entry_t) p)) {
VM_PAGE_CHECK(p);
VM_PAGE_LOCK_QUEUES();
vm_page_lock_queues();
if (p->flags & PG_ACTIVE) {
queue_remove(&vm_page_queue_active, p, vm_page_t,
pageq);
@ -403,7 +403,7 @@ vm_object_terminate(object)
p->flags &= ~PG_INACTIVE;
vm_page_inactive_count--;
}
VM_PAGE_UNLOCK_QUEUES();
vm_page_unlock_queues();
p = (vm_page_t) queue_next(&p->listq);
}
@ -428,9 +428,9 @@ vm_object_terminate(object)
VM_PAGE_CHECK(p);
VM_PAGE_LOCK_QUEUES();
vm_page_lock_queues();
vm_page_free(p);
VM_PAGE_UNLOCK_QUEUES();
vm_page_unlock_queues();
}
/*
@ -516,13 +516,13 @@ vm_object_deactivate_pages(object)
p = (vm_page_t) queue_first(&object->memq);
while (!queue_end(&object->memq, (queue_entry_t) p)) {
next = (vm_page_t) queue_next(&p->listq);
VM_PAGE_LOCK_QUEUES();
vm_page_lock_queues();
if (!(p->flags & PG_BUSY))
vm_page_deactivate(p); /* optimisation from mach 3.0 -
* andrew@werple.apana.org.au,
* Feb '93
*/
VM_PAGE_UNLOCK_QUEUES();
vm_page_unlock_queues();
p = next;
}
}
@ -630,7 +630,7 @@ vm_object_pmap_copy(object, start, end)
while (!queue_end(&object->memq, (queue_entry_t) p)) {
if ((start <= p->offset) && (p->offset < end)) {
pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ);
p->flags |= PG_COW;
p->flags |= PG_COPYONWRITE;
}
p = (vm_page_t) queue_next(&p->listq);
}
@ -727,7 +727,7 @@ vm_object_copy(src_object, src_offset, size,
p = (vm_page_t) queue_next(&p->listq)) {
if (src_offset <= p->offset &&
p->offset < src_offset + size)
p->flags |= PG_COW;
p->flags |= PG_COPYONWRITE;
}
vm_object_unlock(src_object);
@ -856,7 +856,7 @@ vm_object_copy(src_object, src_offset, size,
p = (vm_page_t) queue_first(&src_object->memq);
while (!queue_end(&src_object->memq, (queue_entry_t) p)) {
if ((new_start <= p->offset) && (p->offset < new_end))
p->flags |= PG_COW;
p->flags |= PG_COPYONWRITE;
p = (vm_page_t) queue_next(&p->listq);
}
@ -1201,15 +1201,15 @@ vm_object_collapse(object)
if (p->offset < backing_offset ||
new_offset >= size) {
VM_PAGE_LOCK_QUEUES();
vm_page_lock_queues();
vm_page_free(p);
VM_PAGE_UNLOCK_QUEUES();
vm_page_unlock_queues();
} else {
pp = vm_page_lookup(object, new_offset);
if (pp != NULL && !(pp->flags & PG_FAKE)) {
VM_PAGE_LOCK_QUEUES();
vm_page_lock_queues();
vm_page_free(p);
VM_PAGE_UNLOCK_QUEUES();
vm_page_unlock_queues();
}
else {
if (pp) {
@ -1226,9 +1226,9 @@ vm_object_collapse(object)
#else
/* may be someone waiting for it */
PAGE_WAKEUP(pp);
VM_PAGE_LOCK_QUEUES();
vm_page_lock_queues();
vm_page_free(pp);
VM_PAGE_UNLOCK_QUEUES();
vm_page_unlock_queues();
#endif
}
/*
@ -1432,9 +1432,9 @@ vm_object_page_remove(object, start, end)
next = (vm_page_t) queue_next(&p->listq);
if ((start <= p->offset) && (p->offset < end)) {
pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
VM_PAGE_LOCK_QUEUES();
vm_page_lock_queues();
vm_page_free(p);
VM_PAGE_UNLOCK_QUEUES();
vm_page_unlock_queues();
}
p = next;
}
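One traversal idiom recurs throughout this file: walk the object's resident-page queue (memq) and act on pages whose offset falls in a range. Condensed from the vm_object_pmap_copy() hunk above, reflowed into a for loop:

for (p = (vm_page_t) queue_first(&object->memq);
    !queue_end(&object->memq, (queue_entry_t) p);
    p = (vm_page_t) queue_next(&p->listq)) {
	if (start <= p->offset && p->offset < end) {
		pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ);
		p->flags |= PG_COPYONWRITE;	/* copy before next write */
	}
}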

vm_object.h

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_object.h 7.3 (Berkeley) 4/21/91
* $Id: vm_object.h,v 1.8 1994/01/08 04:59:11 mycroft Exp $
* $Id: vm_object.h,v 1.9 1994/03/17 02:52:25 cgd Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@ -103,8 +103,6 @@ struct vm_object {
#define OBJ_CANPERSIST 0x0001 /* allow to persist */
#define OBJ_INTERNAL 0x0002 /* internally created object */
typedef struct vm_object *vm_object_t;
struct vm_object_hash_entry {
queue_chain_t hash_links; /* hash chain links */
vm_object_t object; /* object we represent */

vm_page.c

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
* $Id: vm_page.c,v 1.11 1994/01/08 04:02:36 mycroft Exp $
* $Id: vm_page.c,v 1.12 1994/03/17 02:52:27 cgd Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@ -714,11 +714,11 @@ vm_page_rename(mem, new_object, new_offset)
if (mem->object == new_object)
return;
VM_PAGE_LOCK_QUEUES(); /* keep page from moving out from
vm_page_lock_queues(); /* keep page from moving out from
under pageout daemon */
vm_page_remove(mem);
vm_page_insert(mem, new_object, new_offset);
VM_PAGE_UNLOCK_QUEUES();
vm_page_unlock_queues();
}
/*

vm_page.h

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.h 7.3 (Berkeley) 4/21/91
* $Id: vm_page.h,v 1.8 1994/01/08 04:02:39 mycroft Exp $
* $Id: vm_page.h,v 1.9 1994/03/17 02:52:29 cgd Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@ -63,13 +63,13 @@
* rights to redistribute these changes.
*/
#ifndef _VM_VM_PAGE_H_
#define _VM_VM_PAGE_H_
/*
* Resident memory system definitions.
*/
#ifndef _VM_PAGE_
#define _VM_PAGE_
/*
* Management of resident (logical) pages.
*
@ -112,45 +112,49 @@ struct vm_page {
/*
* These are the flags defined for vm_page.
*
* Note: PG_FILLED and PG_DIRTY are added for the filesystems.
*/
#define PG_INACTIVE 0x0001 /* page is in inactive list (P) */
#define PG_ACTIVE 0x0002 /* page is in active list (P) */
#define PG_LAUNDRY 0x0004 /* page is being cleaned now (P)*/
#define PG_CLEAN 0x0008 /* page has not been modified */
#define PG_BUSY 0x0010 /* page is in transit (O) */
#define PG_WANTED 0x0020 /* someone is waiting for page (O) */
#define PG_TABLED 0x0040 /* page is in VP table (O) */
#define PG_COW 0x0080 /* must copy page before changing (O) */
#define PG_FICTITIOUS 0x0100 /* physical page doesn't exist (O) */
#define PG_FAKE 0x0200 /* page is placeholder for pagein (O) */
#define PG_PAGEROWNED 0x4000 /* DEBUG: async paging op in progress */
#define PG_PTPAGE 0x8000 /* DEBUG: is a user page table page */
#define PG_INACTIVE 0x0001 /* page is in inactive list (P) */
#define PG_ACTIVE 0x0002 /* page is in active list (P) */
#define PG_LAUNDRY 0x0004 /* page is being cleaned now (P)*/
#define PG_CLEAN 0x0008 /* page has not been modified */
#define PG_BUSY 0x0010 /* page is in transit (O) */
#define PG_WANTED 0x0020 /* someone is waiting for page (O) */
#define PG_TABLED 0x0040 /* page is in VP table (O) */
#define PG_COPYONWRITE 0x0080 /* must copy page before changing (O) */
#define PG_FICTITIOUS 0x0100 /* physical page doesn't exist (O) */
#define PG_FAKE 0x0200 /* page is placeholder for pagein (O) */
#define PG_FILLED 0x0400 /* client flag to set when filled */
#define PG_DIRTY 0x0800 /* client flag to set when dirty */
#define PG_PAGEROWNED 0x4000 /* DEBUG: async paging op in progress */
#define PG_PTPAGE 0x8000 /* DEBUG: is a user page table page */
#if VM_PAGE_DEBUG
#ifdef MACHINE_NONCONTIG
#ifndef MACHINE_NONCONTIG
#define VM_PAGE_CHECK(mem) { \
if ( (((unsigned int) mem) < ((unsigned int) &vm_page_array[0])) || \
if ((((unsigned int) mem) < ((unsigned int) &vm_page_array[0])) || \
(((unsigned int) mem) > \
((unsigned int) &vm_page_array[last_page-first_page])) || \
((mem->flags & (PG_ACTIVE | PG_INACTIVE)) == \
(PG_ACTIVE | PG_INACTIVE))) \
panic("vm_page_check: not valid!"); \
}
#else /* MACHINE_NONCONTIG */
#define VM_PAGE_CHECK(mem) { \
if ((((unsigned int) mem) < ((unsigned int) &vm_page_array[0])) || \
(((unsigned int) mem) > \
((unsigned int) &vm_page_array[vm_page_count])) || \
((mem->flags & (PG_ACTIVE | PG_INACTIVE)) == \
(PG_ACTIVE | PG_INACTIVE))) \
panic("vm_page_check: not valid!"); \
}
#else /* MACHINE_NONCONTIG */
#define VM_PAGE_CHECK(mem) { \
if ( (((unsigned int) mem) < ((unsigned int) &vm_page_array[0])) || \
(((unsigned int) mem) > \
((unsigned int) &vm_page_array[last_page-first_page])) || \
((mem->flags & (PG_ACTIVE | PG_INACTIVE)) == \
(PG_ACTIVE | PG_INACTIVE)) ) \
panic("vm_page_check: not valid!"); \
}
}
#endif /* MACHINE_NONCONTIG */
#else /* VM_PAGE_DEBUG */
#else /* VM_PAGE_DEBUG */
#define VM_PAGE_CHECK(mem)
#endif /* VM_PAGE_DEBUG */
#endif /* VM_PAGE_DEBUG */
#ifdef KERNEL
#ifdef KERNEL
/*
* Each pageable resident page falls into one of three lists:
*
@ -183,7 +187,8 @@ long first_page; /* first physical page number */
/* ... represented in vm_page_array */
extern
long last_page; /* last physical page number */
/* ... represented in vm_page_array */
/* [INCLUSIVE] */
extern
vm_offset_t first_phys_addr; /* physical address for first_page */
extern
@ -194,8 +199,8 @@ u_long first_page; /* first physical page number */
extern
int vm_page_count; /* How many pages do we manage? */
#endif /* MACHINE_NONCONTIG */
/* ... represented in vm_page_array */
/* XXX -- do these belong here? */
extern
int vm_page_free_count; /* How many pages are free? */
extern
@ -219,86 +224,90 @@ int vm_page_laundry_count; /* How many pages being laundered? */
#ifndef MACHINE_NONCONTIG
#define IS_VM_PHYSADDR(pa) \
((pa) >= first_phys_addr && (pa) <= last_phys_addr)
((pa) >= first_phys_addr && (pa) <= last_phys_addr)
#define PHYS_TO_VM_PAGE(pa) \
(&vm_page_array[atop(pa) - first_page ])
(&vm_page_array[atop(pa) - first_page ])
#else
#define IS_VM_PHYSADDR(pa) \
(pmap_page_index(pa) >= 0)
#define PHYS_TO_VM_PAGE(pa) \
(&vm_page_array[pmap_page_index(pa) - first_page])
#define IS_VM_PHYSADDR(pa) \
(pmap_page_index(pa) >= 0)
#define PHYS_TO_VM_PAGE(pa) \
(&vm_page_array[pmap_page_index(pa) - first_page])
#endif /* MACHINE_NONCONTIG */
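These macros bridge physical addresses and page structures; the vm_fault_unwire() hunk earlier in this commit uses them in exactly this shape (the guard against unmapped addresses is assumed here):

pa = pmap_extract(pmap, va);			/* va -> physical address */
if (IS_VM_PHYSADDR(pa))
	vm_page_unwire(PHYS_TO_VM_PAGE(pa));	/* pa -> vm_page_t */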
extern
simple_lock_data_t vm_page_queue_lock; /* lock on active and inactive
page queues */
extern
extern /* lock on free page queue */
simple_lock_data_t vm_page_queue_free_lock;
/* lock on free page queue */
/*
* Exported procedures that operate on vm_page_t.
*/
void vm_set_page_size __P((void));
#ifdef MACHINE_NONCONTIG
void vm_page_bootstrap __P((vm_offset_t *, vm_offset_t *));
vm_offset_t pmap_steal_memory __P((vm_size_t));
void pmap_startup __P((vm_offset_t *, vm_offset_t *));
#else
vm_offset_t vm_page_startup __P((vm_offset_t, vm_offset_t, vm_offset_t));
#endif
void vm_page_insert __P((vm_page_t, vm_object_t, vm_offset_t));
void vm_page_remove __P((vm_page_t));
vm_page_t vm_page_lookup __P((vm_object_t, vm_offset_t));
void vm_page_rename __P((vm_page_t, vm_object_t, vm_offset_t));
vm_page_t vm_page_alloc __P((vm_object_t, vm_offset_t));
void vm_page_free __P((vm_page_t));
void vm_page_wire __P((vm_page_t));
void vm_page_unwire __P((vm_page_t));
void vm_page_deactivate __P((vm_page_t));
void vm_page_activate __P((vm_page_t));
boolean_t vm_page_zero_fill __P((vm_page_t));
void vm_page_copy __P((vm_page_t, vm_page_t));
/*
* Functions implemented as macros
*/
#define PAGE_ASSERT_WAIT(m, interruptible) { \
(m)->flags |= PG_WANTED; \
assert_wait((int) (m), (interruptible)); \
}
#define PAGE_WAKEUP(m) { \
(m)->flags &= ~PG_BUSY; \
if ((m)->flags & PG_WANTED) { \
(m)->flags &= ~PG_WANTED; \
thread_wakeup((int) (m)); \
} \
}
#define PAGE_ASSERT_WAIT(m, interruptible) { \
(m)->flags |= PG_WANTED; \
assert_wait((int) (m), (interruptible)); \
}
#ifdef MACHINE_NONCONTIG
#define VM_PAGE_INIT(mem, obj, offset) { \
(mem)->flags = PG_BUSY | PG_CLEAN | PG_FAKE; \
if (obj) \
vm_page_insert((mem), (obj), (offset)); \
else \
(mem)->object = NULL; \
(mem)->wire_count = 0; \
}
#define PAGE_WAKEUP(m) { \
(m)->flags &= ~PG_BUSY; \
if ((m)->flags & PG_WANTED) { \
(m)->flags &= ~PG_WANTED; \
thread_wakeup((int) (m)); \
} \
}
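PAGE_ASSERT_WAIT() and PAGE_WAKEUP() are the two halves of the busy-page handshake: the waiter marks the page wanted and sleeps on the page's address, and whoever clears PG_BUSY issues the wakeup. A hedged sketch of the waiting side, assuming the Mach-style thread_block() primitive this VM code uses elsewhere:

while (m->flags & PG_BUSY) {
	PAGE_ASSERT_WAIT(m, FALSE);	/* set PG_WANTED, queue the sleep */
	vm_object_unlock(object);	/* never sleep holding the object */
	thread_block();			/* actually block */
	vm_object_lock(object);
}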
#define vm_page_lock_queues() simple_lock(&vm_page_queue_lock)
#define vm_page_unlock_queues() simple_unlock(&vm_page_queue_lock)
#define vm_page_set_modified(m) { (m)->flags &= ~PG_CLEAN; }
#ifndef MACHINE_NONCONTIG
#define VM_PAGE_INIT(mem, obj, offset) { \
(mem)->flags = PG_BUSY | PG_CLEAN | PG_FAKE; \
vm_page_insert((mem), (obj), (offset)); \
(mem)->wire_count = 0; \
}
#else /* MACHINE_NONCONTIG */
#define VM_PAGE_INIT(mem, obj, offset) { \
(mem)->flags = PG_BUSY | PG_CLEAN | PG_FAKE; \
#define VM_PAGE_INIT(mem, obj, offset) { \
(mem)->flags = PG_BUSY | PG_CLEAN | PG_FAKE; \
if (obj) \
vm_page_insert((mem), (obj), (offset)); \
(mem)->wire_count = 0; \
}
else \
(mem)->object = NULL; \
(mem)->wire_count = 0; \
}
#endif /* MACHINE_NONCONTIG */
#define VM_PAGE_LOCK_QUEUES() simple_lock(&vm_page_queue_lock)
#define VM_PAGE_UNLOCK_QUEUES() simple_unlock(&vm_page_queue_lock)
/* XXX what is this here for? */
void vm_set_page_size __P((void));
#define VM_PAGE_SET_MODIFIED(m) { (m)->flags &= ~PG_CLEAN; }
/* XXX probably should be elsewhere. */
#ifdef MACHINE_NONCONTIG
vm_offset_t pmap_steal_memory __P((vm_size_t));
void pmap_startup __P((vm_offset_t *, vm_offset_t *));
#endif
#endif /* KERNEL */
void vm_page_activate __P((vm_page_t));
vm_page_t vm_page_alloc __P((vm_object_t, vm_offset_t));
#ifdef MACHINE_NONCONTIG
void vm_page_bootstrap __P((vm_offset_t *, vm_offset_t *));
#endif
void vm_page_copy __P((vm_page_t, vm_page_t));
void vm_page_deactivate __P((vm_page_t));
void vm_page_free __P((vm_page_t));
void vm_page_insert __P((vm_page_t, vm_object_t, vm_offset_t));
vm_page_t vm_page_lookup __P((vm_object_t, vm_offset_t));
void vm_page_remove __P((vm_page_t));
void vm_page_rename __P((vm_page_t, vm_object_t, vm_offset_t));
#ifndef MACHINE_NONCONTIG
vm_offset_t vm_page_startup __P((vm_offset_t, vm_offset_t, vm_offset_t));
#endif
void vm_page_unwire __P((vm_page_t));
void vm_page_wire __P((vm_page_t));
boolean_t vm_page_zero_fill __P((vm_page_t));
#endif /* !_VM_VM_PAGE_H_ */
#endif /* KERNEL */
#endif /* !_VM_PAGE_ */

vm_pageout.c

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_pageout.c 7.4 (Berkeley) 5/7/91
* $Id: vm_pageout.c,v 1.9 1994/01/07 22:23:31 mycroft Exp $
* $Id: vm_pageout.c,v 1.10 1994/03/17 02:52:36 cgd Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@ -122,7 +122,7 @@ vm_pageout_scan()
* Acquire the resident page system lock,
* as we may be changing what's resident quite a bit.
*/
VM_PAGE_LOCK_QUEUES();
vm_page_lock_queues();
/*
* Start scanning the inactive queue for pages we can free.
@ -220,7 +220,7 @@ vm_pageout_scan()
* making a pager for it. We must
* unlock the page queues first.
*/
VM_PAGE_UNLOCK_QUEUES();
vm_page_unlock_queues();
vm_object_collapse(object);
@ -257,7 +257,7 @@ vm_pageout_scan()
vm_pager_put(pager, m, FALSE) :
VM_PAGER_FAIL;
vm_object_lock(object);
VM_PAGE_LOCK_QUEUES();
vm_page_lock_queues();
next = (vm_page_t) queue_next(&m->pageq);
switch (pageout_status) {
@ -336,7 +336,7 @@ vm_pageout_scan()
}
vm_page_pagesfreed += pages_freed;
VM_PAGE_UNLOCK_QUEUES();
vm_page_unlock_queues();
}
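The common thread in these hunks: vm_pageout_scan() holds the page-queue lock while scanning, but drops it across anything that can sleep (object collapse, pager I/O) and retakes it before touching the queues again. Condensed sketch, names from the hunks above:

vm_page_unlock_queues();		/* pager call may sleep */
pageout_status = pager ?
    vm_pager_put(pager, m, FALSE) :
    VM_PAGER_FAIL;
vm_object_lock(object);
vm_page_lock_queues();			/* retake before scanning on */
next = (vm_page_t) queue_next(&m->pageq);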
/*