convert vm system to use new queues. I'll never write code w/queues again.

cgd 1994-04-15 07:04:03 +00:00
parent a35608603b
commit e937bc9bfd
24 changed files with 423 additions and 576 deletions
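
The conversion replaces the Mach-derived <vm/queue.h> macros (queue_init, queue_enter, queue_remove, queue_first/queue_end/queue_next) with the 4.4BSD <sys/queue.h> tail queues. Below is a minimal, self-contained sketch of the new idiom as it is used throughout the hunks that follow, written in the 1994 style of touching tqh_first and tqe_next directly rather than today's TAILQ_FIRST/TAILQ_NEXT wrappers; struct item, itemlist, and link are hypothetical names, not types from this commit:

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	int value;
	TAILQ_ENTRY(item) link;		/* embedded links; replaces queue_chain_t */
};

TAILQ_HEAD(itemlist, item);		/* declares struct itemlist; replaces queue_head_t */

int
main(void)
{
	struct itemlist head;
	struct item *ip, *next;
	int i;

	TAILQ_INIT(&head);		/* was queue_init(&head) */

	for (i = 0; i < 3; i++) {
		ip = malloc(sizeof(*ip));
		ip->value = i;
		/* was queue_enter(&head, ip, struct item *, link) */
		TAILQ_INSERT_TAIL(&head, ip, link);
	}

	/*
	 * Walk and empty the list.  The old loop shape was:
	 *	ip = queue_first(&head);
	 *	while (!queue_end(&head, (queue_entry_t)ip)) ...
	 */
	for (ip = head.tqh_first; ip != NULL; ip = next) {
		next = ip->link.tqe_next;
		printf("%d\n", ip->value);
		TAILQ_REMOVE(&head, ip, link);	/* was queue_remove(...) */
		free(ip);
	}
	return 0;
}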


@ -1,5 +1,5 @@
# @(#)symbols.raw 7.6 (Berkeley) 5/8/91
# $Id: symbols.raw,v 1.2 1994/02/11 07:00:01 chopps Exp $
# $Id: symbols.raw,v 1.3 1994/04/15 07:04:03 cgd Exp $
_version
#dmesg
@ -29,9 +29,8 @@
# _ndh11
#vmstat
_cp_time
_rate
_total
_sum
_cnt
# _rectime
# _pgintime
_dk_xfer


@ -1,5 +1,5 @@
# from: @(#)symbols.raw 7.6 (Berkeley) 5/8/91
# $Id: symbols.raw,v 1.2 1993/05/22 07:57:45 cgd Exp $
# $Id: symbols.raw,v 1.3 1994/04/15 07:04:13 cgd Exp $
_version
#dmesg
@ -29,9 +29,8 @@
# _ndh11
#vmstat
_cp_time
_rate
_total
_sum
_cnt
# _rectime
# _pgintime
_dk_xfer


@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
* $Id: machdep.c,v 1.97 1994/04/08 19:15:52 mycroft Exp $
* $Id: machdep.c,v 1.98 1994/04/15 07:04:22 cgd Exp $
*/
#include <stddef.h>
@ -224,7 +224,7 @@ cpu_startup()
for (i = 1; i < ncallout; i++)
callout[i-1].c_next = &callout[i];
printf("avail mem = %d\n", ptoa(vm_page_free_count));
printf("avail mem = %d\n", ptoa(cnt.v_free_count));
printf("using %d buffers containing %d bytes of memory\n",
nbuf, bufpages * CLBYTES);


@ -1,5 +1,5 @@
# from: @(#)symbols.raw 7.6 (Berkeley) 5/8/91
# $Id: symbols.raw,v 1.3 1993/05/22 08:00:01 cgd Exp $
# $Id: symbols.raw,v 1.4 1994/04/15 07:04:26 cgd Exp $
#gdb
_IdlePTD
@ -36,9 +36,8 @@
_swapmap
#vmstat
_cp_time
_rate
_total
_sum
_cnt
# _rectime
# _pgintime
_dk_xfer


@ -1,5 +1,5 @@
# from: @(#)symbols.raw 7.6 (Berkeley) 5/8/91
# $Id: symbols.raw,v 1.1.1.1 1993/10/12 03:22:34 deraadt Exp $
# $Id: symbols.raw,v 1.2 1994/04/15 07:04:30 cgd Exp $
_version
#dmesg
@ -29,9 +29,8 @@
# _ndh11
#vmstat
_cp_time
_rate
_total
_sum
_cnt
# _rectime
# _pgintime
_dk_xfer


@ -31,57 +31,63 @@
* SUCH DAMAGE.
*
* from: @(#)vmmeter.h 7.3 (Berkeley) 5/5/91
* $Id: vmmeter.h,v 1.3 1993/05/20 16:23:51 cgd Exp $
* $Id: vmmeter.h,v 1.4 1994/04/15 07:04:36 cgd Exp $
*/
#ifndef _SYS_VMMETER_H_
#define _SYS_VMMETER_H_
/*
* Virtual memory related instrumentation
* System wide statistics counters.
*/
struct vmmeter
{
#define v_first v_swtch
struct vmmeter {
/*
* General system activity.
*/
unsigned v_swtch; /* context switches */
unsigned v_trap; /* calls to trap */
unsigned v_syscall; /* calls to syscall() */
unsigned v_intr; /* device interrupts */
unsigned v_soft; /* software interrupts */
unsigned v_pdma; /* pseudo-dma interrupts */
unsigned v_faults; /* total faults taken */
/*
* Virtual memory activity.
*/
unsigned v_lookups; /* object cache lookups */
unsigned v_hits; /* object cache hits */
unsigned v_vm_faults; /* number of address memory faults */
unsigned v_cow_faults; /* number of copy-on-writes */
unsigned v_swpin; /* swapins */
unsigned v_swpout; /* swapouts */
unsigned v_pswpin; /* pages swapped in */
unsigned v_pswpout; /* pages swapped out */
unsigned v_pgin; /* pageins */
unsigned v_pgout; /* pageouts */
unsigned v_pageins; /* number of pageins */
unsigned v_pageouts; /* number of pageouts */
unsigned v_pgpgin; /* pages paged in */
unsigned v_pgpgout; /* pages paged out */
unsigned v_intrans; /* intransit blocking page faults */
unsigned v_pgrec; /* total page reclaims */
unsigned v_xsfrec; /* found in free list rather than on swapdev */
unsigned v_xifrec; /* found in free list rather than in filsys */
unsigned v_exfod; /* pages filled on demand from executables */
unsigned v_zfod; /* pages zero filled on demand */
unsigned v_vrfod; /* fills of pages mapped by vread() */
unsigned v_nexfod; /* number of exfod's created */
unsigned v_nzfod; /* number of zfod's created */
unsigned v_nvrfod; /* number of vrfod's created */
unsigned v_pgfrec; /* page reclaims from free list */
unsigned v_faults; /* total faults taken */
unsigned v_scan; /* scans in page out daemon */
unsigned v_reactivated; /* number of pages reactivated from free list */
unsigned v_rev; /* revolutions of the hand */
unsigned v_seqfree; /* pages taken from sequential programs */
unsigned v_scan; /* scans in page out daemon */
unsigned v_dfree; /* pages freed by daemon */
unsigned v_fastpgrec; /* fast reclaims in locore */
#ifdef tahoe
unsigned v_fpe; /* floating point emulation traps */
unsigned v_align; /* alignment emulation traps */
#endif
#define v_last v_fastpgrec
unsigned v_swpin; /* swapins */
unsigned v_swpout; /* swapouts */
unsigned v_pfree; /* pages freed by exiting processes */
unsigned v_zfod; /* pages zero filled on demand */
unsigned v_nzfod; /* number of zfod's created */
/*
* Distribution of page usages.
*/
unsigned v_page_size; /* page size in bytes */
unsigned v_kernel_pages;/* number of pages in use by kernel */
unsigned v_free_target; /* number of pages desired free */
unsigned v_free_min; /* minimum number of pages desired free */
unsigned v_free_count; /* number of pages free */
unsigned v_wire_count; /* number of pages wired down */
unsigned v_active_count;/* number of pages active */
unsigned v_inactive_target; /* number of pages desired inactive */
unsigned v_inactive_count; /* number of pages inactive */
};
#ifdef KERNEL
struct vmmeter cnt, rate, sum;
struct vmmeter cnt;
#endif
/* systemwide totals computed every five seconds */
@ -96,10 +102,10 @@ struct vmtotal
long t_avm; /* active virtual memory */
long t_rm; /* total real memory in use */
long t_arm; /* active real memory */
long t_vmtxt; /* virtual memory used by text */
long t_avmtxt; /* active virtual memory used by text */
long t_rmtxt; /* real memory used by text */
long t_armtxt; /* active real memory used by text */
long t_vmshr; /* shared virtual memory */
long t_avmshr; /* active shared virtual memory */
long t_rmshr; /* shared real memory */
long t_armshr; /* active shared real memory */
long t_free; /* free memory pages */
};
#ifdef KERNEL
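
Net effect of the vmmeter.h change above: the old cnt/rate/sum triple collapses to the single systemwide cnt, counters that previously lived in vm_stat and in standalone vm_page_* globals become struct vmmeter fields, and vmtotal's per-text-segment fields give way to shared-memory ones. Here is a compilable toy of the before/after access pattern; the struct is trimmed to three of the fields above and the values are invented:

#include <stdio.h>

/* Trimmed stand-in for the post-commit struct vmmeter. */
struct vmmeter {
	unsigned v_vm_faults;	/* address-space faults taken */
	unsigned v_free_count;	/* pages currently free */
	unsigned v_free_target;	/* pages we would like free */
};

struct vmmeter cnt;		/* single systemwide instance, as in the kernel */

int
main(void)
{
	cnt.v_vm_faults++;		/* was: vm_stat.faults++ */
	cnt.v_free_count = 100;		/* was: vm_page_free_count = 100 */
	cnt.v_free_target = 120;	/* was: vm_page_free_target = 120 */

	if (cnt.v_free_count < cnt.v_free_target)
		printf("below free target: %u < %u\n",
		    cnt.v_free_count, cnt.v_free_target);
	return 0;
}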


@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* from: @(#)device_pager.c 8.1 (Berkeley) 6/11/93
* $Id: device_pager.c,v 1.13 1994/03/17 02:51:57 cgd Exp $
* $Id: device_pager.c,v 1.14 1994/04/15 07:04:40 cgd Exp $
*/
/*
@ -56,8 +56,8 @@
#include <vm/vm_page.h>
#include <vm/device_pager.h>
queue_head_t dev_pager_list; /* list of managed devices */
queue_head_t dev_pager_fakelist; /* list of available vm_page_t's */
struct pagerlst dev_pager_list; /* list of managed devices */
struct pglist dev_pager_fakelist; /* list of available vm_page_t's */
#ifdef DEBUG
int dpagerdebug = 0;
@ -95,8 +95,8 @@ dev_pager_init()
if (dpagerdebug & DDB_FOLLOW)
printf("dev_pager_init()\n");
#endif
queue_init(&dev_pager_list);
queue_init(&dev_pager_fakelist);
TAILQ_INIT(&dev_pager_list);
TAILQ_INIT(&dev_pager_fakelist);
}
static vm_pager_t
@ -172,7 +172,7 @@ top:
pager->pg_ops = &devicepagerops;
pager->pg_type = PG_DEVICE;
pager->pg_data = (caddr_t)devp;
queue_init(&devp->devp_pglist);
TAILQ_INIT(&devp->devp_pglist);
/*
* Allocate object and associate it with the pager.
*/
@ -190,7 +190,7 @@ top:
free((caddr_t)pager, M_VMPAGER);
goto top;
}
queue_enter(&dev_pager_list, pager, vm_pager_t, pg_list);
TAILQ_INSERT_TAIL(&dev_pager_list, pager, pg_list);
#ifdef DEBUG
if (dpagerdebug & DDB_ALLOC) {
printf("dev_pager_alloc: pager %x devp %x object %x\n",
@ -225,7 +225,7 @@ dev_pager_dealloc(pager)
if (dpagerdebug & DDB_FOLLOW)
printf("dev_pager_dealloc(%x)\n", pager);
#endif
queue_remove(&dev_pager_list, pager, vm_pager_t, pg_list);
TAILQ_REMOVE(&dev_pager_list, pager, pg_list);
/*
* Get the object.
* Note: cannot use vm_object_lookup since object has already
@ -240,8 +240,8 @@ dev_pager_dealloc(pager)
/*
* Free up our fake pages.
*/
while (!queue_empty(&devp->devp_pglist)) {
queue_remove_first(&devp->devp_pglist, m, vm_page_t, pageq);
while ((m = devp->devp_pglist.tqh_first) != NULL) {
TAILQ_REMOVE(&devp->devp_pglist, m, pageq);
dev_pager_putfake(m);
}
free((caddr_t)devp, M_VMPGDATA);
@ -284,8 +284,8 @@ dev_pager_getpage(pager, m, sync)
* up the original.
*/
page = dev_pager_getfake(paddr);
queue_enter(&((dev_pager_t)pager->pg_data)->devp_pglist,
page, vm_page_t, pageq);
TAILQ_INSERT_TAIL(&((dev_pager_t)pager->pg_data)->devp_pglist, page,
pageq);
vm_object_lock(object);
vm_page_lock_queues();
vm_page_free(m);
@ -333,14 +333,15 @@ dev_pager_getfake(paddr)
vm_page_t m;
int i;
if (queue_empty(&dev_pager_fakelist)) {
if (dev_pager_fakelist.tqh_first == NULL) {
m = (vm_page_t)malloc(PAGE_SIZE, M_VMPGDATA, M_WAITOK);
for (i = PAGE_SIZE / sizeof(*m); i > 0; i--) {
queue_enter(&dev_pager_fakelist, m, vm_page_t, pageq);
TAILQ_INSERT_TAIL(&dev_pager_fakelist, m, pageq);
m++;
}
}
queue_remove_first(&dev_pager_fakelist, m, vm_page_t, pageq);
m = dev_pager_fakelist.tqh_first;
TAILQ_REMOVE(&dev_pager_fakelist, m, pageq);
m->flags = PG_BUSY | PG_CLEAN | PG_FAKE | PG_FICTITIOUS;
m->phys_addr = paddr;
m->wire_count = 1;
@ -355,5 +356,5 @@ dev_pager_putfake(m)
if (!(m->flags & PG_FICTITIOUS))
panic("dev_pager_putfake: bad page");
#endif
queue_enter(&dev_pager_fakelist, m, vm_page_t, pageq);
TAILQ_INSERT_TAIL(&dev_pager_fakelist, m, pageq);
}


@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* from: @(#)device_pager.h 8.1 (Berkeley) 6/11/93
* $Id: device_pager.h,v 1.5 1993/12/20 12:39:57 cgd Exp $
* $Id: device_pager.h,v 1.6 1994/04/15 07:04:42 cgd Exp $
*/
#ifndef _DEVICE_PAGER_
@ -46,7 +46,7 @@
* Device pager private data.
*/
struct devpager {
queue_head_t devp_pglist; /* list of pages allocated */
struct pglist devp_pglist; /* list of pages allocated */
vm_object_t devp_object; /* object representing this device */
};
typedef struct devpager *dev_pager_t;


@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.h 7.4 (Berkeley) 5/7/91
* $Id: pmap.h,v 1.5 1993/08/29 12:12:20 brezak Exp $
* $Id: pmap.h,v 1.6 1994/04/15 07:04:43 cgd Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@ -63,18 +63,29 @@
* rights to redistribute these changes.
*/
#ifndef _VM_PMAP_H_
#define _VM_PMAP_H_
/*
* Machine address mapping definitions -- machine-independent
* section. [For machine-dependent section, see "machine/pmap.h".]
*/
#ifndef _PMAP_VM_
#define _PMAP_VM_
/*
* Each machine dependent implementation is expected to
* keep certain statistics. They may do this any way they
* so choose, but are expected to return the statistics
* in the following structure.
*/
struct pmap_statistics {
long resident_count; /* # of pages mapped (total)*/
long wired_count; /* # of pages wired */
};
typedef struct pmap_statistics *pmap_statistics_t;
#include <machine/pmap.h>
#ifdef KERNEL
/*
* Currently this option is used on the i386 to be able to handle the
* memory from 0-640k and 1M+.
@ -168,4 +179,4 @@ boolean_t pmap_access();
extern pmap_t kernel_pmap;
#endif
#endif /* !_VM_PMAP_H_ */
#endif /* _PMAP_VM_ */


@ -38,7 +38,7 @@
* from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
*
* from: @(#)swap_pager.c 8.1 (Berkeley) 6/11/93
* $Id: swap_pager.c,v 1.16 1994/01/13 18:20:36 cgd Exp $
* $Id: swap_pager.c,v 1.17 1994/04/15 07:04:45 cgd Exp $
*/
/*
@ -80,15 +80,19 @@ int swpagerdebug = 0x100;
#define SDB_FULL 0x080
#define SDB_ANOM 0x100
#define SDB_ANOMPANIC 0x200
#define SDB_CLUSTER 0x400
#define SDB_PARANOIA 0x800
#endif
TAILQ_HEAD(swpclean, swpagerclean);
struct swpagerclean {
queue_head_t spc_list;
int spc_flags;
struct buf *spc_bp;
sw_pager_t spc_swp;
vm_offset_t spc_kva;
vm_page_t spc_m;
TAILQ_ENTRY(swpagerclean) spc_list;
int spc_flags;
struct buf *spc_bp;
sw_pager_t spc_swp;
vm_offset_t spc_kva;
vm_page_t spc_m;
} swcleanlist[NPENDINGIO];
typedef struct swpagerclean *swp_clean_t;
@ -115,9 +119,9 @@ int swap_pager_poip; /* pageouts in progress */
int swap_pager_piip; /* pageins in progress */
#endif
queue_head_t swap_pager_inuse; /* list of pending page cleans */
queue_head_t swap_pager_free; /* list of free pager clean structs */
queue_head_t swap_pager_list; /* list of "named" anon regions */
struct swpclean swap_pager_inuse; /* list of pending page cleans */
struct swpclean swap_pager_free; /* list of free pager clean structs */
struct pagerlst swap_pager_list; /* list of "named" anon regions */
static int swap_pager_finish __P((swp_clean_t));
static void swap_pager_init __P((void));
@ -155,15 +159,15 @@ swap_pager_init()
printf("swpg_init()\n");
#endif
dfltpagerops = &swappagerops;
queue_init(&swap_pager_list);
TAILQ_INIT(&swap_pager_list);
/*
* Initialize clean lists
*/
queue_init(&swap_pager_inuse);
queue_init(&swap_pager_free);
TAILQ_INIT(&swap_pager_inuse);
TAILQ_INIT(&swap_pager_free);
for (i = 0, spc = swcleanlist; i < NPENDINGIO; i++, spc++) {
queue_enter(&swap_pager_free, spc, swp_clean_t, spc_list);
TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list);
spc->spc_flags = SPC_FREE;
}
@ -290,7 +294,7 @@ swap_pager_alloc(handle, size, prot, foff)
vm_object_t object;
swp->sw_flags = SW_NAMED;
queue_enter(&swap_pager_list, pager, vm_pager_t, pg_list);
TAILQ_INSERT_TAIL(&swap_pager_list, pager, pg_list);
/*
* Consistent with other pagers: return with object
* referenced. Can't do this with handle == NULL
@ -301,7 +305,8 @@ swap_pager_alloc(handle, size, prot, foff)
vm_object_setpager(object, pager, 0, FALSE);
} else {
swp->sw_flags = 0;
queue_init(&pager->pg_list);
pager->pg_list.tqe_next = NULL;
pager->pg_list.tqe_prev = NULL;
}
pager->pg_handle = handle;
pager->pg_ops = &swappagerops;
@ -339,7 +344,7 @@ swap_pager_dealloc(pager)
*/
swp = (sw_pager_t) pager->pg_data;
if (swp->sw_flags & SW_NAMED) {
queue_remove(&swap_pager_list, pager, vm_pager_t, pg_list);
TAILQ_REMOVE(&swap_pager_list, pager, pg_list);
swp->sw_flags &= ~SW_NAMED;
}
#ifdef DEBUG
@ -517,10 +522,10 @@ swap_pager_io(swp, m, flags)
* are available, we try again later.
*/
else if (swap_pager_clean(m, B_WRITE) ||
queue_empty(&swap_pager_free)) {
swap_pager_free.tqh_first == NULL) {
#ifdef DEBUG
if ((swpagerdebug & SDB_ANOM) &&
!queue_empty(&swap_pager_free))
swap_pager_free.tqh_first != NULL)
printf("swap_pager_io: page %x already cleaning\n", m);
#endif
return(VM_PAGER_FAIL);
@ -618,11 +623,11 @@ swap_pager_io(swp, m, flags)
*/
if ((flags & (B_READ|B_ASYNC)) == B_ASYNC) {
#ifdef DEBUG
if (queue_empty(&swap_pager_free))
if (swap_pager_free.tqh_first == NULL)
panic("swpg_io: lost spc");
#endif
queue_remove_first(&swap_pager_free,
spc, swp_clean_t, spc_list);
spc = swap_pager_free.tqh_first;
TAILQ_REMOVE(&swap_pager_free, spc, spc_list);
#ifdef DEBUG
if (spc->spc_flags != SPC_FREE)
panic("swpg_io: bad free spc");
@ -636,7 +641,7 @@ swap_pager_io(swp, m, flags)
bp->b_iodone = swap_pager_iodone;
s = splbio();
swp->sw_poip++;
queue_enter(&swap_pager_inuse, spc, swp_clean_t, spc_list);
TAILQ_INSERT_TAIL(&swap_pager_inuse, spc, spc_list);
#ifdef DEBUG
swap_pager_poip++;
@ -728,12 +733,12 @@ swap_pager_clean(m, rw)
* at splbio() to avoid conflicts with swap_pager_iodone.
*/
s = splbio();
spc = (swp_clean_t) queue_first(&swap_pager_inuse);
while (!queue_end(&swap_pager_inuse, (queue_entry_t)spc)) {
for (spc = swap_pager_inuse.tqh_first;
spc != NULL;
spc = spc->spc_list.tqe_next) {
if ((spc->spc_flags & SPC_DONE) &&
swap_pager_finish(spc)) {
queue_remove(&swap_pager_inuse, spc,
swp_clean_t, spc_list);
TAILQ_REMOVE(&swap_pager_inuse, spc, spc_list);
break;
}
if (m && m == spc->spc_m) {
@ -744,14 +749,15 @@ swap_pager_clean(m, rw)
#endif
tspc = spc;
}
spc = (swp_clean_t) queue_next(&spc->spc_list);
}
/*
* No operations done, that's all we can do for now.
*/
if (queue_end(&swap_pager_inuse, (queue_entry_t)spc))
if (spc == NULL) {
splx(s);
break;
}
splx(s);
/*
@ -768,7 +774,7 @@ swap_pager_clean(m, rw)
}
spc->spc_flags = SPC_FREE;
vm_pager_unmap_page(spc->spc_kva);
queue_enter(&swap_pager_free, spc, swp_clean_t, spc_list);
TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list);
#ifdef DEBUG
if (swpagerdebug & SDB_WRITE)
printf("swpg_clean: free spc %x\n", spc);
@ -872,14 +878,13 @@ swap_pager_iodone(bp)
printf("swpg_iodone(%x)\n", bp);
#endif
s = splbio();
spc = (swp_clean_t) queue_first(&swap_pager_inuse);
while (!queue_end(&swap_pager_inuse, (queue_entry_t)spc)) {
for (spc = swap_pager_inuse.tqh_first;
spc != NULL;
spc = spc->spc_list.tqe_next)
if (spc->spc_bp == bp)
break;
spc = (swp_clean_t) queue_next(&spc->spc_list);
}
#ifdef DEBUG
if (queue_end(&swap_pager_inuse, (queue_entry_t)spc))
if (spc == NULL)
panic("swap_pager_iodone: bp not found");
#endif
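
A pattern worth noting in swap_pager.c above: the fixed swcleanlist array is threaded onto swap_pager_free at init, entries move to swap_pager_inuse while an I/O is outstanding, and swap_pager_iodone finds them again with a linear walk. The following is a hypothetical miniature of that free/in-use list discipline (all names and sizes invented, not the kernel's):

#include <sys/queue.h>
#include <stdio.h>

#define NPENDING 4

TAILQ_HEAD(reqlist, request);

struct request {
	TAILQ_ENTRY(request) link;
	int id;
};

static struct request pool[NPENDING];		/* cf. swcleanlist */
static struct reqlist freelist, inuse;

int
main(void)
{
	struct request *r;
	int i;

	TAILQ_INIT(&freelist);
	TAILQ_INIT(&inuse);
	for (i = 0; i < NPENDING; i++) {
		pool[i].id = i;
		TAILQ_INSERT_TAIL(&freelist, &pool[i], link);
	}

	/* Start an operation: move a struct from the free list... */
	r = freelist.tqh_first;
	TAILQ_REMOVE(&freelist, r, link);
	TAILQ_INSERT_TAIL(&inuse, r, link);

	/* ...and on "completion", find it again, as swap_pager_iodone does. */
	for (r = inuse.tqh_first; r != NULL; r = r->link.tqe_next)
		if (r->id == 0)
			break;
	if (r != NULL) {
		TAILQ_REMOVE(&inuse, r, link);
		TAILQ_INSERT_TAIL(&freelist, r, link);
		printf("request %d recycled\n", r->id);
	}
	return 0;
}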


@ -31,11 +31,11 @@
* SUCH DAMAGE.
*
* from: @(#)vm.h 7.1 (Berkeley) 5/5/91
* $Id: vm.h,v 1.9 1994/03/17 02:52:02 cgd Exp $
* $Id: vm.h,v 1.10 1994/04/15 07:04:46 cgd Exp $
*/
#ifndef _VM_H_
#define _VM_H_
#ifndef VM_H
#define VM_H
typedef int vm_inherit_t; /* XXX: inheritance codes */
@ -43,30 +43,30 @@ union vm_map_object;
typedef union vm_map_object vm_map_object_t;
struct vm_map_entry;
typedef struct vm_map_entry *vm_map_entry_t;
typedef struct vm_map_entry *vm_map_entry_t;
struct vm_map;
typedef struct vm_map *vm_map_t;
struct vm_object;
struct vm_object;
typedef struct vm_object *vm_object_t;
struct vm_page;
typedef struct vm_page *vm_page_t;
struct pager_struct;
typedef struct pager_struct *vm_pager_t;
#include <sys/vmmeter.h>
#include <vm/queue.h> /* sys/queue.h in 4.4 */
#include <sys/queue.h>
#include <vm/vm_param.h>
#include <vm/lock.h>
#include <vm/vm_prot.h>
#include <vm/vm_inherit.h>
#include <vm/vm_object.h>
#include <vm/vm_statistics.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
/*
* Shareable process virtual address space.
@ -89,18 +89,4 @@ struct vmspace {
caddr_t vm_daddr; /* user virtual address of data XXX */
caddr_t vm_maxsaddr; /* user VA at max stack growth */
};
struct vmspace *vmspace_alloc __P((vm_offset_t min, vm_offset_t max,
int pageable));
struct vmspace *vmspace_fork __P((struct vmspace *));
void vmspace_free __P((struct vmspace *));
int vm_fault __P((vm_map_t, vm_offset_t, vm_prot_t, boolean_t));
void vm_fault_wire __P((vm_map_t, vm_offset_t, vm_offset_t));
void vm_fault_unwire __P((vm_map_t, vm_offset_t, vm_offset_t));
void vm_fault_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t));
void vslock __P((caddr_t, u_int));
void vsunlock __P((caddr_t, u_int, int));
#endif /* !_VM_VM_H_ */
#endif /* VM_H */


@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_fault.c 7.6 (Berkeley) 5/7/91
* $Id: vm_fault.c,v 1.11 1994/03/17 02:52:04 cgd Exp $
* $Id: vm_fault.c,v 1.12 1994/04/15 07:04:48 cgd Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@ -114,7 +114,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
vm_page_t old_m;
vm_object_t next_object;
vm_stat.faults++; /* needs lock XXX */
cnt.v_vm_faults++; /* needs lock XXX */
/*
* Recovery actions
*/
@ -269,18 +269,16 @@ thread_wakeup(&vm_pages_needed); /* XXX! */
vm_page_lock_queues();
if (m->flags & PG_INACTIVE) {
queue_remove(&vm_page_queue_inactive, m,
vm_page_t, pageq);
TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
m->flags &= ~PG_INACTIVE;
vm_page_inactive_count--;
vm_stat.reactivations++;
cnt.v_inactive_count--;
cnt.v_reactivated++;
}
if (m->flags & PG_ACTIVE) {
queue_remove(&vm_page_queue_active, m,
vm_page_t, pageq);
TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
m->flags &= ~PG_ACTIVE;
vm_page_active_count--;
cnt.v_active_count--;
}
vm_page_unlock_queues();
@ -340,7 +338,7 @@ thread_wakeup(&vm_pages_needed); /* XXX! */
*/
m = vm_page_lookup(object, offset);
vm_stat.pageins++;
cnt.v_pageins++;
m->flags &= ~PG_FAKE;
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
break;
@ -411,7 +409,7 @@ thread_wakeup(&vm_pages_needed); /* XXX! */
first_m = NULL;
vm_page_zero_fill(m);
vm_stat.zero_fill_count++;
cnt.v_zfod++;
m->flags &= ~PG_FAKE;
break;
}
@ -505,7 +503,7 @@ thread_wakeup(&vm_pages_needed); /* XXX! */
* Only use the new page below...
*/
vm_stat.cow_faults++;
cnt.v_cow_faults++;
m = first_m;
object = first_object;
offset = first_offset;


@ -287,7 +287,7 @@ vm_init_limits(p)
p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
p->p_rlimit[RLIMIT_RSS].rlim_cur = p->p_rlimit[RLIMIT_RSS].rlim_max =
ptoa(vm_page_free_count);
ptoa(cnt.v_free_count);
}
#include <vm/vm_pageout.h>
@ -354,12 +354,12 @@ noswap:
*/
size = round_page(ctob(UPAGES));
addr = (vm_offset_t) p->p_addr;
if (vm_page_free_count > atop(size)) {
if (cnt.v_free_count > atop(size)) {
#ifdef DEBUG
if (swapdebug & SDB_SWAPIN)
printf("swapin: pid %d(%s)@%x, pri %d free %d\n",
p->p_pid, p->p_comm, p->p_addr,
ppri, vm_page_free_count);
ppri, cnt.v_free_count);
#endif
vm_map_pageable(kernel_map, addr, addr+size, FALSE);
(void) splclock();
@ -377,14 +377,14 @@ noswap:
#ifdef DEBUG
if (swapdebug & SDB_FOLLOW)
printf("sched: no room for pid %d(%s), free %d\n",
p->p_pid, p->p_comm, vm_page_free_count);
p->p_pid, p->p_comm, cnt.v_free_count);
#endif
(void) splhigh();
VM_WAIT;
(void) spl0();
#ifdef DEBUG
if (swapdebug & SDB_FOLLOW)
printf("sched: room again, free %d\n", vm_page_free_count);
printf("sched: room again, free %d\n", cnt.v_free_count);
#endif
goto loop;
}
@ -445,7 +445,7 @@ swapout_threads()
* it (UPAGES pages).
*/
if (didswap == 0 &&
vm_page_free_count <= atop(round_page(ctob(UPAGES)))) {
cnt.v_free_count <= atop(round_page(ctob(UPAGES)))) {
if ((p = outp) == 0)
p = outp2;
#ifdef DEBUG
@ -468,7 +468,7 @@ swapout(p)
if (swapdebug & SDB_SWAPOUT)
printf("swapout: pid %d(%s)@%x, stat %x pri %d free %d\n",
p->p_pid, p->p_comm, p->p_addr, p->p_stat,
p->p_slptime, vm_page_free_count);
p->p_slptime, cnt.v_free_count);
#endif
size = round_page(ctob(UPAGES));
addr = (vm_offset_t) p->p_addr;


@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_init.c 7.3 (Berkeley) 4/21/91
* $Id: vm_init.c,v 1.5 1993/12/20 12:40:06 cgd Exp $
* $Id: vm_init.c,v 1.6 1994/04/15 07:04:50 cgd Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@ -94,7 +94,7 @@ void vm_mem_init()
* From here on, all physical memory is accounted for,
* and we use only virtual addresses.
*/
vm_set_page_size();
#ifndef MACHINE_NONCONTIG
virtual_avail = vm_page_startup(avail_start, avail_end, virtual_avail);
#else


@ -37,7 +37,7 @@
*
* from: Utah Hdr: vm_mmap.c 1.3 90/01/21
* from: @(#)vm_mmap.c 7.5 (Berkeley) 6/28/91
* $Id: vm_mmap.c,v 1.21 1994/04/02 08:39:55 cgd Exp $
* $Id: vm_mmap.c,v 1.22 1994/04/15 07:04:51 cgd Exp $
*/
/*
@ -57,7 +57,6 @@
#include <vm/vm.h>
#include <vm/vm_pager.h>
#include <vm/vm_prot.h>
#include <vm/vm_statistics.h>
#include <vm/vm_user.h>
#ifdef DEBUG
@ -156,7 +155,7 @@ smmap(p, uap, retval) /* XXX SHOULD BE mmap() */
* Size is implicitly rounded to a page boundary.
*/
addr = (vm_offset_t) uap->addr;
if ((flags & MAP_FIXED) && (addr & page_mask) || uap->len < 0)
if ((flags & MAP_FIXED) && (addr & PAGE_MASK) || uap->len < 0)
return(EINVAL);
size = (vm_size_t) round_page(uap->len);
if ((flags & MAP_FIXED) && (addr + size > VM_MAXUSER_ADDRESS))
@ -350,7 +349,7 @@ msync(p, uap, retval)
printf("msync(%d): addr %x len %x\n",
p->p_pid, uap->addr, uap->len);
#endif
if (((int)uap->addr & page_mask) || uap->len < 0)
if (((int)uap->addr & PAGE_MASK) || uap->len < 0)
return(EINVAL);
addr = oaddr = (vm_offset_t)uap->addr;
osize = (vm_size_t)uap->len;
@ -421,7 +420,7 @@ munmap(p, uap, retval)
p->p_pid, uap->addr, uap->len);
#endif
addr = (vm_offset_t) uap->addr;
if ((addr & page_mask) || uap->len < 0)
if ((addr & PAGE_MASK) || uap->len < 0)
return(EINVAL);
size = (vm_size_t) round_page(uap->len);
if (size == 0)
@ -474,7 +473,7 @@ mprotect(p, uap, retval)
p->p_pid, uap->addr, uap->len, uap->prot);
#endif
addr = (vm_offset_t) uap->addr;
if ((addr & page_mask) || uap->len < 0)
if ((addr & PAGE_MASK) || uap->len < 0)
return(EINVAL);
size = (vm_size_t) uap->len;
/*
@ -912,12 +911,19 @@ vm_allocate_with_pager(map, addr, size, fitit, pager, poffset, internal)
* it.
*/
object = vm_object_lookup(pager);
vm_stat.lookups++;
cnt.v_lookups++;
if (object == NULL) {
object = vm_object_allocate(size);
vm_object_enter(object, pager);
/*
* From Mike Hibler: "unnamed anonymous objects should never
* be on the hash list ... For now you can just change
* vm_allocate_with_pager to not do vm_object_enter if this
* is an internal object ..."
*/
if (!internal)
vm_object_enter(object, pager);
} else
vm_stat.hits++;
cnt.v_hits++;
if (internal)
object->flags |= OBJ_INTERNAL;
else


@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_object.c 7.4 (Berkeley) 5/7/91
* $Id: vm_object.c,v 1.17 1994/03/17 02:52:19 cgd Exp $
* $Id: vm_object.c,v 1.18 1994/04/15 07:04:53 cgd Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@ -78,7 +78,6 @@ static void _vm_object_allocate __P((vm_size_t, vm_object_t));
static void vm_object_terminate __P((vm_object_t));
static void vm_object_deactivate_pages __P((vm_object_t));
static void vm_object_cache_trim __P((void));
static void vm_object_shutdown __P((void));
static void vm_object_remove __P((vm_pager_t));
static void vm_object_cache_clear __P((void));
@ -113,8 +112,8 @@ struct vm_object kmem_object_store;
#define VM_OBJECT_HASH_COUNT 157
int vm_cache_max = 100; /* can patch if necessary */
queue_head_t vm_object_hashtable[VM_OBJECT_HASH_COUNT];
int vm_cache_max = 100; /* can patch if necessary */
struct vm_object_hash_head vm_object_hashtable[VM_OBJECT_HASH_COUNT];
long object_collapses = 0;
long object_bypasses = 0;
@ -129,14 +128,14 @@ vm_object_init()
{
register int i;
queue_init(&vm_object_cached_list);
queue_init(&vm_object_list);
TAILQ_INIT(&vm_object_cached_list);
TAILQ_INIT(&vm_object_list);
vm_object_count = 0;
simple_lock_init(&vm_cache_lock);
simple_lock_init(&vm_object_list_lock);
for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
queue_init(&vm_object_hashtable[i]);
TAILQ_INIT(&vm_object_hashtable[i]);
kernel_object = &kernel_object_store;
_vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
@ -170,7 +169,7 @@ _vm_object_allocate(size, object)
vm_size_t size;
register vm_object_t object;
{
queue_init(&object->memq);
TAILQ_INIT(&object->memq);
vm_object_lock_init(object);
object->ref_count = 1;
object->resident_page_count = 0;
@ -190,7 +189,7 @@ _vm_object_allocate(size, object)
object->shadow_offset = (vm_offset_t) 0;
simple_lock(&vm_object_list_lock);
queue_enter(&vm_object_list, object, vm_object_t, object_list);
TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
vm_object_count++;
simple_unlock(&vm_object_list_lock);
}
@ -267,10 +266,10 @@ vm_object_deallocate(object)
/*
* Check for dirty pages in object
* Print warning as this may signify kernel bugs
* pk@cs.few.eur.nl - 4/15/93
*/
p = (vm_page_t) queue_first(&object->memq);
while (!queue_end(&object->memq, (queue_entry_t) p)) {
for (p = object->memq.tqh_first;
p != NULL;
p = p->listq.tqe_next) {
VM_PAGE_CHECK(p);
if (pmap_is_modified(VM_PAGE_TO_PHYS(p)) ||
@ -279,13 +278,11 @@ vm_object_deallocate(object)
printf("vm_object_dealloc: persistent object %x isn't clean\n", object);
goto cant_persist;
}
p = (vm_page_t) queue_next(&p->listq);
}
#endif /* DIAGNOSTIC */
queue_enter(&vm_object_cached_list, object,
vm_object_t, cached_list);
TAILQ_INSERT_TAIL(&vm_object_cached_list, object,
cached_list);
vm_object_cached++;
vm_object_cache_unlock();
@ -306,23 +303,23 @@ vm_object_deallocate(object)
* Sanity check on the object hash table.
*/
{
register vm_object_hash_entry_t entry;
int i;
register vm_object_hash_entry_t entry;
int i;
for (i = 0; i < VM_OBJECT_HASH_COUNT; i++) {
queue_t bucket = &vm_object_hashtable[i];
for (i = 0; i < VM_OBJECT_HASH_COUNT; i++) {
struct vm_object_hash_head *bucket =
&vm_object_hashtable[i];
entry = (vm_object_hash_entry_t) queue_first(bucket);
while (!queue_end(bucket, (queue_entry_t) entry)) {
if (object == entry->object) {
vm_object_print(object,0);
panic("object hashtable burb");
for (entry = bucket->tqh_first;
entry != NULL;
entry = entry->hash_links.tqe_next) {
if (object == entry->object) {
vm_object_print(object,0);
panic("object hashtable burb");
}
}
entry = (vm_object_hash_entry_t)
queue_next(&entry->hash_links);
}
}
}
#endif
vm_object_cache_unlock();
@ -385,29 +382,12 @@ vm_object_terminate(object)
* before deallocating the paging space.
*/
p = (vm_page_t) queue_first(&object->memq);
while (!queue_end(&object->memq, (queue_entry_t) p)) {
for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
VM_PAGE_CHECK(p);
vm_page_lock_queues();
if (p->flags & PG_ACTIVE) {
queue_remove(&vm_page_queue_active, p, vm_page_t,
pageq);
p->flags &= ~PG_ACTIVE;
vm_page_active_count--;
}
if (p->flags & PG_INACTIVE) {
queue_remove(&vm_page_queue_inactive, p, vm_page_t,
pageq);
p->flags &= ~PG_INACTIVE;
vm_page_inactive_count--;
}
vm_page_free(p);
vm_page_unlock_queues();
p = (vm_page_t) queue_next(&p->listq);
}
vm_object_unlock(object);
if (object->paging_in_progress != 0)
panic("vm_object_deallocate: pageout in progress");
@ -423,11 +403,8 @@ vm_object_terminate(object)
vm_object_page_clean(object, 0, 0);
vm_object_unlock(object);
}
while (!queue_empty(&object->memq)) {
p = (vm_page_t) queue_first(&object->memq);
while ((p = object->memq.tqh_first) != NULL) {
VM_PAGE_CHECK(p);
vm_page_lock_queues();
vm_page_free(p);
vm_page_unlock_queues();
@ -442,7 +419,7 @@ vm_object_terminate(object)
simple_lock(&vm_object_list_lock);
queue_remove(&vm_object_list, object, vm_object_t, object_list);
TAILQ_REMOVE(&vm_object_list, object, object_list);
vm_object_count--;
simple_unlock(&vm_object_list_lock);
@ -475,8 +452,7 @@ vm_object_page_clean(object, start, end)
return;
again:
p = (vm_page_t) queue_first(&object->memq);
while (!queue_end(&object->memq, (queue_entry_t) p)) {
for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
if ((start == end || p->offset >= start && p->offset < end) &&
!(p->flags & PG_FICTITIOUS)) {
if ((p->flags & PG_CLEAN) &&
@ -495,7 +471,6 @@ again:
goto again;
}
}
p = (vm_page_t) queue_next(&p->listq);
}
}
@ -513,17 +488,11 @@ vm_object_deactivate_pages(object)
{
register vm_page_t p, next;
p = (vm_page_t) queue_first(&object->memq);
while (!queue_end(&object->memq, (queue_entry_t) p)) {
next = (vm_page_t) queue_next(&p->listq);
for (p = object->memq.tqh_first; p != NULL; p = next) {
next = p->listq.tqe_next;
vm_page_lock_queues();
if (!(p->flags & PG_BUSY))
vm_page_deactivate(p); /* optimisation from mach 3.0 -
* andrew@werple.apana.org.au,
* Feb '93
*/
vm_page_deactivate(p);
vm_page_unlock_queues();
p = next;
}
}
@ -537,7 +506,7 @@ vm_object_cache_trim()
vm_object_cache_lock();
while (vm_object_cached > vm_cache_max) {
object = (vm_object_t) queue_first(&vm_object_cached_list);
object = vm_object_cached_list.tqh_first;
vm_object_cache_unlock();
if (object != vm_object_lookup(object->pager))
@ -550,61 +519,6 @@ vm_object_cache_trim()
vm_object_cache_unlock();
}
/*
* vm_object_shutdown()
*
* Shut down the object system. Unfortunately, while we
* may be trying to do this, init is happily waiting for
* processes to exit, and therefore will be causing some objects
* to be deallocated. To handle this, we gain a fake reference
* to all objects we release paging areas for. This will prevent
* a duplicate deallocation. This routine is probably full of
* race conditions!
*/
static void
vm_object_shutdown()
{
register vm_object_t object;
/*
* Clean up the object cache *before* we screw up the reference
* counts on all of the objects.
*/
vm_object_cache_clear();
printf("free paging spaces: ");
/*
* First we gain a reference to each object so that
* no one else will deallocate them.
*/
simple_lock(&vm_object_list_lock);
object = (vm_object_t) queue_first(&vm_object_list);
while (!queue_end(&vm_object_list, (queue_entry_t) object)) {
vm_object_reference(object);
object = (vm_object_t) queue_next(&object->object_list);
}
simple_unlock(&vm_object_list_lock);
/*
* Now we deallocate all the paging areas. We don't need
* to lock anything because we've reduced to a single
* processor while shutting down. This also assumes that
* no new objects are being created.
*/
object = (vm_object_t) queue_first(&vm_object_list);
while (!queue_end(&vm_object_list, (queue_entry_t) object)) {
if (object->pager != NULL)
vm_pager_deallocate(object->pager);
object = (vm_object_t) queue_next(&object->object_list);
printf(".");
}
printf("done.\n");
}
/*
* vm_object_pmap_copy:
*
@ -626,13 +540,11 @@ vm_object_pmap_copy(object, start, end)
return;
vm_object_lock(object);
p = (vm_page_t) queue_first(&object->memq);
while (!queue_end(&object->memq, (queue_entry_t) p)) {
for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
if ((start <= p->offset) && (p->offset < end)) {
pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ);
p->flags |= PG_COPYONWRITE;
}
p = (vm_page_t) queue_next(&p->listq);
}
vm_object_unlock(object);
}
@ -657,12 +569,9 @@ vm_object_pmap_remove(object, start, end)
return;
vm_object_lock(object);
p = (vm_page_t) queue_first(&object->memq);
while (!queue_end(&object->memq, (queue_entry_t) p)) {
for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next)
if ((start <= p->offset) && (p->offset < end))
pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
p = (vm_page_t) queue_next(&p->listq);
}
vm_object_unlock(object);
}
@ -722,13 +631,10 @@ vm_object_copy(src_object, src_offset, size,
/*
* Mark all of the pages copy-on-write.
*/
for (p = (vm_page_t) queue_first(&src_object->memq);
!queue_end(&src_object->memq, (queue_entry_t)p);
p = (vm_page_t) queue_next(&p->listq)) {
for (p = src_object->memq.tqh_first; p; p = p->listq.tqe_next)
if (src_offset <= p->offset &&
p->offset < src_offset + size)
p->flags |= PG_COPYONWRITE;
}
vm_object_unlock(src_object);
*dst_object = src_object;
@ -853,12 +759,9 @@ vm_object_copy(src_object, src_offset, size,
* Mark all the affected pages of the existing object
* copy-on-write.
*/
p = (vm_page_t) queue_first(&src_object->memq);
while (!queue_end(&src_object->memq, (queue_entry_t) p)) {
for (p = src_object->memq.tqh_first; p != NULL; p = p->listq.tqe_next)
if ((new_start <= p->offset) && (p->offset < new_end))
p->flags |= PG_COPYONWRITE;
p = (vm_page_t) queue_next(&p->listq);
}
vm_object_unlock(src_object);
@ -954,22 +857,20 @@ vm_object_t
vm_object_lookup(pager)
vm_pager_t pager;
{
register queue_t bucket;
register vm_object_hash_entry_t entry;
vm_object_t object;
bucket = &vm_object_hashtable[vm_object_hash(pager)];
vm_object_cache_lock();
entry = (vm_object_hash_entry_t) queue_first(bucket);
while (!queue_end(bucket, (queue_entry_t) entry)) {
for (entry = vm_object_hashtable[vm_object_hash(pager)].tqh_first;
entry != NULL;
entry = entry->hash_links.tqe_next) {
object = entry->object;
if (object->pager == pager) {
vm_object_lock(object);
if (object->ref_count == 0) {
queue_remove(&vm_object_cached_list, object,
vm_object_t, cached_list);
TAILQ_REMOVE(&vm_object_cached_list, object,
cached_list);
vm_object_cached--;
}
object->ref_count++;
@ -977,7 +878,6 @@ vm_object_lookup(pager)
vm_object_cache_unlock();
return(object);
}
entry = (vm_object_hash_entry_t) queue_next(&entry->hash_links);
}
vm_object_cache_unlock();
@ -994,7 +894,7 @@ vm_object_enter(object, pager)
vm_object_t object;
vm_pager_t pager;
{
register queue_t bucket;
struct vm_object_hash_head *bucket;
register vm_object_hash_entry_t entry;
/*
@ -1014,7 +914,7 @@ vm_object_enter(object, pager)
object->flags |= OBJ_CANPERSIST;
vm_object_cache_lock();
queue_enter(bucket, entry, vm_object_hash_entry_t, hash_links);
TAILQ_INSERT_TAIL(bucket, entry, hash_links);
vm_object_cache_unlock();
}
@ -1030,22 +930,21 @@ static void
vm_object_remove(pager)
register vm_pager_t pager;
{
register queue_t bucket;
struct vm_object_hash_head *bucket;
register vm_object_hash_entry_t entry;
register vm_object_t object;
bucket = &vm_object_hashtable[vm_object_hash(pager)];
entry = (vm_object_hash_entry_t) queue_first(bucket);
while (!queue_end(bucket, (queue_entry_t) entry)) {
for (entry = bucket->tqh_first;
entry != NULL;
entry = entry->hash_links.tqe_next) {
object = entry->object;
if (object->pager == pager) {
queue_remove(bucket, entry, vm_object_hash_entry_t,
hash_links);
TAILQ_REMOVE(bucket, entry, hash_links);
free((caddr_t)entry, M_VMOBJHASH);
break;
}
entry = (vm_object_hash_entry_t) queue_next(&entry->hash_links);
}
}
@ -1063,8 +962,7 @@ vm_object_cache_clear()
* list of cached objects.
*/
vm_object_cache_lock();
while (!queue_empty(&vm_object_cached_list)) {
object = (vm_object_t) queue_first(&vm_object_cached_list);
while ((object = vm_object_cached_list.tqh_first) != NULL) {
vm_object_cache_unlock();
/*
@ -1184,11 +1082,7 @@ vm_object_collapse(object)
* pages that shadow them.
*/
while (!queue_empty(&backing_object->memq)) {
p = (vm_page_t)
queue_first(&backing_object->memq);
while ((p = backing_object->memq.tqh_first) != NULL) {
new_offset = (p->offset - backing_offset);
/*
@ -1307,8 +1201,8 @@ vm_object_collapse(object)
vm_object_unlock(backing_object);
simple_lock(&vm_object_list_lock);
queue_remove(&vm_object_list, backing_object,
vm_object_t, object_list);
TAILQ_REMOVE(&vm_object_list, backing_object,
object_list);
vm_object_count--;
simple_unlock(&vm_object_list_lock);
@ -1339,10 +1233,9 @@ vm_object_collapse(object)
* of pages here.
*/
p = (vm_page_t) queue_first(&backing_object->memq);
while (!queue_end(&backing_object->memq,
(queue_entry_t) p)) {
for (p = backing_object->memq.tqh_first;
p != NULL;
p = p->listq.tqe_next) {
new_offset = (p->offset - backing_offset);
/*
@ -1365,7 +1258,6 @@ vm_object_collapse(object)
vm_object_unlock(backing_object);
return;
}
p = (vm_page_t) queue_next(&p->listq);
}
/*
@ -1427,16 +1319,14 @@ vm_object_page_remove(object, start, end)
if (object == NULL)
return;
p = (vm_page_t) queue_first(&object->memq);
while (!queue_end(&object->memq, (queue_entry_t) p)) {
next = (vm_page_t) queue_next(&p->listq);
for (p = object->memq.tqh_first; p != NULL; p = next) {
next = p->listq.tqe_next;
if ((start <= p->offset) && (p->offset < end)) {
pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
vm_page_lock_queues();
vm_page_free(p);
vm_page_unlock_queues();
}
p = next;
}
}
@ -1566,15 +1456,14 @@ _vm_object_print(object, full, pr)
(int) object->pager, (int) object->paging_offset,
(int) object->shadow, (int) object->shadow_offset);
(*pr)("cache: next=0x%x, prev=0x%x\n",
object->cached_list.next, object->cached_list.prev);
object->cached_list.tqe_next, object->cached_list.tqe_prev);
if (!full)
return;
indent += 2;
count = 0;
p = (vm_page_t) queue_first(&object->memq);
while (!queue_end(&object->memq, (queue_entry_t) p)) {
for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
if (count == 0)
iprintf(pr, "memory:=");
else if (count == 6) {
@ -1586,7 +1475,6 @@ _vm_object_print(object, full, pr)
count++;
(*pr)("(off=0x%x,page=0x%x)", p->offset, VM_PAGE_TO_PHYS(p));
p = (vm_page_t) queue_next(&p->listq);
}
if (count != 0)
(*pr)("\n");
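
vm_object_hashtable above is now an array of typed tail-queue heads, one chain per bucket, searched with the same tqh_first/tqe_next walk seen in vm_object_lookup. A self-contained sketch of that bucket-array pattern, under invented names:

#include <stddef.h>
#include <sys/queue.h>

#define NBUCKETS 157	/* same bucket count the kernel uses above */

TAILQ_HEAD(bucket_head, entry_sketch);

struct entry_sketch {
	TAILQ_ENTRY(entry_sketch) hash_links;
	unsigned key;
};

static struct bucket_head table[NBUCKETS];

void
table_init(void)
{
	int i;

	for (i = 0; i < NBUCKETS; i++)
		TAILQ_INIT(&table[i]);
}

void
table_enter(struct entry_sketch *e)
{
	TAILQ_INSERT_TAIL(&table[e->key % NBUCKETS], e, hash_links);
}

struct entry_sketch *
table_lookup(unsigned key)
{
	struct entry_sketch *e;

	for (e = table[key % NBUCKETS].tqh_first; e != NULL;
	    e = e->hash_links.tqe_next)
		if (e->key == key)
			return (e);
	return (NULL);
}

int
main(void)
{
	static struct entry_sketch e1 = { .key = 42 };

	table_init();
	table_enter(&e1);
	return (table_lookup(42) == &e1 ? 0 : 1);
}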


@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_object.h 7.3 (Berkeley) 4/21/91
* $Id: vm_object.h,v 1.9 1994/03/17 02:52:25 cgd Exp $
* $Id: vm_object.h,v 1.10 1994/04/15 07:04:54 cgd Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@ -70,6 +70,7 @@
* Virtual memory object module definitions.
*/
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
/*
@ -79,8 +80,8 @@
*/
struct vm_object {
queue_chain_t memq; /* Resident memory */
queue_chain_t object_list; /* list of all objects */
struct pglist memq; /* Resident memory */
TAILQ_ENTRY(vm_object) object_list; /* list of all objects */
simple_lock_data_t Lock; /* Synchronization */
int ref_count; /* How many refs?? */
vm_size_t size; /* Object size */
@ -96,26 +97,32 @@ struct vm_object {
/* Paging (in or out) - don't
collapse or destroy */
u_short flags; /* object flags; see below */
queue_chain_t cached_list; /* for persistence */
TAILQ_ENTRY(vm_object) cached_list; /* for persistence */
};
/* Object flags */
/*
* Flags
*/
#define OBJ_CANPERSIST 0x0001 /* allow to persist */
#define OBJ_INTERNAL 0x0002 /* internally created object */
#define OBJ_ACTIVE 0x0004 /* used to mark active objects */
TAILQ_HEAD(vm_object_hash_head, vm_object_hash_entry);
struct vm_object_hash_entry {
queue_chain_t hash_links; /* hash chain links */
vm_object_t object; /* object we represent */
TAILQ_ENTRY(vm_object_hash_entry) hash_links; /* hash chain links */
vm_object_t object; /* object represented */
};
typedef struct vm_object_hash_entry *vm_object_hash_entry_t;
#ifdef KERNEL
queue_head_t vm_object_cached_list; /* list of objects persisting */
TAILQ_HEAD(object_q, vm_object);
struct object_q vm_object_cached_list; /* list of objects persisting */
int vm_object_cached; /* size of cached list */
simple_lock_data_t vm_cache_lock; /* lock for object cache */
queue_head_t vm_object_list; /* list of allocated objects */
struct object_q vm_object_list; /* list of allocated objects */
long vm_object_count; /* count of all objects */
simple_lock_data_t vm_object_list_lock;
/* lock for object list and count */


@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
* $Id: vm_page.c,v 1.12 1994/03/17 02:52:27 cgd Exp $
* $Id: vm_page.c,v 1.13 1994/04/15 07:04:57 cgd Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@ -91,18 +91,14 @@ vm_offset_t virtual_space_start;
vm_offset_t virtual_space_end;
#endif /* MACHINE_NONCONTIG */
queue_head_t *vm_page_buckets; /* Array of buckets */
struct pglist *vm_page_buckets; /* Array of buckets */
int vm_page_bucket_count = 0; /* How big is array? */
int vm_page_hash_mask; /* Mask for hash function */
simple_lock_data_t bucket_lock; /* lock for all buckets XXX */
vm_size_t page_size = 4096;
vm_size_t page_mask = 4095;
int page_shift = 12;
queue_head_t vm_page_queue_free;
queue_head_t vm_page_queue_active;
queue_head_t vm_page_queue_inactive;
struct pglist vm_page_queue_free;
struct pglist vm_page_queue_active;
struct pglist vm_page_queue_inactive;
simple_lock_data_t vm_page_queue_lock;
simple_lock_data_t vm_page_queue_free_lock;
@ -117,17 +113,8 @@ vm_offset_t last_phys_addr;
u_long first_page;
int vm_page_count;
#endif /* MACHINE_NONCONTIG */
int vm_page_free_count;
int vm_page_active_count;
int vm_page_inactive_count;
int vm_page_wire_count;
int vm_page_laundry_count;
int vm_page_free_target = 0;
int vm_page_free_min = 0;
int vm_page_inactive_target = 0;
int vm_page_free_reserved = 0;
vm_size_t page_mask;
int page_shift;
/*
* vm_set_page_size:
@ -141,13 +128,14 @@ int vm_page_free_reserved = 0;
void
vm_set_page_size()
{
page_mask = page_size - 1;
if ((page_mask & page_size) != 0)
if (cnt.v_page_size == 0)
cnt.v_page_size = DEFAULT_PAGE_SIZE;
page_mask = cnt.v_page_size - 1;
if ((page_mask & cnt.v_page_size) != 0)
panic("vm_set_page_size: page size not a power of two");
for (page_shift = 0; ; page_shift++)
if ((1 << page_shift) == page_size)
if ((1 << page_shift) == cnt.v_page_size)
break;
}
@ -169,6 +157,7 @@ vm_page_bootstrap(startp, endp)
vm_offset_t *endp;
{
int i;
register struct pglist *bucket;
extern vm_offset_t kentry_data;
extern vm_size_t kentry_data_size;
@ -186,9 +175,9 @@ vm_page_bootstrap(startp, endp)
* the active queue and the inactive queue.
*/
queue_init(&vm_page_queue_free);
queue_init(&vm_page_queue_active);
queue_init(&vm_page_queue_inactive);
TAILQ_INIT(&vm_page_queue_free);
TAILQ_INIT(&vm_page_queue_active);
TAILQ_INIT(&vm_page_queue_inactive);
/*
* Pre-allocate maps and map entries that cannot be dynamically
@ -235,15 +224,15 @@ vm_page_bootstrap(startp, endp)
vm_page_hash_mask = vm_page_bucket_count - 1;
vm_page_buckets = (queue_t)
pmap_steal_memory(vm_page_bucket_count * sizeof(*vm_page_buckets));
for (i = 0; i < vm_page_bucket_count; i++) {
register queue_head_t *bucket = &vm_page_buckets[i];
queue_init(bucket);
vm_page_buckets = (struct pglist *)
pmap_steal_memory(vm_page_bucket_count * sizeof(*vm_page_buckets));
bucket = vm_page_buckets;
for (i = vm_page_bucket_count; i--;) {
TAILQ_INIT(bucket);
bucket++;
}
simple_lock_init(&bucket_lock);
/*
@ -281,14 +270,13 @@ vm_page_startup(start, end, vaddr)
vm_offset_t end;
register vm_offset_t vaddr;
{
register vm_offset_t mapped;
register vm_page_t m;
register queue_t bucket;
register vm_offset_t mapped;
register struct pglist *bucket;
vm_size_t npages;
register vm_offset_t new_start;
int i;
vm_offset_t pa;
extern vm_offset_t kentry_data;
extern vm_size_t kentry_data_size;
@ -305,9 +293,9 @@ vm_page_startup(start, end, vaddr)
* the active queue and the inactive queue.
*/
queue_init(&vm_page_queue_free);
queue_init(&vm_page_queue_active);
queue_init(&vm_page_queue_inactive);
TAILQ_INIT(&vm_page_queue_free);
TAILQ_INIT(&vm_page_queue_active);
TAILQ_INIT(&vm_page_queue_inactive);
/*
* Allocate (and initialize) the hash table buckets.
@ -320,8 +308,7 @@ vm_page_startup(start, end, vaddr)
* This computation can be tweaked if desired.
*/
vm_page_buckets = (queue_t) vaddr;
bucket = vm_page_buckets;
vm_page_buckets = (struct pglist *) vaddr;
if (vm_page_bucket_count == 0) {
vm_page_bucket_count = 1;
while (vm_page_bucket_count < atop(end - start))
@ -334,7 +321,7 @@ vm_page_startup(start, end, vaddr)
* Validate these addresses.
*/
new_start = round_page(((queue_t)start) + vm_page_bucket_count);
new_start = round_page(((struct pglist *)start) + vm_page_bucket_count);
mapped = vaddr;
vaddr = pmap_map(mapped, start, new_start,
VM_PROT_READ|VM_PROT_WRITE);
@ -342,8 +329,9 @@ vm_page_startup(start, end, vaddr)
bzero((caddr_t) mapped, vaddr - mapped);
mapped = vaddr;
bucket = vm_page_buckets;
for (i = vm_page_bucket_count; i--;) {
queue_init(bucket);
TAILQ_INIT(bucket);
bucket++;
}
@ -389,7 +377,7 @@ vm_page_startup(start, end, vaddr)
* of a page structure per page).
*/
vm_page_free_count = npages =
cnt.v_free_count = npages =
(end - start + sizeof(struct vm_page))/(PAGE_SIZE + sizeof(struct vm_page));
/*
@ -404,7 +392,7 @@ vm_page_startup(start, end, vaddr)
last_page = first_page + npages - 1;
first_phys_addr = ptoa(first_page);
last_phys_addr = ptoa(last_page) + page_mask;
last_phys_addr = ptoa(last_page) + PAGE_MASK;
/*
* Validate these addresses.
@ -423,7 +411,7 @@ vm_page_startup(start, end, vaddr)
pa = first_phys_addr;
while (npages--) {
m->phys_addr = pa;
queue_enter(&vm_page_queue_free, m, vm_page_t, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
m++;
pa += PAGE_SIZE;
}
@ -448,12 +436,17 @@ pmap_steal_memory(size)
vm_size_t size;
{
vm_offset_t addr, vaddr, paddr;
#ifdef i386 /* XXX i386 calls pmap_steal_memory before vm_mem_init() */
if (cnt.v_page_size == 0) /* XXX */
vm_set_page_size();
#endif
/*
* We round the size to an integer multiple.
*/
size = (size + 3) &~ 3;
size = (size + 3) &~ 3; /* XXX */
/*
* If this is the first call to pmap_steal_memory,
@ -568,7 +561,7 @@ vm_page_insert(mem, object, offset)
register vm_object_t object;
register vm_offset_t offset;
{
register queue_t bucket;
register struct pglist *bucket;
int spl;
VM_PAGE_CHECK(mem);
@ -590,7 +583,7 @@ vm_page_insert(mem, object, offset)
bucket = &vm_page_buckets[vm_page_hash(object, offset)];
spl = splimp();
simple_lock(&bucket_lock);
queue_enter(bucket, mem, vm_page_t, hashq);
TAILQ_INSERT_TAIL(bucket, mem, hashq);
simple_unlock(&bucket_lock);
(void) splx(spl);
@ -598,7 +591,7 @@ vm_page_insert(mem, object, offset)
* Now link into the object's list of backed pages.
*/
queue_enter(&object->memq, mem, vm_page_t, listq);
TAILQ_INSERT_TAIL(&object->memq, mem, listq);
mem->flags |= PG_TABLED;
/*
@ -621,7 +614,7 @@ void
vm_page_remove(mem)
register vm_page_t mem;
{
register queue_t bucket;
register struct pglist *bucket;
int spl;
VM_PAGE_CHECK(mem);
@ -636,7 +629,7 @@ vm_page_remove(mem)
bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
spl = splimp();
simple_lock(&bucket_lock);
queue_remove(bucket, mem, vm_page_t, hashq);
TAILQ_REMOVE(bucket, mem, hashq);
simple_unlock(&bucket_lock);
(void) splx(spl);
@ -644,7 +637,7 @@ vm_page_remove(mem)
* Now remove from the object's list of backed pages.
*/
queue_remove(&mem->object->memq, mem, vm_page_t, listq);
TAILQ_REMOVE(&mem->object->memq, mem, listq);
/*
* And show that the object has one fewer resident
@ -670,7 +663,7 @@ vm_page_lookup(object, offset)
register vm_offset_t offset;
{
register vm_page_t mem;
register queue_t bucket;
register struct pglist *bucket;
int spl;
/*
@ -681,15 +674,13 @@ vm_page_lookup(object, offset)
spl = splimp();
simple_lock(&bucket_lock);
mem = (vm_page_t) queue_first(bucket);
while (!queue_end(bucket, (queue_entry_t) mem)) {
for (mem = bucket->tqh_first; mem != NULL; mem = mem->hashq.tqe_next) {
VM_PAGE_CHECK(mem);
if ((mem->object == object) && (mem->offset == offset)) {
simple_unlock(&bucket_lock);
splx(spl);
return(mem);
}
mem = (vm_page_t) queue_next(&mem->hashq);
}
simple_unlock(&bucket_lock);
@ -739,23 +730,16 @@ vm_page_alloc(object, offset)
spl = splimp(); /* XXX */
simple_lock(&vm_page_queue_free_lock);
if ( object != kernel_object &&
object != kmem_object &&
vm_page_free_count <= vm_page_free_reserved) {
simple_unlock(&vm_page_queue_free_lock);
splx(spl);
return(NULL);
}
if (queue_empty(&vm_page_queue_free)) {
if (vm_page_queue_free.tqh_first == NULL) {
simple_unlock(&vm_page_queue_free_lock);
splx(spl);
return(NULL);
}
queue_remove_first(&vm_page_queue_free, mem, vm_page_t, pageq);
mem = vm_page_queue_free.tqh_first;
TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
vm_page_free_count--;
cnt.v_free_count--;
simple_unlock(&vm_page_queue_free_lock);
splx(spl);
@ -772,9 +756,9 @@ vm_page_alloc(object, offset)
* it doesn't really matter.
*/
if ((vm_page_free_count < vm_page_free_min) ||
((vm_page_free_count < vm_page_free_target) &&
(vm_page_inactive_count < vm_page_inactive_target)))
if ((cnt.v_free_count < cnt.v_free_min) ||
((cnt.v_free_count < cnt.v_free_target) &&
(cnt.v_inactive_count < cnt.v_inactive_target)))
thread_wakeup(&vm_pages_needed);
return(mem);
}
@ -793,15 +777,15 @@ vm_page_free(mem)
{
vm_page_remove(mem);
if (mem->flags & PG_ACTIVE) {
queue_remove(&vm_page_queue_active, mem, vm_page_t, pageq);
TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
mem->flags &= ~PG_ACTIVE;
vm_page_active_count--;
cnt.v_active_count--;
}
if (mem->flags & PG_INACTIVE) {
queue_remove(&vm_page_queue_inactive, mem, vm_page_t, pageq);
TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
mem->flags &= ~PG_INACTIVE;
vm_page_inactive_count--;
cnt.v_inactive_count--;
}
if (!(mem->flags & PG_FICTITIOUS)) {
@ -809,9 +793,9 @@ vm_page_free(mem)
spl = splimp();
simple_lock(&vm_page_queue_free_lock);
queue_enter(&vm_page_queue_free, mem, vm_page_t, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);
vm_page_free_count++;
cnt.v_free_count++;
simple_unlock(&vm_page_queue_free_lock);
splx(spl);
}
@ -834,18 +818,16 @@ vm_page_wire(mem)
if (mem->wire_count == 0) {
if (mem->flags & PG_ACTIVE) {
queue_remove(&vm_page_queue_active, mem, vm_page_t,
pageq);
vm_page_active_count--;
TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
cnt.v_active_count--;
mem->flags &= ~PG_ACTIVE;
}
if (mem->flags & PG_INACTIVE) {
queue_remove(&vm_page_queue_inactive, mem, vm_page_t,
pageq);
vm_page_inactive_count--;
TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
cnt.v_inactive_count--;
mem->flags &= ~PG_INACTIVE;
}
vm_page_wire_count++;
cnt.v_wire_count++;
}
mem->wire_count++;
}
@ -866,10 +848,10 @@ vm_page_unwire(mem)
mem->wire_count--;
if (mem->wire_count == 0) {
queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
vm_page_active_count++;
TAILQ_INSERT_TAIL(&vm_page_queue_active, mem, pageq);
cnt.v_active_count++;
mem->flags |= PG_ACTIVE;
vm_page_wire_count--;
cnt.v_wire_count--;
}
}
@ -901,13 +883,13 @@ vm_page_deactivate(m)
if (!(m->flags & PG_INACTIVE) && m->wire_count == 0) {
pmap_clear_reference(VM_PAGE_TO_PHYS(m));
if (m->flags & PG_ACTIVE) {
queue_remove(&vm_page_queue_active, m, vm_page_t, pageq);
TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
m->flags &= ~PG_ACTIVE;
vm_page_active_count--;
cnt.v_active_count--;
}
queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
m->flags |= PG_INACTIVE;
vm_page_inactive_count++;
cnt.v_inactive_count++;
if (pmap_is_modified(VM_PAGE_TO_PHYS(m)))
m->flags &= ~PG_CLEAN;
if (m->flags & PG_CLEAN)
@ -931,18 +913,17 @@ vm_page_activate(m)
VM_PAGE_CHECK(m);
if (m->flags & PG_INACTIVE) {
queue_remove(&vm_page_queue_inactive, m, vm_page_t,
pageq);
vm_page_inactive_count--;
TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
cnt.v_inactive_count--;
m->flags &= ~PG_INACTIVE;
}
if (m->wire_count == 0) {
if (m->flags & PG_ACTIVE)
panic("vm_page_activate: already active");
queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
m->flags |= PG_ACTIVE;
vm_page_active_count++;
cnt.v_active_count++;
}
}

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.h 7.3 (Berkeley) 4/21/91
* $Id: vm_page.h,v 1.9 1994/03/17 02:52:29 cgd Exp $
* $Id: vm_page.h,v 1.10 1994/04/15 07:04:58 cgd Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@ -95,19 +95,21 @@
* queues (P).
*/
TAILQ_HEAD(pglist, vm_page);
struct vm_page {
queue_chain_t pageq; /* queue info for FIFO
* queue or free list (P) */
queue_chain_t hashq; /* hash table links (O)*/
queue_chain_t listq; /* all pages in same object (O)*/
TAILQ_ENTRY(vm_page) pageq; /* queue info for FIFO
* queue or free list (P) */
TAILQ_ENTRY(vm_page) hashq; /* hash table links (O)*/
TAILQ_ENTRY(vm_page) listq; /* pages in same object (O)*/
vm_object_t object; /* which object am I in (O,P)*/
vm_offset_t offset; /* offset into that object (O,P) */
vm_object_t object; /* which object am I in (O,P)*/
vm_offset_t offset; /* offset into object (O,P) */
u_short wire_count; /* number wired down maps use me? (P) */
u_short flags; /* flags; see below */
u_short wire_count; /* wired down maps refs (P) */
u_short flags; /* see below */
vm_offset_t phys_addr; /* physical address of page */
vm_offset_t phys_addr; /* physical address of page */
};
/*
@ -172,11 +174,11 @@ struct vm_page {
*/
extern
queue_head_t vm_page_queue_free; /* memory free queue */
struct pglist vm_page_queue_free; /* memory free queue */
extern
queue_head_t vm_page_queue_active; /* active memory queue */
struct pglist vm_page_queue_active; /* active memory queue */
extern
queue_head_t vm_page_queue_inactive; /* inactive memory queue */
struct pglist vm_page_queue_inactive; /* inactive memory queue */
extern
vm_page_t vm_page_array; /* First resident page in table */
@ -200,26 +202,6 @@ extern
int vm_page_count; /* How many pages do we manage? */
#endif /* MACHINE_NONCONTIG */
/* XXX -- do these belong here? */
extern
int vm_page_free_count; /* How many pages are free? */
extern
int vm_page_active_count; /* How many pages are active? */
extern
int vm_page_inactive_count; /* How many pages are inactive? */
extern
int vm_page_wire_count; /* How many pages are wired? */
extern
int vm_page_free_target; /* How many do we want free? */
extern
int vm_page_free_min; /* When to wakeup pageout */
extern
int vm_page_inactive_target;/* How many do we want inactive? */
extern
int vm_page_free_reserved; /* How many pages reserved to do pageout */
extern
int vm_page_laundry_count; /* How many pages being laundered? */
#define VM_PAGE_TO_PHYS(entry) ((entry)->phys_addr)
#ifndef MACHINE_NONCONTIG
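
One detail the new vm_page.h makes explicit: struct vm_page embeds three independent TAILQ_ENTRY linkages (pageq, hashq, listq), so a single page can sit on a paging queue, a hash chain, and its object's memq at the same time. A compilable sketch of that multi-linkage idea with hypothetical names:

#include <sys/queue.h>

TAILQ_HEAD(pglist_sketch, page_sketch);

struct page_sketch {
	TAILQ_ENTRY(page_sketch) pageq;	/* free/active/inactive queue */
	TAILQ_ENTRY(page_sketch) hashq;	/* hash-bucket chain */
	TAILQ_ENTRY(page_sketch) listq;	/* pages in the same object */
};

static struct pglist_sketch freeq, bucket, memq;
static struct page_sketch page;

int
main(void)
{
	TAILQ_INIT(&freeq);
	TAILQ_INIT(&bucket);
	TAILQ_INIT(&memq);

	/* One page, three lists, one embedded entry per list. */
	TAILQ_INSERT_TAIL(&freeq, &page, pageq);
	TAILQ_INSERT_TAIL(&bucket, &page, hashq);
	TAILQ_INSERT_TAIL(&memq, &page, listq);
	return 0;
}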


@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_pageout.c 7.4 (Berkeley) 5/7/91
* $Id: vm_pageout.c,v 1.10 1994/03/17 02:52:36 cgd Exp $
* $Id: vm_pageout.c,v 1.11 1994/04/15 07:04:59 cgd Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@ -89,7 +89,7 @@ int vm_page_pagesfreed; /* Pages freed by page daemon */
void
vm_pageout_scan()
{
register vm_page_t m;
register vm_page_t m, next;
register int page_shortage;
register int s;
register int pages_freed;
@ -101,11 +101,11 @@ vm_pageout_scan()
s = splimp();
simple_lock(&vm_page_queue_free_lock);
free = vm_page_free_count;
free = cnt.v_free_count;
simple_unlock(&vm_page_queue_free_lock);
splx(s);
if (free < vm_page_free_target) {
if (free < cnt.v_free_target) {
#ifdef OMIT
swapout_threads();
#endif /* OMIT*/
@ -132,45 +132,43 @@ vm_pageout_scan()
*/
pages_freed = 0;
m = (vm_page_t) queue_first(&vm_page_queue_inactive);
while (!queue_end(&vm_page_queue_inactive, (queue_entry_t) m)) {
vm_page_t next;
for (m = vm_page_queue_inactive.tqh_first; m != NULL; m = next) {
s = splimp();
simple_lock(&vm_page_queue_free_lock);
free = vm_page_free_count;
free = cnt.v_free_count;
simple_unlock(&vm_page_queue_free_lock);
splx(s);
if (free >= vm_page_free_target)
if (free >= cnt.v_free_target)
break;
cnt.v_scan++;
next = m->pageq.tqe_next;
/*
* If the page has been referenced, move it back to the
* active queue.
*/
if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
vm_page_activate(m);
cnt.v_reactivated++;
continue;
}
if (m->flags & PG_CLEAN) {
next = (vm_page_t) queue_next(&m->pageq);
if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
vm_page_activate(m);
vm_stat.reactivations++;
}
else {
register vm_object_t object;
object = m->object;
if (!vm_object_lock_try(object)) {
/*
* Can't lock object -
* skip page.
*/
m = next;
continue;
}
register vm_object_t object;
object = m->object;
if (vm_object_lock_try(object)) {
pmap_page_protect(VM_PAGE_TO_PHYS(m),
VM_PROT_NONE);
vm_page_free(m); /* will dequeue */
pages_freed++;
cnt.v_dfree++;
vm_object_unlock(object);
}
m = next;
continue;
}
else {
{
/*
* If a page is dirty, then it is either
* being washed (but not yet cleaned)
@ -206,14 +204,13 @@ vm_pageout_scan()
* Skip page if we can't lock
* its object
*/
m = (vm_page_t) queue_next(&m->pageq);
continue;
}
pmap_page_protect(VM_PAGE_TO_PHYS(m),
VM_PROT_NONE);
m->flags |= PG_BUSY;
vm_stat.pageouts++;
cnt.v_pageouts++;
/*
* Try to collapse the object before
@ -231,7 +228,7 @@ vm_pageout_scan()
* Do a wakeup here in case the following
* operations block.
*/
thread_wakeup((int) &vm_page_free_count);
thread_wakeup((int) &cnt.v_free_count);
/*
* If there is no pager for the page,
@ -258,7 +255,6 @@ vm_pageout_scan()
VM_PAGER_FAIL;
vm_object_lock(object);
vm_page_lock_queues();
next = (vm_page_t) queue_next(&m->pageq);
switch (pageout_status) {
case VM_PAGER_OK:
@ -305,8 +301,6 @@ vm_pageout_scan()
vm_object_unlock(object);
m = next;
}
else
m = (vm_page_t) queue_next(&m->pageq);
}
}
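
The loop rewrite above is the commit's central pattern: the Mach-style queue_first()/queue_end()/queue_next() walk becomes a direct tqh_first/tqe_next traversal, and the successor is saved in next before the body runs, because vm_page_free() and vm_page_activate() both unlink the current page. A standalone sketch of the same idiom under illustrative names (struct page and the dirty flag are stand-ins, not the kernel's types):

	#include <stdlib.h>
	#include <sys/queue.h>

	struct page {
		TAILQ_ENTRY(page) pageq;
		int dirty;
	};
	TAILQ_HEAD(pglist, page);

	/* Free every clean page; saving next first keeps the walk valid. */
	static void
	scan_clean(q)
		struct pglist *q;
	{
		struct page *m, *next;

		for (m = q->tqh_first; m != NULL; m = next) {
			next = m->pageq.tqe_next; /* grab before possible free */
			if (!m->dirty) {
				TAILQ_REMOVE(q, m, pageq);
				free(m);
			}
		}
	}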
@ -316,8 +310,8 @@ vm_pageout_scan()
* to inactive.
*/
page_shortage = vm_page_inactive_target - vm_page_inactive_count;
page_shortage -= vm_page_free_count;
page_shortage = cnt.v_inactive_target - cnt.v_inactive_count;
page_shortage -= cnt.v_free_count;
if ((page_shortage <= 0) && (pages_freed == 0))
page_shortage = 1;
@ -327,10 +321,8 @@ vm_pageout_scan()
* Move some more pages from active to inactive.
*/
if (queue_empty(&vm_page_queue_active)) {
if ((m = vm_page_queue_active.tqh_first) == NULL)
break;
}
m = (vm_page_t) queue_first(&vm_page_queue_active);
vm_page_deactivate(m);
page_shortage--;
}
@ -352,35 +344,26 @@ vm_pageout()
* Initialize some paging parameters.
*/
if (vm_page_free_min == 0) {
vm_page_free_min = vm_page_free_count / 20;
if (vm_page_free_min < 3)
vm_page_free_min = 3;
if (cnt.v_free_min == 0) {
cnt.v_free_min = cnt.v_free_count / 20;
if (cnt.v_free_min < 3)
cnt.v_free_min = 3;
if (vm_page_free_min > vm_page_free_min_sanity)
vm_page_free_min = vm_page_free_min_sanity;
if (cnt.v_free_min > vm_page_free_min_sanity)
cnt.v_free_min = vm_page_free_min_sanity;
}
if (vm_page_free_reserved == 0) {
if ((vm_page_free_reserved = vm_page_free_min / 2) < 10)
vm_page_free_reserved = 10;
}
if (vm_pageout_free_min == 0) {
if ((vm_pageout_free_min = vm_page_free_reserved / 2) > 10)
vm_pageout_free_min = 10;
}
if (cnt.v_free_target == 0)
cnt.v_free_target = (cnt.v_free_min * 4) / 3;
if (vm_page_free_target == 0)
vm_page_free_target = (vm_page_free_min * 4) / 3;
if (cnt.v_inactive_target == 0)
cnt.v_inactive_target = cnt.v_free_min * 2;
if (vm_page_inactive_target == 0)
vm_page_inactive_target = vm_page_free_min * 2;
if (cnt.v_free_target <= cnt.v_free_min)
cnt.v_free_target = cnt.v_free_min + 1;
if (vm_page_free_target <= vm_page_free_min)
vm_page_free_target = vm_page_free_min + 1;
if (vm_page_inactive_target <= vm_page_free_target)
vm_page_inactive_target = vm_page_free_target + 1;
if (cnt.v_inactive_target <= cnt.v_free_target)
cnt.v_inactive_target = cnt.v_free_target + 1;
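
Worked through for a hypothetical machine that boots with 2000 free pages (and ignoring the free_min sanity cap), the defaults above come out to v_free_min = 2000/20 = 100, v_free_target = (100*4)/3 = 133, and v_inactive_target = 100*2 = 200, so both ordering checks already pass. A standalone program checking that arithmetic:

	#include <stdio.h>

	int
	main()
	{
		unsigned free_count = 2000;	/* hypothetical boot-time value */
		unsigned free_min, free_target, inactive_target;

		free_min = free_count / 20;
		if (free_min < 3)
			free_min = 3;
		free_target = (free_min * 4) / 3;
		inactive_target = free_min * 2;
		if (free_target <= free_min)
			free_target = free_min + 1;
		if (inactive_target <= free_target)
			inactive_target = free_target + 1;
		printf("min=%u target=%u inactive=%u\n",
		    free_min, free_target, inactive_target); /* 100 133 200 */
		return (0);
	}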
/*
* The pageout daemon is never done, so loop
@ -394,6 +377,6 @@ vm_pageout()
vm_pageout_scan();
vm_pager_sync();
simple_lock(&vm_pages_needed_lock);
thread_wakeup((int) &vm_page_free_count);
thread_wakeup((int) &cnt.v_free_count);
}
}

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_pageout.h 7.3 (Berkeley) 4/21/91
* $Id: vm_pageout.h,v 1.4 1994/01/07 22:22:27 mycroft Exp $
* $Id: vm_pageout.h,v 1.5 1994/04/15 07:05:01 cgd Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@ -89,7 +89,7 @@ simple_lock_data_t vm_pages_needed_lock;
#define VM_WAIT { \
simple_lock(&vm_pages_needed_lock); \
thread_wakeup((int)&vm_pages_needed); \
thread_sleep((int)&vm_page_free_count, \
thread_sleep((int)&cnt.v_free_count, \
&vm_pages_needed_lock); \
}
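
VM_WAIT sleeps on the address of cnt.v_free_count, the same channel vm_pageout() wakes after each scan (the thread_wakeup((int)&cnt.v_free_count) calls earlier in this diff), so a starved allocator blocks until the daemon replenishes the pool. A sketch of a typical consumer, assuming the existing vm_page_alloc() interface; the wrapper itself is illustrative:

	/* Illustrative: block until the pageout daemon frees a page for us. */
	vm_page_t
	vm_page_alloc_wait(object, offset)
		vm_object_t object;
		vm_offset_t offset;
	{
		vm_page_t m;

		while ((m = vm_page_alloc(object, offset)) == NULL)
			VM_WAIT;	/* sleep on &cnt.v_free_count */
		return (m);
	}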

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_pager.c 8.1 (Berkeley) 6/11/93
* $Id: vm_pager.c,v 1.10 1994/01/07 18:11:38 mycroft Exp $
* $Id: vm_pager.c,v 1.11 1994/04/15 07:05:02 cgd Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@ -260,19 +260,16 @@ vm_pager_unmap_page(kva)
}
vm_pager_t
vm_pager_lookup(list, handle)
register queue_head_t *list;
vm_pager_lookup(pglist, handle)
register struct pagerlst *pglist;
caddr_t handle;
{
register vm_pager_t pager;
pager = (vm_pager_t) queue_first(list);
while (!queue_end(list, (queue_entry_t)pager)) {
for (pager = pglist->tqh_first; pager; pager = pager->pg_list.tqe_next)
if (pager->pg_handle == handle)
return(pager);
pager = (vm_pager_t) queue_next(&pager->pg_list);
}
return(NULL);
return (pager);
return (NULL);
}
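
After the conversion, vm_pager_lookup() is a plain head-to-tail TAILQ walk keyed on pg_handle. A fragment sketching a typical call site, assuming a vnode pointer is used as the handle (as the vnode pager later in this diff does); the surrounding variables are illustrative:

	vm_pager_t pager;

	/* Reuse a cached pager for this vnode if one exists. */
	pager = vm_pager_lookup(&vnode_pager_list, (caddr_t)vp);
	if (pager != NULL)
		return (pager);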
/*

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_param.h 7.2 (Berkeley) 4/21/91
* $Id: vm_param.h,v 1.4 1993/07/29 21:45:42 jtc Exp $
* $Id: vm_param.h,v 1.5 1994/04/15 07:05:03 cgd Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@ -87,6 +87,7 @@ typedef int boolean_t;
* The machine independent pages are referred to as PAGES. A page
* is some number of hardware pages, depending on the target machine.
*/
#define DEFAULT_PAGE_SIZE 4096
/*
* All references to the size of a page should be done with PAGE_SIZE
@ -94,8 +95,13 @@ typedef int boolean_t;
* we can easily make them constant if we so desire.
*/
#define PAGE_SIZE page_size /* size of page in addressable units */
#define PAGE_SHIFT page_shift /* number of bits to shift for pages */
#define PAGE_SIZE cnt.v_page_size /* size of page */
#define PAGE_MASK page_mask /* size of page - 1 */
#define PAGE_SHIFT page_shift /* bits to shift for pages */
#ifdef KERNEL
extern vm_size_t page_mask;
extern int page_shift;
#endif
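
Since PAGE_SIZE now reads cnt.v_page_size, boot code has to set the vmmeter field and derive page_shift and page_mask from it in one place. A sketch along the lines of the historical vm_set_page_size(); treat the body as illustrative of the bookkeeping, not as this commit's code:

	void
	vm_set_page_size()
	{

		if (cnt.v_page_size == 0)
			cnt.v_page_size = DEFAULT_PAGE_SIZE;
		page_mask = cnt.v_page_size - 1;
		if ((page_mask & cnt.v_page_size) != 0)
			panic("vm_set_page_size: page size not a power of 2");
		for (page_shift = 0; ; page_shift++)
			if ((1 << page_shift) == cnt.v_page_size)
				break;
	}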
/*
* Return values from the VM routines.
@ -118,8 +124,8 @@ typedef int boolean_t;
*/
#ifdef KERNEL
#define atop(x) (((unsigned)(x)) >> page_shift)
#define ptoa(x) ((vm_offset_t)((x) << page_shift))
#define atop(x) (((unsigned)(x)) >> PAGE_SHIFT)
#define ptoa(x) ((vm_offset_t)((x) << PAGE_SHIFT))
#endif /* KERNEL */
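
With PAGE_SHIFT equal to 12 (4096-byte pages), atop(0x2345) yields page 2 and ptoa(3) yields byte address 0x3000. A standalone check, assuming that hypothetical shift:

	#include <stdio.h>

	#define PAGE_SHIFT	12	/* hypothetical: 4096-byte pages */
	#define atop(x)		(((unsigned)(x)) >> PAGE_SHIFT)
	#define ptoa(x)		((unsigned long)((x) << PAGE_SHIFT))

	int
	main()
	{
		printf("atop(0x2345) = %u\n", atop(0x2345));	/* 2 */
		printf("ptoa(3) = 0x%lx\n", ptoa(3));		/* 0x3000 */
		return (0);
	}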
/*
@ -129,19 +135,14 @@ typedef int boolean_t;
*/
#ifdef KERNEL
#define round_page(x) ((vm_offset_t)((((vm_offset_t)(x)) + page_mask) & ~page_mask))
#define trunc_page(x) ((vm_offset_t)(((vm_offset_t)(x)) & ~page_mask))
#define round_page(x) ((vm_offset_t)((((vm_offset_t)(x)) + PAGE_MASK) & ~PAGE_MASK))
#define trunc_page(x) ((vm_offset_t)(((vm_offset_t)(x)) & ~PAGE_MASK))
#else /* KERNEL */
#define round_page(x) ((((vm_offset_t)(x) + (vm_page_size - 1)) / vm_page_size) * vm_page_size)
#define trunc_page(x) ((((vm_offset_t)(x)) / vm_page_size) * vm_page_size)
#endif /* KERNEL */
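
With PAGE_MASK equal to 0xfff, round_page(0x1001) gives 0x2000 and trunc_page(0x1fff) gives 0x1000; the user-level versions after the #else compute the same values by division. A standalone check under the same hypothetical page size:

	#include <stdio.h>

	#define PAGE_MASK	0xfffUL	/* hypothetical: 4096-byte pages */
	#define round_page(x)	((((unsigned long)(x)) + PAGE_MASK) & ~PAGE_MASK)
	#define trunc_page(x)	(((unsigned long)(x)) & ~PAGE_MASK)

	int
	main()
	{
		printf("round_page(0x1001) = 0x%lx\n",
		    round_page(0x1001));	/* 0x2000 */
		printf("trunc_page(0x1fff) = 0x%lx\n",
		    trunc_page(0x1fff));	/* 0x1000 */
		return (0);
	}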
#ifdef KERNEL
extern vm_size_t page_size; /* machine independent page size */
extern vm_size_t page_mask; /* page_size - 1; mask for
offset within page */
extern int page_shift; /* shift to use for page size */
extern vm_size_t mem_size; /* size of physical memory (bytes) */
extern vm_offset_t first_addr; /* first physical page */
extern vm_offset_t last_addr; /* last physical page */

View File

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 8.1 (Berkeley) 6/11/93
* $Id: vnode_pager.c,v 1.6 1994/01/07 18:12:12 mycroft Exp $
* $Id: vnode_pager.c,v 1.7 1994/04/15 07:05:04 cgd Exp $
*/
/*
@ -59,7 +59,7 @@
#include <vm/vm_page.h>
#include <vm/vnode_pager.h>
queue_head_t vnode_pager_list; /* list of managed vnodes */
struct pagerlst vnode_pager_list; /* list of managed vnodes */
#ifdef DEBUG
int vpagerdebug = 0x00;
@ -99,7 +99,7 @@ vnode_pager_init()
if (vpagerdebug & VDB_FOLLOW)
printf("vnode_pager_init()\n");
#endif
queue_init(&vnode_pager_list);
TAILQ_INIT(&vnode_pager_list);
}
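
queue_init() and TAILQ_INIT() both produce an empty list, but the TAILQ head also keeps a pointer to the last element's link, which is what lets the TAILQ_INSERT_TAIL just below run in constant time. A minimal standalone check with illustrative names:

	#include <assert.h>
	#include <sys/queue.h>

	struct pager {
		TAILQ_ENTRY(pager) pg_list;
	};
	TAILQ_HEAD(pagerlst, pager);

	int
	main()
	{
		struct pagerlst plist;
		struct pager p;

		TAILQ_INIT(&plist);
		assert(plist.tqh_first == NULL);	/* empty list */
		TAILQ_INSERT_TAIL(&plist, &p, pg_list);
		assert(plist.tqh_first == &p);
		return (0);
	}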
/*
@ -167,7 +167,7 @@ vnode_pager_alloc(handle, size, prot, foff)
vnp->vnp_flags = 0;
vnp->vnp_vp = vp;
vnp->vnp_size = vattr.va_size;
queue_enter(&vnode_pager_list, pager, vm_pager_t, pg_list);
TAILQ_INSERT_TAIL(&vnode_pager_list, pager, pg_list);
pager->pg_handle = handle;
pager->pg_type = PG_VNODE;
pager->pg_ops = &vnodepagerops;
@ -212,7 +212,7 @@ vnode_pager_dealloc(pager)
#endif
vrele(vp);
}
queue_remove(&vnode_pager_list, pager, vm_pager_t, pg_list);
TAILQ_REMOVE(&vnode_pager_list, pager, pg_list);
free((caddr_t)vnp, M_VMPGDATA);
free((caddr_t)pager, M_VMPAGER);
}
@ -375,17 +375,16 @@ vnode_pager_umount(mp)
register vm_pager_t pager, npager;
struct vnode *vp;
pager = (vm_pager_t) queue_first(&vnode_pager_list);
while (!queue_end(&vnode_pager_list, (queue_entry_t)pager)) {
for (pager = vnode_pager_list.tqh_first; pager != NULL;
pager = npager) {
/*
* Save the next pointer now since uncaching may
* terminate the object and render pager invalid
*/
npager = pager->pg_list.tqe_next;
vp = ((vn_pager_t)pager->pg_data)->vnp_vp;
npager = (vm_pager_t) queue_next(&pager->pg_list);
if (mp == (struct mount *)0 || vp->v_mount == mp)
(void) vnode_pager_uncache(vp);
pager = npager;
}
}