pull up vm_object_collapse() fix from mrg_vm_object_collapse branch.

many thanks to niklas hallqvist (niklas@appli.se) for this.
mrg 1997-01-03 18:03:14 +00:00
parent f3d9c468e1
commit 4f2a217c2b
14 changed files with 971 additions and 245 deletions
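For orientation before the per-file diffs: the heart of this pull-up is an extension of the pager interface with three new operations (pgo_remove, pgo_next, pgo_count) that let vm_object_collapse() drain a backing object's pager. A minimal sketch of the iteration idiom they enable, assuming a pager backing an object of size "size" (illustrative only, not code from the diff):

	vm_offset_t off;

	/*
	 * vm_pager_next() yields the offset of the next backed page at
	 * or after "off"; it returns the size of the pager's managed
	 * space once no backed pages remain.
	 */
	for (off = vm_pager_next(pager, 0); off < size;
	    off = vm_pager_next(pager, off + PAGE_SIZE)) {
		/* ... page in or discard the page at "off" ... */
		vm_pager_remove(pager, off, off + PAGE_SIZE);
	}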

sys/vm/device_pager.c

@@ -1,4 +1,4 @@
/* $NetBSD: device_pager.c,v 1.23 1996/10/12 21:50:05 christos Exp $ */
/* $NetBSD: device_pager.c,v 1.24 1997/01/03 18:03:14 mrg Exp $ */
/*
* Copyright (c) 1990 University of Utah.
@@ -49,6 +49,7 @@
#include <sys/conf.h>
#include <sys/mman.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>

sys/vm/swap_pager.c

@@ -1,4 +1,4 @@
/* $NetBSD: swap_pager.c,v 1.29 1996/10/12 21:50:07 christos Exp $ */
/* $NetBSD: swap_pager.c,v 1.30 1997/01/03 18:03:17 mrg Exp $ */
/*
* Copyright (c) 1990 University of Utah.
@@ -137,6 +137,10 @@ static void swap_pager_cluster
__P((vm_pager_t, vm_offset_t,
vm_offset_t *, vm_offset_t *));
static void swap_pager_dealloc __P((vm_pager_t));
static int swap_pager_remove
__P((vm_pager_t, vm_offset_t, vm_offset_t));
static vm_offset_t swap_pager_next __P((vm_pager_t, vm_offset_t));
static int swap_pager_count __P((vm_pager_t));
static int swap_pager_getpage
__P((vm_pager_t, vm_page_t *, int, boolean_t));
static boolean_t swap_pager_haspage __P((vm_pager_t, vm_offset_t));
@@ -144,6 +148,7 @@ static int swap_pager_io __P((sw_pager_t, vm_page_t *, int, int));
static void swap_pager_iodone __P((struct buf *));
static int swap_pager_putpage
__P((vm_pager_t, vm_page_t *, int, boolean_t));
static int count_bits __P((u_int));
struct pagerops swappagerops = {
swap_pager_init,
@@ -152,7 +157,10 @@ struct pagerops swappagerops = {
swap_pager_getpage,
swap_pager_putpage,
swap_pager_haspage,
swap_pager_cluster
swap_pager_cluster,
swap_pager_remove,
swap_pager_next,
swap_pager_count
};
static void
@@ -310,7 +318,7 @@ swap_pager_alloc(handle, size, prot, foff)
}
bzero((caddr_t)swp->sw_blocks,
swp->sw_nblocks * sizeof(*swp->sw_blocks));
swp->sw_poip = 0;
swp->sw_poip = swp->sw_cnt = 0;
if (handle) {
vm_object_t object;
@@ -560,7 +568,8 @@ swap_pager_io(swp, mlist, npages, flags)
register struct buf *bp;
register sw_blk_t swb;
register int s;
int ix, mask;
int ix;
u_int mask;
boolean_t rv;
vm_offset_t kva, off;
swp_clean_t spc;
@@ -732,6 +741,7 @@ swap_pager_io(swp, mlist, npages, flags)
printf("swpg_io: off=%lx, npg=%x, mask=%x, bmask=%x\n",
off, npages, mask, swb->swb_mask);
#endif
swp->sw_cnt += count_bits(mask & ~swb->swb_mask);
swb->swb_mask |= mask;
}
/*
@@ -1029,3 +1039,241 @@ swap_pager_iodone(bp)
wakeup(&vm_pages_needed);
splx(s);
}
/*
* swap_pager_remove:
*
* This is called via the vm_pager_remove path and
* will remove any pages inside the range [from, to)
* backed by us. It is assumed that both addresses
* are multiples of PAGE_SIZE. The special case
* where TO is zero means: remove to end of object.
*/
static int
swap_pager_remove(pager, from, to)
vm_pager_t pager;
vm_offset_t from, to;
{
sw_pager_t swp;
sw_blk_t swb;
int bsize, blk, bit, to_blk, to_bit, mask, cnt = 0;
#ifdef DEBUG
if (swpagerdebug & SDB_FOLLOW)
printf("swpg_remove()\n");
#endif
/* Special case stupid ranges. */
if (to > 0 && from >= to)
return(0);
swp = (sw_pager_t)pager->pg_data;
/*
* If we back no pages, just return. XXX Can this
* ever be the case? At least all remove calls should
* be through vm_object_remove_from_pager which also
* deallocates the pager when it no longer backs any
* pages. What remains is the initial case: can a swap-pager
* be created without any pages put into it?
*/
if (swp->sw_cnt == 0)
return(0);
bsize = dbtob(swp->sw_bsize);
blk = from / bsize;
/* Another fast one: no blocks in range. */
if (blk >= swp->sw_nblocks)
return(0);
bit = atop(from % bsize);
/*
* Deal with the special case with TO == 0.
* XXX Perhaps the code might be improved if we
* made to_blk & to_bit signify the inclusive end
* of range instead (i.e. to - 1).
*/
if (to) {
to_blk = to / bsize;
if (to_blk >= swp->sw_nblocks) {
to_blk = swp->sw_nblocks;
to_bit = 0;
} else
to_bit = atop(to % bsize);
} else {
to_blk = swp->sw_nblocks;
to_bit = 0;
}
/*
* Loop over the range, remove pages as we find them.
* If all pages in a block get freed, deallocate the
* swap block as well.
*/
for (swb = &swp->sw_blocks[blk], mask = (1 << bit) - 1;
blk < to_blk || (blk == to_blk && to_bit);
blk++, swb++, mask = 0) {
/* Don't bother if the block is already cleared. */
if (swb->swb_block == 0)
continue;
/*
* When coming to the end-block we need to
* adjust the mask at the other end, as well as
* ensuring this will be the last iteration.
*/
if (blk == to_blk) {
mask |= ~((1 << to_bit) - 1);
to_bit = 0;
}
/* Count pages that will be removed. */
cnt += count_bits(swb->swb_mask & ~mask);
/*
* Remove pages by applying our mask, and if this
* means no pages are left in the block, free it.
*/
if ((swb->swb_mask &= mask) == 0) {
rmfree(swapmap, swp->sw_bsize, swb->swb_block);
swb->swb_block = 0;
}
}
/* Adjust the page count and return the removed count. */
swp->sw_cnt -= cnt;
#ifdef DEBUG
if (swp->sw_cnt < 0)
panic("swap_pager_remove: sw_cnt < 0");
#endif
return(cnt);
}
/*
* swap_pager_next:
*
* This is called via the vm_pager_next path and
* will return the offset of the next page (addresswise)
* which this pager is backing. If there are no more
* pages we will return the size of the pager's managed
* space (which by definition is larger than any page's
* offset).
*/
static vm_offset_t
swap_pager_next(pager, offset)
vm_pager_t pager;
vm_offset_t offset;
{
sw_pager_t swp;
sw_blk_t swb;
int bsize, blk, bit, to_blk, to_bit, mask;
#ifdef DEBUG
if (swpagerdebug & SDB_FOLLOW)
printf("swpg_next()\n");
#endif
swp = (sw_pager_t)pager->pg_data;
/*
* If we back no pages, just return our size. XXX Can
* this ever be the case? At least all remove calls
* should be through vm_object_remove_from_pager which
* also deallocates the pager when it no longer backs any
* pages. What remains is the initial case: can a swap-pager
* be created without any pages put into it?
*/
if (swp->sw_cnt == 0)
return(swp->sw_osize);
bsize = dbtob(swp->sw_bsize);
blk = offset / bsize;
/* Another fast one: no blocks in range. */
if (blk >= swp->sw_nblocks)
return(swp->sw_osize);
bit = atop(offset % bsize);
to_blk = swp->sw_osize / bsize;
to_bit = atop(swp->sw_osize % bsize);
/*
* Loop over the remaining blocks, returning as soon
* as we find a page.
*/
swb = &swp->sw_blocks[blk];
mask = ~((1 << bit) - 1);
for (;;) {
if (blk == to_blk) {
/* Nothing to be done in this end-block? */
if (to_bit == 0)
break;
mask &= (1 << to_bit) - 1;
}
/*
* Check this block for a backed page and return
* its offset if there.
*/
mask &= swb->swb_mask;
if (mask)
return(blk * bsize + (ffs(mask) - 1) * PAGE_SIZE);
/*
* If we handled the end of range now, this
* means we are ready.
*/
if (blk == to_blk)
break;
/* Get on with the next block. */
blk++;
swb++;
mask = ~0;
}
return swp->sw_osize;
}
/*
* swap_pager_count:
*
* Just returns the count of pages backed by this pager.
*/
static int
swap_pager_count(pager)
vm_pager_t pager;
{
#ifndef notyet
return (((sw_pager_t)pager->pg_data)->sw_cnt);
#else
sw_pager_t swp;
sw_blk_t swb;
int i, cnt = 0;
swp = (sw_pager_t)pager->pg_data;
if (swp->sw_blocks == NULL)
return 0;
for (i = 0; i < swp->sw_nblocks; i++)
cnt += count_bits(swp->sw_blocks[i].swb_mask);
return cnt;
#endif
}
/*
* count_bits:
*
* Counts the number of set bits in a word.
*/
static int
count_bits(x)
u_int x;
{
int cnt = 0;
while (x) {
cnt += x & 1;
x >>= 1;
}
return(cnt);
}
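To make the bitmask arithmetic in swap_pager_remove() above concrete, here is a worked example (numbers mine, not from the diff): assume 4K pages and dbtob(swp->sw_bsize) == 64K, i.e. 16 pages per swap block, and a removal of the range [20K, 44K):

	/*
	 * blk == to_blk == 0, so a single block is touched:
	 *   bit    = atop(20K % 64K) = 5
	 *   to_bit = atop(44K % 64K) = 11
	 *   mask   = (1 << 5) - 1          -> keep pages 0-4
	 *   mask  |= ~((1 << 11) - 1)      -> also keep pages 11-15
	 *
	 * count_bits(swb_mask & ~mask) then counts pages 5-10 as
	 * removed, swb_mask &= mask drops them, and if swb_mask ends
	 * up zero the whole swap block is handed back via rmfree().
	 */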

sys/vm/swap_pager.h

@@ -1,4 +1,4 @@
/* $NetBSD: swap_pager.h,v 1.6 1994/06/29 06:47:49 cgd Exp $ */
/* $NetBSD: swap_pager.h,v 1.7 1997/01/03 18:03:19 mrg Exp $ */
/*
* Copyright (c) 1990 University of Utah.
@@ -83,6 +83,7 @@ struct swpager {
sw_blk_t sw_blocks; /* pointer to list of swap blocks */
short sw_flags; /* flags */
short sw_poip; /* pageouts in progress */
int sw_cnt; /* count of pages in pager */
};
typedef struct swpager *sw_pager_t;

sys/vm/vm_extern.h

@@ -1,4 +1,4 @@
/* $NetBSD: vm_extern.h,v 1.21 1996/11/06 19:33:42 cgd Exp $ */
/* $NetBSD: vm_extern.h,v 1.22 1997/01/03 18:03:21 mrg Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -93,7 +93,13 @@ int swfree __P((struct proc *, int));
void swstrategy __P((struct buf *));
void thread_block __P((void));
void thread_sleep __P((void *, simple_lock_t, boolean_t));
void thread_wakeup __P((void *));
/*
* This define replaces the thread_wakeup prototype, as thread_wakeup
* was solely a wrapper around wakeup.
*
* void thread_wakeup __P((void *));
*/
#define thread_wakeup wakeup
int useracc __P((caddr_t, int, int));
int vm_allocate __P((vm_map_t, vm_offset_t *, vm_size_t,
boolean_t));

sys/vm/vm_fault.c

@@ -1,4 +1,4 @@
/* $NetBSD: vm_fault.c,v 1.18 1996/05/20 17:40:02 mrg Exp $ */
/* $NetBSD: vm_fault.c,v 1.19 1997/01/03 18:03:22 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -190,6 +190,10 @@ vm_fault(map, vaddr, fault_type, change_wiring)
vm_object_lock(first_object);
first_object->ref_count++;
#ifdef DIAGNOSTIC
if (first_object->paging_in_progress == 0xdead)
panic("vm_fault: first_object deallocated");
#endif
first_object->paging_in_progress++;
/*
@@ -424,6 +428,10 @@ vm_fault(map, vaddr, fault_type, change_wiring)
object->paging_in_progress--;
vm_object_unlock(object);
object = next_object;
#ifdef DIAGNOSTIC
if (object->paging_in_progress == 0xdead)
panic("vm_fault: object deallocated (1)");
#endif
object->paging_in_progress++;
}
}
@@ -523,6 +531,10 @@ vm_fault(map, vaddr, fault_type, change_wiring)
*/
object->paging_in_progress--;
vm_object_collapse(object);
#ifdef DIAGNOSTIC
if (object->paging_in_progress == 0xdead)
panic("vm_fault: object deallocated (2)");
#endif
object->paging_in_progress++;
}
else {

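A note on the DIAGNOSTIC checks added to vm_fault() above: they rely on the object teardown path stamping paging_in_progress with the poison value 0xdead before the object is freed, so that a later increment through a stale object pointer panics immediately. The poisoning site itself is not part of this excerpt; the convention the checks assume looks roughly like this (hypothetical placement):

	#ifdef DIAGNOSTIC
	object->paging_in_progress = 0xdead;	/* poison before free */
	#endif
	free((caddr_t)object, M_VMOBJ);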
sys/vm/vm_glue.c

@@ -1,4 +1,4 @@
/* $NetBSD: vm_glue.c,v 1.64 1996/11/06 20:20:04 cgd Exp $ */
/* $NetBSD: vm_glue.c,v 1.65 1997/01/03 18:03:25 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -584,16 +584,6 @@ thread_sleep(event, lock, ruptible)
splx(s);
}
void
thread_wakeup(event)
void *event;
{
int s = splhigh();
wakeup(event);
splx(s);
}
/*
* DEBUG stuff
*/

sys/vm/vm_kern.c

@@ -1,4 +1,4 @@
/* $NetBSD: vm_kern.c,v 1.20 1996/10/12 21:50:09 christos Exp $ */
/* $NetBSD: vm_kern.c,v 1.21 1997/01/03 18:03:27 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -70,6 +70,7 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

sys/vm/vm_object.c

@@ -1,4 +1,4 @@
/* $NetBSD: vm_object.c,v 1.36 1996/10/12 21:50:12 christos Exp $ */
/* $NetBSD: vm_object.c,v 1.37 1997/01/03 18:03:29 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -71,9 +71,11 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
/*
* Virtual memory objects maintain the actual data
@@ -111,8 +113,26 @@ struct vm_object_hash_head vm_object_hashtable[VM_OBJECT_HASH_COUNT];
long object_collapses = 0;
long object_bypasses = 0;
boolean_t vm_object_collapse_allowed = TRUE;
static void _vm_object_allocate __P((vm_size_t, vm_object_t));
#ifndef VMDEBUG
#define VMDEBUG 0
#endif
#ifdef DEBUG
#define VMDEBUG_SHADOW 0x1
#define VMDEBUG_SHADOW_VERBOSE 0x2
#define VMDEBUG_COLLAPSE 0x4
#define VMDEBUG_COLLAPSE_PAGEIN 0x8
int vmdebug = VMDEBUG;
#endif
static void _vm_object_allocate __P((vm_size_t, vm_object_t));
int vm_object_collapse_aux __P((vm_object_t));
int vm_object_bypass __P((vm_object_t));
void vm_object_set_shadow __P((vm_object_t, vm_object_t));
int vm_object_remove_from_pager
__P((vm_object_t, vm_offset_t, vm_offset_t));
/*
* vm_object_init:
@@ -182,6 +202,7 @@ _vm_object_allocate(size, object)
object->paging_offset = 0;
object->shadow = NULL;
object->shadow_offset = (vm_offset_t) 0;
LIST_INIT(&object->shadowers);
simple_lock(&vm_object_list_lock);
TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
@@ -222,7 +243,12 @@ void
vm_object_deallocate(object)
register vm_object_t object;
{
vm_object_t temp;
/*
* While "temp" is used for other things as well, we
* initialize it to NULL here for being able to check
* if we are in the first revolution of the loop.
*/
vm_object_t temp = NULL;
while (object != NULL) {
@@ -239,12 +265,26 @@ vm_object_deallocate(object)
*/
vm_object_lock(object);
if (--(object->ref_count) != 0) {
/*
* If this is a deallocation of a shadow
* reference (which it is unless it's the
* first time round) and this operation made
* us singly-shadowed, try to collapse us
* with our shadower.
*/
vm_object_unlock(object);
if (temp != NULL &&
(temp = object->shadowers.lh_first) != NULL &&
temp->shadowers_list.le_next == NULL) {
vm_object_lock(temp);
vm_object_collapse(temp);
vm_object_unlock(temp);
}
/*
* If there are still references, then
* we are done.
*/
vm_object_unlock(object);
vm_object_cache_unlock();
return;
}
@@ -296,12 +336,28 @@ vm_object_terminate(object)
register vm_page_t p;
vm_object_t shadow_object;
/*
* Setters of paging_in_progress may need to know that this object
* is going away as soon as we get a grip on it.
*/
object->flags |= OBJ_FADING;
/*
* Wait until the pageout daemon is through with the object or a
* potential collapse operation is finished.
*/
while (object->paging_in_progress) {
vm_object_sleep(object, object, FALSE);
vm_object_lock(object);
}
/*
* Detach the object from its shadow if we are the shadow's
* copy.
*/
if ((shadow_object = object->shadow) != NULL) {
vm_object_lock(shadow_object);
vm_object_set_shadow(object, NULL);
if (shadow_object->copy == object)
shadow_object->copy = NULL;
#if 0
@@ -311,14 +367,6 @@ vm_object_terminate(object)
vm_object_unlock(shadow_object);
}
/*
* Wait until the pageout daemon is through with the object.
*/
while (object->paging_in_progress) {
vm_object_sleep(object, object, FALSE);
vm_object_lock(object);
}
/*
* If not an internal object clean all the pages, removing them
* from paging queues as we go.
@@ -468,6 +516,11 @@ again:
pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ);
if (!(p->flags & PG_CLEAN)) {
p->flags |= PG_BUSY;
#ifdef DIAGNOSTIC
if (object->paging_in_progress == 0xdead)
panic("vm_object_page_clean: "
"object deallocated");
#endif
object->paging_in_progress++;
vm_object_unlock(object);
/*
@@ -760,11 +813,11 @@ vm_object_copy(src_object, src_offset, size,
/*
* Make the old copy-object shadow the new one.
* It will receive no more pages from the original
* object.
* object. Locking of new_copy not needed. We
* have the only pointer.
*/
src_object->ref_count--; /* remove ref. from old_copy */
old_copy->shadow = new_copy;
vm_object_set_shadow(old_copy, new_copy);
new_copy->ref_count++; /* locking not needed - we
have the only pointer */
vm_object_unlock(old_copy); /* done with old_copy */
@@ -777,7 +830,7 @@ vm_object_copy(src_object, src_offset, size,
* Point the new copy at the existing object.
*/
new_copy->shadow = src_object;
vm_object_set_shadow(new_copy, src_object);
new_copy->shadow_offset = new_start;
src_object->ref_count++;
src_object->copy = new_copy;
@@ -806,6 +859,8 @@ vm_object_copy(src_object, src_offset, size,
*
* The new object and offset into that object
* are returned in the source parameters.
*
* The old object should not be locked.
*/
void
vm_object_shadow(object, offset, length)
@@ -821,7 +876,6 @@ vm_object_shadow(object, offset, length)
/*
* Allocate a new object with the given length
*/
if ((result = vm_object_allocate(length)) == NULL)
panic("vm_object_shadow: no object for shadowing");
@@ -832,19 +886,19 @@ vm_object_shadow(object, offset, length)
* the source object. Net result: no change of reference
* count.
*/
result->shadow = source;
vm_object_lock(source);
vm_object_set_shadow(result, source);
vm_object_unlock(source);
/*
* Store the offset into the source object,
* and fix up the offset into the new object.
*/
result->shadow_offset = *offset;
/*
* Return the new things
*/
*offset = 0;
*object = result;
}
@@ -853,8 +907,7 @@ vm_object_shadow(object, offset, length)
* Set the specified object's pager to the specified pager.
*/
void
vm_object_setpager(object, pager, paging_offset,
read_only)
vm_object_setpager(object, pager, paging_offset, read_only)
vm_object_t object;
vm_pager_t pager;
vm_offset_t paging_offset;
@@ -1008,7 +1061,479 @@ vm_object_cache_clear()
vm_object_cache_unlock();
}
boolean_t vm_object_collapse_allowed = TRUE;
/*
* vm_object_remove_from_pager:
*
* Tell the object's pager that it needn't back the given
* range anymore. If the pager ends up empty, deallocate it.
*/
int
vm_object_remove_from_pager(object, from, to)
vm_object_t object;
vm_offset_t from, to;
{
vm_pager_t pager = object->pager;
int cnt = 0;
if (pager == NULL)
return 0;
cnt = vm_pager_remove(pager, from, to);
/* If pager became empty, remove it. */
if (cnt > 0 && vm_pager_count(pager) == 0) {
vm_pager_deallocate(pager);
object->pager = NULL;
}
return(cnt);
}
/*
* vm_object_collapse_aux:
*
* Internal function to vm_object_collapse called when
* it has been shown that a collapse operation is likely
* to succeed. We know that the backing object is only
* referenced by me and that paging is not in progress.
*/
int
vm_object_collapse_aux(object)
vm_object_t object;
{
vm_object_t backing_object = object->shadow;
vm_offset_t backing_offset = object->shadow_offset;
vm_size_t size = object->size;
vm_offset_t offset, paged_offset;
vm_page_t backing_page, page = NULL;
#ifdef DEBUG
if (vmdebug & VMDEBUG_COLLAPSE)
printf("vm_object_collapse_aux(0x%p)\n", object);
#endif
/*
* The algorithm used is roughly like this:
* (1) Trim a potential pager in the backing
* object so it'll only hold pages in reach.
* (2) Loop over all the resident pages in the
* shadow object and either remove them if
* they are shadowed or move them into the
* shadowing object.
* (3) Loop over the paged out pages in the
* shadow object. Start pageins on those
* that aren't shadowed, and just deallocate
* the others. In each iteration check if
* other users of these objects have caused
* pageins resulting in new resident pages.
* This can happen while we are waiting for
* a pagein of ours. If such resident pages
* turn up, restart from (2).
*/
/*
* As a first measure we know we can discard
* everything that the shadowing object doesn't
* shadow.
*/
if (backing_object->pager != NULL) {
if (backing_offset > 0)
vm_object_remove_from_pager(backing_object, 0,
backing_offset);
if (backing_offset + size < backing_object->size)
vm_object_remove_from_pager(backing_object,
backing_offset + size, backing_object->size);
}
/*
* This is the outer loop, iterating until all resident and
* paged out pages in the shadow object are drained.
*/
paged_offset = 0;
while (backing_object->memq.tqh_first != NULL ||
backing_object->pager != NULL) {
/*
* First of all get rid of resident pages in the
* backing object. We can guarantee to remove
* every page thus we can write the while-test
* like this.
*/
while ((backing_page = backing_object->memq.tqh_first) !=
NULL) {
/*
* If the page is outside the shadowing object's
* range or if the page is shadowed (either by a
* resident "non-fake" page or a paged out one) we
* can discard it right away. Otherwise we need
* to move the page to the shadowing object,
* perhaps waking up waiters for "fake" pages
* first.
*/
if (backing_page->offset < backing_offset ||
(offset = backing_page->offset - backing_offset) >=
size ||
((page = vm_page_lookup(object, offset)) != NULL &&
!(page->flags & PG_FAKE)) ||
(object->pager != NULL &&
vm_pager_has_page(object->pager, offset))) {
/*
* Just discard the page, no one needs it.
*/
vm_page_lock_queues();
vm_page_free(backing_page);
vm_page_unlock_queues();
} else {
/*
* If a "fake" page was found, someone may
* be waiting for it. Wake her up and
* then remove the page.
*/
if (page) {
PAGE_WAKEUP(page);
vm_page_lock_queues();
vm_page_free(page);
vm_page_unlock_queues();
}
/*
* If the backing page was ever paged out,
* it must have been dirty at some point.
* Unless the front object has no pager
* allocated (in which case the shadow's
* pager will be moved up front), mark the
* page dirty again so it won't be thrown
* away without being paged out to the
* front pager.
*/
if (object->pager != NULL &&
vm_object_remove_from_pager(backing_object,
backing_page->offset,
backing_page->offset + PAGE_SIZE))
backing_page->flags &= ~PG_CLEAN;
/* Move the page up front. */
vm_page_rename(backing_page, object, offset);
}
}
/*
* If there isn't a pager in the shadow object, we're
* ready. Take the easy way out.
*/
if (backing_object->pager == NULL)
break;
/*
* If the shadowing object doesn't have a pager
* the easiest thing to do now is to just move the
* backing pager up front and everything is done.
*/
if (object->pager == NULL) {
object->pager = backing_object->pager;
object->paging_offset = backing_object->paging_offset +
backing_offset;
backing_object->pager = NULL;
break;
}
/*
* What's left to do is to find all paged out
* pages in the backing pager and either discard
* or move them to the front object. We need to
* recheck the resident page set as a pagein might
* have given other threads the chance to, via
* readfaults, page in another page into the
* resident set. In this case the outer loop must
* get reentered. That is also the case if some other
* thread removes the front pager, a case that has
* been seen...
*/
while (backing_object->memq.tqh_first == NULL &&
backing_object->pager != NULL && object->pager != NULL &&
(paged_offset = vm_pager_next(backing_object->pager,
paged_offset)) < backing_object->size) {
/*
* If the shadowing object has this page, get
* rid of it from the backing pager. Trust
* the loop condition to get us out of here
* quickly if we remove the last paged out page.
*
* XXX Would clustering several pages at a time
* be a win in this situation?
*/
if (((page = vm_page_lookup(object,
paged_offset - backing_offset)) == NULL ||
(page->flags & PG_FAKE)) &&
!vm_pager_has_page(object->pager,
paged_offset - backing_offset)) {
/*
* If a "fake" page was found, someone
* may be waiting for it. Wake her up
* and then remove the page.
*/
if (page) {
PAGE_WAKEUP(page);
vm_page_lock_queues();
vm_page_free(page);
vm_page_unlock_queues();
}
/*
* Suck the page from the pager and give
* it to the shadowing object.
*/
#ifdef DEBUG
if (vmdebug & VMDEBUG_COLLAPSE_PAGEIN)
printf("vm_object_collapse_aux: "
"pagein needed\n");
#endif
/*
* First allocate a page and mark it
* busy so another thread won't try
* to start another pagein.
*/
for (;;) {
backing_page =
vm_page_alloc(backing_object,
paged_offset);
if (backing_page)
break;
VM_WAIT;
}
backing_page->flags |= PG_BUSY;
/*
* Second, start paging it in. If this
* fails, what can we do but punt?
* Even though the shadowing object
* isn't exactly paging, we say so in
* order not to get simultaneous
* cascaded collapses.
*/
object->paging_in_progress++;
backing_object->paging_in_progress++;
if (vm_pager_get_pages(backing_object->pager,
&backing_page, 1, TRUE) != VM_PAGER_OK) {
#ifdef DIAGNOSTIC
panic("vm_object_collapse_aux: "
"could not get paged out page");
#endif
return KERN_FAILURE;
}
cnt.v_pgpgin++;
/*
* A fault might have issued other
* pagein operations. We must wait for
* them to complete, then we get to
* wake up potential other waiters as
* well.
*/
while (backing_object->paging_in_progress != 1
|| object->paging_in_progress != 1) {
if (object->paging_in_progress != 1) {
vm_object_sleep(object, object,
FALSE);
vm_object_lock(object);
continue;
}
vm_object_sleep(backing_object,
backing_object, FALSE);
vm_object_lock(backing_object);
}
backing_object->paging_in_progress--;
object->paging_in_progress--;
thread_wakeup(backing_object);
thread_wakeup(object);
/*
* During the pagein vm_object_terminate
* might have slept on our front object in
* order to remove it. If this is the
* case, we might as well stop all the
* collapse work right here.
*/
if (object->flags & OBJ_FADING) {
PAGE_WAKEUP(backing_page);
return KERN_FAILURE;
}
/*
* Third, relookup in case pager changed
* page. Pager is responsible for
* disposition of old page if moved.
*/
backing_page = vm_page_lookup(backing_object,
paged_offset);
/*
* This page was once dirty; otherwise it
* would not have been paged out from this
* shadow object. As we now remove the
* persistent store of the page, make
* sure it will be paged out via the
* front pager by dirtying it.
*/
backing_page->flags &= ~(PG_FAKE|PG_CLEAN);
/*
* Fourth, move it up front, and wake up
* potential waiters.
*/
vm_page_rename(backing_page, object,
paged_offset - backing_offset);
PAGE_WAKEUP(backing_page);
}
vm_object_remove_from_pager(backing_object,
paged_offset, paged_offset + PAGE_SIZE);
paged_offset += PAGE_SIZE;
}
}
/*
* I've seen this condition once in an out-of-VM situation.
* For the moment I don't know why it occurred, although I suspect
* vm_object_page_clean can create a pager even if it won't use
* it.
*/
if (backing_object->pager != NULL &&
vm_pager_count(backing_object->pager) == 0) {
vm_pager_deallocate(backing_object->pager);
backing_object->pager = NULL;
}
#ifdef DIAGNOSTIC
if (backing_object->pager)
panic("vm_object_collapse_aux: backing_object->pager remains");
#endif
/*
* Object now shadows whatever backing_object did.
* Note that the reference to backing_object->shadow
* moves from within backing_object to within object.
*/
if (backing_object->shadow)
vm_object_lock(backing_object->shadow);
vm_object_set_shadow(object, backing_object->shadow);
if (backing_object->shadow) {
vm_object_set_shadow(backing_object, NULL);
vm_object_unlock(backing_object->shadow);
}
object->shadow_offset += backing_object->shadow_offset;
if (object->shadow != NULL && object->shadow->copy != NULL)
panic("vm_object_collapse_aux: we collapsed a copy-object!");
/* Fast cleanup is the only thing left now. */
vm_object_unlock(backing_object);
simple_lock(&vm_object_list_lock);
TAILQ_REMOVE(&vm_object_list, backing_object, object_list);
vm_object_count--;
simple_unlock(&vm_object_list_lock);
free((caddr_t)backing_object, M_VMOBJ);
object_collapses++;
return KERN_SUCCESS;
}
/*
* vm_object_bypass:
*
* Internal function to vm_object_collapse called when collapsing
* the object with its backing one is not allowed but there may
* be an opportunity to bypass the backing object and shadow the
* next object in the chain instead.
*/
int
vm_object_bypass(object)
vm_object_t object;
{
register vm_object_t backing_object = object->shadow;
register vm_offset_t backing_offset = object->shadow_offset;
register vm_offset_t new_offset;
register vm_page_t p, pp;
/*
* If all of the pages in the backing object are
* shadowed by the parent object, the parent
* object no longer has to shadow the backing
* object; it can shadow the next one in the
* chain.
*
* The backing object must not be paged out - we'd
* have to check all of the paged-out pages, as
* well.
*/
if (backing_object->pager != NULL)
return KERN_FAILURE;
/*
* Should have a check for a 'small' number
* of pages here.
*/
for (p = backing_object->memq.tqh_first; p != NULL;
p = p->listq.tqe_next) {
new_offset = p->offset - backing_offset;
/*
* If the parent has a page here, or if
* this page falls outside the parent,
* keep going.
*
* Otherwise, the backing_object must be
* left in the chain.
*/
if (p->offset >= backing_offset && new_offset < object->size &&
((pp = vm_page_lookup(object, new_offset)) == NULL ||
(pp->flags & PG_FAKE))) {
/*
* Page still needed. Can't go any further.
*/
return KERN_FAILURE;
}
}
/*
* Make the parent shadow the next object
* in the chain. Deallocating backing_object
* will not remove it, since its reference
* count is at least 2.
*/
vm_object_lock(object->shadow);
if (backing_object->shadow)
vm_object_lock(backing_object->shadow);
vm_object_set_shadow(object, backing_object->shadow);
if (backing_object->shadow)
vm_object_unlock(backing_object->shadow);
vm_object_reference(object->shadow);
vm_object_unlock(object->shadow);
object->shadow_offset += backing_object->shadow_offset;
/*
* Backing object might have had a copy pointer
* to us. If it did, clear it.
*/
if (backing_object->copy == object)
backing_object->copy = NULL;
/* Drop the reference count on backing_object.
* Since its ref_count was at least 2, it
* will not vanish; so we don't need to call
* vm_object_deallocate.
*/
backing_object->ref_count--;
vm_object_unlock(backing_object);
object_bypasses++;
return KERN_SUCCESS;
}
/*
* vm_object_collapse:
*
@@ -1026,10 +1551,6 @@ vm_object_collapse(object)
{
register vm_object_t backing_object;
register vm_offset_t backing_offset;
register vm_size_t size;
register vm_offset_t new_offset;
register vm_page_t p, pp;
if (!vm_object_collapse_allowed)
return;
@@ -1039,11 +1560,9 @@ vm_object_collapse(object)
* Verify that the conditions are right for collapse:
*
* The object exists and no pages in it are currently
* being paged out (or have ever been paged out).
* being paged out.
*/
if (object == NULL ||
object->paging_in_progress != 0 ||
object->pager != NULL)
if (object == NULL || object->paging_in_progress)
return;
/*
@@ -1084,195 +1603,21 @@ vm_object_collapse(object)
return;
}
/*
* We know that we can either collapse the backing
* object (if the parent is the only reference to
* it) or (perhaps) remove the parent's reference
* to it.
*/
backing_offset = object->shadow_offset;
size = object->size;
/*
* If there is exactly one reference to the backing
* object, we can collapse it into the parent.
* object, we can collapse it into the parent,
* otherwise we might be able to bypass it completely.
*/
if (backing_object->ref_count == 1) {
/*
* We can collapse the backing object.
*
* Move all in-memory pages from backing_object
* to the parent. Pages that have been paged out
* will be overwritten by any of the parent's
* pages that shadow them.
*/
while ((p = backing_object->memq.tqh_first) != NULL) {
new_offset = (p->offset - backing_offset);
/*
* If the parent has a page here, or if
* this page falls outside the parent,
* dispose of it.
*
* Otherwise, move it as planned.
*/
if (p->offset < backing_offset ||
new_offset >= size) {
vm_page_lock_queues();
vm_page_free(p);
vm_page_unlock_queues();
} else {
pp = vm_page_lookup(object, new_offset);
if (pp != NULL && !(pp->flags & PG_FAKE)) {
vm_page_lock_queues();
vm_page_free(p);
vm_page_unlock_queues();
}
else {
if (pp) {
/* may be someone waiting for it */
PAGE_WAKEUP(pp);
vm_page_lock_queues();
vm_page_free(pp);
vm_page_unlock_queues();
}
vm_page_rename(p, object, new_offset);
}
}
}
/*
* Move the pager from backing_object to object.
*
* XXX We're only using part of the paging space
* for keeps now... we ought to discard the
* unused portion.
*/
if (backing_object->pager) {
object->pager = backing_object->pager;
object->paging_offset = backing_offset +
backing_object->paging_offset;
backing_object->pager = NULL;
}
/*
* Object now shadows whatever backing_object did.
* Note that the reference to backing_object->shadow
* moves from within backing_object to within object.
*/
object->shadow = backing_object->shadow;
object->shadow_offset += backing_object->shadow_offset;
if (object->shadow != NULL &&
object->shadow->copy != NULL) {
panic("vm_object_collapse: we collapsed a copy-object!");
}
/*
* Discard backing_object.
*
* Since the backing object has no pages, no
* pager left, and no object references within it,
* all that is necessary is to dispose of it.
*/
vm_object_unlock(backing_object);
simple_lock(&vm_object_list_lock);
TAILQ_REMOVE(&vm_object_list, backing_object,
object_list);
vm_object_count--;
simple_unlock(&vm_object_list_lock);
free((caddr_t)backing_object, M_VMOBJ);
object_collapses++;
}
else {
/*
* If all of the pages in the backing object are
* shadowed by the parent object, the parent
* object no longer has to shadow the backing
* object; it can shadow the next one in the
* chain.
*
* The backing object must not be paged out - we'd
* have to check all of the paged-out pages, as
* well.
*/
if (backing_object->pager != NULL) {
if (vm_object_collapse_aux(object) != KERN_SUCCESS) {
vm_object_unlock(backing_object);
return;
}
/*
* Should have a check for a 'small' number
* of pages here.
*/
for (p = backing_object->memq.tqh_first;
p != NULL;
p = p->listq.tqe_next) {
new_offset = (p->offset - backing_offset);
/*
* If the parent has a page here, or if
* this page falls outside the parent,
* keep going.
*
* Otherwise, the backing_object must be
* left in the chain.
*/
if (p->offset >= backing_offset &&
new_offset < size &&
((pp = vm_page_lookup(object, new_offset))
== NULL ||
(pp->flags & PG_FAKE))) {
/*
* Page still needed.
* Can't go any further.
*/
vm_object_unlock(backing_object);
return;
}
}
/*
* Make the parent shadow the next object
* in the chain. Deallocating backing_object
* will not remove it, since its reference
* count is at least 2.
*/
object->shadow = backing_object->shadow;
vm_object_reference(object->shadow);
object->shadow_offset += backing_object->shadow_offset;
/*
* Backing object might have had a copy pointer
* to us. If it did, clear it.
*/
if (backing_object->copy == object) {
backing_object->copy = NULL;
}
/* Drop the reference count on backing_object.
* Since its ref_count was at least 2, it
* will not vanish; so we don't need to call
* vm_object_deallocate.
*/
backing_object->ref_count--;
vm_object_unlock(backing_object);
object_bypasses ++;
} else
if (vm_object_bypass(object) != KERN_SUCCESS) {
vm_object_unlock(backing_object);
return;
}
/*
@@ -1420,9 +1765,10 @@ _vm_object_print(object, full, pr)
void (*pr) __P((const char *, ...));
{
register vm_page_t p;
extern indent;
char *delim;
vm_object_t o;
register int count;
extern int indent;
if (object == NULL)
return;
@@ -1433,6 +1779,13 @@ _vm_object_print(object, full, pr)
(*pr)("pager=0x%lx+0x%lx, shadow=(0x%lx)+0x%lx\n",
(long) object->pager, (long) object->paging_offset,
(long) object->shadow, (long) object->shadow_offset);
(*pr)("shadowers=(");
delim = "";
for (o = object->shadowers.lh_first; o; o = o->shadowers_list.le_next) {
(*pr)("%s0x%x", delim, o);
delim = ", ";
}
(*pr)(")\n");
(*pr)("cache: next=0x%lx, prev=0x%lx\n",
(long)object->cached_list.tqe_next,
(long)object->cached_list.tqe_prev);
@@ -1459,3 +1812,46 @@ _vm_object_print(object, full, pr)
(*pr)("\n");
indent -= 2;
}
/*
* vm_object_set_shadow:
*
* Maintain the shadow graph so that back-link consistency is
* always kept.
*
* Assumes both objects as well as the old shadow to be locked
* (unless NULL of course).
*/
void
vm_object_set_shadow(object, shadow)
vm_object_t object, shadow;
{
vm_object_t old_shadow = object->shadow;
#ifdef DEBUG
if (vmdebug & VMDEBUG_SHADOW)
printf("vm_object_set_shadow(object=0x%p, shadow=0x%p) "
"old_shadow=0x%p\n", object, shadow, old_shadow);
if (vmdebug & VMDEBUG_SHADOW_VERBOSE) {
vm_object_print(object, 0);
vm_object_print(old_shadow, 0);
vm_object_print(shadow, 0);
}
#endif
if (old_shadow == shadow)
return;
if (old_shadow) {
LIST_REMOVE(object, shadowers_list);
}
if (shadow) {
LIST_INSERT_HEAD(&shadow->shadowers, object, shadowers_list);
}
object->shadow = shadow;
#ifdef DEBUG
if (vmdebug & VMDEBUG_SHADOW_VERBOSE) {
vm_object_print(object, 0);
vm_object_print(old_shadow, 0);
vm_object_print(shadow, 0);
}
#endif
}
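A short usage sketch of vm_object_set_shadow() (illustrative, not from the diff), for two objects a and b with a's old shadow assumed NULL, showing the locking its comment requires and the back-link invariant it maintains:

	vm_object_lock(a);
	vm_object_lock(b);
	vm_object_set_shadow(a, b);	/* a now shadows b */
	/*
	 * Invariant from here on: a->shadow == b and a sits on
	 * b->shadowers (linked through a->shadowers_list), so b can
	 * enumerate its shadowers, e.g. when deciding to collapse.
	 */
	vm_object_unlock(b);
	vm_object_unlock(a);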

sys/vm/vm_object.h

@@ -1,4 +1,4 @@
/* $NetBSD: vm_object.h,v 1.16 1995/03/29 22:10:28 briggs Exp $ */
/* $NetBSD: vm_object.h,v 1.17 1997/01/03 18:03:31 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -98,13 +98,18 @@ struct vm_object {
struct vm_object *shadow; /* My shadow */
vm_offset_t shadow_offset; /* Offset in shadow */
TAILQ_ENTRY(vm_object) cached_list; /* for persistence */
LIST_HEAD(, vm_object) shadowers; /* set of shadowers */
LIST_ENTRY(vm_object) shadowers_list; /* link to next shadower of
this object's shadow */
};
/*
* Flags
*/
#define OBJ_CANPERSIST 0x0001 /* allow to persist */
#define OBJ_INTERNAL 0x0002 /* internally created object */
#define OBJ_ACTIVE 0x0004 /* used to mark active objects */
#define OBJ_FADING 0x0008 /* tell others that the object is going away */
TAILQ_HEAD(vm_object_hash_head, vm_object_hash_entry);

sys/vm/vm_page.c

@@ -1,4 +1,4 @@
/* $NetBSD: vm_page.c,v 1.29 1997/01/03 18:00:25 mrg Exp $ */
/* $NetBSD: vm_page.c,v 1.30 1997/01/03 18:03:33 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -70,6 +70,7 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

sys/vm/vm_page.h

@@ -1,4 +1,4 @@
/* $NetBSD: vm_page.h,v 1.18 1995/03/26 20:39:13 jtc Exp $ */
/* $NetBSD: vm_page.h,v 1.19 1997/01/03 18:03:36 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -121,7 +121,16 @@ struct vm_page {
#define PG_INACTIVE 0x0001 /* page is in inactive list (P) */
#define PG_ACTIVE 0x0002 /* page is in active list (P) */
#define PG_LAUNDRY 0x0004 /* page is being cleaned now (P)*/
#define PG_CLEAN 0x0008 /* page has not been modified */
#define PG_CLEAN 0x0008 /* page has not been modified.
There is one case in which this bit
is cleared even though the page is
not physically dirty: when a
collapse operation moves pages
between two different pagers. The
bit then serves as a marker telling
the pageout daemon that the page
should be paged out into the target
pager. */
#define PG_BUSY 0x0010 /* page is in transit (O) */
#define PG_WANTED 0x0020 /* someone is waiting for page (O) */
#define PG_TABLED 0x0040 /* page is in VP table (O) */
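The marker use described in the PG_CLEAN comment above can be seen in the vm_object.c hunks of this commit: when the collapse code moves a paged-out page from the backing pager up to the front object, it does

	backing_page->flags &= ~(PG_FAKE|PG_CLEAN);
	vm_page_rename(backing_page, object,
	    paged_offset - backing_offset);

so that the pageout daemon later writes the page to the front object's pager even though it was never modified in core.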

sys/vm/vm_pageout.c

@@ -1,4 +1,4 @@
/* $NetBSD: vm_pageout.c,v 1.24 1996/09/18 02:04:50 mrg Exp $ */
/* $NetBSD: vm_pageout.c,v 1.25 1997/01/03 18:03:38 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -70,6 +70,7 @@
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <vm/vm.h>
@@ -276,6 +277,10 @@ vm_pageout_page(m, object)
if (object->pager == NULL)
vm_object_collapse(object);
#ifdef DIAGNOSTIC
if (object->paging_in_progress == 0xdead)
panic("vm_pageout_page: object deallocated");
#endif
object->paging_in_progress++;
vm_object_unlock(object);
@@ -442,6 +447,10 @@ vm_pageout_cluster(m, object)
* in case it blocks.
*/
vm_page_unlock_queues();
#ifdef DIAGNOSTIC
if (object->paging_in_progress == 0xdead)
panic("vm_pageout_cluster: object deallocated");
#endif
object->paging_in_progress++;
vm_object_unlock(object);
again:

sys/vm/vm_pager.c

@@ -1,4 +1,4 @@
/* $NetBSD: vm_pager.c,v 1.23 1996/10/12 21:50:13 christos Exp $ */
/* $NetBSD: vm_pager.c,v 1.24 1997/01/03 18:03:39 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -172,10 +172,38 @@ vm_pager_deallocate(pager)
{
if (pager == NULL)
panic("vm_pager_deallocate: null pager");
(*pager->pg_ops->pgo_dealloc)(pager);
}
int
vm_pager_remove(pager, from, to)
vm_pager_t pager;
vm_offset_t from, to;
{
if (pager == NULL)
panic("vm_pager_remove: null pager");
return (*pager->pg_ops->pgo_remove)(pager, from, to);
}
vm_offset_t
vm_pager_next(pager, offset)
vm_pager_t pager;
vm_offset_t offset;
{
if (pager == NULL)
panic("vm_pager_next: null pager");
return (*pager->pg_ops->pgo_next)(pager, offset);
}
int
vm_pager_count(pager)
vm_pager_t pager;
{
if (pager == NULL)
panic("vm_pager_count: null pager");
return (*pager->pg_ops->pgo_count)(pager);
}
int
vm_pager_get_pages(pager, mlist, npages, sync)
vm_pager_t pager;

sys/vm/vm_pager.h

@@ -1,4 +1,4 @@
/* $NetBSD: vm_pager.h,v 1.10 1995/03/26 20:39:15 jtc Exp $ */
/* $NetBSD: vm_pager.h,v 1.11 1997/01/03 18:03:41 mrg Exp $ */
/*
* Copyright (c) 1990 University of Utah.
@@ -76,15 +76,30 @@ struct pagerops {
__P((caddr_t, vm_size_t, vm_prot_t, vm_offset_t));
void (*pgo_dealloc) /* Disassociate. */
__P((vm_pager_t));
int (*pgo_getpages) /* Get (read) page. */
int (*pgo_getpages) /* Get (read) pages. */
__P((vm_pager_t, vm_page_t *, int, boolean_t));
int (*pgo_putpages) /* Put (write) page. */
int (*pgo_putpages) /* Put (write) pages. */
__P((vm_pager_t, vm_page_t *, int, boolean_t));
boolean_t (*pgo_haspage) /* Does pager have page? */
__P((vm_pager_t, vm_offset_t));
void (*pgo_cluster) /* Return range of cluster. */
__P((vm_pager_t, vm_offset_t,
vm_offset_t *, vm_offset_t *));
/*
* The following are an extension to the original Mach pager
* interface first seen in BSD/OS 2.1 (at least as far as I am
* aware). As compatibility is a good thing (tm) I chose to
* use that interface extension instead of coming up with one
* of my own (the interface must be extended to make the
* object collapse operation work in the presence of pagers).
* -- Niklas Hallqvist (niklas@appli.se).
*/
int (*pgo_remove) /* Don't manage range anymore */
__P((vm_pager_t, vm_offset_t, vm_offset_t));
vm_offset_t (*pgo_next) /* Find next page in pager. */
__P((vm_pager_t, vm_offset_t));
int (*pgo_count) /* How many pages in pager? */
__P((vm_pager_t));
};
/*
@@ -116,6 +131,10 @@ void vm_pager_clusternull
__P((vm_pager_t, vm_offset_t,
vm_offset_t *, vm_offset_t *));
void vm_pager_deallocate __P((vm_pager_t));
int vm_pager_remove
__P((vm_pager_t, vm_offset_t, vm_offset_t));
int vm_pager_count __P((vm_pager_t));
vm_offset_t vm_pager_next __P((vm_pager_t, vm_offset_t));
int vm_pager_get_pages
__P((vm_pager_t, vm_page_t *, int, boolean_t));
boolean_t vm_pager_has_page __P((vm_pager_t, vm_offset_t));
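Finally, the caller-side pattern these prototypes support, as used by the new vm_object_remove_from_pager() in vm_object.c (restated here as a sketch, error handling elided):

	int cnt;

	cnt = vm_pager_remove(pager, from, to);
	/* If the pager no longer backs any pages, let it go. */
	if (cnt > 0 && vm_pager_count(pager) == 0) {
		vm_pager_deallocate(pager);
		object->pager = NULL;
	}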