/*	$NetBSD: uvm_anon.c,v 1.31 2004/09/01 11:53:38 yamt Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_anon.c: uvm anon ops
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.31 2004/09/01 11:53:38 yamt Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>

/*
 * anonblock_list: global list of anon blocks,
 * locked by swap_syscall_lock (since we never remove
 * anything from this list and we only add to it via swapctl(2)).
 */

struct uvm_anonblock {
	LIST_ENTRY(uvm_anonblock) list;
	int count;
	struct vm_anon *anons;
};
static LIST_HEAD(anonlist, uvm_anonblock) anonblock_list;

static boolean_t anon_pagein(struct vm_anon *);

/*
 * allocate anons
 */
void
uvm_anon_init()
{
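	/*
	 * Size the initial anon pool to roughly 15/16 of the pages that
	 * are currently free (uvmexp.free minus one sixteenth of it).
	 */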
	int nanon = uvmexp.free - (uvmexp.free / 16); /* XXXCDC ??? */

	simple_lock_init(&uvm.afreelock);
	LIST_INIT(&anonblock_list);

	/*
	 * Allocate the initial anons.
	 */
	uvm_anon_add(nanon);
}

/*
 * add some more anons to the free pool.  called when we add
 * more swap space.
 *
 * => swap_syscall_lock should be held (protects anonblock_list).
 */
int
uvm_anon_add(count)
	int count;
{
	struct uvm_anonblock *anonblock;
	struct vm_anon *anon;
	int lcv, needed;

	simple_lock(&uvm.afreelock);
	uvmexp.nanonneeded += count;
	needed = uvmexp.nanonneeded - uvmexp.nanon;
	simple_unlock(&uvm.afreelock);

	if (needed <= 0) {
		return 0;
	}
	anon = (void *)uvm_km_alloc(kernel_map, sizeof(*anon) * needed);
	if (anon == NULL) {
		simple_lock(&uvm.afreelock);
		uvmexp.nanonneeded -= count;
		simple_unlock(&uvm.afreelock);
		return ENOMEM;
	}
	MALLOC(anonblock, void *, sizeof(*anonblock), M_UVMAMAP, M_WAITOK);

	anonblock->count = needed;
	anonblock->anons = anon;
	LIST_INSERT_HEAD(&anonblock_list, anonblock, list);
	memset(anon, 0, sizeof(*anon) * needed);

	simple_lock(&uvm.afreelock);
	uvmexp.nanon += needed;
	uvmexp.nfreeanon += needed;
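	/*
	 * Initialize each new anon's lock and push it onto the front of
	 * the global free list (uvm.afree).
	 */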
	for (lcv = 0; lcv < needed; lcv++) {
		simple_lock_init(&anon[lcv].an_lock);
		anon[lcv].u.an_nxt = uvm.afree;
		uvm.afree = &anon[lcv];
	}
	simple_unlock(&uvm.afreelock);
	return 0;
}

/*
 * remove anons from the free pool.
 */
void
uvm_anon_remove(count)
	int count;
{
	/*
	 * we never actually free any anons, to avoid allocation overhead.
	 * XXX someday we might want to try to free anons.
	 */

	simple_lock(&uvm.afreelock);
	uvmexp.nanonneeded -= count;
	simple_unlock(&uvm.afreelock);
}

/*
 * allocate an anon
 *
 * => new anon is returned locked!
 */
struct vm_anon *
uvm_analloc()
{
	struct vm_anon *a;

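	/*
	 * Pop the first anon off the global free list, if one is available,
	 * and hand it back with a reference and its lock held.
	 */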
	simple_lock(&uvm.afreelock);
	a = uvm.afree;
	if (a) {
		uvm.afree = a->u.an_nxt;
		uvmexp.nfreeanon--;
		a->an_ref = 1;
		a->an_swslot = 0;
		a->u.an_page = NULL;		/* so we can free quickly */
		LOCK_ASSERT(simple_lock_held(&a->an_lock) == 0);
		simple_lock(&a->an_lock);
	}
	simple_unlock(&uvm.afreelock);
	return(a);
}

/*
 * uvm_anfree: free a single anon structure
 *
 * => caller must remove anon from its amap before calling (if it was in
 *	an amap).
 * => anon must be unlocked and have a zero reference count.
 * => we may lock the pageq's.
 */

void
uvm_anfree(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_anfree"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(anon=0x%x)", anon, 0,0,0);

	KASSERT(anon->an_ref == 0);
	LOCK_ASSERT(!simple_lock_held(&anon->an_lock));

	/*
	 * get page
	 */

	pg = anon->u.an_page;

	/*
	 * if there is a resident page and it is loaned, then anon may not
	 * own it.  call out to uvm_anon_lockloanpg() to ensure the real
	 * owner of the page has been identified and locked.
	 */

	if (pg && pg->loan_count) {
		simple_lock(&anon->an_lock);
		pg = uvm_anon_lockloanpg(anon);
		simple_unlock(&anon->an_lock);
	}

	/*
	 * if we have a resident page, we must dispose of it before freeing
	 * the anon.
	 */

	if (pg) {

		/*
		 * if the page is owned by a uobject (now locked), then we must
		 * kill the loan on the page rather than free it.
		 */

		if (pg->uobject) {
			uvm_lock_pageq();
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->uanon = NULL;
			uvm_unlock_pageq();
			simple_unlock(&pg->uobject->vmobjlock);
		} else {

			/*
			 * page has no uobject, so we must be the owner of it.
			 */

			KASSERT((pg->flags & PG_RELEASED) == 0);
			simple_lock(&anon->an_lock);
			pmap_page_protect(pg, VM_PROT_NONE);

			/*
			 * if the page is busy, mark it as PG_RELEASED
			 * so that uvm_anon_release will release it later.
			 */

			if (pg->flags & PG_BUSY) {
				pg->flags |= PG_RELEASED;
				simple_unlock(&anon->an_lock);
				return;
			}
			uvm_lock_pageq();
			uvm_pagefree(pg);
			uvm_unlock_pageq();
			simple_unlock(&anon->an_lock);
			UVMHIST_LOG(maphist, "anon 0x%x, page 0x%x: "
				    "freed now!", anon, pg, 0, 0);
		}
	}
	if (pg == NULL && anon->an_swslot > 0) {
		/* this page is no longer only in swap. */
		simple_lock(&uvm.swap_data_lock);
		KASSERT(uvmexp.swpgonly > 0);
		uvmexp.swpgonly--;
		simple_unlock(&uvm.swap_data_lock);
	}

	/*
	 * free any swap resources.
	 */

	uvm_anon_dropswap(anon);

	/*
	 * now that we've stripped the data areas from the anon,
	 * free the anon itself.
	 */

	KASSERT(anon->u.an_page == NULL);
	KASSERT(anon->an_swslot == 0);

	simple_lock(&uvm.afreelock);
	anon->u.an_nxt = uvm.afree;
	uvm.afree = anon;
	uvmexp.nfreeanon++;
	simple_unlock(&uvm.afreelock);
	UVMHIST_LOG(maphist,"<- done!",0,0,0,0);
}

/*
 * uvm_anon_dropswap:  release any swap resources from this anon.
 *
 * => anon must be locked or have a reference count of 0.
 */
void
uvm_anon_dropswap(anon)
	struct vm_anon *anon;
{
	UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist);

	if (anon->an_swslot == 0)
		return;

	UVMHIST_LOG(maphist,"freeing swap for anon %p, paged to swslot 0x%x",
		    anon, anon->an_swslot, 0, 0);
	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
}

/*
 * uvm_anon_lockloanpg: given a locked anon, lock its resident page
 *
 * => anon is locked by caller
 * => on return: anon is locked
 *		 if there is a resident page:
 *			if it has a uobject, it is locked by us
 *			if it is ownerless, we take over as owner
 *		 we return the resident page (it can change during
 *		 this function)
 * => note that the only time an anon has an ownerless resident page
 *	is if the page was loaned from a uvm_object and the uvm_object
 *	disowned it
 * => this only needs to be called when you want to do an operation
 *	on an anon's resident page and that page has a non-zero loan
 *	count.
 */
struct vm_page *
uvm_anon_lockloanpg(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg;
	boolean_t locked = FALSE;

	LOCK_ASSERT(simple_lock_held(&anon->an_lock));

	/*
	 * loop while we have a resident page that has a non-zero loan count.
	 * if we successfully get our lock, we will "break" the loop.
	 * note that the test for pg->loan_count is not protected -- this
	 * may produce false positive results.  note that a false positive
	 * result may cause us to do more work than we need to, but it will
	 * not produce an incorrect result.
	 */

	while (((pg = anon->u.an_page) != NULL) && pg->loan_count != 0) {

		/*
		 * quickly check to see if the page has an object before
		 * bothering to lock the page queues.  this may also produce
		 * a false positive result, but that's ok because we do a real
		 * check after that.
		 */

		if (pg->uobject) {
			uvm_lock_pageq();
			if (pg->uobject) {
				locked =
				    simple_lock_try(&pg->uobject->vmobjlock);
			} else {
				/* object disowned before we got PQ lock */
				locked = TRUE;
			}
			uvm_unlock_pageq();

			/*
			 * if we didn't get a lock (try lock failed), then we
			 * toggle our anon lock and try again
			 */

			if (!locked) {
				simple_unlock(&anon->an_lock);

				/*
				 * someone locking the object has a chance to
				 * lock us right now
				 */

				simple_lock(&anon->an_lock);
				continue;
			}
		}

		/*
		 * if page is un-owned [i.e. the object dropped its ownership],
		 * then we can take over as owner!
		 */

		if (pg->uobject == NULL && (pg->pqflags & PQ_ANON) == 0) {
			uvm_lock_pageq();
			pg->pqflags |= PQ_ANON;
			pg->loan_count--;
			uvm_unlock_pageq();
		}
		break;
	}
	return(pg);
}

/*
 * page in every anon that is paged out to a range of swslots.
 *
 * swap_syscall_lock should be held (protects anonblock_list).
 */

boolean_t
anon_swap_off(startslot, endslot)
	int startslot, endslot;
{
	struct uvm_anonblock *anonblock;

	LIST_FOREACH(anonblock, &anonblock_list, list) {
		int i;

		/*
		 * loop thru all the anons in the anonblock,
		 * paging in where needed.
		 */

		for (i = 0; i < anonblock->count; i++) {
			struct vm_anon *anon = &anonblock->anons[i];
			int slot;

			/*
			 * lock anon to work on it.
			 */

			simple_lock(&anon->an_lock);

			/*
			 * is this anon's swap slot in range?
			 */

			slot = anon->an_swslot;
			if (slot >= startslot && slot < endslot) {
				boolean_t rv;

				/*
				 * yup, page it in.
				 */

				/* locked: anon */
				rv = anon_pagein(anon);
				/* unlocked: anon */

				if (rv) {
					return rv;
				}
			} else {

				/*
				 * nope, unlock and proceed.
				 */

				simple_unlock(&anon->an_lock);
			}
		}
	}
	return FALSE;
}

/*
 * fetch an anon's page.
 *
 * => anon must be locked, and is unlocked upon return.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */

static boolean_t
anon_pagein(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	int rv;

	/* locked: anon */
	LOCK_ASSERT(simple_lock_held(&anon->an_lock));

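	/*
	 * Ask the fault code to bring the anon's page in (from swap if
	 * necessary).  There is no fault context or amap involved here,
	 * so NULL is passed for both.
	 */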
	rv = uvmfault_anonget(NULL, NULL, anon);

	/*
	 * if rv == 0, anon is still locked, else anon
	 * is unlocked
	 */

	switch (rv) {
	case 0:
		break;

	case EIO:
	case ERESTART:

		/*
		 * nothing more to do on errors.
		 * ERESTART can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */

		return FALSE;

	default:
		return TRUE;
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */

	pg = anon->u.an_page;
	uobj = pg->uobject;
	if (anon->an_swslot > 0)
		uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
	pg->flags &= ~(PG_CLEAN);

	/*
	 * deactivate the page (to put it on a page queue)
	 */

	pmap_clear_reference(pg);
	uvm_lock_pageq();
	if (pg->wire_count == 0)
		uvm_pagedeactivate(pg);
	uvm_unlock_pageq();

	if (pg->flags & PG_WANTED) {
		wakeup(pg);
		pg->flags &= ~(PG_WANTED);
	}

	/*
	 * unlock the anon and we're done.
	 */

	simple_unlock(&anon->an_lock);
	if (uobj) {
		simple_unlock(&uobj->vmobjlock);
	}
	return FALSE;
}

/*
 * uvm_anon_release: release an anon and its page.
 *
 * => caller must lock the anon.
 */

void
uvm_anon_release(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg = anon->u.an_page;

	LOCK_ASSERT(simple_lock_held(&anon->an_lock));

	KASSERT(pg != NULL);
	KASSERT((pg->flags & PG_RELEASED) != 0);
	KASSERT((pg->flags & PG_BUSY) != 0);
	KASSERT(pg->uobject == NULL);
	KASSERT(pg->uanon == anon);
	KASSERT(pg->loan_count == 0);
	KASSERT(anon->an_ref == 0);

	uvm_lock_pageq();
	uvm_pagefree(pg);
	uvm_unlock_pageq();
	simple_unlock(&anon->an_lock);

	KASSERT(anon->u.an_page == NULL);

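	/*
	 * The page is gone and the reference count is zero, so the anon
	 * itself can now be returned to the free pool.
	 */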
	uvm_anfree(anon);
}