NetBSD/sys/uvm/uvm_fault.c

/* $NetBSD: uvm_fault.c,v 1.197 2015/06/22 06:24:17 matt Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* from: Id: uvm_fault.c,v 1.1.2.23 1998/02/06 05:29:05 chs Exp
*/
/*
* uvm_fault.c: fault handler
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.197 2015/06/22 06:24:17 matt Exp $");
#include "opt_uvmhist.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mman.h>
#include <uvm/uvm.h>
/*
*
* a word on page faults:
*
* types of page faults we handle:
*
* CASE 1: upper layer faults                   CASE 2: lower layer faults
*
*    CASE 1A         CASE 1B                  CASE 2A        CASE 2B
*    read/write1     write>1                  read/write     +-cow_write/zero
*         |             |                         |        |
*      +--|--+       +--|--+     +-----+       +  |  +     | +-----+
* amap |  V  |       |  ---------> new |          |        | |  ^  |
*      +-----+       +-----+     +-----+       +  |  +     | +--|--+
*                                                 |        |    |
*      +-----+       +-----+                   +--|--+     | +--|--+
* uobj | d/c |       | d/c |                   |  V  |     +----+  |
*      +-----+       +-----+                   +-----+       +-----+
*
* d/c = don't care
*
* case [0]: layerless fault
* no amap or uobj is present. this is an error.
*
* case [1]: upper layer fault [anon active]
* 1A: [read] or [write with anon->an_ref == 1]
* I/O takes place in upper level anon and uobj is not touched.
* 1B: [write with anon->an_ref > 1]
* new anon is alloc'd and data is copied off ["COW"]
*
* case [2]: lower layer fault [uobj]
* 2A: [read on non-NULL uobj] or [write to non-copy_on_write area]
* I/O takes place directly in object.
* 2B: [write to copy_on_write] or [read on NULL uobj]
* data is "promoted" from uobj to a new anon.
* if uobj is null, then we zero fill.
*
* we follow the standard UVM locking protocol ordering:
*
* MAPS => AMAP => UOBJ => ANON => PAGE QUEUES (PQ)
* we hold a PG_BUSY page if we unlock for I/O
*
*
* the code is structured as follows:
*
* - init the "IN" params in the ufi structure
* ReFault: (ERESTART returned to the loop in uvm_fault_internal)
* - do lookups [locks maps], check protection, handle needs_copy
* - check for case 0 fault (error)
* - establish "range" of fault
* - if we have an amap lock it and extract the anons
* - if sequential advice deactivate pages behind us
* - at the same time check pmap for unmapped areas and anon for pages
* that we could map in (and do map it if found)
* - check object for resident pages that we could map in
* - if (case 2) goto Case2
* - >>> handle case 1
* - ensure source anon is resident in RAM
* - if case 1B alloc new anon and copy from source
* - map the correct page in
* Case2:
* - >>> handle case 2
* - ensure source page is resident (if uobj)
* - if case 2B alloc new anon and copy from source (could be zero
* fill if uobj == NULL)
* - map the correct page in
* - done!
*
* note on paging:
* if we have to do I/O we place a PG_BUSY page in the correct object,
* unlock everything, and do the I/O. when I/O is done we must reverify
* the state of the world before assuming that our data structures are
* valid. [because mappings could change while the map is unlocked]
*
* alternative 1: unbusy the page in question and restart the page fault
* from the top (ReFault). this is easy but does not take advantage
* of the information that we already have from our previous lookup,
* although it is possible that the "hints" in the vm_map will help here.
*
* alternative 2: the system already keeps track of a "version" number of
* a map. [i.e. every time you write-lock a map (e.g. to change a
* mapping) you bump the version number up by one...] so, we can save
* the version number of the map before we release the lock and start I/O.
* then when I/O is done we can relock and check the version numbers
* to see if anything changed. this might save us something over
* alternative 1 because we don't have to unbusy the page and may need
* fewer comparisons(?).
*
* alternative 3: put in backpointers or a way to "hold" part of a map
* in place while I/O is in progress. this could be complex to
* implement (especially with structures like amap that can be referenced
* by multiple map entries, and figuring out what should wait could be
* complex as well...).
*
* we use alternative 2. given that we are multi-threaded now we may want
* to reconsider the choice.
*/
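/*
 * editorial sketch (not part of the original file): the ReFault structure
 * described above is driven from uvm_fault_internal() further below,
 * roughly as follows (the pgo_fault special case is omitted):
 *
 *	error = ERESTART;
 *	while (error == ERESTART) {		// ReFault:
 *		error = uvm_fault_check(&ufi, &flt, &anons, maxprot);
 *		if (error != 0)
 *			continue;
 *		error = uvm_fault_upper_lookup(&ufi, &flt, anons, pages);
 *		if (error != 0)
 *			continue;
 *		if (pages[flt.centeridx] == PGO_DONTCARE)
 *			error = uvm_fault_upper(&ufi, &flt, anons);	// case 1
 *		else
 *			error = uvm_fault_lower(&ufi, &flt, pages);	// case 2
 *	}
 *
 * any helper that has to unlock for I/O returns ERESTART, which simply
 * re-runs the whole lookup.
 */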
/*
* local data structures
*/
struct uvm_advice {
int advice;
int nback;
int nforw;
};
/*
* page range array:
* note: index in array must match "advice" value
* XXX: borrowed numbers from freebsd. do they work well for us?
*/
static const struct uvm_advice uvmadvice[] = {
{ UVM_ADV_NORMAL, 3, 4 },
{ UVM_ADV_RANDOM, 0, 0 },
{ UVM_ADV_SEQUENTIAL, 8, 7},
};
#define UVM_MAXRANGE 16 /* must be MAX() of nback+nforw+1 */
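/*
 * editorial note: the largest row in uvmadvice[] above is UVM_ADV_SEQUENTIAL
 * with nback = 8 and nforw = 7, so the widest possible fault range is
 * 8 + 7 + 1 = 16 pages, which is where UVM_MAXRANGE comes from.  the
 * anons_store[] and pages_store[] arrays in uvm_fault_internal() are sized
 * with this constant.
 */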
/*
* private prototypes
*/
/*
* externs from other modules
*/
extern int start_init_exec; /* Is init_main() done / init running? */
/*
* inline functions
*/
/*
* uvmfault_anonflush: try and deactivate pages in specified anons
*
* => does not have to deactivate page if it is busy
*/
static inline void
uvmfault_anonflush(struct vm_anon **anons, int n)
{
int lcv;
struct vm_page *pg;
for (lcv = 0; lcv < n; lcv++) {
if (anons[lcv] == NULL)
continue;
KASSERT(mutex_owned(anons[lcv]->an_lock));
pg = anons[lcv]->an_page;
if (pg && (pg->flags & PG_BUSY) == 0) {
mutex_enter(&uvm_pageqlock);
if (pg->wire_count == 0) {
uvm_pagedeactivate(pg);
}
mutex_exit(&uvm_pageqlock);
}
}
}
/*
* normal functions
*/
/*
* uvmfault_amapcopy: clear "needs_copy" in a map.
*
* => called with VM data structures unlocked (usually, see below)
* => we get a write lock on the maps and clear needs_copy for a VA
* => if we are out of RAM we sleep (waiting for more)
*/
static void
uvmfault_amapcopy(struct uvm_faultinfo *ufi)
{
for (;;) {
/*
* no mapping? give up.
*/
if (uvmfault_lookup(ufi, true) == false)
return;
/*
* copy if needed.
*/
if (UVM_ET_ISNEEDSCOPY(ufi->entry))
amap_copy(ufi->map, ufi->entry, AMAP_COPY_NOWAIT,
ufi->orig_rvaddr, ufi->orig_rvaddr + 1);
/*
* didn't work? must be out of RAM. unlock and sleep.
*/
if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
uvmfault_unlockmaps(ufi, true);
uvm_wait("fltamapcopy");
continue;
}
/*
* got it! unlock and return.
*/
uvmfault_unlockmaps(ufi, true);
return;
}
/*NOTREACHED*/
}
/*
* uvmfault_anonget: get data in an anon into a non-busy, non-released
* page in that anon.
*
* => Map, amap and thus anon should be locked by caller.
* => If we fail, we unlock everything and error is returned.
* => If we are successful, return with everything still locked.
* => We do not move the page on the queues [gets moved later]. If we
* allocate a new page [we_own], it gets put on the queues. Either way,
* the result is that the page is on the queues at return time
* => For pages which are on loan from a uvm_object (and thus are not owned
* by the anon): if successful, return with the owning object locked.
* The caller must unlock this object when it unlocks everything else.
*/
int
uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
struct vm_anon *anon)
{
struct vm_page *pg;
int error;
UVMHIST_FUNC("uvmfault_anonget"); UVMHIST_CALLED(maphist);
KASSERT(mutex_owned(anon->an_lock));
KASSERT(anon->an_lock == amap->am_lock);
/* Increment the counters.*/
uvmexp.fltanget++;
if (anon->an_page) {
curlwp->l_ru.ru_minflt++;
} else {
curlwp->l_ru.ru_majflt++;
}
error = 0;
/*
* Loop until we get the anon data, or fail.
*/
for (;;) {
bool we_own, locked;
/*
* Note: 'we_own' will become true if we set PG_BUSY on a page.
*/
we_own = false;
pg = anon->an_page;
/*
* If there is a resident page and it is loaned, then anon
* may not own it. Call out to uvm_anon_lockloanpg() to
* identify and lock the real owner of the page.
*/
if (pg && pg->loan_count)
pg = uvm_anon_lockloanpg(anon);
/*
* Is page resident? Make sure it is not busy/released.
*/
if (pg) {
/*
* at this point, if the page has a uobject [meaning
* we have it on loan], then that uobject is locked
* by us! if the page is busy, we drop all the
* locks (including uobject) and try again.
*/
if ((pg->flags & PG_BUSY) == 0) {
UVMHIST_LOG(maphist, "<- OK",0,0,0,0);
return 0;
}
pg->flags |= PG_WANTED;
uvmexp.fltpgwait++;
/*
* The last unlock must be an atomic unlock and wait
* on the owner of page.
*/
if (pg->uobject) {
/* Owner of page is UVM object. */
uvmfault_unlockall(ufi, amap, NULL);
UVMHIST_LOG(maphist, " unlock+wait on uobj",0,
0,0,0);
UVM_UNLOCK_AND_WAIT(pg,
pg->uobject->vmobjlock,
false, "anonget1", 0);
} else {
/* Owner of page is anon. */
uvmfault_unlockall(ufi, NULL, NULL);
UVMHIST_LOG(maphist, " unlock+wait on anon",0,
0,0,0);
UVM_UNLOCK_AND_WAIT(pg, anon->an_lock,
false, "anonget2", 0);
}
} else {
#if defined(VMSWAP)
/*
* No page, therefore allocate one.
*/
pg = uvm_pagealloc(NULL,
ufi != NULL ? ufi->orig_rvaddr : 0,
anon, ufi != NULL ? UVM_FLAG_COLORMATCH : 0);
if (pg == NULL) {
/* Out of memory. Wait a little. */
uvmfault_unlockall(ufi, amap, NULL);
uvmexp.fltnoram++;
UVMHIST_LOG(maphist, " noram -- UVM_WAIT",0,
0,0,0);
if (!uvm_reclaimable()) {
return ENOMEM;
}
uvm_wait("flt_noram1");
} else {
/* PG_BUSY bit is set. */
we_own = true;
uvmfault_unlockall(ufi, amap, NULL);
/*
* Pass a PG_BUSY+PG_FAKE+PG_CLEAN page into
* the uvm_swap_get() function with all data
* structures unlocked. Note that it is OK
* to read an_swslot here, because we hold
* PG_BUSY on the page.
*/
uvmexp.pageins++;
error = uvm_swap_get(pg, anon->an_swslot,
PGO_SYNCIO);
/*
* We clean up after the I/O below in the
* 'we_own' case.
*/
}
#else
panic("%s: no page", __func__);
#endif /* defined(VMSWAP) */
}
/*
* Re-lock the map and anon.
*/
locked = uvmfault_relock(ufi);
if (locked || we_own) {
mutex_enter(anon->an_lock);
}
/*
* If we own the page (i.e. we set PG_BUSY), then we need
* to clean up after the I/O. There are three cases to
* consider:
*
* 1) Page was released during I/O: free anon and ReFault.
* 2) I/O not OK. Free the page and cause the fault to fail.
* 3) I/O OK! Activate the page and sync with the non-we_own
* case (i.e. drop anon lock if not locked).
*/
if (we_own) {
#if defined(VMSWAP)
if (pg->flags & PG_WANTED) {
wakeup(pg);
}
if (error) {
/*
* Remove the swap slot from the anon and
* mark the anon as having no real slot.
* Do not free the swap slot, thus preventing
* it from being used again.
*/
if (anon->an_swslot > 0) {
uvm_swap_markbad(anon->an_swslot, 1);
}
anon->an_swslot = SWSLOT_BAD;
if ((pg->flags & PG_RELEASED) != 0) {
goto released;
}
/*
* Note: page was never !PG_BUSY, so it
* cannot be mapped and thus no need to
* pmap_page_protect() it.
*/
mutex_enter(&uvm_pageqlock);
uvm_pagefree(pg);
mutex_exit(&uvm_pageqlock);
if (locked) {
uvmfault_unlockall(ufi, NULL, NULL);
}
mutex_exit(anon->an_lock);
UVMHIST_LOG(maphist, "<- ERROR", 0,0,0,0);
return error;
}
if ((pg->flags & PG_RELEASED) != 0) {
released:
KASSERT(anon->an_ref == 0);
/*
* Released while we had unlocked amap.
*/
if (locked) {
uvmfault_unlockall(ufi, NULL, NULL);
}
uvm_anon_release(anon);
if (error) {
UVMHIST_LOG(maphist,
"<- ERROR/RELEASED", 0,0,0,0);
return error;
}
UVMHIST_LOG(maphist, "<- RELEASED", 0,0,0,0);
return ERESTART;
}
/*
* We have successfully read the page, activate it.
*/
mutex_enter(&uvm_pageqlock);
uvm_pageactivate(pg);
mutex_exit(&uvm_pageqlock);
pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
UVM_PAGE_OWN(pg, NULL);
#else
panic("%s: we_own", __func__);
#endif /* defined(VMSWAP) */
}
/*
* We were not able to re-lock the map - restart the fault.
*/
if (!locked) {
if (we_own) {
mutex_exit(anon->an_lock);
}
UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0);
return ERESTART;
}
/*
* Verify that no one has touched the amap and moved
* the anon on us.
*/
if (ufi != NULL && amap_lookup(&ufi->entry->aref,
ufi->orig_rvaddr - ufi->entry->start) != anon) {
uvmfault_unlockall(ufi, amap, NULL);
UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0);
return ERESTART;
}
/*
* Retry..
*/
uvmexp.fltanretry++;
continue;
}
/*NOTREACHED*/
}
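/*
 * editorial sketch of the caller-side contract described above (this is
 * roughly how uvm_fault_upper(), later in this file, uses the function):
 *
 *	KASSERT(mutex_owned(amap->am_lock));
 *	error = uvmfault_anonget(ufi, amap, anon);
 *	if (error != 0)
 *		return error;	// everything is already unlocked;
 *				// ERESTART re-runs the fault loop
 *	// success: maps, amap and anon are still locked and anon->an_page
 *	// is resident and neither PG_BUSY nor PG_RELEASED.
 */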
/*
* uvmfault_promote: promote data to a new anon. used for 1B and 2B.
*
* 1. allocate an anon and a page.
* 2. fill its contents.
* 3. put it into amap.
*
* => if we fail (result != 0) we unlock everything.
* => on success, return a new locked anon via 'nanon'.
* (*nanon)->an_page will be a resident, locked, dirty page.
* => it is the caller's responsibility to put the promoted nanon->an_page
* on the page queue.
*/
static int
uvmfault_promote(struct uvm_faultinfo *ufi,
struct vm_anon *oanon,
struct vm_page *uobjpage,
struct vm_anon **nanon, /* OUT: allocated anon */
struct vm_anon **spare)
{
struct vm_amap *amap = ufi->entry->aref.ar_amap;
struct uvm_object *uobj;
struct vm_anon *anon;
struct vm_page *pg;
struct vm_page *opg;
int error;
UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
if (oanon) {
/* anon COW */
opg = oanon->an_page;
KASSERT(opg != NULL);
KASSERT(opg->uobject == NULL || opg->loan_count > 0);
} else if (uobjpage != PGO_DONTCARE) {
/* object-backed COW */
opg = uobjpage;
} else {
/* ZFOD */
opg = NULL;
}
if (opg != NULL) {
uobj = opg->uobject;
} else {
uobj = NULL;
}
KASSERT(amap != NULL);
KASSERT(uobjpage != NULL);
KASSERT(uobjpage == PGO_DONTCARE || (uobjpage->flags & PG_BUSY) != 0);
KASSERT(mutex_owned(amap->am_lock));
KASSERT(oanon == NULL || amap->am_lock == oanon->an_lock);
KASSERT(uobj == NULL || mutex_owned(uobj->vmobjlock));
if (*spare != NULL) {
anon = *spare;
*spare = NULL;
} else {
anon = uvm_analloc();
}
if (anon) {
/*
* The new anon is locked.
*
* if opg == NULL, we want a zero'd, dirty page,
* so have uvm_pagealloc() do that for us.
*/
KASSERT(anon->an_lock == NULL);
anon->an_lock = amap->am_lock;
pg = uvm_pagealloc(NULL, ufi->orig_rvaddr, anon,
UVM_FLAG_COLORMATCH | (opg == NULL ? UVM_PGA_ZERO : 0));
if (pg == NULL) {
anon->an_lock = NULL;
}
} else {
pg = NULL;
}
/*
* out of memory resources?
*/
if (pg == NULL) {
/* save anon for the next try. */
if (anon != NULL) {
*spare = anon;
}
/* unlock and fail ... */
uvm_page_unbusy(&uobjpage, 1);
uvmfault_unlockall(ufi, amap, uobj);
if (!uvm_reclaimable()) {
UVMHIST_LOG(maphist, "out of VM", 0,0,0,0);
uvmexp.fltnoanon++;
error = ENOMEM;
goto done;
}
UVMHIST_LOG(maphist, "out of RAM, waiting for more", 0,0,0,0);
uvmexp.fltnoram++;
uvm_wait("flt_noram5");
error = ERESTART;
goto done;
}
/* copy page [pg now dirty] */
if (opg) {
uvm_pagecopy(opg, pg);
}
amap_add(&ufi->entry->aref, ufi->orig_rvaddr - ufi->entry->start, anon,
oanon != NULL);
*nanon = anon;
error = 0;
done:
return error;
}
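/*
 * editorial sketch: the three promotion flavours distinguished above are
 * selected purely by the arguments; the callers later in this file pass
 * roughly the following (anon_spare carries a pre-allocated anon across
 * an ERESTART):
 *
 *	// case 1B, anon COW (upper layer):
 *	error = uvmfault_promote(ufi, oanon, PGO_DONTCARE, &anon, &flt->anon_spare);
 *
 *	// case 2B, object-backed COW (lower layer):
 *	error = uvmfault_promote(ufi, NULL, uobjpage, &anon, &flt->anon_spare);
 *
 *	// case 2B, zero fill: uobjpage is PGO_DONTCARE as well (uobj == NULL).
 */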
/*
* F A U L T - m a i n e n t r y p o i n t
*/
/*
* uvm_fault: page fault handler
*
* => called from MD code to resolve a page fault
* => VM data structures usually should be unlocked. however, it is
* possible to call here with the main map locked if the caller
* gets a write lock, sets it recursive, and then calls us (c.f.
* uvm_map_pageable). this should be avoided because it keeps
* the map locked off during I/O.
* => MUST NEVER BE CALLED IN INTERRUPT CONTEXT
*/
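/*
 * editorial sketch (machine-dependent code is not part of this file):
 * a port's trap handler typically resolves a fault along the lines of
 *
 *	map = &l->l_proc->p_vmspace->vm_map;	// or kernel_map for kernel VAs
 *	error = uvm_fault(map, trunc_page(va), ftype);
 *	if (error == 0)
 *		return;				// resolved, retry the access
 *	// otherwise deliver SIGSEGV/SIGBUS or panic, depending on context
 *
 * where uvm_fault() is the wrapper declared in uvm_extern.h that calls
 * uvm_fault_internal() with fault_flag 0; the exact shape of the trap
 * handler varies per port.
 */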
#define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
~VM_PROT_WRITE : VM_PROT_ALL)
/* fault_flag values passed from uvm_fault_wire to uvm_fault_internal */
#define UVM_FAULT_WIRE (1 << 0)
#define UVM_FAULT_MAXPROT (1 << 1)
struct uvm_faultctx {
/*
* the following members are set up by uvm_fault_check() and
* read-only after that.
*
* note that narrow is used by uvm_fault_check() to change
* the behaviour after ERESTART.
*
* most of them might change after ERESTART if the underlying
* map entry has been changed behind us. an exception is
* wire_paging, which never changes.
*/
vm_prot_t access_type;
vaddr_t startva;
int npages;
int centeridx;
bool narrow; /* work on a single requested page only */
bool wire_mapping; /* request a PMAP_WIRED mapping
(UVM_FAULT_WIRE or VM_MAPENT_ISWIRED) */
bool wire_paging; /* request uvm_pagewire
(true for UVM_FAULT_WIRE) */
bool cow_now; /* VM_PROT_WRITE is actually requested
(ie. should break COW and page loaning) */
/*
* enter_prot is set up by uvm_fault_check() and clamped
* (ie. drop the VM_PROT_WRITE bit) in various places in case
* of !cow_now.
*/
vm_prot_t enter_prot; /* prot at which we want to enter pages in */
/*
* the following member is for uvmfault_promote() and ERESTART.
*/
struct vm_anon *anon_spare;
/*
* the following is actually a uvm_fault_lower() internal.
* it's here merely for debugging.
* (or due to the mechanical separation of the function?)
*/
bool promote;
};
static inline int uvm_fault_check(
struct uvm_faultinfo *, struct uvm_faultctx *,
struct vm_anon ***, bool);
static int uvm_fault_upper(
struct uvm_faultinfo *, struct uvm_faultctx *,
struct vm_anon **);
static inline int uvm_fault_upper_lookup(
struct uvm_faultinfo *, const struct uvm_faultctx *,
struct vm_anon **, struct vm_page **);
static inline void uvm_fault_upper_neighbor(
struct uvm_faultinfo *, const struct uvm_faultctx *,
vaddr_t, struct vm_page *, bool);
static inline int uvm_fault_upper_loan(
struct uvm_faultinfo *, struct uvm_faultctx *,
struct vm_anon *, struct uvm_object **);
static inline int uvm_fault_upper_promote(
struct uvm_faultinfo *, struct uvm_faultctx *,
struct uvm_object *, struct vm_anon *);
static inline int uvm_fault_upper_direct(
struct uvm_faultinfo *, struct uvm_faultctx *,
struct uvm_object *, struct vm_anon *);
static int uvm_fault_upper_enter(
struct uvm_faultinfo *, const struct uvm_faultctx *,
struct uvm_object *, struct vm_anon *,
struct vm_page *, struct vm_anon *);
static inline void uvm_fault_upper_done(
struct uvm_faultinfo *, const struct uvm_faultctx *,
struct vm_anon *, struct vm_page *);
static int uvm_fault_lower(
struct uvm_faultinfo *, struct uvm_faultctx *,
struct vm_page **);
static inline void uvm_fault_lower_lookup(
struct uvm_faultinfo *, const struct uvm_faultctx *,
struct vm_page **);
static inline void uvm_fault_lower_neighbor(
struct uvm_faultinfo *, const struct uvm_faultctx *,
vaddr_t, struct vm_page *, bool);
static inline int uvm_fault_lower_io(
struct uvm_faultinfo *, const struct uvm_faultctx *,
struct uvm_object **, struct vm_page **);
static inline int uvm_fault_lower_direct(
struct uvm_faultinfo *, struct uvm_faultctx *,
struct uvm_object *, struct vm_page *);
static inline int uvm_fault_lower_direct_loan(
struct uvm_faultinfo *, struct uvm_faultctx *,
struct uvm_object *, struct vm_page **,
struct vm_page **);
static inline int uvm_fault_lower_promote(
struct uvm_faultinfo *, struct uvm_faultctx *,
struct uvm_object *, struct vm_page *);
static int uvm_fault_lower_enter(
struct uvm_faultinfo *, const struct uvm_faultctx *,
struct uvm_object *,
struct vm_anon *, struct vm_page *);
static inline void uvm_fault_lower_done(
struct uvm_faultinfo *, const struct uvm_faultctx *,
struct uvm_object *, struct vm_page *);
int
uvm_fault_internal(struct vm_map *orig_map, vaddr_t vaddr,
vm_prot_t access_type, int fault_flag)
{
struct cpu_data *cd;
struct uvm_cpu *ucpu;
struct uvm_faultinfo ufi;
struct uvm_faultctx flt = {
.access_type = access_type,
/* don't look for neighborhood pages on "wire" fault */
.narrow = (fault_flag & UVM_FAULT_WIRE) != 0,
/* "wire" fault causes wiring of both mapping and paging */
.wire_mapping = (fault_flag & UVM_FAULT_WIRE) != 0,
.wire_paging = (fault_flag & UVM_FAULT_WIRE) != 0,
};
const bool maxprot = (fault_flag & UVM_FAULT_MAXPROT) != 0;
struct vm_anon *anons_store[UVM_MAXRANGE], **anons;
struct vm_page *pages_store[UVM_MAXRANGE], **pages;
int error;
UVMHIST_FUNC("uvm_fault"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist, "(map=%p, vaddr=%#lx, at=%d, ff=%d)",
orig_map, vaddr, access_type, fault_flag);
cd = &(curcpu()->ci_data);
cd->cpu_nfault++;
ucpu = cd->cpu_uvm;
/* Don't flood RNG subsystem with samples. */
if (cd->cpu_nfault % 503)
goto norng;
/* Don't count anything until user interaction is possible */
if (__predict_true(start_init_exec)) {
kpreempt_disable();
rnd_add_uint32(&ucpu->rs,
sizeof(vaddr_t) == sizeof(uint32_t) ?
(uint32_t)vaddr : sizeof(vaddr_t) ==
sizeof(uint64_t) ?
(uint32_t)(vaddr & 0x00000000ffffffff) :
(uint32_t)(cd->cpu_nfault & 0x00000000ffffffff));
kpreempt_enable();
}
norng:
/*
* init the IN parameters in the ufi
*/
ufi.orig_map = orig_map;
ufi.orig_rvaddr = trunc_page(vaddr);
ufi.orig_size = PAGE_SIZE; /* can't get any smaller than this */
error = ERESTART;
while (error == ERESTART) { /* ReFault: */
anons = anons_store;
pages = pages_store;
error = uvm_fault_check(&ufi, &flt, &anons, maxprot);
if (error != 0)
continue;
error = uvm_fault_upper_lookup(&ufi, &flt, anons, pages);
if (error != 0)
continue;
if (pages[flt.centeridx] == PGO_DONTCARE)
error = uvm_fault_upper(&ufi, &flt, anons);
else {
struct uvm_object * const uobj =
ufi.entry->object.uvm_obj;
if (uobj && uobj->pgops->pgo_fault != NULL) {
/*
* invoke "special" fault routine.
*/
mutex_enter(uobj->vmobjlock);
/* locked: maps(read), amap(if there), uobj */
error = uobj->pgops->pgo_fault(&ufi,
flt.startva, pages, flt.npages,
flt.centeridx, flt.access_type,
PGO_LOCKED|PGO_SYNCIO);
/*
* locked: nothing, pgo_fault has unlocked
* everything
*/
/*
* object fault routine responsible for
* pmap_update().
*/
} else {
error = uvm_fault_lower(&ufi, &flt, pages);
}
}
}
if (flt.anon_spare != NULL) {
flt.anon_spare->an_ref--;
KASSERT(flt.anon_spare->an_ref == 0);
KASSERT(flt.anon_spare->an_lock == NULL);
uvm_anon_free(flt.anon_spare);
}
return error;
}
/*
* uvm_fault_check: check prot, handle needs-copy, etc.
*
* 1. lookup entry.
* 2. check protection.
* 3. adjust fault condition (mainly for simulated fault).
* 4. handle needs-copy (lazy amap copy).
* 5. establish range of interest for neighbor fault (aka pre-fault).
* 6. look up anons (if amap exists).
* 7. flush pages (if MADV_SEQUENTIAL)
*
* => called with nothing locked.
* => if we fail (result != 0) we unlock everything.
* => initialize/adjust many members of flt.
*/
static int
uvm_fault_check(
struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
struct vm_anon ***ranons, bool maxprot)
{
struct vm_amap *amap;
struct uvm_object *uobj;
vm_prot_t check_prot;
int nback, nforw;
UVMHIST_FUNC("uvm_fault_check"); UVMHIST_CALLED(maphist);
/*
* lookup and lock the maps
*/
if (uvmfault_lookup(ufi, false) == false) {
UVMHIST_LOG(maphist, "<- no mapping @ 0x%x", ufi->orig_rvaddr,
0,0,0);
return EFAULT;
}
/* locked: maps(read) */
#ifdef DIAGNOSTIC
if ((ufi->map->flags & VM_MAP_PAGEABLE) == 0) {
printf("Page fault on non-pageable map:\n");
printf("ufi->map = %p\n", ufi->map);
printf("ufi->orig_map = %p\n", ufi->orig_map);
printf("ufi->orig_rvaddr = 0x%lx\n", (u_long) ufi->orig_rvaddr);
panic("uvm_fault: (ufi->map->flags & VM_MAP_PAGEABLE) == 0");
}
#endif
/*
* check protection
*/
check_prot = maxprot ?
ufi->entry->max_protection : ufi->entry->protection;
if ((check_prot & flt->access_type) != flt->access_type) {
UVMHIST_LOG(maphist,
"<- protection failure (prot=%#x, access=%#x)",
ufi->entry->protection, flt->access_type, 0, 0);
uvmfault_unlockmaps(ufi, false);
return EACCES;
}
/*
* "enter_prot" is the protection we want to enter the page in at.
* for certain pages (e.g. copy-on-write pages) this protection can
* be more strict than ufi->entry->protection. "wired" means either
* the entry is wired or we are fault-wiring the pg.
*/
flt->enter_prot = ufi->entry->protection;
if (VM_MAPENT_ISWIRED(ufi->entry))
flt->wire_mapping = true;
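/*
 * decide whether this fault has to be treated as a write for
 * copy-on-write purposes: for a wired mapping we enter with full
 * access, so cow_now follows the map entry's write permission;
 * otherwise it follows the access type of the fault itself.
 */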
if (flt->wire_mapping) {
flt->access_type = flt->enter_prot; /* full access for wired */
flt->cow_now = (check_prot & VM_PROT_WRITE) != 0;
} else {
flt->cow_now = (flt->access_type & VM_PROT_WRITE) != 0;
}
flt->promote = false;
/*
* handle "needs_copy" case. if we need to copy the amap we will
* have to drop our readlock and relock it with a write lock. (we
* need a write lock to change anything in a map entry [e.g.
* needs_copy]).
*/
if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
if (flt->cow_now || (ufi->entry->object.uvm_obj == NULL)) {
KASSERT(!maxprot);
/* need to clear */
UVMHIST_LOG(maphist,
" need to clear needs_copy and refault",0,0,0,0);
uvmfault_unlockmaps(ufi, false);
uvmfault_amapcopy(ufi);
uvmexp.fltamcopy++;
return ERESTART;
} else {
/*
* ensure that we pmap_enter page R/O since
* needs_copy is still true
*/
flt->enter_prot &= ~VM_PROT_WRITE;
}
}
/*
* identify the players
*/
amap = ufi->entry->aref.ar_amap; /* upper layer */
uobj = ufi->entry->object.uvm_obj; /* lower layer */
/*
* check for a case 0 fault. if nothing backing the entry then
* error now.
*/
if (amap == NULL && uobj == NULL) {
uvmfault_unlockmaps(ufi, false);
UVMHIST_LOG(maphist,"<- no backing store, no overlay",0,0,0,0);
return EFAULT;
}
/*
* establish range of interest based on advice from mapper
* and then clip to fit map entry. note that we only want
* to do this the first time through the fault. if we
* ReFault we will disable this by setting "narrow" to true.
*/
if (flt->narrow == false) {
/* wide fault (!narrow) */
KASSERT(uvmadvice[ufi->entry->advice].advice ==
ufi->entry->advice);
nback = MIN(uvmadvice[ufi->entry->advice].nback,
(ufi->orig_rvaddr - ufi->entry->start) >> PAGE_SHIFT);
flt->startva = ufi->orig_rvaddr - (nback << PAGE_SHIFT);
/*
* note: "-1" because we don't want to count the
* faulting page as forw
*/
nforw = MIN(uvmadvice[ufi->entry->advice].nforw,
((ufi->entry->end - ufi->orig_rvaddr) >>
PAGE_SHIFT) - 1);
flt->npages = nback + nforw + 1;
flt->centeridx = nback;
flt->narrow = true; /* ensure only once per-fault */
} else {
/* narrow fault! */
nback = nforw = 0;
flt->startva = ufi->orig_rvaddr;
flt->npages = 1;
flt->centeridx = 0;
}
/* offset from entry's start to pgs' start */
const voff_t eoff = flt->startva - ufi->entry->start;
/* locked: maps(read) */
UVMHIST_LOG(maphist, " narrow=%d, back=%d, forw=%d, startva=%#lx",
flt->narrow, nback, nforw, flt->startva);
UVMHIST_LOG(maphist, " entry=%p, amap=%p, obj=%p", ufi->entry,
amap, uobj, 0);
/*
* if we've got an amap, lock it and extract current anons.
*/
if (amap) {
amap_lock(amap);
amap_lookups(&ufi->entry->aref, eoff, *ranons, flt->npages);
} else {
*ranons = NULL; /* to be safe */
}
/* locked: maps(read), amap(if there) */
KASSERT(amap == NULL || mutex_owned(amap->am_lock));
/*
* for MADV_SEQUENTIAL mappings we want to deactivate the back pages
* now and then forget about them (for the rest of the fault).
*/
if (ufi->entry->advice == MADV_SEQUENTIAL && nback != 0) {
UVMHIST_LOG(maphist, " MADV_SEQUENTIAL: flushing backpages",
0,0,0,0);
/* flush back-page anons? */
if (amap)
uvmfault_anonflush(*ranons, nback);
/* flush object? */
if (uobj) {
voff_t uoff;
uoff = ufi->entry->offset + eoff;
mutex_enter(uobj->vmobjlock);
(void) (uobj->pgops->pgo_put)(uobj, uoff, uoff +
(nback << PAGE_SHIFT), PGO_DEACTIVATE);
}
/* now forget about the backpages */
if (amap)
*ranons += nback;
flt->startva += (nback << PAGE_SHIFT);
flt->npages -= nback;
flt->centeridx = 0;
}
/*
* => startva is fixed
* => npages is fixed
*/
KASSERT(flt->startva <= ufi->orig_rvaddr);
KASSERT(ufi->orig_rvaddr + ufi->orig_size <=
flt->startva + (flt->npages << PAGE_SHIFT));
return 0;
}
/*
* uvm_fault_upper_lookup: look up existing h/w mapping and amap.
*
* iterate range of interest:
* 1. check if h/w mapping exists. if yes, we don't care
* 2. check if anon exists. if not, page is lower.
* 3. if anon exists, enter h/w mapping for neighbors.
*
* => called with amap locked (if exists).
*/
static int
uvm_fault_upper_lookup(
struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
struct vm_anon **anons, struct vm_page **pages)
{
struct vm_amap *amap = ufi->entry->aref.ar_amap;
int lcv;
vaddr_t currva;
bool shadowed __unused;
UVMHIST_FUNC("uvm_fault_upper_lookup"); UVMHIST_CALLED(maphist);
/* locked: maps(read), amap(if there) */
KASSERT(amap == NULL || mutex_owned(amap->am_lock));
/*
* map in the backpages and frontpages we found in the amap in hopes
* of preventing future faults. we also init the pages[] array as
* we go.
*/
currva = flt->startva;
shadowed = false;
for (lcv = 0; lcv < flt->npages; lcv++, currva += PAGE_SIZE) {
/*
* don't play with VAs that are already mapped
* (except for center)
*/
if (lcv != flt->centeridx &&
pmap_extract(ufi->orig_map->pmap, currva, NULL)) {
pages[lcv] = PGO_DONTCARE;
continue;
}
/*
* unmapped or center page. check if any anon at this level.
*/
if (amap == NULL || anons[lcv] == NULL) {
pages[lcv] = NULL;
continue;
}
/*
* check for present page and map if possible. re-activate it.
*/
pages[lcv] = PGO_DONTCARE;
if (lcv == flt->centeridx) { /* save center for later! */
shadowed = true;
continue;
}
struct vm_anon *anon = anons[lcv];
struct vm_page *pg = anon->an_page;
KASSERT(anon->an_lock == amap->am_lock);
/* Ignore loaned and busy pages. */
if (pg && pg->loan_count == 0 && (pg->flags & PG_BUSY) == 0) {
uvm_fault_upper_neighbor(ufi, flt, currva,
pg, anon->an_ref > 1);
}
}
/* locked: maps(read), amap(if there) */
KASSERT(amap == NULL || mutex_owned(amap->am_lock));
/* (shadowed == true) if there is an anon at the faulting address */
UVMHIST_LOG(maphist, " shadowed=%d, will_get=%d", shadowed,
(ufi->entry->object.uvm_obj && shadowed != false),0,0);
/*
* note that if we are really short of RAM we could sleep in the above
* call to pmap_enter with everything locked. bad?
*
* XXX Actually, that is bad; pmap_enter() should just fail in that
* XXX case. --thorpej
*/
return 0;
}
/*
* uvm_fault_upper_neighbor: enter single lower neighbor page.
*
* => called with amap and anon locked.
*/
static void
uvm_fault_upper_neighbor(
struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
vaddr_t currva, struct vm_page *pg, bool readonly)
{
UVMHIST_FUNC("uvm_fault_upper_neighbor"); UVMHIST_CALLED(maphist);
/* locked: amap, anon */
mutex_enter(&uvm_pageqlock);
uvm_pageenqueue(pg);
mutex_exit(&uvm_pageqlock);
UVMHIST_LOG(maphist,
" MAPPING: n anon: pm=%p, va=%#lx, pg=%p",
ufi->orig_map->pmap, currva, pg, 0);
uvmexp.fltnamap++;
/*
* Since this page isn't the page that's actually faulting,
* ignore pmap_enter() failures; it's not critical that we
* enter these right now.
*/
(void) pmap_enter(ufi->orig_map->pmap, currva,
VM_PAGE_TO_PHYS(pg),
readonly ? (flt->enter_prot & ~VM_PROT_WRITE) :
flt->enter_prot,
PMAP_CANFAIL | (flt->wire_mapping ? PMAP_WIRED : 0));
pmap_update(ufi->orig_map->pmap);
}
/*
* uvm_fault_upper: handle upper fault.
*
* 1. acquire anon lock.
* 2. get anon. let uvmfault_anonget do the dirty work.
* 3. handle loan.
* 4. dispatch direct or promote handlers.
*/
static int
uvm_fault_upper(
struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
struct vm_anon **anons)
{
struct vm_amap * const amap = ufi->entry->aref.ar_amap;
struct vm_anon * const anon = anons[flt->centeridx];
struct uvm_object *uobj;
int error;
UVMHIST_FUNC("uvm_fault_upper"); UVMHIST_CALLED(maphist);
/* locked: maps(read), amap, anon */
KASSERT(mutex_owned(amap->am_lock));
KASSERT(anon->an_lock == amap->am_lock);
/*
* handle case 1: fault on an anon in our amap
*/
UVMHIST_LOG(maphist, " case 1 fault: anon=%p", anon, 0,0,0);
/*
* no matter if we have case 1A or case 1B we are going to need to
* have the anon's memory resident. ensure that now.
*/
/*
* let uvmfault_anonget do the dirty work.
* if it fails (!OK) it will unlock everything for us.
* if it succeeds, locks are still valid and locked.
* also, if it is OK, then the anon's page is on the queues.
* if the page is on loan from a uvm_object, then anonget will
* lock that object for us if it does not fail.
*/
error = uvmfault_anonget(ufi, amap, anon);
switch (error) {
case 0:
break;
case ERESTART:
return ERESTART;
case EAGAIN:
kpause("fltagain1", false, hz/2, NULL);
return ERESTART;
default:
return error;
}
/*
* uobj is non null if the page is on loan from an object (i.e. uobj)
*/
uobj = anon->an_page->uobject; /* locked by anonget if !NULL */
/* locked: maps(read), amap, anon, uobj(if one) */
KASSERT(mutex_owned(amap->am_lock));
KASSERT(anon->an_lock == amap->am_lock);
KASSERT(uobj == NULL || mutex_owned(uobj->vmobjlock));
/*
* special handling for loaned pages
*/
if (anon->an_page->loan_count) {
error = uvm_fault_upper_loan(ufi, flt, anon, &uobj);
if (error != 0)
return error;
}
/*
* if we are case 1B then we will need to allocate a new blank
* anon to transfer the data into. note that we have a lock
* on anon, so no one can busy or release the page until we are done.
* also note that the ref count can't drop to zero here because
* it is > 1 and we are only dropping one ref.
*
* in the (hopefully very rare) case that we are out of RAM we
* will unlock, wait for more RAM, and refault.
*
* if we are out of anon VM we kill the process (XXX: could wait?).
*/
if (flt->cow_now && anon->an_ref > 1) {
flt->promote = true;
error = uvm_fault_upper_promote(ufi, flt, uobj, anon);
} else {
error = uvm_fault_upper_direct(ufi, flt, uobj, anon);
}
return error;
}
/*
* uvm_fault_upper_loan: handle loaned upper page.
*
* 1. if not cow'ing now, simply adjust flt->enter_prot.
* 2. if cow'ing now, and if ref count is 1, break loan.
*/
static int
uvm_fault_upper_loan(
struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
struct vm_anon *anon, struct uvm_object **ruobj)
{
struct vm_amap * const amap = ufi->entry->aref.ar_amap;
int error = 0;
UVMHIST_FUNC("uvm_fault_upper_loan"); UVMHIST_CALLED(maphist);
if (!flt->cow_now) {
/*
* for read faults on loaned pages we just cap the
* protection at read-only.
*/
flt->enter_prot = flt->enter_prot & ~VM_PROT_WRITE;
} else {
/*
* note that we can't allow writes into a loaned page!
*
* if we have a write fault on a loaned page in an
* anon then we need to look at the anon's ref count.
* if it is greater than one then we are going to do
* a normal copy-on-write fault into a new anon (this
* is not a problem). however, if the reference count
* is one (a case where we would normally allow a
* write directly to the page) then we need to kill
* the loan before we continue.
*/
/* >1 case is already ok */
if (anon->an_ref == 1) {
error = uvm_loanbreak_anon(anon, *ruobj);
if (error != 0) {
uvmfault_unlockall(ufi, amap, *ruobj);
uvm_wait("flt_noram2");
return ERESTART;
}
/* if we were a loan receiver uobj is gone */
if (*ruobj)
*ruobj = NULL;
}
}
return error;
}
/*
* uvm_fault_upper_promote: promote upper page.
*
* 1. call uvmfault_promote.
* 2. enqueue page.
* 3. deref.
* 4. pass page to uvm_fault_upper_enter.
*/
static int
uvm_fault_upper_promote(
struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
struct uvm_object *uobj, struct vm_anon *anon)
{
struct vm_anon * const oanon = anon;
struct vm_page *pg;
int error;
UVMHIST_FUNC("uvm_fault_upper_promote"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist, " case 1B: COW fault",0,0,0,0);
uvmexp.flt_acow++;
error = uvmfault_promote(ufi, oanon, PGO_DONTCARE, &anon,
&flt->anon_spare);
switch (error) {
case 0:
break;
case ERESTART:
return ERESTART;
default:
return error;
}
KASSERT(anon == NULL || anon->an_lock == oanon->an_lock);
pg = anon->an_page;
mutex_enter(&uvm_pageqlock);
uvm_pageenqueue(pg); /* uvm_fault_upper_done will activate the page */
mutex_exit(&uvm_pageqlock);
pg->flags &= ~(PG_BUSY|PG_FAKE);
UVM_PAGE_OWN(pg, NULL);
/* deref: can not drop to zero here by defn! */
KASSERT(oanon->an_ref > 1);
oanon->an_ref--;
/*
* note: oanon is still locked, as is the new anon. we
* need to check for this later when we unlock oanon; if
* oanon != anon, we'll have to unlock anon, too.
*/
return uvm_fault_upper_enter(ufi, flt, uobj, anon, pg, oanon);
}
/*
* uvm_fault_upper_direct: handle direct fault.
*/
static int
uvm_fault_upper_direct(
struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
struct uvm_object *uobj, struct vm_anon *anon)
{
struct vm_anon * const oanon = anon;
struct vm_page *pg;
UVMHIST_FUNC("uvm_fault_upper_direct"); UVMHIST_CALLED(maphist);
uvmexp.flt_anon++;
pg = anon->an_page;
if (anon->an_ref > 1) /* disallow writes to ref > 1 anons */
flt->enter_prot = flt->enter_prot & ~VM_PROT_WRITE;
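/*
 * note: entering the page read-only here means a later write to a
 * multiply-referenced anon will fault again and take the
 * copy-on-write (promote) path instead.
 */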
return uvm_fault_upper_enter(ufi, flt, uobj, anon, pg, oanon);
}
/*
* uvm_fault_upper_enter: enter h/w mapping of upper page.
*/
static int
uvm_fault_upper_enter(
struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
struct uvm_object *uobj, struct vm_anon *anon, struct vm_page *pg,
struct vm_anon *oanon)
{
struct vm_amap * const amap = ufi->entry->aref.ar_amap;
UVMHIST_FUNC("uvm_fault_upper_enter"); UVMHIST_CALLED(maphist);
/* locked: maps(read), amap, oanon, anon(if different from oanon) */
KASSERT(mutex_owned(amap->am_lock));
KASSERT(anon->an_lock == amap->am_lock);
KASSERT(oanon->an_lock == amap->am_lock);
KASSERT(uobj == NULL || mutex_owned(uobj->vmobjlock));
/*
* now map the page in.
*/
UVMHIST_LOG(maphist,
" MAPPING: anon: pm=%p, va=%#lx, pg=%p, promote=%d",
ufi->orig_map->pmap, ufi->orig_rvaddr, pg, flt->promote);
if (pmap_enter(ufi->orig_map->pmap, ufi->orig_rvaddr,
VM_PAGE_TO_PHYS(pg),
flt->enter_prot, flt->access_type | PMAP_CANFAIL |
(flt->wire_mapping ? PMAP_WIRED : 0)) != 0) {
/*
* No need to undo what we did; we can simply think of
* this as the pmap throwing away the mapping information.
*
* We do, however, have to go through the ReFault path,
* as the map may change while we're asleep.
*/
uvmfault_unlockall(ufi, amap, uobj);
if (!uvm_reclaimable()) {
UVMHIST_LOG(maphist,
"<- failed. out of VM",0,0,0,0);
/* XXX instrumentation */
return ENOMEM;
}
/* XXX instrumentation */
uvm_wait("flt_pmfail1");
return ERESTART;
}
uvm_fault_upper_done(ufi, flt, anon, pg);
/*
* done case 1! finish up by unlocking everything and returning success
*/
pmap_update(ufi->orig_map->pmap);
uvmfault_unlockall(ufi, amap, uobj);
return 0;
}
/*
* uvm_fault_upper_done: queue upper center page.
*/
static void
uvm_fault_upper_done(
struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
struct vm_anon *anon, struct vm_page *pg)
{
const bool wire_paging = flt->wire_paging;
UVMHIST_FUNC("uvm_fault_upper_done"); UVMHIST_CALLED(maphist);
/*
* ... update the page queues.
*/
mutex_enter(&uvm_pageqlock);
if (wire_paging) {
uvm_pagewire(pg);
/*
* since the now-wired page cannot be paged out,
* release its swap resources for others to use.
* since an anon with no swap cannot be PG_CLEAN,
* clear its clean flag now.
*/
pg->flags &= ~(PG_CLEAN);
} else {
uvm_pageactivate(pg);
}
mutex_exit(&uvm_pageqlock);
if (wire_paging) {
uvm_anon_dropswap(anon);
}
}
/*
* uvm_fault_lower: handle lower fault.
*
* 1. check uobj
* 1.1. if null, ZFOD.
* 1.2. if not null, look up unmapped neighbor pages.
* 2. for center page, check if promote.
* 2.1. ZFOD always needs promotion.
* 2.2. other uobjs, when entry is marked COW (usually MAP_PRIVATE vnode).
* 3. if uobj is not ZFOD and page is not found, do i/o.
* 4. dispatch either direct / promote fault.
*/
static int
uvm_fault_lower(
struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
struct vm_page **pages)
{
#ifdef DIAGNOSTIC
struct vm_amap *amap = ufi->entry->aref.ar_amap;
#endif
struct uvm_object *uobj = ufi->entry->object.uvm_obj;
struct vm_page *uobjpage;
int error;
UVMHIST_FUNC("uvm_fault_lower"); UVMHIST_CALLED(maphist);
/*
* now, if the desired page is not shadowed by the amap and we have
* a backing object that does not have a special fault routine, then
* we ask (with pgo_get) the object for resident pages that we care
* about and attempt to map them in. we do not let pgo_get block
* (PGO_LOCKED).
*/
if (uobj == NULL) {
/* zero fill; we don't care about neighbor pages */
uobjpage = NULL;
} else {
uvm_fault_lower_lookup(ufi, flt, pages);
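/*
 * pages[centeridx] may still be NULL here if pgo_get(PGO_LOCKED)
 * did not find the center page resident; in that case we do the
 * I/O for it further below.
 */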
uobjpage = pages[flt->centeridx];
}
/*
* note that at this point we are done with any front or back pages.
* we are now going to focus on the center page (i.e. the one we've
* faulted on). if we have faulted on the upper (anon) layer
* [i.e. case 1], then the anon we want is anons[centeridx] (we have
* not touched it yet). if we have faulted on the bottom (uobj)
* layer [i.e. case 2] and the page was both present and available,
* then we've got a pointer to it as "uobjpage" and we've already
* made it BUSY.
*/
/*
* locked:
* maps(read), amap(if there), uobj(if !null), uobjpage(if !null)
*/
KASSERT(amap == NULL || mutex_owned(amap->am_lock));
KASSERT(uobj == NULL || mutex_owned(uobj->vmobjlock));
KASSERT(uobjpage == NULL || (uobjpage->flags & PG_BUSY) != 0);
/*
* note that uobjpage can not be PGO_DONTCARE at this point. we now
* set uobjpage to PGO_DONTCARE if we are doing a zero fill. if we
* have a backing object, check and see if we are going to promote
* the data up to an anon during the fault.
*/
if (uobj == NULL) {
uobjpage = PGO_DONTCARE;
flt->promote = true; /* always need anon here */
} else {
KASSERT(uobjpage != PGO_DONTCARE);
flt->promote = flt->cow_now && UVM_ET_ISCOPYONWRITE(ufi->entry);
}
UVMHIST_LOG(maphist, " case 2 fault: promote=%d, zfill=%d",
flt->promote, (uobj == NULL), 0,0);
/*
* if uobjpage is not null then we do not need to do I/O to get the
* uobjpage.
*
* if uobjpage is null, then we need to unlock and ask the pager to
* get the data for us. once we have the data, we need to reverify
* the state of the world. we are currently not holding any resources.
*/
if (uobjpage) {
/* update rusage counters */
curlwp->l_ru.ru_minflt++;
} else {
error = uvm_fault_lower_io(ufi, flt, &uobj, &uobjpage);
if (error != 0)
return error;
}
/*
* locked:
* maps(read), amap(if !null), uobj(if !null), uobjpage(if uobj)
*/
KASSERT(amap == NULL || mutex_owned(amap->am_lock));
KASSERT(uobj == NULL || mutex_owned(uobj->vmobjlock));
KASSERT(uobj == NULL || (uobjpage->flags & PG_BUSY) != 0);
/*
* notes:
* - at this point uobjpage can not be NULL
* - at this point uobjpage can not be PG_RELEASED (since we checked
* for it above)
* - at this point uobjpage could be PG_WANTED (handle later)
*/
KASSERT(uobjpage != NULL);
KASSERT(uobj == NULL || uobj == uobjpage->uobject);
KASSERT(uobj == NULL || !UVM_OBJ_IS_CLEAN(uobjpage->uobject) ||
(uobjpage->flags & PG_CLEAN) != 0);
if (!flt->promote) {
error = uvm_fault_lower_direct(ufi, flt, uobj, uobjpage);
} else {
error = uvm_fault_lower_promote(ufi, flt, uobj, uobjpage);
}
return error;
}
/*
* uvm_fault_lower_lookup: look up on-memory uobj pages.
*
* 1. get on-memory pages.
* 2. if failed, give up (get only center page later).
* 3. if succeeded, enter h/w mapping of neighbor pages.
*/
static void
uvm_fault_lower_lookup(
struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
struct vm_page **pages)
{
struct uvm_object *uobj = ufi->entry->object.uvm_obj;
int lcv, gotpages;
vaddr_t currva;
UVMHIST_FUNC("uvm_fault_lower_lookup"); UVMHIST_CALLED(maphist);
mutex_enter(uobj->vmobjlock);
/* Locked: maps(read), amap(if there), uobj */
uvmexp.fltlget++;
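/*
 * ask the pager only for pages that are already resident
 * (PGO_LOCKED: the object is locked and pgo_get must not sleep).
 */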
gotpages = flt->npages;
(void) uobj->pgops->pgo_get(uobj,
ufi->entry->offset + flt->startva - ufi->entry->start,
pages, &gotpages, flt->centeridx,
flt->access_type & MASK(ufi->entry), ufi->entry->advice, PGO_LOCKED);
KASSERT(mutex_owned(uobj->vmobjlock));
/*
* check for pages to map, if we got any
*/
if (gotpages == 0) {
pages[flt->centeridx] = NULL;
return;
}
currva = flt->startva;
for (lcv = 0; lcv < flt->npages; lcv++, currva += PAGE_SIZE) {
struct vm_page *curpg;
curpg = pages[lcv];
if (curpg == NULL || curpg == PGO_DONTCARE) {
continue;
}
KASSERT(curpg->uobject == uobj);
/*
* if center page is resident and not PG_BUSY|PG_RELEASED
* then pgo_get made it PG_BUSY for us and gave us a handle
* to it.
*/
if (lcv == flt->centeridx) {
UVMHIST_LOG(maphist, " got uobjpage "
"(0x%x) with locked get",
curpg, 0,0,0);
} else {
bool readonly = (curpg->flags & PG_RDONLY)
|| (curpg->loan_count > 0)
|| UVM_OBJ_NEEDS_WRITEFAULT(curpg->uobject);
uvm_fault_lower_neighbor(ufi, flt,
currva, curpg, readonly);
}
}
pmap_update(ufi->orig_map->pmap);
}
/*
* uvm_fault_lower_neighbor: enter h/w mapping of lower neighbor page.
*/
static void
uvm_fault_lower_neighbor(
struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
vaddr_t currva, struct vm_page *pg, bool readonly)
{
UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
/* locked: maps(read), amap(if there), uobj */
/*
* calling pgo_get with PGO_LOCKED returns us pages which
* are neither busy nor released, so we don't need to check
* for this. we can just directly enter the pages.
*/
mutex_enter(&uvm_pageqlock);
uvm_pageenqueue(pg);
mutex_exit(&uvm_pageqlock);
UVMHIST_LOG(maphist,
" MAPPING: n obj: pm=%p, va=%#lx, pg=%p",
ufi->orig_map->pmap, currva, pg, 0);
uvmexp.fltnomap++;
/*
* Since this page isn't the page that's actually faulting,
* ignore pmap_enter() failures; it's not critical that we
* enter these right now.
* NOTE: page can't be PG_WANTED or PG_RELEASED because we've
* held the lock the whole time we've had the handle.
*/
KASSERT((pg->flags & PG_PAGEOUT) == 0);
KASSERT((pg->flags & PG_RELEASED) == 0);
KASSERT((pg->flags & PG_WANTED) == 0);
KASSERT(!UVM_OBJ_IS_CLEAN(pg->uobject) || (pg->flags & PG_CLEAN) != 0);
pg->flags &= ~(PG_BUSY);
UVM_PAGE_OWN(pg, NULL);
KASSERT(mutex_owned(pg->uobject->vmobjlock));
(void) pmap_enter(ufi->orig_map->pmap, currva,
VM_PAGE_TO_PHYS(pg),
readonly ? (flt->enter_prot & ~VM_PROT_WRITE) :
flt->enter_prot & MASK(ufi->entry),
PMAP_CANFAIL | (flt->wire_mapping ? PMAP_WIRED : 0));
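/* note: our caller (uvm_fault_lower_lookup) does the pmap_update() */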
}
/*
* uvm_fault_lower_io: get lower page from backing store.
*
* 1. unlock everything, because i/o will block.
* 2. call pgo_get.
* 3. if failed, recover.
* 4. if succeeded, relock everything and verify things.
*/
static int
uvm_fault_lower_io(
struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
struct uvm_object **ruobj, struct vm_page **ruobjpage)
{
struct vm_amap * const amap = ufi->entry->aref.ar_amap;
struct uvm_object *uobj = *ruobj;
struct vm_page *pg;
bool locked;
int gotpages;
int error;
voff_t uoff;
UVMHIST_FUNC("uvm_fault_lower_io"); UVMHIST_CALLED(maphist);
/* update rusage counters */
curlwp->l_ru.ru_majflt++;
/* Locked: maps(read), amap(if there), uobj */
uvmfault_unlockall(ufi, amap, NULL);
/* Locked: uobj */
KASSERT(uobj == NULL || mutex_owned(uobj->vmobjlock));
uvmexp.fltget++;
gotpages = 1;
pg = NULL;
uoff = (ufi->orig_rvaddr - ufi->entry->start) + ufi->entry->offset;
error = uobj->pgops->pgo_get(uobj, uoff, &pg, &gotpages,
0, flt->access_type & MASK(ufi->entry), ufi->entry->advice,
PGO_SYNCIO);
/* locked: pg(if no error) */
/*
* recover from I/O
*/
if (error) {
if (error == EAGAIN) {
UVMHIST_LOG(maphist,
" pgo_get says TRY AGAIN!",0,0,0,0);
kpause("fltagain2", false, hz/2, NULL);
return ERESTART;
}
#if 0
KASSERT(error != ERESTART);
#else
/* XXXUEBS don't re-fault? */
if (error == ERESTART)
error = EIO;
#endif
UVMHIST_LOG(maphist, "<- pgo_get failed (code %d)",
error, 0,0,0);
return error;
}
/*
* re-verify the state of the world by first trying to relock
* the maps. always relock the object.
*/
locked = uvmfault_relock(ufi);
if (locked && amap)
amap_lock(amap);
/* might be changed */
uobj = pg->uobject;
mutex_enter(uobj->vmobjlock);
KASSERT((pg->flags & PG_BUSY) != 0);
mutex_enter(&uvm_pageqlock);
uvm_pageactivate(pg);
mutex_exit(&uvm_pageqlock);
/* locked(locked): maps(read), amap(if !null), uobj, pg */
/* locked(!locked): uobj, pg */
/*
* verify that the page has not been released and re-verify
* that amap slot is still free. if there is a problem,
* we unlock and clean up.
*/
if ((pg->flags & PG_RELEASED) != 0 ||
(locked && amap && amap_lookup(&ufi->entry->aref,
ufi->orig_rvaddr - ufi->entry->start))) {
if (locked)
uvmfault_unlockall(ufi, amap, NULL);
locked = false;
}
/*
* didn't get the lock? release the page and retry.
*/
if (locked == false) {
UVMHIST_LOG(maphist,
" wasn't able to relock after fault: retry",
0,0,0,0);
if (pg->flags & PG_WANTED) {
wakeup(pg);
}
if ((pg->flags & PG_RELEASED) == 0) {
pg->flags &= ~(PG_BUSY | PG_WANTED);
UVM_PAGE_OWN(pg, NULL);
} else {
uvmexp.fltpgrele++;
uvm_pagefree(pg);
}
mutex_exit(uobj->vmobjlock);
return ERESTART;
}
/*
* we have the data in pg which is busy and
* not released. we are holding object lock (so the page
* can't be released on us).
*/
/* locked: maps(read), amap(if !null), uobj, pg */
*ruobj = uobj;
*ruobjpage = pg;
return 0;
}
/*
* uvm_fault_lower_direct: fault lower center page
*
* 1. adjust flt->enter_prot.
* 2. if page is loaned, resolve.
*/
int
uvm_fault_lower_direct(
struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
struct uvm_object *uobj, struct vm_page *uobjpage)
{
struct vm_page *pg;
UVMHIST_FUNC("uvm_fault_lower_direct"); UVMHIST_CALLED(maphist);
/*
* we are not promoting. if the mapping is COW ensure that we
* don't give more access than we should (e.g. when doing a read
* fault on a COPYONWRITE mapping we want to map the COW page in
* R/O even though the entry protection could be R/W).
*
* set "pg" to the page we want to map in (uobjpage, usually)
*/
uvmexp.flt_obj++;
if (UVM_ET_ISCOPYONWRITE(ufi->entry) ||
UVM_OBJ_NEEDS_WRITEFAULT(uobjpage->uobject))
flt->enter_prot &= ~VM_PROT_WRITE;
pg = uobjpage; /* map in the actual object */
KASSERT(uobjpage != PGO_DONTCARE);
/*
* we are faulting directly on the page. be careful
* about writing to loaned pages...
*/
if (uobjpage->loan_count) {
uvm_fault_lower_direct_loan(ufi, flt, uobj, &pg, &uobjpage);
}
KASSERT(pg == uobjpage);
KASSERT(uobj == NULL || (uobjpage->flags & PG_BUSY) != 0);
return uvm_fault_lower_enter(ufi, flt, uobj, NULL, pg);
}
/*
* uvm_fault_lower_direct_loan: resolve loaned page.
*
* 1. if not cow'ing, adjust flt->enter_prot.
* 2. if cow'ing, break loan.
*/
static int
uvm_fault_lower_direct_loan(
struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
struct uvm_object *uobj, struct vm_page **rpg,
struct vm_page **ruobjpage)
{
struct vm_amap * const amap = ufi->entry->aref.ar_amap;
struct vm_page *pg;
struct vm_page *uobjpage = *ruobjpage;
UVMHIST_FUNC("uvm_fault_lower_direct_loan"); UVMHIST_CALLED(maphist);
if (!flt->cow_now) {
/* read fault: cap the protection at readonly */
/* cap! */
flt->enter_prot = flt->enter_prot & ~VM_PROT_WRITE;
} else {
/* write fault: must break the loan here */
pg = uvm_loanbreak(uobjpage);
if (pg == NULL) {
/*
* drop ownership of page, it can't be released
*/
if (uobjpage->flags & PG_WANTED)
wakeup(uobjpage);
uobjpage->flags &= ~(PG_BUSY|PG_WANTED);
UVM_PAGE_OWN(uobjpage, NULL);
uvmfault_unlockall(ufi, amap, uobj);
UVMHIST_LOG(maphist,
" out of RAM breaking loan, waiting",
0,0,0,0);
uvmexp.fltnoram++;
uvm_wait("flt_noram4");
return ERESTART;
}
*rpg = pg;
*ruobjpage = pg;
}
return 0;
}
/*
* uvm_fault_lower_promote: promote lower page.
*
* 1. call uvmfault_promote.
* 2. fill in data.
* 3. if not ZFOD, dispose old page.
*/
int
uvm_fault_lower_promote(
struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
struct uvm_object *uobj, struct vm_page *uobjpage)
{
struct vm_amap * const amap = ufi->entry->aref.ar_amap;
struct vm_anon *anon;
struct vm_page *pg;
int error;
UVMHIST_FUNC("uvm_fault_lower_promote"); UVMHIST_CALLED(maphist);
KASSERT(amap != NULL);
/*
* If we are going to promote the data to an anon we
* allocate a blank anon here and plug it into our amap.
*/
error = uvmfault_promote(ufi, NULL, uobjpage,
&anon, &flt->anon_spare);
switch (error) {
case 0:
break;
case ERESTART:
return ERESTART;
default:
return error;
}
pg = anon->an_page;
/*
* Fill in the data.
*/
KASSERT(uobj == NULL || (uobjpage->flags & PG_BUSY) != 0);
if (uobjpage != PGO_DONTCARE) {
uvmexp.flt_prcopy++;
/*
* promote to shared amap? make sure all sharing
* procs see it
*/
if ((amap_flags(amap) & AMAP_SHARED) != 0) {
pmap_page_protect(uobjpage, VM_PROT_NONE);
/*
* XXX: PAGE MIGHT BE WIRED!
*/
}
/*
* dispose of uobjpage. it can't be PG_RELEASED
* since we still hold the object lock.
*/
if (uobjpage->flags & PG_WANTED) {
/* still have the obj lock */
wakeup(uobjpage);
}
uobjpage->flags &= ~(PG_BUSY|PG_WANTED);
UVM_PAGE_OWN(uobjpage, NULL);
UVMHIST_LOG(maphist,
" promote uobjpage 0x%x to anon/page 0x%x/0x%x",
uobjpage, anon, pg, 0);
} else {
uvmexp.flt_przero++;
/*
* Page is zero'd and marked dirty by
* uvmfault_promote().
*/
		UVMHIST_LOG(maphist,"  zero fill anon/page 0x%x/0x%x",
anon, pg, 0, 0);
}
return uvm_fault_lower_enter(ufi, flt, uobj, anon, pg);
}
/*
* uvm_fault_lower_enter: enter h/w mapping of lower page or anon page promoted
* from the lower page.
*/
int
uvm_fault_lower_enter(
struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
struct uvm_object *uobj,
struct vm_anon *anon, struct vm_page *pg)
{
struct vm_amap * const amap = ufi->entry->aref.ar_amap;
int error;
UVMHIST_FUNC("uvm_fault_lower_enter"); UVMHIST_CALLED(maphist);
/*
* Locked:
*
* maps(read), amap(if !null), uobj(if !null),
* anon(if !null), pg(if anon), unlock_uobj(if !null)
*
* Note: pg is either the uobjpage or the new page in the new anon.
*/
KASSERT(amap == NULL || mutex_owned(amap->am_lock));
KASSERT(uobj == NULL || mutex_owned(uobj->vmobjlock));
KASSERT(anon == NULL || anon->an_lock == amap->am_lock);
KASSERT((pg->flags & PG_BUSY) != 0);
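	/*
	 * (If an anon is present it shares the amap's lock, as asserted
	 * above, so holding the amap lock also covers the anon and the
	 * page promoted into it.)
	 */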
/*
* all resources are present. we can now map it in and free our
* resources.
*/
UVMHIST_LOG(maphist,
" MAPPING: case2: pm=%p, va=%#lx, pg=%#x, promote=%d",
ufi->orig_map->pmap, ufi->orig_rvaddr, pg, flt->promote);
KASSERT((flt->access_type & VM_PROT_WRITE) == 0 ||
(pg->flags & PG_RDONLY) == 0);
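	/*
	 * Enter the mapping.  If the page may not be written (PG_RDONLY),
	 * VM_PROT_WRITE is stripped from the protection.  PMAP_CANFAIL
	 * asks the pmap to return an error on resource shortage rather
	 * than panic, and PMAP_WIRED is added when this fault wires the
	 * mapping.
	 */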
if (pmap_enter(ufi->orig_map->pmap, ufi->orig_rvaddr,
VM_PAGE_TO_PHYS(pg),
(pg->flags & PG_RDONLY) != 0 ?
flt->enter_prot & ~VM_PROT_WRITE : flt->enter_prot,
flt->access_type | PMAP_CANFAIL |
(flt->wire_mapping ? PMAP_WIRED : 0)) != 0) {
/*
* No need to undo what we did; we can simply think of
* this as the pmap throwing away the mapping information.
*
* We do, however, have to go through the ReFault path,
* as the map may change while we're asleep.
*/
/*
* ensure that the page is queued in the case that
* we just promoted the page.
*/
mutex_enter(&uvm_pageqlock);
uvm_pageenqueue(pg);
mutex_exit(&uvm_pageqlock);
if (pg->flags & PG_WANTED)
wakeup(pg);
/*
* note that pg can't be PG_RELEASED since we did not drop
* the object lock since the last time we checked.
*/
KASSERT((pg->flags & PG_RELEASED) == 0);
pg->flags &= ~(PG_BUSY|PG_FAKE|PG_WANTED);
UVM_PAGE_OWN(pg, NULL);
uvmfault_unlockall(ufi, amap, uobj);
if (!uvm_reclaimable()) {
UVMHIST_LOG(maphist,
"<- failed. out of VM",0,0,0,0);
/* XXX instrumentation */
error = ENOMEM;
return error;
}
/* XXX instrumentation */
uvm_wait("flt_pmfail2");
return ERESTART;
}
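	/*
	 * pmap_enter() succeeded: queue or wire the page, wake any
	 * waiters, drop our PG_BUSY hold on it and release the locks.
	 */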
uvm_fault_lower_done(ufi, flt, uobj, pg);
/*
* note that pg can't be PG_RELEASED since we did not drop the object
* lock since the last time we checked.
*/
KASSERT((pg->flags & PG_RELEASED) == 0);
if (pg->flags & PG_WANTED)
wakeup(pg);
pg->flags &= ~(PG_BUSY|PG_FAKE|PG_WANTED);
UVM_PAGE_OWN(pg, NULL);
pmap_update(ufi->orig_map->pmap);
uvmfault_unlockall(ufi, amap, uobj);
UVMHIST_LOG(maphist, "<- done (SUCCESS!)",0,0,0,0);
return 0;
}
/*
* uvm_fault_lower_done: queue lower center page.
*/
void
uvm_fault_lower_done(
struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
struct uvm_object *uobj, struct vm_page *pg)
{
bool dropswap = false;
UVMHIST_FUNC("uvm_fault_lower_done"); UVMHIST_CALLED(maphist);
mutex_enter(&uvm_pageqlock);
if (flt->wire_paging) {
uvm_pagewire(pg);
if (pg->pqflags & PQ_AOBJ) {
/*
* since the now-wired page cannot be paged out,
* release its swap resources for others to use.
* since an aobj page with no swap cannot be PG_CLEAN,
* clear its clean flag now.
*/
KASSERT(uobj != NULL);
pg->flags &= ~(PG_CLEAN);
dropswap = true;
}
} else {
uvm_pageactivate(pg);
}
mutex_exit(&uvm_pageqlock);
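	/*
	 * For a newly wired aobj page, the backing swap slot is released
	 * only after the page queue lock has been dropped; uao_dropswap()
	 * frees the slot for reuse.
	 */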
if (dropswap) {
uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
}
}
/*
* uvm_fault_wire: wire down a range of virtual addresses in a map.
*
* => map may be read-locked by caller, but MUST NOT be write-locked.
* => if map is read-locked, any operations which may cause map to
* be write-locked in uvm_fault() must be taken care of by
* the caller. See uvm_map_pageable().
*/
int
uvm_fault_wire(struct vm_map *map, vaddr_t start, vaddr_t end,
vm_prot_t access_type, int maxprot)
{
vaddr_t va;
int error;
/*
* now fault it in a page at a time. if the fault fails then we have
* to undo what we have done. note that in uvm_fault VM_PROT_NONE
* is replaced with the max protection if fault_type is VM_FAULT_WIRE.
*/
/*
* XXX work around overflowing a vaddr_t. this prevents us from
* wiring the last page in the address space, though.
*/
if (start > end) {
return EFAULT;
}
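	/*
	 * (Presumably the overflow being worked around is "end" wrapping
	 * to 0 for a range that runs to the very top of the address
	 * space; such a request shows up here as start > end and is
	 * rejected rather than silently wiring nothing.)
	 */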
for (va = start; va < end; va += PAGE_SIZE) {
error = uvm_fault_internal(map, va, access_type,
(maxprot ? UVM_FAULT_MAXPROT : 0) | UVM_FAULT_WIRE);
if (error) {
if (va != start) {
uvm_fault_unwire(map, start, va);
}
return error;
}
}
return 0;
}
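/*
 * Illustrative sketch (not part of the original source): a wiring path
 * such as uvm_map_pageable() is expected to call the function above per
 * map entry roughly as below; the arguments shown are hypothetical and
 * only meant to make the interface concrete.
 *
 *	error = uvm_fault_wire(map, entry->start, entry->end,
 *	    entry->max_protection, 1);
 *	if (error) {
 *		(undo any wiring already done and propagate the error)
 *	}
 */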
/*
* uvm_fault_unwire(): unwire range of virtual space.
*/
void
uvm_fault_unwire(struct vm_map *map, vaddr_t start, vaddr_t end)
{
vm_map_lock_read(map);
uvm_fault_unwire_locked(map, start, end);
vm_map_unlock_read(map);
}
/*
* uvm_fault_unwire_locked(): the guts of uvm_fault_unwire().
*
* => map must be at least read-locked.
*/
void
uvm_fault_unwire_locked(struct vm_map *map, vaddr_t start, vaddr_t end)
{
struct vm_map_entry *entry, *oentry;
pmap_t pmap = vm_map_pmap(map);
vaddr_t va;
paddr_t pa;
struct vm_page *pg;
/*
* we assume that the area we are unwiring has actually been wired
* in the first place. this means that we should be able to extract
* the PAs from the pmap. we also lock out the page daemon so that
* we can call uvm_pageunwire.
*/
/*
* find the beginning map entry for the region.
*/
KASSERT(start >= vm_map_min(map) && end <= vm_map_max(map));
if (uvm_map_lookup_entry(map, start, &entry) == false)
panic("uvm_fault_unwire_locked: address not in map");
oentry = NULL;
for (va = start; va < end; va += PAGE_SIZE) {
if (pmap_extract(pmap, va, &pa) == false)
continue;
/*
* find the map entry for the current address.
*/
KASSERT(va >= entry->start);
while (va >= entry->end) {
KASSERT(entry->next != &map->header &&
entry->next->start <= entry->end);
entry = entry->next;
}
/*
* lock it.
*/
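	/*
	 * The entry's amap/object locks (and the page queue lock) are
	 * taken only when we cross into a new map entry; the previous
	 * entry's locks are dropped first, so consecutive pages within
	 * the same entry reuse the locks already held.
	 */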
if (entry != oentry) {
if (oentry != NULL) {
mutex_exit(&uvm_pageqlock);
uvm_map_unlock_entry(oentry);
}
uvm_map_lock_entry(entry);
mutex_enter(&uvm_pageqlock);
oentry = entry;
}
/*
* if the entry is no longer wired, tell the pmap.
*/
if (VM_MAPENT_ISWIRED(entry) == 0)
pmap_unwire(pmap, va);
pg = PHYS_TO_VM_PAGE(pa);
if (pg)
uvm_pageunwire(pg);
}
if (oentry != NULL) {
mutex_exit(&uvm_pageqlock);
uvm_map_unlock_entry(entry);
}
}