2006-01-21 16:13:07 +03:00
|
|
|
/* $NetBSD: uvm_fault.c,v 1.104 2006/01/21 13:13:07 yamt Exp $ */
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
*
|
|
|
|
* Copyright (c) 1997 Charles D. Cranor and Washington University.
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. All advertising materials mentioning features or use of this software
|
|
|
|
* must display the following acknowledgement:
|
|
|
|
* This product includes software developed by Charles D. Cranor and
|
|
|
|
* Washington University.
|
|
|
|
* 4. The name of the author may not be used to endorse or promote products
|
|
|
|
* derived from this software without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
|
|
|
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
|
|
|
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
|
|
|
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
|
|
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
|
|
|
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
|
|
|
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
1998-02-07 14:07:38 +03:00
|
|
|
*
|
|
|
|
* from: Id: uvm_fault.c,v 1.1.2.23 1998/02/06 05:29:05 chs Exp
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_fault.c: fault handler
|
|
|
|
*/
|
|
|
|
|
2001-11-10 10:36:59 +03:00
|
|
|
#include <sys/cdefs.h>
|
2006-01-21 16:13:07 +03:00
|
|
|
__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.104 2006/01/21 13:13:07 yamt Exp $");
|
2001-11-10 10:36:59 +03:00
|
|
|
|
|
|
|
#include "opt_uvmhist.h"
|
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
|
|
|
#include <sys/kernel.h>
|
|
|
|
#include <sys/proc.h>
|
|
|
|
#include <sys/malloc.h>
|
|
|
|
#include <sys/mman.h>
|
1998-03-27 00:50:14 +03:00
|
|
|
#include <sys/user.h>
|
2005-07-17 16:27:47 +04:00
|
|
|
#include <sys/vnode.h>
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
#include <uvm/uvm.h>
|
|
|
|
|
|
|
|
/*
|
|
|
|
*
|
|
|
|
* a word on page faults:
|
|
|
|
*
|
|
|
|
* types of page faults we handle:
|
|
|
|
*
|
|
|
|
* CASE 1: upper layer faults CASE 2: lower layer faults
|
|
|
|
*
|
|
|
|
* CASE 1A CASE 1B CASE 2A CASE 2B
|
|
|
|
* read/write1 write>1 read/write +-cow_write/zero
|
2001-05-25 08:06:11 +04:00
|
|
|
* | | | |
|
1998-02-05 09:25:08 +03:00
|
|
|
* +--|--+ +--|--+ +-----+ + | + | +-----+
|
|
|
|
* amap | V | | ----------->new| | | | ^ |
|
|
|
|
* +-----+ +-----+ +-----+ + | + | +--|--+
|
|
|
|
* | | |
|
|
|
|
* +-----+ +-----+ +--|--+ | +--|--+
|
|
|
|
* uobj | d/c | | d/c | | V | +----| |
|
|
|
|
* +-----+ +-----+ +-----+ +-----+
|
|
|
|
*
|
|
|
|
* d/c = don't care
|
2001-05-25 08:06:11 +04:00
|
|
|
*
|
1998-02-05 09:25:08 +03:00
|
|
|
* case [0]: layerless fault
|
|
|
|
* no amap or uobj is present. this is an error.
|
|
|
|
*
|
|
|
|
* case [1]: upper layer fault [anon active]
|
|
|
|
* 1A: [read] or [write with anon->an_ref == 1]
|
|
|
|
* I/O takes place in top level anon and uobj is not touched.
|
|
|
|
* 1B: [write with anon->an_ref > 1]
|
|
|
|
* new anon is alloc'd and data is copied off ["COW"]
|
|
|
|
*
|
|
|
|
* case [2]: lower layer fault [uobj]
|
|
|
|
* 2A: [read on non-NULL uobj] or [write to non-copy_on_write area]
|
|
|
|
* I/O takes place directly in object.
|
|
|
|
* 2B: [write to copy_on_write] or [read on NULL uobj]
|
2001-05-25 08:06:11 +04:00
|
|
|
* data is "promoted" from uobj to a new anon.
|
1998-02-05 09:25:08 +03:00
|
|
|
* if uobj is null, then we zero fill.
|
|
|
|
*
|
|
|
|
* we follow the standard UVM locking protocol ordering:
|
|
|
|
*
|
2001-05-25 08:06:11 +04:00
|
|
|
* MAPS => AMAP => UOBJ => ANON => PAGE QUEUES (PQ)
|
1998-02-05 09:25:08 +03:00
|
|
|
* we hold a PG_BUSY page if we unlock for I/O
|
|
|
|
*
|
|
|
|
*
|
|
|
|
* the code is structured as follows:
|
2001-05-25 08:06:11 +04:00
|
|
|
*
|
1998-02-05 09:25:08 +03:00
|
|
|
* - init the "IN" params in the ufi structure
|
|
|
|
* ReFault:
|
|
|
|
* - do lookups [locks maps], check protection, handle needs_copy
|
|
|
|
* - check for case 0 fault (error)
|
|
|
|
* - establish "range" of fault
|
|
|
|
* - if we have an amap lock it and extract the anons
|
|
|
|
* - if sequential advice deactivate pages behind us
|
|
|
|
* - at the same time check pmap for unmapped areas and anon for pages
|
|
|
|
* that we could map in (and do map it if found)
|
|
|
|
* - check object for resident pages that we could map in
|
|
|
|
* - if (case 2) goto Case2
|
|
|
|
* - >>> handle case 1
|
|
|
|
* - ensure source anon is resident in RAM
|
|
|
|
* - if case 1B alloc new anon and copy from source
|
|
|
|
* - map the correct page in
|
|
|
|
* Case2:
|
|
|
|
* - >>> handle case 2
|
|
|
|
* - ensure source page is resident (if uobj)
|
|
|
|
* - if case 2B alloc new anon and copy from source (could be zero
|
|
|
|
* fill if uobj == NULL)
|
|
|
|
* - map the correct page in
|
|
|
|
* - done!
|
|
|
|
*
|
|
|
|
* note on paging:
|
|
|
|
* if we have to do I/O we place a PG_BUSY page in the correct object,
|
|
|
|
* unlock everything, and do the I/O. when I/O is done we must reverify
|
|
|
|
* the state of the world before assuming that our data structures are
|
|
|
|
* valid. [because mappings could change while the map is unlocked]
|
|
|
|
*
|
|
|
|
* alternative 1: unbusy the page in question and restart the page fault
|
|
|
|
* from the top (ReFault). this is easy but does not take advantage
|
2001-05-25 08:06:11 +04:00
|
|
|
* of the information that we already have from our previous lookup,
|
1998-02-05 09:25:08 +03:00
|
|
|
* although it is possible that the "hints" in the vm_map will help here.
|
|
|
|
*
|
|
|
|
* alternative 2: the system already keeps track of a "version" number of
|
|
|
|
* a map. [i.e. every time you write-lock a map (e.g. to change a
|
|
|
|
* mapping) you bump the version number up by one...] so, we can save
|
|
|
|
* the version number of the map before we release the lock and start I/O.
|
|
|
|
* then when I/O is done we can relock and check the version numbers
|
|
|
|
* to see if anything changed. this might save us some over 1 because
|
|
|
|
* we don't have to unbusy the page and may be less compares(?).
|
|
|
|
*
|
|
|
|
* alternative 3: put in backpointers or a way to "hold" part of a map
|
|
|
|
* in place while I/O is in progress. this could be complex to
|
|
|
|
* implement (especially with structures like amap that can be referenced
|
|
|
|
* by multiple map entries, and figuring out what should wait could be
|
|
|
|
* complex as well...).
|
|
|
|
*
|
|
|
|
* given that we are not currently multiprocessor or multithreaded we might
|
|
|
|
* as well choose alternative 2 now. maybe alternative 3 would be useful
|
|
|
|
* in the future. XXX keep in mind for future consideration//rechecking.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* local data structures
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
 * uvm_advice: per-advice fault read-around parameters.  one entry per
 * madvise(2) advice code; consumed via the uvmadvice[] table below.
 */
struct uvm_advice {
	int advice;	/* MADV_* advice code this entry describes */
	int nback;	/* # of pages to consider behind the faulting page */
	int nforw;	/* # of pages to consider ahead of the faulting page */
};
|
|
|
|
|
|
|
|
/*
 * page range array:
 * note: index in array must match "advice" value
 * XXX: borrowed numbers from freebsd.   do they work well for us?
 */

static const struct uvm_advice uvmadvice[] = {
	{ MADV_NORMAL, 3, 4 },		/* modest read-around both ways */
	{ MADV_RANDOM, 0, 0 },		/* random access: no read-around */
	{ MADV_SEQUENTIAL, 8, 7},	/* sequential: widest range */
};
|
|
|
|
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
#define UVM_MAXRANGE 16 /* must be MAX() of nback+nforw+1 */
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* private prototypes
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* inline functions
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvmfault_anonflush: try and deactivate pages in specified anons
|
|
|
|
*
|
|
|
|
* => does not have to deactivate page if it is busy
|
|
|
|
*/
|
|
|
|
|
2005-12-24 23:45:08 +03:00
|
|
|
static inline void
|
2005-06-27 06:19:48 +04:00
|
|
|
uvmfault_anonflush(struct vm_anon **anons, int n)
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
1998-03-09 03:58:55 +03:00
|
|
|
int lcv;
|
|
|
|
struct vm_page *pg;
|
2001-05-25 08:06:11 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
for (lcv = 0 ; lcv < n ; lcv++) {
|
|
|
|
if (anons[lcv] == NULL)
|
|
|
|
continue;
|
|
|
|
simple_lock(&anons[lcv]->an_lock);
|
2005-05-11 17:02:25 +04:00
|
|
|
pg = anons[lcv]->an_page;
|
1998-03-09 03:58:55 +03:00
|
|
|
if (pg && (pg->flags & PG_BUSY) == 0 && pg->loan_count == 0) {
|
|
|
|
uvm_lock_pageq();
|
|
|
|
if (pg->wire_count == 0) {
|
Page scanner improvements, behavior is actually a bit more like
Mach VM's now. Specific changes:
- Pages now need not have all of their mappings removed before being
put on the inactive list. They only need to have the "referenced"
attribute cleared. This makes putting pages onto the inactive list
much more efficient. In order to eliminate redundant clearings of
"refrenced", callers of uvm_pagedeactivate() must now do this
themselves.
- When checking the "modified" attribute for a page (for clearing
PG_CLEAN), make sure to only do it if PG_CLEAN is currently set on
the page (saves a potentially expensive pmap operation).
- When scanning the inactive list, if a page is referenced, reactivate
it (this part was actually added in uvm_pdaemon.c,v 1.27). This
now works properly now that pages on the inactive list are allowed to
have mappings.
- When scanning the inactive list and considering a page for freeing,
remove all mappings, and then check the "modified" attribute if the
page is marked PG_CLEAN.
- When scanning the active list, if the page was referenced since its
last sweep by the scanner, don't deactivate it. (This part was
actually added in uvm_pdaemon.c,v 1.28.)
These changes greatly improve interactive performance during
moderate to high memory and I/O load.
2001-01-29 02:30:42 +03:00
|
|
|
pmap_clear_reference(pg);
|
1998-03-09 03:58:55 +03:00
|
|
|
uvm_pagedeactivate(pg);
|
|
|
|
}
|
|
|
|
uvm_unlock_pageq();
|
|
|
|
}
|
|
|
|
simple_unlock(&anons[lcv]->an_lock);
|
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* normal functions
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvmfault_amapcopy: clear "needs_copy" in a map.
|
|
|
|
*
|
|
|
|
* => called with VM data structures unlocked (usually, see below)
|
|
|
|
* => we get a write lock on the maps and clear needs_copy for a VA
|
|
|
|
* => if we are out of RAM we sleep (waiting for more)
|
|
|
|
*/
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
static void
|
2005-06-27 06:19:48 +04:00
|
|
|
uvmfault_amapcopy(struct uvm_faultinfo *ufi)
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
for (;;) {
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* no mapping? give up.
|
|
|
|
*/
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
if (uvmfault_lookup(ufi, TRUE) == FALSE)
|
|
|
|
return;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* copy if needed.
|
|
|
|
*/
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
if (UVM_ET_ISNEEDSCOPY(ufi->entry))
|
2001-05-25 08:06:11 +04:00
|
|
|
amap_copy(ufi->map, ufi->entry, M_NOWAIT, TRUE,
|
1998-10-12 03:07:42 +04:00
|
|
|
ufi->orig_rvaddr, ufi->orig_rvaddr + 1);
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* didn't work? must be out of RAM. unlock and sleep.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
|
|
|
|
uvmfault_unlockmaps(ufi, TRUE);
|
|
|
|
uvm_wait("fltamapcopy");
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* got it! unlock and return.
|
|
|
|
*/
|
2001-05-25 08:06:11 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
uvmfault_unlockmaps(ufi, TRUE);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
/*NOTREACHED*/
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvmfault_anonget: get data in an anon into a non-busy, non-released
|
|
|
|
* page in that anon.
|
|
|
|
*
|
|
|
|
* => maps, amap, and anon locked by caller.
|
2001-03-11 01:46:45 +03:00
|
|
|
* => if we fail (result != 0) we unlock everything.
|
1998-02-05 09:25:08 +03:00
|
|
|
* => if we are successful, we return with everything still locked.
|
|
|
|
* => we don't move the page on the queues [gets moved later]
|
|
|
|
* => if we allocate a new page [we_own], it gets put on the queues.
|
|
|
|
* either way, the result is that the page is on the queues at return time
|
|
|
|
* => for pages which are on loan from a uvm_object (and thus are not
|
|
|
|
* owned by the anon): if successful, we return with the owning object
|
|
|
|
* locked. the caller must unlock this object when it unlocks everything
|
|
|
|
* else.
|
|
|
|
*/
|
|
|
|
|
2000-01-11 09:57:49 +03:00
|
|
|
int
|
2005-06-27 06:19:48 +04:00
|
|
|
uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
|
|
|
|
struct vm_anon *anon)
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
1998-03-09 03:58:55 +03:00
|
|
|
boolean_t we_own; /* we own anon's page? */
|
|
|
|
boolean_t locked; /* did we relock? */
|
|
|
|
struct vm_page *pg;
|
2001-03-15 09:10:32 +03:00
|
|
|
int error;
|
1998-03-09 03:58:55 +03:00
|
|
|
UVMHIST_FUNC("uvmfault_anonget"); UVMHIST_CALLED(maphist);
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2001-01-23 04:56:16 +03:00
|
|
|
LOCK_ASSERT(simple_lock_held(&anon->an_lock));
|
|
|
|
|
2001-03-15 09:10:32 +03:00
|
|
|
error = 0;
|
1998-03-27 00:50:14 +03:00
|
|
|
uvmexp.fltanget++;
|
|
|
|
/* bump rusage counters */
|
2005-05-11 17:02:25 +04:00
|
|
|
if (anon->an_page)
|
2003-01-18 11:51:40 +03:00
|
|
|
curproc->p_stats->p_ru.ru_minflt++;
|
1998-03-27 00:50:14 +03:00
|
|
|
else
|
2003-01-18 11:51:40 +03:00
|
|
|
curproc->p_stats->p_ru.ru_majflt++;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2001-05-25 08:06:11 +04:00
|
|
|
/*
|
1998-03-09 03:58:55 +03:00
|
|
|
* loop until we get it, or fail.
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
|
|
|
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
for (;;) {
|
1998-03-09 03:58:55 +03:00
|
|
|
we_own = FALSE; /* TRUE if we set PG_BUSY on a page */
|
2005-05-11 17:02:25 +04:00
|
|
|
pg = anon->an_page;
|
1998-03-09 03:58:55 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* if there is a resident page and it is loaned, then anon
|
|
|
|
* may not own it. call out to uvm_anon_lockpage() to ensure
|
|
|
|
* the real owner of the page has been identified and locked.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (pg && pg->loan_count)
|
1998-10-12 03:07:42 +04:00
|
|
|
pg = uvm_anon_lockloanpg(anon);
|
1998-03-09 03:58:55 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* page there? make sure it is not busy/released.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (pg) {
|
|
|
|
|
|
|
|
/*
|
|
|
|
* at this point, if the page has a uobject [meaning
|
|
|
|
* we have it on loan], then that uobject is locked
|
|
|
|
* by us! if the page is busy, we drop all the
|
|
|
|
* locks (including uobject) and try again.
|
|
|
|
*/
|
|
|
|
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
if ((pg->flags & PG_BUSY) == 0) {
|
1998-03-09 03:58:55 +03:00
|
|
|
UVMHIST_LOG(maphist, "<- OK",0,0,0,0);
|
2001-03-11 01:46:45 +03:00
|
|
|
return (0);
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
pg->flags |= PG_WANTED;
|
|
|
|
uvmexp.fltpgwait++;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* the last unlock must be an atomic unlock+wait on
|
|
|
|
* the owner of page
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
if (pg->uobject) { /* owner is uobject ? */
|
|
|
|
uvmfault_unlockall(ufi, amap, NULL, anon);
|
|
|
|
UVMHIST_LOG(maphist, " unlock+wait on uobj",0,
|
|
|
|
0,0,0);
|
|
|
|
UVM_UNLOCK_AND_WAIT(pg,
|
|
|
|
&pg->uobject->vmobjlock,
|
|
|
|
FALSE, "anonget1",0);
|
|
|
|
} else {
|
|
|
|
/* anon owns page */
|
|
|
|
uvmfault_unlockall(ufi, amap, NULL, NULL);
|
|
|
|
UVMHIST_LOG(maphist, " unlock+wait on anon",0,
|
|
|
|
0,0,0);
|
|
|
|
UVM_UNLOCK_AND_WAIT(pg,&anon->an_lock,0,
|
|
|
|
"anonget2",0);
|
|
|
|
}
|
|
|
|
} else {
|
2005-09-14 02:00:05 +04:00
|
|
|
#if defined(VMSWAP)
|
2001-05-25 08:06:11 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* no page, we must try and bring it in.
|
|
|
|
*/
|
|
|
|
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
pg = uvm_pagealloc(NULL, 0, anon, 0);
|
1998-03-09 03:58:55 +03:00
|
|
|
if (pg == NULL) { /* out of RAM. */
|
|
|
|
uvmfault_unlockall(ufi, amap, NULL, anon);
|
|
|
|
uvmexp.fltnoram++;
|
|
|
|
UVMHIST_LOG(maphist, " noram -- UVM_WAIT",0,
|
|
|
|
0,0,0);
|
2005-04-27 19:19:17 +04:00
|
|
|
if (!uvm_reclaimable()) {
|
|
|
|
return ENOMEM;
|
|
|
|
}
|
1998-03-09 03:58:55 +03:00
|
|
|
uvm_wait("flt_noram1");
|
|
|
|
} else {
|
|
|
|
/* we set the PG_BUSY bit */
|
2001-05-25 08:06:11 +04:00
|
|
|
we_own = TRUE;
|
1998-03-09 03:58:55 +03:00
|
|
|
uvmfault_unlockall(ufi, amap, NULL, anon);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* we are passing a PG_BUSY+PG_FAKE+PG_CLEAN
|
|
|
|
* page into the uvm_swap_get function with
|
1998-11-20 22:37:06 +03:00
|
|
|
* all data structures unlocked. note that
|
|
|
|
* it is ok to read an_swslot here because
|
|
|
|
* we hold PG_BUSY on the page.
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
|
|
|
uvmexp.pageins++;
|
2001-03-15 09:10:32 +03:00
|
|
|
error = uvm_swap_get(pg, anon->an_swslot,
|
1998-03-09 03:58:55 +03:00
|
|
|
PGO_SYNCIO);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* we clean up after the i/o below in the
|
|
|
|
* "we_own" case
|
|
|
|
*/
|
|
|
|
}
|
2005-09-14 02:00:05 +04:00
|
|
|
#else /* defined(VMSWAP) */
|
|
|
|
panic("%s: no page", __func__);
|
|
|
|
#endif /* defined(VMSWAP) */
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* now relock and try again
|
|
|
|
*/
|
|
|
|
|
|
|
|
locked = uvmfault_relock(ufi);
|
2000-01-11 09:57:49 +03:00
|
|
|
if (locked && amap != NULL) {
|
1999-01-25 02:53:14 +03:00
|
|
|
amap_lock(amap);
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
if (locked || we_own)
|
|
|
|
simple_lock(&anon->an_lock);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* if we own the page (i.e. we set PG_BUSY), then we need
|
|
|
|
* to clean up after the I/O. there are three cases to
|
|
|
|
* consider:
|
|
|
|
* [1] page released during I/O: free anon and ReFault.
|
2001-05-25 08:06:11 +04:00
|
|
|
* [2] I/O not OK. free the page and cause the fault
|
1998-03-09 03:58:55 +03:00
|
|
|
* to fail.
|
|
|
|
* [3] I/O OK! activate the page and sync with the
|
|
|
|
* non-we_own case (i.e. drop anon lock if not locked).
|
|
|
|
*/
|
2001-05-25 08:06:11 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
if (we_own) {
|
2005-09-14 02:00:05 +04:00
|
|
|
#if defined(VMSWAP)
|
1998-03-09 03:58:55 +03:00
|
|
|
if (pg->flags & PG_WANTED) {
|
2001-05-25 08:06:11 +04:00
|
|
|
wakeup(pg);
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
2001-03-15 09:10:32 +03:00
|
|
|
if (error) {
|
1998-03-09 03:58:55 +03:00
|
|
|
|
2000-01-11 09:57:49 +03:00
|
|
|
/*
|
|
|
|
* remove the swap slot from the anon
|
|
|
|
* and mark the anon as having no real slot.
|
|
|
|
* don't free the swap slot, thus preventing
|
|
|
|
* it from being used again.
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
2003-08-11 20:44:35 +04:00
|
|
|
if (anon->an_swslot > 0)
|
|
|
|
uvm_swap_markbad(anon->an_swslot, 1);
|
2000-01-11 09:57:49 +03:00
|
|
|
anon->an_swslot = SWSLOT_BAD;
|
|
|
|
|
2004-05-05 15:54:32 +04:00
|
|
|
if ((pg->flags & PG_RELEASED) != 0)
|
|
|
|
goto released;
|
|
|
|
|
2000-01-11 09:57:49 +03:00
|
|
|
/*
|
1998-03-09 03:58:55 +03:00
|
|
|
* note: page was never !PG_BUSY, so it
|
|
|
|
* can't be mapped and thus no need to
|
|
|
|
* pmap_page_protect it...
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
uvm_lock_pageq();
|
|
|
|
uvm_pagefree(pg);
|
|
|
|
uvm_unlock_pageq();
|
|
|
|
|
|
|
|
if (locked)
|
|
|
|
uvmfault_unlockall(ufi, amap, NULL,
|
|
|
|
anon);
|
|
|
|
else
|
|
|
|
simple_unlock(&anon->an_lock);
|
|
|
|
UVMHIST_LOG(maphist, "<- ERROR", 0,0,0,0);
|
2001-03-15 09:10:32 +03:00
|
|
|
return error;
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
2001-05-25 08:06:11 +04:00
|
|
|
|
2004-05-05 15:54:32 +04:00
|
|
|
if ((pg->flags & PG_RELEASED) != 0) {
|
|
|
|
released:
|
|
|
|
KASSERT(anon->an_ref == 0);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* released while we unlocked amap.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (locked)
|
|
|
|
uvmfault_unlockall(ufi, amap, NULL,
|
|
|
|
NULL);
|
|
|
|
|
|
|
|
uvm_anon_release(anon);
|
|
|
|
|
|
|
|
if (error) {
|
|
|
|
UVMHIST_LOG(maphist,
|
|
|
|
"<- ERROR/RELEASED", 0,0,0,0);
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
UVMHIST_LOG(maphist, "<- RELEASED", 0,0,0,0);
|
|
|
|
return ERESTART;
|
|
|
|
}
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
* we've successfully read the page, activate it.
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
uvm_lock_pageq();
|
|
|
|
uvm_pageactivate(pg);
|
|
|
|
uvm_unlock_pageq();
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
|
|
|
|
UVM_PAGE_OWN(pg, NULL);
|
1998-03-09 03:58:55 +03:00
|
|
|
if (!locked)
|
|
|
|
simple_unlock(&anon->an_lock);
|
2005-09-14 02:00:05 +04:00
|
|
|
#else /* defined(VMSWAP) */
|
|
|
|
panic("%s: we_own", __func__);
|
|
|
|
#endif /* defined(VMSWAP) */
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* we were not able to relock. restart fault.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (!locked) {
|
|
|
|
UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0);
|
2001-03-11 01:46:45 +03:00
|
|
|
return (ERESTART);
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* verify no one has touched the amap and moved the anon on us.
|
|
|
|
*/
|
|
|
|
|
2000-01-11 09:57:49 +03:00
|
|
|
if (ufi != NULL &&
|
2001-05-25 08:06:11 +04:00
|
|
|
amap_lookup(&ufi->entry->aref,
|
2000-01-11 09:57:49 +03:00
|
|
|
ufi->orig_rvaddr - ufi->entry->start) != anon) {
|
2001-05-25 08:06:11 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
uvmfault_unlockall(ufi, amap, NULL, anon);
|
|
|
|
UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0);
|
2001-03-11 01:46:45 +03:00
|
|
|
return (ERESTART);
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
2001-05-25 08:06:11 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
2001-05-25 08:06:11 +04:00
|
|
|
* try it again!
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
uvmexp.fltanretry++;
|
|
|
|
continue;
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
}
|
1998-03-09 03:58:55 +03:00
|
|
|
/*NOTREACHED*/
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
2006-01-21 16:13:07 +03:00
|
|
|
/*
|
|
|
|
* uvmfault_promote: promote data to a new anon. used for 1B and 2B.
|
|
|
|
*
|
|
|
|
* 1. allocate an anon and a page.
|
|
|
|
* 2. fill its contents.
|
|
|
|
* 3. put it into amap.
|
|
|
|
*
|
|
|
|
* => if we fail (result != 0) we unlock everything.
|
|
|
|
* => on success, return a new locked anon via 'nanon'.
|
|
|
|
* (*nanon)->an_page will be a resident, locked, dirty page.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static int
|
|
|
|
uvmfault_promote(struct uvm_faultinfo *ufi,
|
|
|
|
struct vm_anon *oanon,
|
|
|
|
struct vm_page *uobjpage,
|
|
|
|
struct vm_anon **nanon, /* OUT: allocated anon */
|
|
|
|
struct vm_anon **spare)
|
|
|
|
{
|
|
|
|
struct vm_amap *amap = ufi->entry->aref.ar_amap;
|
|
|
|
struct uvm_object *uobj;
|
|
|
|
struct vm_anon *anon;
|
|
|
|
struct vm_page *pg;
|
|
|
|
struct vm_page *opg;
|
|
|
|
int error;
|
|
|
|
UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
|
|
|
|
|
|
|
|
if (oanon) {
|
|
|
|
/* anon COW */
|
|
|
|
opg = oanon->an_page;
|
|
|
|
KASSERT(opg != NULL);
|
|
|
|
KASSERT(opg->uobject == NULL || opg->loan_count > 0);
|
|
|
|
} else if (uobjpage != PGO_DONTCARE) {
|
|
|
|
/* object-backed COW */
|
|
|
|
opg = uobjpage;
|
|
|
|
KASSERT(opg->uobject == ufi->entry->object.uvm_obj);
|
|
|
|
} else {
|
|
|
|
/* ZFOD */
|
|
|
|
opg = NULL;
|
|
|
|
}
|
|
|
|
if (opg != NULL) {
|
|
|
|
uobj = opg->uobject;
|
|
|
|
} else {
|
|
|
|
uobj = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
KASSERT(amap != NULL);
|
|
|
|
KASSERT(uobjpage != NULL);
|
|
|
|
KASSERT(uobjpage == PGO_DONTCARE || (uobjpage->flags & PG_BUSY) != 0);
|
|
|
|
LOCK_ASSERT(simple_lock_held(&amap->am_l));
|
|
|
|
LOCK_ASSERT(oanon == NULL || simple_lock_held(&oanon->an_lock));
|
|
|
|
LOCK_ASSERT(uobj == NULL || simple_lock_held(&uobj->vmobjlock));
|
|
|
|
LOCK_ASSERT(*spare == NULL || !simple_lock_held(&(*spare)->an_lock));
|
|
|
|
|
|
|
|
if (*spare != NULL) {
|
|
|
|
anon = *spare;
|
|
|
|
*spare = NULL;
|
|
|
|
simple_lock(&anon->an_lock);
|
|
|
|
} else if (ufi->map != kernel_map) {
|
|
|
|
anon = uvm_analloc();
|
|
|
|
} else {
|
|
|
|
UVMHIST_LOG(maphist, "kernel_map, unlock and retry", 0,0,0,0);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* we can't allocate anons with kernel_map locked.
|
|
|
|
*/
|
|
|
|
|
|
|
|
uvm_page_unbusy(&uobjpage, 1);
|
|
|
|
uvmfault_unlockall(ufi, amap, uobj, oanon);
|
|
|
|
|
|
|
|
*spare = uvm_analloc();
|
|
|
|
if (*spare == NULL) {
|
|
|
|
goto nomem;
|
|
|
|
}
|
|
|
|
simple_unlock(&(*spare)->an_lock);
|
|
|
|
error = ERESTART;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
if (anon) {
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The new anon is locked.
|
|
|
|
*
|
|
|
|
* if opg == NULL, we want a zero'd, dirty page,
|
|
|
|
* so have uvm_pagealloc() do that for us.
|
|
|
|
*/
|
|
|
|
|
|
|
|
pg = uvm_pagealloc(NULL, 0, anon,
|
|
|
|
(opg == NULL) ? UVM_PGA_ZERO : 0);
|
|
|
|
} else {
|
|
|
|
pg = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* out of memory resources?
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (pg == NULL) {
|
|
|
|
/* save anon for the next try. */
|
|
|
|
if (anon != NULL) {
|
|
|
|
simple_unlock(&anon->an_lock);
|
|
|
|
*spare = anon;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* unlock and fail ... */
|
|
|
|
uvm_page_unbusy(&uobjpage, 1);
|
|
|
|
uvmfault_unlockall(ufi, amap, uobj, oanon);
|
|
|
|
nomem:
|
|
|
|
if (!uvm_reclaimable()) {
|
|
|
|
UVMHIST_LOG(maphist, "out of VM", 0,0,0,0);
|
|
|
|
uvmexp.fltnoanon++;
|
|
|
|
error = ENOMEM;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
|
|
|
UVMHIST_LOG(maphist, "out of RAM, waiting for more", 0,0,0,0);
|
|
|
|
uvmexp.fltnoram++;
|
|
|
|
uvm_wait("flt_noram5");
|
|
|
|
error = ERESTART;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* copy page [pg now dirty] */
|
|
|
|
if (opg) {
|
|
|
|
uvm_pagecopy(opg, pg);
|
|
|
|
}
|
|
|
|
|
|
|
|
amap_add(&ufi->entry->aref, ufi->orig_rvaddr - ufi->entry->start, anon,
|
|
|
|
oanon != NULL);
|
|
|
|
|
|
|
|
*nanon = anon;
|
|
|
|
error = 0;
|
|
|
|
done:
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
/*
|
|
|
|
* F A U L T - m a i n e n t r y p o i n t
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_fault: page fault handler
|
|
|
|
*
|
|
|
|
* => called from MD code to resolve a page fault
|
2001-05-25 08:06:11 +04:00
|
|
|
* => VM data structures usually should be unlocked. however, it is
|
1998-02-05 09:25:08 +03:00
|
|
|
* possible to call here with the main map locked if the caller
|
|
|
|
 * gets a write lock, sets it recursive, and then calls us (c.f.
|
|
|
|
* uvm_map_pageable). this should be avoided because it keeps
|
|
|
|
* the map locked off during I/O.
|
2001-06-26 21:27:31 +04:00
|
|
|
* => MUST NEVER BE CALLED IN INTERRUPT CONTEXT
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
|
|
|
|
1999-03-28 23:53:49 +04:00
|
|
|
/*
 * MASK: protection mask for entering a mapping.  copy-on-write
 * entries must never be entered writable, so strip VM_PROT_WRITE
 * for them; everything else gets the full protection set.
 */
#define MASK(entry)	(UVM_ET_ISCOPYONWRITE(entry) ? \
			 ~VM_PROT_WRITE : VM_PROT_ALL)
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
int
|
2005-06-27 06:19:48 +04:00
|
|
|
uvm_fault(struct vm_map *orig_map, vaddr_t vaddr, vm_fault_t fault_type,
|
|
|
|
vm_prot_t access_type)
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
1998-03-09 03:58:55 +03:00
|
|
|
struct uvm_faultinfo ufi;
|
introduce a new UVM fault type, VM_FAULT_WIREMAX. this is different
from VM_FAULT_WIRE in that when the pages being wired are faulted in,
the simulated fault is at the maximum protection allowed for the mapping
instead of the current protection. use this in uvm_map_pageable{,_all}()
to fix the problem where writing via ptrace() to shared libraries that
are also mapped with wired mappings in another process causes a
diagnostic panic when the wired mapping is removed.
this is a really obscure problem so it deserves some more explanation.
ptrace() writing to another process ends up down in uvm_map_extract(),
which for MAP_PRIVATE mappings (such as shared libraries) will cause
the amap to be copied or created. then the amap is made shared
(ie. the AMAP_SHARED flag is set) between the kernel and the ptrace()d
process so that the kernel can modify pages in the amap and have the
ptrace()d process see the changes. then when the page being modified
is actually faulted on, the object pages (from the shared library vnode)
is copied to a new anon page and inserted into the shared amap.
to make all the processes sharing the amap actually see the new anon
page instead of the vnode page that was there before, we need to
invalidate all the pmap-level mappings of the vnode page in the pmaps
of the processes sharing the amap, but we don't have a good way of
doing this. the amap doesn't keep track of the vm_maps which map it.
so all we can do at this point is to remove all the mappings of the
page with pmap_page_protect(), but this has the unfortunate side-effect
of removing wired mappings as well. removing wired mappings with
pmap_page_protect() is a legitimate operation, it can happen when a file
with a wired mapping is truncated. so the pmap has no way of knowing
whether a request to remove a wired mapping is normal or when it's due to
this weird situation. so the pmap has to remove the weird mapping.
the process being ptrace()d goes away and life continues. then,
much later when we go to unwire or remove the wired vm_map mapping,
we discover that the pmap mapping has been removed when it should
still be there, and we panic.
so where did we go wrong? the problem is that we don't have any way
to update just the pmap mappings that need to be updated in this
scenario. we could invent a mechanism to do this, but that is much
more complicated than this change and it doesn't seem like the right
way to go in the long run either.
the real underlying problem here is that wired pmap mappings just
aren't a good concept. one of the original properties of the pmap
design was supposed to be that all the information in the pmap could
be thrown away at any time and the VM system could regenerate it all
through fault processing, but wired pmap mappings don't allow that.
a better design for UVM would not require wired pmap mappings,
and Chuck C. and I are talking about this, but it won't be done
anytime soon, so this change will do for now.
this change has the effect of causing MAP_PRIVATE mappings to be
copied to anonymous memory when they are mlock()d, so that uvm_fault()
doesn't need to copy these pages later when called from ptrace(), thus
avoiding the call to pmap_page_protect() and the panic that results
from this when the mlock()d region is unlocked or freed. note that
this change doesn't help the case where the wired mapping is MAP_SHARED.
discussed at great length with Chuck Cranor.
fixes PRs 10363, 12554, 12604, 13041, 13487, 14580 and 14853.
2002-01-01 01:34:39 +03:00
|
|
|
vm_prot_t enter_prot, check_prot;
|
2002-01-02 01:18:39 +03:00
|
|
|
boolean_t wired, narrow, promote, locked, shadowed, wire_fault, cow_now;
|
2001-03-15 09:10:32 +03:00
|
|
|
int npages, nback, nforw, centeridx, error, lcv, gotpages;
|
2005-02-07 14:57:38 +03:00
|
|
|
vaddr_t startva, currva;
|
2002-10-30 08:24:33 +03:00
|
|
|
voff_t uoff;
|
1998-03-09 03:58:55 +03:00
|
|
|
struct vm_amap *amap;
|
|
|
|
struct uvm_object *uobj;
|
|
|
|
struct vm_anon *anons_store[UVM_MAXRANGE], **anons, *anon, *oanon;
|
2006-01-21 16:13:07 +03:00
|
|
|
struct vm_anon *anon_spare;
|
1998-03-09 03:58:55 +03:00
|
|
|
struct vm_page *pages[UVM_MAXRANGE], *pg, *uobjpage;
|
|
|
|
UVMHIST_FUNC("uvm_fault"); UVMHIST_CALLED(maphist);
|
|
|
|
|
|
|
|
UVMHIST_LOG(maphist, "(map=0x%x, vaddr=0x%x, ft=%d, at=%d)",
|
1998-02-05 09:25:08 +03:00
|
|
|
orig_map, vaddr, fault_type, access_type);
|
|
|
|
|
2006-01-21 16:13:07 +03:00
|
|
|
anon = anon_spare = NULL;
|
2000-11-27 11:39:39 +03:00
|
|
|
pg = NULL;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
uvmexp.faults++; /* XXX: locking? */
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* init the IN parameters in the ufi
|
|
|
|
*/
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
ufi.orig_map = orig_map;
|
|
|
|
ufi.orig_rvaddr = trunc_page(vaddr);
|
|
|
|
ufi.orig_size = PAGE_SIZE; /* can't get any smaller than this */
|
introduce a new UVM fault type, VM_FAULT_WIREMAX. this is different
from VM_FAULT_WIRE in that when the pages being wired are faulted in,
the simulated fault is at the maximum protection allowed for the mapping
instead of the current protection. use this in uvm_map_pageable{,_all}()
to fix the problem where writing via ptrace() to shared libraries that
are also mapped with wired mappings in another process causes a
diagnostic panic when the wired mapping is removed.
this is a really obscure problem so it deserves some more explanation.
ptrace() writing to another process ends up down in uvm_map_extract(),
which for MAP_PRIVATE mappings (such as shared libraries) will cause
the amap to be copied or created. then the amap is made shared
(ie. the AMAP_SHARED flag is set) between the kernel and the ptrace()d
process so that the kernel can modify pages in the amap and have the
ptrace()d process see the changes. then when the page being modified
is actually faulted on, the object pages (from the shared library vnode)
is copied to a new anon page and inserted into the shared amap.
to make all the processes sharing the amap actually see the new anon
page instead of the vnode page that was there before, we need to
invalidate all the pmap-level mappings of the vnode page in the pmaps
of the processes sharing the amap, but we don't have a good way of
doing this. the amap doesn't keep track of the vm_maps which map it.
so all we can do at this point is to remove all the mappings of the
page with pmap_page_protect(), but this has the unfortunate side-effect
of removing wired mappings as well. removing wired mappings with
pmap_page_protect() is a legitimate operation, it can happen when a file
with a wired mapping is truncated. so the pmap has no way of knowing
whether a request to remove a wired mapping is normal or when it's due to
this weird situation. so the pmap has to remove the weird mapping.
the process being ptrace()d goes away and life continues. then,
much later when we go to unwire or remove the wired vm_map mapping,
we discover that the pmap mapping has been removed when it should
still be there, and we panic.
so where did we go wrong? the problem is that we don't have any way
to update just the pmap mappings that need to be updated in this
scenario. we could invent a mechanism to do this, but that is much
more complicated than this change and it doesn't seem like the right
way to go in the long run either.
the real underlying problem here is that wired pmap mappings just
aren't a good concept. one of the original properties of the pmap
design was supposed to be that all the information in the pmap could
be thrown away at any time and the VM system could regenerate it all
through fault processing, but wired pmap mappings don't allow that.
a better design for UVM would not require wired pmap mappings,
and Chuck C. and I are talking about this, but it won't be done
anytime soon, so this change will do for now.
this change has the effect of causing MAP_PRIVATE mappings to be
copied to anonymous memory when they are mlock()d, so that uvm_fault()
doesn't need to copy these pages later when called from ptrace(), thus
avoiding the call to pmap_page_protect() and the panic that results
from this when the mlock()d region is unlocked or freed. note that
this change doesn't help the case where the wired mapping is MAP_SHARED.
discussed at great length with Chuck Cranor.
fixes PRs 10363, 12554, 12604, 13041, 13487, 14580 and 14853.
2002-01-01 01:34:39 +03:00
|
|
|
wire_fault = fault_type == VM_FAULT_WIRE ||
|
|
|
|
fault_type == VM_FAULT_WIREMAX;
|
|
|
|
if (wire_fault)
|
1998-03-09 03:58:55 +03:00
|
|
|
narrow = TRUE; /* don't look for neighborhood
|
|
|
|
* pages on wire */
|
|
|
|
else
|
|
|
|
narrow = FALSE; /* normal fault */
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* "goto ReFault" means restart the page fault from ground zero.
|
|
|
|
*/
|
1998-02-05 09:25:08 +03:00
|
|
|
ReFault:
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* lookup and lock the maps
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (uvmfault_lookup(&ufi, FALSE) == FALSE) {
|
|
|
|
UVMHIST_LOG(maphist, "<- no mapping @ 0x%x", vaddr, 0,0,0);
|
2006-01-21 16:13:07 +03:00
|
|
|
error = EFAULT;
|
|
|
|
goto done;
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
/* locked: maps(read) */
|
|
|
|
|
2001-04-24 08:30:50 +04:00
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
if ((ufi.map->flags & VM_MAP_PAGEABLE) == 0) {
|
|
|
|
printf("Page fault on non-pageable map:\n");
|
|
|
|
printf("ufi.map = %p\n", ufi.map);
|
|
|
|
printf("ufi.orig_map = %p\n", ufi.orig_map);
|
|
|
|
printf("ufi.orig_rvaddr = 0x%lx\n", (u_long) ufi.orig_rvaddr);
|
|
|
|
panic("uvm_fault: (ufi.map->flags & VM_MAP_PAGEABLE) == 0");
|
|
|
|
}
|
|
|
|
#endif
|
2001-03-15 09:10:32 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* check protection
|
|
|
|
*/
|
|
|
|
|
introduce a new UVM fault type, VM_FAULT_WIREMAX. this is different
from VM_FAULT_WIRE in that when the pages being wired are faulted in,
the simulated fault is at the maximum protection allowed for the mapping
instead of the current protection. use this in uvm_map_pageable{,_all}()
to fix the problem where writing via ptrace() to shared libraries that
are also mapped with wired mappings in another process causes a
diagnostic panic when the wired mapping is removed.
this is a really obscure problem so it deserves some more explanation.
ptrace() writing to another process ends up down in uvm_map_extract(),
which for MAP_PRIVATE mappings (such as shared libraries) will cause
the amap to be copied or created. then the amap is made shared
(ie. the AMAP_SHARED flag is set) between the kernel and the ptrace()d
process so that the kernel can modify pages in the amap and have the
ptrace()d process see the changes. then when the page being modified
is actually faulted on, the object pages (from the shared library vnode)
is copied to a new anon page and inserted into the shared amap.
to make all the processes sharing the amap actually see the new anon
page instead of the vnode page that was there before, we need to
invalidate all the pmap-level mappings of the vnode page in the pmaps
of the processes sharing the amap, but we don't have a good way of
doing this. the amap doesn't keep track of the vm_maps which map it.
so all we can do at this point is to remove all the mappings of the
page with pmap_page_protect(), but this has the unfortunate side-effect
of removing wired mappings as well. removing wired mappings with
pmap_page_protect() is a legitimate operation, it can happen when a file
with a wired mapping is truncated. so the pmap has no way of knowing
whether a request to remove a wired mapping is normal or when it's due to
this weird situation. so the pmap has to remove the weird mapping.
the process being ptrace()d goes away and life continues. then,
much later when we go to unwire or remove the wired vm_map mapping,
we discover that the pmap mapping has been removed when it should
still be there, and we panic.
so where did we go wrong? the problem is that we don't have any way
to update just the pmap mappings that need to be updated in this
scenario. we could invent a mechanism to do this, but that is much
more complicated than this change and it doesn't seem like the right
way to go in the long run either.
the real underlying problem here is that wired pmap mappings just
aren't a good concept. one of the original properties of the pmap
design was supposed to be that all the information in the pmap could
be thrown away at any time and the VM system could regenerate it all
through fault processing, but wired pmap mappings don't allow that.
a better design for UVM would not require wired pmap mappings,
and Chuck C. and I are talking about this, but it won't be done
anytime soon, so this change will do for now.
this change has the effect of causing MAP_PRIVATE mappings to be
copied to anonymous memory when they are mlock()d, so that uvm_fault()
doesn't need to copy these pages later when called from ptrace(), thus
avoiding the call to pmap_page_protect() and the panic that results
from this when the mlock()d region is unlocked or freed. note that
this change doesn't help the case where the wired mapping is MAP_SHARED.
discussed at great length with Chuck Cranor.
fixes PRs 10363, 12554, 12604, 13041, 13487, 14580 and 14853.
2002-01-01 01:34:39 +03:00
|
|
|
check_prot = fault_type == VM_FAULT_WIREMAX ?
|
|
|
|
ufi.entry->max_protection : ufi.entry->protection;
|
|
|
|
if ((check_prot & access_type) != access_type) {
|
1998-03-09 03:58:55 +03:00
|
|
|
UVMHIST_LOG(maphist,
|
|
|
|
"<- protection failure (prot=0x%x, access=0x%x)",
|
|
|
|
ufi.entry->protection, access_type, 0, 0);
|
|
|
|
uvmfault_unlockmaps(&ufi, FALSE);
|
2006-01-21 16:13:07 +03:00
|
|
|
error = EACCES;
|
|
|
|
goto done;
|
1999-06-03 03:26:21 +04:00
|
|
|
}
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* "enter_prot" is the protection we want to enter the page in at.
|
|
|
|
* for certain pages (e.g. copy-on-write pages) this protection can
|
|
|
|
* be more strict than ufi.entry->protection. "wired" means either
|
|
|
|
* the entry is wired or we are fault-wiring the pg.
|
|
|
|
*/
|
|
|
|
|
|
|
|
enter_prot = ufi.entry->protection;
|
introduce a new UVM fault type, VM_FAULT_WIREMAX. this is different
from VM_FAULT_WIRE in that when the pages being wired are faulted in,
the simulated fault is at the maximum protection allowed for the mapping
instead of the current protection. use this in uvm_map_pageable{,_all}()
to fix the problem where writing via ptrace() to shared libraries that
are also mapped with wired mappings in another process causes a
diagnostic panic when the wired mapping is removed.
this is a really obscure problem so it deserves some more explanation.
ptrace() writing to another process ends up down in uvm_map_extract(),
which for MAP_PRIVATE mappings (such as shared libraries) will cause
the amap to be copied or created. then the amap is made shared
(ie. the AMAP_SHARED flag is set) between the kernel and the ptrace()d
process so that the kernel can modify pages in the amap and have the
ptrace()d process see the changes. then when the page being modified
is actually faulted on, the object pages (from the shared library vnode)
is copied to a new anon page and inserted into the shared amap.
to make all the processes sharing the amap actually see the new anon
page instead of the vnode page that was there before, we need to
invalidate all the pmap-level mappings of the vnode page in the pmaps
of the processes sharing the amap, but we don't have a good way of
doing this. the amap doesn't keep track of the vm_maps which map it.
so all we can do at this point is to remove all the mappings of the
page with pmap_page_protect(), but this has the unfortunate side-effect
of removing wired mappings as well. removing wired mappings with
pmap_page_protect() is a legitimate operation, it can happen when a file
with a wired mapping is truncated. so the pmap has no way of knowing
whether a request to remove a wired mapping is normal or when it's due to
this weird situation. so the pmap has to remove the weird mapping.
the process being ptrace()d goes away and life continues. then,
much later when we go to unwire or remove the wired vm_map mapping,
we discover that the pmap mapping has been removed when it should
still be there, and we panic.
so where did we go wrong? the problem is that we don't have any way
to update just the pmap mappings that need to be updated in this
scenario. we could invent a mechanism to do this, but that is much
more complicated than this change and it doesn't seem like the right
way to go in the long run either.
the real underlying problem here is that wired pmap mappings just
aren't a good concept. one of the original properties of the pmap
design was supposed to be that all the information in the pmap could
be thrown away at any time and the VM system could regenerate it all
through fault processing, but wired pmap mappings don't allow that.
a better design for UVM would not require wired pmap mappings,
and Chuck C. and I are talking about this, but it won't be done
anytime soon, so this change will do for now.
this change has the effect of causing MAP_PRIVATE mappings to be
copied to anonymous memory when they are mlock()d, so that uvm_fault()
doesn't need to copy these pages later when called from ptrace(), thus
avoiding the call to pmap_page_protect() and the panic that results
from this when the mlock()d region is unlocked or freed. note that
this change doesn't help the case where the wired mapping is MAP_SHARED.
discussed at great length with Chuck Cranor.
fixes PRs 10363, 12554, 12604, 13041, 13487, 14580 and 14853.
2002-01-01 01:34:39 +03:00
|
|
|
wired = VM_MAPENT_ISWIRED(ufi.entry) || wire_fault;
|
2002-01-02 01:18:39 +03:00
|
|
|
if (wired) {
|
1998-03-09 03:58:55 +03:00
|
|
|
access_type = enter_prot; /* full access for wired */
|
2002-01-02 01:18:39 +03:00
|
|
|
cow_now = (check_prot & VM_PROT_WRITE) != 0;
|
|
|
|
} else {
|
|
|
|
cow_now = (access_type & VM_PROT_WRITE) != 0;
|
|
|
|
}
|
1998-03-09 03:58:55 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* handle "needs_copy" case. if we need to copy the amap we will
|
|
|
|
* have to drop our readlock and relock it with a write lock. (we
|
|
|
|
* need a write lock to change anything in a map entry [e.g.
|
|
|
|
* needs_copy]).
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (UVM_ET_ISNEEDSCOPY(ufi.entry)) {
|
introduce a new UVM fault type, VM_FAULT_WIREMAX. this is different
from VM_FAULT_WIRE in that when the pages being wired are faulted in,
the simulated fault is at the maximum protection allowed for the mapping
instead of the current protection. use this in uvm_map_pageable{,_all}()
to fix the problem where writing via ptrace() to shared libraries that
are also mapped with wired mappings in another process causes a
diagnostic panic when the wired mapping is removed.
this is a really obscure problem so it deserves some more explanation.
ptrace() writing to another process ends up down in uvm_map_extract(),
which for MAP_PRIVATE mappings (such as shared libraries) will cause
the amap to be copied or created. then the amap is made shared
(ie. the AMAP_SHARED flag is set) between the kernel and the ptrace()d
process so that the kernel can modify pages in the amap and have the
ptrace()d process see the changes. then when the page being modified
is actually faulted on, the object pages (from the shared library vnode)
is copied to a new anon page and inserted into the shared amap.
to make all the processes sharing the amap actually see the new anon
page instead of the vnode page that was there before, we need to
invalidate all the pmap-level mappings of the vnode page in the pmaps
of the processes sharing the amap, but we don't have a good way of
doing this. the amap doesn't keep track of the vm_maps which map it.
so all we can do at this point is to remove all the mappings of the
page with pmap_page_protect(), but this has the unfortunate side-effect
of removing wired mappings as well. removing wired mappings with
pmap_page_protect() is a legitimate operation, it can happen when a file
with a wired mapping is truncated. so the pmap has no way of knowing
whether a request to remove a wired mapping is normal or when it's due to
this weird situation. so the pmap has to remove the weird mapping.
the process being ptrace()d goes away and life continues. then,
much later when we go to unwire or remove the wired vm_map mapping,
we discover that the pmap mapping has been removed when it should
still be there, and we panic.
so where did we go wrong? the problem is that we don't have any way
to update just the pmap mappings that need to be updated in this
scenario. we could invent a mechanism to do this, but that is much
more complicated than this change and it doesn't seem like the right
way to go in the long run either.
the real underlying problem here is that wired pmap mappings just
aren't a good concept. one of the original properties of the pmap
design was supposed to be that all the information in the pmap could
be thrown away at any time and the VM system could regenerate it all
through fault processing, but wired pmap mappings don't allow that.
a better design for UVM would not require wired pmap mappings,
and Chuck C. and I are talking about this, but it won't be done
anytime soon, so this change will do for now.
this change has the effect of causing MAP_PRIVATE mappings to be
copied to anonymous memory when they are mlock()d, so that uvm_fault()
doesn't need to copy these pages later when called from ptrace(), thus
avoiding the call to pmap_page_protect() and the panic that results
from this when the mlock()d region is unlocked or freed. note that
this change doesn't help the case where the wired mapping is MAP_SHARED.
discussed at great length with Chuck Cranor.
fixes PRs 10363, 12554, 12604, 13041, 13487, 14580 and 14853.
2002-01-01 01:34:39 +03:00
|
|
|
KASSERT(fault_type != VM_FAULT_WIREMAX);
|
2002-01-02 01:18:39 +03:00
|
|
|
if (cow_now || (ufi.entry->object.uvm_obj == NULL)) {
|
1998-03-09 03:58:55 +03:00
|
|
|
/* need to clear */
|
|
|
|
UVMHIST_LOG(maphist,
|
|
|
|
" need to clear needs_copy and refault",0,0,0,0);
|
|
|
|
uvmfault_unlockmaps(&ufi, FALSE);
|
|
|
|
uvmfault_amapcopy(&ufi);
|
|
|
|
uvmexp.fltamcopy++;
|
|
|
|
goto ReFault;
|
|
|
|
|
|
|
|
} else {
|
|
|
|
|
|
|
|
/*
|
|
|
|
* ensure that we pmap_enter page R/O since
|
|
|
|
* needs_copy is still true
|
|
|
|
*/
|
|
|
|
|
introduce a new UVM fault type, VM_FAULT_WIREMAX. this is different
from VM_FAULT_WIRE in that when the pages being wired are faulted in,
the simulated fault is at the maximum protection allowed for the mapping
instead of the current protection. use this in uvm_map_pageable{,_all}()
to fix the problem where writing via ptrace() to shared libraries that
are also mapped with wired mappings in another process causes a
diagnostic panic when the wired mapping is removed.
this is a really obscure problem so it deserves some more explanation.
ptrace() writing to another process ends up down in uvm_map_extract(),
which for MAP_PRIVATE mappings (such as shared libraries) will cause
the amap to be copied or created. then the amap is made shared
(ie. the AMAP_SHARED flag is set) between the kernel and the ptrace()d
process so that the kernel can modify pages in the amap and have the
ptrace()d process see the changes. then when the page being modified
is actually faulted on, the object pages (from the shared library vnode)
is copied to a new anon page and inserted into the shared amap.
to make all the processes sharing the amap actually see the new anon
page instead of the vnode page that was there before, we need to
invalidate all the pmap-level mappings of the vnode page in the pmaps
of the processes sharing the amap, but we don't have a good way of
doing this. the amap doesn't keep track of the vm_maps which map it.
so all we can do at this point is to remove all the mappings of the
page with pmap_page_protect(), but this has the unfortunate side-effect
of removing wired mappings as well. removing wired mappings with
pmap_page_protect() is a legitimate operation, it can happen when a file
with a wired mapping is truncated. so the pmap has no way of knowing
whether a request to remove a wired mapping is normal or when it's due to
this weird situation. so the pmap has to remove the weird mapping.
the process being ptrace()d goes away and life continues. then,
much later when we go to unwire or remove the wired vm_map mapping,
we discover that the pmap mapping has been removed when it should
still be there, and we panic.
so where did we go wrong? the problem is that we don't have any way
to update just the pmap mappings that need to be updated in this
scenario. we could invent a mechanism to do this, but that is much
more complicated than this change and it doesn't seem like the right
way to go in the long run either.
the real underlying problem here is that wired pmap mappings just
aren't a good concept. one of the original properties of the pmap
design was supposed to be that all the information in the pmap could
be thrown away at any time and the VM system could regenerate it all
through fault processing, but wired pmap mappings don't allow that.
a better design for UVM would not require wired pmap mappings,
and Chuck C. and I are talking about this, but it won't be done
anytime soon, so this change will do for now.
this change has the effect of causing MAP_PRIVATE mappings to be
copied to anonymous memory when they are mlock()d, so that uvm_fault()
doesn't need to copy these pages later when called from ptrace(), thus
avoiding the call to pmap_page_protect() and the panic that results
from this when the mlock()d region is unlocked or freed. note that
this change doesn't help the case where the wired mapping is MAP_SHARED.
discussed at great length with Chuck Cranor.
fixes PRs 10363, 12554, 12604, 13041, 13487, 14580 and 14853.
2002-01-01 01:34:39 +03:00
|
|
|
enter_prot &= ~VM_PROT_WRITE;
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* identify the players
|
|
|
|
*/
|
|
|
|
|
1999-11-13 03:24:38 +03:00
|
|
|
amap = ufi.entry->aref.ar_amap; /* top layer */
|
1998-03-09 03:58:55 +03:00
|
|
|
uobj = ufi.entry->object.uvm_obj; /* bottom layer */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* check for a case 0 fault. if nothing backing the entry then
|
|
|
|
* error now.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (amap == NULL && uobj == NULL) {
|
|
|
|
uvmfault_unlockmaps(&ufi, FALSE);
|
|
|
|
UVMHIST_LOG(maphist,"<- no backing store, no overlay",0,0,0,0);
|
2006-01-21 16:13:07 +03:00
|
|
|
error = EFAULT;
|
|
|
|
goto done;
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* establish range of interest based on advice from mapper
|
|
|
|
* and then clip to fit map entry. note that we only want
|
2001-05-25 08:06:11 +04:00
|
|
|
* to do this the first time through the fault. if we
|
1998-03-09 03:58:55 +03:00
|
|
|
* ReFault we will disable this by setting "narrow" to true.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (narrow == FALSE) {
|
|
|
|
|
|
|
|
/* wide fault (!narrow) */
|
2000-11-27 11:39:39 +03:00
|
|
|
KASSERT(uvmadvice[ufi.entry->advice].advice ==
|
|
|
|
ufi.entry->advice);
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
nback = MIN(uvmadvice[ufi.entry->advice].nback,
|
1998-10-19 03:49:59 +04:00
|
|
|
(ufi.orig_rvaddr - ufi.entry->start) >> PAGE_SHIFT);
|
|
|
|
startva = ufi.orig_rvaddr - (nback << PAGE_SHIFT);
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
nforw = MIN(uvmadvice[ufi.entry->advice].nforw,
|
1998-10-19 03:49:59 +04:00
|
|
|
((ufi.entry->end - ufi.orig_rvaddr) >>
|
|
|
|
PAGE_SHIFT) - 1);
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* note: "-1" because we don't want to count the
|
|
|
|
* faulting page as forw
|
|
|
|
*/
|
|
|
|
npages = nback + nforw + 1;
|
|
|
|
centeridx = nback;
|
|
|
|
|
1999-07-19 23:02:22 +04:00
|
|
|
narrow = TRUE; /* ensure only once per-fault */
|
1998-03-09 03:58:55 +03:00
|
|
|
|
|
|
|
} else {
|
2001-05-25 08:06:11 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/* narrow fault! */
|
|
|
|
nback = nforw = 0;
|
1998-10-12 03:07:42 +04:00
|
|
|
startva = ufi.orig_rvaddr;
|
1998-03-09 03:58:55 +03:00
|
|
|
npages = 1;
|
|
|
|
centeridx = 0;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/* locked: maps(read) */
|
1998-10-12 03:07:42 +04:00
|
|
|
UVMHIST_LOG(maphist, " narrow=%d, back=%d, forw=%d, startva=0x%x",
|
1998-11-04 10:07:22 +03:00
|
|
|
narrow, nback, nforw, startva);
|
1998-03-09 03:58:55 +03:00
|
|
|
UVMHIST_LOG(maphist, " entry=0x%x, amap=0x%x, obj=0x%x", ufi.entry,
|
1998-11-04 10:07:22 +03:00
|
|
|
amap, uobj, 0);
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* if we've got an amap, lock it and extract current anons.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (amap) {
|
1999-01-25 02:53:14 +03:00
|
|
|
amap_lock(amap);
|
1998-03-09 03:58:55 +03:00
|
|
|
anons = anons_store;
|
|
|
|
amap_lookups(&ufi.entry->aref, startva - ufi.entry->start,
|
|
|
|
anons, npages);
|
|
|
|
} else {
|
|
|
|
anons = NULL; /* to be safe */
|
|
|
|
}
|
|
|
|
|
|
|
|
/* locked: maps(read), amap(if there) */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* for MADV_SEQUENTIAL mappings we want to deactivate the back pages
|
|
|
|
* now and then forget about them (for the rest of the fault).
|
|
|
|
*/
|
|
|
|
|
2001-10-03 09:17:58 +04:00
|
|
|
if (ufi.entry->advice == MADV_SEQUENTIAL && nback != 0) {
|
1998-03-09 03:58:55 +03:00
|
|
|
|
|
|
|
UVMHIST_LOG(maphist, " MADV_SEQUENTIAL: flushing backpages",
|
|
|
|
0,0,0,0);
|
|
|
|
/* flush back-page anons? */
|
2001-05-25 08:06:11 +04:00
|
|
|
if (amap)
|
1998-03-09 03:58:55 +03:00
|
|
|
uvmfault_anonflush(anons, nback);
|
|
|
|
|
|
|
|
/* flush object? */
|
|
|
|
if (uobj) {
|
2005-02-07 14:57:38 +03:00
|
|
|
uoff = (startva - ufi.entry->start) + ufi.entry->offset;
|
1998-03-09 03:58:55 +03:00
|
|
|
simple_lock(&uobj->vmobjlock);
|
2005-02-07 14:57:38 +03:00
|
|
|
(void) (uobj->pgops->pgo_put)(uobj, uoff, uoff +
|
1998-10-19 03:49:59 +04:00
|
|
|
(nback << PAGE_SHIFT), PGO_DEACTIVATE);
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/* now forget about the backpages */
|
|
|
|
if (amap)
|
|
|
|
anons += nback;
|
2000-11-27 11:39:39 +03:00
|
|
|
startva += (nback << PAGE_SHIFT);
|
1998-03-09 03:58:55 +03:00
|
|
|
npages -= nback;
|
|
|
|
nback = centeridx = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* locked: maps(read), amap(if there) */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* map in the backpages and frontpages we found in the amap in hopes
|
|
|
|
* of preventing future faults. we also init the pages[] array as
|
|
|
|
* we go.
|
|
|
|
*/
|
|
|
|
|
1998-10-12 03:07:42 +04:00
|
|
|
currva = startva;
|
1998-03-09 03:58:55 +03:00
|
|
|
shadowed = FALSE;
|
|
|
|
for (lcv = 0 ; lcv < npages ; lcv++, currva += PAGE_SIZE) {
|
|
|
|
|
|
|
|
/*
|
|
|
|
	 * don't play with VAs that are already mapped
|
1998-10-12 03:07:42 +04:00
|
|
|
	 * (except for center)
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
2000-11-27 11:39:39 +03:00
|
|
|
if (lcv != centeridx &&
|
2005-01-01 12:14:49 +03:00
|
|
|
pmap_extract(ufi.orig_map->pmap, currva, NULL)) {
|
2000-11-27 11:39:39 +03:00
|
|
|
pages[lcv] = PGO_DONTCARE;
|
|
|
|
continue;
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* unmapped or center page. check if any anon at this level.
|
|
|
|
*/
|
|
|
|
if (amap == NULL || anons[lcv] == NULL) {
|
|
|
|
pages[lcv] = NULL;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* check for present page and map if possible. re-activate it.
|
|
|
|
*/
|
|
|
|
|
|
|
|
pages[lcv] = PGO_DONTCARE;
|
|
|
|
if (lcv == centeridx) { /* save center for later! */
|
|
|
|
shadowed = TRUE;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
anon = anons[lcv];
|
|
|
|
simple_lock(&anon->an_lock);
|
|
|
|
/* ignore loaned pages */
|
2005-05-11 17:02:25 +04:00
|
|
|
if (anon->an_page && anon->an_page->loan_count == 0 &&
|
|
|
|
(anon->an_page->flags & PG_BUSY) == 0) {
|
1998-03-09 03:58:55 +03:00
|
|
|
uvm_lock_pageq();
|
2005-05-11 17:02:25 +04:00
|
|
|
uvm_pageactivate(anon->an_page);
|
1998-03-09 03:58:55 +03:00
|
|
|
uvm_unlock_pageq();
|
|
|
|
UVMHIST_LOG(maphist,
|
|
|
|
" MAPPING: n anon: pm=0x%x, va=0x%x, pg=0x%x",
|
2005-05-11 17:02:25 +04:00
|
|
|
ufi.orig_map->pmap, currva, anon->an_page, 0);
|
1998-03-09 03:58:55 +03:00
|
|
|
uvmexp.fltnamap++;
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1999-11-13 03:24:38 +03:00
|
|
|
/*
|
|
|
|
* Since this isn't the page that's actually faulting,
|
|
|
|
* ignore pmap_enter() failures; it's not critical
|
|
|
|
* that we enter these right now.
|
|
|
|
*/
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1999-11-13 03:24:38 +03:00
|
|
|
(void) pmap_enter(ufi.orig_map->pmap, currva,
|
2005-05-11 17:02:25 +04:00
|
|
|
VM_PAGE_TO_PHYS(anon->an_page),
|
1999-03-29 01:01:25 +04:00
|
|
|
(anon->an_ref > 1) ? (enter_prot & ~VM_PROT_WRITE) :
|
1999-11-13 03:24:38 +03:00
|
|
|
enter_prot,
|
|
|
|
PMAP_CANFAIL |
|
|
|
|
(VM_MAPENT_ISWIRED(ufi.entry) ? PMAP_WIRED : 0));
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
simple_unlock(&anon->an_lock);
|
2001-09-11 01:19:08 +04:00
|
|
|
pmap_update(ufi.orig_map->pmap);
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/* locked: maps(read), amap(if there) */
|
|
|
|
/* (shadowed == TRUE) if there is an anon at the faulting address */
|
2001-05-25 08:06:11 +04:00
|
|
|
UVMHIST_LOG(maphist, " shadowed=%d, will_get=%d", shadowed,
|
1998-11-07 08:50:19 +03:00
|
|
|
(uobj && shadowed == FALSE),0,0);
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* note that if we are really short of RAM we could sleep in the above
|
|
|
|
* call to pmap_enter with everything locked. bad?
|
1999-11-13 03:24:38 +03:00
|
|
|
*
|
|
|
|
* XXX Actually, that is bad; pmap_enter() should just fail in that
|
|
|
|
* XXX case. --thorpej
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
2001-05-25 08:06:11 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* if the desired page is not shadowed by the amap and we have a
|
|
|
|
* backing object, then we check to see if the backing object would
|
|
|
|
* prefer to handle the fault itself (rather than letting us do it
|
|
|
|
* with the usual pgo_get hook). the backing object signals this by
|
|
|
|
* providing a pgo_fault routine.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (uobj && shadowed == FALSE && uobj->pgops->pgo_fault != NULL) {
|
|
|
|
simple_lock(&uobj->vmobjlock);
|
|
|
|
|
|
|
|
/* locked: maps(read), amap (if there), uobj */
|
2001-03-15 09:10:32 +03:00
|
|
|
error = uobj->pgops->pgo_fault(&ufi, startva, pages, npages,
|
|
|
|
centeridx, fault_type, access_type, PGO_LOCKED|PGO_SYNCIO);
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/* locked: nothing, pgo_fault has unlocked everything */
|
|
|
|
|
2001-03-17 07:01:24 +03:00
|
|
|
if (error == ERESTART)
|
1998-03-09 03:58:55 +03:00
|
|
|
goto ReFault; /* try again! */
|
2001-04-24 08:30:50 +04:00
|
|
|
/*
|
|
|
|
* object fault routine responsible for pmap_update().
|
|
|
|
*/
|
2006-01-21 16:13:07 +03:00
|
|
|
goto done;
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* now, if the desired page is not shadowed by the amap and we have
|
|
|
|
* a backing object that does not have a special fault routine, then
|
|
|
|
* we ask (with pgo_get) the object for resident pages that we care
|
|
|
|
* about and attempt to map them in. we do not let pgo_get block
|
|
|
|
* (PGO_LOCKED).
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (uobj && shadowed == FALSE) {
|
|
|
|
simple_lock(&uobj->vmobjlock);
|
|
|
|
|
|
|
|
/* locked (!shadowed): maps(read), amap (if there), uobj */
|
|
|
|
/*
|
|
|
|
* the following call to pgo_get does _not_ change locking state
|
|
|
|
*/
|
|
|
|
|
|
|
|
uvmexp.fltlget++;
|
|
|
|
gotpages = npages;
|
2000-11-27 11:39:39 +03:00
|
|
|
(void) uobj->pgops->pgo_get(uobj, ufi.entry->offset +
|
1998-02-05 09:25:08 +03:00
|
|
|
(startva - ufi.entry->start),
|
|
|
|
pages, &gotpages, centeridx,
|
1999-03-28 23:53:49 +04:00
|
|
|
access_type & MASK(ufi.entry),
|
1998-02-05 09:25:08 +03:00
|
|
|
ufi.entry->advice, PGO_LOCKED);
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* check for pages to map, if we got any
|
|
|
|
*/
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
uobjpage = NULL;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
if (gotpages) {
|
1998-10-12 03:07:42 +04:00
|
|
|
currva = startva;
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
for (lcv = 0; lcv < npages;
|
|
|
|
lcv++, currva += PAGE_SIZE) {
|
2004-03-02 14:43:44 +03:00
|
|
|
struct vm_page *curpg;
|
|
|
|
boolean_t readonly;
|
|
|
|
|
|
|
|
curpg = pages[lcv];
|
|
|
|
if (curpg == NULL || curpg == PGO_DONTCARE) {
|
1998-03-09 03:58:55 +03:00
|
|
|
continue;
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
/*
|
|
|
|
* if center page is resident and not
|
|
|
|
* PG_BUSY|PG_RELEASED then pgo_get
|
|
|
|
* made it PG_BUSY for us and gave
|
|
|
|
* us a handle to it. remember this
|
|
|
|
* page as "uobjpage." (for later use).
|
|
|
|
*/
|
2001-05-25 08:06:11 +04:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
if (lcv == centeridx) {
|
2004-03-02 14:43:44 +03:00
|
|
|
uobjpage = curpg;
|
2000-11-27 11:39:39 +03:00
|
|
|
UVMHIST_LOG(maphist, " got uobjpage "
|
2001-05-25 08:06:11 +04:00
|
|
|
"(0x%x) with locked get",
|
1998-03-09 03:58:55 +03:00
|
|
|
uobjpage, 0,0,0);
|
2000-11-27 11:39:39 +03:00
|
|
|
continue;
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
2001-05-25 08:06:11 +04:00
|
|
|
|
|
|
|
/*
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
* calling pgo_get with PGO_LOCKED returns us
|
|
|
|
* pages which are neither busy nor released,
|
|
|
|
* so we don't need to check for this.
|
|
|
|
* we can just directly enter the pages.
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
uvm_lock_pageq();
|
2004-03-02 14:43:44 +03:00
|
|
|
uvm_pageactivate(curpg);
|
1998-03-09 03:58:55 +03:00
|
|
|
uvm_unlock_pageq();
|
|
|
|
UVMHIST_LOG(maphist,
|
|
|
|
" MAPPING: n obj: pm=0x%x, va=0x%x, pg=0x%x",
|
2004-03-02 14:43:44 +03:00
|
|
|
ufi.orig_map->pmap, currva, curpg, 0);
|
1998-03-09 03:58:55 +03:00
|
|
|
uvmexp.fltnomap++;
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1999-11-13 03:24:38 +03:00
|
|
|
/*
|
|
|
|
* Since this page isn't the page that's
|
2004-02-10 03:40:06 +03:00
|
|
|
* actually faulting, ignore pmap_enter()
|
1999-11-13 03:24:38 +03:00
|
|
|
* failures; it's not critical that we
|
|
|
|
* enter these right now.
|
|
|
|
*/
|
2004-03-02 14:43:44 +03:00
|
|
|
KASSERT((curpg->flags & PG_PAGEOUT) == 0);
|
|
|
|
KASSERT((curpg->flags & PG_RELEASED) == 0);
|
2005-07-22 18:57:39 +04:00
|
|
|
KASSERT(!UVM_OBJ_IS_CLEAN(curpg->uobject) ||
|
2005-07-17 16:27:47 +04:00
|
|
|
(curpg->flags & PG_CLEAN) != 0);
|
2004-03-02 14:43:44 +03:00
|
|
|
readonly = (curpg->flags & PG_RDONLY)
|
2005-07-17 16:27:47 +04:00
|
|
|
|| (curpg->loan_count > 0)
|
2005-07-23 16:18:41 +04:00
|
|
|
|| UVM_OBJ_NEEDS_WRITEFAULT(curpg->uobject);
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1999-11-13 03:24:38 +03:00
|
|
|
(void) pmap_enter(ufi.orig_map->pmap, currva,
|
2004-03-02 14:43:44 +03:00
|
|
|
VM_PAGE_TO_PHYS(curpg),
|
|
|
|
readonly ?
|
2002-03-25 04:56:48 +03:00
|
|
|
enter_prot & ~VM_PROT_WRITE :
|
|
|
|
enter_prot & MASK(ufi.entry),
|
1999-11-13 03:24:38 +03:00
|
|
|
PMAP_CANFAIL |
|
|
|
|
(wired ? PMAP_WIRED : 0));
|
1998-03-09 03:58:55 +03:00
|
|
|
|
2001-05-25 08:06:11 +04:00
|
|
|
/*
|
1998-03-09 03:58:55 +03:00
|
|
|
* NOTE: page can't be PG_WANTED or PG_RELEASED
|
|
|
|
* because we've held the lock the whole time
|
|
|
|
* we've had the handle.
|
|
|
|
*/
|
2000-11-27 11:39:39 +03:00
|
|
|
|
2004-03-02 14:43:44 +03:00
|
|
|
curpg->flags &= ~(PG_BUSY);
|
|
|
|
UVM_PAGE_OWN(curpg, NULL);
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
}
|
2001-09-11 01:19:08 +04:00
|
|
|
pmap_update(ufi.orig_map->pmap);
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
}
|
1998-03-09 03:58:55 +03:00
|
|
|
} else {
|
|
|
|
uobjpage = NULL;
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/* locked (shadowed): maps(read), amap */
|
2001-05-25 08:06:11 +04:00
|
|
|
/* locked (!shadowed): maps(read), amap(if there),
|
1998-03-09 03:58:55 +03:00
|
|
|
uobj(if !null), uobjpage(if !null) */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* note that at this point we are done with any front or back pages.
|
|
|
|
* we are now going to focus on the center page (i.e. the one we've
|
|
|
|
* faulted on). if we have faulted on the top (anon) layer
|
|
|
|
* [i.e. case 1], then the anon we want is anons[centeridx] (we have
|
|
|
|
* not touched it yet). if we have faulted on the bottom (uobj)
|
|
|
|
* layer [i.e. case 2] and the page was both present and available,
|
|
|
|
* then we've got a pointer to it as "uobjpage" and we've already
|
1998-03-23 00:29:30 +03:00
|
|
|
* made it BUSY.
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* there are four possible cases we must address: 1A, 1B, 2A, and 2B
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* redirect case 2: if we are not shadowed, go to case 2.
|
|
|
|
*/
|
|
|
|
|
2001-05-25 08:06:11 +04:00
|
|
|
if (shadowed == FALSE)
|
1998-03-09 03:58:55 +03:00
|
|
|
goto Case2;
|
|
|
|
|
|
|
|
/* locked: maps(read), amap */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* handle case 1: fault on an anon in our amap
|
|
|
|
*/
|
|
|
|
|
|
|
|
anon = anons[centeridx];
|
|
|
|
UVMHIST_LOG(maphist, " case 1 fault: anon=0x%x", anon, 0,0,0);
|
|
|
|
simple_lock(&anon->an_lock);
|
|
|
|
|
|
|
|
/* locked: maps(read), amap, anon */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* no matter if we have case 1A or case 1B we are going to need to
|
|
|
|
* have the anon's memory resident. ensure that now.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
2000-01-11 09:57:49 +03:00
|
|
|
* let uvmfault_anonget do the dirty work.
|
2000-08-06 04:22:53 +04:00
|
|
|
* if it fails (!OK) it will unlock everything for us.
|
2000-01-11 09:57:49 +03:00
|
|
|
* if it succeeds, locks are still valid and locked.
|
1998-03-09 03:58:55 +03:00
|
|
|
* also, if it is OK, then the anon's page is on the queues.
|
|
|
|
* if the page is on loan from a uvm_object, then anonget will
|
|
|
|
* lock that object for us if it does not fail.
|
|
|
|
*/
|
|
|
|
|
2001-03-15 09:10:32 +03:00
|
|
|
error = uvmfault_anonget(&ufi, amap, anon);
|
|
|
|
switch (error) {
|
2001-03-11 01:46:45 +03:00
|
|
|
case 0:
|
2001-05-25 08:06:11 +04:00
|
|
|
break;
|
1998-03-09 03:58:55 +03:00
|
|
|
|
2001-03-11 01:46:45 +03:00
|
|
|
case ERESTART:
|
1998-03-09 03:58:55 +03:00
|
|
|
goto ReFault;
|
|
|
|
|
2001-03-11 01:46:45 +03:00
|
|
|
case EAGAIN:
|
2000-11-27 11:39:39 +03:00
|
|
|
tsleep(&lbolt, PVM, "fltagain1", 0);
|
|
|
|
goto ReFault;
|
1998-03-09 03:58:55 +03:00
|
|
|
|
2000-08-06 04:22:53 +04:00
|
|
|
default:
|
2006-01-21 16:13:07 +03:00
|
|
|
goto done;
|
2000-08-06 04:22:53 +04:00
|
|
|
}
|
1998-03-09 03:58:55 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* uobj is non null if the page is on loan from an object (i.e. uobj)
|
|
|
|
*/
|
|
|
|
|
2005-05-11 17:02:25 +04:00
|
|
|
uobj = anon->an_page->uobject; /* locked by anonget if !NULL */
|
1998-03-09 03:58:55 +03:00
|
|
|
|
|
|
|
/* locked: maps(read), amap, anon, uobj(if one) */
|
|
|
|
|
|
|
|
/*
|
2001-05-25 08:06:11 +04:00
|
|
|
* special handling for loaned pages
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
2000-11-27 11:39:39 +03:00
|
|
|
|
2005-05-11 17:02:25 +04:00
|
|
|
if (anon->an_page->loan_count) {
|
1998-03-09 03:58:55 +03:00
|
|
|
|
2002-01-02 01:18:39 +03:00
|
|
|
if (!cow_now) {
|
2001-05-25 08:06:11 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* for read faults on loaned pages we just cap the
|
|
|
|
* protection at read-only.
|
|
|
|
*/
|
|
|
|
|
|
|
|
enter_prot = enter_prot & ~VM_PROT_WRITE;
|
|
|
|
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* note that we can't allow writes into a loaned page!
|
|
|
|
*
|
|
|
|
* if we have a write fault on a loaned page in an
|
|
|
|
* anon then we need to look at the anon's ref count.
|
|
|
|
* if it is greater than one then we are going to do
|
|
|
|
* a normal copy-on-write fault into a new anon (this
|
|
|
|
* is not a problem). however, if the reference count
|
|
|
|
* is one (a case where we would normally allow a
|
|
|
|
* write directly to the page) then we need to kill
|
|
|
|
* the loan before we continue.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* >1 case is already ok */
|
|
|
|
if (anon->an_ref == 1) {
|
|
|
|
|
|
|
|
/* get new un-owned replacement page */
|
1999-04-11 08:04:04 +04:00
|
|
|
pg = uvm_pagealloc(NULL, 0, NULL, 0);
|
1998-03-09 03:58:55 +03:00
|
|
|
if (pg == NULL) {
|
|
|
|
uvmfault_unlockall(&ufi, amap, uobj,
|
|
|
|
anon);
|
|
|
|
uvm_wait("flt_noram2");
|
|
|
|
goto ReFault;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* copy data, kill loan, and drop uobj lock
|
|
|
|
* (if any)
|
|
|
|
*/
|
|
|
|
/* copy old -> new */
|
2005-05-11 17:02:25 +04:00
|
|
|
uvm_pagecopy(anon->an_page, pg);
|
1998-03-09 03:58:55 +03:00
|
|
|
|
|
|
|
/* force reload */
|
2005-05-11 17:02:25 +04:00
|
|
|
pmap_page_protect(anon->an_page,
|
1999-09-12 05:16:55 +04:00
|
|
|
VM_PROT_NONE);
|
1998-03-09 03:58:55 +03:00
|
|
|
uvm_lock_pageq(); /* KILL loan */
|
2002-09-03 01:09:50 +04:00
|
|
|
|
2005-05-11 17:02:25 +04:00
|
|
|
anon->an_page->uanon = NULL;
|
1998-03-09 03:58:55 +03:00
|
|
|
/* in case we owned */
|
2005-05-11 17:02:25 +04:00
|
|
|
anon->an_page->pqflags &= ~PQ_ANON;
|
2002-09-03 01:09:50 +04:00
|
|
|
|
|
|
|
if (uobj) {
|
|
|
|
/* if we were receiver of loan */
|
2005-05-11 17:02:25 +04:00
|
|
|
anon->an_page->loan_count--;
|
2002-09-03 01:09:50 +04:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* we were the lender (A->K); need
|
|
|
|
* to remove the page from pageq's.
|
|
|
|
*/
|
2005-05-11 17:02:25 +04:00
|
|
|
uvm_pagedequeue(anon->an_page);
|
2002-09-03 01:09:50 +04:00
|
|
|
}
|
|
|
|
|
2002-08-29 09:03:30 +04:00
|
|
|
uvm_pageactivate(pg);
|
1998-03-09 03:58:55 +03:00
|
|
|
uvm_unlock_pageq();
|
|
|
|
if (uobj) {
|
|
|
|
simple_unlock(&uobj->vmobjlock);
|
|
|
|
uobj = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* install new page in anon */
|
2005-05-11 17:02:25 +04:00
|
|
|
anon->an_page = pg;
|
1998-03-09 03:58:55 +03:00
|
|
|
pg->uanon = anon;
|
|
|
|
pg->pqflags |= PQ_ANON;
|
|
|
|
pg->flags &= ~(PG_BUSY|PG_FAKE);
|
|
|
|
UVM_PAGE_OWN(pg, NULL);
|
|
|
|
|
|
|
|
/* done! */
|
|
|
|
} /* ref == 1 */
|
|
|
|
} /* write fault */
|
|
|
|
} /* loan count */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* if we are case 1B then we will need to allocate a new blank
|
|
|
|
* anon to transfer the data into. note that we have a lock
|
|
|
|
* on anon, so no one can busy or release the page until we are done.
|
|
|
|
* also note that the ref count can't drop to zero here because
|
|
|
|
* it is > 1 and we are only dropping one ref.
|
|
|
|
*
|
2001-05-25 08:06:11 +04:00
|
|
|
* in the (hopefully very rare) case that we are out of RAM we
|
|
|
|
* will unlock, wait for more RAM, and refault.
|
1998-03-09 03:58:55 +03:00
|
|
|
*
|
|
|
|
* if we are out of anon VM we kill the process (XXX: could wait?).
|
|
|
|
*/
|
|
|
|
|
2002-01-02 01:18:39 +03:00
|
|
|
if (cow_now && anon->an_ref > 1) {
|
1998-03-09 03:58:55 +03:00
|
|
|
|
|
|
|
UVMHIST_LOG(maphist, " case 1B: COW fault",0,0,0,0);
|
|
|
|
uvmexp.flt_acow++;
|
|
|
|
oanon = anon; /* oanon = old, locked anon */
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2006-01-21 16:13:07 +03:00
|
|
|
error = uvmfault_promote(&ufi, oanon, PGO_DONTCARE,
|
|
|
|
&anon, &anon_spare);
|
|
|
|
switch (error) {
|
|
|
|
case 0:
|
|
|
|
break;
|
|
|
|
case ERESTART:
|
1998-03-09 03:58:55 +03:00
|
|
|
goto ReFault;
|
2006-01-21 16:13:07 +03:00
|
|
|
default:
|
|
|
|
goto done;
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
|
2006-01-21 16:13:07 +03:00
|
|
|
pg = anon->an_page;
|
2003-02-10 01:32:21 +03:00
|
|
|
uvm_lock_pageq();
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
uvm_pageactivate(pg);
|
2003-02-10 01:32:21 +03:00
|
|
|
uvm_unlock_pageq();
|
2006-01-21 16:13:07 +03:00
|
|
|
pg->flags &= ~(PG_BUSY|PG_FAKE);
|
1998-03-09 03:58:55 +03:00
|
|
|
UVM_PAGE_OWN(pg, NULL);
|
|
|
|
|
|
|
|
/* deref: can not drop to zero here by defn! */
|
|
|
|
oanon->an_ref--;
|
2001-01-23 04:56:16 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
2001-01-23 04:56:16 +03:00
|
|
|
* note: oanon is still locked, as is the new anon. we
|
|
|
|
* need to check for this later when we unlock oanon; if
|
|
|
|
* oanon != anon, we'll have to unlock anon, too.
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
} else {
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
uvmexp.flt_anon++;
|
|
|
|
oanon = anon; /* old, locked anon is same as anon */
|
2005-05-11 17:02:25 +04:00
|
|
|
pg = anon->an_page;
|
1998-03-09 03:58:55 +03:00
|
|
|
if (anon->an_ref > 1) /* disallow writes to ref > 1 anons */
|
|
|
|
enter_prot = enter_prot & ~VM_PROT_WRITE;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2001-01-23 04:56:16 +03:00
|
|
|
/* locked: maps(read), amap, oanon, anon (if different from oanon) */
|
1998-03-09 03:58:55 +03:00
|
|
|
|
|
|
|
/*
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
* now map the page in.
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
UVMHIST_LOG(maphist, " MAPPING: anon: pm=0x%x, va=0x%x, pg=0x%x",
|
|
|
|
ufi.orig_map->pmap, ufi.orig_rvaddr, pg, 0);
|
1999-11-13 03:24:38 +03:00
|
|
|
if (pmap_enter(ufi.orig_map->pmap, ufi.orig_rvaddr, VM_PAGE_TO_PHYS(pg),
|
|
|
|
enter_prot, access_type | PMAP_CANFAIL | (wired ? PMAP_WIRED : 0))
|
2001-03-15 09:10:32 +03:00
|
|
|
!= 0) {
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1999-11-13 03:24:38 +03:00
|
|
|
/*
|
|
|
|
* No need to undo what we did; we can simply think of
|
|
|
|
* this as the pmap throwing away the mapping information.
|
|
|
|
*
|
|
|
|
* We do, however, have to go through the ReFault path,
|
|
|
|
* as the map may change while we're asleep.
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
2001-01-23 04:56:16 +03:00
|
|
|
if (anon != oanon)
|
|
|
|
simple_unlock(&anon->an_lock);
|
1999-11-13 03:24:38 +03:00
|
|
|
uvmfault_unlockall(&ufi, amap, uobj, oanon);
|
2005-04-12 17:11:45 +04:00
|
|
|
if (!uvm_reclaimable()) {
|
1999-11-13 03:24:38 +03:00
|
|
|
UVMHIST_LOG(maphist,
|
|
|
|
"<- failed. out of VM",0,0,0,0);
|
|
|
|
/* XXX instrumentation */
|
2006-01-21 16:13:07 +03:00
|
|
|
error = ENOMEM;
|
|
|
|
goto done;
|
1999-11-13 03:24:38 +03:00
|
|
|
}
|
|
|
|
/* XXX instrumentation */
|
|
|
|
uvm_wait("flt_pmfail1");
|
|
|
|
goto ReFault;
|
|
|
|
}
|
1998-03-09 03:58:55 +03:00
|
|
|
|
|
|
|
/*
|
1999-11-13 03:24:38 +03:00
|
|
|
* ... update the page queues.
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
uvm_lock_pageq();
|
introduce a new UVM fault type, VM_FAULT_WIREMAX. this is different
from VM_FAULT_WIRE in that when the pages being wired are faulted in,
the simulated fault is at the maximum protection allowed for the mapping
instead of the current protection. use this in uvm_map_pageable{,_all}()
to fix the problem where writing via ptrace() to shared libraries that
are also mapped with wired mappings in another process causes a
diagnostic panic when the wired mapping is removed.
this is a really obscure problem so it deserves some more explanation.
ptrace() writing to another process ends up down in uvm_map_extract(),
which for MAP_PRIVATE mappings (such as shared libraries) will cause
the amap to be copied or created. then the amap is made shared
(ie. the AMAP_SHARED flag is set) between the kernel and the ptrace()d
process so that the kernel can modify pages in the amap and have the
ptrace()d process see the changes. then when the page being modified
is actually faulted on, the object pages (from the shared library vnode)
is copied to a new anon page and inserted into the shared amap.
to make all the processes sharing the amap actually see the new anon
page instead of the vnode page that was there before, we need to
invalidate all the pmap-level mappings of the vnode page in the pmaps
of the processes sharing the amap, but we don't have a good way of
doing this. the amap doesn't keep track of the vm_maps which map it.
so all we can do at this point is to remove all the mappings of the
page with pmap_page_protect(), but this has the unfortunate side-effect
of removing wired mappings as well. removing wired mappings with
pmap_page_protect() is a legitimate operation, it can happen when a file
with a wired mapping is truncated. so the pmap has no way of knowing
whether a request to remove a wired mapping is normal or when it's due to
this weird situation. so the pmap has to remove the weird mapping.
the process being ptrace()d goes away and life continues. then,
much later when we go to unwire or remove the wired vm_map mapping,
we discover that the pmap mapping has been removed when it should
still be there, and we panic.
so where did we go wrong? the problem is that we don't have any way
to update just the pmap mappings that need to be updated in this
scenario. we could invent a mechanism to do this, but that is much
more complicated than this change and it doesn't seem like the right
way to go in the long run either.
the real underlying problem here is that wired pmap mappings just
aren't a good concept. one of the original properties of the pmap
design was supposed to be that all the information in the pmap could
be thrown away at any time and the VM system could regenerate it all
through fault processing, but wired pmap mappings don't allow that.
a better design for UVM would not require wired pmap mappings,
and Chuck C. and I are talking about this, but it won't be done
anytime soon, so this change will do for now.
this change has the effect of causing MAP_PRIVATE mappings to be
copied to anonymous memory when they are mlock()d, so that uvm_fault()
doesn't need to copy these pages later when called from ptrace(), thus
avoiding the call to pmap_page_protect() and the panic that results
from this when the mlock()d region is unlocked or freed. note that
this change doesn't help the case where the wired mapping is MAP_SHARED.
discussed at great length with Chuck Cranor.
fixes PRs 10363, 12554, 12604, 13041, 13487, 14580 and 14853.
2002-01-01 01:34:39 +03:00
|
|
|
if (wire_fault) {
|
1998-03-23 00:29:30 +03:00
|
|
|
uvm_pagewire(pg);
|
1999-05-19 10:14:15 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* since the now-wired page cannot be paged out,
|
|
|
|
* release its swap resources for others to use.
|
|
|
|
* since an anon with no swap cannot be PG_CLEAN,
|
|
|
|
* clear its clean flag now.
|
|
|
|
*/
|
|
|
|
|
|
|
|
pg->flags &= ~(PG_CLEAN);
|
1999-03-26 20:34:15 +03:00
|
|
|
uvm_anon_dropswap(anon);
|
1998-03-09 03:58:55 +03:00
|
|
|
} else {
|
|
|
|
uvm_pageactivate(pg);
|
|
|
|
}
|
|
|
|
uvm_unlock_pageq();
|
|
|
|
|
|
|
|
/*
|
|
|
|
* done case 1! finish up by unlocking everything and returning success
|
|
|
|
*/
|
|
|
|
|
2001-01-23 04:56:16 +03:00
|
|
|
if (anon != oanon)
|
|
|
|
simple_unlock(&anon->an_lock);
|
1998-03-09 03:58:55 +03:00
|
|
|
uvmfault_unlockall(&ufi, amap, uobj, oanon);
|
2001-09-11 01:19:08 +04:00
|
|
|
pmap_update(ufi.orig_map->pmap);
|
2006-01-21 16:13:07 +03:00
|
|
|
error = 0;
|
|
|
|
goto done;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
Case2:
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* handle case 2: faulting on backing object or zero fill
|
|
|
|
*/
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* locked:
|
|
|
|
* maps(read), amap(if there), uobj(if !null), uobjpage(if !null)
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* note that uobjpage can not be PGO_DONTCARE at this point. we now
|
|
|
|
* set uobjpage to PGO_DONTCARE if we are doing a zero fill. if we
|
|
|
|
* have a backing object, check and see if we are going to promote
|
|
|
|
* the data up to an anon during the fault.
|
|
|
|
*/
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
if (uobj == NULL) {
|
2001-05-25 08:06:11 +04:00
|
|
|
uobjpage = PGO_DONTCARE;
|
1998-03-09 03:58:55 +03:00
|
|
|
promote = TRUE; /* always need anon here */
|
|
|
|
} else {
|
2000-11-27 11:39:39 +03:00
|
|
|
KASSERT(uobjpage != PGO_DONTCARE);
|
2002-01-02 01:18:39 +03:00
|
|
|
promote = cow_now && UVM_ET_ISCOPYONWRITE(ufi.entry);
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
UVMHIST_LOG(maphist, " case 2 fault: promote=%d, zfill=%d",
|
1999-11-13 03:24:38 +03:00
|
|
|
promote, (uobj == NULL), 0,0);
|
1998-03-09 03:58:55 +03:00
|
|
|
|
|
|
|
/*
|
1998-03-27 00:50:14 +03:00
|
|
|
* if uobjpage is not null then we do not need to do I/O to get the
|
|
|
|
* uobjpage.
|
|
|
|
*
|
2001-05-25 08:06:11 +04:00
|
|
|
* if uobjpage is null, then we need to unlock and ask the pager to
|
1998-03-09 03:58:55 +03:00
|
|
|
* get the data for us. once we have the data, we need to reverify
|
|
|
|
* the state the world. we are currently not holding any resources.
|
|
|
|
*/
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-27 00:50:14 +03:00
|
|
|
if (uobjpage) {
|
|
|
|
/* update rusage counters */
|
2003-01-18 11:51:40 +03:00
|
|
|
curproc->p_stats->p_ru.ru_minflt++;
|
1998-03-27 00:50:14 +03:00
|
|
|
} else {
|
|
|
|
/* update rusage counters */
|
2003-01-18 11:51:40 +03:00
|
|
|
curproc->p_stats->p_ru.ru_majflt++;
|
2001-05-25 08:06:11 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/* locked: maps(read), amap(if there), uobj */
|
|
|
|
uvmfault_unlockall(&ufi, amap, NULL, NULL);
|
|
|
|
/* locked: uobj */
|
|
|
|
|
|
|
|
uvmexp.fltget++;
|
|
|
|
gotpages = 1;
|
2000-11-27 11:39:39 +03:00
|
|
|
uoff = (ufi.orig_rvaddr - ufi.entry->start) + ufi.entry->offset;
|
2001-03-15 09:10:32 +03:00
|
|
|
error = uobj->pgops->pgo_get(uobj, uoff, &uobjpage, &gotpages,
|
2000-11-27 11:39:39 +03:00
|
|
|
0, access_type & MASK(ufi.entry), ufi.entry->advice,
|
|
|
|
PGO_SYNCIO);
|
2001-03-15 09:10:32 +03:00
|
|
|
/* locked: uobjpage(if no error) */
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* recover from I/O
|
|
|
|
*/
|
|
|
|
|
2001-03-15 09:10:32 +03:00
|
|
|
if (error) {
|
|
|
|
if (error == EAGAIN) {
|
1999-11-13 03:24:38 +03:00
|
|
|
UVMHIST_LOG(maphist,
|
|
|
|
" pgo_get says TRY AGAIN!",0,0,0,0);
|
2001-03-11 01:46:45 +03:00
|
|
|
tsleep(&lbolt, PVM, "fltagain2", 0);
|
1999-11-13 03:24:38 +03:00
|
|
|
goto ReFault;
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
UVMHIST_LOG(maphist, "<- pgo_get failed (code %d)",
|
2001-03-15 09:10:32 +03:00
|
|
|
error, 0,0,0);
|
2006-01-21 16:13:07 +03:00
|
|
|
goto done;
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/* locked: uobjpage */
|
|
|
|
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
uvm_lock_pageq();
|
|
|
|
uvm_pageactivate(uobjpage);
|
|
|
|
uvm_unlock_pageq();
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* re-verify the state of the world by first trying to relock
|
|
|
|
* the maps. always relock the object.
|
|
|
|
*/
|
|
|
|
|
|
|
|
locked = uvmfault_relock(&ufi);
|
|
|
|
if (locked && amap)
|
1999-01-25 02:53:14 +03:00
|
|
|
amap_lock(amap);
|
1998-03-09 03:58:55 +03:00
|
|
|
simple_lock(&uobj->vmobjlock);
|
2001-05-25 08:06:11 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/* locked(locked): maps(read), amap(if !null), uobj, uobjpage */
|
|
|
|
/* locked(!locked): uobj, uobjpage */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* verify that the page has not be released and re-verify
|
|
|
|
* that amap slot is still free. if there is a problem,
|
|
|
|
* we unlock and clean up.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if ((uobjpage->flags & PG_RELEASED) != 0 ||
|
2001-05-25 08:06:11 +04:00
|
|
|
(locked && amap &&
|
1998-03-09 03:58:55 +03:00
|
|
|
amap_lookup(&ufi.entry->aref,
|
1998-10-12 03:07:42 +04:00
|
|
|
ufi.orig_rvaddr - ufi.entry->start))) {
|
2001-05-25 08:06:11 +04:00
|
|
|
if (locked)
|
1998-03-09 03:58:55 +03:00
|
|
|
uvmfault_unlockall(&ufi, amap, NULL, NULL);
|
|
|
|
locked = FALSE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* didn't get the lock? release the page and retry.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (locked == FALSE) {
|
|
|
|
UVMHIST_LOG(maphist,
|
2001-05-25 08:06:11 +04:00
|
|
|
" wasn't able to relock after fault: retry",
|
1998-03-09 03:58:55 +03:00
|
|
|
0,0,0,0);
|
|
|
|
if (uobjpage->flags & PG_WANTED)
|
1999-07-23 02:58:38 +04:00
|
|
|
wakeup(uobjpage);
|
1998-03-09 03:58:55 +03:00
|
|
|
if (uobjpage->flags & PG_RELEASED) {
|
|
|
|
uvmexp.fltpgrele++;
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
uvm_pagefree(uobjpage);
|
1998-03-09 03:58:55 +03:00
|
|
|
goto ReFault;
|
|
|
|
}
|
|
|
|
uobjpage->flags &= ~(PG_BUSY|PG_WANTED);
|
|
|
|
UVM_PAGE_OWN(uobjpage, NULL);
|
|
|
|
simple_unlock(&uobj->vmobjlock);
|
|
|
|
goto ReFault;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
* we have the data in uobjpage which is busy and
|
|
|
|
* not released. we are holding object lock (so the page
|
1998-03-09 03:58:55 +03:00
|
|
|
* can't be released on us).
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* locked: maps(read), amap(if !null), uobj, uobjpage */
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
1998-03-09 03:58:55 +03:00
|
|
|
* locked:
|
|
|
|
* maps(read), amap(if !null), uobj(if !null), uobjpage(if uobj)
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
1998-03-09 03:58:55 +03:00
|
|
|
* notes:
|
|
|
|
* - at this point uobjpage can not be NULL
|
|
|
|
* - at this point uobjpage can not be PG_RELEASED (since we checked
|
|
|
|
* for it above)
|
|
|
|
* - at this point uobjpage could be PG_WANTED (handle later)
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
2001-05-25 08:06:11 +04:00
|
|
|
|
2005-07-22 18:57:39 +04:00
|
|
|
KASSERT(uobj == NULL || !UVM_OBJ_IS_CLEAN(uobjpage->uobject) ||
|
2005-07-17 16:27:47 +04:00
|
|
|
(uobjpage->flags & PG_CLEAN) != 0);
|
1998-03-09 03:58:55 +03:00
|
|
|
if (promote == FALSE) {
|
|
|
|
|
|
|
|
/*
|
|
|
|
* we are not promoting. if the mapping is COW ensure that we
|
|
|
|
* don't give more access than we should (e.g. when doing a read
|
|
|
|
* fault on a COPYONWRITE mapping we want to map the COW page in
|
|
|
|
* R/O even though the entry protection could be R/W).
|
|
|
|
*
|
|
|
|
* set "pg" to the page we want to map in (uobjpage, usually)
|
|
|
|
*/
|
|
|
|
|
2001-01-23 04:56:16 +03:00
|
|
|
/* no anon in this case. */
|
|
|
|
anon = NULL;
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
uvmexp.flt_obj++;
|
2005-07-17 16:27:47 +04:00
|
|
|
if (UVM_ET_ISCOPYONWRITE(ufi.entry) ||
|
2005-07-23 16:18:41 +04:00
|
|
|
UVM_OBJ_NEEDS_WRITEFAULT(uobjpage->uobject))
|
1999-03-28 23:53:49 +04:00
|
|
|
enter_prot &= ~VM_PROT_WRITE;
|
1998-03-09 03:58:55 +03:00
|
|
|
pg = uobjpage; /* map in the actual object */
|
|
|
|
|
|
|
|
/* assert(uobjpage != PGO_DONTCARE) */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* we are faulting directly on the page. be careful
|
|
|
|
* about writing to loaned pages...
|
|
|
|
*/
|
|
|
|
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
if (uobjpage->loan_count) {
|
2002-01-02 01:18:39 +03:00
|
|
|
if (!cow_now) {
|
1998-03-09 03:58:55 +03:00
|
|
|
/* read fault: cap the protection at readonly */
|
|
|
|
/* cap! */
|
|
|
|
enter_prot = enter_prot & ~VM_PROT_WRITE;
|
|
|
|
} else {
|
|
|
|
/* write fault: must break the loan here */
|
|
|
|
|
2003-05-03 21:57:50 +04:00
|
|
|
pg = uvm_loanbreak(uobjpage);
|
1998-03-09 03:58:55 +03:00
|
|
|
if (pg == NULL) {
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* drop ownership of page, it can't
|
|
|
|
* be released
|
1999-11-13 03:24:38 +03:00
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
if (uobjpage->flags & PG_WANTED)
|
1999-07-23 02:58:38 +04:00
|
|
|
wakeup(uobjpage);
|
1998-03-09 03:58:55 +03:00
|
|
|
uobjpage->flags &= ~(PG_BUSY|PG_WANTED);
|
|
|
|
UVM_PAGE_OWN(uobjpage, NULL);
|
|
|
|
|
|
|
|
uvmfault_unlockall(&ufi, amap, uobj,
|
|
|
|
NULL);
|
|
|
|
UVMHIST_LOG(maphist,
|
1999-01-31 12:27:18 +03:00
|
|
|
" out of RAM breaking loan, waiting",
|
|
|
|
0,0,0,0);
|
1998-03-09 03:58:55 +03:00
|
|
|
uvmexp.fltnoram++;
|
|
|
|
uvm_wait("flt_noram4");
|
|
|
|
goto ReFault;
|
|
|
|
}
|
|
|
|
uobjpage = pg;
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
}
|
|
|
|
}
|
1998-03-09 03:58:55 +03:00
|
|
|
} else {
|
2001-05-25 08:06:11 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* if we are going to promote the data to an anon we
|
|
|
|
* allocate a blank anon here and plug it into our amap.
|
|
|
|
*/
|
1998-02-05 09:25:08 +03:00
|
|
|
#if DIAGNOSTIC
|
1998-03-09 03:58:55 +03:00
|
|
|
if (amap == NULL)
|
|
|
|
panic("uvm_fault: want to promote data, but no anon");
|
1998-02-05 09:25:08 +03:00
|
|
|
#endif
|
2006-01-21 16:13:07 +03:00
|
|
|
error = uvmfault_promote(&ufi, NULL, uobjpage,
|
|
|
|
&anon, &anon_spare);
|
|
|
|
switch (error) {
|
|
|
|
case 0:
|
|
|
|
break;
|
|
|
|
case ERESTART:
|
1998-03-09 03:58:55 +03:00
|
|
|
goto ReFault;
|
2006-01-21 16:13:07 +03:00
|
|
|
default:
|
|
|
|
goto done;
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
|
2006-01-21 16:13:07 +03:00
|
|
|
pg = anon->an_page;
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* fill in the data
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (uobjpage != PGO_DONTCARE) {
|
|
|
|
uvmexp.flt_prcopy++;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* promote to shared amap? make sure all sharing
|
|
|
|
* procs see it
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1999-01-25 02:53:14 +03:00
|
|
|
if ((amap_flags(amap) & AMAP_SHARED) != 0) {
|
1999-09-12 05:16:55 +04:00
|
|
|
pmap_page_protect(uobjpage, VM_PROT_NONE);
|
2001-04-25 18:59:44 +04:00
|
|
|
/*
|
|
|
|
* XXX: PAGE MIGHT BE WIRED!
|
|
|
|
*/
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
2001-05-25 08:06:11 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* dispose of uobjpage. it can't be PG_RELEASED
|
2000-11-27 11:39:39 +03:00
|
|
|
* since we still hold the object lock.
|
|
|
|
* drop handle to uobj as well.
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
if (uobjpage->flags & PG_WANTED)
|
|
|
|
/* still have the obj lock */
|
1999-07-23 02:58:38 +04:00
|
|
|
wakeup(uobjpage);
|
1998-03-09 03:58:55 +03:00
|
|
|
uobjpage->flags &= ~(PG_BUSY|PG_WANTED);
|
|
|
|
UVM_PAGE_OWN(uobjpage, NULL);
|
|
|
|
simple_unlock(&uobj->vmobjlock);
|
|
|
|
uobj = NULL;
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
UVMHIST_LOG(maphist,
|
|
|
|
" promote uobjpage 0x%x to anon/page 0x%x/0x%x",
|
|
|
|
uobjpage, anon, pg, 0);
|
|
|
|
|
|
|
|
} else {
|
|
|
|
uvmexp.flt_przero++;
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
2000-04-10 05:17:41 +04:00
|
|
|
/*
|
2006-01-21 16:13:07 +03:00
|
|
|
* Page is zero'd and marked dirty by
|
|
|
|
* uvmfault_promote().
|
2000-04-10 05:17:41 +04:00
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
UVMHIST_LOG(maphist," zero fill anon/page 0x%x/0%x",
|
|
|
|
anon, pg, 0, 0);
|
|
|
|
}
|
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* locked:
|
2001-01-23 04:56:16 +03:00
|
|
|
* maps(read), amap(if !null), uobj(if !null), uobjpage(if uobj),
|
|
|
|
* anon(if !null), pg(if anon)
|
1998-03-09 03:58:55 +03:00
|
|
|
*
|
|
|
|
* note: pg is either the uobjpage or the new page in the new anon
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* all resources are present. we can now map it in and free our
|
|
|
|
* resources.
|
|
|
|
*/
|
|
|
|
|
|
|
|
UVMHIST_LOG(maphist,
|
|
|
|
" MAPPING: case2: pm=0x%x, va=0x%x, pg=0x%x, promote=%d",
|
|
|
|
ufi.orig_map->pmap, ufi.orig_rvaddr, pg, promote);
|
2002-03-09 07:29:03 +03:00
|
|
|
KASSERT((access_type & VM_PROT_WRITE) == 0 ||
|
|
|
|
(pg->flags & PG_RDONLY) == 0);
|
1999-11-13 03:24:38 +03:00
|
|
|
if (pmap_enter(ufi.orig_map->pmap, ufi.orig_rvaddr, VM_PAGE_TO_PHYS(pg),
|
2002-03-25 04:56:48 +03:00
|
|
|
pg->flags & PG_RDONLY ? enter_prot & ~VM_PROT_WRITE : enter_prot,
|
2001-03-15 09:10:32 +03:00
|
|
|
access_type | PMAP_CANFAIL | (wired ? PMAP_WIRED : 0)) != 0) {
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1999-11-13 03:24:38 +03:00
|
|
|
/*
|
|
|
|
* No need to undo what we did; we can simply think of
|
|
|
|
* this as the pmap throwing away the mapping information.
|
|
|
|
*
|
|
|
|
* We do, however, have to go through the ReFault path,
|
|
|
|
* as the map may change while we're asleep.
|
|
|
|
*/
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1999-11-13 03:24:38 +03:00
|
|
|
if (pg->flags & PG_WANTED)
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
wakeup(pg);
|
1999-11-13 03:24:38 +03:00
|
|
|
|
2001-05-25 08:06:11 +04:00
|
|
|
/*
|
1999-11-13 03:24:38 +03:00
|
|
|
* note that pg can't be PG_RELEASED since we did not drop
|
|
|
|
* the object lock since the last time we checked.
|
|
|
|
*/
|
2001-05-25 08:06:11 +04:00
|
|
|
|
1999-11-13 03:24:38 +03:00
|
|
|
pg->flags &= ~(PG_BUSY|PG_FAKE|PG_WANTED);
|
|
|
|
UVM_PAGE_OWN(pg, NULL);
|
2001-01-23 04:56:16 +03:00
|
|
|
uvmfault_unlockall(&ufi, amap, uobj, anon);
|
2005-04-12 17:11:45 +04:00
|
|
|
if (!uvm_reclaimable()) {
|
1999-11-13 03:24:38 +03:00
|
|
|
UVMHIST_LOG(maphist,
|
|
|
|
"<- failed. out of VM",0,0,0,0);
|
|
|
|
/* XXX instrumentation */
|
2006-01-21 16:13:07 +03:00
|
|
|
error = ENOMEM;
|
|
|
|
goto done;
|
1999-11-13 03:24:38 +03:00
|
|
|
}
|
|
|
|
/* XXX instrumentation */
|
|
|
|
uvm_wait("flt_pmfail2");
|
|
|
|
goto ReFault;
|
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
uvm_lock_pageq();
|
introduce a new UVM fault type, VM_FAULT_WIREMAX. this is different
from VM_FAULT_WIRE in that when the pages being wired are faulted in,
the simulated fault is at the maximum protection allowed for the mapping
instead of the current protection. use this in uvm_map_pageable{,_all}()
to fix the problem where writing via ptrace() to shared libraries that
are also mapped with wired mappings in another process causes a
diagnostic panic when the wired mapping is removed.
this is a really obscure problem so it deserves some more explanation.
ptrace() writing to another process ends up down in uvm_map_extract(),
which for MAP_PRIVATE mappings (such as shared libraries) will cause
the amap to be copied or created. then the amap is made shared
(ie. the AMAP_SHARED flag is set) between the kernel and the ptrace()d
process so that the kernel can modify pages in the amap and have the
ptrace()d process see the changes. then when the page being modified
is actually faulted on, the object pages (from the shared library vnode)
is copied to a new anon page and inserted into the shared amap.
to make all the processes sharing the amap actually see the new anon
page instead of the vnode page that was there before, we need to
invalidate all the pmap-level mappings of the vnode page in the pmaps
of the processes sharing the amap, but we don't have a good way of
doing this. the amap doesn't keep track of the vm_maps which map it.
so all we can do at this point is to remove all the mappings of the
page with pmap_page_protect(), but this has the unfortunate side-effect
of removing wired mappings as well. removing wired mappings with
pmap_page_protect() is a legitimate operation, it can happen when a file
with a wired mapping is truncated. so the pmap has no way of knowing
whether a request to remove a wired mapping is normal or when it's due to
this weird situation. so the pmap has to remove the weird mapping.
the process being ptrace()d goes away and life continues. then,
much later when we go to unwire or remove the wired vm_map mapping,
we discover that the pmap mapping has been removed when it should
still be there, and we panic.
so where did we go wrong? the problem is that we don't have any way
to update just the pmap mappings that need to be updated in this
scenario. we could invent a mechanism to do this, but that is much
more complicated than this change and it doesn't seem like the right
way to go in the long run either.
the real underlying problem here is that wired pmap mappings just
aren't a good concept. one of the original properties of the pmap
design was supposed to be that all the information in the pmap could
be thrown away at any time and the VM system could regenerate it all
through fault processing, but wired pmap mappings don't allow that.
a better design for UVM would not require wired pmap mappings,
and Chuck C. and I are talking about this, but it won't be done
anytime soon, so this change will do for now.
this change has the effect of causing MAP_PRIVATE mappings to be
copied to anonymous memory when they are mlock()d, so that uvm_fault()
doesn't need to copy these pages later when called from ptrace(), thus
avoiding the call to pmap_page_protect() and the panic that results
from this when the mlock()d region is unlocked or freed. note that
this change doesn't help the case where the wired mapping is MAP_SHARED.
discussed at great length with Chuck Cranor.
fixes PRs 10363, 12554, 12604, 13041, 13487, 14580 and 14853.
2002-01-01 01:34:39 +03:00
|
|
|
if (wire_fault) {
|
1998-03-23 00:29:30 +03:00
|
|
|
uvm_pagewire(pg);
|
1999-03-26 20:34:15 +03:00
|
|
|
if (pg->pqflags & PQ_AOBJ) {
|
1999-05-19 10:14:15 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* since the now-wired page cannot be paged out,
|
|
|
|
* release its swap resources for others to use.
|
|
|
|
* since an aobj page with no swap cannot be PG_CLEAN,
|
|
|
|
* clear its clean flag now.
|
|
|
|
*/
|
|
|
|
|
|
|
|
pg->flags &= ~(PG_CLEAN);
|
1999-03-26 20:34:15 +03:00
|
|
|
uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
|
|
|
|
}
|
1998-03-09 03:58:55 +03:00
|
|
|
} else {
|
|
|
|
uvm_pageactivate(pg);
|
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
uvm_unlock_pageq();
|
1998-03-09 03:58:55 +03:00
|
|
|
if (pg->flags & PG_WANTED)
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
wakeup(pg);
|
1998-03-09 03:58:55 +03:00
|
|
|
|
2001-05-25 08:06:11 +04:00
|
|
|
/*
|
|
|
|
* note that pg can't be PG_RELEASED since we did not drop the object
|
1998-03-09 03:58:55 +03:00
|
|
|
* lock since the last time we checked.
|
|
|
|
*/
|
2001-05-25 08:06:11 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
pg->flags &= ~(PG_BUSY|PG_FAKE|PG_WANTED);
|
|
|
|
UVM_PAGE_OWN(pg, NULL);
|
2001-01-23 04:56:16 +03:00
|
|
|
uvmfault_unlockall(&ufi, amap, uobj, anon);
|
2001-09-11 01:19:08 +04:00
|
|
|
pmap_update(ufi.orig_map->pmap);
|
1998-03-09 03:58:55 +03:00
|
|
|
UVMHIST_LOG(maphist, "<- done (SUCCESS!)",0,0,0,0);
|
2006-01-21 16:13:07 +03:00
|
|
|
error = 0;
|
|
|
|
done:
|
|
|
|
if (anon_spare != NULL) {
|
|
|
|
anon_spare->an_ref--;
|
|
|
|
uvm_anfree(anon_spare);
|
|
|
|
}
|
|
|
|
return error;
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_fault_wire: wire down a range of virtual addresses in a map.
|
|
|
|
*
|
1999-06-17 02:11:23 +04:00
|
|
|
* => map may be read-locked by caller, but MUST NOT be write-locked.
|
|
|
|
* => if map is read-locked, any operations which may cause map to
|
|
|
|
* be write-locked in uvm_fault() must be taken care of by
|
|
|
|
* the caller. See uvm_map_pageable().
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
int
|
2005-06-27 06:19:48 +04:00
|
|
|
uvm_fault_wire(struct vm_map *map, vaddr_t start, vaddr_t end,
|
|
|
|
vm_fault_t fault_type, vm_prot_t access_type)
|
1998-03-09 03:58:55 +03:00
|
|
|
{
|
1998-08-13 06:10:37 +04:00
|
|
|
vaddr_t va; /* address of the page currently being faulted in */
|
2001-03-15 09:10:32 +03:00
|
|
|
int error; /* 0 on success, errno-style code from uvm_fault() */
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
2000-01-11 09:57:49 +03:00
|
|
|
* now fault it in a page at a time. if the fault fails then we have
|
2001-05-25 08:06:11 +04:00
|
|
|
* to undo what we have done. note that in uvm_fault VM_PROT_NONE
|
2000-01-11 09:57:49 +03:00
|
|
|
* is replaced with the max protection if fault_type is VM_FAULT_WIRE.
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
|
|
|
|
2001-06-14 09:12:56 +04:00
|
|
|
/*
|
|
|
|
* XXX work around overflowing a vaddr_t. this prevents us from
|
|
|
|
* wiring the last page in the address space, though.
|
|
|
|
*/
|
|
|
|
if (start > end) {
|
|
|
|
/* refuse rather than let "va < end" wrap around */
return EFAULT;
|
|
|
|
}
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
for (va = start ; va < end ; va += PAGE_SIZE) {
|
introduce a new UVM fault type, VM_FAULT_WIREMAX. this is different
from VM_FAULT_WIRE in that when the pages being wired are faulted in,
the simulated fault is at the maximum protection allowed for the mapping
instead of the current protection. use this in uvm_map_pageable{,_all}()
to fix the problem where writing via ptrace() to shared libraries that
are also mapped with wired mappings in another process causes a
diagnostic panic when the wired mapping is removed.
this is a really obscure problem so it deserves some more explanation.
ptrace() writing to another process ends up down in uvm_map_extract(),
which for MAP_PRIVATE mappings (such as shared libraries) will cause
the amap to be copied or created. then the amap is made shared
(ie. the AMAP_SHARED flag is set) between the kernel and the ptrace()d
process so that the kernel can modify pages in the amap and have the
ptrace()d process see the changes. then when the page being modified
is actually faulted on, the object pages (from the shared library vnode)
is copied to a new anon page and inserted into the shared amap.
to make all the processes sharing the amap actually see the new anon
page instead of the vnode page that was there before, we need to
invalidate all the pmap-level mappings of the vnode page in the pmaps
of the processes sharing the amap, but we don't have a good way of
doing this. the amap doesn't keep track of the vm_maps which map it.
so all we can do at this point is to remove all the mappings of the
page with pmap_page_protect(), but this has the unfortunate side-effect
of removing wired mappings as well. removing wired mappings with
pmap_page_protect() is a legitimate operation, it can happen when a file
with a wired mapping is truncated. so the pmap has no way of knowing
whether a request to remove a wired mapping is normal or when it's due to
this weird situation. so the pmap has to remove the weird mapping.
the process being ptrace()d goes away and life continues. then,
much later when we go to unwire or remove the wired vm_map mapping,
we discover that the pmap mapping has been removed when it should
still be there, and we panic.
so where did we go wrong? the problem is that we don't have any way
to update just the pmap mappings that need to be updated in this
scenario. we could invent a mechanism to do this, but that is much
more complicated than this change and it doesn't seem like the right
way to go in the long run either.
the real underlying problem here is that wired pmap mappings just
aren't a good concept. one of the original properties of the pmap
design was supposed to be that all the information in the pmap could
be thrown away at any time and the VM system could regenerate it all
through fault processing, but wired pmap mappings don't allow that.
a better design for UVM would not require wired pmap mappings,
and Chuck C. and I are talking about this, but it won't be done
anytime soon, so this change will do for now.
this change has the effect of causing MAP_PRIVATE mappings to be
copied to anonymous memory when they are mlock()d, so that uvm_fault()
doesn't need to copy these pages later when called from ptrace(), thus
avoiding the call to pmap_page_protect() and the panic that results
from this when the mlock()d region is unlocked or freed. note that
this change doesn't help the case where the wired mapping is MAP_SHARED.
discussed at great length with Chuck Cranor.
fixes PRs 10363, 12554, 12604, 13041, 13487, 14580 and 14853.
2002-01-01 01:34:39 +03:00
|
|
|
/* simulate a fault on this page so it gets wired in */
error = uvm_fault(map, va, fault_type, access_type);
|
2001-03-15 09:10:32 +03:00
|
|
|
if (error) {
|
1998-03-09 03:58:55 +03:00
|
|
|
if (va != start) {
|
1999-05-29 00:49:51 +04:00
|
|
|
/* roll back the pages we already wired */
uvm_fault_unwire(map, start, va);
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
2001-03-15 09:10:32 +03:00
|
|
|
return error;
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
}
|
2001-03-15 09:10:32 +03:00
|
|
|
return 0;
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_fault_unwire(): unwire range of virtual space.
|
|
|
|
*/
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
void
|
2005-06-27 06:19:48 +04:00
|
|
|
uvm_fault_unwire(struct vm_map *map, vaddr_t start, vaddr_t end)
|
1999-06-17 02:11:23 +04:00
|
|
|
{
|
|
|
|
/* locking wrapper: take the read lock, then do the real work */
|
vm_map_lock_read(map);
|
|
|
|
|
uvm_fault_unwire_locked(map, start, end);
|
|
|
|
|
vm_map_unlock_read(map);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_fault_unwire_locked(): the guts of uvm_fault_unwire().
|
|
|
|
*
|
|
|
|
* => map must be at least read-locked.
|
|
|
|
*/
|
|
|
|
|
|
|
|
void
|
2005-06-27 06:19:48 +04:00
|
|
|
uvm_fault_unwire_locked(struct vm_map *map, vaddr_t start, vaddr_t end)
|
1998-03-09 03:58:55 +03:00
|
|
|
{
|
2001-06-02 22:09:08 +04:00
|
|
|
struct vm_map_entry *entry; /* map entry covering the current va */
|
1999-05-29 00:49:51 +04:00
|
|
|
pmap_t pmap = vm_map_pmap(map);
|
1999-07-11 21:47:12 +04:00
|
|
|
vaddr_t va;
|
1998-08-13 06:10:37 +04:00
|
|
|
paddr_t pa;
|
1999-07-11 21:47:12 +04:00
|
|
|
struct vm_page *pg;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
|
1999-05-29 00:49:51 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* we assume that the area we are unwiring has actually been wired
|
|
|
|
* in the first place. this means that we should be able to extract
|
|
|
|
* the PAs from the pmap. we also lock out the page daemon so that
|
|
|
|
* we can call uvm_pageunwire.
|
|
|
|
*/
|
1999-06-17 03:02:40 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
uvm_lock_pageq(); /* page queue lock, required for uvm_pageunwire() */
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1999-06-17 03:02:40 +04:00
|
|
|
/*
|
|
|
|
* find the beginning map entry for the region.
|
|
|
|
*/
|
2002-01-02 04:10:36 +03:00
|
|
|
|
2001-02-19 00:19:08 +03:00
|
|
|
KASSERT(start >= vm_map_min(map) && end <= vm_map_max(map));
|
1999-06-17 03:02:40 +04:00
|
|
|
if (uvm_map_lookup_entry(map, start, &entry) == FALSE)
|
|
|
|
panic("uvm_fault_unwire_locked: address not in map");
|
|
|
|
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
for (va = start; va < end; va += PAGE_SIZE) {
|
1999-07-11 21:47:12 +04:00
|
|
|
if (pmap_extract(pmap, va, &pa) == FALSE)
|
2002-01-02 04:10:36 +03:00
|
|
|
continue; /* no mapping at this va -- nothing to unwire */
|
1999-07-11 21:47:12 +04:00
|
|
|
|
|
|
|
/*
|
2002-01-02 04:10:36 +03:00
|
|
|
* find the map entry for the current address.
|
1999-07-11 21:47:12 +04:00
|
|
|
*/
|
2001-02-19 00:19:08 +03:00
|
|
|
|
|
|
|
KASSERT(va >= entry->start);
|
2002-01-02 04:10:36 +03:00
|
|
|
while (va >= entry->end) { /* walk forward to the entry covering va */
|
2001-02-19 00:19:08 +03:00
|
|
|
KASSERT(entry->next != &map->header &&
|
|
|
|
entry->next->start <= entry->end);
|
1999-07-11 21:47:12 +04:00
|
|
|
entry = entry->next;
|
|
|
|
}
|
1999-06-17 03:02:40 +04:00
|
|
|
|
1999-07-11 21:47:12 +04:00
|
|
|
/*
|
|
|
|
* if the entry is no longer wired, tell the pmap.
|
|
|
|
*/
|
2002-01-02 04:10:36 +03:00
|
|
|
|
1999-07-11 21:47:12 +04:00
|
|
|
if (VM_MAPENT_ISWIRED(entry) == 0)
|
|
|
|
pmap_unwire(pmap, va);
|
1999-07-11 01:46:56 +04:00
|
|
|
|
1999-07-11 21:47:12 +04:00
|
|
|
pg = PHYS_TO_VM_PAGE(pa);
|
|
|
|
/* NOTE(review): pg can be NULL here -- presumably unmanaged/device
   memory has no vm_page; confirm against PHYS_TO_VM_PAGE() */
if (pg)
|
|
|
|
uvm_pageunwire(pg);
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
uvm_unlock_pageq();
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|