/*	$NetBSD: uvm_anon.c,v 1.80 2020/10/25 00:05:26 chs Exp $	*/
|
1999-01-25 02:53:14 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Copyright (c) 1997 Charles D. Cranor and Washington University.
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
|
|
|
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
|
|
|
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
|
|
|
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
|
|
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
|
|
|
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
|
|
|
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_anon.c: uvm anon ops
|
|
|
|
*/
|
|
|
|
|
2001-11-10 10:36:59 +03:00
|
|
|
#include <sys/cdefs.h>
|
2020-10-25 03:05:26 +03:00
|
|
|
__KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.80 2020/10/25 00:05:26 chs Exp $");
|
2001-11-10 10:36:59 +03:00
|
|
|
|
1999-01-25 02:53:14 +03:00
|
|
|
#include "opt_uvmhist.h"
|
|
|
|
|
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
|
|
|
#include <sys/pool.h>
|
2000-01-11 09:57:49 +03:00
|
|
|
#include <sys/kernel.h>
|
2019-12-02 02:14:47 +03:00
|
|
|
#include <sys/atomic.h>
|
1999-01-25 02:53:14 +03:00
|
|
|
|
|
|
|
#include <uvm/uvm.h>
|
|
|
|
#include <uvm/uvm_swap.h>
|
2006-09-15 19:51:12 +04:00
|
|
|
#include <uvm/uvm_pdpolicy.h>
|
1999-01-25 02:53:14 +03:00
|
|
|
|
2011-06-17 06:12:35 +04:00
|
|
|
static struct pool_cache uvm_anon_cache;
|
2000-01-11 09:57:49 +03:00
|
|
|
|
2011-06-17 06:12:35 +04:00
|
|
|
static int uvm_anon_ctor(void *, void *, int);
|
2000-01-11 09:57:49 +03:00
|
|
|
|
1999-01-25 02:53:14 +03:00
|
|
|
/*
 * uvm_anon_init: initialize the anon allocator.
 *
 * Bootstraps the pool cache from which all vm_anon structures are
 * allocated.  uvm_anon_ctor() establishes the idle-state invariants
 * (zero refs, no lock, no page, no swap slot) on each new object, so
 * uvm_analloc() can hand anons out without re-initializing them.
 */
void
uvm_anon_init(void)
{

	/*
	 * PR_LARGECACHE: anons are allocated/freed at a high rate, so a
	 * larger per-CPU cache is used to reduce contention on the pool.
	 */
	pool_cache_bootstrap(&uvm_anon_cache, sizeof(struct vm_anon), 0, 0,
	    PR_LARGECACHE, "anonpl", NULL, IPL_NONE, uvm_anon_ctor,
	    NULL, NULL);
}
|
|
|
|
|
2005-05-11 17:02:25 +04:00
|
|
|
static int
|
2006-11-01 13:17:58 +03:00
|
|
|
uvm_anon_ctor(void *arg, void *object, int flags)
|
1999-01-25 02:53:14 +03:00
|
|
|
{
|
2005-05-11 17:02:25 +04:00
|
|
|
struct vm_anon *anon = object;
|
1999-01-25 02:53:14 +03:00
|
|
|
|
2005-05-11 17:02:25 +04:00
|
|
|
anon->an_ref = 0;
|
2011-08-06 21:25:03 +04:00
|
|
|
anon->an_lock = NULL;
|
2005-05-11 17:02:25 +04:00
|
|
|
anon->an_page = NULL;
|
2005-09-14 02:00:05 +04:00
|
|
|
#if defined(VMSWAP)
|
2005-05-11 17:02:25 +04:00
|
|
|
anon->an_swslot = 0;
|
2011-06-17 06:12:35 +04:00
|
|
|
#endif
|
2000-12-27 12:17:04 +03:00
|
|
|
return 0;
|
1999-01-25 02:53:14 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2011-06-17 06:12:35 +04:00
|
|
|
* uvm_analloc: allocate a new anon.
|
2001-01-23 05:27:39 +03:00
|
|
|
*
|
2011-06-17 06:12:35 +04:00
|
|
|
* => anon will have no lock associated.
|
1999-01-25 02:53:14 +03:00
|
|
|
*/
|
|
|
|
struct vm_anon *
|
2005-06-27 06:19:48 +04:00
|
|
|
uvm_analloc(void)
|
1999-01-25 02:53:14 +03:00
|
|
|
{
|
2005-05-11 17:02:25 +04:00
|
|
|
struct vm_anon *anon;
|
|
|
|
|
2007-11-07 03:23:13 +03:00
|
|
|
anon = pool_cache_get(&uvm_anon_cache, PR_NOWAIT);
|
2005-05-11 17:02:25 +04:00
|
|
|
if (anon) {
|
|
|
|
KASSERT(anon->an_ref == 0);
|
2011-08-06 21:25:03 +04:00
|
|
|
KASSERT(anon->an_lock == NULL);
|
2005-05-11 17:02:25 +04:00
|
|
|
KASSERT(anon->an_page == NULL);
|
2005-09-14 02:00:05 +04:00
|
|
|
#if defined(VMSWAP)
|
2005-05-11 17:02:25 +04:00
|
|
|
KASSERT(anon->an_swslot == 0);
|
2011-06-17 06:12:35 +04:00
|
|
|
#endif
|
2005-05-11 17:02:25 +04:00
|
|
|
anon->an_ref = 1;
|
1999-01-25 02:53:14 +03:00
|
|
|
}
|
2005-05-11 17:02:25 +04:00
|
|
|
return anon;
|
1999-01-25 02:53:14 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * uvm_anfree: free a single anon structure
 *
 * => anon must be removed from the amap (if anon was in an amap).
 * => amap must be locked, if anon was owned by amap.
 * => we may drop and re-acquire the lock here (to break loans).
 */
void
uvm_anfree(struct vm_anon *anon)
{
	struct vm_page *pg = anon->an_page, *pg2 __diagused;
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist,"(anon=%#jx)", (uintptr_t)anon, 0,0,0);

	/* Caller holds the anon lock exclusively, unless anon has none. */
	KASSERT(anon->an_lock == NULL || rw_write_held(anon->an_lock));
	KASSERT(anon->an_ref == 0);

	/*
	 * Dispose of the page, if it is resident.
	 */

	if (__predict_true(pg != NULL)) {
		KASSERT(anon->an_lock != NULL);

		/*
		 * If there is a resident page and it is loaned, then anon
		 * may not own it.  Call out to uvm_anon_lockloanpg() to
		 * identify and lock the real owner of the page.
		 */

		if (__predict_false(pg->loan_count != 0)) {
			pg2 = uvm_anon_lockloanpg(anon);
			/* pg2 only checked under DIAGNOSTIC (__diagused). */
			KASSERT(pg2 == pg);
		}

		/*
		 * If the page is owned by a UVM object (now locked),
		 * then kill the loan on the page rather than free it,
		 * and release the object lock.
		 */

		if (__predict_false(pg->uobject != NULL)) {
			/* interlock guards loan_count/uanon updates. */
			mutex_enter(&pg->interlock);
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->uanon = NULL;
			mutex_exit(&pg->interlock);
			rw_exit(pg->uobject->vmobjlock);
		} else {

			/*
			 * If page has no UVM object, then anon is the owner,
			 * and it is already locked.
			 */

			KASSERT((pg->flags & PG_RELEASED) == 0);
			/* Remove all pmap-level mappings before freeing. */
			pmap_page_protect(pg, VM_PROT_NONE);

			/*
			 * If the page is busy, mark it as PG_RELEASED, so
			 * that uvm_anon_release(9) would release it later.
			 */

			if (__predict_false((pg->flags & PG_BUSY) != 0)) {
				pg->flags |= PG_RELEASED;
				/*
				 * Extra hold on the lock object: the unbusy
				 * path drops it in uvm_anon_release().
				 */
				rw_obj_hold(anon->an_lock);
				return;
			}
			uvm_pagefree(pg);
			UVMHIST_LOG(maphist, "anon %#jx, page %#jx: "
			    "freed now!", (uintptr_t)anon, (uintptr_t)pg,
			    0, 0);
		}
	} else {
#if defined(VMSWAP)
		if (anon->an_swslot > 0) {
			/* This page is no longer only in swap. */
			KASSERT(uvmexp.swpgonly > 0);
			atomic_dec_uint(&uvmexp.swpgonly);
		}
#endif
	}
	anon->an_lock = NULL;

	/*
	 * Free any swap resources, leave a page replacement hint.
	 */

	uvm_anon_dropswap(anon);
	uvmpdpol_anfree(anon);
	UVMHIST_LOG(maphist,"<- done!",0,0,0,0);
	pool_cache_put(&uvm_anon_cache, anon);
}
|
|
|
|
|
1999-03-26 20:34:15 +03:00
|
|
|
/*
 * uvm_anon_lockloanpg: given a locked anon, lock its resident page owner.
 *
 * => anon is locked by caller
 * => on return: anon is locked
 *    if there is a resident page:
 *	if it has a uobject, it is locked by us
 *	if it is ownerless, we take over as owner
 *    we return the resident page (it can change during
 *    this function)
 * => note that the only time an anon has an ownerless resident page
 *    is if the page was loaned from a uvm_object and the uvm_object
 *    disowned it
 * => this only needs to be called when you want to do an operation
 *    on an anon's resident page and that page has a non-zero loan
 *    count.
 */
struct vm_page *
uvm_anon_lockloanpg(struct vm_anon *anon)
{
	struct vm_page *pg;
	krw_t op;		/* remembers reader vs. writer across drop */

	KASSERT(rw_lock_held(anon->an_lock));

	/*
	 * loop while we have a resident page that has a non-zero loan count.
	 * if we successfully get our lock, we will "break" the loop.
	 * note that the test for pg->loan_count is not protected -- this
	 * may produce false positive results.  note that a false positive
	 * result may cause us to do more work than we need to, but it will
	 * not produce an incorrect result.
	 */

	while (((pg = anon->an_page) != NULL) && pg->loan_count != 0) {
		mutex_enter(&pg->interlock);
		if (pg->uobject) {
			/*
			 * if we didn't get a lock (try lock failed), then we
			 * toggle our anon lock and try again
			 */

			if (!rw_tryenter(pg->uobject->vmobjlock, RW_WRITER)) {
				/*
				 * someone locking the object has a chance to
				 * lock us right now
				 *
				 * XXX Better than yielding but inadequate.
				 */
				/*
				 * Drop both locks, briefly pause to let the
				 * conflicting locker make progress, then
				 * re-take the anon lock in the same mode
				 * (read or write) it was held in before.
				 */
				mutex_exit(&pg->interlock);
				op = rw_lock_op(anon->an_lock);
				rw_exit(anon->an_lock);
				kpause("lkloanpg", false, 1, NULL);
				rw_enter(anon->an_lock, op);
				/* an_page may have changed; re-check loop. */
				continue;
			}
		}

		/*
		 * If page is un-owned i.e. the object dropped its ownership,
		 * then we have to take the ownership.
		 */

		if (pg->uobject == NULL && (pg->flags & PG_ANON) == 0) {
			pg->flags |= PG_ANON;
			pg->loan_count--;	/* anon's loan becomes ownership */
		}
		mutex_exit(&pg->interlock);
		break;
	}
	return pg;
}
|
2000-01-11 09:57:49 +03:00
|
|
|
|
2005-09-14 02:00:05 +04:00
|
|
|
#if defined(VMSWAP)
|
|
|
|
|
2000-01-11 09:57:49 +03:00
|
|
|
/*
 * uvm_anon_pagein: fetch an anon's page.
 *
 * => anon must be locked, and is unlocked upon return.
 * => returns true if pagein was aborted due to lack of memory.
 */

bool
uvm_anon_pagein(struct vm_amap *amap, struct vm_anon *anon)
{
	struct vm_page *pg;
	struct uvm_object *uobj;

	KASSERT(rw_write_held(anon->an_lock));
	/* The anon must share its lock with the owning amap. */
	KASSERT(anon->an_lock == amap->am_lock);

	/*
	 * Get the page of the anon.
	 */

	switch (uvmfault_anonget(NULL, amap, anon)) {
	case 0:
		/* Success - we have the page. */
		KASSERT(rw_write_held(anon->an_lock));
		break;
	case EIO:
	case ERESTART:
		/*
		 * Nothing more to do on errors.  ERESTART means that the
		 * anon was freed.
		 */
		return false;
	case ENOLCK:
		/* uvmfault_anonget() was called without a uobj; see above. */
		panic("uvm_anon_pagein");
	default:
		/* Any other error (e.g. out of memory): abort pagein. */
		return true;
	}

	/*
	 * Mark the page as dirty and clear its swslot.
	 */

	pg = anon->an_page;
	uobj = pg->uobject;	/* may be non-NULL if the page is loaned */
	if (anon->an_swslot > 0) {
		uvm_swap_free(anon->an_swslot, 1);
	}
	anon->an_swslot = 0;
	/* The swap copy is gone, so the in-core page is now the only copy. */
	uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);

	/*
	 * Deactivate the page (to put it on a page queue).
	 */

	uvm_pagelock(pg);
	uvm_pagedeactivate(pg);
	uvm_pageunlock(pg);
	rw_exit(anon->an_lock);
	if (uobj) {
		/* uvmfault_anonget() left the object locked; release it. */
		rw_exit(uobj->vmobjlock);
	}
	return false;
}
|
2004-05-05 15:54:32 +04:00
|
|
|
|
2011-06-17 06:12:35 +04:00
|
|
|
/*
|
|
|
|
* uvm_anon_dropswap: release any swap resources from this anon.
|
|
|
|
*
|
|
|
|
* => anon must be locked or have a reference count of 0.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
uvm_anon_dropswap(struct vm_anon *anon)
|
|
|
|
{
|
2020-07-09 08:57:15 +03:00
|
|
|
UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
|
2011-06-17 06:12:35 +04:00
|
|
|
|
|
|
|
if (anon->an_swslot == 0)
|
|
|
|
return;
|
|
|
|
|
2020-02-24 15:38:57 +03:00
|
|
|
UVMHIST_LOG(maphist,"freeing swap for anon %#jx, paged to swslot %#jx",
|
Update the kernhist(9) kernel history code to address issues identified
in PR kern/52639, as well as some general cleaning-up...
(As proposed on tech-kern@ with additional changes and enhancements.)
Details of changes:
* All history arguments are now stored as uintmax_t values[1], both in
the kernel and in the structures used for exporting the history data
to userland via sysctl(9). This avoids problems on some architectures
where passing a 64-bit (or larger) value to printf(3) can cause it to
process the value as multiple arguments. (This can be particularly
problematic when printf()'s format string is not a literal, since in
that case the compiler cannot know how large each argument should be.)
* Update the data structures used for exporting kernel history data to
include a version number as well as the length of history arguments.
* All [2] existing users of kernhist(9) have had their format strings
updated. Each format specifier now includes an explicit length
modifier 'j' to refer to numeric values of the size of uintmax_t.
* All [2] existing users of kernhist(9) have had their format strings
updated to replace uses of "%p" with "%#jx", and the pointer
arguments are now cast to (uintptr_t) before being subsequently cast
to (uintmax_t). This is needed to avoid compiler warnings about
casting "pointer to integer of a different size."
* All [2] existing users of kernhist(9) have had instances of "%s" or
"%c" format strings replaced with numeric formats; several instances
of mis-match between format string and argument list have been fixed.
* vmstat(1) has been modified to handle the new size of arguments in the
history data as exported by sysctl(9).
* vmstat(1) now provides a warning message if the history requested with
the -u option does not exist (previously, this condition was silently
ignored, with only a single blank line being printed).
* vmstat(1) now checks the version and argument length included in the
data exported via sysctl(9) and exits if they do not match the values
with which vmstat was built.
* The kernhist(9) man-page has been updated to note the additional
requirements imposed on the format strings, along with several other
minor changes and enhancements.
[1] It would have been possible to use an explicit length (for example,
uint64_t) for the history arguments. But that would require another
"rototill" of all the users in the future when we add support for an
architecture that supports a larger size. Also, the printf(3) format
specifiers for explicitly-sized values, such as "%"PRIu64, are much
more verbose (and less aesthetically appealing, IMHO) than simply
using "%ju".
[2] I've tried very hard to find "all [the] existing users of kernhist(9)"
but it is possible that I've missed some of them. I would be glad to
update any stragglers that anyone identifies.
2017-10-28 03:37:11 +03:00
|
|
|
(uintptr_t)anon, anon->an_swslot, 0, 0);
|
2011-06-17 06:12:35 +04:00
|
|
|
uvm_swap_free(anon->an_swslot, 1);
|
|
|
|
anon->an_swslot = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|
2005-09-14 02:00:05 +04:00
|
|
|
|
2004-05-05 15:54:32 +04:00
|
|
|
/*
 * uvm_anon_release: release an anon and its page.
 *
 * => anon should not have any references.
 * => anon must be locked.
 */

void
uvm_anon_release(struct vm_anon *anon)
{
	struct vm_page *pg = anon->an_page;
	krwlock_t *lock;

	KASSERT(rw_write_held(anon->an_lock));
	/* Only called for an anon-owned page marked PG_RELEASED|PG_BUSY. */
	KASSERT(pg != NULL);
	KASSERT((pg->flags & PG_RELEASED) != 0);
	KASSERT((pg->flags & PG_BUSY) != 0);
	KASSERT(pg->uobject == NULL);
	KASSERT(pg->uanon == anon);
	KASSERT(pg->loan_count == 0);
	KASSERT(anon->an_ref == 0);

	/*
	 * If the pagedaemon had this page queued for pageout, account the
	 * pageout as done before freeing it.
	 */
	if ((pg->flags & PG_PAGEOUT) != 0) {
		pg->flags &= ~PG_PAGEOUT;
		uvm_pageout_done(1);
	}

	uvm_pagefree(pg);
	KASSERT(anon->an_page == NULL);
	/*
	 * Save the lock pointer: uvm_anfree() clears anon->an_lock and
	 * returns the anon to the pool, but we must still unlock and
	 * drop our hold on the lock object afterwards.
	 */
	lock = anon->an_lock;
	uvm_anfree(anon);
	rw_exit(lock);
	/* Note: extra reference is held for PG_RELEASED case. */
	rw_obj_free(lock);
}
|