2003-02-01 09:23:35 +03:00
|
|
|
/* $NetBSD: uvm_page.c,v 1.83 2003/02/01 06:23:55 thorpej Exp $ */
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2001-05-25 08:06:11 +04:00
|
|
|
/*
|
1998-02-05 09:25:08 +03:00
|
|
|
* Copyright (c) 1997 Charles D. Cranor and Washington University.
|
2001-05-25 08:06:11 +04:00
|
|
|
* Copyright (c) 1991, 1993, The Regents of the University of California.
|
1998-02-05 09:25:08 +03:00
|
|
|
*
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* This code is derived from software contributed to Berkeley by
|
|
|
|
* The Mach Operating System project at Carnegie-Mellon University.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. All advertising materials mentioning features or use of this software
|
|
|
|
* must display the following acknowledgement:
|
|
|
|
* This product includes software developed by Charles D. Cranor,
|
2001-05-25 08:06:11 +04:00
|
|
|
* Washington University, the University of California, Berkeley and
|
1998-02-05 09:25:08 +03:00
|
|
|
* its contributors.
|
|
|
|
* 4. Neither the name of the University nor the names of its contributors
|
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
* @(#)vm_page.c 8.3 (Berkeley) 3/21/94
|
1998-02-07 14:07:38 +03:00
|
|
|
* from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
|
1998-02-05 09:25:08 +03:00
|
|
|
*
|
|
|
|
*
|
|
|
|
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
|
|
|
|
* All rights reserved.
|
2001-05-25 08:06:11 +04:00
|
|
|
*
|
1998-02-05 09:25:08 +03:00
|
|
|
* Permission to use, copy, modify and distribute this software and
|
|
|
|
* its documentation is hereby granted, provided that both the copyright
|
|
|
|
* notice and this permission notice appear in all copies of the
|
|
|
|
* software, derivative works or modified versions, and any portions
|
|
|
|
* thereof, and that both notices appear in supporting documentation.
|
2001-05-25 08:06:11 +04:00
|
|
|
*
|
|
|
|
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
|
|
|
|
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
|
1998-02-05 09:25:08 +03:00
|
|
|
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
|
2001-05-25 08:06:11 +04:00
|
|
|
*
|
1998-02-05 09:25:08 +03:00
|
|
|
* Carnegie Mellon requests users of this software to return to
|
|
|
|
*
|
|
|
|
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
|
|
|
|
* School of Computer Science
|
|
|
|
* Carnegie Mellon University
|
|
|
|
* Pittsburgh PA 15213-3890
|
|
|
|
*
|
|
|
|
* any improvements or extensions that they make and grant Carnegie the
|
|
|
|
* rights to redistribute these changes.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_page.c: page ops.
|
|
|
|
*/
|
|
|
|
|
2001-11-10 10:36:59 +03:00
|
|
|
#include <sys/cdefs.h>
|
2003-02-01 09:23:35 +03:00
|
|
|
__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.83 2003/02/01 06:23:55 thorpej Exp $");
|
2001-11-10 10:36:59 +03:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
#include "opt_uvmhist.h"
|
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
|
|
|
#include <sys/malloc.h>
|
2000-05-27 01:19:19 +04:00
|
|
|
#include <sys/sched.h>
|
2000-11-27 11:39:39 +03:00
|
|
|
#include <sys/kernel.h>
|
2001-03-09 04:02:10 +03:00
|
|
|
#include <sys/vnode.h>
|
2001-09-28 15:59:51 +04:00
|
|
|
#include <sys/proc.h>
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
#define UVM_PAGE /* pull in uvm_page.h functions */
|
|
|
|
#include <uvm/uvm.h>
|
|
|
|
|
|
|
|
/*
|
|
|
|
* global vars... XXXCDC: move to uvm. structure.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* physical memory config is stored in vm_physmem.
|
|
|
|
*/
|
|
|
|
|
|
|
|
struct vm_physseg vm_physmem[VM_PHYSSEG_MAX]; /* XXXCDC: uvm.physmem */
|
|
|
|
int vm_nphysseg = 0; /* XXXCDC: uvm.nphysseg */
|
|
|
|
|
2000-04-24 21:12:00 +04:00
|
|
|
/*
|
2000-05-29 23:25:56 +04:00
|
|
|
* Some supported CPUs in a given architecture don't support all
|
|
|
|
* of the things necessary to do idle page zero'ing efficiently.
|
|
|
|
* We therefore provide a way to disable it from machdep code here.
|
2000-04-24 21:12:00 +04:00
|
|
|
*/
|
2000-11-27 11:39:39 +03:00
|
|
|
/*
|
|
|
|
* XXX disabled until we can find a way to do this without causing
|
|
|
|
* problems for either cpu caches or DMA latency.
|
|
|
|
*/
|
|
|
|
boolean_t vm_page_zero_enable = FALSE;
|
2000-04-24 21:12:00 +04:00
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
/*
|
|
|
|
* local variables
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* these variables record the values returned by vm_page_bootstrap,
|
|
|
|
* for debugging purposes. The implementation of uvm_pageboot_alloc
|
|
|
|
* and pmap_startup here also uses them internally.
|
|
|
|
*/
|
|
|
|
|
1998-08-13 06:10:37 +04:00
|
|
|
static vaddr_t virtual_space_start;
|
|
|
|
static vaddr_t virtual_space_end;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* we use a hash table with only one bucket during bootup. we will
|
2000-02-13 06:34:40 +03:00
|
|
|
* later rehash (resize) the hash table once the allocator is ready.
|
|
|
|
* we static allocate the one bootstrap bucket below...
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
static struct pglist uvm_bootbucket;
|
|
|
|
|
2001-05-02 05:22:19 +04:00
|
|
|
/*
|
|
|
|
* we allocate an initial number of page colors in uvm_page_init(),
|
|
|
|
* and remember them. We may re-color pages as cache sizes are
|
|
|
|
* discovered during the autoconfiguration phase. But we can never
|
|
|
|
* free the initial set of buckets, since they are allocated using
|
|
|
|
* uvm_pageboot_alloc().
|
|
|
|
*/
|
|
|
|
|
|
|
|
static boolean_t have_recolored_pages /* = FALSE */;
|
|
|
|
|
2003-02-01 09:23:35 +03:00
|
|
|
MALLOC_DEFINE(M_VMPAGE, "VM page", "VM page");
|
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
/*
|
|
|
|
* local prototypes
|
|
|
|
*/
|
|
|
|
|
|
|
|
static void uvm_pageinsert __P((struct vm_page *));
|
2000-11-27 11:39:39 +03:00
|
|
|
static void uvm_pageremove __P((struct vm_page *));
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* inline functions
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_pageinsert: insert a page in the object and the hash table
|
|
|
|
*
|
|
|
|
* => caller must lock object
|
|
|
|
* => caller must lock page queues
|
|
|
|
* => call should have already set pg's object and offset pointers
|
|
|
|
* and bumped the version counter
|
|
|
|
*/
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
__inline static void
|
|
|
|
uvm_pageinsert(pg)
|
|
|
|
struct vm_page *pg;
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
1998-03-09 03:58:55 +03:00
|
|
|
struct pglist *buck;
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
struct uvm_object *uobj = pg->uobject;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2001-03-09 04:02:10 +03:00
|
|
|
KASSERT((pg->flags & PG_TABLED) == 0);
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
buck = &uvm.page_hash[uvm_pagehash(uobj, pg->offset)];
|
1998-03-09 03:58:55 +03:00
|
|
|
simple_lock(&uvm.hashlock);
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
TAILQ_INSERT_TAIL(buck, pg, hashq);
|
1998-03-09 03:58:55 +03:00
|
|
|
simple_unlock(&uvm.hashlock);
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2002-06-20 19:05:29 +04:00
|
|
|
if (UVM_OBJ_IS_AOBJ(uobj)) {
|
|
|
|
uvmexp.anonpages++;
|
|
|
|
}
|
|
|
|
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
TAILQ_INSERT_TAIL(&uobj->memq, pg, listq);
|
1998-03-09 03:58:55 +03:00
|
|
|
pg->flags |= PG_TABLED;
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
uobj->uo_npages++;
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_page_remove: remove page from object and hash
|
|
|
|
*
|
|
|
|
* => caller must lock object
|
|
|
|
* => caller must lock page queues
|
|
|
|
*/
|
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
static __inline void
|
1998-03-09 03:58:55 +03:00
|
|
|
uvm_pageremove(pg)
|
|
|
|
struct vm_page *pg;
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
1998-03-09 03:58:55 +03:00
|
|
|
struct pglist *buck;
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
struct uvm_object *uobj = pg->uobject;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
KASSERT(pg->flags & PG_TABLED);
|
2002-10-30 05:48:28 +03:00
|
|
|
buck = &uvm.page_hash[uvm_pagehash(uobj, pg->offset)];
|
1998-03-09 03:58:55 +03:00
|
|
|
simple_lock(&uvm.hashlock);
|
|
|
|
TAILQ_REMOVE(buck, pg, hashq);
|
|
|
|
simple_unlock(&uvm.hashlock);
|
1998-02-05 09:25:08 +03:00
|
|
|
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
if (UVM_OBJ_IS_VTEXT(uobj)) {
|
2001-12-09 06:07:19 +03:00
|
|
|
uvmexp.execpages--;
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
} else if (UVM_OBJ_IS_VNODE(uobj)) {
|
2001-12-09 06:07:19 +03:00
|
|
|
uvmexp.filepages--;
|
2002-06-20 19:05:29 +04:00
|
|
|
} else if (UVM_OBJ_IS_AOBJ(uobj)) {
|
|
|
|
uvmexp.anonpages--;
|
2001-03-09 04:02:10 +03:00
|
|
|
}
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/* object should be locked */
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
uobj->uo_npages--;
|
|
|
|
TAILQ_REMOVE(&uobj->memq, pg, listq);
|
1998-03-09 03:58:55 +03:00
|
|
|
pg->flags &= ~PG_TABLED;
|
|
|
|
pg->uobject = NULL;
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
2001-05-02 05:22:19 +04:00
|
|
|
static void
|
|
|
|
uvm_page_init_buckets(struct pgfreelist *pgfl)
|
|
|
|
{
|
|
|
|
int color, i;
|
|
|
|
|
|
|
|
for (color = 0; color < uvmexp.ncolors; color++) {
|
|
|
|
for (i = 0; i < PGFL_NQUEUES; i++) {
|
|
|
|
TAILQ_INIT(&pgfl->pgfl_buckets[
|
|
|
|
color].pgfl_queues[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
/*
|
|
|
|
* uvm_page_init: init the page system. called from uvm_init().
|
2001-05-25 08:06:11 +04:00
|
|
|
*
|
1998-02-05 09:25:08 +03:00
|
|
|
* => we return the range of kernel virtual memory in kvm_startp/kvm_endp
|
|
|
|
*/
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
void
|
|
|
|
uvm_page_init(kvm_startp, kvm_endp)
|
1998-08-13 06:10:37 +04:00
|
|
|
vaddr_t *kvm_startp, *kvm_endp;
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
2001-05-02 05:22:19 +04:00
|
|
|
vsize_t freepages, pagecount, bucketcount, n;
|
|
|
|
struct pgflbucket *bucketarray;
|
2001-05-27 01:27:10 +04:00
|
|
|
struct vm_page *pagearray;
|
2002-11-09 23:06:07 +03:00
|
|
|
int lcv;
|
|
|
|
u_int i;
|
1998-08-13 06:10:37 +04:00
|
|
|
paddr_t paddr;
|
1998-03-09 03:58:55 +03:00
|
|
|
|
|
|
|
/*
|
2001-05-02 05:22:19 +04:00
|
|
|
* init the page queues and page queue locks, except the free
|
|
|
|
* list; we allocate that later (with the initial vm_page
|
|
|
|
* structures).
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
2001-03-09 04:02:10 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
TAILQ_INIT(&uvm.page_active);
|
2001-05-22 04:44:44 +04:00
|
|
|
TAILQ_INIT(&uvm.page_inactive);
|
1998-03-09 03:58:55 +03:00
|
|
|
simple_lock_init(&uvm.pageqlock);
|
|
|
|
simple_lock_init(&uvm.fpageqlock);
|
|
|
|
|
|
|
|
/*
|
2001-03-09 04:02:10 +03:00
|
|
|
* init the <obj,offset> => <page> hash table. for now
|
|
|
|
* we just have one bucket (the bootstrap bucket). later on we
|
2000-02-13 06:34:40 +03:00
|
|
|
* will allocate new buckets as we dynamically resize the hash table.
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
uvm.page_nhash = 1; /* 1 bucket */
|
2000-11-27 11:39:39 +03:00
|
|
|
uvm.page_hashmask = 0; /* mask for hash function */
|
1998-03-09 03:58:55 +03:00
|
|
|
uvm.page_hash = &uvm_bootbucket; /* install bootstrap bucket */
|
|
|
|
TAILQ_INIT(uvm.page_hash); /* init hash table */
|
|
|
|
simple_lock_init(&uvm.hashlock); /* init hash table lock */
|
|
|
|
|
2001-05-25 08:06:11 +04:00
|
|
|
/*
|
2001-03-09 04:02:10 +03:00
|
|
|
* allocate vm_page structures.
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* sanity check:
|
|
|
|
* before calling this function the MD code is expected to register
|
|
|
|
* some free RAM with the uvm_page_physload() function. our job
|
|
|
|
* now is to allocate vm_page structures for this memory.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (vm_nphysseg == 0)
|
2000-10-05 04:37:50 +04:00
|
|
|
panic("uvm_page_bootstrap: no memory pre-allocated");
|
2001-05-25 08:06:11 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
2001-05-25 08:06:11 +04:00
|
|
|
* first calculate the number of free pages...
|
1998-03-09 03:58:55 +03:00
|
|
|
*
|
|
|
|
* note that we use start/end rather than avail_start/avail_end.
|
|
|
|
* this allows us to allocate extra vm_page structures in case we
|
|
|
|
* want to return some memory to the pool after booting.
|
|
|
|
*/
|
2001-05-25 08:06:11 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
freepages = 0;
|
|
|
|
for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
|
|
|
|
freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);
|
|
|
|
|
2001-05-02 05:22:19 +04:00
|
|
|
/*
|
|
|
|
* Let MD code initialize the number of colors, or default
|
|
|
|
* to 1 color if MD code doesn't care.
|
|
|
|
*/
|
|
|
|
if (uvmexp.ncolors == 0)
|
|
|
|
uvmexp.ncolors = 1;
|
|
|
|
uvmexp.colormask = uvmexp.ncolors - 1;
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* we now know we have (PAGE_SIZE * freepages) bytes of memory we can
|
|
|
|
* use. for each page of memory we use we need a vm_page structure.
|
|
|
|
* thus, the total number of pages we can use is the total size of
|
|
|
|
* the memory divided by the PAGE_SIZE plus the size of the vm_page
|
|
|
|
* structure. we add one to freepages as a fudge factor to avoid
|
|
|
|
* truncation errors (since we can only allocate in terms of whole
|
|
|
|
* pages).
|
|
|
|
*/
|
2001-05-25 08:06:11 +04:00
|
|
|
|
2001-05-02 05:22:19 +04:00
|
|
|
bucketcount = uvmexp.ncolors * VM_NFREELIST;
|
1998-10-19 03:49:59 +04:00
|
|
|
pagecount = ((freepages + 1) << PAGE_SHIFT) /
|
1998-03-09 03:58:55 +03:00
|
|
|
(PAGE_SIZE + sizeof(struct vm_page));
|
2001-05-02 05:22:19 +04:00
|
|
|
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
bucketarray = (void *)uvm_pageboot_alloc((bucketcount *
|
2001-05-02 05:22:19 +04:00
|
|
|
sizeof(struct pgflbucket)) + (pagecount *
|
|
|
|
sizeof(struct vm_page)));
|
|
|
|
pagearray = (struct vm_page *)(bucketarray + bucketcount);
|
|
|
|
|
|
|
|
for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
|
|
|
|
uvm.page_free[lcv].pgfl_buckets =
|
|
|
|
(bucketarray + (lcv * uvmexp.ncolors));
|
|
|
|
uvm_page_init_buckets(&uvm.page_free[lcv]);
|
|
|
|
}
|
1998-08-10 02:36:37 +04:00
|
|
|
memset(pagearray, 0, pagecount * sizeof(struct vm_page));
|
2001-05-25 08:06:11 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
2001-03-09 04:02:10 +03:00
|
|
|
* init the vm_page structures and put them in the correct place.
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
|
|
|
|
n = vm_physmem[lcv].end - vm_physmem[lcv].start;
|
2001-03-09 04:02:10 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/* set up page array pointers */
|
|
|
|
vm_physmem[lcv].pgs = pagearray;
|
|
|
|
pagearray += n;
|
|
|
|
pagecount -= n;
|
|
|
|
vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);
|
|
|
|
|
1998-08-10 02:36:37 +04:00
|
|
|
/* init and free vm_pages (we've already zeroed them) */
|
1998-03-09 03:58:55 +03:00
|
|
|
paddr = ptoa(vm_physmem[lcv].start);
|
|
|
|
for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
|
|
|
|
vm_physmem[lcv].pgs[i].phys_addr = paddr;
|
2001-05-01 06:19:13 +04:00
|
|
|
#ifdef __HAVE_VM_PAGE_MD
|
2001-04-30 02:44:31 +04:00
|
|
|
VM_MDPAGE_INIT(&vm_physmem[lcv].pgs[i]);
|
2001-05-01 06:19:13 +04:00
|
|
|
#endif
|
1998-03-09 03:58:55 +03:00
|
|
|
if (atop(paddr) >= vm_physmem[lcv].avail_start &&
|
|
|
|
atop(paddr) <= vm_physmem[lcv].avail_end) {
|
|
|
|
uvmexp.npages++;
|
|
|
|
/* add page to free pool */
|
|
|
|
uvm_pagefree(&vm_physmem[lcv].pgs[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
2001-03-09 04:02:10 +03:00
|
|
|
* pass up the values of virtual_space_start and
|
1998-03-09 03:58:55 +03:00
|
|
|
* virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
|
|
|
|
* layers of the VM.
|
|
|
|
*/
|
|
|
|
|
|
|
|
*kvm_startp = round_page(virtual_space_start);
|
|
|
|
*kvm_endp = trunc_page(virtual_space_end);
|
|
|
|
|
|
|
|
/*
|
2001-03-09 04:02:10 +03:00
|
|
|
* init locks for kernel threads
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
simple_lock_init(&uvm.pagedaemon_lock);
|
2000-11-27 11:39:39 +03:00
|
|
|
simple_lock_init(&uvm.aiodoned_lock);
|
1998-03-09 03:58:55 +03:00
|
|
|
|
|
|
|
/*
|
2001-03-09 04:02:10 +03:00
|
|
|
* init various thresholds.
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
2001-03-09 04:02:10 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
uvmexp.reserve_pagedaemon = 1;
|
|
|
|
uvmexp.reserve_kernel = 5;
|
2001-03-09 04:02:10 +03:00
|
|
|
uvmexp.anonminpct = 10;
|
2001-12-09 06:07:19 +03:00
|
|
|
uvmexp.fileminpct = 10;
|
|
|
|
uvmexp.execminpct = 5;
|
|
|
|
uvmexp.anonmaxpct = 80;
|
|
|
|
uvmexp.filemaxpct = 50;
|
|
|
|
uvmexp.execmaxpct = 30;
|
2001-03-09 04:02:10 +03:00
|
|
|
uvmexp.anonmin = uvmexp.anonminpct * 256 / 100;
|
2001-12-09 06:07:19 +03:00
|
|
|
uvmexp.filemin = uvmexp.fileminpct * 256 / 100;
|
|
|
|
uvmexp.execmin = uvmexp.execminpct * 256 / 100;
|
|
|
|
uvmexp.anonmax = uvmexp.anonmaxpct * 256 / 100;
|
|
|
|
uvmexp.filemax = uvmexp.filemaxpct * 256 / 100;
|
|
|
|
uvmexp.execmax = uvmexp.execmaxpct * 256 / 100;
|
1998-03-09 03:58:55 +03:00
|
|
|
|
2000-04-24 21:12:00 +04:00
|
|
|
/*
|
2001-03-09 04:02:10 +03:00
|
|
|
* determine if we should zero pages in the idle loop.
|
2000-04-24 21:12:00 +04:00
|
|
|
*/
|
2001-03-09 04:02:10 +03:00
|
|
|
|
2000-04-24 21:12:00 +04:00
|
|
|
uvm.page_idle_zero = vm_page_zero_enable;
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* done!
|
|
|
|
*/
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2000-04-03 00:39:14 +04:00
|
|
|
uvm.page_init_done = TRUE;
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_setpagesize: set the page size
|
2001-05-25 08:06:11 +04:00
|
|
|
*
|
1998-02-05 09:25:08 +03:00
|
|
|
* => sets page_shift and page_mask from uvmexp.pagesize.
|
2001-05-25 08:06:11 +04:00
|
|
|
*/
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
void
|
|
|
|
uvm_setpagesize()
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
1998-03-09 03:58:55 +03:00
|
|
|
if (uvmexp.pagesize == 0)
|
|
|
|
uvmexp.pagesize = DEFAULT_PAGE_SIZE;
|
|
|
|
uvmexp.pagemask = uvmexp.pagesize - 1;
|
|
|
|
if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
|
|
|
|
panic("uvm_setpagesize: page size not a power of two");
|
|
|
|
for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
|
|
|
|
if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
|
|
|
|
break;
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_pageboot_alloc: steal memory from physmem for bootstrapping
|
|
|
|
*/
|
|
|
|
|
1998-08-13 06:10:37 +04:00
|
|
|
vaddr_t
|
1998-03-09 03:58:55 +03:00
|
|
|
uvm_pageboot_alloc(size)
|
1998-08-13 06:10:37 +04:00
|
|
|
vsize_t size;
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
1999-05-21 00:07:55 +04:00
|
|
|
static boolean_t initialized = FALSE;
|
2001-04-22 21:22:57 +04:00
|
|
|
vaddr_t addr;
|
|
|
|
#if !defined(PMAP_STEAL_MEMORY)
|
|
|
|
vaddr_t vaddr;
|
1998-08-13 06:10:37 +04:00
|
|
|
paddr_t paddr;
|
2001-04-22 21:22:57 +04:00
|
|
|
#endif
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
1999-05-21 00:07:55 +04:00
|
|
|
* on first call to this function, initialize ourselves.
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
1999-05-21 00:07:55 +04:00
|
|
|
if (initialized == FALSE) {
|
1998-03-09 03:58:55 +03:00
|
|
|
pmap_virtual_space(&virtual_space_start, &virtual_space_end);
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/* round it the way we like it */
|
|
|
|
virtual_space_start = round_page(virtual_space_start);
|
|
|
|
virtual_space_end = trunc_page(virtual_space_end);
|
1999-05-21 00:07:55 +04:00
|
|
|
|
|
|
|
initialized = TRUE;
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2001-04-22 21:22:57 +04:00
|
|
|
/* round to page size */
|
|
|
|
size = round_page(size);
|
|
|
|
|
|
|
|
#if defined(PMAP_STEAL_MEMORY)
|
|
|
|
|
2001-05-25 08:06:11 +04:00
|
|
|
/*
|
|
|
|
* defer bootstrap allocation to MD code (it may want to allocate
|
2001-04-22 21:22:57 +04:00
|
|
|
* from a direct-mapped segment). pmap_steal_memory should adjust
|
|
|
|
* virtual_space_start/virtual_space_end if necessary.
|
|
|
|
*/
|
|
|
|
|
|
|
|
addr = pmap_steal_memory(size, &virtual_space_start,
|
|
|
|
&virtual_space_end);
|
|
|
|
|
|
|
|
return(addr);
|
|
|
|
|
|
|
|
#else /* !PMAP_STEAL_MEMORY */
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* allocate virtual memory for this request
|
|
|
|
*/
|
1999-05-21 00:07:55 +04:00
|
|
|
if (virtual_space_start == virtual_space_end ||
|
1999-05-21 03:03:23 +04:00
|
|
|
(virtual_space_end - virtual_space_start) < size)
|
1999-05-21 00:07:55 +04:00
|
|
|
panic("uvm_pageboot_alloc: out of virtual space");
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
addr = virtual_space_start;
|
1999-05-21 03:03:23 +04:00
|
|
|
|
|
|
|
#ifdef PMAP_GROWKERNEL
|
|
|
|
/*
|
|
|
|
* If the kernel pmap can't map the requested space,
|
|
|
|
* then allocate more resources for it.
|
|
|
|
*/
|
|
|
|
if (uvm_maxkaddr < (addr + size)) {
|
|
|
|
uvm_maxkaddr = pmap_growkernel(addr + size);
|
|
|
|
if (uvm_maxkaddr < (addr + size))
|
|
|
|
panic("uvm_pageboot_alloc: pmap_growkernel() failed");
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
virtual_space_start += size;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-04-16 07:54:35 +04:00
|
|
|
/*
|
1998-03-09 03:58:55 +03:00
|
|
|
* allocate and mapin physical pages to back new virtual pages
|
|
|
|
*/
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
for (vaddr = round_page(addr) ; vaddr < addr + size ;
|
|
|
|
vaddr += PAGE_SIZE) {
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
if (!uvm_page_physget(&paddr))
|
|
|
|
panic("uvm_pageboot_alloc: out of memory");
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1999-05-25 05:34:13 +04:00
|
|
|
/*
|
|
|
|
* Note this memory is no longer managed, so using
|
|
|
|
* pmap_kenter is safe.
|
|
|
|
*/
|
1998-03-09 03:58:55 +03:00
|
|
|
pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
|
|
|
|
}
|
2001-09-11 01:19:08 +04:00
|
|
|
pmap_update(pmap_kernel());
|
1998-03-09 03:58:55 +03:00
|
|
|
return(addr);
|
1998-02-05 09:25:08 +03:00
|
|
|
#endif /* PMAP_STEAL_MEMORY */
|
|
|
|
}
|
|
|
|
|
|
|
|
#if !defined(PMAP_STEAL_MEMORY)
|
|
|
|
/*
|
|
|
|
* uvm_page_physget: "steal" one page from the vm_physmem structure.
|
|
|
|
*
|
|
|
|
* => attempt to allocate it off the end of a segment in which the "avail"
|
|
|
|
* values match the start/end values. if we can't do that, then we
|
|
|
|
* will advance both values (making them equal, and removing some
|
|
|
|
* vm_page structures from the non-avail area).
|
|
|
|
* => return false if out of memory.
|
|
|
|
*/
|
|
|
|
|
1999-12-01 19:08:32 +03:00
|
|
|
/* subroutine: try to allocate from memory chunks on the specified freelist */
|
|
|
|
static boolean_t uvm_page_physget_freelist __P((paddr_t *, int));
|
|
|
|
|
|
|
|
static boolean_t
|
|
|
|
uvm_page_physget_freelist(paddrp, freelist)
|
1998-08-13 06:10:37 +04:00
|
|
|
paddr_t *paddrp;
|
1999-12-01 19:08:32 +03:00
|
|
|
int freelist;
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
1998-03-09 03:58:55 +03:00
|
|
|
int lcv, x;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/* pass 1: try allocating from a matching end */
|
1998-02-05 09:25:08 +03:00
|
|
|
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
|
1998-03-09 03:58:55 +03:00
|
|
|
for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
|
1998-02-05 09:25:08 +03:00
|
|
|
#else
|
1998-03-09 03:58:55 +03:00
|
|
|
for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
|
1998-02-05 09:25:08 +03:00
|
|
|
#endif
|
1998-03-09 03:58:55 +03:00
|
|
|
{
|
|
|
|
|
2000-04-03 00:39:14 +04:00
|
|
|
if (uvm.page_init_done == TRUE)
|
2000-10-05 04:37:50 +04:00
|
|
|
panic("uvm_page_physget: called _after_ bootstrap");
|
1998-03-09 03:58:55 +03:00
|
|
|
|
1999-12-01 19:08:32 +03:00
|
|
|
if (vm_physmem[lcv].free_list != freelist)
|
|
|
|
continue;
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/* try from front */
|
|
|
|
if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
|
|
|
|
vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
|
|
|
|
*paddrp = ptoa(vm_physmem[lcv].avail_start);
|
|
|
|
vm_physmem[lcv].avail_start++;
|
|
|
|
vm_physmem[lcv].start++;
|
|
|
|
/* nothing left? nuke it */
|
|
|
|
if (vm_physmem[lcv].avail_start ==
|
|
|
|
vm_physmem[lcv].end) {
|
|
|
|
if (vm_nphysseg == 1)
|
2000-10-05 04:37:50 +04:00
|
|
|
panic("vum_page_physget: out of memory!");
|
1998-03-09 03:58:55 +03:00
|
|
|
vm_nphysseg--;
|
|
|
|
for (x = lcv ; x < vm_nphysseg ; x++)
|
|
|
|
/* structure copy */
|
|
|
|
vm_physmem[x] = vm_physmem[x+1];
|
|
|
|
}
|
|
|
|
return (TRUE);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* try from rear */
|
|
|
|
if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
|
|
|
|
vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
|
|
|
|
*paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
|
|
|
|
vm_physmem[lcv].avail_end--;
|
|
|
|
vm_physmem[lcv].end--;
|
|
|
|
/* nothing left? nuke it */
|
|
|
|
if (vm_physmem[lcv].avail_end ==
|
|
|
|
vm_physmem[lcv].start) {
|
|
|
|
if (vm_nphysseg == 1)
|
2000-10-05 04:37:50 +04:00
|
|
|
panic("uvm_page_physget: out of memory!");
|
1998-03-09 03:58:55 +03:00
|
|
|
vm_nphysseg--;
|
|
|
|
for (x = lcv ; x < vm_nphysseg ; x++)
|
|
|
|
/* structure copy */
|
|
|
|
vm_physmem[x] = vm_physmem[x+1];
|
|
|
|
}
|
|
|
|
return (TRUE);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* pass2: forget about matching ends, just allocate something */
|
1998-02-05 09:25:08 +03:00
|
|
|
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
|
1998-03-09 03:58:55 +03:00
|
|
|
for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
|
1998-02-05 09:25:08 +03:00
|
|
|
#else
|
1998-03-09 03:58:55 +03:00
|
|
|
for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
|
1998-02-05 09:25:08 +03:00
|
|
|
#endif
|
1998-03-09 03:58:55 +03:00
|
|
|
{
|
|
|
|
|
|
|
|
/* any room in this bank? */
|
|
|
|
if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
|
|
|
|
continue; /* nope */
|
|
|
|
|
|
|
|
*paddrp = ptoa(vm_physmem[lcv].avail_start);
|
|
|
|
vm_physmem[lcv].avail_start++;
|
|
|
|
/* truncate! */
|
|
|
|
vm_physmem[lcv].start = vm_physmem[lcv].avail_start;
|
|
|
|
|
|
|
|
/* nothing left? nuke it */
|
|
|
|
if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
|
|
|
|
if (vm_nphysseg == 1)
|
2000-10-05 04:37:50 +04:00
|
|
|
panic("uvm_page_physget: out of memory!");
|
1998-03-09 03:58:55 +03:00
|
|
|
vm_nphysseg--;
|
|
|
|
for (x = lcv ; x < vm_nphysseg ; x++)
|
|
|
|
/* structure copy */
|
|
|
|
vm_physmem[x] = vm_physmem[x+1];
|
|
|
|
}
|
|
|
|
return (TRUE);
|
|
|
|
}
|
|
|
|
|
|
|
|
return (FALSE); /* whoops! */
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
1999-12-01 19:08:32 +03:00
|
|
|
|
|
|
|
boolean_t
|
|
|
|
uvm_page_physget(paddrp)
|
|
|
|
paddr_t *paddrp;
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* try in the order of freelist preference */
|
|
|
|
for (i = 0; i < VM_NFREELIST; i++)
|
|
|
|
if (uvm_page_physget_freelist(paddrp, i) == TRUE)
|
|
|
|
return (TRUE);
|
|
|
|
return (FALSE);
|
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
#endif /* PMAP_STEAL_MEMORY */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_page_physload: load physical memory into VM system
|
|
|
|
*
|
|
|
|
* => all args are PFs
|
|
|
|
* => all pages in start/end get vm_page structures
|
|
|
|
* => areas marked by avail_start/avail_end get added to the free page pool
|
|
|
|
* => we are limited to VM_PHYSSEG_MAX physical memory segments
|
|
|
|
*/
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
void
|
Add support for multiple memory free lists. There is at least one
default free list, and 0 - N additional free list, in order of descending
priority.
A new page allocation function, uvm_pagealloc_strat(), has been added,
providing three page allocation strategies:
- normal: high -> low priority free list walk, taking the
page off the first free list that has one.
- only: attempt to allocate a page only from the specified free
list, failing if that free list has none available.
- fallback: if `only' fails, fall back on `normal'.
uvm_pagealloc(...) is provided for normal use (and is a synonym for
uvm_pagealloc_strat(..., UVM_PGA_STRAT_NORMAL, 0); the free list argument
is ignored for the `normal' case).
uvm_page_physload() now specified which free list the pages will be
loaded onto. This means that some platforms which have multiple physical
memory segments may define additional vm_physsegs if they wish to break
individual physical segments into differing priorities.
Machine-dependent code must define _at least_ the following constants
in <machine/vmparam.h>:
VM_NFREELIST: the number of free lists the system will have
VM_FREELIST_DEFAULT: the default freelist (should always be 0,
but is defined in machdep code so that it's with all of the
other free list-related constants).
Additional free list names may be defined by machine-dependent code, but
they will only be used by machine-dependent code (e.g. for loading the
vm_physsegs).
1998-07-08 08:28:27 +04:00
|
|
|
uvm_page_physload(start, end, avail_start, avail_end, free_list)
|
1999-12-30 19:09:47 +03:00
|
|
|
paddr_t start, end, avail_start, avail_end;
|
Add support for multiple memory free lists. There is at least one
default free list, and 0 - N additional free list, in order of descending
priority.
A new page allocation function, uvm_pagealloc_strat(), has been added,
providing three page allocation strategies:
- normal: high -> low priority free list walk, taking the
page off the first free list that has one.
- only: attempt to allocate a page only from the specified free
list, failing if that free list has none available.
- fallback: if `only' fails, fall back on `normal'.
uvm_pagealloc(...) is provided for normal use (and is a synonym for
uvm_pagealloc_strat(..., UVM_PGA_STRAT_NORMAL, 0); the free list argument
is ignored for the `normal' case).
uvm_page_physload() now specified which free list the pages will be
loaded onto. This means that some platforms which have multiple physical
memory segments may define additional vm_physsegs if they wish to break
individual physical segments into differing priorities.
Machine-dependent code must define _at least_ the following constants
in <machine/vmparam.h>:
VM_NFREELIST: the number of free lists the system will have
VM_FREELIST_DEFAULT: the default freelist (should always be 0,
but is defined in machdep code so that it's with all of the
other free list-related constants).
Additional free list names may be defined by machine-dependent code, but
they will only be used by machine-dependent code (e.g. for loading the
vm_physsegs).
1998-07-08 08:28:27 +04:00
|
|
|
int free_list;
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
1998-08-13 06:10:37 +04:00
|
|
|
int preload, lcv;
|
|
|
|
psize_t npages;
|
1998-03-09 03:58:55 +03:00
|
|
|
struct vm_page *pgs;
|
|
|
|
struct vm_physseg *ps;
|
|
|
|
|
|
|
|
if (uvmexp.pagesize == 0)
|
2000-10-05 04:37:50 +04:00
|
|
|
panic("uvm_page_physload: page size not set!");
|
Add support for multiple memory free lists. There is at least one
default free list, and 0 - N additional free list, in order of descending
priority.
A new page allocation function, uvm_pagealloc_strat(), has been added,
providing three page allocation strategies:
- normal: high -> low priority free list walk, taking the
page off the first free list that has one.
- only: attempt to allocate a page only from the specified free
list, failing if that free list has none available.
- fallback: if `only' fails, fall back on `normal'.
uvm_pagealloc(...) is provided for normal use (and is a synonym for
uvm_pagealloc_strat(..., UVM_PGA_STRAT_NORMAL, 0); the free list argument
is ignored for the `normal' case).
uvm_page_physload() now specified which free list the pages will be
loaded onto. This means that some platforms which have multiple physical
memory segments may define additional vm_physsegs if they wish to break
individual physical segments into differing priorities.
Machine-dependent code must define _at least_ the following constants
in <machine/vmparam.h>:
VM_NFREELIST: the number of free lists the system will have
VM_FREELIST_DEFAULT: the default freelist (should always be 0,
but is defined in machdep code so that it's with all of the
other free list-related constants).
Additional free list names may be defined by machine-dependent code, but
they will only be used by machine-dependent code (e.g. for loading the
vm_physsegs).
1998-07-08 08:28:27 +04:00
|
|
|
if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
|
2002-09-27 19:35:29 +04:00
|
|
|
panic("uvm_page_physload: bad free list %d", free_list);
|
1999-11-24 21:28:49 +03:00
|
|
|
if (start >= end)
|
|
|
|
panic("uvm_page_physload: start >= end");
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* do we have room?
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
if (vm_nphysseg == VM_PHYSSEG_MAX) {
|
2000-10-05 04:37:50 +04:00
|
|
|
printf("uvm_page_physload: unable to load physical memory "
|
1998-03-09 03:58:55 +03:00
|
|
|
"segment\n");
|
2000-06-09 08:43:19 +04:00
|
|
|
printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
|
|
|
|
VM_PHYSSEG_MAX, (long long)start, (long long)end);
|
2000-11-09 22:15:28 +03:00
|
|
|
printf("\tincrease VM_PHYSSEG_MAX\n");
|
1998-03-09 03:58:55 +03:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
|
|
|
|
* called yet, so malloc is not available).
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
|
|
|
|
if (vm_physmem[lcv].pgs)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
preload = (lcv == vm_nphysseg);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* if VM is already running, attempt to malloc() vm_page structures
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
if (!preload) {
|
1998-02-05 09:25:08 +03:00
|
|
|
#if defined(VM_PHYSSEG_NOADD)
|
2000-10-05 04:37:50 +04:00
|
|
|
panic("uvm_page_physload: tried to add RAM after vm_mem_init");
|
1998-02-05 09:25:08 +03:00
|
|
|
#else
|
1998-03-09 03:58:55 +03:00
|
|
|
/* XXXCDC: need some sort of lockout for this case */
|
1998-08-13 06:10:37 +04:00
|
|
|
paddr_t paddr;
|
1998-03-09 03:58:55 +03:00
|
|
|
npages = end - start; /* # of pages */
|
2000-08-03 00:25:11 +04:00
|
|
|
pgs = malloc(sizeof(struct vm_page) * npages,
|
|
|
|
M_VMPAGE, M_NOWAIT);
|
1998-03-09 03:58:55 +03:00
|
|
|
if (pgs == NULL) {
|
2000-10-05 04:37:50 +04:00
|
|
|
printf("uvm_page_physload: can not malloc vm_page "
|
1998-03-09 03:58:55 +03:00
|
|
|
"structs for segment\n");
|
|
|
|
printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
|
|
|
|
return;
|
|
|
|
}
|
Add support for multiple memory free lists. There is at least one
default free list, and 0 - N additional free list, in order of descending
priority.
A new page allocation function, uvm_pagealloc_strat(), has been added,
providing three page allocation strategies:
- normal: high -> low priority free list walk, taking the
page off the first free list that has one.
- only: attempt to allocate a page only from the specified free
list, failing if that free list has none available.
- fallback: if `only' fails, fall back on `normal'.
uvm_pagealloc(...) is provided for normal use (and is a synonym for
uvm_pagealloc_strat(..., UVM_PGA_STRAT_NORMAL, 0); the free list argument
is ignored for the `normal' case).
uvm_page_physload() now specifies which free list the pages will be
loaded onto. This means that some platforms which have multiple physical
memory segments may define additional vm_physsegs if they wish to break
individual physical segments into differing priorities.
Machine-dependent code must define _at least_ the following constants
in <machine/vmparam.h>:
VM_NFREELIST: the number of free lists the system will have
VM_FREELIST_DEFAULT: the default freelist (should always be 0,
but is defined in machdep code so that it's with all of the
other free list-related constants).
Additional free list names may be defined by machine-dependent code, but
they will only be used by machine-dependent code (e.g. for loading the
vm_physsegs).
1998-07-08 08:28:27 +04:00
|
|
|
/* zero data, init phys_addr and free_list, and free pages */
|
1998-08-10 02:36:37 +04:00
|
|
|
memset(pgs, 0, sizeof(struct vm_page) * npages);
|
1998-03-09 03:58:55 +03:00
|
|
|
for (lcv = 0, paddr = ptoa(start) ;
|
|
|
|
lcv < npages ; lcv++, paddr += PAGE_SIZE) {
|
|
|
|
pgs[lcv].phys_addr = paddr;
|
Add support for multiple memory free lists. There is at least one
default free list, and 0 - N additional free list, in order of descending
priority.
A new page allocation function, uvm_pagealloc_strat(), has been added,
providing three page allocation strategies:
- normal: high -> low priority free list walk, taking the
page off the first free list that has one.
- only: attempt to allocate a page only from the specified free
list, failing if that free list has none available.
- fallback: if `only' fails, fall back on `normal'.
uvm_pagealloc(...) is provided for normal use (and is a synonym for
uvm_pagealloc_strat(..., UVM_PGA_STRAT_NORMAL, 0); the free list argument
is ignored for the `normal' case).
uvm_page_physload() now specified which free list the pages will be
loaded onto. This means that some platforms which have multiple physical
memory segments may define additional vm_physsegs if they wish to break
individual physical segments into differing priorities.
Machine-dependent code must define _at least_ the following constants
in <machine/vmparam.h>:
VM_NFREELIST: the number of free lists the system will have
VM_FREELIST_DEFAULT: the default freelist (should always be 0,
but is defined in machdep code so that it's with all of the
other free list-related constants).
Additional free list names may be defined by machine-dependent code, but
they will only be used by machine-dependent code (e.g. for loading the
vm_physsegs).
1998-07-08 08:28:27 +04:00
|
|
|
pgs[lcv].free_list = free_list;
|
1998-03-09 03:58:55 +03:00
|
|
|
if (atop(paddr) >= avail_start &&
|
|
|
|
atop(paddr) <= avail_end)
|
1998-03-31 07:04:59 +04:00
|
|
|
uvm_pagefree(&pgs[lcv]);
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
/* XXXCDC: incomplete: need to update uvmexp.free, what else? */
|
|
|
|
/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
|
1998-02-05 09:25:08 +03:00
|
|
|
#endif
|
1998-03-09 03:58:55 +03:00
|
|
|
} else {
|
|
|
|
pgs = NULL;
|
|
|
|
npages = 0;
|
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* now insert us in the proper place in vm_physmem[]
|
|
|
|
*/
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
|
1998-03-09 03:58:55 +03:00
|
|
|
/* random: put it at the end (easy!) */
|
|
|
|
ps = &vm_physmem[vm_nphysseg];
|
1998-02-05 09:25:08 +03:00
|
|
|
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
|
1998-03-09 03:58:55 +03:00
|
|
|
{
|
|
|
|
int x;
|
|
|
|
/* sort by address for binary search */
|
|
|
|
for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
|
|
|
|
if (start < vm_physmem[lcv].start)
|
|
|
|
break;
|
|
|
|
ps = &vm_physmem[lcv];
|
|
|
|
/* move back other entries, if necessary ... */
|
|
|
|
for (x = vm_nphysseg ; x > lcv ; x--)
|
|
|
|
/* structure copy */
|
|
|
|
vm_physmem[x] = vm_physmem[x - 1];
|
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
|
1998-03-09 03:58:55 +03:00
|
|
|
{
|
|
|
|
int x;
|
|
|
|
/* sort by largest segment first */
|
|
|
|
for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
|
|
|
|
if ((end - start) >
|
|
|
|
(vm_physmem[lcv].end - vm_physmem[lcv].start))
|
|
|
|
break;
|
|
|
|
ps = &vm_physmem[lcv];
|
|
|
|
/* move back other entries, if necessary ... */
|
|
|
|
for (x = vm_nphysseg ; x > lcv ; x--)
|
|
|
|
/* structure copy */
|
|
|
|
vm_physmem[x] = vm_physmem[x - 1];
|
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
#else
|
2000-10-05 04:37:50 +04:00
|
|
|
panic("uvm_page_physload: unknown physseg strategy selected!");
|
1998-02-05 09:25:08 +03:00
|
|
|
#endif
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
ps->start = start;
|
|
|
|
ps->end = end;
|
|
|
|
ps->avail_start = avail_start;
|
|
|
|
ps->avail_end = avail_end;
|
|
|
|
if (preload) {
|
|
|
|
ps->pgs = NULL;
|
|
|
|
} else {
|
|
|
|
ps->pgs = pgs;
|
|
|
|
ps->lastpg = pgs + npages - 1;
|
|
|
|
}
|
Add support for multiple memory free lists. There is at least one
default free list, and 0 - N additional free list, in order of descending
priority.
A new page allocation function, uvm_pagealloc_strat(), has been added,
providing three page allocation strategies:
- normal: high -> low priority free list walk, taking the
page off the first free list that has one.
- only: attempt to allocate a page only from the specified free
list, failing if that free list has none available.
- fallback: if `only' fails, fall back on `normal'.
uvm_pagealloc(...) is provided for normal use (and is a synonym for
uvm_pagealloc_strat(..., UVM_PGA_STRAT_NORMAL, 0); the free list argument
is ignored for the `normal' case).
uvm_page_physload() now specified which free list the pages will be
loaded onto. This means that some platforms which have multiple physical
memory segments may define additional vm_physsegs if they wish to break
individual physical segments into differing priorities.
Machine-dependent code must define _at least_ the following constants
in <machine/vmparam.h>:
VM_NFREELIST: the number of free lists the system will have
VM_FREELIST_DEFAULT: the default freelist (should always be 0,
but is defined in machdep code so that it's with all of the
other free list-related constants).
Additional free list names may be defined by machine-dependent code, but
they will only be used by machine-dependent code (e.g. for loading the
vm_physsegs).
1998-07-08 08:28:27 +04:00
|
|
|
ps->free_list = free_list;
|
1998-03-09 03:58:55 +03:00
|
|
|
vm_nphysseg++;
|
|
|
|
|
|
|
|
if (!preload)
|
|
|
|
uvm_page_rehash();
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_page_rehash: reallocate hash table based on number of free pages.
|
|
|
|
*/
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
void
|
|
|
|
uvm_page_rehash()
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
int freepages, lcv, bucketcount, oldcount;
|
1998-03-09 03:58:55 +03:00
|
|
|
struct pglist *newbuckets, *oldbuckets;
|
|
|
|
struct vm_page *pg;
|
2000-02-13 06:34:40 +03:00
|
|
|
size_t newsize, oldsize;
|
1998-03-09 03:58:55 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* compute number of pages that can go in the free pool
|
|
|
|
*/
|
|
|
|
|
|
|
|
freepages = 0;
|
|
|
|
for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
|
|
|
|
freepages +=
|
|
|
|
(vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* compute number of buckets needed for this number of pages
|
|
|
|
*/
|
|
|
|
|
|
|
|
bucketcount = 1;
|
|
|
|
while (bucketcount < freepages)
|
|
|
|
bucketcount = bucketcount * 2;
|
|
|
|
|
|
|
|
/*
|
2000-02-13 06:34:40 +03:00
|
|
|
* compute the size of the current table and new table.
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
|
|
|
|
2000-02-13 06:34:40 +03:00
|
|
|
oldbuckets = uvm.page_hash;
|
|
|
|
oldcount = uvm.page_nhash;
|
|
|
|
oldsize = round_page(sizeof(struct pglist) * oldcount);
|
|
|
|
newsize = round_page(sizeof(struct pglist) * bucketcount);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* allocate the new buckets
|
|
|
|
*/
|
|
|
|
|
|
|
|
newbuckets = (struct pglist *) uvm_km_alloc(kernel_map, newsize);
|
1998-03-09 03:58:55 +03:00
|
|
|
if (newbuckets == NULL) {
|
2000-02-13 06:34:40 +03:00
|
|
|
printf("uvm_page_physrehash: WARNING: could not grow page "
|
1998-03-09 03:58:55 +03:00
|
|
|
"hash table\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
for (lcv = 0 ; lcv < bucketcount ; lcv++)
|
|
|
|
TAILQ_INIT(&newbuckets[lcv]);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* now replace the old buckets with the new ones and rehash everything
|
|
|
|
*/
|
|
|
|
|
|
|
|
simple_lock(&uvm.hashlock);
|
|
|
|
uvm.page_hash = newbuckets;
|
|
|
|
uvm.page_nhash = bucketcount;
|
|
|
|
uvm.page_hashmask = bucketcount - 1; /* power of 2 */
|
|
|
|
|
|
|
|
/* ... and rehash */
|
|
|
|
for (lcv = 0 ; lcv < oldcount ; lcv++) {
|
|
|
|
while ((pg = oldbuckets[lcv].tqh_first) != NULL) {
|
|
|
|
TAILQ_REMOVE(&oldbuckets[lcv], pg, hashq);
|
|
|
|
TAILQ_INSERT_TAIL(
|
|
|
|
&uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)],
|
|
|
|
pg, hashq);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
simple_unlock(&uvm.hashlock);
|
|
|
|
|
|
|
|
/*
|
2000-02-13 06:34:40 +03:00
|
|
|
* free old bucket array if is not the boot-time table
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
if (oldbuckets != &uvm_bootbucket)
|
2000-02-13 06:34:40 +03:00
|
|
|
uvm_km_free(kernel_map, (vaddr_t) oldbuckets, oldsize);
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
2001-05-02 05:22:19 +04:00
|
|
|
/*
 * uvm_page_recolor: Recolor the pages if the new bucket count is
 * larger than the old one.
 *
 * => no-op if newncolors does not exceed the current color count.
 * => before uvm.page_init_done, only uvmexp.ncolors is updated; no
 *    buckets exist yet to migrate.  (colormask is not touched here —
 *    presumably initialized later during page init; NOTE(review): confirm.)
 * => otherwise allocates a new bucket array, re-checks the color count
 *    under the free-page-queue lock, migrates every free page into the
 *    bucket matching its color, and installs the new buckets.
 * => the first successful recolor deliberately does NOT free the old
 *    bucket array (have_recolored_pages gate) — the initial array is
 *    not a malloc'd M_VMPAGE allocation, so free() would be wrong on it.
 */

void
uvm_page_recolor(int newncolors)
{
	struct pgflbucket *bucketarray, *oldbucketarray;
	struct pgfreelist pgfl;
	struct vm_page *pg;
	vsize_t bucketcount;
	int s, lcv, color, i, ocolors;

	/* shrinking (or keeping) the color count is a no-op */
	if (newncolors <= uvmexp.ncolors)
		return;

	/* too early for bucket migration: just record the new count */
	if (uvm.page_init_done == FALSE) {
		uvmexp.ncolors = newncolors;
		return;
	}

	/* one bucket per (freelist, color) pair */
	bucketcount = newncolors * VM_NFREELIST;
	bucketarray = malloc(bucketcount * sizeof(struct pgflbucket),
	    M_VMPAGE, M_NOWAIT);
	if (bucketarray == NULL) {
		printf("WARNING: unable to allocate %ld page color buckets\n",
		    (long) bucketcount);
		return;
	}

	s = uvm_lock_fpageq();

	/* Make sure we should still do this. */
	if (newncolors <= uvmexp.ncolors) {
		/* lost a race with another recolor; back out */
		uvm_unlock_fpageq(s);
		free(bucketarray, M_VMPAGE);
		return;
	}

	/* all freelists share one backing array; [0] locates its base */
	oldbucketarray = uvm.page_free[0].pgfl_buckets;
	ocolors = uvmexp.ncolors;

	uvmexp.ncolors = newncolors;
	uvmexp.colormask = uvmexp.ncolors - 1;

	/*
	 * for each freelist: point pgfl at its slice of the new array,
	 * init the buckets, then drain every old (color, queue) list
	 * into the bucket chosen by VM_PGCOLOR_BUCKET(pg).
	 */
	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
		pgfl.pgfl_buckets = (bucketarray + (lcv * newncolors));
		uvm_page_init_buckets(&pgfl);
		for (color = 0; color < ocolors; color++) {
			for (i = 0; i < PGFL_NQUEUES; i++) {
				while ((pg = TAILQ_FIRST(&uvm.page_free[
				    lcv].pgfl_buckets[color].pgfl_queues[i]))
				    != NULL) {
					TAILQ_REMOVE(&uvm.page_free[
					    lcv].pgfl_buckets[
					    color].pgfl_queues[i], pg, pageq);
					TAILQ_INSERT_TAIL(&pgfl.pgfl_buckets[
					    VM_PGCOLOR_BUCKET(pg)].pgfl_queues[
					    i], pg, pageq);
				}
			}
		}
		uvm.page_free[lcv].pgfl_buckets = pgfl.pgfl_buckets;
	}

	/* only free the previous array if it came from a prior recolor */
	if (have_recolored_pages) {
		uvm_unlock_fpageq(s);
		free(oldbucketarray, M_VMPAGE);
		return;
	}

	have_recolored_pages = TRUE;
	uvm_unlock_fpageq(s);
}
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2001-04-29 08:23:20 +04:00
|
|
|
/*
|
|
|
|
* uvm_pagealloc_pgfl: helper routine for uvm_pagealloc_strat
|
|
|
|
*/
|
|
|
|
|
|
|
|
static __inline struct vm_page *
|
|
|
|
uvm_pagealloc_pgfl(struct pgfreelist *pgfl, int try1, int try2,
|
2001-11-06 09:31:06 +03:00
|
|
|
int *trycolorp)
|
2001-04-29 08:23:20 +04:00
|
|
|
{
|
|
|
|
struct pglist *freeq;
|
|
|
|
struct vm_page *pg;
|
2001-05-01 18:02:56 +04:00
|
|
|
int color, trycolor = *trycolorp;
|
2001-04-29 08:23:20 +04:00
|
|
|
|
2001-05-01 18:02:56 +04:00
|
|
|
color = trycolor;
|
|
|
|
do {
|
2001-04-29 08:23:20 +04:00
|
|
|
if ((pg = TAILQ_FIRST((freeq =
|
|
|
|
&pgfl->pgfl_buckets[color].pgfl_queues[try1]))) != NULL)
|
|
|
|
goto gotit;
|
|
|
|
if ((pg = TAILQ_FIRST((freeq =
|
|
|
|
&pgfl->pgfl_buckets[color].pgfl_queues[try2]))) != NULL)
|
|
|
|
goto gotit;
|
2001-05-02 05:22:19 +04:00
|
|
|
color = (color + 1) & uvmexp.colormask;
|
2001-05-01 18:02:56 +04:00
|
|
|
} while (color != trycolor);
|
2001-04-29 08:23:20 +04:00
|
|
|
|
|
|
|
return (NULL);
|
|
|
|
|
|
|
|
gotit:
|
|
|
|
TAILQ_REMOVE(freeq, pg, pageq);
|
|
|
|
uvmexp.free--;
|
|
|
|
|
|
|
|
/* update zero'd page count */
|
|
|
|
if (pg->flags & PG_ZERO)
|
|
|
|
uvmexp.zeropages--;
|
|
|
|
|
|
|
|
if (color == trycolor)
|
|
|
|
uvmexp.colorhit++;
|
|
|
|
else {
|
|
|
|
uvmexp.colormiss++;
|
|
|
|
*trycolorp = color;
|
|
|
|
}
|
|
|
|
|
|
|
|
return (pg);
|
|
|
|
}
|
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
/*
|
Add support for multiple memory free lists. There is at least one
default free list, and 0 - N additional free list, in order of descending
priority.
A new page allocation function, uvm_pagealloc_strat(), has been added,
providing three page allocation strategies:
- normal: high -> low priority free list walk, taking the
page off the first free list that has one.
- only: attempt to allocate a page only from the specified free
list, failing if that free list has none available.
- fallback: if `only' fails, fall back on `normal'.
uvm_pagealloc(...) is provided for normal use (and is a synonym for
uvm_pagealloc_strat(..., UVM_PGA_STRAT_NORMAL, 0); the free list argument
is ignored for the `normal' case).
uvm_page_physload() now specified which free list the pages will be
loaded onto. This means that some platforms which have multiple physical
memory segments may define additional vm_physsegs if they wish to break
individual physical segments into differing priorities.
Machine-dependent code must define _at least_ the following constants
in <machine/vmparam.h>:
VM_NFREELIST: the number of free lists the system will have
VM_FREELIST_DEFAULT: the default freelist (should always be 0,
but is defined in machdep code so that it's with all of the
other free list-related constants).
Additional free list names may be defined by machine-dependent code, but
they will only be used by machine-dependent code (e.g. for loading the
vm_physsegs).
1998-07-08 08:28:27 +04:00
|
|
|
* uvm_pagealloc_strat: allocate vm_page from a particular free list.
|
1998-02-05 09:25:08 +03:00
|
|
|
*
|
|
|
|
* => return null if no pages free
|
|
|
|
* => wake up pagedaemon if number of free pages drops below low water mark
|
|
|
|
* => if obj != NULL, obj must be locked (to put in hash)
|
|
|
|
* => if anon != NULL, anon must be locked (to put in anon)
|
|
|
|
* => only one of obj or anon can be non-null
|
|
|
|
* => caller must activate/deactivate page if it is not wired.
|
Add support for multiple memory free lists. There is at least one
default free list, and 0 - N additional free list, in order of descending
priority.
A new page allocation function, uvm_pagealloc_strat(), has been added,
providing three page allocation strategies:
- normal: high -> low priority free list walk, taking the
page off the first free list that has one.
- only: attempt to allocate a page only from the specified free
list, failing if that free list has none available.
- fallback: if `only' fails, fall back on `normal'.
uvm_pagealloc(...) is provided for normal use (and is a synonym for
uvm_pagealloc_strat(..., UVM_PGA_STRAT_NORMAL, 0); the free list argument
is ignored for the `normal' case).
uvm_page_physload() now specified which free list the pages will be
loaded onto. This means that some platforms which have multiple physical
memory segments may define additional vm_physsegs if they wish to break
individual physical segments into differing priorities.
Machine-dependent code must define _at least_ the following constants
in <machine/vmparam.h>:
VM_NFREELIST: the number of free lists the system will have
VM_FREELIST_DEFAULT: the default freelist (should always be 0,
but is defined in machdep code so that it's with all of the
other free list-related constants).
Additional free list names may be defined by machine-dependent code, but
they will only be used by machine-dependent code (e.g. for loading the
vm_physsegs).
1998-07-08 08:28:27 +04:00
|
|
|
* => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
|
2000-04-24 21:12:00 +04:00
|
|
|
* => policy decision: it is more important to pull a page off of the
|
|
|
|
* appropriate priority free list than it is to get a zero'd or
|
|
|
|
* unknown contents page. This is because we live with the
|
|
|
|
* consequences of a bad free list decision for the entire
|
|
|
|
* lifetime of the page, e.g. if the page comes from memory that
|
|
|
|
* is slower to access.
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
struct vm_page *
|
1999-04-11 08:04:04 +04:00
|
|
|
uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
|
1998-03-09 03:58:55 +03:00
|
|
|
struct uvm_object *obj;
|
2000-03-27 00:54:45 +04:00
|
|
|
voff_t off;
|
1999-04-11 08:04:04 +04:00
|
|
|
int flags;
|
1998-03-09 03:58:55 +03:00
|
|
|
struct vm_anon *anon;
|
Add support for multiple memory free lists. There is at least one
default free list, and 0 - N additional free list, in order of descending
priority.
A new page allocation function, uvm_pagealloc_strat(), has been added,
providing three page allocation strategies:
- normal: high -> low priority free list walk, taking the
page off the first free list that has one.
- only: attempt to allocate a page only from the specified free
list, failing if that free list has none available.
- fallback: if `only' fails, fall back on `normal'.
uvm_pagealloc(...) is provided for normal use (and is a synonym for
uvm_pagealloc_strat(..., UVM_PGA_STRAT_NORMAL, 0); the free list argument
is ignored for the `normal' case).
uvm_page_physload() now specified which free list the pages will be
loaded onto. This means that some platforms which have multiple physical
memory segments may define additional vm_physsegs if they wish to break
individual physical segments into differing priorities.
Machine-dependent code must define _at least_ the following constants
in <machine/vmparam.h>:
VM_NFREELIST: the number of free lists the system will have
VM_FREELIST_DEFAULT: the default freelist (should always be 0,
but is defined in machdep code so that it's with all of the
other free list-related constants).
Additional free list names may be defined by machine-dependent code, but
they will only be used by machine-dependent code (e.g. for loading the
vm_physsegs).
1998-07-08 08:28:27 +04:00
|
|
|
int strat, free_list;
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
2001-04-29 08:23:20 +04:00
|
|
|
int lcv, try1, try2, s, zeroit = 0, color;
|
1998-03-09 03:58:55 +03:00
|
|
|
struct vm_page *pg;
|
1999-04-11 08:04:04 +04:00
|
|
|
boolean_t use_reserve;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
KASSERT(obj == NULL || anon == NULL);
|
|
|
|
KASSERT(off == trunc_page(off));
|
2001-01-23 04:56:16 +03:00
|
|
|
LOCK_ASSERT(obj == NULL || simple_lock_held(&obj->vmobjlock));
|
|
|
|
LOCK_ASSERT(anon == NULL || simple_lock_held(&anon->an_lock));
|
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
s = uvm_lock_fpageq();
|
1998-03-09 03:58:55 +03:00
|
|
|
|
2001-04-29 08:23:20 +04:00
|
|
|
/*
|
|
|
|
* This implements a global round-robin page coloring
|
|
|
|
* algorithm.
|
|
|
|
*
|
|
|
|
* XXXJRT: Should we make the `nextcolor' per-cpu?
|
|
|
|
* XXXJRT: What about virtually-indexed caches?
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
2001-04-29 08:23:20 +04:00
|
|
|
color = uvm.page_free_nextcolor;
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* check to see if we need to generate some free pages waking
|
|
|
|
* the pagedaemon.
|
|
|
|
*/
|
|
|
|
|
2001-06-28 01:18:34 +04:00
|
|
|
UVM_KICK_PDAEMON();
|
1998-03-09 03:58:55 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* fail if any of these conditions is true:
|
|
|
|
* [1] there really are no free pages, or
|
|
|
|
* [2] only kernel "reserved" pages remain and
|
|
|
|
* the page isn't being allocated to a kernel object.
|
|
|
|
* [3] only pagedaemon "reserved" pages remain and
|
|
|
|
* the requestor isn't the pagedaemon.
|
|
|
|
*/
|
|
|
|
|
1999-04-11 08:04:04 +04:00
|
|
|
use_reserve = (flags & UVM_PGA_USERESERVE) ||
|
1999-05-25 04:09:00 +04:00
|
|
|
(obj && UVM_OBJ_IS_KERN_OBJECT(obj));
|
1999-04-11 08:04:04 +04:00
|
|
|
if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
|
1998-03-09 03:58:55 +03:00
|
|
|
(uvmexp.free <= uvmexp.reserve_pagedaemon &&
|
1999-04-11 08:04:04 +04:00
|
|
|
!(use_reserve && curproc == uvm.pagedaemon_proc)))
|
Add support for multiple memory free lists. There is at least one
default free list, and 0 - N additional free lists, in order of descending
priority.
A new page allocation function, uvm_pagealloc_strat(), has been added,
providing three page allocation strategies:
- normal: high -> low priority free list walk, taking the
page off the first free list that has one.
- only: attempt to allocate a page only from the specified free
list, failing if that free list has none available.
- fallback: if `only' fails, fall back on `normal'.
uvm_pagealloc(...) is provided for normal use (and is a synonym for
uvm_pagealloc_strat(..., UVM_PGA_STRAT_NORMAL, 0); the free list argument
is ignored for the `normal' case).
uvm_page_physload() now specifies which free list the pages will be
loaded onto. This means that some platforms which have multiple physical
memory segments may define additional vm_physsegs if they wish to break
individual physical segments into differing priorities.
Machine-dependent code must define _at least_ the following constants
in <machine/vmparam.h>:
VM_NFREELIST: the number of free lists the system will have
VM_FREELIST_DEFAULT: the default freelist (should always be 0,
but is defined in machdep code so that it's with all of the
other free list-related constants).
Additional free list names may be defined by machine-dependent code, but
they will only be used by machine-dependent code (e.g. for loading the
vm_physsegs).
1998-07-08 08:28:27 +04:00
|
|
|
goto fail;
|
|
|
|
|
2000-04-24 21:12:00 +04:00
|
|
|
#if PGFL_NQUEUES != 2
|
|
|
|
#error uvm_pagealloc_strat needs to be updated
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we want a zero'd page, try the ZEROS queue first, otherwise
|
|
|
|
* we try the UNKNOWN queue first.
|
|
|
|
*/
|
|
|
|
if (flags & UVM_PGA_ZERO) {
|
|
|
|
try1 = PGFL_ZEROS;
|
|
|
|
try2 = PGFL_UNKNOWN;
|
|
|
|
} else {
|
|
|
|
try1 = PGFL_UNKNOWN;
|
|
|
|
try2 = PGFL_ZEROS;
|
|
|
|
}
|
|
|
|
|
Add support for multiple memory free lists. There is at least one
default free list, and 0 - N additional free lists, in order of descending
priority.
A new page allocation function, uvm_pagealloc_strat(), has been added,
providing three page allocation strategies:
- normal: high -> low priority free list walk, taking the
page off the first free list that has one.
- only: attempt to allocate a page only from the specified free
list, failing if that free list has none available.
- fallback: if `only' fails, fall back on `normal'.
uvm_pagealloc(...) is provided for normal use (and is a synonym for
uvm_pagealloc_strat(..., UVM_PGA_STRAT_NORMAL, 0); the free list argument
is ignored for the `normal' case).
uvm_page_physload() now specifies which free list the pages will be
loaded onto. This means that some platforms which have multiple physical
memory segments may define additional vm_physsegs if they wish to break
individual physical segments into differing priorities.
Machine-dependent code must define _at least_ the following constants
in <machine/vmparam.h>:
VM_NFREELIST: the number of free lists the system will have
VM_FREELIST_DEFAULT: the default freelist (should always be 0,
but is defined in machdep code so that it's with all of the
other free list-related constants).
Additional free list names may be defined by machine-dependent code, but
they will only be used by machine-dependent code (e.g. for loading the
vm_physsegs).
1998-07-08 08:28:27 +04:00
|
|
|
again:
|
|
|
|
switch (strat) {
|
|
|
|
case UVM_PGA_STRAT_NORMAL:
|
|
|
|
/* Check all freelists in descending priority order. */
|
|
|
|
for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
|
2001-04-29 08:23:20 +04:00
|
|
|
pg = uvm_pagealloc_pgfl(&uvm.page_free[lcv],
|
|
|
|
try1, try2, &color);
|
|
|
|
if (pg != NULL)
|
Add support for multiple memory free lists. There is at least one
default free list, and 0 - N additional free lists, in order of descending
priority.
A new page allocation function, uvm_pagealloc_strat(), has been added,
providing three page allocation strategies:
- normal: high -> low priority free list walk, taking the
page off the first free list that has one.
- only: attempt to allocate a page only from the specified free
list, failing if that free list has none available.
- fallback: if `only' fails, fall back on `normal'.
uvm_pagealloc(...) is provided for normal use (and is a synonym for
uvm_pagealloc_strat(..., UVM_PGA_STRAT_NORMAL, 0); the free list argument
is ignored for the `normal' case).
uvm_page_physload() now specifies which free list the pages will be
loaded onto. This means that some platforms which have multiple physical
memory segments may define additional vm_physsegs if they wish to break
individual physical segments into differing priorities.
Machine-dependent code must define _at least_ the following constants
in <machine/vmparam.h>:
VM_NFREELIST: the number of free lists the system will have
VM_FREELIST_DEFAULT: the default freelist (should always be 0,
but is defined in machdep code so that it's with all of the
other free list-related constants).
Additional free list names may be defined by machine-dependent code, but
they will only be used by machine-dependent code (e.g. for loading the
vm_physsegs).
1998-07-08 08:28:27 +04:00
|
|
|
goto gotit;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* No pages free! */
|
|
|
|
goto fail;
|
|
|
|
|
|
|
|
case UVM_PGA_STRAT_ONLY:
|
|
|
|
case UVM_PGA_STRAT_FALLBACK:
|
|
|
|
/* Attempt to allocate from the specified free list. */
|
2000-11-27 11:39:39 +03:00
|
|
|
KASSERT(free_list >= 0 && free_list < VM_NFREELIST);
|
2001-04-29 08:23:20 +04:00
|
|
|
pg = uvm_pagealloc_pgfl(&uvm.page_free[free_list],
|
|
|
|
try1, try2, &color);
|
|
|
|
if (pg != NULL)
|
Add support for multiple memory free lists. There is at least one
default free list, and 0 - N additional free lists, in order of descending
priority.
A new page allocation function, uvm_pagealloc_strat(), has been added,
providing three page allocation strategies:
- normal: high -> low priority free list walk, taking the
page off the first free list that has one.
- only: attempt to allocate a page only from the specified free
list, failing if that free list has none available.
- fallback: if `only' fails, fall back on `normal'.
uvm_pagealloc(...) is provided for normal use (and is a synonym for
uvm_pagealloc_strat(..., UVM_PGA_STRAT_NORMAL, 0); the free list argument
is ignored for the `normal' case).
uvm_page_physload() now specifies which free list the pages will be
loaded onto. This means that some platforms which have multiple physical
memory segments may define additional vm_physsegs if they wish to break
individual physical segments into differing priorities.
Machine-dependent code must define _at least_ the following constants
in <machine/vmparam.h>:
VM_NFREELIST: the number of free lists the system will have
VM_FREELIST_DEFAULT: the default freelist (should always be 0,
but is defined in machdep code so that it's with all of the
other free list-related constants).
Additional free list names may be defined by machine-dependent code, but
they will only be used by machine-dependent code (e.g. for loading the
vm_physsegs).
1998-07-08 08:28:27 +04:00
|
|
|
goto gotit;
|
|
|
|
|
|
|
|
/* Fall back, if possible. */
|
|
|
|
if (strat == UVM_PGA_STRAT_FALLBACK) {
|
|
|
|
strat = UVM_PGA_STRAT_NORMAL;
|
|
|
|
goto again;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* No pages free! */
|
|
|
|
goto fail;
|
|
|
|
|
|
|
|
default:
|
|
|
|
panic("uvm_pagealloc_strat: bad strat %d", strat);
|
|
|
|
/* NOTREACHED */
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
|
Add support for multiple memory free lists. There is at least one
default free list, and 0 - N additional free lists, in order of descending
priority.
A new page allocation function, uvm_pagealloc_strat(), has been added,
providing three page allocation strategies:
- normal: high -> low priority free list walk, taking the
page off the first free list that has one.
- only: attempt to allocate a page only from the specified free
list, failing if that free list has none available.
- fallback: if `only' fails, fall back on `normal'.
uvm_pagealloc(...) is provided for normal use (and is a synonym for
uvm_pagealloc_strat(..., UVM_PGA_STRAT_NORMAL, 0); the free list argument
is ignored for the `normal' case).
uvm_page_physload() now specifies which free list the pages will be
loaded onto. This means that some platforms which have multiple physical
memory segments may define additional vm_physsegs if they wish to break
individual physical segments into differing priorities.
Machine-dependent code must define _at least_ the following constants
in <machine/vmparam.h>:
VM_NFREELIST: the number of free lists the system will have
VM_FREELIST_DEFAULT: the default freelist (should always be 0,
but is defined in machdep code so that it's with all of the
other free list-related constants).
Additional free list names may be defined by machine-dependent code, but
they will only be used by machine-dependent code (e.g. for loading the
vm_physsegs).
1998-07-08 08:28:27 +04:00
|
|
|
gotit:
|
2001-04-29 08:23:20 +04:00
|
|
|
/*
|
|
|
|
* We now know which color we actually allocated from; set
|
|
|
|
* the next color accordingly.
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
2001-05-02 05:22:19 +04:00
|
|
|
uvm.page_free_nextcolor = (color + 1) & uvmexp.colormask;
|
2000-04-24 21:12:00 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* update allocation statistics and remember if we have to
|
|
|
|
* zero the page
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
2000-04-24 21:12:00 +04:00
|
|
|
if (flags & UVM_PGA_ZERO) {
|
|
|
|
if (pg->flags & PG_ZERO) {
|
|
|
|
uvmexp.pga_zerohit++;
|
|
|
|
zeroit = 0;
|
|
|
|
} else {
|
|
|
|
uvmexp.pga_zeromiss++;
|
|
|
|
zeroit = 1;
|
|
|
|
}
|
|
|
|
}
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
uvm_unlock_fpageq(s);
|
1998-03-09 03:58:55 +03:00
|
|
|
|
|
|
|
pg->offset = off;
|
|
|
|
pg->uobject = obj;
|
|
|
|
pg->uanon = anon;
|
|
|
|
pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
|
|
|
|
if (anon) {
|
|
|
|
anon->u.an_page = pg;
|
|
|
|
pg->pqflags = PQ_ANON;
|
2000-11-30 14:04:43 +03:00
|
|
|
uvmexp.anonpages++;
|
1998-03-09 03:58:55 +03:00
|
|
|
} else {
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
if (obj) {
|
1998-03-09 03:58:55 +03:00
|
|
|
uvm_pageinsert(pg);
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
}
|
1998-03-09 03:58:55 +03:00
|
|
|
pg->pqflags = 0;
|
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
#if defined(UVM_PAGE_TRKOWN)
|
1998-03-09 03:58:55 +03:00
|
|
|
pg->owner_tag = NULL;
|
1998-02-05 09:25:08 +03:00
|
|
|
#endif
|
1998-03-09 03:58:55 +03:00
|
|
|
UVM_PAGE_OWN(pg, "new alloc");
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2000-04-10 04:28:05 +04:00
|
|
|
if (flags & UVM_PGA_ZERO) {
|
|
|
|
/*
|
2000-04-24 21:12:00 +04:00
|
|
|
* A zero'd page is not clean. If we got a page not already
|
|
|
|
* zero'd, then we have to zero it ourselves.
|
2000-04-10 04:28:05 +04:00
|
|
|
*/
|
|
|
|
pg->flags &= ~PG_CLEAN;
|
2000-04-24 21:12:00 +04:00
|
|
|
if (zeroit)
|
|
|
|
pmap_zero_page(VM_PAGE_TO_PHYS(pg));
|
2000-04-10 04:28:05 +04:00
|
|
|
}
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
return(pg);
|
Add support for multiple memory free lists. There is at least one
default free list, and 0 - N additional free list, in order of descending
priority.
A new page allocation function, uvm_pagealloc_strat(), has been added,
providing three page allocation strategies:
- normal: high -> low priority free list walk, taking the
page off the first free list that has one.
- only: attempt to allocate a page only from the specified free
list, failing if that free list has none available.
- fallback: if `only' fails, fall back on `normal'.
uvm_pagealloc(...) is provided for normal use (and is a synonym for
uvm_pagealloc_strat(..., UVM_PGA_STRAT_NORMAL, 0); the free list argument
is ignored for the `normal' case).
uvm_page_physload() now specified which free list the pages will be
loaded onto. This means that some platforms which have multiple physical
memory segments may define additional vm_physsegs if they wish to break
individual physical segments into differing priorities.
Machine-dependent code must define _at least_ the following constants
in <machine/vmparam.h>:
VM_NFREELIST: the number of free lists the system will have
VM_FREELIST_DEFAULT: the default freelist (should always be 0,
but is defined in machdep code so that it's with all of the
other free list-related constants).
Additional free list names may be defined by machine-dependent code, but
they will only be used by machine-dependent code (e.g. for loading the
vm_physsegs).
1998-07-08 08:28:27 +04:00
|
|
|
|
|
|
|
fail:
|
1999-05-24 23:10:57 +04:00
|
|
|
uvm_unlock_fpageq(s);
|
Add support for multiple memory free lists. There is at least one
default free list, and 0 - N additional free list, in order of descending
priority.
A new page allocation function, uvm_pagealloc_strat(), has been added,
providing three page allocation strategies:
- normal: high -> low priority free list walk, taking the
page off the first free list that has one.
- only: attempt to allocate a page only from the specified free
list, failing if that free list has none available.
- fallback: if `only' fails, fall back on `normal'.
uvm_pagealloc(...) is provided for normal use (and is a synonym for
uvm_pagealloc_strat(..., UVM_PGA_STRAT_NORMAL, 0); the free list argument
is ignored for the `normal' case).
uvm_page_physload() now specified which free list the pages will be
loaded onto. This means that some platforms which have multiple physical
memory segments may define additional vm_physsegs if they wish to break
individual physical segments into differing priorities.
Machine-dependent code must define _at least_ the following constants
in <machine/vmparam.h>:
VM_NFREELIST: the number of free lists the system will have
VM_FREELIST_DEFAULT: the default freelist (should always be 0,
but is defined in machdep code so that it's with all of the
other free list-related constants).
Additional free list names may be defined by machine-dependent code, but
they will only be used by machine-dependent code (e.g. for loading the
vm_physsegs).
1998-07-08 08:28:27 +04:00
|
|
|
return (NULL);
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_pagerealloc: reallocate a page from one object to another
|
|
|
|
*
|
|
|
|
* => both objects must be locked
|
|
|
|
*/
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
void
|
|
|
|
uvm_pagerealloc(pg, newobj, newoff)
|
|
|
|
struct vm_page *pg;
|
|
|
|
struct uvm_object *newobj;
|
2000-03-27 00:54:45 +04:00
|
|
|
voff_t newoff;
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* remove it from the old object
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (pg->uobject) {
|
|
|
|
uvm_pageremove(pg);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* put it in the new object
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (newobj) {
|
|
|
|
pg->uobject = newobj;
|
|
|
|
pg->offset = newoff;
|
|
|
|
uvm_pageinsert(pg);
|
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_pagefree: free page
|
|
|
|
*
|
|
|
|
* => erase page's identity (i.e. remove from hash/object)
|
|
|
|
* => put page on free list
|
|
|
|
* => caller must lock owning object (either anon or uvm_object)
|
|
|
|
* => caller must lock page queues
|
|
|
|
* => assumes all valid mappings of pg are gone
|
|
|
|
*/
|
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
void
|
|
|
|
uvm_pagefree(pg)
|
|
|
|
struct vm_page *pg;
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
1998-03-09 03:58:55 +03:00
|
|
|
int s;
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
|
|
|
KASSERT((pg->flags & PG_PAGEOUT) == 0);
|
|
|
|
LOCK_ASSERT(simple_lock_held(&uvm.pageqlock) ||
|
|
|
|
(pg->pqflags & (PQ_ACTIVE|PQ_INACTIVE)) == 0);
|
2001-11-06 11:07:49 +03:00
|
|
|
LOCK_ASSERT(pg->uobject == NULL ||
|
|
|
|
simple_lock_held(&pg->uobject->vmobjlock));
|
|
|
|
LOCK_ASSERT(pg->uobject != NULL || pg->uanon == NULL ||
|
|
|
|
simple_lock_held(&pg->uanon->an_lock));
|
1998-03-09 03:58:55 +03:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
#ifdef DEBUG
|
|
|
|
if (pg->uobject == (void *)0xdeadbeef &&
|
|
|
|
pg->uanon == (void *)0xdeadbeef) {
|
2002-09-27 19:35:29 +04:00
|
|
|
panic("uvm_pagefree: freeing free page %p", pg);
|
2000-11-27 11:39:39 +03:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
* if the page is loaned, resolve the loan instead of freeing.
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
|
|
|
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
if (pg->loan_count) {
|
2001-11-06 11:07:49 +03:00
|
|
|
KASSERT(pg->wire_count == 0);
|
2001-05-25 08:06:11 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
* if the page is owned by an anon then we just want to
|
2001-11-06 11:07:49 +03:00
|
|
|
* drop anon ownership. the kernel will free the page when
|
|
|
|
* it is done with it. if the page is owned by an object,
|
|
|
|
* remove it from the object and mark it dirty for the benefit
|
|
|
|
* of possible anon owners.
|
|
|
|
*
|
|
|
|
* regardless of previous ownership, wakeup any waiters,
|
|
|
|
* unbusy the page, and we're done.
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
2000-11-27 11:39:39 +03:00
|
|
|
|
2001-12-31 22:21:36 +03:00
|
|
|
if (pg->uobject != NULL) {
|
2001-11-06 11:07:49 +03:00
|
|
|
uvm_pageremove(pg);
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
pg->flags &= ~PG_CLEAN;
|
2001-12-31 22:21:36 +03:00
|
|
|
} else if (pg->uanon != NULL) {
|
|
|
|
if ((pg->pqflags & PQ_ANON) == 0) {
|
|
|
|
pg->loan_count--;
|
|
|
|
} else {
|
|
|
|
pg->pqflags &= ~PQ_ANON;
|
|
|
|
}
|
|
|
|
pg->uanon = NULL;
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
}
|
2001-11-06 11:07:49 +03:00
|
|
|
if (pg->flags & PG_WANTED) {
|
|
|
|
wakeup(pg);
|
|
|
|
}
|
2001-12-31 22:21:36 +03:00
|
|
|
pg->flags &= ~(PG_WANTED|PG_BUSY|PG_RELEASED);
|
2001-11-06 11:07:49 +03:00
|
|
|
#ifdef UVM_PAGE_TRKOWN
|
|
|
|
pg->owner_tag = NULL;
|
|
|
|
#endif
|
2001-12-31 22:21:36 +03:00
|
|
|
if (pg->loan_count) {
|
2002-05-15 04:19:12 +04:00
|
|
|
uvm_pagedequeue(pg);
|
2001-12-31 22:21:36 +03:00
|
|
|
return;
|
|
|
|
}
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
}
|
1998-03-09 03:58:55 +03:00
|
|
|
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
/*
|
|
|
|
* remove page from its object or anon.
|
|
|
|
*/
|
2000-11-27 11:39:39 +03:00
|
|
|
|
2001-12-31 22:21:36 +03:00
|
|
|
if (pg->uobject != NULL) {
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
uvm_pageremove(pg);
|
2001-12-31 22:21:36 +03:00
|
|
|
} else if (pg->uanon != NULL) {
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
pg->uanon->u.an_page = NULL;
|
2001-12-31 22:21:36 +03:00
|
|
|
uvmexp.anonpages--;
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2001-11-06 11:07:49 +03:00
|
|
|
* now remove the page from the queues.
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
|
|
|
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
uvm_pagedequeue(pg);
|
1998-03-09 03:58:55 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* if the page was wired, unwire it now.
|
|
|
|
*/
|
2000-11-27 11:39:39 +03:00
|
|
|
|
2000-04-24 21:12:00 +04:00
|
|
|
if (pg->wire_count) {
|
1998-03-09 03:58:55 +03:00
|
|
|
pg->wire_count = 0;
|
|
|
|
uvmexp.wired--;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2000-11-27 11:39:39 +03:00
|
|
|
* and put on free queue
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
|
|
|
|
2000-04-24 21:12:00 +04:00
|
|
|
pg->flags &= ~PG_ZERO;
|
|
|
|
|
1999-05-24 23:10:57 +04:00
|
|
|
s = uvm_lock_fpageq();
|
2000-04-24 21:12:00 +04:00
|
|
|
TAILQ_INSERT_TAIL(&uvm.page_free[
|
2001-04-29 08:23:20 +04:00
|
|
|
uvm_page_lookup_freelist(pg)].pgfl_buckets[
|
|
|
|
VM_PGCOLOR_BUCKET(pg)].pgfl_queues[PGFL_UNKNOWN], pg, pageq);
|
1998-03-09 03:58:55 +03:00
|
|
|
pg->pqflags = PQ_FREE;
|
1998-02-07 05:34:08 +03:00
|
|
|
#ifdef DEBUG
|
1998-03-09 03:58:55 +03:00
|
|
|
pg->uobject = (void *)0xdeadbeef;
|
|
|
|
pg->offset = 0xdeadbeef;
|
|
|
|
pg->uanon = (void *)0xdeadbeef;
|
1998-02-07 05:34:08 +03:00
|
|
|
#endif
|
1998-03-09 03:58:55 +03:00
|
|
|
uvmexp.free++;
|
2000-04-24 21:12:00 +04:00
|
|
|
|
|
|
|
if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
|
|
|
|
uvm.page_idle_zero = vm_page_zero_enable;
|
|
|
|
|
1999-05-24 23:10:57 +04:00
|
|
|
uvm_unlock_fpageq(s);
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
/*
|
|
|
|
* uvm_page_unbusy: unbusy an array of pages.
|
|
|
|
*
|
|
|
|
* => pages must either all belong to the same object, or all belong to anons.
|
|
|
|
* => if pages are object-owned, object must be locked.
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
* => if pages are anon-owned, anons must be locked.
|
2002-05-29 15:04:39 +04:00
|
|
|
* => caller must lock page queues if pages may be released.
|
2000-11-27 11:39:39 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
void
|
|
|
|
uvm_page_unbusy(pgs, npgs)
|
|
|
|
struct vm_page **pgs;
|
|
|
|
int npgs;
|
|
|
|
{
|
|
|
|
struct vm_page *pg;
|
|
|
|
int i;
|
|
|
|
UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(ubchist);
|
|
|
|
|
|
|
|
for (i = 0; i < npgs; i++) {
|
|
|
|
pg = pgs[i];
|
2003-01-27 05:10:20 +03:00
|
|
|
if (pg == NULL || pg == PGO_DONTCARE) {
|
2000-11-27 11:39:39 +03:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (pg->flags & PG_WANTED) {
|
|
|
|
wakeup(pg);
|
|
|
|
}
|
|
|
|
if (pg->flags & PG_RELEASED) {
|
|
|
|
UVMHIST_LOG(ubchist, "releasing pg %p", pg,0,0,0);
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
pg->flags &= ~PG_RELEASED;
|
|
|
|
uvm_pagefree(pg);
|
2000-11-27 11:39:39 +03:00
|
|
|
} else {
|
|
|
|
UVMHIST_LOG(ubchist, "unbusying pg %p", pg,0,0,0);
|
|
|
|
pg->flags &= ~(PG_WANTED|PG_BUSY);
|
|
|
|
UVM_PAGE_OWN(pg, NULL);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
#if defined(UVM_PAGE_TRKOWN)
|
|
|
|
/*
|
|
|
|
* uvm_page_own: set or release page ownership
|
|
|
|
*
|
|
|
|
* => this is a debugging function that keeps track of who sets PG_BUSY
|
|
|
|
* and where they do it. it can be used to track down problems
|
|
|
|
* such a process setting "PG_BUSY" and never releasing it.
|
|
|
|
* => page's object [if any] must be locked
|
|
|
|
* => if "tag" is NULL then we are releasing page ownership
|
|
|
|
*/
|
1998-03-09 03:58:55 +03:00
|
|
|
void
|
|
|
|
uvm_page_own(pg, tag)
|
|
|
|
struct vm_page *pg;
|
|
|
|
char *tag;
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
KASSERT((pg->flags & (PG_PAGEOUT|PG_RELEASED)) == 0);
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/* gain ownership? */
|
|
|
|
if (tag) {
|
|
|
|
if (pg->owner_tag) {
|
|
|
|
printf("uvm_page_own: page %p already owned "
|
|
|
|
"by proc %d [%s]\n", pg,
|
2002-02-20 10:06:56 +03:00
|
|
|
pg->owner, pg->owner_tag);
|
1998-03-09 03:58:55 +03:00
|
|
|
panic("uvm_page_own");
|
|
|
|
}
|
|
|
|
pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
|
|
|
|
pg->owner_tag = tag;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* drop ownership */
|
|
|
|
if (pg->owner_tag == NULL) {
|
|
|
|
printf("uvm_page_own: dropping ownership of an non-owned "
|
|
|
|
"page (%p)\n", pg);
|
|
|
|
panic("uvm_page_own");
|
|
|
|
}
|
2001-11-06 11:07:49 +03:00
|
|
|
KASSERT((pg->pqflags & (PQ_ACTIVE|PQ_INACTIVE)) ||
|
2002-02-20 10:06:56 +03:00
|
|
|
(pg->uanon == NULL && pg->uobject == NULL) ||
|
|
|
|
pg->uobject == uvm.kernel_object ||
|
|
|
|
pg->wire_count > 0 ||
|
|
|
|
(pg->loan_count == 1 && pg->uanon == NULL) ||
|
|
|
|
pg->loan_count > 1);
|
|
|
|
pg->owner_tag = NULL;
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
#endif
|
2000-04-24 21:12:00 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_pageidlezero: zero free pages while the system is idle.
|
|
|
|
*
|
2001-04-29 08:23:20 +04:00
|
|
|
* => try to complete one color bucket at a time, to reduce our impact
|
|
|
|
* on the CPU cache.
|
2000-04-24 21:12:00 +04:00
|
|
|
* => we loop until we either reach the target or whichqs indicates that
|
|
|
|
* there is a process ready to run.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
uvm_pageidlezero()
|
|
|
|
{
|
|
|
|
struct vm_page *pg;
|
|
|
|
struct pgfreelist *pgfl;
|
2001-05-01 18:02:56 +04:00
|
|
|
int free_list, s, firstbucket;
|
2001-04-29 08:23:20 +04:00
|
|
|
static int nextbucket;
|
2000-04-24 21:12:00 +04:00
|
|
|
|
2001-04-29 08:23:20 +04:00
|
|
|
s = uvm_lock_fpageq();
|
2001-05-01 18:02:56 +04:00
|
|
|
firstbucket = nextbucket;
|
|
|
|
do {
|
2001-04-29 08:23:20 +04:00
|
|
|
if (sched_whichqs != 0) {
|
2000-04-24 21:12:00 +04:00
|
|
|
uvm_unlock_fpageq(s);
|
|
|
|
return;
|
|
|
|
}
|
2001-04-29 08:23:20 +04:00
|
|
|
if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) {
|
2000-04-24 21:12:00 +04:00
|
|
|
uvm.page_idle_zero = FALSE;
|
|
|
|
uvm_unlock_fpageq(s);
|
|
|
|
return;
|
|
|
|
}
|
2001-04-29 08:23:20 +04:00
|
|
|
for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
|
|
|
|
pgfl = &uvm.page_free[free_list];
|
|
|
|
while ((pg = TAILQ_FIRST(&pgfl->pgfl_buckets[
|
|
|
|
nextbucket].pgfl_queues[PGFL_UNKNOWN])) != NULL) {
|
|
|
|
if (sched_whichqs != 0) {
|
|
|
|
uvm_unlock_fpageq(s);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
TAILQ_REMOVE(&pgfl->pgfl_buckets[
|
|
|
|
nextbucket].pgfl_queues[PGFL_UNKNOWN],
|
|
|
|
pg, pageq);
|
|
|
|
uvmexp.free--;
|
|
|
|
uvm_unlock_fpageq(s);
|
2000-04-24 21:12:00 +04:00
|
|
|
#ifdef PMAP_PAGEIDLEZERO
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
if (!PMAP_PAGEIDLEZERO(VM_PAGE_TO_PHYS(pg))) {
|
|
|
|
|
2001-04-29 08:23:20 +04:00
|
|
|
/*
|
|
|
|
* The machine-dependent code detected
|
|
|
|
* some reason for us to abort zeroing
|
|
|
|
* pages, probably because there is a
|
|
|
|
* process now ready to run.
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
2001-04-29 08:23:20 +04:00
|
|
|
s = uvm_lock_fpageq();
|
|
|
|
TAILQ_INSERT_HEAD(&pgfl->pgfl_buckets[
|
|
|
|
nextbucket].pgfl_queues[
|
|
|
|
PGFL_UNKNOWN], pg, pageq);
|
|
|
|
uvmexp.free++;
|
|
|
|
uvmexp.zeroaborts++;
|
|
|
|
uvm_unlock_fpageq(s);
|
|
|
|
return;
|
|
|
|
}
|
2000-04-24 21:12:00 +04:00
|
|
|
#else
|
2001-04-29 08:23:20 +04:00
|
|
|
pmap_zero_page(VM_PAGE_TO_PHYS(pg));
|
|
|
|
#endif /* PMAP_PAGEIDLEZERO */
|
|
|
|
pg->flags |= PG_ZERO;
|
|
|
|
|
|
|
|
s = uvm_lock_fpageq();
|
|
|
|
TAILQ_INSERT_HEAD(&pgfl->pgfl_buckets[
|
|
|
|
nextbucket].pgfl_queues[PGFL_ZEROS],
|
|
|
|
pg, pageq);
|
|
|
|
uvmexp.free++;
|
|
|
|
uvmexp.zeropages++;
|
|
|
|
}
|
|
|
|
}
|
2001-05-02 05:22:19 +04:00
|
|
|
nextbucket = (nextbucket + 1) & uvmexp.colormask;
|
2001-05-01 18:02:56 +04:00
|
|
|
} while (nextbucket != firstbucket);
|
2001-04-29 08:23:20 +04:00
|
|
|
uvm_unlock_fpageq(s);
|
2000-04-24 21:12:00 +04:00
|
|
|
}
|