2001-12-10 04:52:26 +03:00
|
|
|
/* $NetBSD: uvm_extern.h,v 1.70 2001/12/10 01:52:26 thorpej Exp $ */
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
*
|
|
|
|
* Copyright (c) 1997 Charles D. Cranor and Washington University.
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. All advertising materials mentioning features or use of this software
|
|
|
|
* must display the following acknowledgement:
|
|
|
|
* This product includes software developed by Charles D. Cranor and
|
|
|
|
* Washington University.
|
|
|
|
* 4. The name of the author may not be used to endorse or promote products
|
|
|
|
* derived from this software without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
|
|
|
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
|
|
|
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
|
|
|
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
|
|
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
|
|
|
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
|
|
|
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
1998-02-07 14:07:38 +03:00
|
|
|
*
|
|
|
|
* from: Id: uvm_extern.h,v 1.1.2.21 1998/02/07 01:16:53 chs Exp
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
|
|
|
|
2000-06-27 13:00:14 +04:00
|
|
|
/*-
|
2000-06-27 20:16:43 +04:00
|
|
|
* Copyright (c) 1991, 1992, 1993
|
2000-06-27 13:00:14 +04:00
|
|
|
* The Regents of the University of California. All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. All advertising materials mentioning features or use of this software
|
|
|
|
* must display the following acknowledgement:
|
|
|
|
* This product includes software developed by the University of
|
|
|
|
* California, Berkeley and its contributors.
|
|
|
|
* 4. Neither the name of the University nor the names of its contributors
|
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
* @(#)vm_extern.h 8.5 (Berkeley) 5/3/95
|
|
|
|
*/
|
|
|
|
|
1998-02-10 05:34:17 +03:00
|
|
|
#ifndef _UVM_UVM_EXTERN_H_
|
|
|
|
#define _UVM_UVM_EXTERN_H_
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
/*
 * uvm_extern.h: this file defines the external interface to the VM system.
 *
 * this should be the only file included by non-VM parts of the kernel
 * which need access to VM services. if you want to know the interface
 * to the MI VM layer without knowing the details, this is the file to
 * learn.
 *
 * NOTE: vm system calls are prototyped in syscallargs.h
 */

/*
 * typedefs, necessary for standard UVM headers.
 */

typedef unsigned int uvm_flag_t;	/* holds UVM_* mapping flag bits */
typedef int vm_fault_t;			/* fault type code */
typedef int vm_inherit_t;		/* XXX: inheritance codes */
typedef off_t voff_t;			/* XXX: offset within a uvm_object */
|
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
/*
 * defines
 */

/*
 * the following defines are for uvm_map and functions which call it.
 */

/* protections bits */
#define UVM_PROT_MASK	0x07	/* protection mask */
#define UVM_PROT_NONE	0x00	/* protection none */
#define UVM_PROT_ALL	0x07	/* everything */
#define UVM_PROT_READ	0x01	/* read */
#define UVM_PROT_WRITE	0x02	/* write */
#define UVM_PROT_EXEC	0x04	/* exec */

/* protection short codes */
#define UVM_PROT_R	0x01	/* read */
#define UVM_PROT_W	0x02	/* write */
#define UVM_PROT_RW	0x03	/* read-write */
#define UVM_PROT_X	0x04	/* exec */
#define UVM_PROT_RX	0x05	/* read-exec */
#define UVM_PROT_WX	0x06	/* write-exec */
#define UVM_PROT_RWX	0x07	/* read-write-exec */

/* 0x08: not used */

/* inherit codes */
#define UVM_INH_MASK	0x30	/* inherit mask */
#define UVM_INH_SHARE	0x00	/* "share" */
#define UVM_INH_COPY	0x10	/* "copy" */
#define UVM_INH_NONE	0x20	/* "none" */
#define UVM_INH_DONATE	0x30	/* "donate" << not used */

/* 0x40, 0x80: not used */

/* bits 0x700: max protection, 0x800: not used */

/* bits 0x7000: advice, 0x8000: not used */
/* advice: matches MADV_* from sys/mman.h */
#define UVM_ADV_NORMAL	0x0	/* 'normal' */
#define UVM_ADV_RANDOM	0x1	/* 'random' */
#define UVM_ADV_SEQUENTIAL 0x2	/* 'sequential' */
/* 0x3: will need, 0x4: dontneed */
#define UVM_ADV_MASK	0x7	/* mask */

/* mapping flags */
#define UVM_FLAG_FIXED	 0x010000 /* find space */
#define UVM_FLAG_OVERLAY 0x020000 /* establish overlay */
#define UVM_FLAG_NOMERGE 0x040000 /* don't merge map entries */
#define UVM_FLAG_COPYONW 0x080000 /* set copy_on_write flag */
#define UVM_FLAG_AMAPPAD 0x100000 /* for bss: pad amap to reduce malloc() */
#define UVM_FLAG_TRYLOCK 0x200000 /* fail if we can not lock map */

/* macros to extract info from a packed flag word */
#define UVM_PROTECTION(X)	((X) & UVM_PROT_MASK)
#define UVM_INHERIT(X)		(((X) & UVM_INH_MASK) >> 4)
#define UVM_MAXPROTECTION(X)	(((X) >> 8) & UVM_PROT_MASK)
#define UVM_ADVICE(X)		(((X) >> 12) & UVM_ADV_MASK)

/*
 * pack protection, max protection, inheritance, advice and flags into
 * a single flag word.  note: every argument is parenthesized so that
 * expression arguments (e.g. "a | b") expand correctly.
 */
#define UVM_MAPFLAG(PROT,MAXPROT,INH,ADVICE,FLAGS) \
	(((MAXPROT) << 8)|(PROT)|(INH)|((ADVICE) << 12)|(FLAGS))
|
|
|
|
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
/* magic offset value: offset not known(obj) or don't care(!obj) */
#define UVM_UNKNOWN_OFFSET	((voff_t) -1)

/*
 * the following defines are for uvm_km_kmemalloc's flags
 */
#define UVM_KMF_NOWAIT	0x1			/* matches M_NOWAIT */
#define UVM_KMF_VALLOC	0x2			/* allocate VA only */
#define UVM_KMF_TRYLOCK	UVM_FLAG_TRYLOCK	/* try locking only */
|
|
|
|
|
Add support for multiple memory free lists. There is at least one
default free list, and 0 - N additional free list, in order of descending
priority.
A new page allocation function, uvm_pagealloc_strat(), has been added,
providing three page allocation strategies:
- normal: high -> low priority free list walk, taking the
page off the first free list that has one.
- only: attempt to allocate a page only from the specified free
list, failing if that free list has none available.
- fallback: if `only' fails, fall back on `normal'.
uvm_pagealloc(...) is provided for normal use (and is a synonym for
uvm_pagealloc_strat(..., UVM_PGA_STRAT_NORMAL, 0); the free list argument
is ignored for the `normal' case).
uvm_page_physload() now specified which free list the pages will be
loaded onto. This means that some platforms which have multiple physical
memory segments may define additional vm_physsegs if they wish to break
individual physical segments into differing priorities.
Machine-dependent code must define _at least_ the following constants
in <machine/vmparam.h>:
VM_NFREELIST: the number of free lists the system will have
VM_FREELIST_DEFAULT: the default freelist (should always be 0,
but is defined in machdep code so that it's with all of the
other free list-related constants).
Additional free list names may be defined by machine-dependent code, but
they will only be used by machine-dependent code (e.g. for loading the
vm_physsegs).
1998-07-08 08:28:27 +04:00
|
|
|
/*
 * the following defines the strategies for uvm_pagealloc_strat()
 */
#define	UVM_PGA_STRAT_NORMAL	0	/* high -> low free list walk */
#define	UVM_PGA_STRAT_ONLY	1	/* only specified free list */
#define	UVM_PGA_STRAT_FALLBACK	2	/* ONLY falls back on NORMAL */

/*
 * flags for uvm_pagealloc_strat()
 */
#define UVM_PGA_USERESERVE	0x0001	/* ok to use reserve pages */
#define	UVM_PGA_ZERO		0x0002	/* returned page must be zero'd */
|
1999-04-11 08:04:04 +04:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
/*
 * the following defines are for ubc_alloc's flags
 */
#define UBC_READ	0x01
#define UBC_WRITE	0x02
#define UBC_FAULTBUSY	0x04
|
2000-11-27 11:39:39 +03:00
|
|
|
|
|
|
|
/*
 * flags for uvn_findpages().
 */
#define UFP_ALL		0x00	/* default: no restriction */
#define UFP_NOWAIT	0x01
#define UFP_NOALLOC	0x02
#define UFP_NOCACHE	0x04
#define UFP_NORDONLY	0x08
#define UFP_DIRTYONLY	0x10
#define UFP_BACKWARD	0x20
|
2000-11-27 11:39:39 +03:00
|
|
|
|
1999-07-18 01:35:49 +04:00
|
|
|
/*
 * lockflags that control the locking behavior of various functions.
 */
#define	UVM_LK_ENTER	0x00000001	/* map locked on entry */
#define	UVM_LK_EXIT	0x00000002	/* leave map locked on exit */
|
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
/*
 * structures
 */

/*
 * forward declarations of structure types referenced by the UVM
 * interface; only pointers to them appear here, so no definitions
 * are needed.
 */
struct core;
struct mount;
struct pglist;
struct proc;
struct ucred;
struct uio;
struct uvm_object;
struct vm_anon;
struct vmspace;
struct pmap;
struct vnode;
struct pool;
struct simplelock;
struct vm_map_entry;
struct vm_map;
struct vm_page;

extern struct pool *uvm_aiobuf_pool;
|
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
/*
 * uvmexp: global data structures that are exported to parts of the kernel
 * other than the vm system.
 */

struct uvmexp {
	/* vm_page constants */
	int pagesize;   /* size of a page (PAGE_SIZE): must be power of 2 */
	int pagemask;   /* page mask */
	int pageshift;  /* page shift */

	/* vm_page counters */
	int npages;     /* number of pages we manage */
	int free;       /* number of free pages */
	int active;     /* number of active pages */
	int inactive;   /* number of pages that we free'd but may want back */
	int paging;	/* number of pages in the process of being paged out */
	int wired;      /* number of wired pages */

	/*
	 * Adding anything before this line will break binary compatibility
	 * with top(1) on NetBSD 1.5.
	 */

	int ncolors;	/* number of page color buckets: must be p-o-2 */
	int colormask;	/* color bucket mask */

	int zeropages;		/* number of zero'd pages */
	int reserve_pagedaemon; /* number of pages reserved for pagedaemon */
	int reserve_kernel;	/* number of pages reserved for kernel */
	int anonpages;		/* number of pages used by anon mappings */
	int filepages;		/* number of pages used by cached file data */
	int execpages;		/* number of pages used by cached exec date */

	/* pageout params */
	int freemin;    /* min number of free pages */
	int freetarg;   /* target number of free pages */
	int inactarg;   /* target number of inactive pages */
	int wiredmax;   /* max number of wired pages */
	int anonmin;	/* min threshold for anon pages */
	int execmin;	/* min threshold for executable pages */
	int filemin;	/* min threshold for file pages */
	int anonminpct;	/* min percent anon pages */
	int execminpct;	/* min percent executable pages */
	int fileminpct;	/* min percent file pages */
	int anonmax;	/* max threshold for anon pages */
	int execmax;	/* max threshold for executable pages */
	int filemax;	/* max threshold for file pages */
	int anonmaxpct;	/* max percent anon pages */
	int execmaxpct;	/* max percent executable pages */
	int filemaxpct;	/* max percent file pages */

	/* swap */
	int nswapdev;	/* number of configured swap devices in system */
	int swpages;	/* number of PAGE_SIZE'ed swap pages */
	int swpginuse;	/* number of swap pages in use */
	int swpgonly;	/* number of swap pages in use, not also in RAM */
	int nswget;	/* number of times fault calls uvm_swap_get() */
	int nanon;	/* number total of anon's in system */
	int nanonneeded;/* number of anons currently needed */
	int nfreeanon;	/* number of free anon's */

	/* stat counters */
	int faults;		/* page fault count */
	int traps;		/* trap count */
	int intrs;		/* interrupt count */
	int swtch;		/* context switch count */
	int softs;		/* software interrupt count */
	int syscalls;		/* system calls */
	int pageins;		/* pagein operation count */
				/* pageouts are in pdpageouts below */
	int swapins;		/* swapins */
	int swapouts;		/* swapouts */
	int pgswapin;		/* pages swapped in */
	int pgswapout;		/* pages swapped out */
	int forks;  		/* forks */
	int forks_ppwait;	/* forks where parent waits */
	int forks_sharevm;	/* forks where vmspace is shared */
	int pga_zerohit;	/* pagealloc where zero wanted and zero
				   was available */
	int pga_zeromiss;	/* pagealloc where zero wanted and zero
				   not available */
	int zeroaborts;		/* number of times page zeroing was
				   aborted */
	int colorhit;		/* pagealloc where we got optimal color */
	int colormiss;		/* pagealloc where we didn't */

	/* fault subcounters */
	int fltnoram;	/* number of times fault was out of ram */
	int fltnoanon;	/* number of times fault was out of anons */
	int fltpgwait;	/* number of times fault had to wait on a page */
	int fltpgrele;	/* number of times fault found a released page */
	int fltrelck;	/* number of times fault relock called */
	int fltrelckok;	/* number of times fault relock is a success */
	int fltanget;	/* number of times fault gets anon page */
	int fltanretry;	/* number of times fault retrys an anon get */
	int fltamcopy;	/* number of times fault clears "needs copy" */
	int fltnamap;	/* number of times fault maps a neighbor anon page */
	int fltnomap;	/* number of times fault maps a neighbor obj page */
	int fltlget;	/* number of times fault does a locked pgo_get */
	int fltget;	/* number of times fault does an unlocked get */
	int flt_anon;	/* number of times fault anon (case 1a) */
	int flt_acow;	/* number of times fault anon cow (case 1b) */
	int flt_obj;	/* number of times fault is on object page (2a) */
	int flt_prcopy;	/* number of times fault promotes with copy (2b) */
	int flt_przero;	/* number of times fault promotes with zerofill (2b) */

	/* daemon counters */
	int pdwoke;	/* number of times daemon woke up */
	int pdrevs;	/* number of times daemon rev'd clock hand */
	int pdswout;	/* number of times daemon called for swapout */
	int pdfreed;	/* number of pages daemon freed since boot */
	int pdscans;	/* number of pages daemon scaned since boot */
	int pdanscan;	/* number of anonymous pages scanned by daemon */
	int pdobscan;	/* number of object pages scanned by daemon */
	int pdreact;	/* number of pages daemon reactivated since boot */
	int pdbusy;	/* number of times daemon found a busy page */
	int pdpageouts;	/* number of times daemon started a pageout */
	int pdpending;	/* number of times daemon got a pending pagout */
	int pddeact;	/* number of pages daemon deactivates */
	int pdreanon;	/* anon pages reactivated due to thresholds */
	int pdrefile;	/* file pages reactivated due to thresholds */
	int pdreexec;	/* executable pages reactivated due to thresholds */
};
|
|
|
|
|
2000-11-29 12:52:18 +03:00
|
|
|
/*
 * The following structure is 64-bit alignment safe.  New elements
 * should only be added to the end of this structure so binary
 * compatibility can be preserved.
 */
struct uvmexp_sysctl {
	int64_t	pagesize;
	int64_t	pagemask;
	int64_t	pageshift;
	int64_t	npages;
	int64_t	free;
	int64_t	active;
	int64_t	inactive;
	int64_t	paging;
	int64_t	wired;
	int64_t	zeropages;
	int64_t	reserve_pagedaemon;
	int64_t	reserve_kernel;
	int64_t	freemin;
	int64_t	freetarg;
	int64_t	inactarg;
	int64_t	wiredmax;
	int64_t	nswapdev;
	int64_t	swpages;
	int64_t	swpginuse;
	int64_t	swpgonly;
	int64_t	nswget;
	int64_t	nanon;
	int64_t	nanonneeded;
	int64_t	nfreeanon;
	int64_t	faults;
	int64_t	traps;
	int64_t	intrs;
	int64_t	swtch;
	int64_t	softs;
	int64_t	syscalls;
	int64_t	pageins;
	int64_t	swapins;
	int64_t	swapouts;
	int64_t	pgswapin;
	int64_t	pgswapout;
	int64_t	forks;
	int64_t	forks_ppwait;
	int64_t	forks_sharevm;
	int64_t	pga_zerohit;
	int64_t	pga_zeromiss;
	int64_t	zeroaborts;
	/* fault subcounters; see the fault counters in struct uvmexp above */
	int64_t	fltnoram;
	int64_t	fltnoanon;
	int64_t	fltpgwait;
	int64_t	fltpgrele;
	int64_t	fltrelck;
	int64_t	fltrelckok;
	int64_t	fltanget;
	int64_t	fltanretry;
	int64_t	fltamcopy;
	int64_t	fltnamap;
	int64_t	fltnomap;
	int64_t	fltlget;
	int64_t	fltget;
	int64_t	flt_anon;
	int64_t	flt_acow;
	int64_t	flt_obj;
	int64_t	flt_prcopy;
	int64_t	flt_przero;
	/* pagedaemon counters; see the daemon counters in struct uvmexp above */
	int64_t	pdwoke;
	int64_t	pdrevs;
	int64_t	pdswout;
	int64_t	pdfreed;
	int64_t	pdscans;
	int64_t	pdanscan;
	int64_t	pdobscan;
	int64_t	pdreact;
	int64_t	pdbusy;
	int64_t	pdpageouts;
	int64_t	pdpending;
	int64_t	pddeact;
	int64_t	anonpages;
	int64_t	filepages;
	int64_t	execpages;
	int64_t	colorhit;
	int64_t	colormiss;
	int64_t	ncolors;
};
|
|
|
|
|
1999-06-21 21:25:11 +04:00
|
|
|
#ifdef _KERNEL
/* the single, global instance of the UVM statistics (struct uvmexp above) */
extern struct uvmexp uvmexp;
#endif
|
|
|
|
|
|
|
|
/*
 * Finally, bring in standard UVM headers.
 * NOTE: the order below is significant (e.g. uvm_prot.h before uvm_page.h);
 * do not resort these includes.
 */
#include <sys/vmmeter.h>
#include <sys/queue.h>
#include <uvm/uvm_param.h>
#include <sys/lock.h>
#include <uvm/uvm_prot.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_pmap.h>
#include <uvm/uvm_map.h>
#include <uvm/uvm_fault.h>
#include <uvm/uvm_pager.h>
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Shareable process virtual address space.
|
|
|
|
* May eventually be merged with vm_map.
|
|
|
|
* Several fields are temporary (text, data stuff).
|
|
|
|
*/
|
|
|
|
struct vmspace {
|
|
|
|
struct vm_map vm_map; /* VM address map */
|
|
|
|
int vm_refcnt; /* number of references */
|
|
|
|
caddr_t vm_shm; /* SYS5 shared memory private data XXX */
|
|
|
|
/* we copy from vm_startcopy to the end of the structure on fork */
|
|
|
|
#define vm_startcopy vm_rssize
|
2001-04-25 22:09:52 +04:00
|
|
|
segsz_t vm_rssize; /* current resident set size in pages */
|
2000-06-27 20:16:43 +04:00
|
|
|
segsz_t vm_swrss; /* resident set size before last swap */
|
|
|
|
segsz_t vm_tsize; /* text size (pages) XXX */
|
|
|
|
segsz_t vm_dsize; /* data size (pages) XXX */
|
|
|
|
segsz_t vm_ssize; /* stack size (pages) */
|
|
|
|
caddr_t vm_taddr; /* user virtual address of text XXX */
|
|
|
|
caddr_t vm_daddr; /* user virtual address of data XXX */
|
|
|
|
caddr_t vm_maxsaddr; /* user VA at max stack growth */
|
2000-09-28 23:05:06 +04:00
|
|
|
caddr_t vm_minsaddr; /* user VA at top of stack */
|
2000-06-27 20:16:43 +04:00
|
|
|
};
|
|
|
|
|
|
|
|
#ifdef _KERNEL
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2001-12-10 04:52:26 +03:00
|
|
|
/*
|
|
|
|
* used to keep state while iterating over the map for a core dump.
|
|
|
|
*/
|
|
|
|
struct uvm_coredump_state {
|
|
|
|
void *cookie; /* opaque for the caller */
|
|
|
|
vaddr_t start; /* start of region */
|
|
|
|
vaddr_t end; /* end of region */
|
|
|
|
vm_prot_t prot; /* protection of region */
|
|
|
|
int flags; /* flags; see below */
|
|
|
|
};
|
|
|
|
|
|
|
|
#define UVM_COREDUMP_STACK 0x01 /* region is user stack */
|
|
|
|
#define UVM_COREDUMP_NODUMP 0x02 /* don't actually dump this region */
|
|
|
|
|
2000-06-26 18:20:25 +04:00
|
|
|
/*
 * the various kernel maps, owned by MD code
 */
extern struct vm_map *exec_map;
extern struct vm_map *kernel_map;
extern struct vm_map *kmem_map;
extern struct vm_map *mb_map;
extern struct vm_map *phys_map;
|
2000-06-26 18:20:25 +04:00
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
/*
|
|
|
|
* macros
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* zalloc zeros memory, alloc does not */
#define uvm_km_zalloc(MAP,SIZE) uvm_km_alloc1(MAP,SIZE,TRUE)
#define uvm_km_alloc(MAP,SIZE)  uvm_km_alloc1(MAP,SIZE,FALSE)
|
|
|
|
|
1999-06-21 21:25:11 +04:00
|
|
|
#endif /* _KERNEL */
|
|
|
|
|
2000-06-27 20:16:43 +04:00
|
|
|
/* resident page count of a vmspace, as reported by its pmap */
#define vm_resident_count(vm) (pmap_resident_count((vm)->vm_map.pmap))
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2000-06-27 13:00:14 +04:00
|
|
|
/* forward declarations of structures used in the prototypes below */
struct buf;
struct loadavg;
struct proc;
struct pmap;
struct vmspace;
struct vmtotal;
struct mount;
struct vnode;
struct core;
|
|
|
|
|
1999-06-21 21:25:11 +04:00
|
|
|
#ifdef _KERNEL
|
|
|
|
|
2000-06-27 13:00:14 +04:00
|
|
|
/* vm_machdep.c */
|
|
|
|
void vmapbuf __P((struct buf *, vsize_t));
|
|
|
|
void vunmapbuf __P((struct buf *, vsize_t));
|
|
|
|
void pagemove __P((caddr_t, caddr_t, size_t));
|
|
|
|
#ifndef cpu_swapin
|
|
|
|
void cpu_swapin __P((struct proc *));
|
|
|
|
#endif
|
|
|
|
#ifndef cpu_swapout
|
|
|
|
void cpu_swapout __P((struct proc *));
|
|
|
|
#endif
|
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
/* uvm_aobj.c */
|
1998-08-13 06:10:37 +04:00
|
|
|
struct uvm_object *uao_create __P((vsize_t, int));
|
1998-02-05 09:25:08 +03:00
|
|
|
void uao_detach __P((struct uvm_object *));
|
2000-01-11 09:57:49 +03:00
|
|
|
void uao_detach_locked __P((struct uvm_object *));
|
1998-02-05 09:25:08 +03:00
|
|
|
void uao_reference __P((struct uvm_object *));
|
2000-01-11 09:57:49 +03:00
|
|
|
void uao_reference_locked __P((struct uvm_object *));
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
/* uvm_bio.c */
|
|
|
|
void ubc_init __P((void));
|
|
|
|
void * ubc_alloc __P((struct uvm_object *, voff_t, vsize_t *,
|
|
|
|
int));
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
void ubc_release __P((void *, int));
|
2000-11-27 11:39:39 +03:00
|
|
|
void ubc_flush __P((struct uvm_object *, voff_t, voff_t));
|
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
/* uvm_fault.c */
|
2001-06-02 22:09:08 +04:00
|
|
|
int uvm_fault __P((struct vm_map *, vaddr_t, vm_fault_t,
|
2000-11-27 11:39:39 +03:00
|
|
|
vm_prot_t));
|
1998-02-05 09:25:08 +03:00
|
|
|
/* handle a page fault */
|
|
|
|
|
|
|
|
/* uvm_glue.c */
|
|
|
|
#if defined(KGDB)
|
1998-05-09 19:04:39 +04:00
|
|
|
void uvm_chgkprot __P((caddr_t, size_t, int));
|
1998-02-05 09:25:08 +03:00
|
|
|
#endif
|
2001-12-10 04:52:26 +03:00
|
|
|
int uvm_coredump_walkmap __P((struct proc *,
|
|
|
|
struct vnode *, struct ucred *,
|
|
|
|
int (*)(struct proc *, struct vnode *,
|
|
|
|
struct ucred *,
|
|
|
|
struct uvm_coredump_state *), void *));
|
1999-05-14 01:58:32 +04:00
|
|
|
void uvm_fork __P((struct proc *, struct proc *, boolean_t,
|
2000-05-28 09:48:59 +04:00
|
|
|
void *, size_t, void (*)(void *), void *));
|
1998-09-09 03:44:21 +04:00
|
|
|
void uvm_exit __P((struct proc *));
|
1998-02-05 09:25:08 +03:00
|
|
|
void uvm_init_limits __P((struct proc *));
|
1998-05-09 19:04:39 +04:00
|
|
|
boolean_t uvm_kernacc __P((caddr_t, size_t, int));
|
1998-03-30 10:24:42 +04:00
|
|
|
__dead void uvm_scheduler __P((void)) __attribute__((noreturn));
|
1998-02-05 09:25:08 +03:00
|
|
|
void uvm_swapin __P((struct proc *));
|
1998-05-09 19:04:39 +04:00
|
|
|
boolean_t uvm_useracc __P((caddr_t, size_t, int));
|
1999-06-17 19:47:22 +04:00
|
|
|
int uvm_vslock __P((struct proc *, caddr_t, size_t,
|
1999-05-26 05:05:24 +04:00
|
|
|
vm_prot_t));
|
1998-05-09 19:04:39 +04:00
|
|
|
void uvm_vsunlock __P((struct proc *, caddr_t, size_t));
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
|
|
|
|
/* uvm_init.c */
|
2001-05-25 08:06:11 +04:00
|
|
|
void uvm_init __P((void));
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
/* uvm_io.c */
|
2001-06-02 22:09:08 +04:00
|
|
|
int uvm_io __P((struct vm_map *, struct uio *));
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
/* uvm_km.c */
|
2001-06-02 22:09:08 +04:00
|
|
|
vaddr_t uvm_km_alloc1 __P((struct vm_map *, vsize_t,
|
|
|
|
boolean_t));
|
|
|
|
void uvm_km_free __P((struct vm_map *, vaddr_t, vsize_t));
|
|
|
|
void uvm_km_free_wakeup __P((struct vm_map *, vaddr_t,
|
|
|
|
vsize_t));
|
|
|
|
vaddr_t uvm_km_kmemalloc __P((struct vm_map *, struct
|
|
|
|
uvm_object *, vsize_t, int));
|
|
|
|
struct vm_map *uvm_km_suballoc __P((struct vm_map *, vaddr_t *,
|
|
|
|
vaddr_t *, vsize_t, int, boolean_t,
|
|
|
|
struct vm_map *));
|
|
|
|
vaddr_t uvm_km_valloc __P((struct vm_map *, vsize_t));
|
|
|
|
vaddr_t uvm_km_valloc_align __P((struct vm_map *, vsize_t,
|
|
|
|
vsize_t));
|
|
|
|
vaddr_t uvm_km_valloc_wait __P((struct vm_map *, vsize_t));
|
|
|
|
vaddr_t uvm_km_valloc_prefer_wait __P((struct vm_map *, vsize_t,
|
|
|
|
voff_t));
|
|
|
|
vaddr_t uvm_km_alloc_poolpage1 __P((struct vm_map *,
|
|
|
|
struct uvm_object *, boolean_t));
|
|
|
|
void uvm_km_free_poolpage1 __P((struct vm_map *, vaddr_t));
|
|
|
|
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
#define uvm_km_alloc_poolpage(waitok) \
|
|
|
|
uvm_km_alloc_poolpage1(kmem_map, NULL, (waitok))
|
|
|
|
#define uvm_km_free_poolpage(addr) \
|
2001-06-02 22:09:08 +04:00
|
|
|
uvm_km_free_poolpage1(kmem_map, (addr))
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
/* uvm_map.c */
|
2001-06-02 22:09:08 +04:00
|
|
|
int uvm_map __P((struct vm_map *, vaddr_t *, vsize_t,
|
2000-09-13 19:00:15 +04:00
|
|
|
struct uvm_object *, voff_t, vsize_t,
|
|
|
|
uvm_flag_t));
|
2001-06-02 22:09:08 +04:00
|
|
|
int uvm_map_pageable __P((struct vm_map *, vaddr_t,
|
1999-07-18 01:35:49 +04:00
|
|
|
vaddr_t, boolean_t, int));
|
2001-06-02 22:09:08 +04:00
|
|
|
int uvm_map_pageable_all __P((struct vm_map *, int,
|
|
|
|
vsize_t));
|
|
|
|
boolean_t uvm_map_checkprot __P((struct vm_map *, vaddr_t,
|
1998-08-13 06:10:37 +04:00
|
|
|
vaddr_t, vm_prot_t));
|
2001-06-02 22:09:08 +04:00
|
|
|
int uvm_map_protect __P((struct vm_map *, vaddr_t,
|
1998-08-13 06:10:37 +04:00
|
|
|
vaddr_t, vm_prot_t, boolean_t));
|
2001-08-16 05:37:50 +04:00
|
|
|
struct vmspace *uvmspace_alloc __P((vaddr_t, vaddr_t));
|
1998-03-27 04:47:06 +03:00
|
|
|
void uvmspace_init __P((struct vmspace *, struct pmap *,
|
2001-08-16 05:37:50 +04:00
|
|
|
vaddr_t, vaddr_t));
|
2001-02-06 20:01:51 +03:00
|
|
|
void uvmspace_exec __P((struct proc *, vaddr_t, vaddr_t));
|
1998-02-05 09:25:08 +03:00
|
|
|
struct vmspace *uvmspace_fork __P((struct vmspace *));
|
|
|
|
void uvmspace_free __P((struct vmspace *));
|
|
|
|
void uvmspace_share __P((struct proc *, struct proc *));
|
|
|
|
void uvmspace_unshare __P((struct proc *));
|
|
|
|
|
|
|
|
|
|
|
|
/* uvm_meter.c */
|
|
|
|
void uvm_meter __P((void));
|
2001-05-25 08:06:11 +04:00
|
|
|
int uvm_sysctl __P((int *, u_int, void *, size_t *,
|
1998-02-05 09:25:08 +03:00
|
|
|
void *, size_t, struct proc *));
|
|
|
|
|
|
|
|
/* uvm_mmap.c */
|
2001-06-02 22:09:08 +04:00
|
|
|
int uvm_mmap __P((struct vm_map *, vaddr_t *, vsize_t,
|
2001-05-25 08:06:11 +04:00
|
|
|
vm_prot_t, vm_prot_t, int,
|
2001-03-15 09:10:32 +03:00
|
|
|
void *, voff_t, vsize_t));
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
/* uvm_page.c */
|
Add support for multiple memory free lists. There is at least one
default free list, and 0 - N additional free list, in order of descending
priority.
A new page allocation function, uvm_pagealloc_strat(), has been added,
providing three page allocation strategies:
- normal: high -> low priority free list walk, taking the
page off the first free list that has one.
- only: attempt to allocate a page only from the specified free
list, failing if that free list has none available.
- fallback: if `only' fails, fall back on `normal'.
uvm_pagealloc(...) is provided for normal use (and is a synonym for
uvm_pagealloc_strat(..., UVM_PGA_STRAT_NORMAL, 0); the free list argument
is ignored for the `normal' case).
uvm_page_physload() now specified which free list the pages will be
loaded onto. This means that some platforms which have multiple physical
memory segments may define additional vm_physsegs if they wish to break
individual physical segments into differing priorities.
Machine-dependent code must define _at least_ the following constants
in <machine/vmparam.h>:
VM_NFREELIST: the number of free lists the system will have
VM_FREELIST_DEFAULT: the default freelist (should always be 0,
but is defined in machdep code so that it's with all of the
other free list-related constants).
Additional free list names may be defined by machine-dependent code, but
they will only be used by machine-dependent code (e.g. for loading the
vm_physsegs).
1998-07-08 08:28:27 +04:00
|
|
|
struct vm_page *uvm_pagealloc_strat __P((struct uvm_object *,
|
2000-03-27 00:54:45 +04:00
|
|
|
voff_t, struct vm_anon *, int, int, int));
|
1999-04-11 08:04:04 +04:00
|
|
|
#define uvm_pagealloc(obj, off, anon, flags) \
|
|
|
|
uvm_pagealloc_strat((obj), (off), (anon), (flags), \
|
|
|
|
UVM_PGA_STRAT_NORMAL, 0)
|
2001-05-25 08:06:11 +04:00
|
|
|
void uvm_pagerealloc __P((struct vm_page *,
|
2000-03-27 00:54:45 +04:00
|
|
|
struct uvm_object *, voff_t));
|
1998-08-13 06:10:37 +04:00
|
|
|
/* Actually, uvm_page_physload takes PF#s which need their own type */
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
void uvm_page_physload __P((paddr_t, paddr_t, paddr_t,
|
|
|
|
paddr_t, int));
|
1998-02-05 09:25:08 +03:00
|
|
|
void uvm_setpagesize __P((void));
|
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
/* uvm_pager.c */
|
|
|
|
void uvm_aio_biodone1 __P((struct buf *));
|
|
|
|
void uvm_aio_biodone __P((struct buf *));
|
|
|
|
void uvm_aio_aiodone __P((struct buf *));
|
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
/* uvm_pdaemon.c */
|
2000-08-13 02:41:53 +04:00
|
|
|
void uvm_pageout __P((void *));
|
2000-11-27 11:39:39 +03:00
|
|
|
void uvm_aiodone_daemon __P((void *));
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
/* uvm_pglist.c */
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
int uvm_pglistalloc __P((psize_t, paddr_t, paddr_t,
|
|
|
|
paddr_t, paddr_t, struct pglist *, int, int));
|
1998-02-05 09:25:08 +03:00
|
|
|
void uvm_pglistfree __P((struct pglist *));
|
|
|
|
|
|
|
|
/* uvm_swap.c */
|
|
|
|
void uvm_swap_init __P((void));
|
|
|
|
|
|
|
|
/* uvm_unix.c */
|
1998-08-13 06:10:37 +04:00
|
|
|
int uvm_grow __P((struct proc *, vaddr_t));
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
/* uvm_user.c */
|
2001-06-02 22:09:08 +04:00
|
|
|
void uvm_deallocate __P((struct vm_map *, vaddr_t, vsize_t));
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
/* uvm_vnode.c */
|
2000-03-27 00:54:45 +04:00
|
|
|
void uvm_vnp_setsize __P((struct vnode *, voff_t));
|
1998-02-05 09:25:08 +03:00
|
|
|
void uvm_vnp_sync __P((struct mount *));
|
|
|
|
struct uvm_object *uvn_attach __P((void *, vm_prot_t));
|
2000-11-27 11:39:39 +03:00
|
|
|
void uvn_findpages __P((struct uvm_object *, voff_t,
|
|
|
|
int *, struct vm_page **, int));
|
|
|
|
void uvm_vnp_zerorange __P((struct vnode *, off_t, size_t));
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2000-02-11 22:22:52 +03:00
|
|
|
/* kern_malloc.c */
|
|
|
|
void kmeminit_nkmempages __P((void));
|
|
|
|
void kmeminit __P((void));
|
|
|
|
extern int nkmempages;
|
|
|
|
|
1999-06-21 21:25:11 +04:00
|
|
|
#endif /* _KERNEL */
|
1998-08-13 06:10:37 +04:00
|
|
|
|
1999-06-21 21:25:11 +04:00
|
|
|
#endif /* _UVM_UVM_EXTERN_H_ */
|