/*	$NetBSD: uvm_page.h,v 1.56 2009/01/16 02:33:14 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.h   7.3 (Berkeley) 4/21/91
 * from: Id: uvm_page.h,v 1.1.2.6 1998/02/04 02:31:42 chuck Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_PAGE_H_
#define _UVM_UVM_PAGE_H_

/*
 * uvm_page.h
 */

/*
 * Resident memory system definitions.
 */

/*
 * Management of resident (logical) pages.
 *
 * A small structure is kept for each resident
 * page, indexed by page number.  Each structure
 * is an element of several lists:
 *
 *	A red-black tree rooted with the containing
 *	object is used to quickly perform object+
 *	offset lookups.
 *
 *	A list of all pages for a given object,
 *	so they can be quickly deactivated at
 *	time of deallocation.
 *
 *	An ordered list of pages due for pageout.
 *
 * In addition, the structure contains the object
 * and offset to which this page belongs (for pageout),
 * and sundry status bits.
 *
 * Fields in this structure are locked either by the lock on the
 * object that the page belongs to (O) or by the lock on the page
 * queues (P) [or both].
 */

/*
 * locking note: the mach version of this data structure had bit
 * fields for the flags, and the bit fields were divided into two
 * items (depending on who locked what).  some time, in BSD, the bit
 * fields were dumped and all the flags were lumped into one short.
 * that is fine for a single threaded uniprocessor OS, but bad if you
 * want to actually make use of locking.  so, we've separated things
 * back out again.
 *
 * note the page structure has no lock of its own.
 */

#include <uvm/uvm_extern.h>
#include <uvm/uvm_pglist.h>

#include <sys/rb.h>

struct vm_page {
	struct rb_node		rb_node;	/* tree of pages in obj (O) */

	union {
		TAILQ_ENTRY(vm_page) queue;
		LIST_ENTRY(vm_page) list;
	} pageq;				/* queue info for FIFO
						 * queue or free list (P) */
	union {
		TAILQ_ENTRY(vm_page) queue;
		LIST_ENTRY(vm_page) list;
	} listq;				/* pages in same object (O) */

	struct vm_anon		*uanon;		/* anon (O,P) */
	struct uvm_object	*uobject;	/* object (O,P) */
	voff_t			offset;		/* offset into object (O,P) */
	uint16_t		flags;		/* object flags [O] */
	uint16_t		loan_count;	/* number of active loans
						 * to read: [O or P]
						 * to modify: [O _and_ P] */
	uint16_t		wire_count;	/* wired down map refs [P] */
	uint16_t		pqflags;	/* page queue flags [P] */
	paddr_t			phys_addr;	/* physical address of page */

#ifdef __HAVE_VM_PAGE_MD
	struct vm_page_md	mdpage;		/* pmap-specific data */
#endif

#if defined(UVM_PAGE_TRKOWN)
	/* debugging fields to track page ownership */
	pid_t			owner;		/* proc that set PG_BUSY */
	lwpid_t			lowner;		/* lwp that set PG_BUSY */
	const char		*owner_tag;	/* why it was set busy */
#endif
};
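
/*
 * Example (an illustrative sketch, not a complete kernel fragment;
 * "uobj" and "off" stand for a caller-supplied object and offset):
 * per the (O)/[O] annotations above, the owning object must be locked
 * before a page's flags are inspected.
 *
 *	struct vm_page *pg;
 *
 *	pg = uvm_pagelookup(uobj, off);
 *	    (called with the owning object locked)
 *	if (pg != NULL && (pg->flags & PG_BUSY) == 0) {
 *		... the page is resident and not busy ...
 *	}
 */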

/*
 * These are the flags defined for vm_page.
 */

/*
 * locking rules:
 *   PG_ ==> locked by object lock
 *   PQ_ ==> locked by page queue lock
 *   PQ_FREE is locked by the free queue lock and is mutually exclusive
 *   with all other PQs
 *
 * PG_ZERO is used to indicate that a page has been pre-zero'd.  This flag
 * is only set when the page is on no queues, and is cleared when the page
 * is placed on the free list.
 */

#define	PG_BUSY		0x0001		/* page is locked */
#define	PG_WANTED	0x0002		/* someone is waiting for page */
#define	PG_TABLED	0x0004		/* page is in VP table */
#define	PG_CLEAN	0x0008		/* page has not been modified */
#define	PG_PAGEOUT	0x0010		/* page to be freed for pagedaemon */
#define	PG_RELEASED	0x0020		/* page to be freed when unbusied */
#define	PG_FAKE		0x0040		/* page is not yet initialized */
#define	PG_RDONLY	0x0080		/* page must be mapped read-only */
#define	PG_ZERO		0x0100		/* page is pre-zero'd */

#define	PG_PAGER1	0x1000		/* pager-specific flag */
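
/*
 * Example (an illustrative sketch; assumes the kernel's snprintb(9) is
 * available -- older trees use bitmask_snprintf(9) with the same "\20"
 * style format): the UVM_PGFLAGBITS string defined just below decodes a
 * page's flags word for debug output.
 *
 *	char bits[128];
 *
 *	snprintb(bits, sizeof(bits), UVM_PGFLAGBITS, pg->flags);
 *	printf("page flags: %s\n", bits);
 */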

#define	UVM_PGFLAGBITS \
	"\20\1BUSY\2WANTED\3TABLED\4CLEAN\5PAGEOUT\6RELEASED\7FAKE\10RDONLY" \
	"\11ZERO\15PAGER1"

#define	PQ_FREE		0x0001		/* page is on free list */
#define	PQ_ANON		0x0002		/* page is part of an anon, rather
					   than an uvm_object */
#define	PQ_AOBJ		0x0004		/* page is part of an anonymous
					   uvm_object */
#define	PQ_SWAPBACKED	(PQ_ANON|PQ_AOBJ)
#define	PQ_READAHEAD	0x0008	/* read-ahead but has not been "hit" yet */

#define	PQ_PRIVATE1	0x0100
#define	PQ_PRIVATE2	0x0200
#define	PQ_PRIVATE3	0x0400
#define	PQ_PRIVATE4	0x0800
#define	PQ_PRIVATE5	0x1000
#define	PQ_PRIVATE6	0x2000
#define	PQ_PRIVATE7	0x4000
#define	PQ_PRIVATE8	0x8000

#define	UVM_PQFLAGBITS \
	"\20\1FREE\2ANON\3AOBJ\4READAHEAD" \
	"\11PRIVATE1\12PRIVATE2\13PRIVATE3\14PRIVATE4" \
	"\15PRIVATE5\16PRIVATE6\17PRIVATE7\20PRIVATE8"

/*
 * physical memory layout structure
 *
 * MD vmparam.h must #define:
 *   VM_PHYSSEG_MAX = max number of physical memory segments we support
 *		   (if this is "1" then we revert to a "contig" case)
 *   VM_PHYSSEG_STRAT: memory sort/search options (for VM_PHYSSEG_MAX > 1)
 *	- VM_PSTRAT_RANDOM:   linear search (random order)
 *	- VM_PSTRAT_BSEARCH:  binary search (sorted by address)
 *	- VM_PSTRAT_BIGFIRST: linear search (sorted by largest segment first)
 *	- others?
 *   XXXCDC: eventually we should purge all left-over global variables...
 */
#define	VM_PSTRAT_RANDOM	1
#define	VM_PSTRAT_BSEARCH	2
#define	VM_PSTRAT_BIGFIRST	3
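
/*
 * Example (an illustrative sketch; the values are hypothetical): a
 * machine-dependent <machine/vmparam.h> might configure the physical
 * segment code as
 *
 *	#define VM_PHYSSEG_MAX		5
 *	#define VM_PHYSSEG_STRAT	VM_PSTRAT_BSEARCH
 */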

/*
 * vm_physseg: describes one segment of physical memory
 */
struct vm_physseg {
	paddr_t	start;			/* PF# of first page in segment */
	paddr_t	end;			/* (PF# of last page in segment) + 1 */
	paddr_t	avail_start;		/* PF# of first free page in segment */
	paddr_t	avail_end;		/* (PF# of last free page in segment) + 1 */
	int	free_list;		/* which free list they belong on */
	struct	vm_page *pgs;		/* vm_page structures (from start) */
	struct	vm_page *lastpg;	/* vm_page structure for end */
#ifdef __HAVE_PMAP_PHYSSEG
	struct	pmap_physseg pmseg;	/* pmap specific (MD) data */
#endif
};

#ifdef _KERNEL

/*
 * globals
 */

extern bool vm_page_zero_enable;

/*
 * physical memory config is stored in vm_physmem.
 */

extern struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];
extern int vm_nphysseg;
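
/*
 * Example (an illustrative sketch): walking the configured physical
 * segments via the globals above.
 *
 *	int lcv;
 *
 *	for (lcv = 0; lcv < vm_nphysseg; lcv++)
 *		printf("seg %d: PF# %#llx-%#llx\n", lcv,
 *		    (unsigned long long)vm_physmem[lcv].start,
 *		    (unsigned long long)vm_physmem[lcv].end);
 */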

/*
 * prototypes: the following prototypes define the interface to pages
 */

void uvm_page_init(vaddr_t *, vaddr_t *);
#if defined(UVM_PAGE_TRKOWN)
void uvm_page_own(struct vm_page *, const char *);
#endif
#if !defined(PMAP_STEAL_MEMORY)
bool uvm_page_physget(paddr_t *);
#endif
void uvm_page_recolor(int);
void uvm_pageidlezero(void);

void uvm_pageactivate(struct vm_page *);
vaddr_t uvm_pageboot_alloc(vsize_t);
void uvm_pagecopy(struct vm_page *, struct vm_page *);
void uvm_pagedeactivate(struct vm_page *);
void uvm_pagedequeue(struct vm_page *);
void uvm_pageenqueue(struct vm_page *);
void uvm_pagefree(struct vm_page *);
void uvm_page_unbusy(struct vm_page **, int);
struct vm_page *uvm_pagelookup(struct uvm_object *, voff_t);
void uvm_pageunwire(struct vm_page *);
void uvm_pagewait(struct vm_page *, int);
void uvm_pagewake(struct vm_page *);
void uvm_pagewire(struct vm_page *);
void uvm_pagezero(struct vm_page *);

int uvm_page_lookup_freelist(struct vm_page *);

static struct vm_page *PHYS_TO_VM_PAGE(paddr_t);
static int vm_physseg_find(paddr_t, int *);

/*
 * macros
 */

#define UVM_PAGE_TREE_PENALTY	4	/* XXX: a guess */

#define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

/*
 * Compute the page color bucket for a given page.
 */
#define	VM_PGCOLOR_BUCKET(pg) \
	(atop(VM_PAGE_TO_PHYS((pg))) & uvmexp.colormask)

/*
 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
 */

/*
 * vm_physseg_find: find vm_physseg structure that belongs to a PA
 */
static __inline int
vm_physseg_find(paddr_t pframe, int *offp)
{
#if VM_PHYSSEG_MAX == 1

	/* 'contig' case */
	if (pframe >= vm_physmem[0].start && pframe < vm_physmem[0].end) {
		if (offp)
			*offp = pframe - vm_physmem[0].start;
		return(0);
	}
	return(-1);

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	/* binary search for it */
	u_int	start, len, try;

	/*
	 * if try is too large (thus target is less than try) we reduce
	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
	 *
	 * if the try is too small (thus target is greater than try) then
	 * we set the new start to be (try + 1).  this means we need to
	 * reduce the length to (round(len/2) - 1).
	 *
	 * note "adjust" below which takes advantage of the fact that
	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
	 * for any value of len we may have
	 */
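	/*
	 * (worked example of that identity: len == 5 gives
	 *  round(5/2) - 1 == 2 == trunc((5 - 1) / 2), and len == 6 gives
	 *  round(6/2) - 1 == 2 == trunc((6 - 1) / 2))
	 */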

	for (start = 0, len = vm_nphysseg ; len != 0 ; len = len / 2) {
		try = start + (len / 2);	/* try in the middle */

		/* start past our try? */
		if (pframe >= vm_physmem[try].start) {
			/* was try correct? */
			if (pframe < vm_physmem[try].end) {
				if (offp)
					*offp = pframe - vm_physmem[try].start;
				return(try);		/* got it */
			}
			start = try + 1;	/* next time, start here */
			len--;			/* "adjust" */
		} else {
			/*
			 * pframe before try, just reduce length of
			 * region, done in "for" loop
			 */
		}
	}
	return(-1);

#else
	/* linear search for it */
	int	lcv;

	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		if (pframe >= vm_physmem[lcv].start &&
		    pframe < vm_physmem[lcv].end) {
			if (offp)
				*offp = pframe - vm_physmem[lcv].start;
			return(lcv);		/* got it */
		}
	}
	return(-1);

#endif
}

/*
 * IS_VM_PHYSADDR: only used by mips/pmax/pica trap/pmap.
 */

#define IS_VM_PHYSADDR(PA) (vm_physseg_find(atop(PA), NULL) != -1)

/*
 * PHYS_TO_VM_PAGE: find vm_page for a PA.  used by MI code to get vm_pages
 * back from an I/O mapping (ugh!).  used in some MD code as well.
 */
static __inline struct vm_page *
PHYS_TO_VM_PAGE(paddr_t pa)
{
	paddr_t pf = atop(pa);
	int	off;
	int	psi;

	psi = vm_physseg_find(pf, &off);
	if (psi != -1)
		return(&vm_physmem[psi].pgs[off]);
	return(NULL);
}
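
/*
 * Example (an illustrative sketch; "pa" stands for a caller-supplied
 * physical address): checking whether an address is managed memory and
 * getting its vm_page back.
 *
 *	struct vm_page *pg;
 *
 *	if (IS_VM_PHYSADDR(pa)) {
 *		pg = PHYS_TO_VM_PAGE(pa);
 *		KASSERT(pg != NULL);
 *		... pa is backed by the managed page "pg" ...
 *	}
 */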

#define VM_PAGE_IS_FREE(entry)  ((entry)->pqflags & PQ_FREE)
#define	VM_FREE_PAGE_TO_CPU(pg)	((struct uvm_cpu *)((uintptr_t)pg->offset))

#ifdef DEBUG
void uvm_pagezerocheck(struct vm_page *);
#endif /* DEBUG */

#endif /* _KERNEL */

#endif /* _UVM_UVM_PAGE_H_ */