2008-12-03 14:43:51 +03:00
|
|
|
/* $NetBSD: uvm_amap.c,v 1.85 2008/12/03 11:43:51 ad Exp $ */
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
*
|
|
|
|
* Copyright (c) 1997 Charles D. Cranor and Washington University.
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. All advertising materials mentioning features or use of this software
|
|
|
|
* must display the following acknowledgement:
|
|
|
|
* This product includes software developed by Charles D. Cranor and
|
|
|
|
* Washington University.
|
|
|
|
* 4. The name of the author may not be used to endorse or promote products
|
|
|
|
* derived from this software without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
|
|
|
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
|
|
|
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
|
|
|
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
|
|
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
|
|
|
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
|
|
|
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
1999-01-28 17:46:27 +03:00
|
|
|
* uvm_amap.c: amap operations
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
|
|
|
|
1999-01-28 17:46:27 +03:00
|
|
|
/*
|
|
|
|
* this file contains functions that perform operations on amaps. see
|
|
|
|
* uvm_amap.h for a brief explanation of the role of amaps in uvm.
|
|
|
|
*/
|
|
|
|
|
2001-11-10 10:36:59 +03:00
|
|
|
#include <sys/cdefs.h>
|
2008-12-03 14:43:51 +03:00
|
|
|
__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.85 2008/12/03 11:43:51 ad Exp $");
|
1999-01-28 17:46:27 +03:00
|
|
|
|
1998-02-10 17:08:44 +03:00
|
|
|
#include "opt_uvmhist.h"
|
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
|
|
|
#include <sys/proc.h>
|
2000-11-25 09:27:59 +03:00
|
|
|
#include <sys/kernel.h>
|
2006-06-25 12:03:46 +04:00
|
|
|
#include <sys/kmem.h>
|
1998-08-29 05:05:28 +04:00
|
|
|
#include <sys/pool.h>
|
2008-12-03 14:43:51 +03:00
|
|
|
#include <sys/atomic.h>
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
#include <uvm/uvm.h>
|
|
|
|
#include <uvm/uvm_swap.h>
|
|
|
|
|
1998-08-29 05:05:28 +04:00
|
|
|
/*
|
2007-12-08 18:46:31 +03:00
|
|
|
 * cache for allocation of vm_amap structures.  note that in order to
|
|
|
|
* avoid an endless loop, the amap cache's allocator cannot allocate
|
1999-01-28 17:46:27 +03:00
|
|
|
* memory from an amap (it currently goes through the kernel uobj, so
|
|
|
|
* we are ok).
|
1998-08-29 05:05:28 +04:00
|
|
|
*/
|
2007-12-08 18:46:31 +03:00
|
|
|
static struct pool_cache uvm_amap_cache;
|
2007-07-21 23:21:53 +04:00
|
|
|
static kmutex_t amap_list_lock;
|
2005-05-11 17:02:25 +04:00
|
|
|
static LIST_HEAD(, vm_amap) amap_list;
|
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
/*
|
|
|
|
* local functions
|
|
|
|
*/
|
|
|
|
|
2005-12-24 23:45:08 +03:00
|
|
|
/*
 * amap_list_insert: link an amap onto the global list of all amaps.
 *
 * NOTE(review): presumably the list is scanned by the swap-off path
 * (the AMAP_SWAPOFF flag exists in this file) -- confirm against the
 * uvm_swap code.
 *
 * => amap_list_lock is taken and released internally.
 */
static inline void
amap_list_insert(struct vm_amap *amap)
{

	mutex_enter(&amap_list_lock);
	LIST_INSERT_HEAD(&amap_list, amap, am_list);
	mutex_exit(&amap_list_lock);
}
|
|
|
|
|
2005-12-24 23:45:08 +03:00
|
|
|
/*
 * amap_list_remove: unlink an amap from the global list of all amaps.
 *
 * => amap_list_lock is taken and released internally.
 */
static inline void
amap_list_remove(struct vm_amap *amap)
{

	mutex_enter(&amap_list_lock);
	LIST_REMOVE(amap, am_list);
	mutex_exit(&amap_list_lock);
}
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2006-06-25 12:03:46 +04:00
|
|
|
/*
 * amap_roundup_slots: round a slot count up so the backing int array
 * fills the allocation size kmem will hand out anyway (avoids wasting
 * the tail of the allocation).
 */
static int
amap_roundup_slots(int slots)
{
	const size_t nbytes = kmem_roundup_size(slots * sizeof(int));

	/* convert the rounded byte count back into a slot count */
	return nbytes / sizeof(int);
}
|
|
|
|
|
1999-01-28 17:46:27 +03:00
|
|
|
#ifdef UVM_AMAP_PPREF
|
1998-02-05 09:25:08 +03:00
|
|
|
/*
|
|
|
|
* what is ppref? ppref is an _optional_ amap feature which is used
|
|
|
|
* to keep track of reference counts on a per-page basis. it is enabled
|
1999-01-28 17:46:27 +03:00
|
|
|
* when UVM_AMAP_PPREF is defined.
|
1998-02-05 09:25:08 +03:00
|
|
|
*
|
|
|
|
* when enabled, an array of ints is allocated for the pprefs. this
|
|
|
|
* array is allocated only when a partial reference is added to the
|
|
|
|
* map (either by unmapping part of the amap, or gaining a reference
|
|
|
|
 * to only a part of an amap).  if the allocation of the array fails
|
|
|
|
 * (KM_NOSLEEP), then we set the array pointer to PPREF_NONE to indicate
|
|
|
|
* that we tried to do ppref's but couldn't alloc the array so just
|
|
|
|
* give up (after all, this is an optional feature!).
|
|
|
|
*
|
|
|
|
* the array is divided into page sized "chunks." for chunks of length 1,
|
|
|
|
* the chunk reference count plus one is stored in that chunk's slot.
|
|
|
|
* for chunks of length > 1 the first slot contains (the reference count
|
|
|
|
* plus one) * -1. [the negative value indicates that the length is
|
|
|
|
* greater than one.] the second slot of the chunk contains the length
|
|
|
|
* of the chunk. here is an example:
|
|
|
|
*
|
|
|
|
* actual REFS: 2 2 2 2 3 1 1 0 0 0 4 4 0 1 1 1
|
|
|
|
* ppref: -3 4 x x 4 -2 2 -1 3 x -5 2 1 -2 3 x
|
|
|
|
* <----------><-><----><-------><----><-><------->
|
|
|
|
* (x = don't care)
|
|
|
|
*
|
|
|
|
* this allows us to allow one int to contain the ref count for the whole
|
|
|
|
* chunk. note that the "plus one" part is needed because a reference
|
|
|
|
* count of zero is neither positive or negative (need a way to tell
|
|
|
|
* if we've got one zero or a bunch of them).
|
2001-05-25 08:06:11 +04:00
|
|
|
*
|
1998-02-05 09:25:08 +03:00
|
|
|
* here are some in-line functions to help us.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
 * pp_getreflen: get the reference count and length for a specific offset
 *
 * encoding: a positive entry means a one-slot chunk whose refcount is
 * the entry minus one.  a non-positive entry means a multi-slot chunk:
 * the refcount is (-entry - 1) and the following slot holds the length.
 *
 * => ppref's amap must be locked
 */
static inline void
pp_getreflen(int *ppref, int offset, int *refp, int *lenp)
{
	const int code = ppref[offset];

	if (code > 0) {
		/* single-slot chunk: refcount stored biased by one */
		*refp = code - 1;
		*lenp = 1;
	} else {
		/* multi-slot chunk: negated biased refcount, then length */
		*refp = -code - 1;
		*lenp = ppref[offset + 1];
	}
}
|
|
|
|
|
|
|
|
/*
 * pp_setreflen: set the reference count and length for a specific offset
 *
 * stores "ref" biased by one; a chunk longer than one slot is written
 * negated with the length in the following slot (see pp_getreflen for
 * the decoding).  a zero-length request is a no-op.
 *
 * => ppref's amap must be locked
 */
static inline void
pp_setreflen(int *ppref, int offset, int ref, int len)
{

	if (len == 0)
		return;

	if (len == 1) {
		/* single-slot chunk: positive, biased by one */
		ppref[offset] = ref + 1;
	} else {
		/* multi-slot chunk: negated bias, length in next slot */
		ppref[offset] = -(ref + 1);
		ppref[offset + 1] = len;
	}
}
|
2005-06-27 06:19:48 +04:00
|
|
|
#endif /* UVM_AMAP_PPREF */
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
/*
 * amap_alloc1: internal function that allocates an amap, but does not
 *	init the overlay.
 *
 * => "slots" is the mapped slot count; "padslots" extra slots are
 *	reserved beyond it (the final capacity is rounded up further
 *	by amap_roundup_slots).
 * => "waitf" is checked for UVM_FLAG_NOWAIT to pick sleeping vs.
 *	non-sleeping allocation.
 * => lock on returned amap is init'd
 * => on failure everything allocated here is unwound and NULL returned
 */
static inline struct vm_amap *
amap_alloc1(int slots, int padslots, int waitf)
{
	struct vm_amap *amap;
	int totalslots;
	km_flag_t kmflags;

	/* the amap structure itself comes from the pool cache */
	amap = pool_cache_get(&uvm_amap_cache,
	    ((waitf & UVM_FLAG_NOWAIT) != 0) ? PR_NOWAIT : PR_WAITOK);
	if (amap == NULL)
		return(NULL);

	kmflags = ((waitf & UVM_FLAG_NOWAIT) != 0) ? KM_NOSLEEP : KM_SLEEP;
	/* round capacity up to what kmem would allocate anyway */
	totalslots = amap_roundup_slots(slots + padslots);
	mutex_init(&amap->am_l, MUTEX_DEFAULT, IPL_NONE);
	amap->am_ref = 1;
	amap->am_flags = 0;
#ifdef UVM_AMAP_PPREF
	amap->am_ppref = NULL;		/* ppref array is allocated lazily */
#endif
	amap->am_maxslot = totalslots;
	amap->am_nslot = slots;
	amap->am_nused = 0;

	/*
	 * three parallel per-slot arrays; on any failure unwind the
	 * earlier allocations via the goto chain below.
	 */
	amap->am_slots = kmem_alloc(totalslots * sizeof(int), kmflags);
	if (amap->am_slots == NULL)
		goto fail1;

	amap->am_bckptr = kmem_alloc(totalslots * sizeof(int), kmflags);
	if (amap->am_bckptr == NULL)
		goto fail2;

	amap->am_anon = kmem_alloc(totalslots * sizeof(struct vm_anon *),
	    kmflags);
	if (amap->am_anon == NULL)
		goto fail3;

	return(amap);

fail3:
	kmem_free(amap->am_bckptr, totalslots * sizeof(int));
fail2:
	kmem_free(amap->am_slots, totalslots * sizeof(int));
fail1:
	mutex_destroy(&amap->am_l);
	pool_cache_put(&uvm_amap_cache, amap);

	/*
	 * XXX hack to tell the pagedaemon how many pages we need,
	 * since we can need more than it would normally free.
	 */
	if ((waitf & UVM_FLAG_NOWAIT) != 0) {
		extern u_int uvm_extrapages;
		atomic_add_int(&uvm_extrapages,
		    ((sizeof(int) * 2 + sizeof(struct vm_anon *)) *
		    totalslots) >> PAGE_SHIFT);
	}
	return (NULL);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* amap_alloc: allocate an amap to manage "sz" bytes of anonymous VM
|
|
|
|
*
|
|
|
|
* => caller should ensure sz is a multiple of PAGE_SIZE
|
|
|
|
* => reference count to new amap is set to one
|
|
|
|
* => new amap is returned unlocked
|
|
|
|
*/
|
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
struct vm_amap *
|
2005-06-27 06:19:48 +04:00
|
|
|
amap_alloc(vaddr_t sz, vaddr_t padsz, int waitf)
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
1998-02-08 19:07:57 +03:00
|
|
|
struct vm_amap *amap;
|
|
|
|
int slots, padslots;
|
|
|
|
UVMHIST_FUNC("amap_alloc"); UVMHIST_CALLED(maphist);
|
1998-02-05 09:25:08 +03:00
|
|
|
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
AMAP_B2SLOT(slots, sz);
|
1998-02-08 19:07:57 +03:00
|
|
|
AMAP_B2SLOT(padslots, padsz);
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
amap = amap_alloc1(slots, padslots, waitf);
|
2006-01-18 20:03:36 +03:00
|
|
|
if (amap) {
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
memset(amap->am_anon, 0,
|
2001-12-05 04:33:09 +03:00
|
|
|
amap->am_maxslot * sizeof(struct vm_anon *));
|
2006-01-18 20:03:36 +03:00
|
|
|
amap_list_insert(amap);
|
|
|
|
}
|
2005-05-11 17:02:25 +04:00
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
UVMHIST_LOG(maphist,"<- done, amap = 0x%x, sz=%d", amap, sz, 0, 0);
|
|
|
|
return(amap);
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
2007-07-21 23:21:53 +04:00
|
|
|
/*
 * uvm_amap_init: initialize the amap system.
 *
 * => sets up the global amap-list lock and bootstraps the pool cache
 *	that backs struct vm_amap allocations (see amap_alloc1).
 */
void
uvm_amap_init(void)
{

	mutex_init(&amap_list_lock, MUTEX_DEFAULT, IPL_NONE);

	pool_cache_bootstrap(&uvm_amap_cache, sizeof(struct vm_amap), 0, 0, 0,
	    "amappl", NULL, IPL_NONE, NULL, NULL, NULL);
}
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* amap_free: free an amap
|
|
|
|
*
|
2001-09-19 07:41:46 +04:00
|
|
|
* => the amap must be unlocked
|
1999-01-28 17:46:27 +03:00
|
|
|
* => the amap should have a zero reference count and be empty
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
1998-02-08 19:07:57 +03:00
|
|
|
void
|
2005-06-27 06:19:48 +04:00
|
|
|
amap_free(struct vm_amap *amap)
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
2006-06-25 12:03:46 +04:00
|
|
|
int slots;
|
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
UVMHIST_FUNC("amap_free"); UVMHIST_CALLED(maphist);
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2001-02-19 00:19:08 +03:00
|
|
|
KASSERT(amap->am_ref == 0 && amap->am_nused == 0);
|
2005-05-11 17:02:25 +04:00
|
|
|
KASSERT((amap->am_flags & AMAP_SWAPOFF) == 0);
|
2007-07-21 23:21:53 +04:00
|
|
|
KASSERT(!mutex_owned(&amap->am_l));
|
2006-06-25 12:03:46 +04:00
|
|
|
slots = amap->am_maxslot;
|
|
|
|
kmem_free(amap->am_slots, slots * sizeof(*amap->am_slots));
|
|
|
|
kmem_free(amap->am_bckptr, slots * sizeof(*amap->am_bckptr));
|
|
|
|
kmem_free(amap->am_anon, slots * sizeof(*amap->am_anon));
|
1999-01-28 17:46:27 +03:00
|
|
|
#ifdef UVM_AMAP_PPREF
|
1998-02-08 19:07:57 +03:00
|
|
|
if (amap->am_ppref && amap->am_ppref != PPREF_NONE)
|
2006-06-25 12:03:46 +04:00
|
|
|
kmem_free(amap->am_ppref, slots * sizeof(*amap->am_ppref));
|
1998-02-05 09:25:08 +03:00
|
|
|
#endif
|
2007-07-21 23:21:53 +04:00
|
|
|
mutex_destroy(&amap->am_l);
|
2007-12-08 18:46:31 +03:00
|
|
|
pool_cache_put(&uvm_amap_cache, amap);
|
1998-02-08 19:07:57 +03:00
|
|
|
UVMHIST_LOG(maphist,"<- done, freed amap = 0x%x", amap, 0, 0, 0);
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* amap_extend: extend the size of an amap (if needed)
|
|
|
|
*
|
1999-01-28 17:46:27 +03:00
|
|
|
* => called from uvm_map when we want to extend an amap to cover
|
|
|
|
* a new mapping (rather than allocate a new one)
|
|
|
|
* => amap should be unlocked (we will lock it)
|
|
|
|
* => to safely extend an amap it should have a reference count of
|
|
|
|
* one (thus it can't be shared)
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
2002-09-15 20:54:26 +04:00
|
|
|
int
|
2005-06-27 06:19:48 +04:00
|
|
|
amap_extend(struct vm_map_entry *entry, vsize_t addsize, int flags)
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
1998-02-08 19:07:57 +03:00
|
|
|
struct vm_amap *amap = entry->aref.ar_amap;
|
1999-01-25 02:53:14 +03:00
|
|
|
int slotoff = entry->aref.ar_pageoff;
|
2001-12-05 04:33:09 +03:00
|
|
|
int slotmapped, slotadd, slotneed, slotadded, slotalloc;
|
Implement backwards extension of amaps. There are three cases to deal
with:
Case #1 -- adjust offset: The slot offset in the aref can be
decremented to cover the required size addition.
Case #2 -- move pages and adjust offset: The slot offset is not large
enough, but the amap contains enough inactive space *after* the mapped
pages to make up the difference, so active slots are slid to the "end"
of the amap, and the slot offset is, again, adjusted to cover the
required size addition. This optimizes for hitting case #1 again on
the next small extension.
Case #3 -- reallocate, move pages, and adjust offset: There is not
enough inactive space in the amap, so the arrays are reallocated, and
the active pages are copied again to the "end" of the amap, and the
slot offset is adjusted to cover the required size. This also
optimizes for hitting case #1 on the next backwards extension.
This provides the missing piece in the "forward extension of
vm_map_entries" logic, so the merge failure counters have been
removed.
Not many applications will make any use of this at this time (except
for jvms and perhaps gcc3), but a "top-down" memory allocator will use
it extensively.
2002-11-14 20:58:48 +03:00
|
|
|
int slotadj, slotspace;
|
2006-06-25 12:03:46 +04:00
|
|
|
int oldnslots;
|
1999-01-28 17:46:27 +03:00
|
|
|
#ifdef UVM_AMAP_PPREF
|
1998-02-08 19:07:57 +03:00
|
|
|
int *newppref, *oldppref;
|
1998-02-05 09:25:08 +03:00
|
|
|
#endif
|
Implement backwards extension of amaps. There are three cases to deal
with:
Case #1 -- adjust offset: The slot offset in the aref can be
decremented to cover the required size addition.
Case #2 -- move pages and adjust offset: The slot offset is not large
enough, but the amap contains enough inactive space *after* the mapped
pages to make up the difference, so active slots are slid to the "end"
of the amap, and the slot offset is, again, adjusted to cover the
required size addition. This optimizes for hitting case #1 again on
the next small extension.
Case #3 -- reallocate, move pages, and adjust offset: There is not
enough inactive space in the amap, so the arrays are reallocated, and
the active pages are copied again to the "end" of the amap, and the
slot offset is adjusted to cover the required size. This also
optimizes for hitting case #1 on the next backwards extension.
This provides the missing piece in the "forward extension of
vm_map_entries" logic, so the merge failure counters have been
removed.
Not many applications will make any use of this at this time (except
for jvms and perhaps gcc3), but a "top-down" memory allocator will use
it extensively.
2002-11-14 20:58:48 +03:00
|
|
|
int i, *newsl, *newbck, *oldsl, *oldbck;
|
1998-02-08 19:07:57 +03:00
|
|
|
struct vm_anon **newover, **oldover;
|
2006-06-25 12:03:46 +04:00
|
|
|
const km_flag_t kmflags =
|
|
|
|
(flags & AMAP_EXTEND_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
|
2002-11-30 21:28:04 +03:00
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
UVMHIST_FUNC("amap_extend"); UVMHIST_CALLED(maphist);
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2002-11-30 21:28:04 +03:00
|
|
|
UVMHIST_LOG(maphist, " (entry=0x%x, addsize=0x%x, flags=0x%x)",
|
|
|
|
entry, addsize, flags, 0);
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
/*
|
1999-01-28 17:46:27 +03:00
|
|
|
* first, determine how many slots we need in the amap. don't
|
|
|
|
* forget that ar_pageoff could be non-zero: this means that
|
|
|
|
* there are some unused slots before us in the amap.
|
1998-02-08 19:07:57 +03:00
|
|
|
*/
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2002-09-15 20:54:26 +04:00
|
|
|
amap_lock(amap);
|
2004-05-13 00:09:50 +04:00
|
|
|
KASSERT(amap_refs(amap) == 1); /* amap can't be shared */
|
1998-02-08 19:07:57 +03:00
|
|
|
AMAP_B2SLOT(slotmapped, entry->end - entry->start); /* slots mapped */
|
|
|
|
AMAP_B2SLOT(slotadd, addsize); /* slots to add */
|
2002-11-30 21:28:04 +03:00
|
|
|
if (flags & AMAP_EXTEND_FORWARDS) {
|
Implement backwards extension of amaps. There are three cases to deal
with:
Case #1 -- adjust offset: The slot offset in the aref can be
decremented to cover the required size addition.
Case #2 -- move pages and adjust offset: The slot offset is not large
enough, but the amap contains enough inactive space *after* the mapped
pages to make up the difference, so active slots are slid to the "end"
of the amap, and the slot offset is, again, adjusted to cover the
required size addition. This optimizes for hitting case #1 again on
the next small extension.
Case #3 -- reallocate, move pages, and adjust offset: There is not
enough inactive space in the amap, so the arrays are reallocated, and
the active pages are copied again to the "end" of the amap, and the
slot offset is adjusted to cover the required size. This also
optimizes for hitting case #1 on the next backwards extension.
This provides the missing piece in the "forward extension of
vm_map_entries" logic, so the merge failure counters have been
removed.
Not many applications will make any use of this at this time (except
for jvms and perhaps gcc3), but a "top-down" memory allocator will use
it extensively.
2002-11-14 20:58:48 +03:00
|
|
|
slotneed = slotoff + slotmapped + slotadd;
|
|
|
|
slotadj = 0;
|
|
|
|
slotspace = 0;
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
slotneed = slotadd + slotmapped;
|
|
|
|
slotadj = slotadd - slotoff;
|
|
|
|
slotspace = amap->am_maxslot - slotmapped;
|
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
/*
|
1999-01-28 17:46:27 +03:00
|
|
|
* case 1: we already have enough slots in the map and thus
|
|
|
|
* only need to bump the reference counts on the slots we are
|
|
|
|
* adding.
|
1998-02-08 19:07:57 +03:00
|
|
|
*/
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2002-11-30 21:28:04 +03:00
|
|
|
if (flags & AMAP_EXTEND_FORWARDS) {
|
Implement backwards extension of amaps. There are three cases to deal
with:
Case #1 -- adjust offset: The slot offset in the aref can be
decremented to cover the required size addition.
Case #2 -- move pages and adjust offset: The slot offset is not large
enough, but the amap contains enough inactive space *after* the mapped
pages to make up the difference, so active slots are slid to the "end"
of the amap, and the slot offset is, again, adjusted to cover the
required size addition. This optimizes for hitting case #1 again on
the next small extension.
Case #3 -- reallocate, move pages, and adjust offset: There is not
enough inactive space in the amap, so the arrays are reallocated, and
the active pages are copied again to the "end" of the amap, and the
slot offset is adjusted to cover the required size. This also
optimizes for hitting case #1 on the next backwards extension.
This provides the missing piece in the "forward extension of
vm_map_entries" logic, so the merge failure counters have been
removed.
Not many applications will make any use of this at this time (except
for jvms and perhaps gcc3), but a "top-down" memory allocator will use
it extensively.
2002-11-14 20:58:48 +03:00
|
|
|
if (amap->am_nslot >= slotneed) {
|
1999-01-28 17:46:27 +03:00
|
|
|
#ifdef UVM_AMAP_PPREF
|
Implement backwards extension of amaps. There are three cases to deal
with:
Case #1 -- adjust offset: The slot offset in the aref can be
decremented to cover the required size addition.
Case #2 -- move pages and adjust offset: The slot offset is not large
enough, but the amap contains enough inactive space *after* the mapped
pages to make up the difference, so active slots are slid to the "end"
of the amap, and the slot offset is, again, adjusted to cover the
required size addition. This optimizes for hitting case #1 again on
the next small extension.
Case #3 -- reallocate, move pages, and adjust offset: There is not
enough inactive space in the amap, so the arrays are reallocated, and
the active pages are copied again to the "end" of the amap, and the
slot offset is adjusted to cover the required size. This also
optimizes for hitting case #1 on the next backwards extension.
This provides the missing piece in the "forward extension of
vm_map_entries" logic, so the merge failure counters have been
removed.
Not many applications will make any use of this at this time (except
for jvms and perhaps gcc3), but a "top-down" memory allocator will use
it extensively.
2002-11-14 20:58:48 +03:00
|
|
|
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
|
|
|
|
amap_pp_adjref(amap, slotoff + slotmapped,
|
|
|
|
slotadd, 1);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
amap_unlock(amap);
|
|
|
|
UVMHIST_LOG(maphist,
|
|
|
|
"<- done (case 1f), amap = 0x%x, sltneed=%d",
|
|
|
|
amap, slotneed, 0, 0);
|
|
|
|
return 0;
|
1998-02-08 19:07:57 +03:00
|
|
|
}
|
Implement backwards extension of amaps. There are three cases to deal
with:
Case #1 -- adjust offset: The slot offset in the aref can be
decremented to cover the required size addition.
Case #2 -- move pages and adjust offset: The slot offset is not large
enough, but the amap contains enough inactive space *after* the mapped
pages to make up the difference, so active slots are slid to the "end"
of the amap, and the slot offset is, again, adjusted to cover the
required size addition. This optimizes for hitting case #1 again on
the next small extension.
Case #3 -- reallocate, move pages, and adjust offset: There is not
enough inactive space in the amap, so the arrays are reallocated, and
the active pages are copied again to the "end" of the amap, and the
slot offset is adjusted to cover the required size. This also
optimizes for hitting case #1 on the next backwards extension.
This provides the missing piece in the "forward extension of
vm_map_entries" logic, so the merge failure counters have been
removed.
Not many applications will make any use of this at this time (except
for jvms and perhaps gcc3), but a "top-down" memory allocator will use
it extensively.
2002-11-14 20:58:48 +03:00
|
|
|
} else {
|
|
|
|
if (slotadj <= 0) {
|
|
|
|
slotoff -= slotadd;
|
|
|
|
entry->aref.ar_pageoff = slotoff;
|
|
|
|
#ifdef UVM_AMAP_PPREF
|
|
|
|
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
|
|
|
|
amap_pp_adjref(amap, slotoff, slotadd, 1);
|
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
#endif
|
Implement backwards extension of amaps. There are three cases to deal
with:
Case #1 -- adjust offset: The slot offset in the aref can be
decremented to cover the required size addition.
Case #2 -- move pages and adjust offset: The slot offset is not large
enough, but the amap contains enough inactive space *after* the mapped
pages to make up the difference, so active slots are slid to the "end"
of the amap, and the slot offset is, again, adjusted to cover the
required size addition. This optimizes for hitting case #1 again on
the next small extension.
Case #3 -- reallocate, move pages, and adjust offset: There is not
enough inactive space in the amap, so the arrays are reallocated, and
the active pages are copied again to the "end" of the amap, and the
slot offset is adjusted to cover the required size. This also
optimizes for hitting case #1 on the next backwards extension.
This provides the missing piece in the "forward extension of
vm_map_entries" logic, so the merge failure counters have been
removed.
Not many applications will make any use of this at this time (except
for jvms and perhaps gcc3), but a "top-down" memory allocator will use
it extensively.
2002-11-14 20:58:48 +03:00
|
|
|
amap_unlock(amap);
|
|
|
|
UVMHIST_LOG(maphist,
|
|
|
|
"<- done (case 1b), amap = 0x%x, sltneed=%d",
|
|
|
|
amap, slotneed, 0, 0);
|
|
|
|
return 0;
|
|
|
|
}
|
1998-02-08 19:07:57 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
1999-01-28 17:46:27 +03:00
|
|
|
* case 2: we pre-allocated slots for use and we just need to
|
|
|
|
* bump nslot up to take account for these slots.
|
1998-02-08 19:07:57 +03:00
|
|
|
*/
|
2002-09-15 20:54:26 +04:00
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
if (amap->am_maxslot >= slotneed) {
|
2002-11-30 21:28:04 +03:00
|
|
|
if (flags & AMAP_EXTEND_FORWARDS) {
|
1999-01-28 17:46:27 +03:00
|
|
|
#ifdef UVM_AMAP_PPREF
|
Implement backwards extension of amaps. There are three cases to deal
with:
Case #1 -- adjust offset: The slot offset in the aref can be
decremented to cover the required size addition.
Case #2 -- move pages and adjust offset: The slot offset is not large
enough, but the amap contains enough inactive space *after* the mapped
pages to make up the difference, so active slots are slid to the "end"
of the amap, and the slot offset is, again, adjusted to cover the
required size addition. This optimizes for hitting case #1 again on
the next small extension.
Case #3 -- reallocate, move pages, and adjust offset: There is not
enough inactive space in the amap, so the arrays are reallocated, and
the active pages are copied again to the "end" of the amap, and the
slot offset is adjusted to cover the required size. This also
optimizes for hitting case #1 on the next backwards extension.
This provides the missing piece in the "forward extension of
vm_map_entries" logic, so the merge failure counters have been
removed.
Not many applications will make any use of this at this time (except
for jvms and perhaps gcc3), but a "top-down" memory allocator will use
it extensively.
2002-11-14 20:58:48 +03:00
|
|
|
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
|
|
|
|
if ((slotoff + slotmapped) < amap->am_nslot)
|
|
|
|
amap_pp_adjref(amap,
|
|
|
|
slotoff + slotmapped,
|
|
|
|
(amap->am_nslot -
|
|
|
|
(slotoff + slotmapped)), 1);
|
|
|
|
pp_setreflen(amap->am_ppref, amap->am_nslot, 1,
|
|
|
|
slotneed - amap->am_nslot);
|
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
#endif
|
Implement backwards extension of amaps. There are three cases to deal
with:
Case #1 -- adjust offset: The slot offset in the aref can be
decremented to cover the required size addition.
Case #2 -- move pages and adjust offset: The slot offset is not large
enough, but the amap contains enough inactive space *after* the mapped
pages to make up the difference, so active slots are slid to the "end"
of the amap, and the slot offset is, again, adjusted to cover the
required size addition. This optimizes for hitting case #1 again on
the next small extension.
Case #3 -- reallocate, move pages, and adjust offset: There is not
enough inactive space in the amap, so the arrays are reallocated, and
the active pages are copied again to the "end" of the amap, and the
slot offset is adjusted to cover the required size. This also
optimizes for hitting case #1 on the next backwards extension.
This provides the missing piece in the "forward extension of
vm_map_entries" logic, so the merge failure counters have been
removed.
Not many applications will make any use of this at this time (except
for jvms and perhaps gcc3), but a "top-down" memory allocator will use
it extensively.
2002-11-14 20:58:48 +03:00
|
|
|
amap->am_nslot = slotneed;
|
|
|
|
amap_unlock(amap);
|
2002-09-15 20:54:26 +04:00
|
|
|
|
Implement backwards extension of amaps. There are three cases to deal
with:
Case #1 -- adjust offset: The slot offset in the aref can be
decremented to cover the required size addition.
Case #2 -- move pages and adjust offset: The slot offset is not large
enough, but the amap contains enough inactive space *after* the mapped
pages to make up the difference, so active slots are slid to the "end"
of the amap, and the slot offset is, again, adjusted to cover the
required size addition. This optimizes for hitting case #1 again on
the next small extension.
Case #3 -- reallocate, move pages, and adjust offset: There is not
enough inactive space in the amap, so the arrays are reallocated, and
the active pages are copied again to the "end" of the amap, and the
slot offset is adjusted to cover the required size. This also
optimizes for hitting case #1 on the next backwards extension.
This provides the missing piece in the "forward extension of
vm_map_entries" logic, so the merge failure counters have been
removed.
Not many applications will make any use of this at this time (except
for jvms and perhaps gcc3), but a "top-down" memory allocator will use
it extensively.
2002-11-14 20:58:48 +03:00
|
|
|
/*
|
|
|
|
* no need to zero am_anon since that was done at
|
|
|
|
* alloc time and we never shrink an allocation.
|
|
|
|
*/
|
|
|
|
|
|
|
|
UVMHIST_LOG(maphist,"<- done (case 2f), amap = 0x%x, "
|
|
|
|
"slotneed=%d", amap, slotneed, 0, 0);
|
|
|
|
return 0;
|
|
|
|
} else {
|
|
|
|
#ifdef UVM_AMAP_PPREF
|
|
|
|
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
|
|
|
|
/*
|
|
|
|
* Slide up the ref counts on the pages that
|
|
|
|
* are actually in use.
|
|
|
|
*/
|
|
|
|
memmove(amap->am_ppref + slotspace,
|
|
|
|
amap->am_ppref + slotoff,
|
|
|
|
slotmapped * sizeof(int));
|
|
|
|
/*
|
|
|
|
* Mark the (adjusted) gap at the front as
|
|
|
|
* referenced/not referenced.
|
|
|
|
*/
|
|
|
|
pp_setreflen(amap->am_ppref,
|
|
|
|
0, 0, slotspace - slotadd);
|
|
|
|
pp_setreflen(amap->am_ppref,
|
|
|
|
slotspace - slotadd, 1, slotadd);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Slide the anon pointers up and clear out
|
|
|
|
* the space we just made.
|
|
|
|
*/
|
|
|
|
memmove(amap->am_anon + slotspace,
|
|
|
|
amap->am_anon + slotoff,
|
|
|
|
slotmapped * sizeof(struct vm_anon*));
|
|
|
|
memset(amap->am_anon + slotoff, 0,
|
|
|
|
(slotspace - slotoff) * sizeof(struct vm_anon *));
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Slide the backpointers up, but don't bother
|
|
|
|
* wiping out the old slots.
|
|
|
|
*/
|
|
|
|
memmove(amap->am_bckptr + slotspace,
|
|
|
|
amap->am_bckptr + slotoff,
|
|
|
|
slotmapped * sizeof(int));
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Adjust all the useful active slot numbers.
|
|
|
|
*/
|
|
|
|
for (i = 0; i < amap->am_nused; i++)
|
|
|
|
amap->am_slots[i] += (slotspace - slotoff);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We just filled all the empty space in the
|
|
|
|
* front of the amap by activating a few new
|
|
|
|
* slots.
|
|
|
|
*/
|
|
|
|
amap->am_nslot = amap->am_maxslot;
|
|
|
|
entry->aref.ar_pageoff = slotspace - slotadd;
|
|
|
|
amap_unlock(amap);
|
2002-09-15 20:54:26 +04:00
|
|
|
|
Implement backwards extension of amaps. There are three cases to deal
with:
Case #1 -- adjust offset: The slot offset in the aref can be
decremented to cover the required size addition.
Case #2 -- move pages and adjust offset: The slot offset is not large
enough, but the amap contains enough inactive space *after* the mapped
pages to make up the difference, so active slots are slid to the "end"
of the amap, and the slot offset is, again, adjusted to cover the
required size addition. This optimizes for hitting case #1 again on
the next small extension.
Case #3 -- reallocate, move pages, and adjust offset: There is not
enough inactive space in the amap, so the arrays are reallocated, and
the active pages are copied again to the "end" of the amap, and the
slot offset is adjusted to cover the required size. This also
optimizes for hitting case #1 on the next backwards extension.
This provides the missing piece in the "forward extension of
vm_map_entries" logic, so the merge failure counters have been
removed.
Not many applications will make any use of this at this time (except
for jvms and perhaps gcc3), but a "top-down" memory allocator will use
it extensively.
2002-11-14 20:58:48 +03:00
|
|
|
UVMHIST_LOG(maphist,"<- done (case 2b), amap = 0x%x, "
|
|
|
|
"slotneed=%d", amap, slotneed, 0, 0);
|
|
|
|
return 0;
|
|
|
|
}
|
1998-02-08 19:07:57 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
1999-01-28 17:46:27 +03:00
|
|
|
* case 3: we need to malloc a new amap and copy all the amap
|
|
|
|
* data over from old amap to the new one.
|
1998-02-08 19:07:57 +03:00
|
|
|
*
|
2002-12-20 21:21:13 +03:00
|
|
|
* note that the use of a kernel realloc() probably would not
|
|
|
|
* help here, since we wish to abort cleanly if one of the
|
|
|
|
* three (or four) mallocs fails.
|
1998-02-08 19:07:57 +03:00
|
|
|
*/
|
|
|
|
|
1999-01-25 02:53:14 +03:00
|
|
|
amap_unlock(amap); /* unlock in case we sleep in malloc */
|
2005-05-05 05:58:51 +04:00
|
|
|
|
|
|
|
if (slotneed >= UVM_AMAP_LARGE) {
|
|
|
|
return E2BIG;
|
|
|
|
}
|
|
|
|
|
2006-06-25 12:03:46 +04:00
|
|
|
slotalloc = amap_roundup_slots(slotneed);
|
1999-01-28 17:46:27 +03:00
|
|
|
#ifdef UVM_AMAP_PPREF
|
1998-02-08 19:07:57 +03:00
|
|
|
newppref = NULL;
|
2002-11-15 20:30:35 +03:00
|
|
|
if (amap->am_ppref && amap->am_ppref != PPREF_NONE)
|
2006-06-25 12:03:46 +04:00
|
|
|
newppref = kmem_alloc(slotalloc * sizeof(*newppref), kmflags);
|
1998-02-05 09:25:08 +03:00
|
|
|
#endif
|
2006-06-25 12:03:46 +04:00
|
|
|
newsl = kmem_alloc(slotalloc * sizeof(*newsl), kmflags);
|
|
|
|
newbck = kmem_alloc(slotalloc * sizeof(*newbck), kmflags);
|
|
|
|
newover = kmem_alloc(slotalloc * sizeof(*newover), kmflags);
|
2002-09-15 20:54:26 +04:00
|
|
|
if (newsl == NULL || newbck == NULL || newover == NULL) {
|
Implement backwards extension of amaps. There are three cases to deal
with:
Case #1 -- adjust offset: The slot offset in the aref can be
decremented to cover the required size addition.
Case #2 -- move pages and adjust offset: The slot offset is not large
enough, but the amap contains enough inactive space *after* the mapped
pages to make up the difference, so active slots are slid to the "end"
of the amap, and the slot offset is, again, adjusted to cover the
required size addition. This optimizes for hitting case #1 again on
the next small extension.
Case #3 -- reallocate, move pages, and adjust offset: There is not
enough inactive space in the amap, so the arrays are reallocated, and
the active pages are copied again to the "end" of the amap, and the
slot offset is adjusted to cover the required size. This also
optimizes for hitting case #1 on the next backwards extension.
This provides the missing piece in the "forward extension of
vm_map_entries" logic, so the merge failure counters have been
removed.
Not many applications will make any use of this at this time (except
for jvms and perhaps gcc3), but a "top-down" memory allocator will use
it extensively.
2002-11-14 20:58:48 +03:00
|
|
|
#ifdef UVM_AMAP_PPREF
|
|
|
|
if (newppref != NULL) {
|
2006-06-25 12:03:46 +04:00
|
|
|
kmem_free(newppref, slotalloc * sizeof(*newppref));
|
Implement backwards extension of amaps. There are three cases to deal
with:
Case #1 -- adjust offset: The slot offset in the aref can be
decremented to cover the required size addition.
Case #2 -- move pages and adjust offset: The slot offset is not large
enough, but the amap contains enough inactive space *after* the mapped
pages to make up the difference, so active slots are slid to the "end"
of the amap, and the slot offset is, again, adjusted to cover the
required size addition. This optimizes for hitting case #1 again on
the next small extension.
Case #3 -- reallocate, move pages, and adjust offset: There is not
enough inactive space in the amap, so the arrays are reallocated, and
the active pages are copied again to the "end" of the amap, and the
slot offset is adjusted to cover the required size. This also
optimizes for hitting case #1 on the next backwards extension.
This provides the missing piece in the "forward extension of
vm_map_entries" logic, so the merge failure counters have been
removed.
Not many applications will make any use of this at this time (except
for jvms and perhaps gcc3), but a "top-down" memory allocator will use
it extensively.
2002-11-14 20:58:48 +03:00
|
|
|
}
|
|
|
|
#endif
|
2002-09-15 20:54:26 +04:00
|
|
|
if (newsl != NULL) {
|
2006-06-25 12:03:46 +04:00
|
|
|
kmem_free(newsl, slotalloc * sizeof(*newsl));
|
2002-09-15 20:54:26 +04:00
|
|
|
}
|
|
|
|
if (newbck != NULL) {
|
2006-06-25 12:03:46 +04:00
|
|
|
kmem_free(newbck, slotalloc * sizeof(*newbck));
|
2002-09-15 20:54:26 +04:00
|
|
|
}
|
|
|
|
if (newover != NULL) {
|
2006-06-25 12:03:46 +04:00
|
|
|
kmem_free(newover, slotalloc * sizeof(*newover));
|
2002-09-15 20:54:26 +04:00
|
|
|
}
|
|
|
|
return ENOMEM;
|
|
|
|
}
|
|
|
|
amap_lock(amap);
|
2001-02-19 00:19:08 +03:00
|
|
|
KASSERT(amap->am_maxslot < slotneed);
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
/*
|
|
|
|
* now copy everything over to new malloc'd areas...
|
|
|
|
*/
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2001-12-05 04:33:09 +03:00
|
|
|
slotadded = slotalloc - amap->am_nslot;
|
2002-11-30 21:28:04 +03:00
|
|
|
if (!(flags & AMAP_EXTEND_FORWARDS))
|
Implement backwards extension of amaps. There are three cases to deal
with:
Case #1 -- adjust offset: The slot offset in the aref can be
decremented to cover the required size addition.
Case #2 -- move pages and adjust offset: The slot offset is not large
enough, but the amap contains enough inactive space *after* the mapped
pages to make up the difference, so active slots are slid to the "end"
of the amap, and the slot offset is, again, adjusted to cover the
required size addition. This optimizes for hitting case #1 again on
the next small extension.
Case #3 -- reallocate, move pages, and adjust offset: There is not
enough inactive space in the amap, so the arrays are reallocated, and
the active pages are copied again to the "end" of the amap, and the
slot offset is adjusted to cover the required size. This also
optimizes for hitting case #1 on the next backwards extension.
This provides the missing piece in the "forward extension of
vm_map_entries" logic, so the merge failure counters have been
removed.
Not many applications will make any use of this at this time (except
for jvms and perhaps gcc3), but a "top-down" memory allocator will use
it extensively.
2002-11-14 20:58:48 +03:00
|
|
|
slotspace = slotalloc - slotmapped;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
/* do am_slots */
|
|
|
|
oldsl = amap->am_slots;
|
2002-11-30 21:28:04 +03:00
|
|
|
if (flags & AMAP_EXTEND_FORWARDS)
|
Implement backwards extension of amaps. There are three cases to deal
with:
Case #1 -- adjust offset: The slot offset in the aref can be
decremented to cover the required size addition.
Case #2 -- move pages and adjust offset: The slot offset is not large
enough, but the amap contains enough inactive space *after* the mapped
pages to make up the difference, so active slots are slid to the "end"
of the amap, and the slot offset is, again, adjusted to cover the
required size addition. This optimizes for hitting case #1 again on
the next small extension.
Case #3 -- reallocate, move pages, and adjust offset: There is not
enough inactive space in the amap, so the arrays are reallocated, and
the active pages are copied again to the "end" of the amap, and the
slot offset is adjusted to cover the required size. This also
optimizes for hitting case #1 on the next backwards extension.
This provides the missing piece in the "forward extension of
vm_map_entries" logic, so the merge failure counters have been
removed.
Not many applications will make any use of this at this time (except
for jvms and perhaps gcc3), but a "top-down" memory allocator will use
it extensively.
2002-11-14 20:58:48 +03:00
|
|
|
memcpy(newsl, oldsl, sizeof(int) * amap->am_nused);
|
|
|
|
else
|
|
|
|
for (i = 0; i < amap->am_nused; i++)
|
|
|
|
newsl[i] = oldsl[i] + slotspace - slotoff;
|
1998-02-08 19:07:57 +03:00
|
|
|
amap->am_slots = newsl;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
/* do am_anon */
|
|
|
|
oldover = amap->am_anon;
|
2002-11-30 21:28:04 +03:00
|
|
|
if (flags & AMAP_EXTEND_FORWARDS) {
|
Implement backwards extension of amaps. There are three cases to deal
with:
Case #1 -- adjust offset: The slot offset in the aref can be
decremented to cover the required size addition.
Case #2 -- move pages and adjust offset: The slot offset is not large
enough, but the amap contains enough inactive space *after* the mapped
pages to make up the difference, so active slots are slid to the "end"
of the amap, and the slot offset is, again, adjusted to cover the
required size addition. This optimizes for hitting case #1 again on
the next small extension.
Case #3 -- reallocate, move pages, and adjust offset: There is not
enough inactive space in the amap, so the arrays are reallocated, and
the active pages are copied again to the "end" of the amap, and the
slot offset is adjusted to cover the required size. This also
optimizes for hitting case #1 on the next backwards extension.
This provides the missing piece in the "forward extension of
vm_map_entries" logic, so the merge failure counters have been
removed.
Not many applications will make any use of this at this time (except
for jvms and perhaps gcc3), but a "top-down" memory allocator will use
it extensively.
2002-11-14 20:58:48 +03:00
|
|
|
memcpy(newover, oldover,
|
|
|
|
sizeof(struct vm_anon *) * amap->am_nslot);
|
|
|
|
memset(newover + amap->am_nslot, 0,
|
|
|
|
sizeof(struct vm_anon *) * slotadded);
|
|
|
|
} else {
|
|
|
|
memcpy(newover + slotspace, oldover + slotoff,
|
|
|
|
sizeof(struct vm_anon *) * slotmapped);
|
|
|
|
memset(newover, 0,
|
|
|
|
sizeof(struct vm_anon *) * slotspace);
|
|
|
|
}
|
1998-02-08 19:07:57 +03:00
|
|
|
amap->am_anon = newover;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
/* do am_bckptr */
|
|
|
|
oldbck = amap->am_bckptr;
|
2002-11-30 21:28:04 +03:00
|
|
|
if (flags & AMAP_EXTEND_FORWARDS)
|
Implement backwards extension of amaps. There are three cases to deal
with:
Case #1 -- adjust offset: The slot offset in the aref can be
decremented to cover the required size addition.
Case #2 -- move pages and adjust offset: The slot offset is not large
enough, but the amap contains enough inactive space *after* the mapped
pages to make up the difference, so active slots are slid to the "end"
of the amap, and the slot offset is, again, adjusted to cover the
required size addition. This optimizes for hitting case #1 again on
the next small extension.
Case #3 -- reallocate, move pages, and adjust offset: There is not
enough inactive space in the amap, so the arrays are reallocated, and
the active pages are copied again to the "end" of the amap, and the
slot offset is adjusted to cover the required size. This also
optimizes for hitting case #1 on the next backwards extension.
This provides the missing piece in the "forward extension of
vm_map_entries" logic, so the merge failure counters have been
removed.
Not many applications will make any use of this at this time (except
for jvms and perhaps gcc3), but a "top-down" memory allocator will use
it extensively.
2002-11-14 20:58:48 +03:00
|
|
|
memcpy(newbck, oldbck, sizeof(int) * amap->am_nslot);
|
|
|
|
else
|
|
|
|
memcpy(newbck + slotspace, oldbck + slotoff,
|
|
|
|
sizeof(int) * slotmapped);
|
1998-02-08 19:07:57 +03:00
|
|
|
amap->am_bckptr = newbck;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1999-01-28 17:46:27 +03:00
|
|
|
#ifdef UVM_AMAP_PPREF
|
1998-02-08 19:07:57 +03:00
|
|
|
/* do ppref */
|
|
|
|
oldppref = amap->am_ppref;
|
|
|
|
if (newppref) {
|
2002-11-30 21:28:04 +03:00
|
|
|
if (flags & AMAP_EXTEND_FORWARDS) {
|
Implement backwards extension of amaps. There are three cases to deal
with:
Case #1 -- adjust offset: The slot offset in the aref can be
decremented to cover the required size addition.
Case #2 -- move pages and adjust offset: The slot offset is not large
enough, but the amap contains enough inactive space *after* the mapped
pages to make up the difference, so active slots are slid to the "end"
of the amap, and the slot offset is, again, adjusted to cover the
required size addition. This optimizes for hitting case #1 again on
the next small extension.
Case #3 -- reallocate, move pages, and adjust offset: There is not
enough inactive space in the amap, so the arrays are reallocated, and
the active pages are copied again to the "end" of the amap, and the
slot offset is adjusted to cover the required size. This also
optimizes for hitting case #1 on the next backwards extension.
This provides the missing piece in the "forward extension of
vm_map_entries" logic, so the merge failure counters have been
removed.
Not many applications will make any use of this at this time (except
for jvms and perhaps gcc3), but a "top-down" memory allocator will use
it extensively.
2002-11-14 20:58:48 +03:00
|
|
|
memcpy(newppref, oldppref,
|
|
|
|
sizeof(int) * amap->am_nslot);
|
|
|
|
memset(newppref + amap->am_nslot, 0,
|
|
|
|
sizeof(int) * slotadded);
|
|
|
|
} else {
|
|
|
|
memcpy(newppref + slotspace, oldppref + slotoff,
|
|
|
|
sizeof(int) * slotmapped);
|
|
|
|
}
|
1998-02-08 19:07:57 +03:00
|
|
|
amap->am_ppref = newppref;
|
2002-11-30 21:28:04 +03:00
|
|
|
if ((flags & AMAP_EXTEND_FORWARDS) &&
|
|
|
|
(slotoff + slotmapped) < amap->am_nslot)
|
2001-05-25 08:06:11 +04:00
|
|
|
amap_pp_adjref(amap, slotoff + slotmapped,
|
2000-11-25 09:27:59 +03:00
|
|
|
(amap->am_nslot - (slotoff + slotmapped)), 1);
|
2002-11-30 21:28:04 +03:00
|
|
|
if (flags & AMAP_EXTEND_FORWARDS)
|
Implement backwards extension of amaps. There are three cases to deal
with:
Case #1 -- adjust offset: The slot offset in the aref can be
decremented to cover the required size addition.
Case #2 -- move pages and adjust offset: The slot offset is not large
enough, but the amap contains enough inactive space *after* the mapped
pages to make up the difference, so active slots are slid to the "end"
of the amap, and the slot offset is, again, adjusted to cover the
required size addition. This optimizes for hitting case #1 again on
the next small extension.
Case #3 -- reallocate, move pages, and adjust offset: There is not
enough inactive space in the amap, so the arrays are reallocated, and
the active pages are copied again to the "end" of the amap, and the
slot offset is adjusted to cover the required size. This also
optimizes for hitting case #1 on the next backwards extension.
This provides the missing piece in the "forward extension of
vm_map_entries" logic, so the merge failure counters have been
removed.
Not many applications will make any use of this at this time (except
for jvms and perhaps gcc3), but a "top-down" memory allocator will use
it extensively.
2002-11-14 20:58:48 +03:00
|
|
|
pp_setreflen(newppref, amap->am_nslot, 1,
|
|
|
|
slotneed - amap->am_nslot);
|
|
|
|
else {
|
|
|
|
pp_setreflen(newppref, 0, 0,
|
|
|
|
slotalloc - slotneed);
|
|
|
|
pp_setreflen(newppref, slotalloc - slotneed, 1,
|
|
|
|
slotneed - slotmapped);
|
|
|
|
}
|
2002-11-15 20:30:35 +03:00
|
|
|
} else {
|
|
|
|
if (amap->am_ppref)
|
|
|
|
amap->am_ppref = PPREF_NONE;
|
1998-02-08 19:07:57 +03:00
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
#endif
|
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
/* update master values */
|
2002-11-30 21:28:04 +03:00
|
|
|
if (flags & AMAP_EXTEND_FORWARDS)
|
Implement backwards extension of amaps. There are three cases to deal
with:
Case #1 -- adjust offset: The slot offset in the aref can be
decremented to cover the required size addition.
Case #2 -- move pages and adjust offset: The slot offset is not large
enough, but the amap contains enough inactive space *after* the mapped
pages to make up the difference, so active slots are slid to the "end"
of the amap, and the slot offset is, again, adjusted to cover the
required size addition. This optimizes for hitting case #1 again on
the next small extension.
Case #3 -- reallocate, move pages, and adjust offset: There is not
enough inactive space in the amap, so the arrays are reallocated, and
the active pages are copied again to the "end" of the amap, and the
slot offset is adjusted to cover the required size. This also
optimizes for hitting case #1 on the next backwards extension.
This provides the missing piece in the "forward extension of
vm_map_entries" logic, so the merge failure counters have been
removed.
Not many applications will make any use of this at this time (except
for jvms and perhaps gcc3), but a "top-down" memory allocator will use
it extensively.
2002-11-14 20:58:48 +03:00
|
|
|
amap->am_nslot = slotneed;
|
|
|
|
else {
|
|
|
|
entry->aref.ar_pageoff = slotspace - slotadd;
|
|
|
|
amap->am_nslot = slotalloc;
|
|
|
|
}
|
2006-06-25 12:03:46 +04:00
|
|
|
oldnslots = amap->am_maxslot;
|
2001-12-05 04:33:09 +03:00
|
|
|
amap->am_maxslot = slotalloc;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1999-01-25 02:53:14 +03:00
|
|
|
amap_unlock(amap);
|
2006-06-25 12:03:46 +04:00
|
|
|
kmem_free(oldsl, oldnslots * sizeof(*oldsl));
|
|
|
|
kmem_free(oldbck, oldnslots * sizeof(*oldbck));
|
|
|
|
kmem_free(oldover, oldnslots * sizeof(*oldover));
|
1999-01-28 17:46:27 +03:00
|
|
|
#ifdef UVM_AMAP_PPREF
|
1998-02-08 19:07:57 +03:00
|
|
|
if (oldppref && oldppref != PPREF_NONE)
|
2006-06-25 12:03:46 +04:00
|
|
|
kmem_free(oldppref, oldnslots * sizeof(*oldppref));
|
1998-02-05 09:25:08 +03:00
|
|
|
#endif
|
2001-05-25 08:06:11 +04:00
|
|
|
UVMHIST_LOG(maphist,"<- done (case 3), amap = 0x%x, slotneed=%d",
|
1998-02-08 19:07:57 +03:00
|
|
|
amap, slotneed, 0, 0);
|
2002-09-15 20:54:26 +04:00
|
|
|
return 0;
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
1999-01-28 17:46:27 +03:00
|
|
|
* amap_share_protect: change protection of anons in a shared amap
|
1998-02-05 09:25:08 +03:00
|
|
|
*
|
1999-01-28 17:46:27 +03:00
|
|
|
* for shared amaps, given the current data structure layout, it is
|
|
|
|
* not possible for us to directly locate all maps referencing the
|
|
|
|
* shared anon (to change the protection). in order to protect data
|
|
|
|
* in shared maps we use pmap_page_protect(). [this is useful for IPC
|
|
|
|
* mechanisms like map entry passing that may want to write-protect
|
|
|
|
* all mappings of a shared amap.] we traverse am_anon or am_slots
|
|
|
|
* depending on the current state of the amap.
|
1998-02-05 09:25:08 +03:00
|
|
|
*
|
1999-01-28 17:46:27 +03:00
|
|
|
* => entry's map and amap must be locked by the caller
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
1998-02-08 19:07:57 +03:00
|
|
|
void
|
2005-06-27 06:19:48 +04:00
|
|
|
amap_share_protect(struct vm_map_entry *entry, vm_prot_t prot)
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
1998-02-08 19:07:57 +03:00
|
|
|
struct vm_amap *amap = entry->aref.ar_amap;
|
|
|
|
int slots, lcv, slot, stop;
|
1998-03-09 03:58:55 +03:00
|
|
|
|
2007-07-21 23:21:53 +04:00
|
|
|
KASSERT(mutex_owned(&amap->am_l));
|
2001-01-23 04:56:16 +03:00
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
AMAP_B2SLOT(slots, (entry->end - entry->start));
|
1999-01-25 02:53:14 +03:00
|
|
|
stop = entry->aref.ar_pageoff + slots;
|
1998-02-08 19:07:57 +03:00
|
|
|
|
|
|
|
if (slots < amap->am_nused) {
|
|
|
|
/* cheaper to traverse am_anon */
|
1999-01-25 02:53:14 +03:00
|
|
|
for (lcv = entry->aref.ar_pageoff ; lcv < stop ; lcv++) {
|
1998-02-08 19:07:57 +03:00
|
|
|
if (amap->am_anon[lcv] == NULL)
|
|
|
|
continue;
|
2005-05-11 17:02:25 +04:00
|
|
|
if (amap->am_anon[lcv]->an_page != NULL)
|
|
|
|
pmap_page_protect(amap->am_anon[lcv]->an_page,
|
1999-09-12 05:16:55 +04:00
|
|
|
prot);
|
1998-02-08 19:07:57 +03:00
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* cheaper to traverse am_slots */
|
|
|
|
for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
|
|
|
|
slot = amap->am_slots[lcv];
|
1999-01-25 02:53:14 +03:00
|
|
|
if (slot < entry->aref.ar_pageoff || slot >= stop)
|
1998-02-08 19:07:57 +03:00
|
|
|
continue;
|
2005-05-11 17:02:25 +04:00
|
|
|
if (amap->am_anon[slot]->an_page != NULL)
|
|
|
|
pmap_page_protect(amap->am_anon[slot]->an_page, prot);
|
1998-02-08 19:07:57 +03:00
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* amap_wipeout: wipeout all anon's in an amap; then free the amap!
|
|
|
|
*
|
2001-05-25 08:06:11 +04:00
|
|
|
* => called from amap_unref when the final reference to an amap is
|
2005-05-11 17:02:25 +04:00
|
|
|
* discarded (i.e. when reference count drops to 0)
|
1999-01-28 17:46:27 +03:00
|
|
|
* => the amap should be locked (by the caller)
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
void
|
2005-06-27 06:19:48 +04:00
|
|
|
amap_wipeout(struct vm_amap *amap)
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
1998-02-08 19:07:57 +03:00
|
|
|
int lcv, slot;
|
|
|
|
struct vm_anon *anon;
|
|
|
|
UVMHIST_FUNC("amap_wipeout"); UVMHIST_CALLED(maphist);
|
|
|
|
UVMHIST_LOG(maphist,"(amap=0x%x)", amap, 0,0,0);
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2005-05-11 17:02:25 +04:00
|
|
|
KASSERT(amap->am_ref == 0);
|
|
|
|
|
|
|
|
if (__predict_false((amap->am_flags & AMAP_SWAPOFF) != 0)) {
|
|
|
|
/*
|
|
|
|
* amap_swap_off will call us again.
|
|
|
|
*/
|
|
|
|
amap_unlock(amap);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
amap_list_remove(amap);
|
2001-09-19 07:41:46 +04:00
|
|
|
amap_unlock(amap);
|
2005-05-11 17:02:25 +04:00
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
|
1998-11-04 10:07:22 +03:00
|
|
|
int refs;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
slot = amap->am_slots[lcv];
|
|
|
|
anon = amap->am_anon[slot];
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2001-05-25 08:06:11 +04:00
|
|
|
if (anon == NULL || anon->an_ref == 0)
|
1998-02-08 19:07:57 +03:00
|
|
|
panic("amap_wipeout: corrupt amap");
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_enter(&anon->an_lock);
|
2001-05-25 08:06:11 +04:00
|
|
|
UVMHIST_LOG(maphist," processing anon 0x%x, ref=%d", anon,
|
1998-02-08 19:07:57 +03:00
|
|
|
anon->an_ref, 0, 0);
|
1998-11-04 10:07:22 +03:00
|
|
|
refs = --anon->an_ref;
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_exit(&anon->an_lock);
|
1998-11-04 10:07:22 +03:00
|
|
|
if (refs == 0) {
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1998-11-04 10:07:22 +03:00
|
|
|
/*
|
|
|
|
* we had the last reference to a vm_anon. free it.
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1998-11-04 10:07:22 +03:00
|
|
|
uvm_anfree(anon);
|
1998-02-08 19:07:57 +03:00
|
|
|
}
|
2001-09-19 07:41:46 +04:00
|
|
|
|
2003-01-18 11:51:40 +03:00
|
|
|
if (curlwp->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
|
2007-02-10 00:55:00 +03:00
|
|
|
preempt();
|
1998-02-08 19:07:57 +03:00
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
/*
|
|
|
|
* now we free the map
|
|
|
|
*/
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
amap->am_nused = 0;
|
1999-01-28 17:46:27 +03:00
|
|
|
amap_free(amap); /* will unlock and free amap */
|
1998-02-08 19:07:57 +03:00
|
|
|
UVMHIST_LOG(maphist,"<- done!", 0,0,0,0);
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* amap_copy: ensure that a map entry's "needs_copy" flag is false
|
|
|
|
* by copying the amap if necessary.
|
2001-05-25 08:06:11 +04:00
|
|
|
*
|
1998-02-05 09:25:08 +03:00
|
|
|
* => an entry with a null amap pointer will get a new (blank) one.
|
|
|
|
* => the map that the map entry belongs to must be locked by caller.
|
|
|
|
* => the amap currently attached to "entry" (if any) must be unlocked.
|
|
|
|
* => if canchunk is true, then we may clip the entry into a chunk
|
1998-05-14 17:51:28 +04:00
|
|
|
* => "startva" and "endva" are used only if canchunk is true. they are
|
|
|
|
* used to limit chunking (e.g. if you have a large space that you
|
|
|
|
* know you are going to need to allocate amaps for, there is no point
|
|
|
|
* in allowing that to be chunked)
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
void
|
2006-02-15 17:06:45 +03:00
|
|
|
amap_copy(struct vm_map *map, struct vm_map_entry *entry, int flags,
|
|
|
|
vaddr_t startva, vaddr_t endva)
|
1998-02-08 19:07:57 +03:00
|
|
|
{
|
|
|
|
struct vm_amap *amap, *srcamap;
|
|
|
|
int slots, lcv;
|
1998-08-13 06:10:37 +04:00
|
|
|
vaddr_t chunksize;
|
2006-06-25 12:03:46 +04:00
|
|
|
const int waitf = (flags & AMAP_COPY_NOWAIT) ? UVM_FLAG_NOWAIT : 0;
|
2007-02-22 01:59:35 +03:00
|
|
|
const bool canchunk = (flags & AMAP_COPY_NOCHUNK) == 0;
|
1998-02-08 19:07:57 +03:00
|
|
|
UVMHIST_FUNC("amap_copy"); UVMHIST_CALLED(maphist);
|
2006-02-15 17:06:45 +03:00
|
|
|
UVMHIST_LOG(maphist, " (map=%p, entry=%p, flags=%d)",
|
|
|
|
map, entry, flags, 0);
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2006-01-21 16:13:07 +03:00
|
|
|
KASSERT(map != kernel_map); /* we use nointr pool */
|
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
/*
|
|
|
|
* is there a map to copy? if not, create one from scratch.
|
|
|
|
*/
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
if (entry->aref.ar_amap == NULL) {
|
|
|
|
|
|
|
|
/*
|
1999-01-28 17:46:27 +03:00
|
|
|
* check to see if we have a large amap that we can
|
|
|
|
* chunk. we align startva/endva to chunk-sized
|
|
|
|
* boundaries and then clip to them.
|
1998-02-08 19:07:57 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
if (canchunk && atop(entry->end - entry->start) >=
|
|
|
|
UVM_AMAP_LARGE) {
|
|
|
|
/* convert slots to bytes */
|
1998-10-19 03:49:59 +04:00
|
|
|
chunksize = UVM_AMAP_CHUNK << PAGE_SHIFT;
|
1998-02-08 19:07:57 +03:00
|
|
|
startva = (startva / chunksize) * chunksize;
|
|
|
|
endva = roundup(endva, chunksize);
|
|
|
|
UVMHIST_LOG(maphist, " chunk amap ==> clip 0x%x->0x%x"
|
|
|
|
"to 0x%x->0x%x", entry->start, entry->end, startva,
|
|
|
|
endva);
|
2005-01-02 00:00:06 +03:00
|
|
|
UVM_MAP_CLIP_START(map, entry, startva, NULL);
|
1998-05-14 17:51:28 +04:00
|
|
|
/* watch out for endva wrap-around! */
|
|
|
|
if (endva >= startva)
|
2005-01-02 00:00:06 +03:00
|
|
|
UVM_MAP_CLIP_END(map, entry, endva, NULL);
|
1998-02-08 19:07:57 +03:00
|
|
|
}
|
|
|
|
|
2006-02-15 17:06:45 +03:00
|
|
|
if ((flags & AMAP_COPY_NOMERGE) == 0 &&
|
|
|
|
uvm_mapent_trymerge(map, entry, UVM_MERGE_COPYING)) {
|
2005-05-17 17:55:33 +04:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2001-05-25 08:06:11 +04:00
|
|
|
UVMHIST_LOG(maphist, "<- done [creating new amap 0x%x->0x%x]",
|
1998-02-05 09:25:08 +03:00
|
|
|
entry->start, entry->end, 0, 0);
|
1999-01-25 02:53:14 +03:00
|
|
|
entry->aref.ar_pageoff = 0;
|
1998-02-08 19:07:57 +03:00
|
|
|
entry->aref.ar_amap = amap_alloc(entry->end - entry->start, 0,
|
|
|
|
waitf);
|
|
|
|
if (entry->aref.ar_amap != NULL)
|
|
|
|
entry->etype &= ~UVM_ET_NEEDSCOPY;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
1999-01-28 17:46:27 +03:00
|
|
|
* first check and see if we are the only map entry
|
|
|
|
* referencing the amap we currently have. if so, then we can
|
|
|
|
* just take it over rather than copying it. note that we are
|
|
|
|
* reading am_ref with the amap unlocked... the value can only
|
|
|
|
* be one if we have the only reference to the amap (via our
|
|
|
|
* locked map). if we are greater than one we fall through to
|
|
|
|
* the next case (where we double check the value).
|
1998-02-08 19:07:57 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
if (entry->aref.ar_amap->am_ref == 1) {
|
|
|
|
entry->etype &= ~UVM_ET_NEEDSCOPY;
|
|
|
|
UVMHIST_LOG(maphist, "<- done [ref cnt = 1, took it over]",
|
|
|
|
0, 0, 0, 0);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* looks like we need to copy the map.
|
|
|
|
*/
|
|
|
|
|
2001-05-25 08:06:11 +04:00
|
|
|
UVMHIST_LOG(maphist," amap=%p, ref=%d, must copy it",
|
1998-02-08 19:07:57 +03:00
|
|
|
entry->aref.ar_amap, entry->aref.ar_amap->am_ref, 0, 0);
|
|
|
|
AMAP_B2SLOT(slots, entry->end - entry->start);
|
|
|
|
amap = amap_alloc1(slots, 0, waitf);
|
|
|
|
if (amap == NULL) {
|
|
|
|
UVMHIST_LOG(maphist, " amap_alloc1 failed", 0,0,0,0);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
srcamap = entry->aref.ar_amap;
|
1999-01-25 02:53:14 +03:00
|
|
|
amap_lock(srcamap);
|
1998-02-08 19:07:57 +03:00
|
|
|
|
|
|
|
/*
|
1999-01-28 17:46:27 +03:00
|
|
|
* need to double check reference count now that we've got the
|
|
|
|
* src amap locked down. the reference count could have
|
|
|
|
* changed while we were in malloc. if the reference count
|
|
|
|
* dropped down to one we take over the old map rather than
|
|
|
|
* copying the amap.
|
1998-02-08 19:07:57 +03:00
|
|
|
*/
|
|
|
|
|
1999-01-28 17:46:27 +03:00
|
|
|
if (srcamap->am_ref == 1) { /* take it over? */
|
1998-02-08 19:07:57 +03:00
|
|
|
entry->etype &= ~UVM_ET_NEEDSCOPY;
|
|
|
|
amap->am_ref--; /* drop final reference to map */
|
1999-01-28 17:46:27 +03:00
|
|
|
amap_free(amap); /* dispose of new (unused) amap */
|
1999-01-25 02:53:14 +03:00
|
|
|
amap_unlock(srcamap);
|
1998-02-08 19:07:57 +03:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
1999-01-28 17:46:27 +03:00
|
|
|
* we must copy it now.
|
1998-02-08 19:07:57 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
UVMHIST_LOG(maphist, " copying amap now",0, 0, 0, 0);
|
|
|
|
for (lcv = 0 ; lcv < slots; lcv++) {
|
|
|
|
amap->am_anon[lcv] =
|
1999-01-25 02:53:14 +03:00
|
|
|
srcamap->am_anon[entry->aref.ar_pageoff + lcv];
|
1998-02-08 19:07:57 +03:00
|
|
|
if (amap->am_anon[lcv] == NULL)
|
|
|
|
continue;
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_enter(&amap->am_anon[lcv]->an_lock);
|
1998-02-08 19:07:57 +03:00
|
|
|
amap->am_anon[lcv]->an_ref++;
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_exit(&amap->am_anon[lcv]->an_lock);
|
1998-02-08 19:07:57 +03:00
|
|
|
amap->am_bckptr[lcv] = amap->am_nused;
|
|
|
|
amap->am_slots[amap->am_nused] = lcv;
|
|
|
|
amap->am_nused++;
|
|
|
|
}
|
2001-12-05 04:33:09 +03:00
|
|
|
memset(&amap->am_anon[lcv], 0,
|
|
|
|
(amap->am_maxslot - lcv) * sizeof(struct vm_anon *));
|
1998-02-08 19:07:57 +03:00
|
|
|
|
|
|
|
/*
|
1999-01-28 17:46:27 +03:00
|
|
|
* drop our reference to the old amap (srcamap) and unlock.
|
|
|
|
* we know that the reference count on srcamap is greater than
|
|
|
|
* one (we checked above), so there is no way we could drop
|
|
|
|
* the count to zero. [and no need to worry about freeing it]
|
1998-02-08 19:07:57 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
srcamap->am_ref--;
|
|
|
|
if (srcamap->am_ref == 1 && (srcamap->am_flags & AMAP_SHARED) != 0)
|
|
|
|
srcamap->am_flags &= ~AMAP_SHARED; /* clear shared flag */
|
1999-01-28 17:46:27 +03:00
|
|
|
#ifdef UVM_AMAP_PPREF
|
1998-02-08 19:07:57 +03:00
|
|
|
if (srcamap->am_ppref && srcamap->am_ppref != PPREF_NONE) {
|
2001-05-25 08:06:11 +04:00
|
|
|
amap_pp_adjref(srcamap, entry->aref.ar_pageoff,
|
2000-11-25 09:27:59 +03:00
|
|
|
(entry->end - entry->start) >> PAGE_SHIFT, -1);
|
1998-02-08 19:07:57 +03:00
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
#endif
|
|
|
|
|
1999-01-25 02:53:14 +03:00
|
|
|
amap_unlock(srcamap);
|
1998-02-08 19:07:57 +03:00
|
|
|
|
2005-05-11 17:02:25 +04:00
|
|
|
amap_list_insert(amap);
|
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
/*
|
|
|
|
* install new amap.
|
|
|
|
*/
|
|
|
|
|
1999-01-25 02:53:14 +03:00
|
|
|
entry->aref.ar_pageoff = 0;
|
1998-02-08 19:07:57 +03:00
|
|
|
entry->aref.ar_amap = amap;
|
|
|
|
entry->etype &= ~UVM_ET_NEEDSCOPY;
|
|
|
|
UVMHIST_LOG(maphist, "<- done",0, 0, 0, 0);
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* amap_cow_now: resolve all copy-on-write faults in an amap now for fork(2)
|
|
|
|
*
|
|
|
|
* called during fork(2) when the parent process has a wired map
|
|
|
|
* entry. in that case we want to avoid write-protecting pages
|
|
|
|
* in the parent's map (e.g. like what you'd do for a COW page)
|
|
|
|
* so we resolve the COW here.
|
|
|
|
*
|
|
|
|
* => assume parent's entry was wired, thus all pages are resident.
|
|
|
|
* => assume pages that are loaned out (loan_count) are already mapped
|
|
|
|
* read-only in all maps, and thus no need for us to worry about them
|
|
|
|
* => assume both parent and child vm_map's are locked
|
|
|
|
* => caller passes child's map/entry in to us
|
|
|
|
* => if we run out of memory we will unlock the amap and sleep _with_ the
|
|
|
|
* parent and child vm_map's locked(!). we have to do this since
|
|
|
|
* we are in the middle of a fork(2) and we can't let the parent
|
|
|
|
* map change until we are done copying all the map entrys.
|
|
|
|
* => XXXCDC: out of memory should cause fork to fail, but there is
|
|
|
|
* currently no easy way to do this (needs fix)
|
|
|
|
* => page queues must be unlocked (we may lock them)
|
|
|
|
*/
|
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
void
|
2006-11-01 13:17:58 +03:00
|
|
|
amap_cow_now(struct vm_map *map, struct vm_map_entry *entry)
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
1998-02-08 19:07:57 +03:00
|
|
|
struct vm_amap *amap = entry->aref.ar_amap;
|
|
|
|
int lcv, slot;
|
|
|
|
struct vm_anon *anon, *nanon;
|
|
|
|
struct vm_page *pg, *npg;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
/*
|
|
|
|
* note that if we unlock the amap then we must ReStart the "lcv" for
|
|
|
|
* loop because some other process could reorder the anon's in the
|
|
|
|
* am_anon[] array on us while the lock is dropped.
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
ReStart:
|
1999-01-25 02:53:14 +03:00
|
|
|
amap_lock(amap);
|
1998-02-08 19:07:57 +03:00
|
|
|
for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
|
|
|
|
|
|
|
|
/*
|
|
|
|
* get the page
|
|
|
|
*/
|
|
|
|
|
|
|
|
slot = amap->am_slots[lcv];
|
|
|
|
anon = amap->am_anon[slot];
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_enter(&anon->an_lock);
|
2005-11-06 18:57:32 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If the anon has only one ref, we must have already copied it.
|
|
|
|
* This can happen if we needed to sleep waiting for memory
|
|
|
|
* in a previous run through this loop. The new page might
|
|
|
|
* even have been paged out, since the new page is not wired.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (anon->an_ref == 1) {
|
|
|
|
KASSERT(anon->an_page != NULL || anon->an_swslot != 0);
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_exit(&anon->an_lock);
|
2005-11-06 18:57:32 +03:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The old page must be resident since the parent is wired.
|
|
|
|
*/
|
|
|
|
|
2005-05-11 17:02:25 +04:00
|
|
|
pg = anon->an_page;
|
2005-11-06 18:57:32 +03:00
|
|
|
KASSERT(pg != NULL);
|
|
|
|
KASSERT(pg->wire_count > 0);
|
1998-02-08 19:07:57 +03:00
|
|
|
|
|
|
|
/*
|
2005-11-06 18:57:32 +03:00
|
|
|
* If the page is loaned then it must already be mapped
|
|
|
|
* read-only and we don't need to copy it.
|
1998-02-08 19:07:57 +03:00
|
|
|
*/
|
|
|
|
|
2005-11-06 18:57:32 +03:00
|
|
|
if (pg->loan_count != 0) {
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_exit(&anon->an_lock);
|
2005-11-06 18:57:32 +03:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
KASSERT(pg->uanon == anon && pg->uobject == NULL);
|
1998-02-08 19:07:57 +03:00
|
|
|
|
|
|
|
/*
|
2005-11-06 18:57:32 +03:00
|
|
|
* if the page is busy then we have to unlock, wait for
|
|
|
|
* it and then restart.
|
1998-02-08 19:07:57 +03:00
|
|
|
*/
|
|
|
|
|
2005-11-06 18:57:32 +03:00
|
|
|
if (pg->flags & PG_BUSY) {
|
|
|
|
pg->flags |= PG_WANTED;
|
|
|
|
amap_unlock(amap);
|
2007-02-22 09:05:00 +03:00
|
|
|
UVM_UNLOCK_AND_WAIT(pg, &anon->an_lock, false,
|
2005-11-06 18:57:32 +03:00
|
|
|
"cownow", 0);
|
|
|
|
goto ReStart;
|
|
|
|
}
|
1998-02-08 19:07:57 +03:00
|
|
|
|
2005-11-06 18:57:32 +03:00
|
|
|
/*
|
|
|
|
* ok, time to do a copy-on-write to a new anon
|
|
|
|
*/
|
|
|
|
|
|
|
|
nanon = uvm_analloc();
|
|
|
|
if (nanon) {
|
|
|
|
npg = uvm_pagealloc(NULL, 0, nanon, 0);
|
|
|
|
} else
|
|
|
|
npg = NULL; /* XXX: quiet gcc warning */
|
|
|
|
if (nanon == NULL || npg == NULL) {
|
1998-02-08 19:07:57 +03:00
|
|
|
|
|
|
|
/*
|
2005-11-06 18:57:32 +03:00
|
|
|
* XXXCDC: we should cause fork to fail, but we can't.
|
1998-02-08 19:07:57 +03:00
|
|
|
*/
|
|
|
|
|
2005-11-06 18:57:32 +03:00
|
|
|
if (nanon) {
|
|
|
|
nanon->an_ref--;
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_exit(&nanon->an_lock);
|
2005-11-06 18:57:32 +03:00
|
|
|
uvm_anfree(nanon);
|
1998-02-08 19:07:57 +03:00
|
|
|
}
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_exit(&anon->an_lock);
|
2005-11-06 18:57:32 +03:00
|
|
|
amap_unlock(amap);
|
|
|
|
uvm_wait("cownowpage");
|
|
|
|
goto ReStart;
|
|
|
|
}
|
2001-05-25 08:06:11 +04:00
|
|
|
|
2005-11-06 18:57:32 +03:00
|
|
|
/*
|
|
|
|
* got it... now we can copy the data and replace anon
|
|
|
|
* with our new one...
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
2005-11-06 18:57:32 +03:00
|
|
|
uvm_pagecopy(pg, npg); /* old -> new */
|
|
|
|
anon->an_ref--; /* can't drop to zero */
|
|
|
|
amap->am_anon[slot] = nanon; /* replace */
|
1998-02-08 19:07:57 +03:00
|
|
|
|
2005-11-06 18:57:32 +03:00
|
|
|
/*
|
|
|
|
* drop PG_BUSY on new page ... since we have had its owner
|
|
|
|
* locked the whole time it can't be PG_RELEASED or PG_WANTED.
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_enter(&uvm_pageqlock);
|
2005-11-06 18:57:32 +03:00
|
|
|
uvm_pageactivate(npg);
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_exit(&uvm_pageqlock);
|
2005-11-06 18:57:32 +03:00
|
|
|
npg->flags &= ~(PG_BUSY|PG_FAKE);
|
|
|
|
UVM_PAGE_OWN(npg, NULL);
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_exit(&nanon->an_lock);
|
|
|
|
mutex_exit(&anon->an_lock);
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
}
|
2000-08-02 23:24:29 +04:00
|
|
|
amap_unlock(amap);
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2001-07-22 17:33:58 +04:00
|
|
|
* amap_splitref: split a single reference into two separate references
|
1998-02-05 09:25:08 +03:00
|
|
|
*
|
1999-01-28 17:46:27 +03:00
|
|
|
* => called from uvm_map's clip routines
|
|
|
|
* => origref's map should be locked
|
|
|
|
* => origref->ar_amap should be unlocked (we will lock)
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
1998-02-08 19:07:57 +03:00
|
|
|
void
|
2005-06-27 06:19:48 +04:00
|
|
|
amap_splitref(struct vm_aref *origref, struct vm_aref *splitref, vaddr_t offset)
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
1998-02-08 19:07:57 +03:00
|
|
|
int leftslots;
|
2006-04-21 18:04:45 +04:00
|
|
|
struct vm_amap *amap;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2006-04-21 18:04:45 +04:00
|
|
|
KASSERT(splitref->ar_amap == origref->ar_amap);
|
1998-02-08 19:07:57 +03:00
|
|
|
AMAP_B2SLOT(leftslots, offset);
|
|
|
|
if (leftslots == 0)
|
|
|
|
panic("amap_splitref: split at zero offset");
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2006-04-21 18:04:45 +04:00
|
|
|
amap = origref->ar_amap;
|
|
|
|
amap_lock(amap);
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
/*
|
|
|
|
* now: amap is locked and we have a valid am_mapped array.
|
|
|
|
*/
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2006-04-21 18:04:45 +04:00
|
|
|
if (amap->am_nslot - origref->ar_pageoff - leftslots <= 0)
|
1998-02-08 19:07:57 +03:00
|
|
|
panic("amap_splitref: map size check failed");
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1999-01-28 17:46:27 +03:00
|
|
|
#ifdef UVM_AMAP_PPREF
|
1998-10-08 23:47:50 +04:00
|
|
|
/*
|
|
|
|
* establish ppref before we add a duplicate reference to the amap
|
|
|
|
*/
|
2006-04-21 18:04:45 +04:00
|
|
|
if (amap->am_ppref == NULL)
|
|
|
|
amap_pp_establish(amap, origref->ar_pageoff);
|
1998-10-08 23:47:50 +04:00
|
|
|
#endif
|
|
|
|
|
2006-04-21 18:04:45 +04:00
|
|
|
amap->am_ref++; /* not a share reference */
|
1999-01-25 02:53:14 +03:00
|
|
|
splitref->ar_pageoff = origref->ar_pageoff + leftslots;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2006-04-21 18:04:45 +04:00
|
|
|
amap_unlock(amap);
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
1999-01-28 17:46:27 +03:00
|
|
|
#ifdef UVM_AMAP_PPREF
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* amap_pp_establish: add a ppref array to an amap, if possible
|
|
|
|
*
|
|
|
|
* => amap locked by caller
|
|
|
|
*/
|
1998-02-08 19:07:57 +03:00
|
|
|
void
|
2005-06-27 06:19:48 +04:00
|
|
|
amap_pp_establish(struct vm_amap *amap, vaddr_t offset)
|
1998-02-08 19:07:57 +03:00
|
|
|
{
|
2006-06-25 12:03:46 +04:00
|
|
|
|
|
|
|
amap->am_ppref = kmem_alloc(amap->am_maxslot * sizeof(*amap->am_ppref),
|
|
|
|
KM_NOSLEEP);
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
/*
|
|
|
|
* if we fail then we just won't use ppref for this amap
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
if (amap->am_ppref == NULL) {
|
|
|
|
amap->am_ppref = PPREF_NONE; /* not using it */
|
|
|
|
return;
|
|
|
|
}
|
1998-08-10 02:36:37 +04:00
|
|
|
memset(amap->am_ppref, 0, sizeof(int) * amap->am_maxslot);
|
2002-12-20 21:21:13 +03:00
|
|
|
pp_setreflen(amap->am_ppref, 0, 0, offset);
|
|
|
|
pp_setreflen(amap->am_ppref, offset, amap->am_ref,
|
|
|
|
amap->am_nslot - offset);
|
1998-02-08 19:07:57 +03:00
|
|
|
return;
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* amap_pp_adjref: adjust reference count to a part of an amap using the
|
|
|
|
* per-page reference count array.
|
|
|
|
*
|
|
|
|
* => map and amap locked by caller
|
|
|
|
* => caller must check that ppref != PPREF_NONE before calling
|
|
|
|
*/
|
1998-02-08 19:07:57 +03:00
|
|
|
void
|
2005-06-27 06:19:48 +04:00
|
|
|
amap_pp_adjref(struct vm_amap *amap, int curslot, vsize_t slotlen, int adjval)
|
1998-02-08 19:07:57 +03:00
|
|
|
{
|
2002-02-25 03:39:16 +03:00
|
|
|
int stopslot, *ppref, lcv, prevlcv;
|
|
|
|
int ref, len, prevref, prevlen;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2000-11-25 09:27:59 +03:00
|
|
|
stopslot = curslot + slotlen;
|
1998-02-08 19:07:57 +03:00
|
|
|
ppref = amap->am_ppref;
|
2002-02-25 03:39:16 +03:00
|
|
|
prevlcv = 0;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
/*
|
2002-02-25 03:39:16 +03:00
|
|
|
* first advance to the correct place in the ppref array,
|
|
|
|
* fragment if needed.
|
1998-02-08 19:07:57 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
for (lcv = 0 ; lcv < curslot ; lcv += len) {
|
|
|
|
pp_getreflen(ppref, lcv, &ref, &len);
|
|
|
|
if (lcv + len > curslot) { /* goes past start? */
|
|
|
|
pp_setreflen(ppref, lcv, ref, curslot - lcv);
|
|
|
|
pp_setreflen(ppref, curslot, ref, len - (curslot -lcv));
|
|
|
|
len = curslot - lcv; /* new length of entry @ lcv */
|
|
|
|
}
|
2002-02-25 03:39:16 +03:00
|
|
|
prevlcv = lcv;
|
1998-02-08 19:07:57 +03:00
|
|
|
}
|
2002-03-28 09:06:29 +03:00
|
|
|
if (lcv != 0)
|
|
|
|
pp_getreflen(ppref, prevlcv, &prevref, &prevlen);
|
|
|
|
else {
|
|
|
|
/* Ensure that the "prevref == ref" test below always
|
|
|
|
* fails, since we're starting from the beginning of
|
|
|
|
* the ppref array; that is, there is no previous
|
2004-03-24 10:50:48 +03:00
|
|
|
* chunk.
|
2002-03-28 09:06:29 +03:00
|
|
|
*/
|
|
|
|
prevref = -1;
|
|
|
|
prevlen = 0;
|
|
|
|
}
|
1998-02-08 19:07:57 +03:00
|
|
|
|
|
|
|
/*
|
2002-02-25 03:39:16 +03:00
|
|
|
* now adjust reference counts in range. merge the first
|
|
|
|
* changed entry with the last unchanged entry if possible.
|
1998-02-08 19:07:57 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
if (lcv != curslot)
|
1999-01-28 17:46:27 +03:00
|
|
|
panic("amap_pp_adjref: overshot target");
|
1998-02-08 19:07:57 +03:00
|
|
|
|
|
|
|
for (/* lcv already set */; lcv < stopslot ; lcv += len) {
|
|
|
|
pp_getreflen(ppref, lcv, &ref, &len);
|
|
|
|
if (lcv + len > stopslot) { /* goes past end? */
|
|
|
|
pp_setreflen(ppref, lcv, ref, stopslot - lcv);
|
|
|
|
pp_setreflen(ppref, stopslot, ref,
|
|
|
|
len - (stopslot - lcv));
|
|
|
|
len = stopslot - lcv;
|
|
|
|
}
|
2002-02-25 03:39:16 +03:00
|
|
|
ref += adjval;
|
1998-02-08 19:07:57 +03:00
|
|
|
if (ref < 0)
|
|
|
|
panic("amap_pp_adjref: negative reference count");
|
2002-02-25 03:39:16 +03:00
|
|
|
if (lcv == prevlcv + prevlen && ref == prevref) {
|
|
|
|
pp_setreflen(ppref, prevlcv, ref, prevlen + len);
|
|
|
|
} else {
|
|
|
|
pp_setreflen(ppref, lcv, ref, len);
|
|
|
|
}
|
1998-02-08 19:07:57 +03:00
|
|
|
if (ref == 0)
|
|
|
|
amap_wiperange(amap, lcv, len);
|
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* amap_wiperange: wipe out a range of an amap
|
|
|
|
* [different from amap_wipeout because the amap is kept intact]
|
|
|
|
*
|
|
|
|
* => both map and amap must be locked by caller.
|
|
|
|
*/
|
1998-02-08 19:07:57 +03:00
|
|
|
void
|
2005-06-27 06:19:48 +04:00
|
|
|
amap_wiperange(struct vm_amap *amap, int slotoff, int slots)
|
1998-02-08 19:07:57 +03:00
|
|
|
{
|
2001-12-02 01:11:13 +03:00
|
|
|
int byanon, lcv, stop, curslot, ptr, slotend;
|
1998-02-08 19:07:57 +03:00
|
|
|
struct vm_anon *anon;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
/*
|
|
|
|
* we can either traverse the amap by am_anon or by am_slots depending
|
|
|
|
* on which is cheaper. decide now.
|
|
|
|
*/
|
1998-02-05 09:25:08 +03:00
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
if (slots < amap->am_nused) {
|
2007-02-22 09:05:00 +03:00
|
|
|
byanon = true;
|
1998-02-08 19:07:57 +03:00
|
|
|
lcv = slotoff;
|
|
|
|
stop = slotoff + slots;
|
Implement backwards extension of amaps. There are three cases to deal
with:
Case #1 -- adjust offset: The slot offset in the aref can be
decremented to cover the required size addition.
Case #2 -- move pages and adjust offset: The slot offset is not large
enough, but the amap contains enough inactive space *after* the mapped
pages to make up the difference, so active slots are slid to the "end"
of the amap, and the slot offset is, again, adjusted to cover the
required size addition. This optimizes for hitting case #1 again on
the next small extension.
Case #3 -- reallocate, move pages, and adjust offset: There is not
enough inactive space in the amap, so the arrays are reallocated, and
the active pages are copied again to the "end" of the amap, and the
slot offset is adjusted to cover the required size. This also
optimizes for hitting case #1 on the next backwards extension.
This provides the missing piece in the "forward extension of
vm_map_entries" logic, so the merge failure counters have been
removed.
Not many applications will make any use of this at this time (except
for jvms and perhaps gcc3), but a "top-down" memory allocator will use
it extensively.
2002-11-14 20:58:48 +03:00
|
|
|
slotend = 0;
|
1998-02-08 19:07:57 +03:00
|
|
|
} else {
|
2007-02-22 09:05:00 +03:00
|
|
|
byanon = false;
|
1998-02-08 19:07:57 +03:00
|
|
|
lcv = 0;
|
|
|
|
stop = amap->am_nused;
|
2001-12-02 01:11:13 +03:00
|
|
|
slotend = slotoff + slots;
|
1998-02-08 19:07:57 +03:00
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2001-12-02 01:11:13 +03:00
|
|
|
while (lcv < stop) {
|
1998-11-04 10:07:22 +03:00
|
|
|
int refs;
|
1998-02-08 19:07:57 +03:00
|
|
|
|
|
|
|
if (byanon) {
|
2001-12-02 01:11:13 +03:00
|
|
|
curslot = lcv++; /* lcv advances here */
|
|
|
|
if (amap->am_anon[curslot] == NULL)
|
1998-02-08 19:07:57 +03:00
|
|
|
continue;
|
|
|
|
} else {
|
|
|
|
curslot = amap->am_slots[lcv];
|
2001-12-02 01:11:13 +03:00
|
|
|
if (curslot < slotoff || curslot >= slotend) {
|
|
|
|
lcv++; /* lcv advances here */
|
1998-02-08 19:07:57 +03:00
|
|
|
continue;
|
2001-12-02 01:11:13 +03:00
|
|
|
}
|
|
|
|
stop--; /* drop stop, since anon will be removed */
|
1998-02-08 19:07:57 +03:00
|
|
|
}
|
|
|
|
anon = amap->am_anon[curslot];
|
|
|
|
|
|
|
|
/*
|
|
|
|
* remove it from the amap
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1998-02-08 19:07:57 +03:00
|
|
|
amap->am_anon[curslot] = NULL;
|
|
|
|
ptr = amap->am_bckptr[curslot];
|
|
|
|
if (ptr != (amap->am_nused - 1)) {
|
|
|
|
amap->am_slots[ptr] =
|
|
|
|
amap->am_slots[amap->am_nused - 1];
|
|
|
|
amap->am_bckptr[amap->am_slots[ptr]] =
|
|
|
|
ptr; /* back ptr. */
|
|
|
|
}
|
|
|
|
amap->am_nused--;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* drop anon reference count
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_enter(&anon->an_lock);
|
1998-11-04 10:07:22 +03:00
|
|
|
refs = --anon->an_ref;
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_exit(&anon->an_lock);
|
1998-11-04 10:07:22 +03:00
|
|
|
if (refs == 0) {
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1998-11-04 10:07:22 +03:00
|
|
|
/*
|
|
|
|
* we just eliminated the last reference to an anon.
|
|
|
|
* free it.
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1998-11-04 10:07:22 +03:00
|
|
|
uvm_anfree(anon);
|
1998-02-08 19:07:57 +03:00
|
|
|
}
|
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|
2005-05-11 17:02:25 +04:00
|
|
|
|
2005-09-14 02:00:05 +04:00
|
|
|
#if defined(VMSWAP)
|
|
|
|
|
2005-05-11 17:02:25 +04:00
|
|
|
/*
|
|
|
|
* amap_swap_off: pagein anonymous pages in amaps and drop swap slots.
|
|
|
|
*
|
|
|
|
* => called with swap_syscall_lock held.
|
|
|
|
* => note that we don't always traverse all anons.
|
|
|
|
* eg. amaps being wiped out, released anons.
|
2007-02-22 09:05:00 +03:00
|
|
|
* => return true if failed.
|
2005-05-11 17:02:25 +04:00
|
|
|
*/
|
|
|
|
|
2007-02-22 01:59:35 +03:00
|
|
|
bool
|
2005-05-11 17:02:25 +04:00
|
|
|
amap_swap_off(int startslot, int endslot)
|
|
|
|
{
|
|
|
|
struct vm_amap *am;
|
|
|
|
struct vm_amap *am_next;
|
|
|
|
struct vm_amap marker_prev;
|
|
|
|
struct vm_amap marker_next;
|
|
|
|
struct lwp *l = curlwp;
|
2007-02-22 09:05:00 +03:00
|
|
|
bool rv = false;
|
2005-05-11 17:02:25 +04:00
|
|
|
|
|
|
|
#if defined(DIAGNOSTIC)
|
|
|
|
memset(&marker_prev, 0, sizeof(marker_prev));
|
|
|
|
memset(&marker_next, 0, sizeof(marker_next));
|
|
|
|
#endif /* defined(DIAGNOSTIC) */
|
|
|
|
|
2007-07-10 00:51:58 +04:00
|
|
|
uvm_lwp_hold(l);
|
2007-07-21 23:21:53 +04:00
|
|
|
mutex_enter(&amap_list_lock);
|
2005-05-11 17:02:25 +04:00
|
|
|
for (am = LIST_FIRST(&amap_list); am != NULL && !rv; am = am_next) {
|
|
|
|
int i;
|
|
|
|
|
|
|
|
LIST_INSERT_BEFORE(am, &marker_prev, am_list);
|
|
|
|
LIST_INSERT_AFTER(am, &marker_next, am_list);
|
|
|
|
|
|
|
|
if (!amap_lock_try(am)) {
|
2007-07-21 23:21:53 +04:00
|
|
|
mutex_exit(&amap_list_lock);
|
2007-02-10 00:55:00 +03:00
|
|
|
preempt();
|
2007-07-21 23:21:53 +04:00
|
|
|
mutex_enter(&amap_list_lock);
|
2005-05-11 17:02:25 +04:00
|
|
|
am_next = LIST_NEXT(&marker_prev, am_list);
|
|
|
|
if (am_next == &marker_next) {
|
|
|
|
am_next = LIST_NEXT(am_next, am_list);
|
|
|
|
} else {
|
|
|
|
KASSERT(LIST_NEXT(am_next, am_list) ==
|
|
|
|
&marker_next);
|
|
|
|
}
|
|
|
|
LIST_REMOVE(&marker_prev, am_list);
|
|
|
|
LIST_REMOVE(&marker_next, am_list);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2007-07-21 23:21:53 +04:00
|
|
|
mutex_exit(&amap_list_lock);
|
2005-05-11 17:02:25 +04:00
|
|
|
|
|
|
|
if (am->am_nused <= 0) {
|
|
|
|
amap_unlock(am);
|
|
|
|
goto next;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < am->am_nused; i++) {
|
|
|
|
int slot;
|
|
|
|
int swslot;
|
|
|
|
struct vm_anon *anon;
|
|
|
|
|
|
|
|
slot = am->am_slots[i];
|
|
|
|
anon = am->am_anon[slot];
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_enter(&anon->an_lock);
|
2005-05-11 17:02:25 +04:00
|
|
|
|
|
|
|
swslot = anon->an_swslot;
|
|
|
|
if (swslot < startslot || endslot <= swslot) {
|
2008-01-02 14:48:20 +03:00
|
|
|
mutex_exit(&anon->an_lock);
|
2005-05-11 17:02:25 +04:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
am->am_flags |= AMAP_SWAPOFF;
|
|
|
|
amap_unlock(am);
|
|
|
|
|
|
|
|
rv = uvm_anon_pagein(anon);
|
|
|
|
|
|
|
|
amap_lock(am);
|
|
|
|
am->am_flags &= ~AMAP_SWAPOFF;
|
|
|
|
if (amap_refs(am) == 0) {
|
|
|
|
amap_wipeout(am);
|
|
|
|
am = NULL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (rv) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
i = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (am) {
|
|
|
|
amap_unlock(am);
|
|
|
|
}
|
|
|
|
|
|
|
|
next:
|
2007-07-21 23:21:53 +04:00
|
|
|
mutex_enter(&amap_list_lock);
|
2005-05-11 17:02:25 +04:00
|
|
|
KASSERT(LIST_NEXT(&marker_prev, am_list) == &marker_next ||
|
|
|
|
LIST_NEXT(LIST_NEXT(&marker_prev, am_list), am_list) ==
|
|
|
|
&marker_next);
|
|
|
|
am_next = LIST_NEXT(&marker_next, am_list);
|
|
|
|
LIST_REMOVE(&marker_prev, am_list);
|
|
|
|
LIST_REMOVE(&marker_next, am_list);
|
|
|
|
}
|
2007-07-21 23:21:53 +04:00
|
|
|
mutex_exit(&amap_list_lock);
|
2007-07-10 00:51:58 +04:00
|
|
|
uvm_lwp_rele(l);
|
2005-05-11 17:02:25 +04:00
|
|
|
|
|
|
|
return rv;
|
|
|
|
}
|
2005-09-14 02:00:05 +04:00
|
|
|
|
|
|
|
#endif /* defined(VMSWAP) */
|
2006-02-11 15:45:07 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* amap_lookup: look up a page in an amap
|
|
|
|
*
|
|
|
|
* => amap should be locked by caller.
|
|
|
|
*/
|
|
|
|
struct vm_anon *
|
|
|
|
amap_lookup(struct vm_aref *aref, vaddr_t offset)
|
|
|
|
{
|
|
|
|
int slot;
|
|
|
|
struct vm_amap *amap = aref->ar_amap;
|
|
|
|
UVMHIST_FUNC("amap_lookup"); UVMHIST_CALLED(maphist);
|
2007-07-21 23:21:53 +04:00
|
|
|
KASSERT(mutex_owned(&amap->am_l));
|
2006-02-11 15:45:07 +03:00
|
|
|
|
|
|
|
AMAP_B2SLOT(slot, offset);
|
|
|
|
slot += aref->ar_pageoff;
|
|
|
|
|
|
|
|
if (slot >= amap->am_nslot)
|
|
|
|
panic("amap_lookup: offset out of range");
|
|
|
|
|
|
|
|
UVMHIST_LOG(maphist, "<- done (amap=0x%x, offset=0x%x, result=0x%x)",
|
|
|
|
amap, offset, amap->am_anon[slot], 0);
|
|
|
|
return(amap->am_anon[slot]);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* amap_lookups: look up a range of pages in an amap
|
|
|
|
*
|
|
|
|
* => amap should be locked by caller.
|
|
|
|
* => XXXCDC: this interface is biased toward array-based amaps. fix.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
amap_lookups(struct vm_aref *aref, vaddr_t offset, struct vm_anon **anons,
|
|
|
|
int npages)
|
|
|
|
{
|
|
|
|
int slot;
|
|
|
|
struct vm_amap *amap = aref->ar_amap;
|
|
|
|
UVMHIST_FUNC("amap_lookups"); UVMHIST_CALLED(maphist);
|
2007-07-21 23:21:53 +04:00
|
|
|
KASSERT(mutex_owned(&amap->am_l));
|
2006-02-11 15:45:07 +03:00
|
|
|
|
|
|
|
AMAP_B2SLOT(slot, offset);
|
|
|
|
slot += aref->ar_pageoff;
|
|
|
|
|
|
|
|
UVMHIST_LOG(maphist, " slot=%d, npages=%d, nslot=%d", slot, npages,
|
|
|
|
amap->am_nslot, 0);
|
|
|
|
|
|
|
|
if ((slot + (npages - 1)) >= amap->am_nslot)
|
|
|
|
panic("amap_lookups: offset out of range");
|
|
|
|
|
|
|
|
memcpy(anons, &amap->am_anon[slot], npages * sizeof(struct vm_anon *));
|
|
|
|
|
|
|
|
UVMHIST_LOG(maphist, "<- done", 0, 0, 0, 0);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* amap_add: add (or replace) a page to an amap
|
|
|
|
*
|
|
|
|
* => caller must lock amap.
|
|
|
|
* => if (replace) caller must lock anon because we might have to call
|
|
|
|
* pmap_page_protect on the anon's page.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
amap_add(struct vm_aref *aref, vaddr_t offset, struct vm_anon *anon,
|
2007-02-22 01:59:35 +03:00
|
|
|
bool replace)
|
2006-02-11 15:45:07 +03:00
|
|
|
{
|
|
|
|
int slot;
|
|
|
|
struct vm_amap *amap = aref->ar_amap;
|
|
|
|
UVMHIST_FUNC("amap_add"); UVMHIST_CALLED(maphist);
|
2007-07-21 23:21:53 +04:00
|
|
|
KASSERT(mutex_owned(&amap->am_l));
|
2006-02-11 15:45:07 +03:00
|
|
|
|
|
|
|
AMAP_B2SLOT(slot, offset);
|
|
|
|
slot += aref->ar_pageoff;
|
|
|
|
|
|
|
|
if (slot >= amap->am_nslot)
|
|
|
|
panic("amap_add: offset out of range");
|
|
|
|
|
|
|
|
if (replace) {
|
|
|
|
|
|
|
|
if (amap->am_anon[slot] == NULL)
|
|
|
|
panic("amap_add: replacing null anon");
|
|
|
|
if (amap->am_anon[slot]->an_page != NULL &&
|
|
|
|
(amap->am_flags & AMAP_SHARED) != 0) {
|
|
|
|
pmap_page_protect(amap->am_anon[slot]->an_page,
|
|
|
|
VM_PROT_NONE);
|
|
|
|
/*
|
|
|
|
* XXX: suppose page is supposed to be wired somewhere?
|
|
|
|
*/
|
|
|
|
}
|
|
|
|
} else { /* !replace */
|
|
|
|
if (amap->am_anon[slot] != NULL)
|
|
|
|
panic("amap_add: slot in use");
|
|
|
|
|
|
|
|
amap->am_bckptr[slot] = amap->am_nused;
|
|
|
|
amap->am_slots[amap->am_nused] = slot;
|
|
|
|
amap->am_nused++;
|
|
|
|
}
|
|
|
|
amap->am_anon[slot] = anon;
|
|
|
|
UVMHIST_LOG(maphist,
|
|
|
|
"<- done (amap=0x%x, offset=0x%x, anon=0x%x, rep=%d)",
|
|
|
|
amap, offset, anon, replace);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* amap_unadd: remove a page from an amap
|
|
|
|
*
|
|
|
|
* => caller must lock amap
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
amap_unadd(struct vm_aref *aref, vaddr_t offset)
|
|
|
|
{
|
|
|
|
int ptr, slot;
|
|
|
|
struct vm_amap *amap = aref->ar_amap;
|
|
|
|
UVMHIST_FUNC("amap_unadd"); UVMHIST_CALLED(maphist);
|
2007-07-21 23:21:53 +04:00
|
|
|
KASSERT(mutex_owned(&amap->am_l));
|
2006-02-11 15:45:07 +03:00
|
|
|
|
|
|
|
AMAP_B2SLOT(slot, offset);
|
|
|
|
slot += aref->ar_pageoff;
|
|
|
|
|
|
|
|
if (slot >= amap->am_nslot)
|
|
|
|
panic("amap_unadd: offset out of range");
|
|
|
|
|
|
|
|
if (amap->am_anon[slot] == NULL)
|
|
|
|
panic("amap_unadd: nothing there");
|
|
|
|
|
|
|
|
amap->am_anon[slot] = NULL;
|
|
|
|
ptr = amap->am_bckptr[slot];
|
|
|
|
|
|
|
|
if (ptr != (amap->am_nused - 1)) { /* swap to keep slots contig? */
|
|
|
|
amap->am_slots[ptr] = amap->am_slots[amap->am_nused - 1];
|
|
|
|
amap->am_bckptr[amap->am_slots[ptr]] = ptr; /* back link */
|
|
|
|
}
|
|
|
|
amap->am_nused--;
|
|
|
|
UVMHIST_LOG(maphist, "<- done (amap=0x%x, slot=0x%x)", amap, slot,0, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* amap_ref: gain a reference to an amap
|
|
|
|
*
|
|
|
|
* => amap must not be locked (we will lock)
|
|
|
|
* => "offset" and "len" are in units of pages
|
|
|
|
* => called at fork time to gain the child's reference
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
amap_ref(struct vm_amap *amap, vaddr_t offset, vsize_t len, int flags)
|
|
|
|
{
|
|
|
|
UVMHIST_FUNC("amap_ref"); UVMHIST_CALLED(maphist);
|
|
|
|
|
|
|
|
amap_lock(amap);
|
|
|
|
if (flags & AMAP_SHARED)
|
|
|
|
amap->am_flags |= AMAP_SHARED;
|
|
|
|
#ifdef UVM_AMAP_PPREF
|
|
|
|
if (amap->am_ppref == NULL && (flags & AMAP_REFALL) == 0 &&
|
|
|
|
len != amap->am_nslot)
|
|
|
|
amap_pp_establish(amap, offset);
|
|
|
|
#endif
|
|
|
|
amap->am_ref++;
|
|
|
|
#ifdef UVM_AMAP_PPREF
|
|
|
|
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
|
|
|
|
if (flags & AMAP_REFALL)
|
|
|
|
amap_pp_adjref(amap, 0, amap->am_nslot, 1);
|
|
|
|
else
|
|
|
|
amap_pp_adjref(amap, offset, len, 1);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
amap_unlock(amap);
|
|
|
|
UVMHIST_LOG(maphist,"<- done! amap=0x%x", amap, 0, 0, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* amap_unref: remove a reference to an amap
|
|
|
|
*
|
|
|
|
* => caller must remove all pmap-level references to this amap before
|
|
|
|
* dropping the reference
|
|
|
|
* => called from uvm_unmap_detach [only] ... note that entry is no
|
|
|
|
* longer part of a map and thus has no need for locking
|
|
|
|
* => amap must be unlocked (we will lock it).
|
|
|
|
*/
|
|
|
|
void
|
2007-02-22 01:59:35 +03:00
|
|
|
amap_unref(struct vm_amap *amap, vaddr_t offset, vsize_t len, bool all)
|
2006-02-11 15:45:07 +03:00
|
|
|
{
|
|
|
|
UVMHIST_FUNC("amap_unref"); UVMHIST_CALLED(maphist);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* lock it
|
|
|
|
*/
|
|
|
|
amap_lock(amap);
|
|
|
|
UVMHIST_LOG(maphist," amap=0x%x refs=%d, nused=%d",
|
|
|
|
amap, amap->am_ref, amap->am_nused, 0);
|
|
|
|
|
|
|
|
KASSERT(amap_refs(amap) > 0);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* if we are the last reference, free the amap and return.
|
|
|
|
*/
|
|
|
|
|
|
|
|
amap->am_ref--;
|
|
|
|
|
|
|
|
if (amap_refs(amap) == 0) {
|
|
|
|
amap_wipeout(amap); /* drops final ref and frees */
|
|
|
|
UVMHIST_LOG(maphist,"<- done (was last ref)!", 0, 0, 0, 0);
|
|
|
|
return; /* no need to unlock */
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* otherwise just drop the reference count(s)
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (amap_refs(amap) == 1 && (amap->am_flags & AMAP_SHARED) != 0)
|
|
|
|
amap->am_flags &= ~AMAP_SHARED; /* clear shared flag */
|
|
|
|
#ifdef UVM_AMAP_PPREF
|
|
|
|
if (amap->am_ppref == NULL && all == 0 && len != amap->am_nslot)
|
|
|
|
amap_pp_establish(amap, offset);
|
|
|
|
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
|
|
|
|
if (all)
|
|
|
|
amap_pp_adjref(amap, 0, amap->am_nslot, -1);
|
|
|
|
else
|
|
|
|
amap_pp_adjref(amap, offset, len, -1);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
amap_unlock(amap);
|
|
|
|
|
|
|
|
UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
|
|
|
|
}
|
|
|
|
|