/*	$NetBSD: uvm_page.c,v 1.256 2024/03/05 14:33:50 thorpej Exp $	*/

/*-
 * Copyright (c) 2019, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.256 2024/03/05 14:33:50 thorpej Exp $");

#include "opt_ddb.h"
#include "opt_uvm.h"
#include "opt_uvmhist.h"
#include "opt_readahead.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sched.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/radixtree.h>
#include <sys/atomic.h>
#include <sys/cpu.h>

#include <ddb/db_active.h>

#include <uvm/uvm.h>
#include <uvm/uvm_ddb.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_pgflcache.h>

/*
 * number of pages per-CPU to reserve for the kernel.
 */
#ifndef	UVM_RESERVED_PAGES_PER_CPU
#define	UVM_RESERVED_PAGES_PER_CPU	5
#endif
int vm_page_reserve_kernel = UVM_RESERVED_PAGES_PER_CPU;
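
/*
 * (Example: with the default of 5 on an 8-CPU machine, the kernel reserve
 *  works out to 40 pages once every CPU has attached; see uvm_page_init()
 *  and uvm_cpu_attach() below.)
 */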

/*
 * physical memory size;
 */
psize_t physmem;

/*
 * local variables
 */

/*
 * these variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of uvm_pageboot_alloc
 * and pmap_startup here also uses them internally.
 */
static vaddr_t      virtual_space_start;
static vaddr_t      virtual_space_end;

/*
 * we allocate an initial number of page colors in uvm_page_init(),
 * and remember them.  We may re-color pages as cache sizes are
 * discovered during the autoconfiguration phase.  But we can never
 * free the initial set of buckets, since they are allocated using
 * uvm_pageboot_alloc().
 */
static size_t recolored_pages_memsize /* = 0 */;
static char *recolored_pages_mem;

/*
 * freelist locks - one per bucket.
 */
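/*
 * (Each lock sits in a union padded out to a cache line, so that the
 *  per-bucket locks do not share cache lines with one another.)
 */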
union uvm_freelist_lock	uvm_freelist_locks[PGFL_MAX_BUCKETS]
    __cacheline_aligned;

/*
 * basic NUMA information.
 */
static struct uvm_page_numa_region {
	struct uvm_page_numa_region *next;
	paddr_t start;
	paddr_t size;
	u_int numa_id;
} *uvm_page_numa_region;
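
/*
 * (DEBUG only: lock and scratch kernel VA used by the page zero-check code.)
 */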
#ifdef DEBUG
kmutex_t uvm_zerochecklock __cacheline_aligned;
vaddr_t uvm_zerocheckkva;
#endif /* DEBUG */

/*
 * These functions are reserved for uvm(9) internal use and are not
 * exported in the header file uvm_physseg.h
 *
 * Thus they are redefined here.
 */
void uvm_physseg_init_seg(uvm_physseg_t, struct vm_page *);
void uvm_physseg_seg_chomp_slab(uvm_physseg_t, struct vm_page *, size_t);

/* returns a pgs array */
struct vm_page *uvm_physseg_seg_alloc_from_slab(uvm_physseg_t, size_t);

/*
 * inline functions
 */

/*
 * uvm_pageinsert_object: insert a page into the object.
 *
 * => caller must lock object
 * => caller should have already set pg's object and offset pointers
 */

static inline void
uvm_pageinsert_object(struct uvm_object *uobj, struct vm_page *pg)
{

	KASSERT(uobj == pg->uobject);
	KASSERT(rw_write_held(uobj->vmobjlock));
	KASSERT((pg->flags & PG_TABLED) == 0);

	if ((pg->flags & PG_STAT) != 0) {
		/* Cannot use uvm_pagegetdirty(): not yet in radix tree. */
		const unsigned int status = pg->flags & (PG_CLEAN | PG_DIRTY);

		if ((pg->flags & PG_FILE) != 0) {
			if (uobj->uo_npages == 0) {
				struct vnode *vp = (struct vnode *)uobj;
				mutex_enter(vp->v_interlock);
				KASSERT((vp->v_iflag & VI_PAGES) == 0);
				vp->v_iflag |= VI_PAGES;
				vholdl(vp);
				mutex_exit(vp->v_interlock);
			}
			if (UVM_OBJ_IS_VTEXT(uobj)) {
				cpu_count(CPU_COUNT_EXECPAGES, 1);
			}
			cpu_count(CPU_COUNT_FILEUNKNOWN + status, 1);
		} else {
			cpu_count(CPU_COUNT_ANONUNKNOWN + status, 1);
		}
	}
	pg->flags |= PG_TABLED;
	uobj->uo_npages++;
}
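
/*
 * uvm_pageinsert_tree: insert a page into the object's radix tree of pages,
 * keyed by the page's offset, and record the page's initial dirty state.
 *
 * => caller must lock object
 * => returns the error from radix_tree_insert_node() if the insert fails
 */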
static inline int
uvm_pageinsert_tree(struct uvm_object *uobj, struct vm_page *pg)
{
	const uint64_t idx = pg->offset >> PAGE_SHIFT;
	int error;

	KASSERT(rw_write_held(uobj->vmobjlock));

	error = radix_tree_insert_node(&uobj->uo_pages, idx, pg);
	if (error != 0) {
		return error;
	}
	if ((pg->flags & PG_CLEAN) == 0) {
		uvm_obj_page_set_dirty(pg);
	}
	KASSERT(((pg->flags & PG_CLEAN) == 0) ==
	    uvm_obj_page_dirty_p(pg));
	return 0;
}

/*
 * uvm_pageremove_object: remove a page from the object.
 *
 * => caller must lock object
 */

static inline void
uvm_pageremove_object(struct uvm_object *uobj, struct vm_page *pg)
{

	KASSERT(uobj == pg->uobject);
	KASSERT(rw_write_held(uobj->vmobjlock));
	KASSERT(pg->flags & PG_TABLED);

	if ((pg->flags & PG_STAT) != 0) {
		/* Cannot use uvm_pagegetdirty(): no longer in radix tree. */
		const unsigned int status = pg->flags & (PG_CLEAN | PG_DIRTY);

		if ((pg->flags & PG_FILE) != 0) {
			if (uobj->uo_npages == 1) {
				struct vnode *vp = (struct vnode *)uobj;
				mutex_enter(vp->v_interlock);
				KASSERT((vp->v_iflag & VI_PAGES) != 0);
				vp->v_iflag &= ~VI_PAGES;
				holdrelel(vp);
				mutex_exit(vp->v_interlock);
			}
			if (UVM_OBJ_IS_VTEXT(uobj)) {
				cpu_count(CPU_COUNT_EXECPAGES, -1);
			}
			cpu_count(CPU_COUNT_FILEUNKNOWN + status, -1);
		} else {
			cpu_count(CPU_COUNT_ANONUNKNOWN + status, -1);
		}
	}
	uobj->uo_npages--;
	pg->flags &= ~PG_TABLED;
	pg->uobject = NULL;
}
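
/*
 * uvm_pageremove_tree: remove a page from the object's radix tree of pages.
 *
 * => caller must lock object
 */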
static inline void
uvm_pageremove_tree(struct uvm_object *uobj, struct vm_page *pg)
{
	struct vm_page *opg __unused;

	KASSERT(rw_write_held(uobj->vmobjlock));

	opg = radix_tree_remove_node(&uobj->uo_pages, pg->offset >> PAGE_SHIFT);
	KASSERT(pg == opg);
}
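
/*
 * uvm_page_init_bucket: initialize an empty free-page bucket (zero free
 * pages, one empty list per color) and install it in slot "num" of the
 * given free list.
 */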
static void
uvm_page_init_bucket(struct pgfreelist *pgfl, struct pgflbucket *pgb, int num)
{
	int i;

	pgb->pgb_nfree = 0;
	for (i = 0; i < uvmexp.ncolors; i++) {
		LIST_INIT(&pgb->pgb_colors[i]);
	}
	pgfl->pgfl_buckets[num] = pgb;
}

/*
 * uvm_page_init: init the page system.   called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */

void
uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
{
	static struct uvm_cpu uvm_boot_cpu __cacheline_aligned;
	psize_t freepages, pagecount, bucketsize, n;
	struct pgflbucket *pgb;
	struct vm_page *pagearray;
	char *bucketarray;
	uvm_physseg_t bank;
	int fl, b;

	KASSERT(ncpu <= 1);

	/*
	 * init the page queues and free page queue locks, except the
	 * free list; we allocate that later (with the initial vm_page
	 * structures).
	 */

	curcpu()->ci_data.cpu_uvm = &uvm_boot_cpu;
	uvmpdpol_init();
	for (b = 0; b < __arraycount(uvm_freelist_locks); b++) {
		mutex_init(&uvm_freelist_locks[b].lock, MUTEX_DEFAULT, IPL_VM);
	}

	/*
	 * allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.   our job
	 * now is to allocate vm_page structures for this memory.
	 */

	if (uvm_physseg_get_last() == UVM_PHYSSEG_TYPE_INVALID)
		panic("uvm_page_bootstrap: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */

	freepages = 0;

	for (bank = uvm_physseg_get_first();
	     uvm_physseg_valid_p(bank) ;
	     bank = uvm_physseg_get_next(bank)) {
		freepages += (uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank));
	}

	/*
	 * Let MD code initialize the number of colors, or default
	 * to 1 color if MD code doesn't care.
	 */
	if (uvmexp.ncolors == 0)
		uvmexp.ncolors = 1;
	uvmexp.colormask = uvmexp.ncolors - 1;
	KASSERT((uvmexp.colormask & uvmexp.ncolors) == 0);

	/* We always start with only 1 bucket. */
	uvm.bucketcount = 1;

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.   for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by the PAGE_SIZE plus the size of the vm_page
	 * structure.   we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */
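	/*
	 * (Worked example, illustrative numbers only: with 4 KiB pages and,
	 *  say, a 128 byte struct vm_page, 1 GiB of registered RAM gives
	 *  freepages = 262144 and pagecount = (262145 << 12) / (4096 + 128),
	 *  i.e. roughly 254000 usable pages; the remaining space backs the
	 *  vm_page structures themselves.  sizeof(struct vm_page) varies
	 *  by architecture.)
	 */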
	pagecount = ((freepages + 1) << PAGE_SHIFT) /
	    (PAGE_SIZE + sizeof(struct vm_page));
	bucketsize = offsetof(struct pgflbucket, pgb_colors[uvmexp.ncolors]);
	bucketsize = roundup2(bucketsize, coherency_unit);
	bucketarray = (void *)uvm_pageboot_alloc(
	    bucketsize * VM_NFREELIST +
	    pagecount * sizeof(struct vm_page));
	pagearray = (struct vm_page *)
	    (bucketarray + bucketsize * VM_NFREELIST);

	for (fl = 0; fl < VM_NFREELIST; fl++) {
		pgb = (struct pgflbucket *)(bucketarray + bucketsize * fl);
		uvm_page_init_bucket(&uvm.page_free[fl], pgb, 0);
	}
	memset(pagearray, 0, pagecount * sizeof(struct vm_page));

	/*
	 * init the freelist cache in the disabled state.
	 */
	uvm_pgflcache_init();

	/*
	 * init the vm_page structures and put them in the correct place.
	 */
	/* First init the extent */

	for (bank = uvm_physseg_get_first(),
		 uvm_physseg_seg_chomp_slab(bank, pagearray, pagecount);
	     uvm_physseg_valid_p(bank);
	     bank = uvm_physseg_get_next(bank)) {

		n = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
		uvm_physseg_seg_alloc_from_slab(bank, n);
		uvm_physseg_init_seg(bank, pagearray);

		/* set up page array pointers */
		pagearray += n;
		pagecount -= n;
	}

	/*
	 * pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);

	/*
	 * init various thresholds.
	 */

	uvmexp.reserve_pagedaemon = 1;
	uvmexp.reserve_kernel = vm_page_reserve_kernel;

	/*
	 * done!
	 */

	uvm.page_init_done = true;
}

/*
 * uvm_pgfl_lock: lock all freelist buckets
 */

void
uvm_pgfl_lock(void)
{
	int i;

	for (i = 0; i < __arraycount(uvm_freelist_locks); i++) {
		mutex_spin_enter(&uvm_freelist_locks[i].lock);
	}
}

/*
 * uvm_pgfl_unlock: unlock all freelist buckets
 */

void
uvm_pgfl_unlock(void)
{
	int i;

	for (i = 0; i < __arraycount(uvm_freelist_locks); i++) {
		mutex_spin_exit(&uvm_freelist_locks[i].lock);
	}
}

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
 */

void
uvm_setpagesize(void)
{

	/*
	 * If uvmexp.pagesize is 0 at this point, we expect PAGE_SIZE
	 * to be a constant (indicated by being a non-zero value).
	 */
	if (uvmexp.pagesize == 0) {
		if (PAGE_SIZE == 0)
			panic("uvm_setpagesize: uvmexp.pagesize not set");
		uvmexp.pagesize = PAGE_SIZE;
	}
	uvmexp.pagemask = uvmexp.pagesize - 1;
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size %u (%#x) not a power of two",
		    uvmexp.pagesize, uvmexp.pagesize);
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}

/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */

vaddr_t
uvm_pageboot_alloc(vsize_t size)
{
	static bool initialized = false;
	vaddr_t addr;
#if !defined(PMAP_STEAL_MEMORY)
	vaddr_t vaddr;
	paddr_t paddr;
#endif

	/*
	 * on first call to this function, initialize ourselves.
	 */
	if (initialized == false) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/* round it the way we like it */
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);

		initialized = true;
	}

	/* round to page size */
	size = round_page(size);
	uvmexp.bootpages += atop(size);

#if defined(PMAP_STEAL_MEMORY)

	/*
	 * defer bootstrap allocation to MD code (it may want to allocate
	 * from a direct-mapped segment).  pmap_steal_memory should adjust
	 * virtual_space_start/virtual_space_end if necessary.
	 */

	addr = pmap_steal_memory(size, &virtual_space_start,
	    &virtual_space_end);

	return addr;

#else /* !PMAP_STEAL_MEMORY */

	/*
	 * allocate virtual memory for this request
	 */
	if (virtual_space_start == virtual_space_end ||
	    (virtual_space_end - virtual_space_start) < size)
		panic("uvm_pageboot_alloc: out of virtual space");

	addr = virtual_space_start;

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (uvm_maxkaddr < (addr + size)) {
		uvm_maxkaddr = pmap_growkernel(addr + size);
		if (uvm_maxkaddr < (addr + size))
			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
	}
#endif

	virtual_space_start += size;

	/*
	 * allocate and mapin physical pages to back new virtual pages
	 */

	for (vaddr = round_page(addr) ; vaddr < addr + size ;
	    vaddr += PAGE_SIZE) {

		if (!uvm_page_physget(&paddr))
			panic("uvm_pageboot_alloc: out of memory");

		/*
		 * Note this memory is no longer managed, so using
		 * pmap_kenter is safe.
		 */
		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE, 0);
	}
	pmap_update(pmap_kernel());
	return addr;
#endif /* PMAP_STEAL_MEMORY */
}

#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.   if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
 */

/* subroutine: try to allocate from memory chunks on the specified freelist */
static bool uvm_page_physget_freelist(paddr_t *, int);

static bool
uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
{
	uvm_physseg_t lcv;

	/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	for (lcv = uvm_physseg_get_last(); uvm_physseg_valid_p(lcv); lcv = uvm_physseg_get_prev(lcv))
#else
	for (lcv = uvm_physseg_get_first(); uvm_physseg_valid_p(lcv); lcv = uvm_physseg_get_next(lcv))
#endif
	{
		if (uvm.page_init_done == true)
			panic("uvm_page_physget: called _after_ bootstrap");

		/* Try to match at front or back on unused segment */
		if (uvm_page_physunload(lcv, freelist, paddrp))
			return true;
	}

	/* pass2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	for (lcv = uvm_physseg_get_last(); uvm_physseg_valid_p(lcv); lcv = uvm_physseg_get_prev(lcv))
#else
	for (lcv = uvm_physseg_get_first(); uvm_physseg_valid_p(lcv); lcv = uvm_physseg_get_next(lcv))
#endif
	{
		/* Try the front regardless. */
		if (uvm_page_physunload_force(lcv, freelist, paddrp))
			return true;
	}
	return false;
}
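
/*
 * uvm_page_physget: try each free list, in order of preference, until one
 * of them yields a page (see the block comment above).
 */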
bool
uvm_page_physget(paddr_t *paddrp)
{
	int i;

	/* try in the order of freelist preference */
	for (i = 0; i < VM_NFREELIST; i++)
		if (uvm_page_physget_freelist(paddrp, i) == true)
			return (true);
	return (false);
}
#endif /* PMAP_STEAL_MEMORY */
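
/*
 * uvm_vm_page_to_phys: return the physical address of a page.
 *
 * (The low bits of pg->phys_addr are reused by the allocator for other
 *  bookkeeping, which is presumably why they are masked off here rather
 *  than returned.)
 */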
paddr_t
uvm_vm_page_to_phys(const struct vm_page *pg)
{

	return pg->phys_addr & ~(PAGE_SIZE - 1);
}

/*
 * uvm_page_numa_load: load NUMA range description.
 */
void
uvm_page_numa_load(paddr_t start, paddr_t size, u_int numa_id)
{
	struct uvm_page_numa_region *d;

	KASSERT(numa_id < PGFL_MAX_BUCKETS);

	d = kmem_alloc(sizeof(*d), KM_SLEEP);
	d->start = start;
	d->size = size;
	d->numa_id = numa_id;
	d->next = uvm_page_numa_region;
	uvm_page_numa_region = d;
}

/*
 * uvm_page_numa_lookup: lookup NUMA node for the given page.
 */
static u_int
uvm_page_numa_lookup(struct vm_page *pg)
{
	struct uvm_page_numa_region *d;
	static bool warned;
	paddr_t pa;

	KASSERT(uvm_page_numa_region != NULL);

	pa = VM_PAGE_TO_PHYS(pg);
	for (d = uvm_page_numa_region; d != NULL; d = d->next) {
		if (pa >= d->start && pa < d->start + d->size) {
			return d->numa_id;
		}
	}

	if (!warned) {
		printf("uvm_page_numa_lookup: failed, first pg=%p pa=%#"
		    PRIxPADDR "\n", pg, VM_PAGE_TO_PHYS(pg));
		warned = true;
	}

	return 0;
}

/*
 * uvm_page_redim: adjust freelist dimensions if they have changed.
 */
static void
uvm_page_redim(int newncolors, int newnbuckets)
{
	struct pgfreelist npgfl;
	struct pgflbucket *opgb, *npgb;
	struct pgflist *ohead, *nhead;
	struct vm_page *pg;
	size_t bucketsize, bucketmemsize, oldbucketmemsize;
	int fl, ob, oc, nb, nc, obuckets, ocolors;
	char *bucketarray, *oldbucketmem, *bucketmem;

	KASSERT(((newncolors - 1) & newncolors) == 0);

	/* Anything to do? */
	if (newncolors <= uvmexp.ncolors &&
	    newnbuckets == uvm.bucketcount) {
		return;
	}
	if (uvm.page_init_done == false) {
		uvmexp.ncolors = newncolors;
		return;
	}

	bucketsize = offsetof(struct pgflbucket, pgb_colors[newncolors]);
	bucketsize = roundup2(bucketsize, coherency_unit);
	bucketmemsize = bucketsize * newnbuckets * VM_NFREELIST +
	    coherency_unit - 1;
	bucketmem = kmem_zalloc(bucketmemsize, KM_SLEEP);
	bucketarray = (char *)roundup2((uintptr_t)bucketmem, coherency_unit);

	ocolors = uvmexp.ncolors;
	obuckets = uvm.bucketcount;

	/* Freelist cache mustn't be enabled. */
	uvm_pgflcache_pause();

	/* Make sure we should still do this. */
	uvm_pgfl_lock();
	if (newncolors <= uvmexp.ncolors &&
	    newnbuckets == uvm.bucketcount) {
		uvm_pgfl_unlock();
		uvm_pgflcache_resume();
		kmem_free(bucketmem, bucketmemsize);
		return;
	}

	uvmexp.ncolors = newncolors;
	uvmexp.colormask = uvmexp.ncolors - 1;
	uvm.bucketcount = newnbuckets;

	for (fl = 0; fl < VM_NFREELIST; fl++) {
		/* Init new buckets in new freelist. */
		memset(&npgfl, 0, sizeof(npgfl));
		for (nb = 0; nb < newnbuckets; nb++) {
			npgb = (struct pgflbucket *)bucketarray;
			uvm_page_init_bucket(&npgfl, npgb, nb);
			bucketarray += bucketsize;
		}
		/* Now transfer pages from the old freelist. */
		for (nb = ob = 0; ob < obuckets; ob++) {
			opgb = uvm.page_free[fl].pgfl_buckets[ob];
			for (oc = 0; oc < ocolors; oc++) {
				ohead = &opgb->pgb_colors[oc];
				while ((pg = LIST_FIRST(ohead)) != NULL) {
					LIST_REMOVE(pg, pageq.list);
					/*
					 * Here we decide on the NEW color &
					 * bucket for the page.  For NUMA
					 * we'll use the info that the
					 * hardware gave us.  For non-NUMA
					 * we take the physical page frame
					 * number and cache color into
					 * account.  We do this to try and
					 * avoid defeating any memory
					 * interleaving in the hardware.
					 */
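					/*
					 * (Illustrative example: with 64
					 *  colors and 4 KiB pages, each
					 *  run of 512 page frames - 2 MiB
					 *  of physical memory - maps to
					 *  one bucket before rotating to
					 *  the next.)
					 */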
					KASSERT(
					    uvm_page_get_bucket(pg) == ob);
					KASSERT(fl ==
					    uvm_page_get_freelist(pg));
					if (uvm_page_numa_region != NULL) {
						nb = uvm_page_numa_lookup(pg);
					} else {
						nb = atop(VM_PAGE_TO_PHYS(pg))
						    / uvmexp.ncolors / 8
						    % newnbuckets;
					}
					uvm_page_set_bucket(pg, nb);
					npgb = npgfl.pgfl_buckets[nb];
					npgb->pgb_nfree++;
					nc = VM_PGCOLOR(pg);
					nhead = &npgb->pgb_colors[nc];
					LIST_INSERT_HEAD(nhead, pg, pageq.list);
				}
			}
		}
		/* Install the new freelist. */
		memcpy(&uvm.page_free[fl], &npgfl, sizeof(npgfl));
	}

	/* Unlock and free the old memory. */
	oldbucketmemsize = recolored_pages_memsize;
	oldbucketmem = recolored_pages_mem;
	recolored_pages_memsize = bucketmemsize;
	recolored_pages_mem = bucketmem;

	uvm_pgfl_unlock();
	uvm_pgflcache_resume();

	if (oldbucketmemsize) {
		kmem_free(oldbucketmem, oldbucketmemsize);
	}

	/*
	 * this calls uvm_km_alloc() which may want to hold
	 * uvm_freelist_lock.
	 */
	uvm_pager_realloc_emerg();
}

/*
 * uvm_page_recolor: Recolor the pages if the new color count is larger
 * than the old one.
 */

void
uvm_page_recolor(int newncolors)
{

	uvm_page_redim(newncolors, uvm.bucketcount);
}

/*
 * uvm_page_rebucket: Determine a bucket structure and redim the free
 * lists to match.
 */

void
uvm_page_rebucket(void)
{
	u_int min_numa, max_numa, npackage, shift;
	struct cpu_info *ci, *ci2, *ci3;
	CPU_INFO_ITERATOR cii;

	/*
	 * If we have more than one NUMA node, and the maximum NUMA node ID
	 * is less than PGFL_MAX_BUCKETS, then we'll use NUMA distribution
	 * for free pages.
	 */
	min_numa = (u_int)-1;
	max_numa = 0;
	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_numa_id < min_numa) {
			min_numa = ci->ci_numa_id;
		}
		if (ci->ci_numa_id > max_numa) {
			max_numa = ci->ci_numa_id;
		}
	}
	if (min_numa != max_numa && max_numa < PGFL_MAX_BUCKETS) {
		aprint_debug("UVM: using NUMA allocation scheme\n");
		for (CPU_INFO_FOREACH(cii, ci)) {
			ci->ci_data.cpu_uvm->pgflbucket = ci->ci_numa_id;
		}
		uvm_page_redim(uvmexp.ncolors, max_numa + 1);
		return;
	}

	/*
	 * Otherwise we'll go with a scheme to maximise L2/L3 cache locality
	 * and minimise lock contention.  Count the total number of CPU
	 * packages, and then try to distribute the buckets among CPU
	 * packages evenly.
	 */
	npackage = curcpu()->ci_nsibling[CPUREL_PACKAGE1ST];

	/*
	 * Figure out how to arrange the packages & buckets, and the total
	 * number of buckets we need.  XXX 2 may not be the best factor.
	 */
	for (shift = 0; npackage > PGFL_MAX_BUCKETS; shift++) {
		npackage >>= 1;
	}
	uvm_page_redim(uvmexp.ncolors, npackage);

	/*
	 * Now tell each CPU which bucket to use.  In the outer loop, scroll
	 * through all CPU packages.
	 */
	npackage = 0;
	ci = curcpu();
	ci2 = ci->ci_sibling[CPUREL_PACKAGE1ST];
	do {
		/*
		 * In the inner loop, scroll through all CPUs in the package
		 * and assign the same bucket ID.
		 */
		ci3 = ci2;
		do {
			ci3->ci_data.cpu_uvm->pgflbucket = npackage >> shift;
			ci3 = ci3->ci_sibling[CPUREL_PACKAGE];
		} while (ci3 != ci2);
		npackage++;
		ci2 = ci2->ci_sibling[CPUREL_PACKAGE1ST];
	} while (ci2 != ci->ci_sibling[CPUREL_PACKAGE1ST]);

	aprint_debug("UVM: using package allocation scheme, "
	    "%d package(s) per bucket\n", 1 << shift);
}

/*
 * uvm_cpu_attach: initialize per-CPU data structures.
 */

void
uvm_cpu_attach(struct cpu_info *ci)
{
	struct uvm_cpu *ucpu;

	/* Already done in uvm_page_init(). */
	if (!CPU_IS_PRIMARY(ci)) {
		/* Add more reserve pages for this CPU. */
		uvmexp.reserve_kernel += vm_page_reserve_kernel;

		/* Allocate per-CPU data structures. */
		ucpu = kmem_zalloc(sizeof(struct uvm_cpu) + coherency_unit - 1,
		    KM_SLEEP);
		ucpu = (struct uvm_cpu *)roundup2((uintptr_t)ucpu,
		    coherency_unit);
		ci->ci_data.cpu_uvm = ucpu;
	} else {
		ucpu = ci->ci_data.cpu_uvm;
	}

	uvmpdpol_init_cpu(ucpu);
}

/*
 * uvm_availmem: fetch the total amount of free memory in pages.  this can
 * have a detrimental effect on performance due to false sharing; don't call
 * unless needed.
 *
 * some users can request the amount of free memory so often that it begins
 * to impact upon performance.  if calling frequently and an inexact value
 * is okay, call with cached = true.
 */

int
uvm_availmem(bool cached)
{
	int64_t fp;

	cpu_count_sync(cached);
	if ((fp = cpu_count_get(CPU_COUNT_FREEPAGES)) < 0) {
		/*
		 * XXXAD could briefly go negative because it's impossible
		 * to get a clean snapshot.  address this for other counters
		 * used as running totals before NetBSD 10 although less
		 * important for those.
		 */
		fp = 0;
	}
	return (int)fp;
}

/*
 * uvm_pagealloc_pgb: helper routine that tries to allocate any color from a
 * specific freelist and specific bucket only.
 *
 * => must be at IPL_VM or higher to protect per-CPU data structures.
 */

static struct vm_page *
uvm_pagealloc_pgb(struct uvm_cpu *ucpu, int f, int b, int *trycolorp, int flags)
{
	int c, trycolor, colormask;
	struct pgflbucket *pgb;
	struct vm_page *pg;
	kmutex_t *lock;
	bool fill;

	/*
	 * Skip the bucket if empty, no lock needed.  There could be many
	 * empty freelists/buckets.
	 */
	pgb = uvm.page_free[f].pgfl_buckets[b];
	if (pgb->pgb_nfree == 0) {
		return NULL;
	}

	/* Skip bucket if low on memory. */
	lock = &uvm_freelist_locks[b].lock;
	mutex_spin_enter(lock);
	if (__predict_false(pgb->pgb_nfree <= uvmexp.reserve_kernel)) {
		if ((flags & UVM_PGA_USERESERVE) == 0 ||
		    (pgb->pgb_nfree <= uvmexp.reserve_pagedaemon &&
		     curlwp != uvm.pagedaemon_lwp)) {
			mutex_spin_exit(lock);
			return NULL;
		}
		fill = false;
	} else {
		fill = true;
	}

	/* Try all page colors as needed. */
	c = trycolor = *trycolorp;
	colormask = uvmexp.colormask;
	do {
		pg = LIST_FIRST(&pgb->pgb_colors[c]);
		if (__predict_true(pg != NULL)) {
			/*
			 * Got a free page!  PG_FREE must be cleared under
			 * lock because of uvm_pglistalloc().
			 */
			LIST_REMOVE(pg, pageq.list);
			KASSERT(pg->flags == PG_FREE);
			pg->flags = PG_BUSY | PG_CLEAN | PG_FAKE;
			pgb->pgb_nfree--;
			CPU_COUNT(CPU_COUNT_FREEPAGES, -1);

			/*
			 * While we have the bucket locked and our data
			 * structures fresh in L1 cache, we have an ideal
			 * opportunity to grab some pages for the freelist
			 * cache without causing extra contention.  Only do
			 * so if we found pages in this CPU's preferred
			 * bucket.
			 */
			if (__predict_true(b == ucpu->pgflbucket && fill)) {
				uvm_pgflcache_fill(ucpu, f, b, c);
			}
			mutex_spin_exit(lock);
			KASSERT(uvm_page_get_bucket(pg) == b);
			CPU_COUNT(c == trycolor ?
			    CPU_COUNT_COLORHIT : CPU_COUNT_COLORMISS, 1);
			CPU_COUNT(CPU_COUNT_CPUMISS, 1);
			*trycolorp = c;
			return pg;
		}
		c = (c + 1) & colormask;
	} while (c != trycolor);
	mutex_spin_exit(lock);

	return NULL;
}

/*
 * uvm_pagealloc_pgfl: helper routine for uvm_pagealloc_strat that allocates
 * any color from any bucket, in a specific freelist.
 *
 * => must be at IPL_VM or higher to protect per-CPU data structures.
 */

static struct vm_page *
uvm_pagealloc_pgfl(struct uvm_cpu *ucpu, int f, int *trycolorp, int flags)
{
	int b, trybucket, bucketcount;
	struct vm_page *pg;

	/* Try for the exact thing in the per-CPU cache. */
	if ((pg = uvm_pgflcache_alloc(ucpu, f, *trycolorp)) != NULL) {
		CPU_COUNT(CPU_COUNT_CPUHIT, 1);
		CPU_COUNT(CPU_COUNT_COLORHIT, 1);
		return pg;
	}

	/* Walk through all buckets, trying our preferred bucket first. */
	trybucket = ucpu->pgflbucket;
	b = trybucket;
	bucketcount = uvm.bucketcount;
	do {
		pg = uvm_pagealloc_pgb(ucpu, f, b, trycolorp, flags);
		if (pg != NULL) {
			return pg;
		}
		b = (b + 1 == bucketcount ? 0 : b + 1);
	} while (b != trybucket);

	return NULL;
}
|
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
/*
|
Add support for multiple memory free lists. There is at least one
default free list, and 0 - N additional free list, in order of descending
priority.
A new page allocation function, uvm_pagealloc_strat(), has been added,
providing three page allocation strategies:
- normal: high -> low priority free list walk, taking the
page off the first free list that has one.
- only: attempt to allocate a page only from the specified free
list, failing if that free list has none available.
- fallback: if `only' fails, fall back on `normal'.
uvm_pagealloc(...) is provided for normal use (and is a synonym for
uvm_pagealloc_strat(..., UVM_PGA_STRAT_NORMAL, 0); the free list argument
is ignored for the `normal' case).
uvm_page_physload() now specified which free list the pages will be
loaded onto. This means that some platforms which have multiple physical
memory segments may define additional vm_physsegs if they wish to break
individual physical segments into differing priorities.
Machine-dependent code must define _at least_ the following constants
in <machine/vmparam.h>:
VM_NFREELIST: the number of free lists the system will have
VM_FREELIST_DEFAULT: the default freelist (should always be 0,
but is defined in machdep code so that it's with all of the
other free list-related constants).
Additional free list names may be defined by machine-dependent code, but
they will only be used by machine-dependent code (e.g. for loading the
vm_physsegs).
1998-07-08 08:28:27 +04:00
|
|
|
* uvm_pagealloc_strat: allocate vm_page from a particular free list.
|
1998-02-05 09:25:08 +03:00
|
|
|
*
|
|
|
|
* => return null if no pages free
|
|
|
|
* => wake up pagedaemon if number of free pages drops below low water mark
|
2008-06-04 16:45:28 +04:00
|
|
|
* => if obj != NULL, obj must be locked (to put in obj's tree)
|
1998-02-05 09:25:08 +03:00
|
|
|
* => if anon != NULL, anon must be locked (to put in anon)
|
|
|
|
* => only one of obj or anon can be non-null
|
|
|
|
* => caller must activate/deactivate page if it is not wired.
|
1998-07-08 08:28:27 +04:00
|
|
|
* => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
|
2000-04-24 21:12:00 +04:00
|
|
|
* => policy decision: it is more important to pull a page off of the
|
2020-06-15 00:41:42 +03:00
|
|
|
* appropriate priority free list than it is to get a page from the
|
|
|
|
* correct bucket or color bin. This is because we live with the
|
2000-04-24 21:12:00 +04:00
|
|
|
* consequences of a bad free list decision for the entire
|
|
|
|
* lifetime of the page, e.g. if the page comes from memory that
|
|
|
|
* is slower to access.
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
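/*
 * Example (a minimal usage sketch, assuming the caller already holds
 * uobj's lock as required above): the common case is the normal
 * strategy, for which free_list is ignored:
 *
 *	pg = uvm_pagealloc_strat(uobj, off, NULL, UVM_PGA_ZERO,
 *	    UVM_PGA_STRAT_NORMAL, 0);
 *
 * A caller that prefers one particular machine-dependent free list (a
 * hypothetical VM_FREELIST_FOO) but can live with any page would pass
 * UVM_PGA_STRAT_FALLBACK and that free list instead.
 */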
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
struct vm_page *
|
2005-06-27 06:19:48 +04:00
|
|
|
uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
|
|
|
|
int flags, int strat, int free_list)
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
2020-06-15 00:41:42 +03:00
|
|
|
int color, lcv, error, s;
|
2008-06-04 16:45:28 +04:00
|
|
|
struct uvm_cpu *ucpu;
|
1998-03-09 03:58:55 +03:00
|
|
|
struct vm_page *pg;
|
2008-12-13 14:34:43 +03:00
|
|
|
lwp_t *l;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
KASSERT(obj == NULL || anon == NULL);
|
2011-01-04 11:26:33 +03:00
|
|
|
KASSERT(anon == NULL || (flags & UVM_FLAG_COLORMATCH) || off == 0);
|
2000-11-27 11:39:39 +03:00
|
|
|
KASSERT(off == trunc_page(off));
|
2020-02-23 18:46:38 +03:00
|
|
|
KASSERT(obj == NULL || rw_write_held(obj->vmobjlock));
|
2011-06-15 23:46:11 +04:00
|
|
|
KASSERT(anon == NULL || anon->an_lock == NULL ||
|
2020-02-23 18:46:38 +03:00
|
|
|
rw_write_held(anon->an_lock));
|
2001-01-23 04:56:16 +03:00
|
|
|
|
2001-04-29 08:23:20 +04:00
|
|
|
/*
|
|
|
|
* This implements a global round-robin page coloring
|
|
|
|
* algorithm.
|
|
|
|
*/
|
2001-09-16 00:36:31 +04:00
|
|
|
|
2019-12-27 15:51:56 +03:00
|
|
|
s = splvm();
|
2008-06-04 16:45:28 +04:00
|
|
|
ucpu = curcpu()->ci_data.cpu_uvm;
|
2011-01-04 11:26:33 +03:00
|
|
|
if (flags & UVM_FLAG_COLORMATCH) {
|
|
|
|
color = atop(off) & uvmexp.colormask;
|
|
|
|
} else {
|
2019-12-27 15:51:56 +03:00
|
|
|
color = ucpu->pgflcolor;
|
2011-01-04 11:26:33 +03:00
|
|
|
}
|
2001-04-29 08:23:20 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* fail if any of these conditions is true:
|
|
|
|
* [1] there really are no free pages, or
|
|
|
|
* [2] only kernel "reserved" pages remain and
|
2008-12-13 14:34:43 +03:00
|
|
|
* reserved pages have not been requested.
|
1998-03-09 03:58:55 +03:00
|
|
|
* [3] only pagedaemon "reserved" pages remain and
|
|
|
|
* the requestor isn't the pagedaemon.
|
2008-12-13 14:34:43 +03:00
|
|
|
* we make kernel reserve pages available if called by a
|
2020-05-17 18:11:57 +03:00
|
|
|
* kernel thread.
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
2008-12-13 14:34:43 +03:00
|
|
|
l = curlwp;
|
2020-05-17 18:11:57 +03:00
|
|
|
if (__predict_true(l != NULL) && (l->l_flag & LW_SYSTEM) != 0) {
|
2008-12-13 14:34:43 +03:00
|
|
|
flags |= UVM_PGA_USERESERVE;
|
|
|
|
}
|
2000-04-24 21:12:00 +04:00
|
|
|
|
1998-07-08 08:28:27 +04:00
|
|
|
again:
|
|
|
|
switch (strat) {
|
|
|
|
case UVM_PGA_STRAT_NORMAL:
|
2019-12-27 15:51:56 +03:00
|
|
|
/* Check freelists: descending priority (ascending id) order. */
|
1998-07-08 08:28:27 +04:00
|
|
|
for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
|
2019-12-27 15:51:56 +03:00
|
|
|
pg = uvm_pagealloc_pgfl(ucpu, lcv, &color, flags);
|
|
|
|
if (pg != NULL) {
|
1998-07-08 08:28:27 +04:00
|
|
|
goto gotit;
|
2019-12-27 15:51:56 +03:00
|
|
|
}
|
1998-07-08 08:28:27 +04:00
|
|
|
}
|
|
|
|
|
2019-12-27 15:51:56 +03:00
|
|
|
/* No pages free! Have pagedaemon free some memory. */
|
|
|
|
splx(s);
|
|
|
|
uvm_kick_pdaemon();
|
|
|
|
return NULL;
|
1998-07-08 08:28:27 +04:00
|
|
|
|
|
|
|
case UVM_PGA_STRAT_ONLY:
|
|
|
|
case UVM_PGA_STRAT_FALLBACK:
|
|
|
|
/* Attempt to allocate from the specified free list. */
|
2023-04-09 12:00:56 +03:00
|
|
|
KASSERT(free_list >= 0);
|
|
|
|
KASSERT(free_list < VM_NFREELIST);
|
2019-12-27 15:51:56 +03:00
|
|
|
pg = uvm_pagealloc_pgfl(ucpu, free_list, &color, flags);
|
|
|
|
if (pg != NULL) {
|
1998-07-08 08:28:27 +04:00
|
|
|
goto gotit;
|
2019-12-27 15:51:56 +03:00
|
|
|
}
|
1998-07-08 08:28:27 +04:00
|
|
|
|
|
|
|
/* Fall back, if possible. */
|
|
|
|
if (strat == UVM_PGA_STRAT_FALLBACK) {
|
|
|
|
strat = UVM_PGA_STRAT_NORMAL;
|
|
|
|
goto again;
|
|
|
|
}
|
|
|
|
|
2019-12-27 15:51:56 +03:00
|
|
|
/* No pages free! Have pagedaemon free some memory. */
|
|
|
|
splx(s);
|
|
|
|
uvm_kick_pdaemon();
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
case UVM_PGA_STRAT_NUMA:
|
|
|
|
/*
|
2020-05-17 18:11:57 +03:00
|
|
|
* NUMA strategy (experimental): allocating from the correct
|
|
|
|
* bucket is more important than observing freelist
|
|
|
|
* priority. Look only to the current NUMA node; if that
|
|
|
|
* fails, we need to look to other NUMA nodes, so retry with
|
|
|
|
* the normal strategy.
|
2019-12-27 15:51:56 +03:00
|
|
|
*/
|
|
|
|
for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
|
|
|
|
pg = uvm_pgflcache_alloc(ucpu, lcv, color);
|
|
|
|
if (pg != NULL) {
|
|
|
|
CPU_COUNT(CPU_COUNT_CPUHIT, 1);
|
|
|
|
CPU_COUNT(CPU_COUNT_COLORHIT, 1);
|
|
|
|
goto gotit;
|
|
|
|
}
|
|
|
|
pg = uvm_pagealloc_pgb(ucpu, lcv,
|
|
|
|
ucpu->pgflbucket, &color, flags);
|
|
|
|
if (pg != NULL) {
|
|
|
|
goto gotit;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
strat = UVM_PGA_STRAT_NORMAL;
|
|
|
|
goto again;
|
1998-07-08 08:28:27 +04:00
|
|
|
|
|
|
|
default:
|
|
|
|
panic("uvm_pagealloc_strat: bad strat %d", strat);
|
|
|
|
/* NOTREACHED */
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
|
1998-07-08 08:28:27 +04:00
|
|
|
gotit:
|
2001-04-29 08:23:20 +04:00
|
|
|
/*
|
|
|
|
* We now know which color we actually allocated from; set
|
|
|
|
* the next color accordingly.
|
|
|
|
*/
|
2001-09-16 00:36:31 +04:00
|
|
|
|
2019-12-27 15:51:56 +03:00
|
|
|
ucpu->pgflcolor = (color + 1) & uvmexp.colormask;
|
2000-04-24 21:12:00 +04:00
|
|
|
|
|
|
|
/*
|
2020-06-15 00:41:42 +03:00
|
|
|
* while still at IPL_VM, update allocation statistics.
|
2000-04-24 21:12:00 +04:00
|
|
|
*/
|
2001-09-16 00:36:31 +04:00
|
|
|
|
2019-12-22 19:37:36 +03:00
|
|
|
if (anon) {
|
2020-01-15 20:55:43 +03:00
|
|
|
CPU_COUNT(CPU_COUNT_ANONCLEAN, 1);
|
2019-12-22 19:37:36 +03:00
|
|
|
}
|
2019-12-27 15:51:56 +03:00
|
|
|
splx(s);
|
2020-06-15 00:41:42 +03:00
|
|
|
KASSERT(pg->flags == (PG_BUSY|PG_CLEAN|PG_FAKE));
|
1998-03-09 03:58:55 +03:00
|
|
|
|
2019-12-13 23:10:21 +03:00
|
|
|
/*
|
2019-12-22 19:37:36 +03:00
|
|
|
* assign the page to the object. as the page was free, we know
|
|
|
|
* that pg->uobject and pg->uanon are NULL. we only need to take
|
|
|
|
* the page's interlock if we are changing the values.
|
2019-12-13 23:10:21 +03:00
|
|
|
*/
|
2019-12-22 19:37:36 +03:00
|
|
|
if (anon != NULL || obj != NULL) {
|
|
|
|
mutex_enter(&pg->interlock);
|
|
|
|
}
|
1998-03-09 03:58:55 +03:00
|
|
|
pg->offset = off;
|
|
|
|
pg->uobject = obj;
|
|
|
|
pg->uanon = anon;
|
2020-02-23 18:46:38 +03:00
|
|
|
KASSERT(uvm_page_owner_locked_p(pg, true));
|
1998-03-09 03:58:55 +03:00
|
|
|
if (anon) {
|
2005-05-11 17:02:25 +04:00
|
|
|
anon->an_page = pg;
|
2019-12-13 23:10:21 +03:00
|
|
|
pg->flags |= PG_ANON;
|
2019-12-22 19:37:36 +03:00
|
|
|
mutex_exit(&pg->interlock);
|
2019-12-13 23:10:21 +03:00
|
|
|
} else if (obj) {
|
2020-01-15 20:55:43 +03:00
|
|
|
/*
|
|
|
|
* set PG_FILE|PG_AOBJ before the first uvm_pageinsert.
|
|
|
|
*/
|
|
|
|
if (UVM_OBJ_IS_VNODE(obj)) {
|
|
|
|
pg->flags |= PG_FILE;
|
2020-05-17 20:12:28 +03:00
|
|
|
} else if (UVM_OBJ_IS_AOBJ(obj)) {
|
2020-01-15 20:55:43 +03:00
|
|
|
pg->flags |= PG_AOBJ;
|
|
|
|
}
|
2019-12-18 23:38:14 +03:00
|
|
|
uvm_pageinsert_object(obj, pg);
|
2019-12-22 19:37:36 +03:00
|
|
|
mutex_exit(&pg->interlock);
|
2019-12-18 23:38:14 +03:00
|
|
|
error = uvm_pageinsert_tree(obj, pg);
|
2019-12-14 20:28:58 +03:00
|
|
|
if (error != 0) {
|
2019-12-22 19:37:36 +03:00
|
|
|
mutex_enter(&pg->interlock);
|
2019-12-18 23:38:14 +03:00
|
|
|
uvm_pageremove_object(obj, pg);
|
2019-12-22 19:37:36 +03:00
|
|
|
mutex_exit(&pg->interlock);
|
2019-12-14 20:28:58 +03:00
|
|
|
uvm_pagefree(pg);
|
|
|
|
return NULL;
|
|
|
|
}
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
2009-02-26 21:18:14 +03:00
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
#if defined(UVM_PAGE_TRKOWN)
|
1998-03-09 03:58:55 +03:00
|
|
|
pg->owner_tag = NULL;
|
1998-02-05 09:25:08 +03:00
|
|
|
#endif
|
1998-03-09 03:58:55 +03:00
|
|
|
UVM_PAGE_OWN(pg, "new alloc");
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2000-04-10 04:28:05 +04:00
|
|
|
if (flags & UVM_PGA_ZERO) {
|
2020-06-15 00:41:42 +03:00
|
|
|
/* A zero'd page is not clean. */
|
2020-01-15 20:55:43 +03:00
|
|
|
if (obj != NULL || anon != NULL) {
|
|
|
|
uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
|
|
|
|
}
|
2020-06-15 00:41:42 +03:00
|
|
|
pmap_zero_page(VM_PAGE_TO_PHYS(pg));
|
2000-04-10 04:28:05 +04:00
|
|
|
}
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
return(pg);
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
2004-02-13 16:47:16 +03:00
|
|
|
/*
|
|
|
|
* uvm_pagereplace: replace a page with another
|
|
|
|
*
|
|
|
|
* => object must be locked
|
2020-01-01 01:42:50 +03:00
|
|
|
* => page interlocks must be held
|
2004-02-13 16:47:16 +03:00
|
|
|
*/
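/*
 * Example (a minimal sketch of the locking a caller is expected to
 * provide, per the assertions below): the object lock is held for
 * writing and both page interlocks are held across the call:
 *
 *	rw_enter(uobj->vmobjlock, RW_WRITER);
 *	mutex_enter(&oldpg->interlock);
 *	mutex_enter(&newpg->interlock);
 *	uvm_pagereplace(oldpg, newpg);
 *	mutex_exit(&newpg->interlock);
 *	mutex_exit(&oldpg->interlock);
 *	rw_exit(uobj->vmobjlock);
 */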
|
|
|
|
|
|
|
|
void
|
2005-06-27 06:19:48 +04:00
|
|
|
uvm_pagereplace(struct vm_page *oldpg, struct vm_page *newpg)
|
2004-02-13 16:47:16 +03:00
|
|
|
{
|
2008-06-17 06:30:57 +04:00
|
|
|
struct uvm_object *uobj = oldpg->uobject;
|
2019-12-30 20:45:53 +03:00
|
|
|
struct vm_page *pg __diagused;
|
2020-01-15 20:55:43 +03:00
|
|
|
uint64_t idx;
|
2004-03-24 10:50:48 +03:00
|
|
|
|
2004-02-13 16:47:16 +03:00
|
|
|
KASSERT((oldpg->flags & PG_TABLED) != 0);
|
2008-06-17 06:30:57 +04:00
|
|
|
KASSERT(uobj != NULL);
|
2004-02-13 16:47:16 +03:00
|
|
|
KASSERT((newpg->flags & PG_TABLED) == 0);
|
|
|
|
KASSERT(newpg->uobject == NULL);
|
2020-02-23 18:46:38 +03:00
|
|
|
KASSERT(rw_write_held(uobj->vmobjlock));
|
2020-01-01 01:42:50 +03:00
|
|
|
KASSERT(mutex_owned(&oldpg->interlock));
|
|
|
|
KASSERT(mutex_owned(&newpg->interlock));
|
2004-02-13 16:47:16 +03:00
|
|
|
|
2020-01-15 20:55:43 +03:00
|
|
|
newpg->uobject = uobj;
|
2004-02-13 16:47:16 +03:00
|
|
|
newpg->offset = oldpg->offset;
|
2020-01-15 20:55:43 +03:00
|
|
|
idx = newpg->offset >> PAGE_SHIFT;
|
|
|
|
pg = radix_tree_replace_node(&uobj->uo_pages, idx, newpg);
|
2019-12-30 20:45:53 +03:00
|
|
|
KASSERT(pg == oldpg);
|
2020-01-15 20:55:43 +03:00
|
|
|
if (((oldpg->flags ^ newpg->flags) & PG_CLEAN) != 0) {
|
|
|
|
if ((newpg->flags & PG_CLEAN) != 0) {
|
2020-08-14 12:06:14 +03:00
|
|
|
uvm_obj_page_clear_dirty(newpg);
|
2020-01-15 20:55:43 +03:00
|
|
|
} else {
|
2020-08-14 12:06:14 +03:00
|
|
|
uvm_obj_page_set_dirty(newpg);
|
2020-01-15 20:55:43 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* oldpg's PG_STAT is stable. newpg is not reachable by others yet.
|
|
|
|
*/
|
|
|
|
newpg->flags |=
|
|
|
|
(newpg->flags & ~PG_STAT) | (oldpg->flags & PG_STAT);
|
2019-12-16 00:11:34 +03:00
|
|
|
uvm_pageinsert_object(uobj, newpg);
|
|
|
|
uvm_pageremove_object(uobj, oldpg);
|
2004-02-13 16:47:16 +03:00
|
|
|
}
|
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
/*
|
|
|
|
* uvm_pagerealloc: reallocate a page from one object to another
|
|
|
|
*
|
|
|
|
* => both objects must be locked
|
|
|
|
*/
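/*
 * Example (sketch): the call can fail when inserting into the new
 * object's radix tree, so callers check the return value:
 *
 *	error = uvm_pagerealloc(pg, newobj, newoff);
 *	if (error != 0)
 *		... recover, e.g. free the page or retry later ...
 */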
|
|
|
|
|
2020-06-13 22:55:39 +03:00
|
|
|
int
|
2005-06-27 06:19:48 +04:00
|
|
|
uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
2020-06-13 22:55:39 +03:00
|
|
|
int error = 0;
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
|
|
|
* remove it from the old object
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (pg->uobject) {
|
2019-12-18 23:38:14 +03:00
|
|
|
uvm_pageremove_tree(pg->uobject, pg);
|
|
|
|
uvm_pageremove_object(pg->uobject, pg);
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* put it in the new object
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (newobj) {
|
2020-06-13 22:55:39 +03:00
|
|
|
mutex_enter(&pg->interlock);
|
|
|
|
pg->uobject = newobj;
|
|
|
|
pg->offset = newoff;
|
|
|
|
if (UVM_OBJ_IS_VNODE(newobj)) {
|
|
|
|
pg->flags |= PG_FILE;
|
|
|
|
} else if (UVM_OBJ_IS_AOBJ(newobj)) {
|
|
|
|
pg->flags |= PG_AOBJ;
|
|
|
|
}
|
|
|
|
uvm_pageinsert_object(newobj, pg);
|
|
|
|
mutex_exit(&pg->interlock);
|
|
|
|
error = uvm_pageinsert_tree(newobj, pg);
|
|
|
|
if (error != 0) {
|
|
|
|
mutex_enter(&pg->interlock);
|
|
|
|
uvm_pageremove_object(newobj, pg);
|
|
|
|
mutex_exit(&pg->interlock);
|
|
|
|
}
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
2020-06-13 22:55:39 +03:00
|
|
|
|
|
|
|
return error;
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_pagefree: free page
|
|
|
|
*
|
2008-06-04 16:45:28 +04:00
|
|
|
* => erase page's identity (i.e. remove from object)
|
1998-02-05 09:25:08 +03:00
|
|
|
* => put page on free list
|
|
|
|
* => caller must lock owning object (either anon or uvm_object)
|
|
|
|
* => assumes all valid mappings of pg are gone
|
|
|
|
*/
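/*
 * Example (sketch): a typical caller holds the owning object's lock
 * across the free and has already removed any mappings:
 *
 *	rw_enter(uobj->vmobjlock, RW_WRITER);
 *	pmap_page_protect(pg, VM_PROT_NONE);
 *	uvm_pagefree(pg);
 *	rw_exit(uobj->vmobjlock);
 */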
|
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
void
|
2005-06-27 06:19:48 +04:00
|
|
|
uvm_pagefree(struct vm_page *pg)
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
2019-12-27 15:51:56 +03:00
|
|
|
struct pgfreelist *pgfl;
|
|
|
|
struct pgflbucket *pgb;
|
2008-06-04 16:45:28 +04:00
|
|
|
struct uvm_cpu *ucpu;
|
2019-12-27 15:51:56 +03:00
|
|
|
kmutex_t *lock;
|
|
|
|
int bucket, s;
|
|
|
|
bool locked;
|
2001-09-16 00:36:31 +04:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
#ifdef DEBUG
|
|
|
|
if (pg->uobject == (void *)0xdeadbeef &&
|
|
|
|
pg->uanon == (void *)0xdeadbeef) {
|
2002-09-27 19:35:29 +04:00
|
|
|
panic("uvm_pagefree: freeing free page %p", pg);
|
2000-11-27 11:39:39 +03:00
|
|
|
}
|
2003-11-03 06:58:28 +03:00
|
|
|
#endif /* DEBUG */
|
2000-11-27 11:39:39 +03:00
|
|
|
|
2007-07-21 23:21:53 +04:00
|
|
|
KASSERT((pg->flags & PG_PAGEOUT) == 0);
|
2019-12-13 23:10:21 +03:00
|
|
|
KASSERT(!(pg->flags & PG_FREE));
|
2020-02-23 18:46:38 +03:00
|
|
|
KASSERT(pg->uobject == NULL || rw_write_held(pg->uobject->vmobjlock));
|
2008-01-02 14:48:20 +03:00
|
|
|
KASSERT(pg->uobject != NULL || pg->uanon == NULL ||
|
2020-02-23 18:46:38 +03:00
|
|
|
rw_write_held(pg->uanon->an_lock));
|
2007-07-21 23:21:53 +04:00
|
|
|
|
2019-12-18 23:38:14 +03:00
|
|
|
/*
|
2020-03-03 10:51:26 +03:00
|
|
|
* remove the page from the object's tree before acquiring any page
|
2019-12-18 23:38:14 +03:00
|
|
|
* interlocks: this can acquire locks to free radixtree nodes.
|
|
|
|
*/
|
|
|
|
if (pg->uobject != NULL) {
|
|
|
|
uvm_pageremove_tree(pg->uobject, pg);
|
|
|
|
}
|
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
2001-09-16 00:36:31 +04:00
|
|
|
* if the page is loaned, resolve the loan instead of freeing.
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
|
|
|
|
2001-09-16 00:36:31 +04:00
|
|
|
if (pg->loan_count) {
|
2001-11-06 11:07:49 +03:00
|
|
|
KASSERT(pg->wire_count == 0);
|
2001-05-25 08:06:11 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/*
|
2001-09-16 00:36:31 +04:00
|
|
|
* if the page is owned by an anon then we just want to
|
2001-11-06 11:07:49 +03:00
|
|
|
* drop anon ownership. the kernel will free the page when
|
|
|
|
* it is done with it. if the page is owned by an object,
|
|
|
|
* remove it from the object and mark it dirty for the benefit
|
|
|
|
* of possible anon owners.
|
|
|
|
*
|
|
|
|
* regardless of previous ownership, wakeup any waiters,
|
|
|
|
* unbusy the page, and we're done.
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
2000-11-27 11:39:39 +03:00
|
|
|
|
2020-01-01 01:42:50 +03:00
|
|
|
uvm_pagelock(pg);
|
2019-12-13 23:10:21 +03:00
|
|
|
locked = true;
|
2001-12-31 22:21:36 +03:00
|
|
|
if (pg->uobject != NULL) {
|
2019-12-18 23:38:14 +03:00
|
|
|
uvm_pageremove_object(pg->uobject, pg);
|
2020-01-15 20:55:43 +03:00
|
|
|
pg->flags &= ~(PG_FILE|PG_AOBJ);
|
2001-12-31 22:21:36 +03:00
|
|
|
} else if (pg->uanon != NULL) {
|
2019-12-13 23:10:21 +03:00
|
|
|
if ((pg->flags & PG_ANON) == 0) {
|
2001-12-31 22:21:36 +03:00
|
|
|
pg->loan_count--;
|
|
|
|
} else {
|
2020-06-12 01:21:05 +03:00
|
|
|
const unsigned status = uvm_pagegetdirty(pg);
|
2019-12-13 23:10:21 +03:00
|
|
|
pg->flags &= ~PG_ANON;
|
2020-06-12 01:21:05 +03:00
|
|
|
cpu_count(CPU_COUNT_ANONUNKNOWN + status, -1);
|
2001-12-31 22:21:36 +03:00
|
|
|
}
|
2005-05-11 17:02:25 +04:00
|
|
|
pg->uanon->an_page = NULL;
|
2001-12-31 22:21:36 +03:00
|
|
|
pg->uanon = NULL;
|
2001-09-16 00:36:31 +04:00
|
|
|
}
|
2020-03-14 23:23:51 +03:00
|
|
|
if (pg->pqflags & PQ_WANTED) {
|
2001-11-06 11:07:49 +03:00
|
|
|
wakeup(pg);
|
|
|
|
}
|
2020-03-14 23:23:51 +03:00
|
|
|
pg->pqflags &= ~PQ_WANTED;
|
|
|
|
pg->flags &= ~(PG_BUSY|PG_RELEASED|PG_PAGER1);
|
2001-11-06 11:07:49 +03:00
|
|
|
#ifdef UVM_PAGE_TRKOWN
|
|
|
|
pg->owner_tag = NULL;
|
|
|
|
#endif
|
2020-01-15 20:55:43 +03:00
|
|
|
KASSERT((pg->flags & PG_STAT) == 0);
|
2001-12-31 22:21:36 +03:00
|
|
|
if (pg->loan_count) {
|
2006-12-15 16:51:30 +03:00
|
|
|
KASSERT(pg->uobject == NULL);
|
|
|
|
if (pg->uanon == NULL) {
|
|
|
|
uvm_pagedequeue(pg);
|
|
|
|
}
|
2020-01-01 01:42:50 +03:00
|
|
|
uvm_pageunlock(pg);
|
2001-12-31 22:21:36 +03:00
|
|
|
return;
|
|
|
|
}
|
2019-12-13 23:10:21 +03:00
|
|
|
} else if (pg->uobject != NULL || pg->uanon != NULL ||
|
|
|
|
pg->wire_count != 0) {
|
2020-01-01 01:42:50 +03:00
|
|
|
uvm_pagelock(pg);
|
2019-12-13 23:10:21 +03:00
|
|
|
locked = true;
|
|
|
|
} else {
|
|
|
|
locked = false;
|
2001-09-16 00:36:31 +04:00
|
|
|
}
|
1998-03-09 03:58:55 +03:00
|
|
|
|
2001-09-16 00:36:31 +04:00
|
|
|
/*
|
|
|
|
* remove page from its object or anon.
|
|
|
|
*/
|
2001-12-31 22:21:36 +03:00
|
|
|
if (pg->uobject != NULL) {
|
2019-12-18 23:38:14 +03:00
|
|
|
uvm_pageremove_object(pg->uobject, pg);
|
2001-12-31 22:21:36 +03:00
|
|
|
} else if (pg->uanon != NULL) {
|
2020-01-15 20:55:43 +03:00
|
|
|
const unsigned int status = uvm_pagegetdirty(pg);
|
2005-05-11 17:02:25 +04:00
|
|
|
pg->uanon->an_page = NULL;
|
2019-12-13 23:10:21 +03:00
|
|
|
pg->uanon = NULL;
|
2020-06-12 01:21:05 +03:00
|
|
|
cpu_count(CPU_COUNT_ANONUNKNOWN + status, -1);
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* if the page was wired, unwire it now.
|
|
|
|
*/
|
2000-11-27 11:39:39 +03:00
|
|
|
|
2000-04-24 21:12:00 +04:00
|
|
|
if (pg->wire_count) {
|
1998-03-09 03:58:55 +03:00
|
|
|
pg->wire_count = 0;
|
2019-12-13 23:10:21 +03:00
|
|
|
atomic_dec_uint(&uvmexp.wired);
|
|
|
|
}
|
|
|
|
if (locked) {
|
2020-03-14 23:23:51 +03:00
|
|
|
/*
|
|
|
|
* wake anyone waiting on the page.
|
|
|
|
*/
|
|
|
|
if ((pg->pqflags & PQ_WANTED) != 0) {
|
|
|
|
pg->pqflags &= ~PQ_WANTED;
|
|
|
|
wakeup(pg);
|
|
|
|
}
|
|
|
|
|
2020-01-01 01:42:50 +03:00
|
|
|
/*
|
|
|
|
* now remove the page from the queues.
|
|
|
|
*/
|
|
|
|
uvm_pagedequeue(pg);
|
|
|
|
uvm_pageunlock(pg);
|
|
|
|
} else {
|
|
|
|
KASSERT(!uvmpdpol_pageisqueued_p(pg));
|
1998-03-09 03:58:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2000-11-27 11:39:39 +03:00
|
|
|
* and put on free queue
|
1998-03-09 03:58:55 +03:00
|
|
|
*/
|
|
|
|
|
1998-02-07 05:34:08 +03:00
|
|
|
#ifdef DEBUG
|
1998-03-09 03:58:55 +03:00
|
|
|
pg->uobject = (void *)0xdeadbeef;
|
|
|
|
pg->uanon = (void *)0xdeadbeef;
|
2003-11-03 06:58:28 +03:00
|
|
|
#endif /* DEBUG */
|
|
|
|
|
2020-01-06 01:01:09 +03:00
|
|
|
/* Try to send the page to the per-CPU cache. */
|
2019-12-27 15:51:56 +03:00
|
|
|
s = splvm();
|
|
|
|
ucpu = curcpu()->ci_data.cpu_uvm;
|
2020-01-06 01:01:09 +03:00
|
|
|
bucket = uvm_page_get_bucket(pg);
|
2019-12-27 15:51:56 +03:00
|
|
|
if (bucket == ucpu->pgflbucket && uvm_pgflcache_free(ucpu, pg)) {
|
|
|
|
splx(s);
|
|
|
|
return;
|
2008-06-04 16:45:28 +04:00
|
|
|
}
|
2000-04-24 21:12:00 +04:00
|
|
|
|
2019-12-27 15:51:56 +03:00
|
|
|
/* Didn't work. Never mind, send it to a global bucket. */
|
|
|
|
pgfl = &uvm.page_free[uvm_page_get_freelist(pg)];
|
|
|
|
pgb = pgfl->pgfl_buckets[bucket];
|
|
|
|
lock = &uvm_freelist_locks[bucket].lock;
|
|
|
|
|
|
|
|
mutex_spin_enter(lock);
|
|
|
|
/* PG_FREE must be set under lock because of uvm_pglistalloc(). */
|
2020-06-15 00:41:42 +03:00
|
|
|
pg->flags = PG_FREE;
|
2019-12-27 15:51:56 +03:00
|
|
|
LIST_INSERT_HEAD(&pgb->pgb_colors[VM_PGCOLOR(pg)], pg, pageq.list);
|
|
|
|
pgb->pgb_nfree++;
|
2020-10-18 21:31:31 +03:00
|
|
|
CPU_COUNT(CPU_COUNT_FREEPAGES, 1);
|
2019-12-27 15:51:56 +03:00
|
|
|
mutex_spin_exit(lock);
|
|
|
|
splx(s);
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
/*
|
|
|
|
* uvm_page_unbusy: unbusy an array of pages.
|
|
|
|
*
|
|
|
|
* => pages must either all belong to the same object, or all belong to anons.
|
|
|
|
* => if pages are object-owned, object must be locked.
|
2001-09-16 00:36:31 +04:00
|
|
|
* => if pages are anon-owned, anons must be locked.
|
2004-05-05 15:58:27 +04:00
|
|
|
* => caller must make sure that anon-owned pages are not PG_RELEASED.
|
2000-11-27 11:39:39 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
void
|
2005-06-27 06:19:48 +04:00
|
|
|
uvm_page_unbusy(struct vm_page **pgs, int npgs)
|
2000-11-27 11:39:39 +03:00
|
|
|
{
|
|
|
|
struct vm_page *pg;
|
2020-10-18 21:22:29 +03:00
|
|
|
int i, pageout_done;
|
2020-07-09 08:57:15 +03:00
|
|
|
UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
|
2000-11-27 11:39:39 +03:00
|
|
|
|
2020-10-18 21:22:29 +03:00
|
|
|
pageout_done = 0;
|
2000-11-27 11:39:39 +03:00
|
|
|
for (i = 0; i < npgs; i++) {
|
|
|
|
pg = pgs[i];
|
2003-01-27 05:10:20 +03:00
|
|
|
if (pg == NULL || pg == PGO_DONTCARE) {
|
2000-11-27 11:39:39 +03:00
|
|
|
continue;
|
|
|
|
}
|
2004-05-05 15:58:27 +04:00
|
|
|
|
2020-02-23 18:46:38 +03:00
|
|
|
KASSERT(uvm_page_owner_locked_p(pg, true));
|
2004-05-05 15:58:27 +04:00
|
|
|
KASSERT(pg->flags & PG_BUSY);
|
2020-10-18 21:22:29 +03:00
|
|
|
|
|
|
|
if (pg->flags & PG_PAGEOUT) {
|
|
|
|
pg->flags &= ~PG_PAGEOUT;
|
|
|
|
pg->flags |= PG_RELEASED;
|
|
|
|
pageout_done++;
|
|
|
|
atomic_inc_uint(&uvmexp.pdfreed);
|
|
|
|
}
|
2000-11-27 11:39:39 +03:00
|
|
|
if (pg->flags & PG_RELEASED) {
|
Update the kernhist(9) kernel history code to address issues identified
in PR kern/52639, as well as some general cleaning-up...
(As proposed on tech-kern@ with additional changes and enhancements.)
Details of changes:
* All history arguments are now stored as uintmax_t values[1], both in
the kernel and in the structures used for exporting the history data
to userland via sysctl(9). This avoids problems on some architectures
where passing a 64-bit (or larger) value to printf(3) can cause it to
process the value as multiple arguments. (This can be particularly
problematic when printf()'s format string is not a literal, since in
that case the compiler cannot know how large each argument should be.)
* Update the data structures used for exporting kernel history data to
include a version number as well as the length of history arguments.
* All [2] existing users of kernhist(9) have had their format strings
updated. Each format specifier now includes an explicit length
modifier 'j' to refer to numeric values of the size of uintmax_t.
* All [2] existing users of kernhist(9) have had their format strings
updated to replace uses of "%p" with "%#jx", and the pointer
arguments are now cast to (uintptr_t) before being subsequently cast
to (uintmax_t). This is needed to avoid compiler warnings about
casting "pointer to integer of a different size."
* All [2] existing users of kernhist(9) have had instances of "%s" or
"%c" format strings replaced with numeric formats; several instances
of mis-match between format string and argument list have been fixed.
* vmstat(1) has been modified to handle the new size of arguments in the
history data as exported by sysctl(9).
* vmstat(1) now provides a warning message if the history requested with
the -u option does not exist (previously, this condition was silently
ignored, with only a single blank line being printed).
* vmstat(1) now checks the version and argument length included in the
data exported via sysctl(9) and exits if they do not match the values
with which vmstat was built.
* The kernhist(9) man-page has been updated to note the additional
requirements imposed on the format strings, along with several other
minor changes and enhancements.
[1] It would have been possible to use an explicit length (for example,
uint64_t) for the history arguments. But that would require another
"rototill" of all the users in the future when we add support for an
architecture that supports a larger size. Also, the printf(3) format
specifiers for explicitly-sized values, such as "%"PRIu64, are much
more verbose (and less aesthetically appealing, IMHO) than simply
using "%ju".
[2] I've tried very hard to find "all [the] existing users of kernhist(9)"
but it is possible that I've missed some of them. I would be glad to
update any stragglers that anyone identifies.
2017-10-28 03:37:11 +03:00
|
|
|
UVMHIST_LOG(ubchist, "releasing pg %#jx",
|
|
|
|
(uintptr_t)pg, 0, 0, 0);
|
2004-05-05 15:58:27 +04:00
|
|
|
KASSERT(pg->uobject != NULL ||
|
|
|
|
(pg->uanon != NULL && pg->uanon->an_ref > 0));
|
2001-09-16 00:36:31 +04:00
|
|
|
pg->flags &= ~PG_RELEASED;
|
|
|
|
uvm_pagefree(pg);
|
2000-11-27 11:39:39 +03:00
|
|
|
} else {
|
2020-03-17 21:31:38 +03:00
|
|
|
UVMHIST_LOG(ubchist, "unbusying pg %#jx",
|
|
|
|
(uintptr_t)pg, 0, 0, 0);
|
2009-01-16 10:01:28 +03:00
|
|
|
KASSERT((pg->flags & PG_FAKE) == 0);
|
2020-03-17 21:31:38 +03:00
|
|
|
pg->flags &= ~PG_BUSY;
|
2020-03-14 23:23:51 +03:00
|
|
|
uvm_pagelock(pg);
|
2020-03-17 21:31:38 +03:00
|
|
|
uvm_pagewakeup(pg);
|
2020-03-14 23:23:51 +03:00
|
|
|
uvm_pageunlock(pg);
|
2020-03-17 21:31:38 +03:00
|
|
|
UVM_PAGE_OWN(pg, NULL);
|
2000-11-27 11:39:39 +03:00
|
|
|
}
|
|
|
|
}
|
2020-10-18 21:22:29 +03:00
|
|
|
if (pageout_done != 0) {
|
|
|
|
uvm_pageout_done(pageout_done);
|
|
|
|
}
|
2000-11-27 11:39:39 +03:00
|
|
|
}
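/*
 * Illustrative sketch only (not part of this file): a typical caller of
 * uvm_page_unbusy() after finishing I/O on a batch of object-owned pages.
 * "uobj", "pgs" and "npages" are assumed to come from the surrounding
 * pager code.
 *
 *	rw_enter(uobj->vmobjlock, RW_WRITER);
 *	uvm_page_unbusy(pgs, npages);	(wakes waiters; frees any pages
 *					 left marked PG_RELEASED)
 *	rw_exit(uobj->vmobjlock);
 */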
|
|
|
|
|
2020-03-14 23:23:51 +03:00
|
|
|
/*
|
|
|
|
* uvm_pagewait: wait for a busy page
|
|
|
|
*
|
|
|
|
* => page must be known PG_BUSY
|
|
|
|
* => object must be read or write locked
|
|
|
|
* => object will be unlocked on return
|
|
|
|
*/
|
|
|
|
|
|
|
|
void
|
|
|
|
uvm_pagewait(struct vm_page *pg, krwlock_t *lock, const char *wmesg)
|
|
|
|
{
|
|
|
|
|
|
|
|
KASSERT(rw_lock_held(lock));
|
|
|
|
KASSERT((pg->flags & PG_BUSY) != 0);
|
|
|
|
KASSERT(uvm_page_owner_locked_p(pg, false));
|
|
|
|
|
|
|
|
mutex_enter(&pg->interlock);
|
2020-03-17 21:31:38 +03:00
|
|
|
pg->pqflags |= PQ_WANTED;
|
2020-05-24 22:46:59 +03:00
|
|
|
rw_exit(lock);
|
2020-03-14 23:23:51 +03:00
|
|
|
UVM_UNLOCK_AND_WAIT(pg, &pg->interlock, false, wmesg, 0);
|
|
|
|
}
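/*
 * Illustrative sketch only: the usual lookup-and-retry loop around
 * uvm_pagewait().  "uobj" and "off" are assumed to identify the page;
 * because the object lock is dropped while sleeping, the lookup must be
 * redone after waking up.
 *
 *	rw_enter(uobj->vmobjlock, RW_WRITER);
 *	for (;;) {
 *		pg = uvm_pagelookup(uobj, off);
 *		if (pg == NULL || (pg->flags & PG_BUSY) == 0)
 *			break;
 *		uvm_pagewait(pg, uobj->vmobjlock, "pgwait");
 *		rw_enter(uobj->vmobjlock, RW_WRITER);
 *	}
 *	... use or create the page; vmobjlock is still held here ...
 */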
|
|
|
|
|
|
|
|
/*
|
2020-03-17 21:31:38 +03:00
|
|
|
* uvm_pagewakeup: wake anyone waiting on a page
|
2020-03-14 23:23:51 +03:00
|
|
|
*
|
|
|
|
* => page interlock must be held
|
|
|
|
*/
|
|
|
|
|
|
|
|
void
|
2020-03-17 21:31:38 +03:00
|
|
|
uvm_pagewakeup(struct vm_page *pg)
|
2020-03-14 23:23:51 +03:00
|
|
|
{
|
2020-07-09 08:57:15 +03:00
|
|
|
UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
|
2020-03-14 23:23:51 +03:00
|
|
|
|
|
|
|
KASSERT(mutex_owned(&pg->interlock));
|
|
|
|
|
2020-03-17 21:31:38 +03:00
|
|
|
UVMHIST_LOG(ubchist, "waking pg %#jx", (uintptr_t)pg, 0, 0, 0);
|
2020-03-14 23:23:51 +03:00
|
|
|
|
|
|
|
if ((pg->pqflags & PQ_WANTED) != 0) {
|
|
|
|
wakeup(pg);
|
|
|
|
pg->pqflags &= ~PQ_WANTED;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-24 22:46:59 +03:00
|
|
|
/*
|
|
|
|
* uvm_pagewanted_p: return true if someone is waiting on the page
|
|
|
|
*
|
|
|
|
* => object must be write locked (lock out all concurrent access)
|
|
|
|
*/
|
|
|
|
|
|
|
|
bool
|
|
|
|
uvm_pagewanted_p(struct vm_page *pg)
|
|
|
|
{
|
|
|
|
|
|
|
|
KASSERT(uvm_page_owner_locked_p(pg, true));
|
|
|
|
|
|
|
|
return (atomic_load_relaxed(&pg->pqflags) & PQ_WANTED) != 0;
|
|
|
|
}
|
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
#if defined(UVM_PAGE_TRKOWN)
|
|
|
|
/*
|
|
|
|
* uvm_page_own: set or release page ownership
|
|
|
|
*
|
|
|
|
* => this is a debugging function that keeps track of who sets PG_BUSY
|
|
|
|
* and where they do it. it can be used to track down problems
|
|
|
|
* such as a process setting "PG_BUSY" and never releasing it.
|
|
|
|
* => page's object [if any] must be locked
|
|
|
|
* => if "tag" is NULL then we are releasing page ownership
|
|
|
|
*/
|
1998-03-09 03:58:55 +03:00
|
|
|
void
|
2005-06-27 06:19:48 +04:00
|
|
|
uvm_page_own(struct vm_page *pg, const char *tag)
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
2006-04-13 12:33:18 +04:00
|
|
|
|
2001-09-16 00:36:31 +04:00
|
|
|
KASSERT((pg->flags & (PG_PAGEOUT|PG_RELEASED)) == 0);
|
2020-02-23 18:46:38 +03:00
|
|
|
KASSERT(uvm_page_owner_locked_p(pg, true));
|
2006-04-13 12:33:18 +04:00
|
|
|
|
1998-03-09 03:58:55 +03:00
|
|
|
/* gain ownership? */
|
|
|
|
if (tag) {
|
2006-04-13 12:33:18 +04:00
|
|
|
KASSERT((pg->flags & PG_BUSY) != 0);
|
1998-03-09 03:58:55 +03:00
|
|
|
if (pg->owner_tag) {
|
|
|
|
printf("uvm_page_own: page %p already owned "
|
2020-05-19 23:46:39 +03:00
|
|
|
"by proc %d.%d [%s]\n", pg,
|
|
|
|
pg->owner, pg->lowner, pg->owner_tag);
|
1998-03-09 03:58:55 +03:00
|
|
|
panic("uvm_page_own");
|
|
|
|
}
|
2014-04-21 20:33:48 +04:00
|
|
|
pg->owner = curproc->p_pid;
|
|
|
|
pg->lowner = curlwp->l_lid;
|
1998-03-09 03:58:55 +03:00
|
|
|
pg->owner_tag = tag;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* drop ownership */
|
2006-04-13 12:33:18 +04:00
|
|
|
KASSERT((pg->flags & PG_BUSY) == 0);
|
1998-03-09 03:58:55 +03:00
|
|
|
if (pg->owner_tag == NULL) {
|
|
|
|
printf("uvm_page_own: dropping ownership of an non-owned "
|
|
|
|
"page (%p)\n", pg);
|
|
|
|
panic("uvm_page_own");
|
|
|
|
}
|
2002-02-20 10:06:56 +03:00
|
|
|
pg->owner_tag = NULL;
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
#endif
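/*
 * Illustrative sketch only: the UVM_PAGE_OWN() bracketing that this
 * debugging hook supports.  The tag string here is hypothetical; any
 * identifying literal will do.
 *
 *	pg->flags |= PG_BUSY;
 *	UVM_PAGE_OWN(pg, "example_getpages");
 *	... work on the busy page ...
 *	pg->flags &= ~PG_BUSY;
 *	UVM_PAGE_OWN(pg, NULL);		(release ownership)
 */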
|
2000-04-24 21:12:00 +04:00
|
|
|
|
2006-02-11 15:45:07 +03:00
|
|
|
/*
|
|
|
|
* uvm_pagelookup: look up a page
|
|
|
|
*
|
|
|
|
* => caller should lock object to keep someone from pulling the page
|
|
|
|
* out from under it
|
|
|
|
*/
|
|
|
|
|
|
|
|
struct vm_page *
|
|
|
|
uvm_pagelookup(struct uvm_object *obj, voff_t off)
|
|
|
|
{
|
|
|
|
struct vm_page *pg;
|
|
|
|
|
2022-10-27 02:38:05 +03:00
|
|
|
KASSERT(db_active || rw_lock_held(obj->vmobjlock));
|
2007-07-21 23:21:53 +04:00
|
|
|
|
2019-12-14 20:28:58 +03:00
|
|
|
pg = radix_tree_lookup_node(&obj->uo_pages, off >> PAGE_SHIFT);
|
2008-06-04 19:06:04 +04:00
|
|
|
|
2006-02-11 15:45:07 +03:00
|
|
|
KASSERT(pg == NULL || obj->uo_npages != 0);
|
|
|
|
KASSERT(pg == NULL || (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
|
|
|
|
(pg->flags & PG_BUSY) != 0);
|
2010-09-25 02:51:50 +04:00
|
|
|
return pg;
|
2006-02-11 15:45:07 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_pagewire: wire the page, thus removing it from the daemon's grasp
|
|
|
|
*
|
2019-12-13 23:10:21 +03:00
|
|
|
* => caller must lock objects
|
2020-01-01 01:42:50 +03:00
|
|
|
* => caller must hold pg->interlock
|
2006-02-11 15:45:07 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
void
|
|
|
|
uvm_pagewire(struct vm_page *pg)
|
|
|
|
{
|
2019-12-13 23:10:21 +03:00
|
|
|
|
2020-02-23 18:46:38 +03:00
|
|
|
KASSERT(uvm_page_owner_locked_p(pg, true));
|
2020-01-01 01:42:50 +03:00
|
|
|
KASSERT(mutex_owned(&pg->interlock));
|
2006-09-15 19:51:12 +04:00
|
|
|
#if defined(READAHEAD_STATS)
|
2019-12-13 23:10:21 +03:00
|
|
|
if ((pg->flags & PG_READAHEAD) != 0) {
|
2006-09-15 19:51:12 +04:00
|
|
|
uvm_ra_hit.ev_count++;
|
2019-12-13 23:10:21 +03:00
|
|
|
pg->flags &= ~PG_READAHEAD;
|
2006-09-15 19:51:12 +04:00
|
|
|
}
|
|
|
|
#endif /* defined(READAHEAD_STATS) */
|
2006-02-11 15:45:07 +03:00
|
|
|
if (pg->wire_count == 0) {
|
|
|
|
uvm_pagedequeue(pg);
|
2019-12-13 23:10:21 +03:00
|
|
|
atomic_inc_uint(&uvmexp.wired);
|
2006-02-11 15:45:07 +03:00
|
|
|
}
|
|
|
|
pg->wire_count++;
|
2018-05-19 14:02:33 +03:00
|
|
|
KASSERT(pg->wire_count > 0); /* detect wraparound */
|
2006-02-11 15:45:07 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_pageunwire: unwire the page.
|
|
|
|
*
|
|
|
|
* => activate if wire count goes to zero.
|
2019-12-13 23:10:21 +03:00
|
|
|
* => caller must lock objects
|
2020-01-01 01:42:50 +03:00
|
|
|
* => caller must hold pg->interlock
|
2006-02-11 15:45:07 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
void
|
|
|
|
uvm_pageunwire(struct vm_page *pg)
|
|
|
|
{
|
2019-12-13 23:10:21 +03:00
|
|
|
|
2020-02-23 18:46:38 +03:00
|
|
|
KASSERT(uvm_page_owner_locked_p(pg, true));
|
Avoid a panic from the sequence
mlock(buf, 0);
munlock(buf, 0);
mlock(buf, page);
munlock(buf, page);
where buf is page aligned, and page is actually anything > 0
(but not too big) which will get rounded up to the next multiple
of the page size.
In that sequence, it is possible that the 1st munlock() is optional.
Add a KASSERT() (or two) to detect the first effects of the problem
(without that, or in !DIAGNOSTIC kernels) the problem eventually
causes some kind of problem or other (most often still a panic.)
After this, mlock(anything, 0) (or munlock) validates "anything"
but is otherwise a no-op (regardless of the alignment of anything).
Also, don't treat mlock(buf, verybig) as equivalent to mlock(buf, 0)
which is (more or less) what we had been doing.
XXX pullup -8 (maybe -7 as well, need to check).
2019-03-14 22:10:04 +03:00
|
|
|
KASSERT(pg->wire_count != 0);
|
2019-12-13 23:10:21 +03:00
|
|
|
KASSERT(!uvmpdpol_pageisqueued_p(pg));
|
2020-01-01 01:42:50 +03:00
|
|
|
KASSERT(mutex_owned(&pg->interlock));
|
2006-02-11 15:45:07 +03:00
|
|
|
pg->wire_count--;
|
|
|
|
if (pg->wire_count == 0) {
|
2006-02-12 12:19:27 +03:00
|
|
|
uvm_pageactivate(pg);
|
2019-03-14 22:10:04 +03:00
|
|
|
KASSERT(uvmexp.wired != 0);
|
2019-12-13 23:10:21 +03:00
|
|
|
atomic_dec_uint(&uvmexp.wired);
|
2006-02-11 15:45:07 +03:00
|
|
|
}
|
|
|
|
}
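/*
 * Illustrative sketch only: wiring and later unwiring a page.  Both
 * calls require the owning object's lock and the page interlock, so
 * they are bracketed with uvm_pagelock()/uvm_pageunlock().
 *
 *	(object lock held)
 *	uvm_pagelock(pg);
 *	uvm_pagewire(pg);	(removes pg from the paging queues)
 *	uvm_pageunlock(pg);
 *	...
 *	uvm_pagelock(pg);
 *	uvm_pageunwire(pg);	(reactivates pg when the count hits zero)
 *	uvm_pageunlock(pg);
 */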
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_pagedeactivate: deactivate page
|
|
|
|
*
|
2019-12-13 23:10:21 +03:00
|
|
|
* => caller must lock objects
|
2006-02-11 15:45:07 +03:00
|
|
|
* => caller must check to make sure page is not wired
|
|
|
|
* => object that page belongs to must be locked (so we can adjust pg->flags)
|
|
|
|
* => caller must clear the reference on the page before calling
|
2020-01-01 01:42:50 +03:00
|
|
|
* => caller must hold pg->interlock
|
2006-02-11 15:45:07 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
void
|
|
|
|
uvm_pagedeactivate(struct vm_page *pg)
|
|
|
|
{
|
2006-09-15 19:51:12 +04:00
|
|
|
|
2020-03-15 00:06:35 +03:00
|
|
|
KASSERT(uvm_page_owner_locked_p(pg, false));
|
2020-01-01 01:42:50 +03:00
|
|
|
KASSERT(mutex_owned(&pg->interlock));
|
2019-12-13 23:10:21 +03:00
|
|
|
if (pg->wire_count == 0) {
|
|
|
|
KASSERT(uvmpdpol_pageisqueued_p(pg));
|
|
|
|
uvmpdpol_pagedeactivate(pg);
|
|
|
|
}
|
2006-02-11 15:45:07 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_pageactivate: activate page
|
|
|
|
*
|
2019-12-13 23:10:21 +03:00
|
|
|
* => caller must lock objects
|
2020-01-01 01:42:50 +03:00
|
|
|
* => caller must hold pg->interlock
|
2006-02-11 15:45:07 +03:00
|
|
|
*/
|
|
|
|
|
|
|
|
void
|
|
|
|
uvm_pageactivate(struct vm_page *pg)
|
|
|
|
{
|
2006-09-15 19:51:12 +04:00
|
|
|
|
2020-03-15 00:06:35 +03:00
|
|
|
KASSERT(uvm_page_owner_locked_p(pg, false));
|
2020-01-01 01:42:50 +03:00
|
|
|
KASSERT(mutex_owned(&pg->interlock));
|
2006-09-15 19:51:12 +04:00
|
|
|
#if defined(READAHEAD_STATS)
|
2019-12-13 23:10:21 +03:00
|
|
|
if ((pg->flags & PG_READAHEAD) != 0) {
|
2006-09-15 19:51:12 +04:00
|
|
|
uvm_ra_hit.ev_count++;
|
2019-12-13 23:10:21 +03:00
|
|
|
pg->flags &= ~PG_READAHEAD;
|
2006-02-11 15:45:07 +03:00
|
|
|
}
|
2006-09-15 19:51:12 +04:00
|
|
|
#endif /* defined(READAHEAD_STATS) */
|
2019-12-13 23:10:21 +03:00
|
|
|
if (pg->wire_count == 0) {
|
|
|
|
uvmpdpol_pageactivate(pg);
|
2006-09-15 19:51:12 +04:00
|
|
|
}
|
2006-02-11 15:45:07 +03:00
|
|
|
}
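/*
 * Illustrative sketch only: (re)activating a page after it has been
 * touched, e.g. at the end of a fault.  The interlock bracketing lets
 * uvm_pageunlock() pass any queued replacement intent on to uvmpdpol.
 *
 *	uvm_pagelock(pg);
 *	uvm_pageactivate(pg);
 *	uvm_pageunlock(pg);
 */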
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_pagedequeue: remove a page from any paging queue
|
2020-03-03 11:13:44 +03:00
|
|
|
*
|
2019-12-13 23:10:21 +03:00
|
|
|
* => caller must lock objects
|
2020-01-01 01:42:50 +03:00
|
|
|
* => caller must hold pg->interlock
|
2006-02-11 15:45:07 +03:00
|
|
|
*/
|
|
|
|
void
|
|
|
|
uvm_pagedequeue(struct vm_page *pg)
|
|
|
|
{
|
2006-09-15 19:51:12 +04:00
|
|
|
|
2020-02-23 18:46:38 +03:00
|
|
|
KASSERT(uvm_page_owner_locked_p(pg, true));
|
2020-01-01 01:42:50 +03:00
|
|
|
KASSERT(mutex_owned(&pg->interlock));
|
2006-09-15 19:51:12 +04:00
|
|
|
if (uvmpdpol_pageisqueued_p(pg)) {
|
2019-12-13 23:10:21 +03:00
|
|
|
uvmpdpol_pagedequeue(pg);
|
2006-02-11 15:45:07 +03:00
|
|
|
}
|
2006-09-15 19:51:12 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_pageenqueue: add a page to a paging queue without activating.
|
|
|
|
* used where a page is not really demanded (yet), e.g. read-ahead
|
2019-12-13 23:10:21 +03:00
|
|
|
*
|
|
|
|
* => caller must lock objects
|
2020-01-01 01:42:50 +03:00
|
|
|
* => caller must hold pg->interlock
|
2006-09-15 19:51:12 +04:00
|
|
|
*/
|
|
|
|
void
|
|
|
|
uvm_pageenqueue(struct vm_page *pg)
|
|
|
|
{
|
|
|
|
|
2020-03-15 00:06:35 +03:00
|
|
|
KASSERT(uvm_page_owner_locked_p(pg, false));
|
2020-01-01 01:42:50 +03:00
|
|
|
KASSERT(mutex_owned(&pg->interlock));
|
2019-12-13 23:10:21 +03:00
|
|
|
if (pg->wire_count == 0 && !uvmpdpol_pageisqueued_p(pg)) {
|
|
|
|
uvmpdpol_pageenqueue(pg);
|
2006-09-15 19:51:12 +04:00
|
|
|
}
|
2006-02-11 15:45:07 +03:00
|
|
|
}
|
|
|
|
|
2020-01-01 01:42:50 +03:00
|
|
|
/*
|
|
|
|
* uvm_pagelock: acquire page interlock
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
uvm_pagelock(struct vm_page *pg)
|
|
|
|
{
|
|
|
|
|
|
|
|
mutex_enter(&pg->interlock);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_pagelock2: acquire two page interlocks
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
uvm_pagelock2(struct vm_page *pg1, struct vm_page *pg2)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (pg1 < pg2) {
|
|
|
|
mutex_enter(&pg1->interlock);
|
|
|
|
mutex_enter(&pg2->interlock);
|
|
|
|
} else {
|
|
|
|
mutex_enter(&pg2->interlock);
|
|
|
|
mutex_enter(&pg1->interlock);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_pageunlock: release page interlock, and if a page replacement intent
|
|
|
|
* is set on the page, pass it to uvmpdpol to make real.
|
2020-03-03 11:13:44 +03:00
|
|
|
*
|
2020-01-01 01:42:50 +03:00
|
|
|
* => caller must hold pg->interlock
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
uvm_pageunlock(struct vm_page *pg)
|
|
|
|
{
|
|
|
|
|
|
|
|
if ((pg->pqflags & PQ_INTENT_SET) == 0 ||
|
|
|
|
(pg->pqflags & PQ_INTENT_QUEUED) != 0) {
|
|
|
|
mutex_exit(&pg->interlock);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
pg->pqflags |= PQ_INTENT_QUEUED;
|
|
|
|
mutex_exit(&pg->interlock);
|
|
|
|
uvmpdpol_pagerealize(pg);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_pageunlock2: release two page interlocks, and for both pages if a
|
|
|
|
* page replacement intent is set on the page, pass it to uvmpdpol to make
|
|
|
|
* real.
|
2020-03-03 11:13:44 +03:00
|
|
|
*
|
2020-01-01 01:42:50 +03:00
|
|
|
* => caller must hold pg->interlock
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
uvm_pageunlock2(struct vm_page *pg1, struct vm_page *pg2)
|
|
|
|
{
|
|
|
|
|
|
|
|
if ((pg1->pqflags & PQ_INTENT_SET) == 0 ||
|
|
|
|
(pg1->pqflags & PQ_INTENT_QUEUED) != 0) {
|
|
|
|
mutex_exit(&pg1->interlock);
|
|
|
|
pg1 = NULL;
|
|
|
|
} else {
|
|
|
|
pg1->pqflags |= PQ_INTENT_QUEUED;
|
|
|
|
mutex_exit(&pg1->interlock);
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((pg2->pqflags & PQ_INTENT_SET) == 0 ||
|
|
|
|
(pg2->pqflags & PQ_INTENT_QUEUED) != 0) {
|
|
|
|
mutex_exit(&pg2->interlock);
|
|
|
|
pg2 = NULL;
|
|
|
|
} else {
|
|
|
|
pg2->pqflags |= PQ_INTENT_QUEUED;
|
|
|
|
mutex_exit(&pg2->interlock);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (pg1 != NULL) {
|
|
|
|
uvmpdpol_pagerealize(pg1);
|
|
|
|
}
|
|
|
|
if (pg2 != NULL) {
|
|
|
|
uvmpdpol_pagerealize(pg2);
|
|
|
|
}
|
|
|
|
}
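/*
 * Illustrative sketch only: operating on two pages at once, e.g. when
 * transferring state from "opg" to "npg".  uvm_pagelock2() takes the
 * two interlocks in address order, so the caller needs no deadlock
 * avoidance of its own.
 *
 *	uvm_pagelock2(opg, npg);
 *	... adjust queue state / pqflags of both pages ...
 *	uvm_pageunlock2(opg, npg);
 */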
|
|
|
|
|
2006-02-11 15:45:07 +03:00
|
|
|
/*
|
|
|
|
* uvm_pagezero: zero fill a page
|
|
|
|
*
|
|
|
|
* => if page is part of an object then the object should be locked
|
|
|
|
* to protect pg->flags.
|
|
|
|
*/
|
|
|
|
|
|
|
|
void
|
|
|
|
uvm_pagezero(struct vm_page *pg)
|
|
|
|
{
|
2020-01-15 20:55:43 +03:00
|
|
|
|
|
|
|
uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
|
2006-02-11 15:45:07 +03:00
|
|
|
pmap_zero_page(VM_PAGE_TO_PHYS(pg));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_pagecopy: copy a page
|
|
|
|
*
|
|
|
|
* => if page is part of an object then the object should be locked
|
|
|
|
* to protect pg->flags.
|
|
|
|
*/
|
|
|
|
|
|
|
|
void
|
|
|
|
uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
|
|
|
|
{
|
|
|
|
|
2020-01-15 20:55:43 +03:00
|
|
|
uvm_pagemarkdirty(dst, UVM_PAGE_STATUS_DIRTY);
|
2006-02-11 15:45:07 +03:00
|
|
|
pmap_copy_page(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
|
|
|
|
}
|
|
|
|
|
2009-08-18 22:06:53 +04:00
|
|
|
/*
|
|
|
|
* uvm_pageismanaged: test whether a page (specified by PA) is managed.
|
|
|
|
*/
|
|
|
|
|
|
|
|
bool
|
|
|
|
uvm_pageismanaged(paddr_t pa)
|
|
|
|
{
|
|
|
|
|
2016-12-23 10:15:27 +03:00
|
|
|
return (uvm_physseg_find(atop(pa), NULL) != UVM_PHYSSEG_TYPE_INVALID);
|
2009-08-18 22:06:53 +04:00
|
|
|
}
|
|
|
|
|
2006-02-11 15:45:07 +03:00
|
|
|
/*
|
|
|
|
* uvm_page_lookup_freelist: look up the free list for the specified page
|
|
|
|
*/
|
|
|
|
|
|
|
|
int
|
|
|
|
uvm_page_lookup_freelist(struct vm_page *pg)
|
|
|
|
{
|
2016-12-23 10:15:27 +03:00
|
|
|
uvm_physseg_t upm;
|
2006-02-11 15:45:07 +03:00
|
|
|
|
2016-12-23 10:15:27 +03:00
|
|
|
upm = uvm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
|
|
|
|
KASSERT(upm != UVM_PHYSSEG_TYPE_INVALID);
|
|
|
|
return uvm_physseg_get_free_list(upm);
|
2006-02-11 15:45:07 +03:00
|
|
|
}
|
2009-08-18 23:08:39 +04:00
|
|
|
|
2011-06-12 07:35:36 +04:00
|
|
|
/*
|
2019-12-31 15:40:27 +03:00
|
|
|
* uvm_page_owner_locked_p: return true if object associated with page is
|
2011-06-12 07:35:36 +04:00
|
|
|
* locked. this is a weak check for runtime assertions only.
|
|
|
|
*/
|
|
|
|
|
|
|
|
bool
|
2020-02-23 18:46:38 +03:00
|
|
|
uvm_page_owner_locked_p(struct vm_page *pg, bool exclusive)
|
2011-06-12 07:35:36 +04:00
|
|
|
{
|
|
|
|
|
|
|
|
if (pg->uobject != NULL) {
|
2020-02-23 18:46:38 +03:00
|
|
|
return exclusive
|
|
|
|
? rw_write_held(pg->uobject->vmobjlock)
|
|
|
|
: rw_lock_held(pg->uobject->vmobjlock);
|
2011-06-12 07:35:36 +04:00
|
|
|
}
|
|
|
|
if (pg->uanon != NULL) {
|
2020-02-23 18:46:38 +03:00
|
|
|
return exclusive
|
|
|
|
? rw_write_held(pg->uanon->an_lock)
|
|
|
|
: rw_lock_held(pg->uanon->an_lock);
|
2011-06-12 07:35:36 +04:00
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-01-15 20:55:43 +03:00
|
|
|
/*
|
|
|
|
* uvm_pagereadonly_p: return true if the page should be mapped read-only
|
|
|
|
*/
|
|
|
|
|
|
|
|
bool
|
|
|
|
uvm_pagereadonly_p(struct vm_page *pg)
|
|
|
|
{
|
|
|
|
struct uvm_object * const uobj = pg->uobject;
|
|
|
|
|
2020-02-23 18:46:38 +03:00
|
|
|
KASSERT(uobj == NULL || rw_lock_held(uobj->vmobjlock));
|
|
|
|
KASSERT(uobj != NULL || rw_lock_held(pg->uanon->an_lock));
|
2020-01-15 20:55:43 +03:00
|
|
|
if ((pg->flags & PG_RDONLY) != 0) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
if (uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
if (uobj == NULL) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return UVM_OBJ_NEEDS_WRITEFAULT(uobj);
|
|
|
|
}
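/*
 * Illustrative sketch only: using the result to downgrade the protection
 * of a mapping about to be entered ("pmap" and "va" are assumed to come
 * from the surrounding fault handler).
 *
 *	vm_prot_t prot = VM_PROT_READ | VM_PROT_WRITE;
 *
 *	if (uvm_pagereadonly_p(pg))
 *		prot &= ~VM_PROT_WRITE;
 *	pmap_enter(pmap, va, VM_PAGE_TO_PHYS(pg), prot, PMAP_CANFAIL | prot);
 */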
|
|
|
|
|
2018-05-19 18:03:26 +03:00
|
|
|
#ifdef PMAP_DIRECT
|
|
|
|
/*
|
|
|
|
* Call pmap to translate a physical address into a virtual one and run a callback
|
|
|
|
* on it. Used to avoid actually mapping the pages; the pmap most likely uses a direct map
|
|
|
|
* or equivalent.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
uvm_direct_process(struct vm_page **pgs, u_int npages, voff_t off, vsize_t len,
|
|
|
|
int (*process)(void *, size_t, void *), void *arg)
|
|
|
|
{
|
|
|
|
int error = 0;
|
|
|
|
paddr_t pa;
|
|
|
|
size_t todo;
|
|
|
|
voff_t pgoff = (off & PAGE_MASK);
|
|
|
|
struct vm_page *pg;
|
|
|
|
|
2023-04-09 12:00:56 +03:00
|
|
|
KASSERT(npages > 0);
|
|
|
|
KASSERT(len > 0);
|
2018-05-19 18:03:26 +03:00
|
|
|
|
|
|
|
for (int i = 0; i < npages; i++) {
|
|
|
|
pg = pgs[i];
|
|
|
|
|
|
|
|
KASSERT(len > 0);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Caller is responsible for ensuring all the pages are
|
|
|
|
* available.
|
|
|
|
*/
|
2023-04-09 12:00:56 +03:00
|
|
|
KASSERT(pg != NULL);
|
|
|
|
KASSERT(pg != PGO_DONTCARE);
|
2018-05-19 18:03:26 +03:00
|
|
|
|
|
|
|
pa = VM_PAGE_TO_PHYS(pg);
|
|
|
|
todo = MIN(len, PAGE_SIZE - pgoff);
|
|
|
|
|
|
|
|
error = pmap_direct_process(pa, pgoff, todo, process, arg);
|
|
|
|
if (error)
|
|
|
|
break;
|
|
|
|
|
|
|
|
pgoff = 0;
|
|
|
|
len -= todo;
|
|
|
|
}
|
|
|
|
|
|
|
|
KASSERTMSG(error != 0 || len == 0, "len %lu != 0 for non-error", len);
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
#endif /* PMAP_DIRECT */
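/*
 * Illustrative sketch only: a hypothetical uvm_direct_process() callback
 * that copies the pages' contents into a kernel buffer without ever
 * entering them into a pmap ("copy_cb" and "dst" are part of the example,
 * not UVM).
 *
 *	static int
 *	copy_cb(void *kva, size_t len, void *arg)
 *	{
 *		char **dstp = arg;
 *
 *		memcpy(*dstp, kva, len);
 *		*dstp += len;
 *		return 0;
 *	}
 *
 *	error = uvm_direct_process(pgs, npages, off, len, copy_cb, &dst);
 */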
|
|
|
|
|
2009-08-18 23:08:39 +04:00
|
|
|
#if defined(DDB) || defined(DEBUGPRINT)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_page_printit: actually print the page
|
|
|
|
*/
|
|
|
|
|
|
|
|
static const char page_flagbits[] = UVM_PGFLAGBITS;
|
2020-01-21 23:37:06 +03:00
|
|
|
static const char page_pqflagbits[] = UVM_PQFLAGBITS;
|
2009-08-18 23:08:39 +04:00
|
|
|
|
|
|
|
void
|
|
|
|
uvm_page_printit(struct vm_page *pg, bool full,
|
|
|
|
void (*pr)(const char *, ...))
|
|
|
|
{
|
|
|
|
struct vm_page *tpg;
|
|
|
|
struct uvm_object *uobj;
|
2019-12-27 15:51:56 +03:00
|
|
|
struct pgflbucket *pgb;
|
2009-08-18 23:08:39 +04:00
|
|
|
struct pgflist *pgl;
|
|
|
|
char pgbuf[128];
|
|
|
|
|
|
|
|
(*pr)("PAGE %p:\n", pg);
|
|
|
|
snprintb(pgbuf, sizeof(pgbuf), page_flagbits, pg->flags);
|
2020-01-21 23:37:06 +03:00
|
|
|
(*pr)(" flags=%s\n", pgbuf);
|
|
|
|
snprintb(pgbuf, sizeof(pgbuf), page_pqflagbits, pg->pqflags);
|
|
|
|
(*pr)(" pqflags=%s\n", pgbuf);
|
|
|
|
(*pr)(" uobject=%p, uanon=%p, offset=0x%llx\n",
|
|
|
|
pg->uobject, pg->uanon, (long long)pg->offset);
|
|
|
|
(*pr)(" loan_count=%d wire_count=%d bucket=%d freelist=%d\n",
|
|
|
|
pg->loan_count, pg->wire_count, uvm_page_get_bucket(pg),
|
|
|
|
uvm_page_get_freelist(pg));
|
|
|
|
(*pr)(" pa=0x%lx\n", (long)VM_PAGE_TO_PHYS(pg));
|
2009-08-18 23:08:39 +04:00
|
|
|
#if defined(UVM_PAGE_TRKOWN)
|
|
|
|
if (pg->flags & PG_BUSY)
|
2020-05-19 23:46:39 +03:00
|
|
|
(*pr)(" owning process = %d.%d, tag=%s\n",
|
|
|
|
pg->owner, pg->lowner, pg->owner_tag);
|
2009-08-18 23:08:39 +04:00
|
|
|
else
|
|
|
|
(*pr)(" page not busy, no owner\n");
|
|
|
|
#else
|
|
|
|
(*pr)(" [page ownership tracking disabled]\n");
|
|
|
|
#endif
|
|
|
|
|
|
|
|
if (!full)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* cross-verify object/anon */
|
2019-12-13 23:10:21 +03:00
|
|
|
if ((pg->flags & PG_FREE) == 0) {
|
|
|
|
if (pg->flags & PG_ANON) {
|
2009-08-18 23:08:39 +04:00
|
|
|
if (pg->uanon == NULL || pg->uanon->an_page != pg)
|
|
|
|
(*pr)(" >>> ANON DOES NOT POINT HERE <<< (%p)\n",
|
|
|
|
(pg->uanon) ? pg->uanon->an_page : NULL);
|
|
|
|
else
|
|
|
|
(*pr)(" anon backpointer is OK\n");
|
|
|
|
} else {
|
|
|
|
uobj = pg->uobject;
|
|
|
|
if (uobj) {
|
|
|
|
(*pr)(" checking object list\n");
|
2019-12-16 00:11:34 +03:00
|
|
|
tpg = uvm_pagelookup(uobj, pg->offset);
|
2009-08-18 23:08:39 +04:00
|
|
|
if (tpg)
|
|
|
|
(*pr)(" page found on object list\n");
|
|
|
|
else
|
|
|
|
(*pr)(" >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* cross-verify page queue */
|
2019-12-13 23:10:21 +03:00
|
|
|
if (pg->flags & PG_FREE) {
|
2019-12-21 17:41:44 +03:00
|
|
|
int fl = uvm_page_get_freelist(pg);
|
2019-12-27 15:51:56 +03:00
|
|
|
int b = uvm_page_get_bucket(pg);
|
|
|
|
pgb = uvm.page_free[fl].pgfl_buckets[b];
|
|
|
|
pgl = &pgb->pgb_colors[VM_PGCOLOR(pg)];
|
2009-08-18 23:08:39 +04:00
|
|
|
(*pr)(" checking pageq list\n");
|
|
|
|
LIST_FOREACH(tpg, pgl, pageq.list) {
|
|
|
|
if (tpg == pg) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (tpg)
|
|
|
|
(*pr)(" page found on pageq list\n");
|
|
|
|
else
|
|
|
|
(*pr)(" >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2019-12-13 23:10:21 +03:00
|
|
|
* uvm_page_printall - print a summary of all managed pages
|
2009-08-18 23:08:39 +04:00
|
|
|
*/
|
|
|
|
|
|
|
|
void
|
|
|
|
uvm_page_printall(void (*pr)(const char *, ...))
|
|
|
|
{
|
2016-12-23 10:15:27 +03:00
|
|
|
uvm_physseg_t i;
|
|
|
|
paddr_t pfn;
|
2009-08-18 23:08:39 +04:00
|
|
|
struct vm_page *pg;
|
|
|
|
|
|
|
|
(*pr)("%18s %4s %4s %18s %18s"
|
|
|
|
#ifdef UVM_PAGE_TRKOWN
|
|
|
|
" OWNER"
|
|
|
|
#endif
|
|
|
|
"\n", "PAGE", "FLAG", "PQ", "UOBJECT", "UANON");
|
2016-12-23 10:15:27 +03:00
|
|
|
for (i = uvm_physseg_get_first();
|
|
|
|
uvm_physseg_valid_p(i);
|
|
|
|
i = uvm_physseg_get_next(i)) {
|
|
|
|
for (pfn = uvm_physseg_get_start(i);
|
2017-02-05 10:25:49 +03:00
|
|
|
pfn < uvm_physseg_get_end(i);
|
2016-12-23 10:15:27 +03:00
|
|
|
pfn++) {
|
|
|
|
pg = PHYS_TO_VM_PAGE(ptoa(pfn));
|
|
|
|
|
2019-12-13 23:10:21 +03:00
|
|
|
(*pr)("%18p %04x %08x %18p %18p",
|
2009-08-18 23:08:39 +04:00
|
|
|
pg, pg->flags, pg->pqflags, pg->uobject,
|
|
|
|
pg->uanon);
|
|
|
|
#ifdef UVM_PAGE_TRKOWN
|
|
|
|
if (pg->flags & PG_BUSY)
|
|
|
|
(*pr)(" %d [%s]", pg->owner, pg->owner_tag);
|
|
|
|
#endif
|
|
|
|
(*pr)("\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-27 15:51:56 +03:00
|
|
|
/*
|
|
|
|
* uvm_page_print_freelists - print a summary of the freelists
|
|
|
|
*/
|
|
|
|
|
|
|
|
void
|
|
|
|
uvm_page_print_freelists(void (*pr)(const char *, ...))
|
|
|
|
{
|
|
|
|
struct pgfreelist *pgfl;
|
|
|
|
struct pgflbucket *pgb;
|
|
|
|
int fl, b, c;
|
|
|
|
|
|
|
|
(*pr)("There are %d freelists with %d buckets of %d colors.\n\n",
|
|
|
|
VM_NFREELIST, uvm.bucketcount, uvmexp.ncolors);
|
2020-03-03 11:13:44 +03:00
|
|
|
|
2019-12-27 15:51:56 +03:00
|
|
|
for (fl = 0; fl < VM_NFREELIST; fl++) {
|
|
|
|
pgfl = &uvm.page_free[fl];
|
|
|
|
(*pr)("freelist(%d) @ %p\n", fl, pgfl);
|
|
|
|
for (b = 0; b < uvm.bucketcount; b++) {
|
|
|
|
pgb = uvm.page_free[fl].pgfl_buckets[b];
|
|
|
|
(*pr)(" bucket(%d) @ %p, nfree = %d, lock @ %p:\n",
|
|
|
|
b, pgb, pgb->pgb_nfree,
|
|
|
|
&uvm_freelist_locks[b].lock);
|
|
|
|
for (c = 0; c < uvmexp.ncolors; c++) {
|
|
|
|
(*pr)(" color(%d) @ %p, ", c,
|
|
|
|
&pgb->pgb_colors[c]);
|
|
|
|
(*pr)("first page = %p\n",
|
|
|
|
LIST_FIRST(&pgb->pgb_colors[c]));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-08-18 23:08:39 +04:00
|
|
|
#endif /* DDB || DEBUGPRINT */
|