2003-08-28 17:12:17 +04:00
|
|
|
/* $NetBSD: uvm_swap.c,v 1.82 2003/08/28 13:12:20 pk Exp $ */
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Copyright (c) 1995, 1996, 1997 Matthew R. Green
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. The name of the author may not be used to endorse or promote products
|
|
|
|
* derived from this software without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
|
|
|
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
|
|
|
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
|
|
|
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
|
|
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
|
|
|
|
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
|
|
|
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
|
|
|
|
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
|
|
|
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
1998-02-07 14:07:38 +03:00
|
|
|
*
|
|
|
|
* from: NetBSD: vm_swap.c,v 1.52 1997/12/02 13:47:37 pk Exp
|
|
|
|
* from: Id: uvm_swap.c,v 1.1.2.42 1998/02/02 20:38:06 chuck Exp
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
|
|
|
|
2001-11-10 10:36:59 +03:00
|
|
|
#include <sys/cdefs.h>
|
2003-08-28 17:12:17 +04:00
|
|
|
__KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v 1.82 2003/08/28 13:12:20 pk Exp $");
|
2001-11-10 10:36:59 +03:00
|
|
|
|
1998-02-19 03:55:04 +03:00
|
|
|
#include "fs_nfs.h"
|
1998-02-10 17:08:44 +03:00
|
|
|
#include "opt_uvmhist.h"
|
1998-08-29 17:27:50 +04:00
|
|
|
#include "opt_compat_netbsd.h"
|
2000-11-27 11:39:39 +03:00
|
|
|
#include "opt_ddb.h"
|
1998-02-10 17:08:44 +03:00
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
|
|
|
#include <sys/buf.h>
|
2000-04-15 22:08:12 +04:00
|
|
|
#include <sys/conf.h>
|
1998-02-05 09:25:08 +03:00
|
|
|
#include <sys/proc.h>
|
|
|
|
#include <sys/namei.h>
|
|
|
|
#include <sys/disklabel.h>
|
|
|
|
#include <sys/errno.h>
|
|
|
|
#include <sys/kernel.h>
|
|
|
|
#include <sys/malloc.h>
|
|
|
|
#include <sys/vnode.h>
|
|
|
|
#include <sys/file.h>
|
|
|
|
#include <sys/extent.h>
|
|
|
|
#include <sys/mount.h>
|
1998-07-24 00:51:09 +04:00
|
|
|
#include <sys/pool.h>
|
2003-01-18 11:51:40 +03:00
|
|
|
#include <sys/sa.h>
|
1998-02-05 09:25:08 +03:00
|
|
|
#include <sys/syscallargs.h>
|
1998-08-29 21:01:14 +04:00
|
|
|
#include <sys/swap.h>
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
#include <uvm/uvm.h>
|
|
|
|
|
|
|
|
#include <miscfs/specfs/specdev.h>
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_swap.c: manage configuration and i/o to swap space.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* swap space is managed in the following way:
|
2001-05-25 08:06:11 +04:00
|
|
|
*
|
1998-02-05 09:25:08 +03:00
|
|
|
* each swap partition or file is described by a "swapdev" structure.
|
|
|
|
* each "swapdev" structure contains a "swapent" structure which contains
|
|
|
|
* information that is passed up to the user (via system calls).
|
|
|
|
*
|
|
|
|
* each swap partition is assigned a "priority" (int) which controls
|
|
|
|
* swap parition usage.
|
|
|
|
*
|
|
|
|
* the system maintains a global data structure describing all swap
|
|
|
|
* partitions/files. there is a sorted LIST of "swappri" structures
|
|
|
|
* which describe "swapdev"'s at that priority. this LIST is headed
|
2001-05-25 08:06:11 +04:00
|
|
|
* by the "swap_priority" global var. each "swappri" contains a
|
1998-02-05 09:25:08 +03:00
|
|
|
* CIRCLEQ of "swapdev" structures at that priority.
|
|
|
|
*
|
|
|
|
* locking:
|
|
|
|
* - swap_syscall_lock (sleep lock): this lock serializes the swapctl
|
|
|
|
* system call and prevents the swap priority list from changing
|
|
|
|
* while we are in the middle of a system call (e.g. SWAP_STATS).
|
1999-03-26 20:34:15 +03:00
|
|
|
* - uvm.swap_data_lock (simple_lock): this lock protects all swap data
|
1998-02-05 09:25:08 +03:00
|
|
|
* structures including the priority list, the swapdev structures,
|
|
|
|
* and the swapmap extent.
|
|
|
|
*
|
|
|
|
* each swap device has the following info:
|
|
|
|
* - swap device in use (could be disabled, preventing future use)
|
|
|
|
* - swap enabled (allows new allocations on swap)
|
|
|
|
* - map info in /dev/drum
|
|
|
|
* - vnode pointer
|
|
|
|
* for swap files only:
|
|
|
|
* - block size
|
|
|
|
* - max byte count in buffer
|
|
|
|
* - buffer
|
|
|
|
*
|
|
|
|
* userland controls and configures swap with the swapctl(2) system call.
|
|
|
|
* the sys_swapctl performs the following operations:
|
|
|
|
* [1] SWAP_NSWAP: returns the number of swap devices currently configured
|
2001-05-25 08:06:11 +04:00
|
|
|
* [2] SWAP_STATS: given a pointer to an array of swapent structures
|
1998-02-05 09:25:08 +03:00
|
|
|
* (passed in via "arg") of a size passed in via "misc" ... we load
|
2002-04-01 16:24:11 +04:00
|
|
|
* the current swap config into the array. The actual work is done
|
|
|
|
* in the uvm_swap_stats(9) function.
|
1998-02-05 09:25:08 +03:00
|
|
|
* [3] SWAP_ON: given a pathname in arg (could be device or file) and a
|
|
|
|
* priority in "misc", start swapping on it.
|
|
|
|
* [4] SWAP_OFF: as SWAP_ON, but stops swapping to a device
|
|
|
|
* [5] SWAP_CTL: changes the priority of a swap device (new priority in
|
|
|
|
* "misc")
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* swapdev: describes a single swap partition/file
|
|
|
|
*
|
|
|
|
* note the following should be true:
|
|
|
|
* swd_inuse <= swd_nblks [number of blocks in use is <= total blocks]
|
|
|
|
* swd_nblks <= swd_mapsize [because mapsize includes miniroot+disklabel]
|
|
|
|
*/
|
|
|
|
struct swapdev {
	struct oswapent swd_ose;	/* overlaid old-style swapent (see uvm_swap_stats) */
#define	swd_dev		swd_ose.ose_dev		/* device id */
#define	swd_flags	swd_ose.ose_flags	/* flags:inuse/enable/fake */
#define	swd_priority	swd_ose.ose_priority	/* our priority */
	/* also: swd_ose.ose_nblks, swd_ose.ose_inuse */
	char			*swd_path;	/* saved pathname of device */
	int			swd_pathlen;	/* length of pathname */
	int			swd_npages;	/* #pages we can use */
	int			swd_npginuse;	/* #pages in use */
	int			swd_npgbad;	/* #pages bad */
	int			swd_drumoffset;	/* page0 offset in drum */
	int			swd_drumsize;	/* #pages in drum */
	struct extent		*swd_ex;	/* extent for this swapdev */
	char			swd_exname[12];	/* name of extent above */
	struct vnode		*swd_vp;	/* backing vnode */
	CIRCLEQ_ENTRY(swapdev)	swd_next;	/* priority circleq */

	/* the fields below are used only when swapping to a regular file */
	int			swd_bsize;	/* blocksize (bytes) */
	int			swd_maxactive;	/* max active i/o reqs */
	struct bufq_state	swd_tab;	/* buffer list */
	int			swd_active;	/* number of active buffers */
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* swap device priority entry; the list is kept sorted on `spi_priority'.
|
|
|
|
*/
|
|
|
|
struct swappri {
	int			spi_priority;     /* priority */
	CIRCLEQ_HEAD(spi_swapdev, swapdev)	spi_swapdev;
	/* circleq of swapdevs at this priority */
	LIST_ENTRY(swappri)	spi_swappri;      /* global list of pri's */
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The following two structures are used to keep track of data transfers
|
|
|
|
* on swap devices associated with regular files.
|
|
|
|
* NOTE: this code is more or less a copy of vnd.c; we use the same
|
|
|
|
* structure names here to ease porting..
|
|
|
|
*/
|
|
|
|
struct vndxfer {
	struct buf	*vx_bp;		/* Pointer to parent buffer */
	struct swapdev	*vx_sdp;	/* swapdev this transfer is for */
	int		vx_error;	/* first error seen on any component */
	int		vx_pending;	/* # of pending aux buffers */
	int		vx_flags;
#define VX_BUSY		1		/* transfer still being built up */
#define VX_DEAD		2		/* transfer aborted */
};
|
|
|
|
|
|
|
|
struct vndbuf {
	struct buf	vb_buf;		/* component i/o buffer */
	struct vndxfer	*vb_xfer;	/* parent transfer this buf belongs to */
};
|
|
|
|
|
1998-07-24 00:51:09 +04:00
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
/*
 * We keep a pool of vndbuf's and vndxfer structures.
 */
|
2001-05-10 03:20:59 +04:00
|
|
|
static struct pool vndxfer_pool;
|
|
|
|
static struct pool vndbuf_pool;
|
1998-07-24 00:51:09 +04:00
|
|
|
|
|
|
|
/*
 * getvndxfer: allocate a vndxfer structure from its pool.
 * pool_get() may be called from interrupt context elsewhere, so raise
 * to splbio() around the allocation to keep the pool consistent.
 */
#define	getvndxfer(vnx)	do {						\
	int s = splbio();						\
	vnx = pool_get(&vndxfer_pool, PR_WAITOK);			\
	splx(s);							\
} while (/*CONSTCOND*/ 0)

/*
 * putvndxfer: return a vndxfer structure to its pool.
 * wrapped in do/while(0) (like the get* macros) so the expansion is a
 * single statement and remains safe in unbraced if/else bodies.
 */
#define	putvndxfer(vnx)	do {						\
	pool_put(&vndxfer_pool, (void *)(vnx));				\
} while (/*CONSTCOND*/ 0)

/* getvndbuf: allocate a vndbuf structure from its pool (see getvndxfer). */
#define	getvndbuf(vbp)	do {						\
	int s = splbio();						\
	vbp = pool_get(&vndbuf_pool, PR_WAITOK);			\
	splx(s);							\
} while (/*CONSTCOND*/ 0)

/* putvndbuf: return a vndbuf structure to its pool. */
#define	putvndbuf(vbp)	do {						\
	pool_put(&vndbuf_pool, (void *)(vbp));				\
} while (/*CONSTCOND*/ 0)
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* local variables
|
|
|
|
*/
|
|
|
|
static struct extent *swapmap; /* controls the mapping of /dev/drum */
|
|
|
|
|
2003-02-01 09:23:35 +03:00
|
|
|
MALLOC_DEFINE(M_VMSWAP, "VM swap", "VM swap structures");
|
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
/* list of all active swap devices [by priority] */
|
|
|
|
LIST_HEAD(swap_priority, swappri);
|
|
|
|
static struct swap_priority swap_priority;
|
|
|
|
|
|
|
|
/* locks */
|
2001-05-26 20:32:40 +04:00
|
|
|
struct lock swap_syscall_lock;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* prototypes
|
|
|
|
*/
|
|
|
|
static struct swapdev *swapdrum_getsdp __P((int));
|
|
|
|
|
|
|
|
static struct swapdev *swaplist_find __P((struct vnode *, int));
|
2001-05-25 08:06:11 +04:00
|
|
|
static void swaplist_insert __P((struct swapdev *,
|
1998-02-05 09:25:08 +03:00
|
|
|
struct swappri *, int));
|
|
|
|
static void swaplist_trim __P((void));
|
|
|
|
|
2003-06-30 02:28:00 +04:00
|
|
|
static int swap_on __P((struct proc *, struct swapdev *));
|
|
|
|
static int swap_off __P((struct proc *, struct swapdev *));
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
static void sw_reg_strategy __P((struct swapdev *, struct buf *, int));
|
|
|
|
static void sw_reg_iodone __P((struct buf *));
|
|
|
|
static void sw_reg_start __P((struct swapdev *));
|
|
|
|
|
|
|
|
static int uvm_swap_io __P((struct vm_page **, int, int, int));
|
|
|
|
|
2002-09-06 17:18:43 +04:00
|
|
|
dev_type_read(swread);
|
|
|
|
dev_type_write(swwrite);
|
|
|
|
dev_type_strategy(swstrategy);
|
|
|
|
|
|
|
|
const struct bdevsw swap_bdevsw = {
|
|
|
|
noopen, noclose, swstrategy, noioctl, nodump, nosize,
|
|
|
|
};
|
|
|
|
|
|
|
|
const struct cdevsw swap_cdevsw = {
|
|
|
|
nullopen, nullclose, swread, swwrite, noioctl,
|
2002-10-23 13:10:23 +04:00
|
|
|
nostop, notty, nopoll, nommap, nokqfilter
|
2002-09-06 17:18:43 +04:00
|
|
|
};
|
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
/*
|
|
|
|
* uvm_swap_init: init the swap system data structures and locks
|
|
|
|
*
|
2001-05-25 08:06:11 +04:00
|
|
|
* => called at boot time from init_main.c after the filesystems
|
1998-02-05 09:25:08 +03:00
|
|
|
* are brought up (which happens after uvm_init())
|
|
|
|
*/
|
|
|
|
void
uvm_swap_init()
{
	UVMHIST_FUNC("uvm_swap_init");

	UVMHIST_CALLED(pdhist);
	/*
	 * first, init the swap list, its counter, and its lock.
	 * then get a handle on the vnode for /dev/drum by using
	 * its dev_t number ("swapdev", from MD conf.c).
	 */

	LIST_INIT(&swap_priority);
	uvmexp.nswapdev = 0;
	lockinit(&swap_syscall_lock, PVM, "swapsys", 0, 0);
	simple_lock_init(&uvm.swap_data_lock);

	if (bdevvp(swapdev, &swapdev_vp))
		panic("uvm_swap_init: can't get vnode for swap device");

	/*
	 * create swap block resource map to map /dev/drum. the range
	 * from 1 to INT_MAX allows 2 gigablocks of swap space. note
	 * that block 0 is reserved (used to indicate an allocation
	 * failure, or no allocation).
	 */
	swapmap = extent_create("swapmap", 1, INT_MAX,
	    M_VMSWAP, 0, 0, EX_NOWAIT);
	if (swapmap == 0)
		panic("uvm_swap_init: extent_create failed");

	/*
	 * allocate pools for structures used for swapping to files.
	 */

	pool_init(&vndxfer_pool, sizeof(struct vndxfer), 0, 0, 0,
	    "swp vnx", NULL);

	pool_init(&vndbuf_pool, sizeof(struct vndbuf), 0, 0, 0,
	    "swp vnd", NULL);

	/*
	 * done!
	 */
	UVMHIST_LOG(pdhist, "<- done", 0, 0, 0, 0);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* swaplist functions: functions that operate on the list of swap
|
|
|
|
* devices on the system.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* swaplist_insert: insert swap device "sdp" into the global list
|
|
|
|
*
|
1999-03-26 20:34:15 +03:00
|
|
|
* => caller must hold both swap_syscall_lock and uvm.swap_data_lock
|
1998-02-05 09:25:08 +03:00
|
|
|
* => caller must provide a newly malloc'd swappri structure (we will
|
|
|
|
* FREE it if we don't need it... this it to prevent malloc blocking
|
|
|
|
* here while adding swap)
|
|
|
|
*/
|
|
|
|
static void
swaplist_insert(sdp, newspp, priority)
	struct swapdev *sdp;
	struct swappri *newspp;
	int priority;
{
	struct swappri *spp, *pspp;
	UVMHIST_FUNC("swaplist_insert"); UVMHIST_CALLED(pdhist);

	/*
	 * find entry at or after which to insert the new device.
	 * "pspp" trails one entry behind "spp" (NULL means insert at head).
	 */
	pspp = NULL;
	LIST_FOREACH(spp, &swap_priority, spi_swappri) {
		if (priority <= spp->spi_priority)
			break;
		pspp = spp;
	}

	/*
	 * new priority?
	 */
	if (spp == NULL || spp->spi_priority != priority) {
		spp = newspp;	/* use newspp! */
		UVMHIST_LOG(pdhist, "created new swappri = %d",
		    priority, 0, 0, 0);

		spp->spi_priority = priority;
		CIRCLEQ_INIT(&spp->spi_swapdev);

		if (pspp)
			LIST_INSERT_AFTER(pspp, spp, spi_swappri);
		else
			LIST_INSERT_HEAD(&swap_priority, spp, spi_swappri);
	} else {
		/* we don't need a new priority structure, free it */
		FREE(newspp, M_VMSWAP);
	}

	/*
	 * priority found (or created). now insert on the priority's
	 * circleq list and bump the total number of swapdevs.
	 */
	sdp->swd_priority = priority;
	CIRCLEQ_INSERT_TAIL(&spp->spi_swapdev, sdp, swd_next);
	uvmexp.nswapdev++;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* swaplist_find: find and optionally remove a swap device from the
|
|
|
|
* global list.
|
|
|
|
*
|
1999-03-26 20:34:15 +03:00
|
|
|
* => caller must hold both swap_syscall_lock and uvm.swap_data_lock
|
1998-02-05 09:25:08 +03:00
|
|
|
* => we return the swapdev we found (and removed)
|
|
|
|
*/
|
|
|
|
static struct swapdev *
|
|
|
|
swaplist_find(vp, remove)
|
|
|
|
struct vnode *vp;
|
|
|
|
boolean_t remove;
|
|
|
|
{
|
|
|
|
struct swapdev *sdp;
|
|
|
|
struct swappri *spp;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* search the lists for the requested vp
|
|
|
|
*/
|
2001-11-01 06:49:30 +03:00
|
|
|
|
|
|
|
LIST_FOREACH(spp, &swap_priority, spi_swappri) {
|
|
|
|
CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
|
1998-02-05 09:25:08 +03:00
|
|
|
if (sdp->swd_vp == vp) {
|
|
|
|
if (remove) {
|
|
|
|
CIRCLEQ_REMOVE(&spp->spi_swapdev,
|
|
|
|
sdp, swd_next);
|
|
|
|
uvmexp.nswapdev--;
|
|
|
|
}
|
|
|
|
return(sdp);
|
|
|
|
}
|
2001-11-01 06:49:30 +03:00
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* swaplist_trim: scan priority list for empty priority entries and kill
|
|
|
|
* them.
|
|
|
|
*
|
1999-03-26 20:34:15 +03:00
|
|
|
* => caller must hold both swap_syscall_lock and uvm.swap_data_lock
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
|
|
|
static void
|
|
|
|
swaplist_trim()
|
|
|
|
{
|
|
|
|
struct swappri *spp, *nextspp;
|
|
|
|
|
2000-01-11 09:57:49 +03:00
|
|
|
for (spp = LIST_FIRST(&swap_priority); spp != NULL; spp = nextspp) {
|
|
|
|
nextspp = LIST_NEXT(spp, spi_swappri);
|
|
|
|
if (CIRCLEQ_FIRST(&spp->spi_swapdev) !=
|
|
|
|
(void *)&spp->spi_swapdev)
|
1998-02-05 09:25:08 +03:00
|
|
|
continue;
|
|
|
|
LIST_REMOVE(spp, spi_swappri);
|
2000-01-11 09:57:49 +03:00
|
|
|
free(spp, M_VMSWAP);
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* swapdrum_getsdp: given a page offset in /dev/drum, convert it back
|
|
|
|
* to the "swapdev" that maps that section of the drum.
|
|
|
|
*
|
|
|
|
* => each swapdev takes one big contig chunk of the drum
|
1999-03-26 20:34:15 +03:00
|
|
|
* => caller must hold uvm.swap_data_lock
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
|
|
|
static struct swapdev *
|
|
|
|
swapdrum_getsdp(pgno)
|
|
|
|
int pgno;
|
|
|
|
{
|
|
|
|
struct swapdev *sdp;
|
|
|
|
struct swappri *spp;
|
2001-05-25 08:06:11 +04:00
|
|
|
|
2001-11-01 06:49:30 +03:00
|
|
|
LIST_FOREACH(spp, &swap_priority, spi_swappri) {
|
|
|
|
CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
|
2001-05-09 23:21:02 +04:00
|
|
|
if (sdp->swd_flags & SWF_FAKE)
|
|
|
|
continue;
|
1998-02-05 09:25:08 +03:00
|
|
|
if (pgno >= sdp->swd_drumoffset &&
|
|
|
|
pgno < (sdp->swd_drumoffset + sdp->swd_drumsize)) {
|
|
|
|
return sdp;
|
|
|
|
}
|
2001-05-09 23:21:02 +04:00
|
|
|
}
|
2001-11-01 06:49:30 +03:00
|
|
|
}
|
1998-02-05 09:25:08 +03:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* sys_swapctl: main entry point for swapctl(2) system call
|
|
|
|
* [with two helper functions: swap_on and swap_off]
|
|
|
|
*/
|
|
|
|
int
sys_swapctl(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_swapctl_args /* {
		syscallarg(int) cmd;
		syscallarg(void *) arg;
		syscallarg(int) misc;
	} */ *uap = (struct sys_swapctl_args *)v;
	struct proc *p = l->l_proc;
	struct vnode *vp;
	struct nameidata nd;
	struct swappri *spp;
	struct swapdev *sdp;
	struct swapent *sep;
	char	userpath[PATH_MAX + 1];
	size_t	len;
	int	error, misc;
	int	priority;
	UVMHIST_FUNC("sys_swapctl"); UVMHIST_CALLED(pdhist);

	misc = SCARG(uap, misc);

	/*
	 * ensure serialized syscall access by grabbing the swap_syscall_lock
	 */
	lockmgr(&swap_syscall_lock, LK_EXCLUSIVE, NULL);

	/*
	 * we handle the non-priv NSWAP and STATS request first.
	 *
	 * SWAP_NSWAP: return number of config'd swap devices
	 * [can also be obtained with uvmexp sysctl]
	 */
	if (SCARG(uap, cmd) == SWAP_NSWAP) {
		UVMHIST_LOG(pdhist, "<- done SWAP_NSWAP=%d", uvmexp.nswapdev,
		    0, 0, 0);
		*retval = uvmexp.nswapdev;
		error = 0;
		goto out;
	}

	/*
	 * SWAP_STATS: get stats on current # of configured swap devs
	 *
	 * note that the swap_priority list can't change as long
	 * as we are holding the swap_syscall_lock.  we don't want
	 * to grab the uvm.swap_data_lock because we may fault&sleep during
	 * copyout() and we don't want to be holding that lock then!
	 */
	if (SCARG(uap, cmd) == SWAP_STATS
#if defined(COMPAT_13)
	    || SCARG(uap, cmd) == SWAP_OSTATS
#endif
	    ) {
		/* clamp to the number of devices actually configured */
		misc = MIN(uvmexp.nswapdev, misc);
#if defined(COMPAT_13)
		if (SCARG(uap, cmd) == SWAP_OSTATS)
			len = sizeof(struct oswapent) * misc;
		else
#endif
			len = sizeof(struct swapent) * misc;
		sep = (struct swapent *)malloc(len, M_TEMP, M_WAITOK);

		uvm_swap_stats(SCARG(uap, cmd), sep, misc, retval);
		error = copyout(sep, (void *)SCARG(uap, arg), len);

		free(sep, M_TEMP);
		UVMHIST_LOG(pdhist, "<- done SWAP_STATS", 0, 0, 0, 0);
		goto out;
	}
	if (SCARG(uap, cmd) == SWAP_GETDUMPDEV) {
		dev_t	*devp = (dev_t *)SCARG(uap, arg);

		error = copyout(&dumpdev, devp, sizeof(dumpdev));
		goto out;
	}

	/*
	 * all other requests require superuser privs.   verify.
	 */
	if ((error = suser(p->p_ucred, &p->p_acflag)))
		goto out;

	/*
	 * at this point we expect a path name in arg.   we will
	 * use namei() to gain a vnode reference (vref), and lock
	 * the vnode (VOP_LOCK).
	 *
	 * XXX: a NULL arg means use the root vnode pointer (e.g. for
	 * miniroot)
	 */
	if (SCARG(uap, arg) == NULL) {
		vp = rootvp;		/* miniroot */
		if (vget(vp, LK_EXCLUSIVE)) {
			error = EBUSY;
			goto out;
		}
		if (SCARG(uap, cmd) == SWAP_ON &&
		    copystr("miniroot", userpath, sizeof userpath, &len))
			panic("swapctl: miniroot copy failed");
	} else {
		int	space;
		char	*where;

		if (SCARG(uap, cmd) == SWAP_ON) {
			/* copy the path in now so we can stash it in the
			 * swapdev later (namei is given a kernel pointer) */
			if ((error = copyinstr(SCARG(uap, arg), userpath,
			    sizeof userpath, &len)))
				goto out;
			space = UIO_SYSSPACE;
			where = userpath;
		} else {
			space = UIO_USERSPACE;
			where = (char *)SCARG(uap, arg);
		}
		NDINIT(&nd, LOOKUP, FOLLOW|LOCKLEAF, space, where, p);
		if ((error = namei(&nd)))
			goto out;
		vp = nd.ni_vp;
	}
	/* note: "vp" is referenced and locked */

	error = 0;		/* assume no error */
	switch(SCARG(uap, cmd)) {

	case SWAP_DUMPDEV:
		if (vp->v_type != VBLK) {
			error = ENOTBLK;
			break;
		}
		dumpdev = vp->v_rdev;
		cpu_dumpconf();
		break;

	case SWAP_CTL:
		/*
		 * get new priority, remove old entry (if any) and then
		 * reinsert it in the correct place.  finally, prune out
		 * any empty priority structures.
		 */
		priority = SCARG(uap, misc);
		spp = malloc(sizeof *spp, M_VMSWAP, M_WAITOK);
		simple_lock(&uvm.swap_data_lock);
		if ((sdp = swaplist_find(vp, 1)) == NULL) {
			error = ENOENT;
		} else {
			swaplist_insert(sdp, spp, priority);
			swaplist_trim();
		}
		simple_unlock(&uvm.swap_data_lock);
		if (error)
			free(spp, M_VMSWAP);
		break;

	case SWAP_ON:

		/*
		 * check for duplicates.   if none found, then insert a
		 * dummy entry on the list to prevent someone else from
		 * trying to enable this device while we are working on
		 * it.
		 */

		priority = SCARG(uap, misc);
		sdp = malloc(sizeof *sdp, M_VMSWAP, M_WAITOK);
		spp = malloc(sizeof *spp, M_VMSWAP, M_WAITOK);
		memset(sdp, 0, sizeof(*sdp));
		sdp->swd_flags = SWF_FAKE;
		sdp->swd_vp = vp;
		sdp->swd_dev = (vp->v_type == VBLK) ? vp->v_rdev : NODEV;
		bufq_alloc(&sdp->swd_tab, BUFQ_DISKSORT|BUFQ_SORT_RAWBLOCK);
		simple_lock(&uvm.swap_data_lock);
		if (swaplist_find(vp, 0) != NULL) {
			error = EBUSY;
			simple_unlock(&uvm.swap_data_lock);
			bufq_free(&sdp->swd_tab);
			free(sdp, M_VMSWAP);
			free(spp, M_VMSWAP);
			break;
		}
		swaplist_insert(sdp, spp, priority);
		simple_unlock(&uvm.swap_data_lock);

		sdp->swd_pathlen = len;
		sdp->swd_path = malloc(sdp->swd_pathlen, M_VMSWAP, M_WAITOK);
		if (copystr(userpath, sdp->swd_path, sdp->swd_pathlen, 0) != 0)
			panic("swapctl: copystr");

		/*
		 * we've now got a FAKE placeholder in the swap list.
		 * now attempt to enable swap on it.  if we fail, undo
		 * what we've done and kill the fake entry we just inserted.
		 * if swap_on is a success, it will clear the SWF_FAKE flag
		 */

		if ((error = swap_on(p, sdp)) != 0) {
			simple_lock(&uvm.swap_data_lock);
			(void) swaplist_find(vp, 1);  /* kill fake entry */
			swaplist_trim();
			simple_unlock(&uvm.swap_data_lock);
			bufq_free(&sdp->swd_tab);
			free(sdp->swd_path, M_VMSWAP);
			free(sdp, M_VMSWAP);
			break;
		}
		break;

	case SWAP_OFF:
		simple_lock(&uvm.swap_data_lock);
		if ((sdp = swaplist_find(vp, 0)) == NULL) {
			simple_unlock(&uvm.swap_data_lock);
			error = ENXIO;
			break;
		}

		/*
		 * If a device isn't in use or enabled, we
		 * can't stop swapping from it (again).
		 */
		if ((sdp->swd_flags & (SWF_INUSE|SWF_ENABLE)) == 0) {
			simple_unlock(&uvm.swap_data_lock);
			error = EBUSY;
			break;
		}

		/*
		 * do the real work.
		 */
		error = swap_off(p, sdp);
		break;

	default:
		error = EINVAL;
	}

	/*
	 * done!  release the ref gained by namei() and unlock.
	 */
	vput(vp);

out:
	lockmgr(&swap_syscall_lock, LK_RELEASE, NULL);

	UVMHIST_LOG(pdhist, "<- done!  error=%d", error, 0, 0, 0);
	return (error);
}
|
|
|
|
|
2002-03-18 14:43:01 +03:00
|
|
|
/*
|
|
|
|
* swap_stats: implements swapctl(SWAP_STATS). The function is kept
|
|
|
|
* away from sys_swapctl() in order to allow COMPAT_* swapctl()
|
|
|
|
* emulation to use it directly without going through sys_swapctl().
|
|
|
|
* The problem with using sys_swapctl() there is that it involves
|
|
|
|
* copying the swapent array to the stackgap, and this array's size
|
|
|
|
* is not known at build time. Hence it would not be possible to
|
|
|
|
* ensure it would fit in the stackgap in any case.
|
|
|
|
*/
|
|
|
|
void
uvm_swap_stats(cmd, sep, sec, retval)
	int cmd;
	struct swapent *sep;
	int sec;
	register_t *retval;
{
	struct swappri *spp;
	struct swapdev *sdp;
	int count = 0;

	LIST_FOREACH(spp, &swap_priority, spi_swappri) {
		/* stop early once "sec" entries have been emitted */
		for (sdp = CIRCLEQ_FIRST(&spp->spi_swapdev);
		     sdp != (void *)&spp->spi_swapdev && sec-- > 0;
		     sdp = CIRCLEQ_NEXT(sdp, swd_next)) {
			/*
			 * backwards compatibility for system call.
			 * note that we use 'struct oswapent' as an
			 * overlay into both 'struct swapdev' and
			 * the userland 'struct swapent', as we
			 * want to retain backwards compatibility
			 * with NetBSD 1.3.
			 */
			sdp->swd_ose.ose_inuse =
			    btodb((u_int64_t)sdp->swd_npginuse <<
			    PAGE_SHIFT);
			(void)memcpy(sep, &sdp->swd_ose,
			    sizeof(struct oswapent));

			/* now copy out the path if necessary */
#if defined(COMPAT_13)
			if (cmd == SWAP_STATS)
#endif
				(void)memcpy(&sep->se_path, sdp->swd_path,
				    sdp->swd_pathlen);

			count++;
			/* old-style entries are smaller; advance by the
			 * record size that was actually written */
#if defined(COMPAT_13)
			if (cmd == SWAP_OSTATS)
				sep = (struct swapent *)
				    ((struct oswapent *)sep + 1);
			else
#endif
				sep++;
		}
	}

	*retval = count;
	return;
}
|
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
/*
 * swap_on: attempt to enable a swapdev for swapping.  note that the
 * swapdev is already on the global list, but disabled (marked
 * SWF_FAKE).
 *
 * => we avoid the start of the disk (to protect disk labels)
 * => we also avoid the miniroot, if we are swapping to root.
 * => caller should leave uvm.swap_data_lock unlocked, we may lock it
 *	if needed.
 *
 * => returns 0 on success, errno on failure; on failure the vnode is
 *    closed again (unless it is rootvp) and any extent is destroyed.
 */
static int
swap_on(p, sdp)
	struct proc *p;
	struct swapdev *sdp;
{
	static int count = 0;	/* static: used to name each extent uniquely */
	struct vnode *vp;
	int error, npages, nblocks, size;
	long addr;
	u_long result;
	struct vattr va;
#ifdef NFS
	extern int (**nfsv2_vnodeop_p) __P((void *));
#endif /* NFS */
	const struct bdevsw *bdev;
	dev_t dev;
	UVMHIST_FUNC("swap_on"); UVMHIST_CALLED(pdhist);

	/*
	 * we want to enable swapping on sdp.   the swd_vp contains
	 * the vnode we want (locked and ref'd), and the swd_dev
	 * contains the dev_t of the file, if it a block device.
	 */

	vp = sdp->swd_vp;
	dev = sdp->swd_dev;

	/*
	 * open the swap file (mostly useful for block device files to
	 * let device driver know what is up).
	 *
	 * we skip the open/close for root on swap because the root
	 * has already been opened when root was mounted (mountroot).
	 */
	if (vp != rootvp) {
		if ((error = VOP_OPEN(vp, FREAD|FWRITE, p->p_ucred, p)))
			return (error);
	}

	/* XXX this only works for block devices */
	UVMHIST_LOG(pdhist, "  dev=%d, major(dev)=%d", dev, major(dev), 0,0);

	/*
	 * we now need to determine the size of the swap area.   for
	 * block specials we can call the d_psize function.
	 * for normal files, we must stat [get attrs].
	 *
	 * we put the result in nblks.
	 * for normal files, we also want the filesystem block size
	 * (which we get with statfs).
	 */
	switch (vp->v_type) {
	case VBLK:
		/* d_psize reports the device size in disk blocks, or -1 */
		bdev = bdevsw_lookup(dev);
		if (bdev == NULL || bdev->d_psize == NULL ||
		    (nblocks = (*bdev->d_psize)(dev)) == -1) {
			error = ENXIO;
			goto bad;
		}
		break;

	case VREG:
		if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)))
			goto bad;
		nblocks = (int)btodb(va.va_size);
		if ((error =
		     VFS_STATFS(vp->v_mount, &vp->v_mount->mnt_stat, p)) != 0)
			goto bad;

		sdp->swd_bsize = vp->v_mount->mnt_stat.f_iosize;
		/*
		 * limit the max # of outstanding I/O requests we issue
		 * at any one time.   take it easy on NFS servers.
		 */
#ifdef NFS
		if (vp->v_op == nfsv2_vnodeop_p)
			sdp->swd_maxactive = 2; /* XXX */
		else
#endif /* NFS */
			sdp->swd_maxactive = 8; /* XXX */
		break;

	default:
		error = ENXIO;
		goto bad;
	}

	/*
	 * save nblocks in a safe place and convert to pages.
	 */

	sdp->swd_ose.ose_nblks = nblocks;
	npages = dbtob((u_int64_t)nblocks) >> PAGE_SHIFT;

	/*
	 * for block special files, we want to make sure that leave
	 * the disklabel and bootblocks alone, so we arrange to skip
	 * over them (arbitrarily choosing to skip PAGE_SIZE bytes).
	 * note that because of this the "size" can be less than the
	 * actual number of blocks on the device.
	 */
	if (vp->v_type == VBLK) {
		/* we use pages 1 to (size - 1) [inclusive] */
		size = npages - 1;
		addr = 1;
	} else {
		/* we use pages 0 to (size - 1) [inclusive] */
		size = npages;
		addr = 0;
	}

	/*
	 * make sure we have enough blocks for a reasonable sized swap
	 * area.   we want at least one page.
	 */

	if (size < 1) {
		UVMHIST_LOG(pdhist, "  size <= 1!!", 0, 0, 0, 0);
		error = EINVAL;
		goto bad;
	}

	UVMHIST_LOG(pdhist, "  dev=%x: size=%d addr=%ld\n", dev, size, addr, 0);

	/*
	 * now we need to allocate an extent to manage this swap device
	 */
	snprintf(sdp->swd_exname, sizeof(sdp->swd_exname), "swap0x%04x",
	    count++);

	/* note that extent_create's 3rd arg is inclusive, thus "- 1" */
	sdp->swd_ex = extent_create(sdp->swd_exname, 0, npages - 1, M_VMSWAP,
	    0, 0, EX_WAITOK);
	/* allocate the `saved' region from the extent so it won't be used */
	if (addr) {
		if (extent_alloc_region(sdp->swd_ex, 0, addr, EX_WAITOK))
			panic("disklabel region");
	}

	/*
	 * if the vnode we are swapping to is the root vnode
	 * (i.e. we are swapping to the miniroot) then we want
	 * to make sure we don't overwrite it.   do a statfs to
	 * find its size and skip over it.
	 */
	if (vp == rootvp) {
		struct mount *mp;
		struct statfs *sp;
		int rootblocks, rootpages;

		mp = rootvnode->v_mount;
		sp = &mp->mnt_stat;
		rootblocks = sp->f_blocks * btodb(sp->f_bsize);
		/*
		 * XXX: sp->f_blocks isn't the total number of
		 * blocks in the filesystem, it's the number of
		 * data blocks.  so, our rootblocks almost
		 * definitely underestimates the total size
		 * of the filesystem - how badly depends on the
		 * details of the filesystem type.  there isn't
		 * an obvious way to deal with this cleanly
		 * and perfectly, so for now we just pad our
		 * rootblocks estimate with an extra 5 percent.
		 */
		/* >>5 + >>6 + >>7 == ~5.5% padding */
		rootblocks += (rootblocks >> 5) +
			(rootblocks >> 6) +
			(rootblocks >> 7);
		rootpages = round_page(dbtob(rootblocks)) >> PAGE_SHIFT;
		if (rootpages > size)
			panic("swap_on: miniroot larger than swap?");

		/* reserve the miniroot's pages so they are never allocated */
		if (extent_alloc_region(sdp->swd_ex, addr,
		    rootpages, EX_WAITOK))
			panic("swap_on: unable to preserve miniroot");

		size -= rootpages;
		printf("Preserved %d pages of miniroot ", rootpages);
		printf("leaving %d pages of swap\n", size);
	}

	/*
	 * try to add anons to reflect the new swap space.
	 */

	error = uvm_anon_add(size);
	if (error) {
		goto bad;
	}

	/*
	 * add a ref to vp to reflect usage as a swap device.
	 */
	vref(vp);

	/*
	 * now add the new swapdev to the drum and enable.
	 */
	if (extent_alloc(swapmap, npages, EX_NOALIGN, EX_NOBOUNDARY,
	    EX_WAITOK, &result))
		panic("swapdrum_add");

	sdp->swd_drumoffset = (int)result;
	sdp->swd_drumsize = npages;
	sdp->swd_npages = size;
	simple_lock(&uvm.swap_data_lock);
	sdp->swd_flags &= ~SWF_FAKE;	/* going live */
	sdp->swd_flags |= (SWF_INUSE|SWF_ENABLE);
	uvmexp.swpages += size;
	uvmexp.swpgavail += size;
	simple_unlock(&uvm.swap_data_lock);
	return (0);

	/*
	 * failure: clean up and return error.
	 */

bad:
	if (sdp->swd_ex) {
		extent_destroy(sdp->swd_ex);
	}
	if (vp != rootvp) {
		(void)VOP_CLOSE(vp, FREAD|FWRITE, p->p_ucred, p);
	}
	return (error);
}
|
|
|
|
|
|
|
|
/*
 * swap_off: stop swapping on swapdev
 *
 * => swap data should be locked, we will unlock.
 * => returns 0 on success; ENOMEM if the pages on this device could
 *    not all be brought back in (the device is re-enabled in that case).
 */
static int
swap_off(p, sdp)
	struct proc *p;
	struct swapdev *sdp;
{
	int npages = sdp->swd_npages;

	UVMHIST_FUNC("swap_off"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "  dev=%x, npages=%d", sdp->swd_dev,npages,0,0);

	/* disable the swap area being removed */
	sdp->swd_flags &= ~SWF_ENABLE;
	uvmexp.swpgavail -= npages;
	simple_unlock(&uvm.swap_data_lock);

	/*
	 * the idea is to find all the pages that are paged out to this
	 * device, and page them all in.  in uvm, swap-backed pageable
	 * memory can take two forms: aobjs and anons.  call the
	 * swapoff hook for each subsystem to bring in pages.
	 */

	if (uao_swap_off(sdp->swd_drumoffset,
			 sdp->swd_drumoffset + sdp->swd_drumsize) ||
	    anon_swap_off(sdp->swd_drumoffset,
			  sdp->swd_drumoffset + sdp->swd_drumsize)) {

		/* could not page everything in: undo the disable and fail */
		simple_lock(&uvm.swap_data_lock);
		sdp->swd_flags |= SWF_ENABLE;
		uvmexp.swpgavail += npages;
		simple_unlock(&uvm.swap_data_lock);
		return ENOMEM;
	}
	/* everything left in use must be pages we marked bad earlier */
	KASSERT(sdp->swd_npginuse == sdp->swd_npgbad);

	/*
	 * done with the vnode.
	 * drop our ref on the vnode before calling VOP_CLOSE()
	 * so that spec_close() can tell if this is the last close.
	 */
	vrele(sdp->swd_vp);
	if (sdp->swd_vp != rootvp) {
		(void) VOP_CLOSE(sdp->swd_vp, FREAD|FWRITE, p->p_ucred, p);
	}

	/* remove anons from the system */
	uvm_anon_remove(npages);

	simple_lock(&uvm.swap_data_lock);
	uvmexp.swpages -= npages;
	uvmexp.swpginuse -= sdp->swd_npgbad;

	if (swaplist_find(sdp->swd_vp, 1) == NULL)
		panic("swap_off: swapdev not in list");
	swaplist_trim();
	simple_unlock(&uvm.swap_data_lock);

	/*
	 * free all resources!
	 */
	extent_free(swapmap, sdp->swd_drumoffset, sdp->swd_drumsize,
		    EX_WAITOK);
	extent_destroy(sdp->swd_ex);
	bufq_free(&sdp->swd_tab);
	free(sdp, M_VMSWAP);
	return (0);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* /dev/drum interface and i/o functions
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* swread: the read function for the drum (just a call to physio)
|
|
|
|
*/
|
|
|
|
/*ARGSUSED*/
|
|
|
|
int
|
|
|
|
swread(dev, uio, ioflag)
|
|
|
|
dev_t dev;
|
|
|
|
struct uio *uio;
|
|
|
|
int ioflag;
|
|
|
|
{
|
|
|
|
UVMHIST_FUNC("swread"); UVMHIST_CALLED(pdhist);
|
|
|
|
|
|
|
|
UVMHIST_LOG(pdhist, " dev=%x offset=%qx", dev, uio->uio_offset, 0, 0);
|
|
|
|
return (physio(swstrategy, NULL, dev, B_READ, minphys, uio));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* swwrite: the write function for the drum (just a call to physio)
|
|
|
|
*/
|
|
|
|
/*ARGSUSED*/
|
|
|
|
int
|
|
|
|
swwrite(dev, uio, ioflag)
|
|
|
|
dev_t dev;
|
|
|
|
struct uio *uio;
|
|
|
|
int ioflag;
|
|
|
|
{
|
|
|
|
UVMHIST_FUNC("swwrite"); UVMHIST_CALLED(pdhist);
|
|
|
|
|
|
|
|
UVMHIST_LOG(pdhist, " dev=%x offset=%qx", dev, uio->uio_offset, 0, 0);
|
|
|
|
return (physio(swstrategy, NULL, dev, B_WRITE, minphys, uio));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * swstrategy: perform I/O on the drum
 *
 * => we must map the i/o request from the drum to the correct swapdev.
 * => on error the buffer is completed immediately via biodone() with
 *    B_ERROR set.
 */
void
swstrategy(bp)
	struct buf *bp;
{
	struct swapdev *sdp;
	struct vnode *vp;
	int s, pageno, bn;
	UVMHIST_FUNC("swstrategy"); UVMHIST_CALLED(pdhist);

	/*
	 * convert block number to swapdev.   note that swapdev can't
	 * be yanked out from under us because we are holding resources
	 * in it (i.e. the blocks we are doing I/O on).
	 */
	pageno = dbtob((int64_t)bp->b_blkno) >> PAGE_SHIFT;
	simple_lock(&uvm.swap_data_lock);
	sdp = swapdrum_getsdp(pageno);
	simple_unlock(&uvm.swap_data_lock);
	if (sdp == NULL) {
		/* no device owns this drum page: fail the request */
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR;
		biodone(bp);
		UVMHIST_LOG(pdhist, "  failed to get swap device", 0, 0, 0, 0);
		return;
	}

	/*
	 * convert drum page number to block number on this swapdev.
	 */

	pageno -= sdp->swd_drumoffset;	/* page # on swapdev */
	bn = btodb((u_int64_t)pageno << PAGE_SHIFT); /* convert to diskblock */

	UVMHIST_LOG(pdhist, "  %s: mapoff=%x bn=%x bcount=%ld",
	    ((bp->b_flags & B_READ) == 0) ? "write" : "read",
	    sdp->swd_drumoffset, bn, bp->b_bcount);

	/*
	 * for block devices we finish up here.
	 * for regular files we have to do more work which we delegate
	 * to sw_reg_strategy().
	 */

	switch (sdp->swd_vp->v_type) {
	default:
		panic("swstrategy: vnode type 0x%x", sdp->swd_vp->v_type);

	case VBLK:
		/*
		 * must convert "bp" from an I/O on /dev/drum to an I/O
		 * on the swapdev (sdp).
		 */
		s = splbio();
		bp->b_blkno = bn;		/* swapdev block number */
		vp = sdp->swd_vp;		/* swapdev vnode pointer */
		bp->b_dev = sdp->swd_dev;	/* swapdev dev_t */

		/*
		 * if we are doing a write, we have to redirect the i/o on
		 * drum's v_numoutput counter to the swapdevs.
		 */
		if ((bp->b_flags & B_READ) == 0) {
			vwakeup(bp);	/* kills one 'v_numoutput' on drum */
			V_INCR_NUMOUTPUT(vp);	/* put it on swapdev */
		}

		/*
		 * finally plug in swapdev vnode and start I/O
		 */
		bp->b_vp = vp;
		splx(s);
		VOP_STRATEGY(bp);
		return;

	case VREG:
		/*
		 * delegate to sw_reg_strategy function.
		 */
		sw_reg_strategy(sdp, bp, bn);
		return;
	}
	/* NOTREACHED */
}
|
|
|
|
|
|
|
|
/*
 * sw_reg_strategy: handle swap i/o to regular files
 *
 * => splits the drum request into per-filesystem-block transfers,
 *    each issued through its own vndbuf; the vndxfer tracks them all
 *    and the parent buffer is biodone()'d when the last one finishes
 *    (see sw_reg_iodone()).
 */
static void
sw_reg_strategy(sdp, bp, bn)
	struct swapdev *sdp;
	struct buf *bp;
	int bn;
{
	struct vnode *vp;
	struct vndxfer *vnx;
	daddr_t nbn;
	caddr_t addr;
	off_t byteoff;
	int s, off, nra, error, sz, resid;
	UVMHIST_FUNC("sw_reg_strategy"); UVMHIST_CALLED(pdhist);

	/*
	 * allocate a vndxfer head for this transfer and point it to
	 * our buffer.
	 */
	getvndxfer(vnx);
	vnx->vx_flags = VX_BUSY;
	vnx->vx_error = 0;
	vnx->vx_pending = 0;
	vnx->vx_bp = bp;
	vnx->vx_sdp = sdp;

	/*
	 * setup for main loop where we read filesystem blocks into
	 * our buffer.
	 */
	error = 0;
	bp->b_resid = bp->b_bcount;	/* nothing transfered yet! */
	addr = bp->b_data;		/* current position in buffer */
	byteoff = dbtob((u_int64_t)bn);

	for (resid = bp->b_resid; resid; resid -= sz) {
		struct vndbuf	*nbp;

		/*
		 * translate byteoffset into block number.  return values:
		 *   vp = vnode of underlying device
		 *  nbn = new block number (on underlying vnode dev)
		 *  nra = num blocks we can read-ahead (excludes requested
		 *	block)
		 */
		nra = 0;
		error = VOP_BMAP(sdp->swd_vp, byteoff / sdp->swd_bsize,
				 	&vp, &nbn, &nra);

		if (error == 0 && nbn == (daddr_t)-1) {
			/*
			 * this used to just set error, but that doesn't
			 * do the right thing.  Instead, it causes random
			 * memory errors.  The panic() should remain until
			 * this condition doesn't destabilize the system.
			 */
#if 1
			panic("sw_reg_strategy: swap to sparse file");
#else
			error = EIO;	/* failure */
#endif
		}

		/*
		 * punt if there was an error or a hole in the file.
		 * we must wait for any i/o ops we have already started
		 * to finish before returning.
		 *
		 * XXX we could deal with holes here but it would be
		 * a hassle (in the write case).
		 */
		if (error) {
			s = splbio();
			vnx->vx_error = error;	/* pass error up */
			goto out;
		}

		/*
		 * compute the size ("sz") of this transfer (in bytes).
		 */
		/* off: byte offset within the filesystem block */
		off = byteoff % sdp->swd_bsize;
		sz = (1 + nra) * sdp->swd_bsize - off;
		if (sz > resid)
			sz = resid;

		UVMHIST_LOG(pdhist, "sw_reg_strategy: "
			    "vp %p/%p offset 0x%x/0x%x",
			    sdp->swd_vp, vp, byteoff, nbn);

		/*
		 * now get a buf structure.   note that the vb_buf is
		 * at the front of the nbp structure so that you can
		 * cast pointers between the two structure easily.
		 */
		getvndbuf(nbp);
		BUF_INIT(&nbp->vb_buf);
		nbp->vb_buf.b_flags    = bp->b_flags | B_CALL;
		nbp->vb_buf.b_bcount   = sz;
		nbp->vb_buf.b_bufsize  = sz;
		nbp->vb_buf.b_error    = 0;
		nbp->vb_buf.b_data     = addr;
		nbp->vb_buf.b_lblkno   = 0;
		nbp->vb_buf.b_blkno    = nbn + btodb(off);
		nbp->vb_buf.b_rawblkno = nbp->vb_buf.b_blkno;
		nbp->vb_buf.b_iodone   = sw_reg_iodone;
		nbp->vb_buf.b_vp       = vp;
		if (vp->v_type == VBLK) {
			nbp->vb_buf.b_dev = vp->v_rdev;
		}

		nbp->vb_xfer = vnx;	/* patch it back in to vnx */

		/*
		 * Just sort by block number
		 */
		s = splbio();
		/* an earlier child may have failed while we were setting up */
		if (vnx->vx_error != 0) {
			putvndbuf(nbp);
			goto out;
		}
		vnx->vx_pending++;

		/* sort it in and start I/O if we are not over our limit */
		BUFQ_PUT(&sdp->swd_tab, &nbp->vb_buf);
		sw_reg_start(sdp);
		splx(s);

		/*
		 * advance to the next I/O
		 */
		byteoff += sz;
		addr += sz;
	}

	s = splbio();

out: /* Arrive here at splbio */
	/* drop VX_BUSY: children may now finalize the parent buffer */
	vnx->vx_flags &= ~VX_BUSY;
	if (vnx->vx_pending == 0) {
		/* no children outstanding: finish the parent ourselves */
		if (vnx->vx_error != 0) {
			bp->b_error = vnx->vx_error;
			bp->b_flags |= B_ERROR;
		}
		putvndxfer(vnx);
		biodone(bp);
	}
	splx(s);
}
|
|
|
|
|
|
|
|
/*
 * sw_reg_start: start an I/O request on the requested swapdev
 *
 * => reqs are sorted by b_rawblkno (above)
 * => NOTE(review): callers appear to invoke this at splbio — confirm;
 *    the queue and swd_active are manipulated without further locking.
 */
static void
sw_reg_start(sdp)
	struct swapdev *sdp;
{
	struct buf	*bp;
	UVMHIST_FUNC("sw_reg_start"); UVMHIST_CALLED(pdhist);

	/* recursion control */
	if ((sdp->swd_flags & SWF_BUSY) != 0)
		return;

	sdp->swd_flags |= SWF_BUSY;

	/* issue queued requests until we hit the per-device limit */
	while (sdp->swd_active < sdp->swd_maxactive) {
		bp = BUFQ_GET(&sdp->swd_tab);
		if (bp == NULL)
			break;
		sdp->swd_active++;

		UVMHIST_LOG(pdhist,
		    "sw_reg_start:  bp %p vp %p blkno %p cnt %lx",
		    bp, bp->b_vp, bp->b_blkno, bp->b_bcount);
		/* writes must be counted on the target vnode */
		if ((bp->b_flags & B_READ) == 0)
			V_INCR_NUMOUTPUT(bp->b_vp);

		VOP_STRATEGY(bp);
	}
	sdp->swd_flags &= ~SWF_BUSY;
}
|
|
|
|
|
|
|
|
/*
 * sw_reg_iodone: one of our i/o's has completed and needs post-i/o cleanup
 *
 * => note that we can recover the vndbuf struct by casting the buf ptr
 * => decrements the transfer's pending count and completes the parent
 *    buffer once all children are in (or on error, once the submitter
 *    has also finished queueing, i.e. VX_BUSY is clear).
 */
static void
sw_reg_iodone(bp)
	struct buf *bp;
{
	struct vndbuf *vbp = (struct vndbuf *) bp;
	struct vndxfer *vnx = vbp->vb_xfer;
	struct buf *pbp = vnx->vx_bp;		/* parent buffer */
	struct swapdev	*sdp = vnx->vx_sdp;
	int s, resid, error;
	UVMHIST_FUNC("sw_reg_iodone"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "  vbp=%p vp=%p blkno=%x addr=%p",
	    vbp, vbp->vb_buf.b_vp, vbp->vb_buf.b_blkno, vbp->vb_buf.b_data);
	UVMHIST_LOG(pdhist, "  cnt=%lx resid=%lx",
	    vbp->vb_buf.b_bcount, vbp->vb_buf.b_resid, 0, 0);

	/*
	 * protect vbp at splbio and update.
	 */

	s = splbio();
	/* credit the parent with however many bytes this child moved */
	resid = vbp->vb_buf.b_bcount - vbp->vb_buf.b_resid;
	pbp->b_resid -= resid;
	vnx->vx_pending--;

	if (vbp->vb_buf.b_flags & B_ERROR) {
		/* pass error upward */
		error = vbp->vb_buf.b_error ? vbp->vb_buf.b_error : EIO;
		UVMHIST_LOG(pdhist, "  got error=%d !", error, 0, 0, 0);
		vnx->vx_error = error;
	}

	/*
	 * kill vbp structure
	 */
	putvndbuf(vbp);

	/*
	 * wrap up this transaction if it has run to completion or, in
	 * case of an error, when all auxiliary buffers have returned.
	 */
	if (vnx->vx_error != 0) {
		/* pass error upward */
		pbp->b_flags |= B_ERROR;
		pbp->b_error = vnx->vx_error;
		if ((vnx->vx_flags & VX_BUSY) == 0 && vnx->vx_pending == 0) {
			putvndxfer(vnx);
			biodone(pbp);
		}
	} else if (pbp->b_resid == 0) {
		KASSERT(vnx->vx_pending == 0);
		if ((vnx->vx_flags & VX_BUSY) == 0) {
			UVMHIST_LOG(pdhist, "  iodone error=%d !",
			    pbp, vnx->vx_error, 0, 0);
			putvndxfer(vnx);
			biodone(pbp);
		}
	}

	/*
	 * done!   start next swapdev I/O if one is pending
	 */
	sdp->swd_active--;
	sw_reg_start(sdp);
	splx(s);
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * uvm_swap_alloc: allocate space on swap
 *
 * => allocation is done "round robin" down the priority list, as we
 *	allocate in a priority we "rotate" the circle queue.
 * => space can be freed with uvm_swap_free
 * => we return the page slot number in /dev/drum (0 == invalid slot)
 * => we lock uvm.swap_data_lock
 * => XXXMRG: "LESSOK" INTERFACE NEEDED TO EXTENT SYSTEM
 *
 * => *nslots is IN/OUT: on entry the number of slots wanted; on exit
 *    it may have been reduced to 1 when lessok is TRUE and the full
 *    request could not be satisfied.
 */
int
uvm_swap_alloc(nslots, lessok)
	int *nslots;	/* IN/OUT */
	boolean_t lessok;
{
	struct swapdev *sdp;
	struct swappri *spp;
	u_long	result;
	UVMHIST_FUNC("uvm_swap_alloc"); UVMHIST_CALLED(pdhist);

	/*
	 * no swap devices configured yet?   definite failure.
	 */
	if (uvmexp.nswapdev < 1)
		return 0;

	/*
	 * lock data lock, convert slots into blocks, and enter loop
	 */
	simple_lock(&uvm.swap_data_lock);

ReTry:	/* XXXMRG */
	LIST_FOREACH(spp, &swap_priority, spi_swappri) {
		CIRCLEQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
			/* if it's not enabled, then we can't swap from it */
			if ((sdp->swd_flags & SWF_ENABLE) == 0)
				continue;
			/* skip devices that cannot hold the whole request */
			if (sdp->swd_npginuse + *nslots > sdp->swd_npages)
				continue;
			if (extent_alloc(sdp->swd_ex, *nslots, EX_NOALIGN,
					 EX_NOBOUNDARY, EX_MALLOCOK|EX_NOWAIT,
					 &result) != 0) {
				continue;
			}

			/*
			 * successful allocation!  now rotate the circleq.
			 */
			CIRCLEQ_REMOVE(&spp->spi_swapdev, sdp, swd_next);
			CIRCLEQ_INSERT_TAIL(&spp->spi_swapdev, sdp, swd_next);
			sdp->swd_npginuse += *nslots;
			uvmexp.swpginuse += *nslots;
			simple_unlock(&uvm.swap_data_lock);
			/* done!  return drum slot number */
			UVMHIST_LOG(pdhist,
			    "success!  returning %d slots starting at %d",
			    *nslots, result + sdp->swd_drumoffset, 0, 0);
			return (result + sdp->swd_drumoffset);
		}
	}

	/* XXXMRG: BEGIN HACK */
	/* full request failed: retry once asking for a single slot */
	if (*nslots > 1 && lessok) {
		*nslots = 1;
		goto ReTry;	/* XXXMRG: ugh!  extent should support this for us */
	}
	/* XXXMRG: END HACK */

	simple_unlock(&uvm.swap_data_lock);
	return 0;
}
|
|
|
|
|
2003-08-11 20:33:30 +04:00
|
|
|
boolean_t
|
|
|
|
uvm_swapisfull(void)
|
|
|
|
{
|
|
|
|
boolean_t rv;
|
|
|
|
|
|
|
|
simple_lock(&uvm.swap_data_lock);
|
|
|
|
KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
|
|
|
|
rv = (uvmexp.swpgonly >= uvmexp.swpgavail);
|
|
|
|
simple_unlock(&uvm.swap_data_lock);
|
|
|
|
|
|
|
|
return (rv);
|
|
|
|
}
|
|
|
|
|
2000-01-11 09:57:49 +03:00
|
|
|
/*
 * uvm_swap_markbad: keep track of swap ranges where we've had i/o errors
 *
 * => we lock uvm.swap_data_lock
 * => startslot identifies the drum slot where the error occurred;
 *    nslots is the length of the bad range (assumed to lie entirely
 *    within one swap device, see below).
 */
void
uvm_swap_markbad(startslot, nslots)
	int startslot;
	int nslots;
{
	struct swapdev *sdp;
	UVMHIST_FUNC("uvm_swap_markbad"); UVMHIST_CALLED(pdhist);

	simple_lock(&uvm.swap_data_lock);
	sdp = swapdrum_getsdp(startslot);
	KASSERT(sdp != NULL);

	/*
	 * we just keep track of how many pages have been marked bad
	 * in this device, to make everything add up in swap_off().
	 * we assume here that the range of slots will all be within
	 * one swap device.
	 */

	/* bad pages no longer count as swap-only resident pages */
	KASSERT(uvmexp.swpgonly >= nslots);
	uvmexp.swpgonly -= nslots;
	sdp->swd_npgbad += nslots;
	UVMHIST_LOG(pdhist, "now %d bad", sdp->swd_npgbad, 0,0,0);
	simple_unlock(&uvm.swap_data_lock);
}
|
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
/*
 * uvm_swap_free: free swap slots
 *
 * => this can be all or part of an allocation made by uvm_swap_alloc
 * => we lock uvm.swap_data_lock
 * => freeing the SWSLOT_BAD sentinel is silently ignored.
 */
void
uvm_swap_free(startslot, nslots)
	int startslot;
	int nslots;
{
	struct swapdev *sdp;
	UVMHIST_FUNC("uvm_swap_free"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "freeing %d slots starting at %d", nslots,
	    startslot, 0, 0);

	/*
	 * ignore attempts to free the "bad" slot.
	 */

	if (startslot == SWSLOT_BAD) {
		return;
	}

	/*
	 * convert drum slot offset back to sdp, free the blocks
	 * in the extent, and return.   must hold pri lock to do
	 * lookup and access the extent.
	 */

	simple_lock(&uvm.swap_data_lock);
	sdp = swapdrum_getsdp(startslot);
	KASSERT(uvmexp.nswapdev >= 1);
	KASSERT(sdp != NULL);
	KASSERT(sdp->swd_npginuse >= nslots);
	/* EX_NOWAIT may fail under memory pressure; the slots then leak */
	if (extent_free(sdp->swd_ex, startslot - sdp->swd_drumoffset, nslots,
			EX_MALLOCOK|EX_NOWAIT) != 0) {
		printf("warning: resource shortage: %d pages of swap lost\n",
			nslots);
	}
	sdp->swd_npginuse -= nslots;
	uvmexp.swpginuse -= nslots;
	simple_unlock(&uvm.swap_data_lock);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_swap_put: put any number of pages into a contig place on swap
|
|
|
|
*
|
|
|
|
* => can be sync or async
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
int
|
|
|
|
uvm_swap_put(swslot, ppsp, npages, flags)
|
|
|
|
int swslot;
|
|
|
|
struct vm_page **ppsp;
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
int npages;
|
|
|
|
int flags;
|
1998-02-05 09:25:08 +03:00
|
|
|
{
|
2001-11-06 08:44:25 +03:00
|
|
|
int error;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2001-11-06 08:44:25 +03:00
|
|
|
error = uvm_swap_io(ppsp, swslot, npages, B_WRITE |
|
1998-02-05 09:25:08 +03:00
|
|
|
((flags & PGO_SYNCIO) ? 0 : B_ASYNC));
|
2001-11-06 08:44:25 +03:00
|
|
|
return error;
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_swap_get: get a single page from swap
|
|
|
|
*
|
|
|
|
* => usually a sync op (from fault)
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
int
|
|
|
|
uvm_swap_get(page, swslot, flags)
|
|
|
|
struct vm_page *page;
|
|
|
|
int swslot, flags;
|
|
|
|
{
|
2001-11-06 08:44:25 +03:00
|
|
|
int error;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
uvmexp.nswget++;
|
2001-02-19 00:19:08 +03:00
|
|
|
KASSERT(flags & PGO_SYNCIO);
|
2000-01-11 09:57:49 +03:00
|
|
|
if (swslot == SWSLOT_BAD) {
|
2001-03-11 01:46:45 +03:00
|
|
|
return EIO;
|
2000-01-11 09:57:49 +03:00
|
|
|
}
|
2003-08-11 20:33:30 +04:00
|
|
|
|
2001-11-06 08:44:25 +03:00
|
|
|
error = uvm_swap_io(&page, swslot, 1, B_READ |
|
1998-02-05 09:25:08 +03:00
|
|
|
((flags & PGO_SYNCIO) ? 0 : B_ASYNC));
|
2001-11-06 08:44:25 +03:00
|
|
|
if (error == 0) {
|
2001-03-11 01:46:45 +03:00
|
|
|
|
1999-03-26 20:34:15 +03:00
|
|
|
/*
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
* this page is no longer only in swap.
|
1999-03-26 20:34:15 +03:00
|
|
|
*/
|
2001-03-11 01:46:45 +03:00
|
|
|
|
1999-03-26 20:34:15 +03:00
|
|
|
simple_lock(&uvm.swap_data_lock);
|
2001-11-06 08:44:25 +03:00
|
|
|
KASSERT(uvmexp.swpgonly > 0);
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
uvmexp.swpgonly--;
|
1999-03-26 20:34:15 +03:00
|
|
|
simple_unlock(&uvm.swap_data_lock);
|
|
|
|
}
|
2001-11-06 08:44:25 +03:00
|
|
|
return error;
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uvm_swap_io: do an i/o operation to swap
|
|
|
|
*/
|
|
|
|
|
|
|
|
static int
|
|
|
|
uvm_swap_io(pps, startslot, npages, flags)
|
|
|
|
struct vm_page **pps;
|
|
|
|
int startslot, npages, flags;
|
|
|
|
{
|
|
|
|
daddr_t startblk;
|
|
|
|
struct buf *bp;
|
1998-08-13 06:10:37 +04:00
|
|
|
vaddr_t kva;
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
int error, s, mapinflags;
|
2000-11-27 11:39:39 +03:00
|
|
|
boolean_t write, async;
|
1998-02-05 09:25:08 +03:00
|
|
|
UVMHIST_FUNC("uvm_swap_io"); UVMHIST_CALLED(pdhist);
|
|
|
|
|
|
|
|
UVMHIST_LOG(pdhist, "<- called, startslot=%d, npages=%d, flags=%d",
|
|
|
|
startslot, npages, flags, 0);
|
2000-01-11 09:57:49 +03:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
write = (flags & B_READ) == 0;
|
|
|
|
async = (flags & B_ASYNC) != 0;
|
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
/*
|
|
|
|
* convert starting drum slot to block number
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
2001-01-04 09:07:18 +03:00
|
|
|
startblk = btodb((u_int64_t)startslot << PAGE_SHIFT);
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
/*
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
* first, map the pages into the kernel.
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
2000-11-27 11:39:39 +03:00
|
|
|
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
mapinflags = !write ?
|
|
|
|
UVMPAGER_MAPIN_WAITOK|UVMPAGER_MAPIN_READ :
|
|
|
|
UVMPAGER_MAPIN_WAITOK|UVMPAGER_MAPIN_WRITE;
|
2000-11-27 11:39:39 +03:00
|
|
|
kva = uvm_pagermapin(pps, npages, mapinflags);
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2001-05-25 08:06:11 +04:00
|
|
|
/*
|
2000-11-27 11:39:39 +03:00
|
|
|
* now allocate a buf for the i/o.
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
s = splbio();
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
bp = pool_get(&bufpool, PR_WAITOK);
|
2000-11-27 11:39:39 +03:00
|
|
|
splx(s);
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* fill in the bp/sbp. we currently route our i/o through
|
|
|
|
* /dev/drum's vnode [swapdev_vp].
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
2003-02-25 23:35:31 +03:00
|
|
|
BUF_INIT(bp);
|
1998-11-08 22:37:12 +03:00
|
|
|
bp->b_flags = B_BUSY | B_NOCACHE | (flags & (B_READ|B_ASYNC));
|
1998-02-05 09:25:08 +03:00
|
|
|
bp->b_proc = &proc0; /* XXX */
|
1998-07-24 00:51:09 +04:00
|
|
|
bp->b_vnbufs.le_next = NOLIST;
|
1998-02-05 09:25:08 +03:00
|
|
|
bp->b_data = (caddr_t)kva;
|
|
|
|
bp->b_blkno = startblk;
|
|
|
|
bp->b_vp = swapdev_vp;
|
2001-08-26 04:43:53 +04:00
|
|
|
bp->b_dev = swapdev_vp->v_rdev;
|
2000-11-27 11:39:39 +03:00
|
|
|
bp->b_bufsize = bp->b_bcount = npages << PAGE_SHIFT;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
2001-05-25 08:06:11 +04:00
|
|
|
/*
|
2000-11-27 11:39:39 +03:00
|
|
|
* bump v_numoutput (counter of number of active outputs).
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
if (write) {
|
1998-02-05 09:25:08 +03:00
|
|
|
s = splbio();
|
2003-02-06 00:38:38 +03:00
|
|
|
V_INCR_NUMOUTPUT(swapdev_vp);
|
1998-02-05 09:25:08 +03:00
|
|
|
splx(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2000-11-27 11:39:39 +03:00
|
|
|
* for async ops we must set up the iodone handler.
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
2000-11-27 11:39:39 +03:00
|
|
|
if (async) {
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
bp->b_flags |= B_CALL;
|
2000-11-27 11:39:39 +03:00
|
|
|
bp->b_iodone = uvm_aio_biodone;
|
1998-02-05 09:25:08 +03:00
|
|
|
UVMHIST_LOG(pdhist, "doing async!", 0, 0, 0, 0);
|
|
|
|
}
|
|
|
|
UVMHIST_LOG(pdhist,
|
2000-11-27 11:39:39 +03:00
|
|
|
"about to start io: data = %p blkno = 0x%x, bcount = %ld",
|
1998-02-05 09:25:08 +03:00
|
|
|
bp->b_data, bp->b_blkno, bp->b_bcount, 0);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* now we start the I/O, and if async, return.
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
VOP_STRATEGY(bp);
|
2000-11-27 11:39:39 +03:00
|
|
|
if (async)
|
2001-03-11 01:46:45 +03:00
|
|
|
return 0;
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* must be sync i/o. wait for it to finish
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
2001-03-11 01:46:45 +03:00
|
|
|
error = biowait(bp);
|
1998-02-05 09:25:08 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* kill the pager mapping
|
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
uvm_pagermapout(kva, npages);
|
|
|
|
|
|
|
|
/*
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
* now dispose of the buf and we're done.
|
1998-02-05 09:25:08 +03:00
|
|
|
*/
|
a whole bunch of changes to improve performance and robustness under load:
- remove special treatment of pager_map mappings in pmaps. this is
required now, since I've removed the globals that expose the address range.
pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's
no longer any need to special-case it.
- eliminate struct uvm_vnode by moving its fields into struct vnode.
- rewrite the pageout path. the pager is now responsible for handling the
high-level requests instead of only getting control after a bunch of work
has already been done on its behalf. this will allow us to UBCify LFS,
which needs tighter control over its pages than other filesystems do.
writing a page to disk no longer requires making it read-only, which
allows us to write wired pages without causing all kinds of havoc.
- use a new PG_PAGEOUT flag to indicate that a page should be freed
on behalf of the pagedaemon when it's unlocked. this flag is very similar
to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the
pageout fails due to eg. an indirect-block buffer being locked.
this allows us to remove the "version" field from struct vm_page,
and together with shrinking "loan_count" from 32 bits to 16,
struct vm_page is now 4 bytes smaller.
- no longer use PG_RELEASED for swap-backed pages. if the page is busy
because it's being paged out, we can't release the swap slot to be
reallocated until that write is complete, but unlike with vnodes we
don't keep a count of in-progress writes so there's no good way to
know when the write is done. instead, when we need to free a busy
swap-backed page, just sleep until we can get it busy ourselves.
- implement a fast-path for extending writes which allows us to avoid
zeroing new pages. this substantially reduces cpu usage.
- encapsulate the data used by the genfs code in a struct genfs_node,
which must be the first element of the filesystem-specific vnode data
for filesystems which use genfs_{get,put}pages().
- eliminate many of the UVM pagerops, since they aren't needed anymore
now that the pager "put" operation is a higher-level operation.
- enhance the genfs code to allow NFS to use the genfs_{get,put}pages
instead of a modified copy.
- clean up struct vnode by removing all the fields that used to be used by
the vfs_cluster.c code (which we don't use anymore with UBC).
- remove kmem_object and mb_object since they were useless.
instead of allocating pages to these objects, we now just allocate
pages with no object. such pages are mapped in the kernel until they
are freed, so we can use the mapping to find the page to free it.
this allows us to remove splvm() protection in several places.
The sum of all these changes improves write throughput on my
decstation 5000/200 to within 1% of the rate of NetBSD 1.5
and reduces the elapsed time for "make release" of a NetBSD 1.5
source tree on my 128MB pc to 10% less than a 1.5 kernel took.
2001-09-16 00:36:31 +04:00
|
|
|
|
1998-02-05 09:25:08 +03:00
|
|
|
s = splbio();
|
2000-11-27 11:39:39 +03:00
|
|
|
if (write)
|
|
|
|
vwakeup(bp);
|
|
|
|
pool_put(&bufpool, bp);
|
1998-02-05 09:25:08 +03:00
|
|
|
splx(s);
|
2001-03-11 01:46:45 +03:00
|
|
|
UVMHIST_LOG(pdhist, "<- done (sync) error=%d", error, 0, 0, 0);
|
|
|
|
return (error);
|
1998-02-05 09:25:08 +03:00
|
|
|
}
|