new VM code, from 4.4-Lite

parent 4ee1fbae2f
commit 3495827959
@ -35,8 +35,8 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)device_pager.c 8.1 (Berkeley) 6/11/93
* $Id: device_pager.c,v 1.14 1994/04/15 07:04:40 cgd Exp $
* from: @(#)device_pager.c 8.5 (Berkeley) 1/12/94
* $Id: device_pager.c,v 1.15 1994/05/23 03:11:20 cgd Exp $
*/

/*
@ -48,8 +48,6 @@
#include <sys/conf.h>
#include <sys/mman.h>
#include <sys/malloc.h>
#include <sys/vnode.h> /* XXX arguably shouldn't be here */
#include <miscfs/specfs/specdev.h> /* XXX arguably shouldn't be here */

#include <vm/vm.h>
#include <vm/vm_kern.h>
@ -71,11 +69,11 @@ static vm_pager_t dev_pager_alloc
__P((caddr_t, vm_size_t, vm_prot_t, vm_offset_t));
static void dev_pager_dealloc __P((vm_pager_t));
static int dev_pager_getpage
__P((vm_pager_t, vm_page_t, boolean_t));
__P((vm_pager_t, vm_page_t *, int, boolean_t));
static boolean_t dev_pager_haspage __P((vm_pager_t, vm_offset_t));
static void dev_pager_init __P((void));
static int dev_pager_putpage
__P((vm_pager_t, vm_page_t, boolean_t));
__P((vm_pager_t, vm_page_t *, int, boolean_t));
static vm_page_t dev_pager_getfake __P((vm_offset_t));
static void dev_pager_putfake __P((vm_page_t));

@ -85,7 +83,8 @@ struct pagerops devicepagerops = {
dev_pager_dealloc,
dev_pager_getpage,
dev_pager_putpage,
dev_pager_haspage
dev_pager_haspage,
vm_pager_clusternull
};

static void
@ -129,7 +128,7 @@ dev_pager_alloc(handle, size, prot, foff)
/*
* Make sure this device can be mapped.
*/
dev = (dev_t)(long)handle;
dev = (dev_t)handle;
mapfunc = cdevsw[major(dev)].d_mmap;
if (mapfunc == NULL || mapfunc == enodev || mapfunc == nullop)
return(NULL);
@ -137,7 +136,7 @@ dev_pager_alloc(handle, size, prot, foff)
/*
* Offset should be page aligned.
*/
if (foff & page_mask)
if (foff & PAGE_MASK)
return(NULL);

/*
@ -171,7 +170,8 @@ top:
pager->pg_handle = handle;
pager->pg_ops = &devicepagerops;
pager->pg_type = PG_DEVICE;
pager->pg_data = (caddr_t)devp;
pager->pg_flags = 0;
pager->pg_data = devp;
TAILQ_INIT(&devp->devp_pglist);
/*
* Allocate object and associate it with the pager.
@ -249,9 +249,10 @@ dev_pager_dealloc(pager)
}

static int
dev_pager_getpage(pager, m, sync)
dev_pager_getpage(pager, mlist, npages, sync)
vm_pager_t pager;
vm_page_t m;
vm_page_t *mlist;
int npages;
boolean_t sync;
{
register vm_object_t object;
@ -259,14 +260,20 @@ dev_pager_getpage(pager, m, sync)
vm_page_t page;
dev_t dev;
int (*mapfunc)(), prot;
vm_page_t m;

#ifdef DEBUG
if (dpagerdebug & DDB_FOLLOW)
printf("dev_pager_getpage(%x, %x)\n", pager, m);
printf("dev_pager_getpage(%x, %x, %x, %x)\n",
pager, mlist, npages, sync);
#endif

if (npages != 1)
panic("dev_pager_getpage: cannot handle multiple pages");
m = *mlist;

object = m->object;
dev = (dev_t)(long)pager->pg_handle;
dev = (dev_t)pager->pg_handle;
offset = m->offset + object->paging_offset;
prot = PROT_READ; /* XXX should pass in? */
mapfunc = cdevsw[major(dev)].d_mmap;
@ -289,8 +296,8 @@ dev_pager_getpage(pager, m, sync)
vm_object_lock(object);
vm_page_lock_queues();
vm_page_free(m);
vm_page_unlock_queues();
vm_page_insert(page, object, offset);
vm_page_unlock_queues();
PAGE_WAKEUP(m);
if (offset + PAGE_SIZE > object->size)
object->size = offset + PAGE_SIZE; /* XXX anal */
@ -300,17 +307,19 @@ dev_pager_getpage(pager, m, sync)
}

static int
dev_pager_putpage(pager, m, sync)
dev_pager_putpage(pager, mlist, npages, sync)
vm_pager_t pager;
vm_page_t m;
vm_page_t *mlist;
int npages;
boolean_t sync;
{
#ifdef DEBUG
if (dpagerdebug & DDB_FOLLOW)
printf("dev_pager_putpage(%x, %x)\n", pager, m);
printf("dev_pager_putpage(%x, %x, %x, %x)\n",
pager, mlist, npages, sync);
#endif
if (pager == NULL)
return(VM_PAGER_OK);
return (FALSE);
panic("dev_pager_putpage called");
}

@ -35,8 +35,8 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)device_pager.h 8.1 (Berkeley) 6/11/93
* $Id: device_pager.h,v 1.6 1994/04/15 07:04:42 cgd Exp $
* from: @(#)device_pager.h 8.3 (Berkeley) 12/13/93
* $Id: device_pager.h,v 1.7 1994/05/23 03:11:25 cgd Exp $
*/

#ifndef _DEVICE_PAGER_
@ -1,6 +1,6 @@
/*
* Copyright (c) 1991 Regents of the University of California.
* All rights reserved.
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* The Mach Operating System project at Carnegie-Mellon University.
@ -33,8 +33,8 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)kern_lock.c 7.4 (Berkeley) 4/21/91
* $Id: kern_lock.c,v 1.5 1994/01/07 22:22:20 mycroft Exp $
* from: @(#)kern_lock.c 8.1 (Berkeley) 6/11/93
* $Id: kern_lock.c,v 1.6 1994/05/23 03:11:27 cgd Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@ -68,12 +68,12 @@
*/

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <vm/vm_param.h>
#include <vm/lock.h>
#include <vm/vm.h>

/* XXX */
#include <sys/proc.h>
typedef int *thread_t;
#define current_thread() ((thread_t)&curproc->p_thread)
/* XXX */
@ -135,12 +135,12 @@ boolean_t simple_lock_try(l)
{
return (!test_and_set((boolean_t *)l));
}
#endif notdef
#endif NCPUS > 1
#endif /* notdef */
#endif /* NCPUS > 1 */

#if NCPUS > 1
int lock_wait_time = 100;
#else NCPUS > 1
#else /* NCPUS > 1 */

/*
* It is silly to spin on a uni-processor as if we
@ -148,7 +148,7 @@ int lock_wait_time = 100;
* want_write bit while we are executing.
*/
int lock_wait_time = 0;
#endif NCPUS > 1
#endif /* NCPUS > 1 */


/*
@ -218,7 +218,7 @@ void lock_write(l)

if (l->can_sleep && l->want_write) {
l->waiting = TRUE;
thread_sleep((int) l, &l->interlock);
thread_sleep((int) l, &l->interlock, FALSE);
simple_lock(&l->interlock);
}
}
@ -237,7 +237,7 @@ void lock_write(l)

if (l->can_sleep && (l->read_count != 0 || l->want_upgrade)) {
l->waiting = TRUE;
thread_sleep((int) l, &l->interlock);
thread_sleep((int) l, &l->interlock, FALSE);
simple_lock(&l->interlock);
}
}
@ -293,7 +293,7 @@ void lock_read(l)

if (l->can_sleep && (l->want_write || l->want_upgrade)) {
l->waiting = TRUE;
thread_sleep((int) l, &l->interlock);
thread_sleep((int) l, &l->interlock, FALSE);
simple_lock(&l->interlock);
}
}
@ -357,7 +357,7 @@ boolean_t lock_read_to_write(l)

if (l->can_sleep && l->read_count != 0) {
l->waiting = TRUE;
thread_sleep((int) l, &l->interlock);
thread_sleep((int) l, &l->interlock, FALSE);
simple_lock(&l->interlock);
}
}
@ -496,7 +496,7 @@ boolean_t lock_try_read_to_write(l)

while (l->read_count != 0) {
l->waiting = TRUE;
thread_sleep((int) l, &l->interlock);
thread_sleep((int) l, &l->interlock, FALSE);
simple_lock(&l->interlock);
}

@ -1,6 +1,6 @@
/*
* Copyright (c) 1991 Regents of the University of California.
* All rights reserved.
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* The Mach Operating System project at Carnegie-Mellon University.
@ -33,8 +33,8 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)lock.h 7.3 (Berkeley) 4/21/91
* $Id: lock.h,v 1.4 1993/07/29 21:45:38 jtc Exp $
* from: @(#)lock.h 8.1 (Berkeley) 6/11/93
* $Id: lock.h,v 1.5 1994/05/23 03:11:28 cgd Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@ -63,13 +63,13 @@
* rights to redistribute these changes.
*/

#ifndef _VM_LOCK_H_
#define _VM_LOCK_H_

/*
* Locking primitives definitions
*/

#ifndef _LOCK_H_
#define _LOCK_H_

#define NCPUS 1 /* XXX */

/*
@ -102,7 +102,7 @@ struct lock {
:0;

simple_lock_data_t interlock;
#else /* vax */
#else /* vax */
#ifdef ns32000
/*
* Efficient ns32000 implementation --
@ -116,7 +116,7 @@ struct lock {
can_sleep:1,
:0;

#else /* ns32000 */
#else /* ns32000 */
/* Only the "interlock" field is used for hardware exclusion;
* other fields are modified with normal instructions after
* acquiring the interlock bit.
@ -139,38 +139,35 @@ struct lock {
typedef struct lock lock_data_t;
typedef struct lock *lock_t;

#if NCPUS > 1
void simple_lock_init();
void simple_lock();
void simple_unlock();
boolean_t simple_lock_try();
#else /* NCPUS > 1 */
/*
* No multiprocessor locking is necessary.
*/
#define simple_lock_init(l)
#define simple_lock(l)
#define simple_unlock(l)
#define simple_lock_try(l) (1) /* always succeeds */
#endif /* NCPUS > 1 */
#if NCPUS > 1
__BEGIN_DECLS
void simple_lock __P((simple_lock_t));
void simple_lock_init __P((simple_lock_t));
boolean_t simple_lock_try __P((simple_lock_t));
void simple_unlock __P((simple_lock_t));
__END_DECLS
#else /* No multiprocessor locking is necessary. */
#define simple_lock(l)
#define simple_lock_init(l)
#define simple_lock_try(l) (1) /* Always succeeds. */
#define simple_unlock(l)
#endif

/* Sleep locks must work even if no multiprocessing */

void lock_init();
void lock_sleepable();
void lock_write();
void lock_read();
void lock_done();
boolean_t lock_read_to_write();
void lock_write_to_read();
boolean_t lock_try_write();
boolean_t lock_try_read();
boolean_t lock_try_read_to_write();
/* Sleep locks must work even if no multiprocessing. */

#define lock_read_done(l) lock_done(l)
#define lock_write_done(l) lock_done(l)

void lock_set_recursive();
void lock_clear_recursive();

#endif /* !_VM_LOCK_H_ */
void lock_clear_recursive __P((lock_t));
void lock_done __P((lock_t));
void lock_init __P((lock_t, boolean_t));
void lock_read __P((lock_t));
boolean_t lock_read_to_write __P((lock_t));
void lock_set_recursive __P((lock_t));
void lock_sleepable __P((lock_t, boolean_t));
boolean_t lock_try_read __P((lock_t));
boolean_t lock_try_read_to_write __P((lock_t));
boolean_t lock_try_write __P((lock_t));
void lock_write __P((lock_t));
void lock_write_to_read __P((lock_t));
#endif /* !_LOCK_H_ */
sys/vm/pmap.h
@ -1,6 +1,6 @@
/*
* Copyright (c) 1991 Regents of the University of California.
* All rights reserved.
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* The Mach Operating System project at Carnegie-Mellon University.
@ -33,8 +33,8 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)pmap.h 7.4 (Berkeley) 5/7/91
* $Id: pmap.h,v 1.6 1994/04/15 07:04:43 cgd Exp $
* from: @(#)pmap.h 8.1 (Berkeley) 6/11/93
* $Id: pmap.h,v 1.7 1994/05/23 03:11:30 cgd Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@ -86,97 +86,51 @@ typedef struct pmap_statistics *pmap_statistics_t;
#include <machine/pmap.h>

#ifdef KERNEL
/*
* Currently this option is used on the i386 to be able to handle the
* memory from 0-640k and 1M+.
*/
#ifdef MACHINE_NONCONTIG
/*
* Routines used for initialization.
* There is traditionally also a pmap_bootstrap,
* used very early by machine-dependent code,
* but it is not part of the interface.
*/
extern vm_offset_t pmap_steal_memory(); /* During VM initialization,
* steal a chunk of memory.
*/
extern unsigned int pmap_free_pages(); /* During VM initialization,
* report remaining unused
* physical pages.
*/
extern void pmap_startup(); /* During VM initialization,
* use remaining physical pages
* to allocate page frames.
*/
extern void pmap_init(); /* Initialization,
* after kernel runs
* in virtual memory.
*/

/*
* Currently the following isn't really an option. So don't define it.
*/
#undef MACHINE_PAGES
#ifndef MACHINE_PAGES
/*
* If machine/pmap.h defines MACHINE_PAGES, it must implement
* the above functions. The pmap modules has complete control.
* Otherwise, it must implement
* pmap_free_pages
* pmap_virtual_space
* pmap_next_page
* pmap_init
* and vm/vm_page.c implements pmap_steal_memory and pmap_startup
* using pmap_free_pages, pmap_next_page, pmap_virtual_space,
* and pmap_enter. pmap_free_pages may over-estimate the number
* of unused physical pages, and pmap_next_page may return FALSE
* to indicate that there are no more unused pages to return.
* However, for best performance pmap_free_pages should be accurate.
*/
extern boolean_t pmap_next_page(); /* During VM initialization,
* return the next unused
* physical page.
*/
extern void pmap_virtual_space(); /* During VM initialization,
* report virtual space
* available for the kernel.
*/
#endif /* MACHINE_PAGES */

#endif /* MACHINE_NONCONTIG */

#ifdef MACHINE_NONCONTIG
void pmap_init __P((void));
__BEGIN_DECLS
void *pmap_bootstrap_alloc __P((int));
void pmap_bootstrap( /* machine dependent */ );
void pmap_change_wiring __P((pmap_t, vm_offset_t, boolean_t));
void pmap_clear_modify __P((vm_offset_t pa));
void pmap_clear_reference __P((vm_offset_t pa));
void pmap_collect __P((pmap_t));
void pmap_copy __P((pmap_t,
pmap_t, vm_offset_t, vm_size_t, vm_offset_t));
void pmap_copy_page __P((vm_offset_t, vm_offset_t));
pmap_t pmap_create __P((vm_size_t));
void pmap_destroy __P((pmap_t));
void pmap_enter __P((pmap_t,
vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
vm_offset_t pmap_extract __P((pmap_t, vm_offset_t));
#ifndef MACHINE_NONCONTIG
void pmap_init __P((vm_offset_t, vm_offset_t));
#else
void pmap_init __P((vm_offset_t s, vm_offset_t e));
void pmap_init __P((void));
#endif
void pmap_pinit __P((struct pmap *pmap));
void pmap_release __P((struct pmap *pmap));
vm_offset_t pmap_map();
pmap_t pmap_create();
void pmap_destroy();
void pmap_reference();
void pmap_remove();
void pmap_page_protect();
void pmap_protect();
void pmap_enter();
vm_offset_t pmap_extract();
void pmap_update();
void pmap_collect();
void pmap_activate();
void pmap_deactivate();
void pmap_copy();
void pmap_statistics();
void pmap_clear_reference();
boolean_t pmap_is_referenced();
#ifndef pmap_kernel
pmap_t pmap_kernel();
boolean_t pmap_is_modified __P((vm_offset_t pa));
boolean_t pmap_is_referenced __P((vm_offset_t pa));
vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
void pmap_page_protect __P((vm_offset_t, vm_prot_t));
void pmap_pageable __P((pmap_t,
vm_offset_t, vm_offset_t, boolean_t));
vm_offset_t pmap_phys_address __P((int));
void pmap_pinit __P((pmap_t));
void pmap_protect __P((pmap_t,
vm_offset_t, vm_offset_t, vm_prot_t));
void pmap_reference __P((pmap_t));
void pmap_release __P((pmap_t));
void pmap_remove __P((pmap_t, vm_offset_t, vm_offset_t));
void pmap_update __P((void));
void pmap_zero_page __P((vm_offset_t));

#ifdef MACHINE_NONCONTIG
u_int pmap_free_pages __P(());
void pmap_init __P(());
boolean_t pmap_next_page __P(());
void pmap_startup __P(());
vm_offset_t pmap_steal_memory __P(());
void pmap_virtual_space __P(());
#endif

void pmap_redzone();
boolean_t pmap_access();

extern pmap_t kernel_pmap;
__END_DECLS
#endif

#endif /* _PMAP_VM_ */
@ -37,8 +37,8 @@
|
||||
*
|
||||
* from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
|
||||
*
|
||||
* from: @(#)swap_pager.c 8.1 (Berkeley) 6/11/93
|
||||
* $Id: swap_pager.c,v 1.20 1994/04/29 08:21:49 mycroft Exp $
|
||||
* from: @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
|
||||
* $Id: swap_pager.c,v 1.21 1994/05/23 03:11:32 cgd Exp $
|
||||
*/
|
||||
|
||||
/*
|
||||
@ -55,6 +55,7 @@
|
||||
#include <sys/map.h>
|
||||
#include <sys/vnode.h>
|
||||
#include <sys/malloc.h>
|
||||
|
||||
#include <miscfs/specfs/specdev.h>
|
||||
|
||||
#include <vm/vm.h>
|
||||
@ -62,11 +63,11 @@
|
||||
#include <vm/vm_pageout.h>
|
||||
#include <vm/swap_pager.h>
|
||||
|
||||
#include <machine/cpu.h>
|
||||
|
||||
#define NSWSIZES 16 /* size of swtab */
|
||||
#define NPENDINGIO 64 /* max # of pending cleans */
|
||||
#define MAXDADDRS 64 /* max # of disk addrs for fixed allocations */
|
||||
#ifndef NPENDINGIO
|
||||
#define NPENDINGIO 64 /* max # of pending cleans */
|
||||
#endif
|
||||
|
||||
#ifdef DEBUG
|
||||
int swpagerdebug = 0x100;
|
||||
@ -80,12 +81,12 @@ int swpagerdebug = 0x100;
|
||||
#define SDB_FULL 0x080
|
||||
#define SDB_ANOM 0x100
|
||||
#define SDB_ANOMPANIC 0x200
|
||||
#define SDB_CLUSTER 0x400
|
||||
#define SDB_PARANOIA 0x800
|
||||
#define SDB_CLUSTER 0x400
|
||||
#define SDB_PARANOIA 0x800
|
||||
#endif
|
||||
|
||||
TAILQ_HEAD(swpclean, swpagerclean);
|
||||
|
||||
|
||||
struct swpagerclean {
|
||||
TAILQ_ENTRY(swpagerclean) spc_list;
|
||||
int spc_flags;
|
||||
@ -93,16 +94,15 @@ struct swpagerclean {
|
||||
sw_pager_t spc_swp;
|
||||
vm_offset_t spc_kva;
|
||||
vm_page_t spc_m;
|
||||
int spc_npages;
|
||||
} swcleanlist[NPENDINGIO];
|
||||
typedef struct swpagerclean *swp_clean_t;
|
||||
|
||||
|
||||
/* spc_flags values */
|
||||
#define SPC_FREE 0x00
|
||||
#define SPC_BUSY 0x01
|
||||
#define SPC_DONE 0x02
|
||||
#define SPC_ERROR 0x04
|
||||
#define SPC_DIRTY 0x08
|
||||
|
||||
struct swtab {
|
||||
vm_size_t st_osize; /* size of object (bytes) */
|
||||
@ -114,28 +114,35 @@ struct swtab {
|
||||
} swtab[NSWSIZES+1];
|
||||
|
||||
#ifdef DEBUG
|
||||
int swap_pager_pendingio; /* max pending async "clean" ops */
|
||||
int swap_pager_poip; /* pageouts in progress */
|
||||
int swap_pager_piip; /* pageins in progress */
|
||||
#endif
|
||||
|
||||
int swap_pager_maxcluster; /* maximum cluster size */
|
||||
int swap_pager_npendingio; /* number of pager clean structs */
|
||||
|
||||
struct swpclean swap_pager_inuse; /* list of pending page cleans */
|
||||
struct swpclean swap_pager_free; /* list of free pager clean structs */
|
||||
struct pagerlst swap_pager_list; /* list of "named" anon regions */
|
||||
|
||||
static int swap_pager_finish __P((swp_clean_t));
|
||||
static void swap_pager_init __P((void));
|
||||
static vm_pager_t swap_pager_alloc
|
||||
__P((caddr_t, vm_size_t, vm_prot_t, vm_offset_t));
|
||||
static boolean_t swap_pager_clean __P((vm_page_t, int));
|
||||
static void swap_pager_clean __P((int));
|
||||
#ifdef DEBUG
|
||||
static void swap_pager_clean_check __P((vm_page_t *, int, int));
|
||||
#endif
|
||||
static void swap_pager_cluster
|
||||
__P((vm_pager_t, vm_offset_t,
|
||||
vm_offset_t *, vm_offset_t *));
|
||||
static void swap_pager_dealloc __P((vm_pager_t));
|
||||
static int swap_pager_getpage
|
||||
__P((vm_pager_t, vm_page_t, boolean_t));
|
||||
__P((vm_pager_t, vm_page_t *, int, boolean_t));
|
||||
static boolean_t swap_pager_haspage __P((vm_pager_t, vm_offset_t));
|
||||
static int swap_pager_io __P((sw_pager_t, vm_page_t, int));
|
||||
static int swap_pager_io __P((sw_pager_t, vm_page_t *, int, int));
|
||||
static void swap_pager_iodone __P((struct buf *));
|
||||
static int swap_pager_putpage
|
||||
__P((vm_pager_t, vm_page_t, boolean_t));
|
||||
__P((vm_pager_t, vm_page_t *, int, boolean_t));
|
||||
|
||||
struct pagerops swappagerops = {
|
||||
swap_pager_init,
|
||||
@ -143,7 +150,8 @@ struct pagerops swappagerops = {
|
||||
swap_pager_dealloc,
|
||||
swap_pager_getpage,
|
||||
swap_pager_putpage,
|
||||
swap_pager_haspage
|
||||
swap_pager_haspage,
|
||||
swap_pager_cluster
|
||||
};
|
||||
|
||||
static void
|
||||
@ -161,12 +169,22 @@ swap_pager_init()
|
||||
dfltpagerops = &swappagerops;
|
||||
TAILQ_INIT(&swap_pager_list);
|
||||
|
||||
/*
|
||||
* Allocate async IO structures.
|
||||
*
|
||||
* XXX it would be nice if we could do this dynamically based on
|
||||
* the value of nswbuf (since we are ultimately limited by that)
|
||||
* but neither nswbuf or malloc has been initialized yet. So the
|
||||
* structs are statically allocated above.
|
||||
*/
|
||||
swap_pager_npendingio = NPENDINGIO;
|
||||
|
||||
/*
|
||||
* Initialize clean lists
|
||||
*/
|
||||
TAILQ_INIT(&swap_pager_inuse);
|
||||
TAILQ_INIT(&swap_pager_free);
|
||||
for (i = 0, spc = swcleanlist; i < NPENDINGIO; i++, spc++) {
|
||||
for (i = 0, spc = swcleanlist; i < swap_pager_npendingio; i++, spc++) {
|
||||
TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list);
|
||||
spc->spc_flags = SPC_FREE;
|
||||
}
|
||||
@ -194,6 +212,8 @@ swap_pager_init()
|
||||
for (i = 0; i < NSWSIZES; i++) {
|
||||
swtab[i].st_osize = (vm_size_t) (MAXDADDRS * dbtob(bsize));
|
||||
swtab[i].st_bsize = bsize;
|
||||
if (bsize <= btodb(MAXPHYS))
|
||||
swap_pager_maxcluster = dbtob(bsize);
|
||||
#ifdef DEBUG
|
||||
if (swpagerdebug & SDB_INIT)
|
||||
printf("swpg_init: ix %d, size %x, bsize %x\n",
|
||||
@ -311,6 +331,7 @@ swap_pager_alloc(handle, size, prot, foff)
|
||||
pager->pg_handle = handle;
|
||||
pager->pg_ops = &swappagerops;
|
||||
pager->pg_type = PG_SWAP;
|
||||
pager->pg_flags = PG_CLUSTERPUT;
|
||||
pager->pg_data = swp;
|
||||
|
||||
#ifdef DEBUG
|
||||
@ -361,11 +382,10 @@ swap_pager_dealloc(pager)
|
||||
s = splbio();
|
||||
while (swp->sw_poip) {
|
||||
swp->sw_flags |= SW_WANTED;
|
||||
assert_wait((int)swp, 0);
|
||||
thread_block();
|
||||
(void) tsleep(swp, PVM, "swpgdealloc", 0);
|
||||
}
|
||||
splx(s);
|
||||
(void) swap_pager_clean(NULL, B_WRITE);
|
||||
swap_pager_clean(B_WRITE);
|
||||
|
||||
/*
|
||||
* Free left over swap blocks
|
||||
@ -388,49 +408,66 @@ swap_pager_dealloc(pager)
|
||||
}
|
||||
|
||||
static int
|
||||
swap_pager_getpage(pager, m, sync)
|
||||
swap_pager_getpage(pager, mlist, npages, sync)
|
||||
vm_pager_t pager;
|
||||
vm_page_t m;
|
||||
vm_page_t *mlist;
|
||||
int npages;
|
||||
boolean_t sync;
|
||||
{
|
||||
register int rv;
|
||||
#ifdef DIAGNOSTIC
|
||||
vm_page_t m;
|
||||
int i;
|
||||
#endif
|
||||
|
||||
#ifdef DEBUG
|
||||
if (swpagerdebug & SDB_FOLLOW)
|
||||
printf("swpg_getpage(%x, %x, %d)\n", pager, m, sync);
|
||||
printf("swpg_getpage(%x, %x, %x, %x)\n",
|
||||
pager, mlist, npages, sync);
|
||||
#endif
|
||||
#ifdef DIAGNOSTIC
|
||||
if (m->flags & PG_FAULTING)
|
||||
panic("swap_pager_getpage: page is already faulting");
|
||||
m->flags |= PG_FAULTING;
|
||||
for (i = 0; i < npages; i++) {
|
||||
m = mlist[i];
|
||||
|
||||
if (m->flags & PG_FAULTING)
|
||||
panic("swap_pager_getpage: page is already faulting");
|
||||
m->flags |= PG_FAULTING;
|
||||
}
|
||||
#endif
|
||||
rv = swap_pager_io((sw_pager_t)pager->pg_data, m, B_READ);
|
||||
rv = swap_pager_io((sw_pager_t)pager->pg_data, mlist, npages, B_READ);
|
||||
#ifdef DIAGNOSTIC
|
||||
m->flags &= ~PG_FAULTING;
|
||||
for (i = 0; i < npages; i++) {
|
||||
m = mlist[i];
|
||||
|
||||
m->flags &= ~PG_FAULTING;
|
||||
}
|
||||
#endif
|
||||
return(rv);
|
||||
}
|
||||
|
||||
static int
|
||||
swap_pager_putpage(pager, m, sync)
|
||||
swap_pager_putpage(pager, mlist, npages, sync)
|
||||
vm_pager_t pager;
|
||||
vm_page_t m;
|
||||
vm_page_t *mlist;
|
||||
int npages;
|
||||
boolean_t sync;
|
||||
{
|
||||
int flags;
|
||||
|
||||
#ifdef DEBUG
|
||||
if (swpagerdebug & SDB_FOLLOW)
|
||||
printf("swpg_putpage(%x, %x, %d)\n", pager, m, sync);
|
||||
printf("swpg_putpage(%x, %x, %x, %x)\n",
|
||||
pager, mlist, npages, sync);
|
||||
#endif
|
||||
if (pager == NULL) {
|
||||
(void) swap_pager_clean(NULL, B_WRITE);
|
||||
swap_pager_clean(B_WRITE);
|
||||
return (VM_PAGER_OK); /* ??? */
|
||||
}
|
||||
flags = B_WRITE;
|
||||
if (!sync)
|
||||
flags |= B_ASYNC;
|
||||
return(swap_pager_io((sw_pager_t)pager->pg_data, m, flags));
|
||||
return(swap_pager_io((sw_pager_t)pager->pg_data,
|
||||
mlist, npages, flags));
|
||||
}
|
||||
|
||||
static boolean_t
|
||||
@ -471,6 +508,42 @@ swap_pager_haspage(pager, offset)
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
static void
|
||||
swap_pager_cluster(pager, offset, loffset, hoffset)
|
||||
vm_pager_t pager;
|
||||
vm_offset_t offset;
|
||||
vm_offset_t *loffset;
|
||||
vm_offset_t *hoffset;
|
||||
{
|
||||
sw_pager_t swp;
|
||||
register int bsize;
|
||||
vm_offset_t loff, hoff;
|
||||
|
||||
#ifdef DEBUG
|
||||
if (swpagerdebug & (SDB_FOLLOW|SDB_CLUSTER))
|
||||
printf("swpg_cluster(%x, %x) ", pager, offset);
|
||||
#endif
|
||||
swp = (sw_pager_t) pager->pg_data;
|
||||
bsize = dbtob(swp->sw_bsize);
|
||||
if (bsize > swap_pager_maxcluster)
|
||||
bsize = swap_pager_maxcluster;
|
||||
|
||||
loff = offset - (offset % bsize);
|
||||
if (loff >= swp->sw_osize)
|
||||
panic("swap_pager_cluster: bad offset");
|
||||
|
||||
hoff = loff + bsize;
|
||||
if (hoff > swp->sw_osize)
|
||||
hoff = swp->sw_osize;
|
||||
|
||||
*loffset = loff;
|
||||
*hoffset = hoff;
|
||||
#ifdef DEBUG
|
||||
if (swpagerdebug & (SDB_FOLLOW|SDB_CLUSTER))
|
||||
printf("returns [%x-%x]\n", loff, hoff);
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* Scaled down version of swap().
|
||||
* Assumes that PAGE_SIZE < MAXPHYS; i.e. only one operation needed.
|
||||
@ -478,51 +551,69 @@ swap_pager_haspage(pager, offset)
|
||||
* provided physical page into the KVA to keep them happy.
|
||||
*/
|
||||
static int
|
||||
swap_pager_io(swp, m, flags)
|
||||
swap_pager_io(swp, mlist, npages, flags)
|
||||
register sw_pager_t swp;
|
||||
vm_page_t m;
|
||||
vm_page_t *mlist;
|
||||
int npages;
|
||||
int flags;
|
||||
{
|
||||
register struct buf *bp;
|
||||
register sw_blk_t swb;
|
||||
register int s;
|
||||
int ix;
|
||||
int ix, mask;
|
||||
boolean_t rv;
|
||||
vm_offset_t kva, off;
|
||||
swp_clean_t spc;
|
||||
vm_page_t m;
|
||||
|
||||
#ifdef DEBUG
|
||||
/* save panic time state */
|
||||
if ((swpagerdebug & SDB_ANOMPANIC) && panicstr)
|
||||
return (VM_PAGER_FAIL); /* XXX: correct return? */
|
||||
if (swpagerdebug & (SDB_FOLLOW|SDB_IO))
|
||||
printf("swpg_io(%x, %x, %x)\n", swp, m, flags);
|
||||
if ((flags & (B_READ|B_ASYNC)) == (B_READ|B_ASYNC))
|
||||
panic("swap_pager_io: cannot do ASYNC reads");
|
||||
printf("swpg_io(%x, %x, %x, %x)\n", swp, mlist, npages, flags);
|
||||
if (flags & B_READ) {
|
||||
if (flags & B_ASYNC)
|
||||
panic("swap_pager_io: cannot do ASYNC reads");
|
||||
if (npages != 1)
|
||||
panic("swap_pager_io: cannot do clustered reads");
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* First determine if the page exists in the pager if this is
|
||||
* a sync read. This quickly handles cases where we are
|
||||
* following shadow chains looking for the top level object
|
||||
* with the page.
|
||||
*/
|
||||
m = *mlist;
|
||||
off = m->offset + m->object->paging_offset;
|
||||
ix = off / dbtob(swp->sw_bsize);
|
||||
if (swp->sw_blocks == NULL || ix >= swp->sw_nblocks) {
|
||||
#ifdef DEBUG
|
||||
if ((flags & B_READ) == 0 && (swpagerdebug & SDB_ANOM)) {
|
||||
printf("swap_pager_io: no swap block on write\n");
|
||||
return(VM_PAGER_BAD);
|
||||
}
|
||||
#endif
|
||||
return(VM_PAGER_FAIL);
|
||||
}
|
||||
swb = &swp->sw_blocks[ix];
|
||||
off = off % dbtob(swp->sw_bsize);
|
||||
if ((flags & B_READ) &&
|
||||
(swb->swb_block == 0 || (swb->swb_mask & (1 << atop(off))) == 0))
|
||||
return(VM_PAGER_FAIL);
|
||||
|
||||
/*
|
||||
* For reads (pageins) and synchronous writes, we clean up
|
||||
* all completed async pageouts.
|
||||
*/
|
||||
if ((flags & B_ASYNC) == 0) {
|
||||
s = splbio();
|
||||
swap_pager_clean(flags&B_READ);
|
||||
#ifdef DEBUG
|
||||
/*
|
||||
* Check to see if this page is currently being cleaned.
|
||||
* If it is, we just wait til the operation is done before
|
||||
* continuing.
|
||||
*/
|
||||
while (swap_pager_clean(m, flags&B_READ)) {
|
||||
if (swpagerdebug & SDB_ANOM)
|
||||
printf("swap_pager_io: page %x cleaning\n", m);
|
||||
|
||||
swp->sw_flags |= SW_WANTED;
|
||||
assert_wait((int)swp, 0);
|
||||
thread_block();
|
||||
}
|
||||
#else
|
||||
(void) swap_pager_clean(m, flags&B_READ);
|
||||
if (swpagerdebug & SDB_PARANOIA)
|
||||
swap_pager_clean_check(mlist, npages, flags&B_READ);
|
||||
#endif
|
||||
splx(s);
|
||||
}
|
||||
@ -532,47 +623,26 @@ swap_pager_io(swp, m, flags)
|
||||
* page is already being cleaned. If it is, or no resources
|
||||
* are available, we try again later.
|
||||
*/
|
||||
else if (swap_pager_clean(m, B_WRITE) ||
|
||||
swap_pager_free.tqh_first == NULL) {
|
||||
else {
|
||||
swap_pager_clean(B_WRITE);
|
||||
#ifdef DEBUG
|
||||
if ((swpagerdebug & SDB_ANOM) &&
|
||||
swap_pager_free.tqh_first != NULL)
|
||||
printf("swap_pager_io: page %x already cleaning\n", m);
|
||||
if (swpagerdebug & SDB_PARANOIA)
|
||||
swap_pager_clean_check(mlist, npages, B_WRITE);
|
||||
#endif
|
||||
return(VM_PAGER_FAIL);
|
||||
if (swap_pager_free.tqh_first == NULL) {
|
||||
#ifdef DEBUG
|
||||
if (swpagerdebug & SDB_FAIL)
|
||||
printf("%s: no available io headers\n",
|
||||
"swap_pager_io");
|
||||
#endif
|
||||
return(VM_PAGER_AGAIN);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Determine swap block and allocate as necessary.
|
||||
* Allocate a swap block if necessary.
|
||||
*/
|
||||
off = m->offset + m->object->paging_offset;
|
||||
ix = off / dbtob(swp->sw_bsize);
|
||||
if (swp->sw_blocks == NULL || ix >= swp->sw_nblocks) {
|
||||
#ifdef DEBUG
|
||||
if (swpagerdebug & SDB_FAIL)
|
||||
printf("swpg_io: bad offset %x+%x(%d) in %x\n",
|
||||
m->offset, m->object->paging_offset,
|
||||
ix, swp->sw_blocks);
|
||||
#endif
|
||||
return(VM_PAGER_FAIL);
|
||||
}
|
||||
swb = &swp->sw_blocks[ix];
|
||||
off = off % dbtob(swp->sw_bsize);
|
||||
if (flags & B_READ) {
|
||||
if (swb->swb_block == 0 ||
|
||||
(swb->swb_mask & (1 << atop(off))) == 0) {
|
||||
#ifdef DEBUG
|
||||
if (swpagerdebug & (SDB_ALLOCBLK|SDB_FAIL))
|
||||
printf("swpg_io: %x bad read: blk %x+%x, mask %x, off %x+%x\n",
|
||||
swp->sw_blocks,
|
||||
swb->swb_block, atop(off),
|
||||
swb->swb_mask,
|
||||
m->offset, m->object->paging_offset);
|
||||
#endif
|
||||
/* XXX: should we zero page here?? */
|
||||
return(VM_PAGER_FAIL);
|
||||
}
|
||||
} else if (swb->swb_block == 0) {
|
||||
if (swb->swb_block == 0) {
|
||||
swb->swb_block = rmalloc(swapmap, swp->sw_bsize);
|
||||
if (swb->swb_block == 0) {
|
||||
#ifdef DEBUG
|
||||
@ -580,6 +650,13 @@ swap_pager_io(swp, m, flags)
|
||||
printf("swpg_io: rmalloc of %x failed\n",
|
||||
swp->sw_bsize);
|
||||
#endif
|
||||
/*
|
||||
* XXX this is technically a resource shortage that
|
||||
* should return AGAIN, but the situation isn't likely
|
||||
* to be remedied just by delaying a little while and
|
||||
* trying again (the pageout daemon's current response
|
||||
* to AGAIN) so we just return FAIL.
|
||||
*/
|
||||
return(VM_PAGER_FAIL);
|
||||
}
|
||||
#ifdef DEBUG
|
||||
@ -593,10 +670,18 @@ swap_pager_io(swp, m, flags)
|
||||
* Allocate a kernel virtual address and initialize so that PTE
|
||||
* is available for lower level IO drivers.
|
||||
*/
|
||||
kva = vm_pager_map_page(m);
|
||||
kva = vm_pager_map_pages(mlist, npages, !(flags & B_ASYNC));
|
||||
if (kva == NULL) {
|
||||
#ifdef DEBUG
|
||||
if (swpagerdebug & SDB_FAIL)
|
||||
printf("%s: no KVA space to map pages\n",
|
||||
"swap_pager_io");
|
||||
#endif
|
||||
return(VM_PAGER_AGAIN);
|
||||
}
|
||||
|
||||
/*
|
||||
* Get a swap buffer header and perform the IO
|
||||
* Get a swap buffer header and initialize it.
|
||||
*/
|
||||
s = splbio();
|
||||
while (bswlist.b_actf == NULL) {
|
||||
@ -606,30 +691,51 @@ swap_pager_io(swp, m, flags)
|
||||
m, flags);
|
||||
#endif
|
||||
bswlist.b_flags |= B_WANTED;
|
||||
tsleep((caddr_t)&bswlist, PSWP+1, "swpgio", 0);
|
||||
tsleep((caddr_t)&bswlist, PSWP+1, "swpgiobuf", 0);
|
||||
}
|
||||
bp = bswlist.b_actf;
|
||||
bswlist.b_actf = bp->b_actf;
|
||||
splx(s);
|
||||
bp->b_flags = B_BUSY | (flags & B_READ);
|
||||
bp->b_proc = &proc0; /* XXX (but without B_PHYS set this is ok) */
|
||||
/* No need for crhold(), as we hope that proc0 won't go away soon */
|
||||
bp->b_rcred = bp->b_wcred = proc0.p_ucred;
|
||||
bp->b_un.b_addr = (caddr_t)kva;
|
||||
bp->b_data = (caddr_t)kva;
|
||||
bp->b_blkno = swb->swb_block + btodb(off);
|
||||
VHOLD(swapdev_vp);
|
||||
bp->b_vp = swapdev_vp;
|
||||
if (swapdev_vp->v_type == VBLK)
|
||||
bp->b_dev = swapdev_vp->v_rdev;
|
||||
bp->b_bcount = PAGE_SIZE;
|
||||
if ((bp->b_flags & B_READ) == 0) {
|
||||
bp->b_dirtyoff = 0;
|
||||
bp->b_dirtyend = PAGE_SIZE;
|
||||
swapdev_vp->v_numoutput++;
|
||||
}
|
||||
bp->b_bcount = npages * PAGE_SIZE;
|
||||
|
||||
/*
|
||||
* If this is an async write we set up additional buffer fields
|
||||
* For writes we set up additional buffer fields, record a pageout
|
||||
* in progress and mark that these swap blocks are now allocated.
|
||||
*/
|
||||
if ((bp->b_flags & B_READ) == 0) {
|
||||
bp->b_dirtyoff = 0;
|
||||
bp->b_dirtyend = npages * PAGE_SIZE;
|
||||
swapdev_vp->v_numoutput++;
|
||||
s = splbio();
|
||||
swp->sw_poip++;
|
||||
splx(s);
|
||||
mask = (~(~0 << npages)) << atop(off);
|
||||
#ifdef DEBUG
|
||||
swap_pager_poip++;
|
||||
if (swpagerdebug & SDB_WRITE)
|
||||
printf("swpg_io: write: bp=%x swp=%x poip=%d\n",
|
||||
bp, swp, swp->sw_poip);
|
||||
if ((swpagerdebug & SDB_ALLOCBLK) &&
|
||||
(swb->swb_mask & mask) != mask)
|
||||
printf("swpg_io: %x write %d pages at %x+%x\n",
|
||||
swp->sw_blocks, npages, swb->swb_block,
|
||||
atop(off));
|
||||
if (swpagerdebug & SDB_CLUSTER)
|
||||
printf("swpg_io: off=%x, npg=%x, mask=%x, bmask=%x\n",
|
||||
off, npages, mask, swb->swb_mask);
|
||||
#endif
|
||||
swb->swb_mask |= mask;
|
||||
}
|
||||
/*
|
||||
* If this is an async write we set up still more buffer fields
|
||||
* and place a "cleaning" entry on the inuse queue.
|
||||
*/
|
||||
if ((flags & (B_READ|B_ASYNC)) == B_ASYNC) {
|
||||
@ -647,26 +753,26 @@ swap_pager_io(swp, m, flags)
|
||||
spc->spc_bp = bp;
|
||||
spc->spc_swp = swp;
|
||||
spc->spc_kva = kva;
|
||||
/*
|
||||
* Record the first page. This allows swap_pager_clean
|
||||
* to efficiently handle the common case of a single page.
|
||||
* For clusters, it allows us to locate the object easily
|
||||
* and we then reconstruct the rest of the mlist from spc_kva.
|
||||
*/
|
||||
spc->spc_m = m;
|
||||
spc->spc_npages = npages;
|
||||
bp->b_flags |= B_CALL;
|
||||
bp->b_iodone = swap_pager_iodone;
|
||||
s = splbio();
|
||||
swp->sw_poip++;
|
||||
TAILQ_INSERT_TAIL(&swap_pager_inuse, spc, spc_list);
|
||||
|
||||
#ifdef DEBUG
|
||||
swap_pager_poip++;
|
||||
if (swpagerdebug & SDB_WRITE)
|
||||
printf("swpg_io: write: bp=%x swp=%x spc=%x poip=%d\n",
|
||||
bp, swp, spc, swp->sw_poip);
|
||||
if ((swpagerdebug & SDB_ALLOCBLK) &&
|
||||
(swb->swb_mask & (1 << atop(off))) == 0)
|
||||
printf("swpg_io: %x write blk %x+%x\n",
|
||||
swp->sw_blocks, swb->swb_block, atop(off));
|
||||
#endif
|
||||
swb->swb_mask |= (1 << atop(off));
|
||||
splx(s);
|
||||
}
|
||||
|
||||
/*
|
||||
* Finally, start the IO operation.
|
||||
* If it is async we are all done, otherwise we must wait for
|
||||
* completion and cleanup afterwards.
|
||||
*/
|
||||
#ifdef DEBUG
|
||||
if (swpagerdebug & SDB_IO)
|
||||
printf("swpg_io: IO start: bp %x, db %x, va %x, pa %x\n",
|
||||
@ -687,10 +793,10 @@ swap_pager_io(swp, m, flags)
|
||||
else
|
||||
swap_pager_poip++;
|
||||
#endif
|
||||
while ((bp->b_flags & B_DONE) == 0) {
|
||||
assert_wait((int)bp, 0);
|
||||
thread_block();
|
||||
}
|
||||
while ((bp->b_flags & B_DONE) == 0)
|
||||
(void) tsleep(bp, PVM, "swpgio", 0);
|
||||
if ((flags & B_READ) == 0)
|
||||
--swp->sw_poip;
|
||||
#ifdef DEBUG
|
||||
if (flags & B_READ)
|
||||
--swap_pager_piip;
|
||||
@ -705,7 +811,7 @@ swap_pager_io(swp, m, flags)
|
||||
brelvp(bp);
|
||||
if (bswlist.b_flags & B_WANTED) {
|
||||
bswlist.b_flags &= ~B_WANTED;
|
||||
thread_wakeup((int)&bswlist);
|
||||
wakeup(&bswlist);
|
||||
}
|
||||
if ((flags & B_READ) == 0 && rv == VM_PAGER_OK) {
|
||||
m->flags |= PG_CLEAN;
|
||||
@ -718,26 +824,27 @@ swap_pager_io(swp, m, flags)
|
||||
if ((swpagerdebug & SDB_FAIL) && rv == VM_PAGER_ERROR)
|
||||
printf("swpg_io: IO error\n");
|
||||
#endif
|
||||
vm_pager_unmap_page(kva);
|
||||
vm_pager_unmap_pages(kva, npages);
|
||||
return(rv);
|
||||
}
|
||||
|
||||
static boolean_t
|
||||
swap_pager_clean(m, rw)
|
||||
vm_page_t m;
|
||||
static void
|
||||
swap_pager_clean(rw)
|
||||
int rw;
|
||||
{
|
||||
register swp_clean_t spc, tspc;
|
||||
register int s;
|
||||
register swp_clean_t spc;
|
||||
register int s, i;
|
||||
vm_object_t object;
|
||||
vm_page_t m;
|
||||
|
||||
#ifdef DEBUG
|
||||
/* save panic time state */
|
||||
if ((swpagerdebug & SDB_ANOMPANIC) && panicstr)
|
||||
return (FALSE); /* ??? */
|
||||
return;
|
||||
if (swpagerdebug & SDB_FOLLOW)
|
||||
printf("swpg_clean(%x, %d)\n", m, rw);
|
||||
printf("swpg_clean(%x)\n", rw);
|
||||
#endif
|
||||
tspc = NULL;
|
||||
|
||||
for (;;) {
|
||||
/*
|
||||
* Look up and removal from inuse list must be done
|
||||
@ -747,19 +854,19 @@ swap_pager_clean(m, rw)
|
||||
for (spc = swap_pager_inuse.tqh_first;
|
||||
spc != NULL;
|
||||
spc = spc->spc_list.tqe_next) {
|
||||
/*
|
||||
* If the operation is done, remove it from the
|
||||
* list and process it.
|
||||
*
|
||||
* XXX if we can't get the object lock we also
|
||||
* leave it on the list and try again later.
|
||||
* Is there something better we could do?
|
||||
*/
|
||||
if ((spc->spc_flags & SPC_DONE) &&
|
||||
swap_pager_finish(spc)) {
|
||||
vm_object_lock_try(spc->spc_m->object)) {
|
||||
TAILQ_REMOVE(&swap_pager_inuse, spc, spc_list);
|
||||
break;
|
||||
}
|
||||
if (m && m == spc->spc_m) {
|
||||
#ifdef DEBUG
|
||||
if (swpagerdebug & SDB_ANOM)
|
||||
printf("swap_pager_clean: page %x on list, flags %x\n",
|
||||
m, spc->spc_flags);
|
||||
#endif
|
||||
tspc = spc;
|
||||
}
|
||||
}
|
||||
splx(s);
|
||||
|
||||
@ -770,106 +877,99 @@ swap_pager_clean(m, rw)
|
||||
break;
|
||||
|
||||
/*
|
||||
* The desired page was found to be busy earlier in
|
||||
* the scan but has since completed.
|
||||
* Found a completed operation so finish it off.
|
||||
* Note: no longer at splbio since entry is off the list.
|
||||
*/
|
||||
if (tspc && tspc == spc) {
|
||||
#ifdef DEBUG
|
||||
if (swpagerdebug & SDB_ANOM)
|
||||
printf("swap_pager_clean: page %x done while looking\n",
|
||||
m);
|
||||
#endif
|
||||
tspc = NULL;
|
||||
m = spc->spc_m;
|
||||
object = m->object;
|
||||
|
||||
/*
|
||||
* Process each page in the cluster.
|
||||
* The first page is explicitly kept in the cleaning
|
||||
* entry, others must be reconstructed from the KVA.
|
||||
*/
|
||||
for (i = 0; i < spc->spc_npages; i++) {
|
||||
if (i)
|
||||
m = vm_pager_atop(spc->spc_kva + ptoa(i));
|
||||
/*
|
||||
* If no error mark as clean and inform the pmap
|
||||
* system. If there was an error, mark as dirty
|
||||
* so we will try again.
|
||||
*
|
||||
* XXX could get stuck doing this, should give up
|
||||
* after awhile.
|
||||
*/
|
||||
if (spc->spc_flags & SPC_ERROR) {
|
||||
printf("%s: clean of page %x failed\n",
|
||||
"swap_pager_clean",
|
||||
VM_PAGE_TO_PHYS(m));
|
||||
m->flags |= PG_LAUNDRY;
|
||||
} else {
|
||||
m->flags |= PG_CLEAN;
|
||||
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
|
||||
}
|
||||
m->flags &= ~PG_BUSY;
|
||||
PAGE_WAKEUP(m);
|
||||
}
|
||||
|
||||
/*
|
||||
* Done with the object, decrement the paging count
|
||||
* and unlock it.
|
||||
*/
|
||||
if (--object->paging_in_progress == 0)
|
||||
wakeup(object);
|
||||
vm_object_unlock(object);
|
||||
|
||||
/*
|
||||
* Free up KVM used and put the entry back on the list.
|
||||
*/
|
||||
vm_pager_unmap_pages(spc->spc_kva, spc->spc_npages);
|
||||
spc->spc_flags = SPC_FREE;
|
||||
vm_pager_unmap_page(spc->spc_kva);
|
||||
TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list);
|
||||
#ifdef DEBUG
|
||||
if (swpagerdebug & SDB_WRITE)
|
||||
printf("swpg_clean: free spc %x\n", spc);
|
||||
#endif
|
||||
}
|
||||
#ifdef DEBUG
|
||||
/*
|
||||
* If we found that the desired page is already being cleaned
|
||||
* mark it so that swap_pager_iodone() will not set the clean
|
||||
* flag before the pageout daemon has another chance to clean it.
|
||||
*/
|
||||
if (tspc && rw == B_WRITE) {
|
||||
if (swpagerdebug & SDB_ANOM)
|
||||
printf("swap_pager_clean: page %x on clean list\n",
|
||||
tspc);
|
||||
tspc->spc_flags |= SPC_DIRTY;
|
||||
}
|
||||
#endif
|
||||
splx(s);
|
||||
|
||||
#ifdef DEBUG
|
||||
if (swpagerdebug & SDB_WRITE)
|
||||
printf("swpg_clean: return %d\n", tspc ? TRUE : FALSE);
|
||||
if ((swpagerdebug & SDB_ANOM) && tspc)
|
||||
printf("swpg_clean: %s of cleaning page %x\n",
|
||||
rw == B_READ ? "get" : "put", m);
|
||||
#endif
|
||||
return(tspc ? TRUE : FALSE);
|
||||
}
|
||||
|
||||
static int
|
||||
swap_pager_finish(spc)
|
||||
register swp_clean_t spc;
|
||||
#ifdef DEBUG
|
||||
static void
|
||||
swap_pager_clean_check(mlist, npages, rw)
|
||||
vm_page_t *mlist;
|
||||
int npages;
|
||||
int rw;
|
||||
{
|
||||
vm_object_t object = spc->spc_m->object;
|
||||
register swp_clean_t spc;
|
||||
boolean_t bad;
|
||||
int i, j, s;
|
||||
vm_page_t m;
|
||||
|
||||
/*
|
||||
* Mark the paging operation as done.
|
||||
* (XXX) If we cannot get the lock, leave it til later.
|
||||
* (XXX) Also we are assuming that an async write is a
|
||||
* pageout operation that has incremented the counter.
|
||||
*/
|
||||
if (!vm_object_lock_try(object))
|
||||
return(0);
|
||||
if (panicstr)
|
||||
return;
|
||||
|
||||
if (--object->paging_in_progress == 0)
|
||||
thread_wakeup((int) object);
|
||||
|
||||
#ifdef DEBUG
|
||||
/*
|
||||
* XXX: this isn't even close to the right thing to do,
|
||||
* introduces a variety of race conditions.
|
||||
*
|
||||
* If dirty, vm_pageout() has attempted to clean the page
|
||||
* again. In this case we do not do anything as we will
|
||||
* see the page again shortly.
|
||||
*/
|
||||
if (spc->spc_flags & SPC_DIRTY) {
|
||||
if (swpagerdebug & SDB_ANOM)
|
||||
printf("swap_pager_finish: page %x dirty again\n",
|
||||
spc->spc_m);
|
||||
spc->spc_m->flags &= ~PG_BUSY;
|
||||
PAGE_WAKEUP(spc->spc_m);
|
||||
vm_object_unlock(object);
|
||||
return(1);
|
||||
bad = FALSE;
|
||||
s = splbio();
|
||||
for (spc = swap_pager_inuse.tqh_first;
|
||||
spc != NULL;
|
||||
spc = spc->spc_list.tqe_next) {
|
||||
for (j = 0; j < spc->spc_npages; j++) {
|
||||
m = vm_pager_atop(spc->spc_kva + ptoa(j));
|
||||
for (i = 0; i < npages; i++)
|
||||
if (m == mlist[i]) {
|
||||
if (swpagerdebug & SDB_ANOM)
|
||||
printf(
|
||||
"swpg_clean_check: %s: page %x on list, flags %x\n",
|
||||
rw == B_WRITE ? "write" : "read", mlist[i], spc->spc_flags);
|
||||
bad = TRUE;
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
/*
|
||||
* If no error mark as clean and inform the pmap system.
|
||||
* If error, mark as dirty so we will try again.
|
||||
* (XXX could get stuck doing this, should give up after awhile)
|
||||
*/
|
||||
if (spc->spc_flags & SPC_ERROR) {
|
||||
printf("swap_pager_finish: clean of page %x failed\n",
|
||||
VM_PAGE_TO_PHYS(spc->spc_m));
|
||||
spc->spc_m->flags |= PG_LAUNDRY;
|
||||
} else {
|
||||
spc->spc_m->flags |= PG_CLEAN;
|
||||
pmap_clear_modify(VM_PAGE_TO_PHYS(spc->spc_m));
|
||||
}
|
||||
spc->spc_m->flags &= ~PG_BUSY;
|
||||
PAGE_WAKEUP(spc->spc_m);
|
||||
|
||||
vm_object_unlock(object);
|
||||
return(1);
|
||||
splx(s);
|
||||
if (bad)
|
||||
panic("swpg_clean_check");
|
||||
}
|
||||
#endif
|
||||
|
||||
static void
|
||||
swap_pager_iodone(bp)
|
||||
@ -915,7 +1015,7 @@ swap_pager_iodone(bp)
|
||||
spc->spc_swp->sw_poip--;
|
||||
if (spc->spc_swp->sw_flags & SW_WANTED) {
|
||||
spc->spc_swp->sw_flags &= ~SW_WANTED;
|
||||
thread_wakeup((int)spc->spc_swp);
|
||||
wakeup(spc->spc_swp);
|
||||
}
|
||||
|
||||
bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_PAGET|B_UAREA|B_DIRTY);
|
||||
@ -925,8 +1025,8 @@ swap_pager_iodone(bp)
|
||||
brelvp(bp);
|
||||
if (bswlist.b_flags & B_WANTED) {
|
||||
bswlist.b_flags &= ~B_WANTED;
|
||||
thread_wakeup((int)&bswlist);
|
||||
wakeup(&bswlist);
|
||||
}
|
||||
thread_wakeup((int) &vm_pages_needed);
|
||||
wakeup(&vm_pages_needed);
|
||||
splx(s);
|
||||
}
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1991 Regents of the University of California.
|
||||
* All rights reserved.
|
||||
* Copyright (c) 1991, 1993
|
||||
* The Regents of the University of California. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
@ -30,8 +30,8 @@
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* from: @(#)vm.h 7.1 (Berkeley) 5/5/91
|
||||
* $Id: vm.h,v 1.10 1994/04/15 07:04:46 cgd Exp $
|
||||
* from: @(#)vm.h 8.2 (Berkeley) 12/13/93
|
||||
* $Id: vm.h,v 1.11 1994/05/23 03:11:34 cgd Exp $
|
||||
*/
|
||||
|
||||
#ifndef VM_H
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1991 Regents of the University of California.
|
||||
* All rights reserved.
|
||||
/*-
|
||||
* Copyright (c) 1992, 1993
|
||||
* The Regents of the University of California. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
@ -30,8 +30,8 @@
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* from: @(#)vm.h 7.1 (Berkeley) 5/5/91
|
||||
* $Id: vm_extern.h,v 1.4 1994/05/06 22:45:29 cgd Exp $
|
||||
* from: @(#)vm_extern.h 8.2 (Berkeley) 1/12/94
|
||||
* $Id: vm_extern.h,v 1.5 1994/05/23 03:11:35 cgd Exp $
|
||||
*/
|
||||
|
||||
struct buf;
|
||||
@ -42,25 +42,85 @@ struct vmtotal;
|
||||
struct mount;
|
||||
struct vnode;
|
||||
|
||||
struct vmspace *vmspace_alloc __P((vm_offset_t min, vm_offset_t max,
|
||||
int pageable));
|
||||
struct vmspace *vmspace_fork __P((struct vmspace *));
|
||||
void vmspace_free __P((struct vmspace *));
|
||||
#ifdef KGDB
|
||||
void chgkprot __P((caddr_t, int, int));
|
||||
#endif
|
||||
|
||||
int vm_fault __P((vm_map_t, vm_offset_t, vm_prot_t, boolean_t));
|
||||
void vm_fault_wire __P((vm_map_t, vm_offset_t, vm_offset_t));
|
||||
void vm_fault_unwire __P((vm_map_t, vm_offset_t, vm_offset_t));
|
||||
void vm_fault_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t));
|
||||
|
||||
void vslock __P((caddr_t, u_int));
|
||||
void vsunlock __P((caddr_t, u_int, int));
|
||||
|
||||
void swapinit __P((void));
|
||||
int swfree __P((struct proc *, int));
|
||||
void swstrategy __P((struct buf *));
|
||||
#ifdef KERNEL
|
||||
#ifdef TYPEDEF_FOR_UAP
|
||||
int ogetpagesize __P((struct proc *p, void *, int *));
|
||||
int madvise __P((struct proc *, void *, int *));
|
||||
int mincore __P((struct proc *, void *, int *));
|
||||
int mprotect __P((struct proc *, void *, int *));
|
||||
int msync __P((struct proc *, void *, int *));
|
||||
int munmap __P((struct proc *, void *, int *));
|
||||
int obreak __P((struct proc *, void *, int *));
|
||||
int sbrk __P((struct proc *, void *, int *));
|
||||
int smmap __P((struct proc *, void *, int *));
|
||||
int sstk __P((struct proc *, void *, int *));
|
||||
#endif
|
||||
|
||||
void assert_wait __P((int, boolean_t));
|
||||
int grow __P((struct proc *, u_int));
|
||||
void iprintf __P((void (*)(const char *, ...), const char *, ...));
|
||||
int kernacc __P((caddr_t, int, int));
|
||||
int kinfo_loadavg __P((int, char *, int *, int, int *));
|
||||
int kinfo_meter __P((int, caddr_t, int *, int, int *));
|
||||
vm_offset_t kmem_alloc __P((vm_map_t, vm_size_t));
|
||||
vm_offset_t kmem_alloc_pageable __P((vm_map_t, vm_size_t));
|
||||
vm_offset_t kmem_alloc_wait __P((vm_map_t, vm_size_t));
|
||||
void kmem_free __P((vm_map_t, vm_offset_t, vm_size_t));
|
||||
void kmem_free_wakeup __P((vm_map_t, vm_offset_t, vm_size_t));
|
||||
void kmem_init __P((vm_offset_t, vm_offset_t));
|
||||
vm_offset_t kmem_malloc __P((vm_map_t, vm_size_t, boolean_t));
|
||||
vm_map_t kmem_suballoc __P((vm_map_t, vm_offset_t *, vm_offset_t *,
|
||||
vm_size_t, boolean_t));
|
||||
void loadav __P((struct loadavg *));
|
||||
void munmapfd __P((int));
|
||||
int pager_cache __P((vm_object_t, boolean_t));
|
||||
void sched __P((void));
|
||||
int svm_allocate __P((struct proc *, void *, int *));
|
||||
int svm_deallocate __P((struct proc *, void *, int *));
|
||||
int svm_inherit __P((struct proc *, void *, int *));
|
||||
int svm_protect __P((struct proc *, void *, int *));
|
||||
void swapinit __P((void));
|
||||
int swapon __P((struct proc *, void *, int *));
|
||||
void swapout __P((struct proc *));
|
||||
void swapout_threads __P((void));
|
||||
int swfree __P((struct proc *, int));
|
||||
void swstrategy __P((struct buf *));
|
||||
void thread_block __P((void));
|
||||
void thread_sleep __P((int, simple_lock_t, boolean_t));
|
||||
void thread_wakeup __P((int));
|
||||
int useracc __P((caddr_t, int, int));
|
||||
int vm_allocate __P((vm_map_t,
|
||||
vm_offset_t *, vm_size_t, boolean_t));
|
||||
int vm_allocate_with_pager __P((vm_map_t, vm_offset_t *,
|
||||
vm_size_t, boolean_t, vm_pager_t, vm_offset_t, boolean_t));
|
||||
int vm_deallocate __P((vm_map_t, vm_offset_t, vm_size_t));
|
||||
int vm_fault __P((vm_map_t, vm_offset_t, vm_prot_t, boolean_t));
|
||||
void vm_fault_copy_entry __P((vm_map_t,
|
||||
vm_map_t, vm_map_entry_t, vm_map_entry_t));
|
||||
void vm_fault_unwire __P((vm_map_t, vm_offset_t, vm_offset_t));
|
||||
int vm_fault_wire __P((vm_map_t, vm_offset_t, vm_offset_t));
|
||||
int vm_fork __P((struct proc *, struct proc *, int));
|
||||
int vm_inherit __P((vm_map_t,
|
||||
vm_offset_t, vm_size_t, vm_inherit_t));
|
||||
void vm_init_limits __P((struct proc *));
|
||||
void vm_mem_init __P((void));
|
||||
int vm_mmap __P((vm_map_t, vm_offset_t *, vm_size_t,
|
||||
vm_prot_t, vm_prot_t, int, caddr_t, vm_offset_t));
|
||||
int vm_protect __P((vm_map_t,
|
||||
vm_offset_t, vm_size_t, boolean_t, vm_prot_t));
|
||||
void vm_set_page_size __P((void));
|
||||
void vmmeter __P((void));
|
||||
struct vmspace *vmspace_alloc __P((vm_offset_t, vm_offset_t, int));
|
||||
struct vmspace *vmspace_fork __P((struct vmspace *));
|
||||
void vmspace_free __P((struct vmspace *));
|
||||
void vmtotal __P((struct vmtotal *));
|
||||
|
||||
int vm_sysctl __P((int *, u_int, void *, size_t *, void *, size_t,
|
||||
struct proc *));
|
||||
void vnode_pager_setsize __P((struct vnode *, u_long));
|
||||
void vnode_pager_umount __P((struct mount *));
|
||||
boolean_t vnode_pager_uncache __P((struct vnode *));
|
||||
void vslock __P((caddr_t, u_int));
|
||||
void vsunlock __P((caddr_t, u_int, int));
|
||||
#endif
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1991 Regents of the University of California.
|
||||
* All rights reserved.
|
||||
* Copyright (c) 1991, 1993
|
||||
* The Regents of the University of California. All rights reserved.
|
||||
*
|
||||
* This code is derived from software contributed to Berkeley by
|
||||
* The Mach Operating System project at Carnegie-Mellon University.
|
||||
@ -33,8 +33,8 @@
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* from: @(#)vm_fault.c 7.6 (Berkeley) 5/7/91
|
||||
* $Id: vm_fault.c,v 1.12 1994/04/15 07:04:48 cgd Exp $
|
||||
* from: @(#)vm_fault.c 8.4 (Berkeley) 1/12/94
|
||||
* $Id: vm_fault.c,v 1.13 1994/05/23 03:11:37 cgd Exp $
|
||||
*
|
||||
*
|
||||
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
|
||||
@ -68,6 +68,7 @@
|
||||
*/
|
||||
|
||||
#include <sys/param.h>
|
||||
#include <sys/systm.h>
|
||||
|
||||
#include <vm/vm.h>
|
||||
#include <vm/vm_page.h>
|
||||
@ -114,7 +115,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
|
||||
vm_page_t old_m;
|
||||
vm_object_t next_object;
|
||||
|
||||
cnt.v_vm_faults++; /* needs lock XXX */
|
||||
cnt.v_faults++; /* needs lock XXX */
|
||||
/*
|
||||
* Recovery actions
|
||||
*/
|
||||
@ -255,7 +256,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
|
||||
#else
|
||||
PAGE_ASSERT_WAIT(m, !change_wiring);
|
||||
UNLOCK_THINGS;
|
||||
thread_wakeup(&vm_pages_needed); /* XXX! */
|
||||
cnt.v_intrans++;
|
||||
thread_block();
|
||||
vm_object_deallocate(first_object);
|
||||
goto RetryFault;
|
||||
@ -307,8 +308,7 @@ thread_wakeup(&vm_pages_needed); /* XXX! */
|
||||
}
|
||||
}
|
||||
|
||||
if ((object->pager != NULL) &&
|
||||
(!change_wiring || wired)) {
|
||||
if (object->pager != NULL && (!change_wiring || wired)) {
|
||||
int rv;
|
||||
|
||||
/*
|
||||
@ -322,15 +322,20 @@ thread_wakeup(&vm_pages_needed); /* XXX! */
|
||||
* after releasing the lock on the map.
|
||||
*/
|
||||
UNLOCK_MAP;
|
||||
|
||||
cnt.v_pageins++;
|
||||
rv = vm_pager_get(object->pager, m, TRUE);
|
||||
if (rv == VM_PAGER_OK) {
|
||||
/*
|
||||
* Found the page.
|
||||
* Leave it busy while we play with it.
|
||||
*/
|
||||
vm_object_lock(object);
|
||||
|
||||
/*
|
||||
* Reaquire the object lock to preserve our
|
||||
* invariant.
|
||||
*/
|
||||
vm_object_lock(object);
|
||||
|
||||
/*
|
||||
* Found the page.
|
||||
* Leave it busy while we play with it.
|
||||
*/
|
||||
if (rv == VM_PAGER_OK) {
|
||||
/*
|
||||
* Relookup in case pager changed page.
|
||||
* Pager is responsible for disposition
|
||||
@ -338,43 +343,38 @@ thread_wakeup(&vm_pages_needed); /* XXX! */
|
||||
*/
|
||||
m = vm_page_lookup(object, offset);
|
||||
|
||||
cnt.v_pageins++;
|
||||
cnt.v_pgpgin++;
|
||||
m->flags &= ~PG_FAKE;
|
||||
m->flags |= PG_CLEAN;
|
||||
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* Remove the bogus page (which does not
|
||||
* exist at this object/offset); before
|
||||
* doing so, we must get back our object
|
||||
* lock to preserve our invariant.
|
||||
*
|
||||
* Also wake up any other thread that may want
|
||||
* to bring in this page.
|
||||
*
|
||||
* If this is the top-level object, we must
|
||||
* leave the busy page to prevent another
|
||||
* thread from rushing past us, and inserting
|
||||
* the page in that object at the same time
|
||||
* that we are.
|
||||
* IO error or page outside the range of the pager:
|
||||
* cleanup and return an error.
|
||||
*/
|
||||
|
||||
vm_object_lock(object);
|
||||
/*
|
||||
* Data outside the range of the pager; an error
|
||||
*/
|
||||
if (rv == VM_PAGER_BAD) {
|
||||
if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
|
||||
FREE_PAGE(m);
|
||||
UNLOCK_AND_DEALLOCATE;
|
||||
return(KERN_PROTECTION_FAILURE); /* XXX */
|
||||
}
|
||||
/*
|
||||
* rv == VM_PAGER_FAIL:
|
||||
*
|
||||
* Page does not exist at this object/offset.
|
||||
* Free the bogus page (waking up anyone waiting
|
||||
* for it) and continue on to the next object.
|
||||
*
|
||||
* If this is the top-level object, we must
|
||||
* leave the busy page to prevent another
|
||||
* thread from rushing past us, and inserting
|
||||
* the page in that object at the same time
|
||||
* that we are.
|
||||
*/
|
||||
if (object != first_object) {
|
||||
FREE_PAGE(m);
|
||||
/*
|
||||
* XXX - we cannot just fall out at this
|
||||
* point, m has been freed and is invalid!
|
||||
*/
|
||||
/* note that `m' is not used after this */
|
||||
}
|
||||
}
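Condensed, the new pager-result handling above amounts to the dispatch below. This is a reader's sketch of the hunk, not additional patch content; the locking and the fault handler's FREE_PAGE/UNLOCK_AND_DEALLOCATE macros are assumed to be in scope as they are in vm_fault().

	/* Sketch only: rv is the value returned by vm_pager_get(). */
	if (rv == VM_PAGER_OK) {
		/* Pager may have substituted a page; look it up again. */
		m = vm_page_lookup(object, offset);
		cnt.v_pgpgin++;
		m->flags &= ~PG_FAKE;
	} else if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
		/* Hard I/O error, or offset outside the pager: fail the fault. */
		FREE_PAGE(m);
		UNLOCK_AND_DEALLOCATE;
		return (KERN_PROTECTION_FAILURE);	/* XXX */
	} else {
		/*
		 * VM_PAGER_FAIL: no page at this object/offset.  Free the
		 * placeholder (unless this is the top-level object, whose
		 * busy page keeps racing faulters out) and fall through to
		 * the next object in the shadow chain.
		 */
		if (object != first_object)
			FREE_PAGE(m);
	}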
|
||||
|
||||
@ -424,7 +424,7 @@ thread_wakeup(&vm_pages_needed); /* XXX! */
|
||||
}
|
||||
|
||||
if ((m->flags & (PG_ACTIVE | PG_INACTIVE | PG_BUSY)) != PG_BUSY)
|
||||
panic("vm_fault: active or inactive or !busy after main loop");
|
||||
panic("vm_fault: active, inactive or !busy after main loop");
|
||||
|
||||
/*
|
||||
* PAGE HAS BEEN FOUND.
|
||||
@ -522,12 +522,12 @@ thread_wakeup(&vm_pages_needed); /* XXX! */
|
||||
object->paging_in_progress++;
|
||||
}
|
||||
else {
|
||||
prot &= (~VM_PROT_WRITE);
|
||||
prot &= ~VM_PROT_WRITE;
|
||||
m->flags |= PG_COPYONWRITE;
|
||||
}
|
||||
}
|
||||
|
||||
if (m->flags & (PG_ACTIVE | PG_INACTIVE))
|
||||
if (m->flags & (PG_ACTIVE|PG_INACTIVE))
|
||||
panic("vm_fault: active or inactive before copy object handling");
|
||||
|
||||
/*
|
||||
@ -601,7 +601,6 @@ thread_wakeup(&vm_pages_needed); /* XXX! */
|
||||
copy_object->ref_count--;
|
||||
vm_object_unlock(copy_object);
|
||||
UNLOCK_THINGS;
|
||||
thread_wakeup(&vm_pages_needed); /* XXX */
|
||||
thread_block();
|
||||
vm_object_deallocate(first_object);
|
||||
goto RetryFault;
|
||||
@ -853,14 +852,14 @@ thread_wakeup(&vm_pages_needed); /* XXX */
|
||||
*
|
||||
* Wire down a range of virtual addresses in a map.
|
||||
*/
|
||||
void
|
||||
int
|
||||
vm_fault_wire(map, start, end)
|
||||
vm_map_t map;
|
||||
vm_offset_t start, end;
|
||||
{
|
||||
|
||||
register vm_offset_t va;
|
||||
register pmap_t pmap;
|
||||
int rv;
|
||||
|
||||
pmap = vm_map_pmap(map);
|
||||
|
||||
@ -878,8 +877,14 @@ vm_fault_wire(map, start, end)
|
||||
*/
|
||||
|
||||
for (va = start; va < end; va += PAGE_SIZE) {
|
||||
(void) vm_fault(map, va, VM_PROT_NONE, TRUE);
|
||||
rv = vm_fault(map, va, VM_PROT_NONE, TRUE);
|
||||
if (rv) {
|
||||
if (va != start)
|
||||
vm_fault_unwire(map, start, va);
|
||||
return(rv);
|
||||
}
|
||||
}
|
||||
return(KERN_SUCCESS);
|
||||
}
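With vm_fault_wire() now returning a status and unwiring whatever it had managed to wire before a failure, callers see an all-or-nothing contract. A hypothetical caller (wire_region is an illustrative name, not part of the patch) would look like this:

	int
	wire_region(map, start, end)
		vm_map_t map;
		vm_offset_t start, end;
	{
		int rv;

		rv = vm_fault_wire(map, start, end);
		if (rv)
			return (rv);	/* nothing in [start, end) is left wired */
		/* ... safe to touch the range without taking a fault ... */
		vm_fault_unwire(map, start, end);
		return (KERN_SUCCESS);
	}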
|
||||
|
||||
|
||||
@ -937,7 +942,6 @@ vm_fault_unwire(map, start, end)
|
||||
* The source map entry must be wired down (or be a sharing map
|
||||
* entry corresponding to a main map entry that is wired down).
|
||||
*/
|
||||
|
||||
void
|
||||
vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
|
||||
vm_map_t dst_map;
|
||||
@ -957,7 +961,7 @@ vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
|
||||
|
||||
#ifdef lint
|
||||
src_map++;
|
||||
#endif lint
|
||||
#endif
|
||||
|
||||
src_object = src_entry->object.vm_object;
|
||||
src_offset = src_entry->offset;
|
||||
|
163
sys/vm/vm_glue.c
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1991 Regents of the University of California.
|
||||
* All rights reserved.
|
||||
* Copyright (c) 1991, 1993
|
||||
* The Regents of the University of California. All rights reserved.
|
||||
*
|
||||
* This code is derived from software contributed to Berkeley by
|
||||
* The Mach Operating System project at Carnegie-Mellon University.
|
||||
@ -33,8 +33,8 @@
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* from: @(#)vm_glue.c 7.8 (Berkeley) 5/15/91
|
||||
* vm_glue.c,v 1.8 1993/07/15 15:42:17 cgd Exp
|
||||
* from: @(#)vm_glue.c 8.6 (Berkeley) 1/5/94
|
||||
* $vm_glue.c,v 1.8 1993/07/15 15:42:17 cgd Exp$
|
||||
*
|
||||
*
|
||||
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
|
||||
@ -71,7 +71,6 @@
|
||||
#include <vm/vm.h>
|
||||
#include <vm/vm_page.h>
|
||||
#include <vm/vm_kern.h>
|
||||
#include <vm/vm_user.h>
|
||||
|
||||
#include <machine/cpu.h>
|
||||
|
||||
@ -80,25 +79,14 @@ unsigned maxdmap = MAXDSIZ; /* XXX */
|
||||
unsigned maxsmap = MAXSSIZ; /* XXX */
|
||||
int readbuffers = 0; /* XXX allow kgdb to read kernel buffer pool */
|
||||
|
||||
static void swapout __P((struct proc *));
|
||||
|
||||
int
|
||||
kernacc(addr, len, rw)
|
||||
caddr_t addr;
|
||||
int len, rw;
|
||||
{
|
||||
vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;
|
||||
|
||||
return (kerncheckprot(addr, len, prot));
|
||||
}
|
||||
|
||||
int
|
||||
kerncheckprot(addr, len, prot)
|
||||
caddr_t addr;
|
||||
int len, prot;
|
||||
{
|
||||
boolean_t rv;
|
||||
vm_offset_t saddr, eaddr;
|
||||
vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;
|
||||
|
||||
saddr = trunc_page(addr);
|
||||
eaddr = round_page(addr+len);
|
||||
@ -126,21 +114,16 @@ useracc(addr, len, rw)
|
||||
boolean_t rv;
|
||||
vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;
|
||||
|
||||
#if defined(i386) || defined(pc532)
|
||||
/*
|
||||
* XXX - specially disallow access to user page tables - they are
|
||||
* in the map.
|
||||
*
|
||||
* XXX - don't specially disallow access to the user area - treat
|
||||
* it as incorrectly as elsewhere.
|
||||
*
|
||||
* XXX - VM_MAXUSER_ADDRESS is an end address, not a max. It was
|
||||
* only used (as an end address) in trap.c. Use it as an end
|
||||
* address here too.
|
||||
* in the map. This is here until i386 & pc532 pmaps are fixed...
|
||||
*/
|
||||
if ((vm_offset_t) addr >= VM_MAXUSER_ADDRESS
|
||||
|| (vm_offset_t) addr + len > VM_MAXUSER_ADDRESS
|
||||
|| (vm_offset_t) addr + len <= (vm_offset_t) addr)
|
||||
return (FALSE);
|
||||
#endif
|
||||
|
||||
rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
|
||||
trunc_page(addr), round_page(addr+len), prot);
|
||||
@ -151,16 +134,37 @@ useracc(addr, len, rw)
|
||||
/*
|
||||
* Change protections on kernel pages from addr to addr+len
|
||||
* (presumably so debugger can plant a breakpoint).
|
||||
* All addresses are assumed to reside in the Sysmap,
|
||||
*
|
||||
* We force the protection change at the pmap level. If we were
|
||||
* to use vm_map_protect a change to allow writing would be lazily-
|
||||
* applied meaning we would still take a protection fault, something
|
||||
* we really don't want to do. It would also fragment the kernel
|
||||
* map unnecessarily. We cannot use pmap_protect since it also won't
|
||||
* enforce a write-enable request. Using pmap_enter is the only way
|
||||
* we can ensure the change takes place properly.
|
||||
*/
|
||||
void
|
||||
chgkprot(addr, len, rw)
|
||||
register caddr_t addr;
|
||||
int len, rw;
|
||||
{
|
||||
vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;
|
||||
vm_prot_t prot;
|
||||
vm_offset_t pa, sva, eva;
|
||||
|
||||
vm_map_protect(kernel_map, trunc_page(addr),
|
||||
round_page(addr+len), prot, FALSE);
|
||||
prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
|
||||
eva = round_page(addr + len);
|
||||
for (sva = trunc_page(addr); sva < eva; sva += PAGE_SIZE) {
|
||||
/*
|
||||
* Extract physical address for the page.
|
||||
* We use a cheezy hack to differentiate physical
|
||||
* page 0 from an invalid mapping, not that it
|
||||
* really matters...
|
||||
*/
|
||||
pa = pmap_extract(kernel_pmap, sva|1);
|
||||
if (pa == 0)
|
||||
panic("chgkprot: invalid page");
|
||||
pmap_enter(kernel_pmap, sva, pa&~1, prot, TRUE);
|
||||
}
|
||||
}
|
||||
#endif
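The rewritten chgkprot() forces the protection change at the pmap level so a debugger's write actually takes effect immediately. A hypothetical use (not in the patch; `bpt' and `textaddr' are illustrative names only) is planting a breakpoint in read-only kernel text:

	chgkprot(textaddr, sizeof(bpt), B_WRITE);	/* make the page writable */
	bcopy((caddr_t)&bpt, textaddr, sizeof(bpt));	/* patch in the breakpoint */
	chgkprot(textaddr, sizeof(bpt), B_READ);	/* restore read-only */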
|
||||
|
||||
@ -181,7 +185,7 @@ vsunlock(addr, len, dirtied)
|
||||
{
|
||||
#ifdef lint
|
||||
dirtied++;
|
||||
#endif lint
|
||||
#endif
|
||||
vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
|
||||
round_page(addr+len), TRUE);
|
||||
}
|
||||
@ -220,14 +224,23 @@ vm_fork(p1, p2, isvfork)
|
||||
shmfork(p1, p2, isvfork);
|
||||
#endif
|
||||
|
||||
#if !defined(i386) && !defined(pc532)
|
||||
/*
|
||||
* Allocate a wired-down (for now) pcb and kernel stack for the process
|
||||
*/
|
||||
#ifdef notyet
|
||||
addr = kmem_alloc_pageable(kernel_map, ctob(UPAGES));
|
||||
if (addr == 0)
|
||||
panic("vm_fork: no more kernel virtual memory");
|
||||
vm_map_pageable(kernel_map, addr, addr + ctob(UPAGES), FALSE);
|
||||
#else
|
||||
/*
|
||||
* XXX somehow, on 386, occasionally pageout removes active, wired down
|
||||
* kstack and pagetables, WITHOUT going thru vm_page_unwire! Why this
|
||||
* appears to work is not yet clear, yet it does...
|
||||
*/
|
||||
addr = kmem_alloc(kernel_map, ctob(UPAGES));
|
||||
if (addr == 0)
|
||||
panic("vm_fork: no more kernel virtual memory");
|
||||
#endif
|
||||
up = (struct user *)addr;
|
||||
p2->p_addr = up;
|
||||
@ -251,11 +264,11 @@ vm_fork(p1, p2, isvfork)
|
||||
#if defined(i386) || defined(pc532)
|
||||
{ vm_offset_t addr = VM_MAXUSER_ADDRESS; struct vm_map *vp;
|
||||
|
||||
vp = &p2->p_vmspace->vm_map;
|
||||
|
||||
/* ream out old pagetables and kernel stack */
|
||||
vp = &p2->p_vmspace->vm_map;
|
||||
(void)vm_deallocate(vp, addr, VM_MAX_ADDRESS - addr);
|
||||
(void)vm_allocate(vp, &addr, VM_MAX_ADDRESS - addr, FALSE);
|
||||
(void)vm_map_inherit(vp, addr, VM_MAX_ADDRESS, VM_INHERIT_NONE);
|
||||
}
|
||||
#endif
|
||||
/*
|
||||
@ -288,8 +301,7 @@ vm_init_limits(p)
|
||||
p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
|
||||
p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
|
||||
p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
|
||||
p->p_rlimit[RLIMIT_RSS].rlim_cur = p->p_rlimit[RLIMIT_RSS].rlim_max =
|
||||
ptoa(cnt.v_free_count);
|
||||
p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(cnt.v_free_count);
|
||||
}
|
||||
|
||||
#include <vm/vm_pageout.h>
|
||||
@ -321,14 +333,12 @@ scheduler()
|
||||
|
||||
loop:
|
||||
#ifdef DEBUG
|
||||
if (!enableswap) {
|
||||
pp = NULL;
|
||||
goto noswap;
|
||||
}
|
||||
while (!enableswap)
|
||||
tsleep((caddr_t)&proc0, PVM, "noswap", 0);
|
||||
#endif
|
||||
pp = NULL;
|
||||
ppri = INT_MIN;
|
||||
for (p = (struct proc *)allproc; p != NULL; p = p->p_next)
|
||||
for (p = (struct proc *)allproc; p != NULL; p = p->p_next) {
|
||||
if (p->p_stat == SRUN && (p->p_flag & P_INMEM) == 0) {
|
||||
pri = p->p_swtime + p->p_slptime - p->p_nice * 8;
|
||||
if (pri > ppri) {
|
||||
@ -336,6 +346,7 @@ loop:
|
||||
ppri = pri;
|
||||
}
|
||||
}
|
||||
}
|
||||
#ifdef DEBUG
|
||||
if (swapdebug & SDB_FOLLOW)
|
||||
printf("scheduler: running, procp %x pri %d\n", pp, ppri);
|
||||
@ -364,7 +375,13 @@ noswap:
|
||||
ppri, cnt.v_free_count);
|
||||
#endif
|
||||
vm_map_pageable(kernel_map, addr, addr+size, FALSE);
|
||||
(void) splclock();
|
||||
/*
|
||||
* Some architectures need to be notified when the
|
||||
* user area has moved to new physical page(s) (e.g.
|
||||
* see pmax/pmax/vm_machdep.c).
|
||||
*/
|
||||
cpu_swapin(p);
|
||||
(void) splstatclock();
|
||||
if (p->p_stat == SRUN)
|
||||
setrunqueue(p);
|
||||
p->p_flag |= P_INMEM;
|
||||
@ -391,9 +408,9 @@ noswap:
|
||||
goto loop;
|
||||
}
|
||||
|
||||
#define swappable(p) \
|
||||
(((p)->p_flag & (P_SYSTEM|P_INMEM|P_NOSWAP|P_WEXIT|P_PHYSIO)) == \
|
||||
P_INMEM)
|
||||
#define swappable(p) \
|
||||
(((p)->p_flag & \
|
||||
(P_SYSTEM | P_INMEM | P_NOSWAP | P_WEXIT | P_PHYSIO)) == P_INMEM)
|
||||
|
||||
/*
|
||||
* Swapout is driven by the pageout daemon. Very simple, we find eligible
|
||||
@ -431,7 +448,7 @@ swapout_threads()
|
||||
|
||||
case SSLEEP:
|
||||
case SSTOP:
|
||||
if (p->p_slptime > maxslp) {
|
||||
if (p->p_slptime >= maxslp) {
|
||||
swapout(p);
|
||||
didswap++;
|
||||
} else if (p->p_slptime > outpri) {
|
||||
@ -460,7 +477,7 @@ swapout_threads()
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
void
|
||||
swapout(p)
|
||||
register struct proc *p;
|
||||
{
|
||||
@ -475,9 +492,8 @@ swapout(p)
|
||||
#endif
|
||||
size = round_page(ctob(UPAGES));
|
||||
addr = (vm_offset_t) p->p_addr;
|
||||
p->p_stats->p_ru.ru_nswap++; /* record that it got swapped out */
|
||||
#ifdef notyet
|
||||
#ifdef hp300
|
||||
#ifdef notyet /* XXX GC -- enable swapping! */
|
||||
#ifdef m68k
|
||||
/*
|
||||
* Ugh! u-area is double mapped to a fixed address behind the
|
||||
* back of the VM system and accesses are usually through that
|
||||
@ -497,8 +513,25 @@ swapout(p)
|
||||
addr = (vm_offset_t) p->p_addr;
|
||||
}
|
||||
#endif
|
||||
#ifdef mips
|
||||
/*
|
||||
* Be sure to save the floating point coprocessor state before
|
||||
* paging out the u-struct.
|
||||
*/
|
||||
{
|
||||
extern struct proc *machFPCurProcPtr;
|
||||
|
||||
if (p == machFPCurProcPtr) {
|
||||
MachSaveCurFPState(p);
|
||||
machFPCurProcPtr = (struct proc *)0;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
/* temporary measure till we find spontaneous unwire of kstack */
|
||||
#if !defined(i386) && !defined(pc532)
|
||||
vm_map_pageable(kernel_map, addr, addr+size, TRUE);
|
||||
pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
|
||||
#endif
|
||||
#endif
|
||||
(void) splhigh();
|
||||
p->p_flag &= ~P_INMEM;
|
||||
@ -533,12 +566,17 @@ thread_block()
|
||||
splx(s);
|
||||
}
|
||||
|
||||
thread_sleep(event, lock)
|
||||
void
|
||||
thread_sleep(event, lock, ruptible)
|
||||
int event;
|
||||
simple_lock_t lock;
|
||||
boolean_t ruptible;
|
||||
{
|
||||
int s = splhigh();
|
||||
|
||||
#ifdef lint
|
||||
ruptible++;
|
||||
#endif
|
||||
curproc->p_thread = event;
|
||||
simple_unlock(lock);
|
||||
if (curproc->p_thread)
|
||||
@ -546,6 +584,7 @@ thread_sleep(event, lock)
|
||||
splx(s);
|
||||
}
|
||||
|
||||
void
|
||||
thread_wakeup(event)
|
||||
int event;
|
||||
{
|
||||
@ -561,19 +600,27 @@ thread_wakeup(event)
|
||||
|
||||
int indent = 0;
|
||||
|
||||
#include <machine/stdarg.h> /* see subr_prf.c */
|
||||
|
||||
/*ARGSUSED2*/
|
||||
iprintf(pr, a, b, c, d, e, f, g, h)
|
||||
void
|
||||
#if __STDC__
|
||||
iprintf(void (*pr)(const char *, ...), const char *fmt, ...)
|
||||
#else
|
||||
iprintf(pr, fmt /* , va_alist */)
|
||||
void (*pr)();
|
||||
char *a;
|
||||
char *fmt;
|
||||
/* va_dcl */
|
||||
#endif
|
||||
{
|
||||
register int i;
|
||||
va_list ap;
|
||||
|
||||
i = indent;
|
||||
while (i >= 8) {
|
||||
for (i = indent; i >= 8; i -= 8)
|
||||
(*pr)("\t");
|
||||
i -= 8;
|
||||
}
|
||||
for (; i > 0; --i)
|
||||
while (--i >= 0)
|
||||
(*pr)(" ");
|
||||
(*pr)(a, b, c, d, e, f, g, h);
|
||||
va_start(ap, fmt);
|
||||
(*pr)("%r", fmt, ap);
|
||||
va_end(ap);
|
||||
}
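iprintf() now takes the output function and a real variable argument list, handing the va_list on via the kernel printf "%r" format. A usage sketch (assumption, not part of the patch; example_print is an illustrative name) from a debug print routine that receives its printer as a parameter:

	void
	example_print(map, pr)
		vm_map_t map;
		void (*pr) __P((const char *, ...));
	{
		indent += 2;
		iprintf(pr, "map=0x%x, pmap=0x%x\n", map, vm_map_pmap(map));
		indent -= 2;
	}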
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1991 Regents of the University of California.
|
||||
* All rights reserved.
|
||||
* Copyright (c) 1991, 1993
|
||||
* The Regents of the University of California. All rights reserved.
|
||||
*
|
||||
* This code is derived from software contributed to Berkeley by
|
||||
* The Mach Operating System project at Carnegie-Mellon University.
|
||||
@ -33,8 +33,8 @@
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* from: @(#)vm_inherit.h 7.2 (Berkeley) 4/21/91
|
||||
* $Id: vm_inherit.h,v 1.4 1994/03/17 02:52:09 cgd Exp $
|
||||
* from: @(#)vm_inherit.h 8.1 (Berkeley) 6/11/93
|
||||
* $Id: vm_inherit.h,v 1.5 1994/05/23 03:11:39 cgd Exp $
|
||||
*
|
||||
*
|
||||
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
|
||||
@ -63,18 +63,12 @@
|
||||
* rights to redistribute these changes.
|
||||
*/
|
||||
|
||||
#ifndef _VM_VM_INHERIT_H_
|
||||
#define _VM_VM_INHERIT_H_
|
||||
|
||||
/*
|
||||
* Virtual memory map inheritance definitions.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Types defined:
|
||||
*
|
||||
* vm_inherit_t inheritance codes.
|
||||
*/
|
||||
#ifndef _VM_INHERIT_
|
||||
#define _VM_INHERIT_
|
||||
|
||||
/*
|
||||
* Enumeration of valid values for vm_inherit_t.
|
||||
@ -87,4 +81,4 @@
|
||||
|
||||
#define VM_INHERIT_DEFAULT VM_INHERIT_COPY
|
||||
|
||||
#endif /* !_VM_VM_INHERIT_H_ */
|
||||
#endif /* _VM_INHERIT_ */
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1991 Regents of the University of California.
|
||||
* All rights reserved.
|
||||
* Copyright (c) 1991, 1993
|
||||
* The Regents of the University of California. All rights reserved.
|
||||
*
|
||||
* This code is derived from software contributed to Berkeley by
|
||||
* The Mach Operating System project at Carnegie-Mellon University.
|
||||
@ -33,8 +33,8 @@
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* from: @(#)vm_init.c 7.3 (Berkeley) 4/21/91
|
||||
* $Id: vm_init.c,v 1.6 1994/04/15 07:04:50 cgd Exp $
|
||||
* from: @(#)vm_init.c 8.1 (Berkeley) 6/11/93
|
||||
* $Id: vm_init.c,v 1.7 1994/05/23 03:11:40 cgd Exp $
|
||||
*
|
||||
*
|
||||
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
|
||||
@ -88,7 +88,7 @@ void vm_mem_init()
|
||||
#else
|
||||
vm_offset_t start, end;
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* Initializes resident memory structures.
|
||||
* From here on, all physical memory is accounted for,
|
||||
@ -96,14 +96,19 @@ void vm_mem_init()
|
||||
*/
|
||||
vm_set_page_size();
|
||||
#ifndef MACHINE_NONCONTIG
|
||||
virtual_avail = vm_page_startup(avail_start, avail_end, virtual_avail);
|
||||
vm_page_startup(&avail_start, &avail_end);
|
||||
#else
|
||||
vm_page_bootstrap(&start, &end);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Initialize other VM packages
|
||||
*/
|
||||
vm_object_init();
|
||||
#ifndef MACHINE_NONCONTIG
|
||||
vm_object_init(virtual_end - VM_MIN_KERNEL_ADDRESS);
|
||||
#else
|
||||
vm_object_init(end - VM_MIN_KERNEL_ADDRESS);
|
||||
#endif
|
||||
vm_map_startup();
|
||||
#ifndef MACHINE_NONCONTIG
|
||||
kmem_init(virtual_avail, virtual_end);
|
||||
|
289
sys/vm/vm_kern.c
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1991 Regents of the University of California.
|
||||
* All rights reserved.
|
||||
* Copyright (c) 1991, 1993
|
||||
* The Regents of the University of California. All rights reserved.
|
||||
*
|
||||
* This code is derived from software contributed to Berkeley by
|
||||
* The Mach Operating System project at Carnegie-Mellon University.
|
||||
@ -33,8 +33,8 @@
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* from: @(#)vm_kern.c 7.4 (Berkeley) 5/7/91
|
||||
* $Id: vm_kern.c,v 1.10 1994/01/08 05:26:10 mycroft Exp $
|
||||
* from: @(#)vm_kern.c 8.3 (Berkeley) 1/12/94
|
||||
* $Id: vm_kern.c,v 1.11 1994/05/23 03:11:41 cgd Exp $
|
||||
*
|
||||
*
|
||||
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
|
||||
@ -68,12 +68,12 @@
|
||||
*/
|
||||
|
||||
#include <sys/param.h>
|
||||
#include <sys/systm.h>
|
||||
|
||||
#include <vm/vm.h>
|
||||
#include <vm/vm_page.h>
|
||||
#include <vm/vm_pageout.h>
|
||||
#include <vm/vm_kern.h>
|
||||
#include <vm/vm_user.h>
|
||||
|
||||
/*
|
||||
* kmem_alloc_pageable:
|
||||
@ -92,7 +92,7 @@ kmem_alloc_pageable(map, size)
|
||||
#if 0
|
||||
if (map != kernel_map)
|
||||
panic("kmem_alloc_pageable: not called with kernel_map");
|
||||
#endif 0
|
||||
#endif
|
||||
|
||||
size = round_page(size);
|
||||
|
||||
@ -103,11 +103,6 @@ kmem_alloc_pageable(map, size)
|
||||
return(0);
|
||||
}
|
||||
|
||||
#ifdef KMEM_DEBUG
|
||||
if (map == kernel_map)
|
||||
printf("kmem_alloc_pageable: %x %x\n", addr, size);
|
||||
#endif
|
||||
|
||||
return(addr);
|
||||
}
|
||||
|
||||
@ -121,7 +116,6 @@ kmem_alloc(map, size)
|
||||
register vm_size_t size;
|
||||
{
|
||||
vm_offset_t addr;
|
||||
register int result;
|
||||
register vm_offset_t offset;
|
||||
extern vm_object_t kernel_object;
|
||||
vm_offset_t i;
|
||||
@ -134,25 +128,18 @@ kmem_alloc(map, size)
|
||||
* referenced more than once.
|
||||
*/
|
||||
|
||||
addr = vm_map_min(map);
|
||||
result = vm_map_find(map, NULL, (vm_offset_t) 0,
|
||||
&addr, size, TRUE);
|
||||
if (result != KERN_SUCCESS) {
|
||||
return(0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Since we didn't know where the new region would
|
||||
* start, we couldn't supply the correct offset into
|
||||
* the kernel object. Re-allocate that address
|
||||
* region with the correct offset.
|
||||
* Locate sufficient space in the map. This will give us the
|
||||
* final virtual address for the new memory, and thus will tell
|
||||
* us the offset within the kernel map.
|
||||
*/
|
||||
|
||||
vm_map_lock(map);
|
||||
if (vm_map_findspace(map, 0, size, &addr)) {
|
||||
vm_map_unlock(map);
|
||||
return (0);
|
||||
}
|
||||
offset = addr - VM_MIN_KERNEL_ADDRESS;
|
||||
vm_object_reference(kernel_object);
|
||||
|
||||
vm_map_lock(map);
|
||||
vm_map_delete(map, addr, addr + size);
|
||||
vm_map_insert(map, kernel_object, offset, addr, addr + size);
|
||||
vm_map_unlock(map);
|
||||
|
||||
@ -206,11 +193,6 @@ kmem_alloc(map, size)
|
||||
|
||||
vm_map_simplify(map, addr);
|
||||
|
||||
#ifdef KMEM_DEBUG
|
||||
if (map == kernel_map)
|
||||
printf("kmem_alloc: %x %x\n", addr, size);
|
||||
#endif
|
||||
|
||||
return(addr);
|
||||
}
|
||||
|
||||
@ -227,14 +209,7 @@ kmem_free(map, addr, size)
|
||||
register vm_offset_t addr;
|
||||
vm_size_t size;
|
||||
{
|
||||
|
||||
#ifdef KMEM_DEBUG
|
||||
if (map == kernel_map)
|
||||
printf("kmem_free: %x %x\n", addr, size);
|
||||
#endif
|
||||
|
||||
(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
|
||||
vm_map_simplify(map, addr);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -276,92 +251,9 @@ kmem_suballoc(parent, min, max, size, pageable)
|
||||
panic("kmem_suballoc: cannot create submap");
|
||||
if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
|
||||
panic("kmem_suballoc: unable to change range to submap");
|
||||
|
||||
#ifdef KMEM_DEBUG
|
||||
if (parent == kernel_map)
|
||||
printf("kmem_suballoc: %x %x %x %x\n", *min, *max, size, pageable);
|
||||
#endif
|
||||
|
||||
return(result);
|
||||
}
|
||||
|
||||
/*
|
||||
* vm_move:
|
||||
*
|
||||
* Move memory from source to destination map, possibly deallocating
|
||||
* the source map reference to the memory.
|
||||
*
|
||||
* Parameters are as follows:
|
||||
*
|
||||
* src_map Source address map
|
||||
* src_addr Address within source map
|
||||
* dst_map Destination address map
|
||||
* num_bytes Amount of data (in bytes) to copy/move
|
||||
* src_dealloc Should source be removed after copy?
|
||||
*
|
||||
* Assumes the src and dst maps are not already locked.
|
||||
*
|
||||
* Returns new destination address or 0 (if a failure occurs).
|
||||
*/
|
||||
vm_offset_t
|
||||
vm_move(src_map,src_addr,dst_map,num_bytes,src_dealloc)
|
||||
vm_map_t src_map;
|
||||
register vm_offset_t src_addr;
|
||||
register vm_map_t dst_map;
|
||||
vm_offset_t num_bytes;
|
||||
boolean_t src_dealloc;
|
||||
{
|
||||
register vm_offset_t src_start; /* Beginning of region */
|
||||
register vm_size_t src_size; /* Size of rounded region */
|
||||
vm_offset_t dst_start; /* destination address */
|
||||
register int result;
|
||||
|
||||
/*
|
||||
* Page-align the source region
|
||||
*/
|
||||
|
||||
src_start = trunc_page(src_addr);
|
||||
src_size = round_page(src_addr + num_bytes) - src_start;
|
||||
|
||||
/*
|
||||
* If there's no destination, we can be at most deallocating
|
||||
* the source range.
|
||||
*/
|
||||
if (dst_map == NULL) {
|
||||
if (src_dealloc)
|
||||
if (vm_deallocate(src_map, src_start, src_size)
|
||||
!= KERN_SUCCESS) {
|
||||
printf("vm_move: deallocate of source");
|
||||
printf(" failed, dealloc_only clause\n");
|
||||
}
|
||||
return(0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocate a place to put the copy
|
||||
*/
|
||||
|
||||
dst_start = (vm_offset_t) 0;
|
||||
if ((result = vm_allocate(dst_map, &dst_start, src_size, TRUE))
|
||||
== KERN_SUCCESS) {
|
||||
/*
|
||||
* Perform the copy, asking for deallocation if desired
|
||||
*/
|
||||
result = vm_map_copy(dst_map, src_map, dst_start, src_size,
|
||||
src_start, FALSE, src_dealloc);
|
||||
}
|
||||
|
||||
/*
|
||||
* Return the destination address corresponding to
|
||||
* the source address given (rather than the front
|
||||
* of the newly-allocated page).
|
||||
*/
|
||||
|
||||
if (result == KERN_SUCCESS)
|
||||
return(dst_start + (src_addr - src_start));
|
||||
return(0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocate wired-down memory in the kernel's address map for the higher
|
||||
* level kernel memory allocator (kern/kern_malloc.c). We cannot use
|
||||
@ -396,27 +288,21 @@ kmem_malloc(map, size, canwait)
|
||||
size = round_page(size);
|
||||
addr = vm_map_min(map);
|
||||
|
||||
if (vm_map_find(map, NULL, (vm_offset_t)0,
|
||||
&addr, size, TRUE) != KERN_SUCCESS) {
|
||||
if (canwait) { /* XXX -- then we should wait */
|
||||
if (map == kmem_map)
|
||||
panic("kmem_malloc: kmem_map too small (should wait)");
|
||||
else if (map == mb_map)
|
||||
panic("kmem_malloc: mb_map too small (should wait)");
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Since we didn't know where the new region would start,
|
||||
* we couldn't supply the correct offset into the kmem object.
|
||||
* Re-allocate that address region with the correct offset.
|
||||
* Locate sufficient space in the map. This will give us the
|
||||
* final virtual address for the new memory, and thus will tell
|
||||
* us the offset within the kernel map.
|
||||
*/
|
||||
vm_map_lock(map);
|
||||
if (vm_map_findspace(map, 0, size, &addr)) {
|
||||
vm_map_unlock(map);
|
||||
if (canwait) /* XXX should wait */
|
||||
panic("kmem_malloc: %s too small",
|
||||
map == kmem_map ? "kmem_map" : "mb_map");
|
||||
return (0);
|
||||
}
|
||||
offset = addr - vm_map_min(kmem_map);
|
||||
vm_object_reference(kmem_object);
|
||||
|
||||
vm_map_lock(map);
|
||||
vm_map_delete(map, addr, addr + size);
|
||||
vm_map_insert(map, kmem_object, offset, addr, addr + size);
|
||||
|
||||
/*
|
||||
@ -505,95 +391,29 @@ kmem_alloc_wait(map, size)
|
||||
vm_size_t size;
|
||||
{
|
||||
vm_offset_t addr;
|
||||
int result;
|
||||
|
||||
size = round_page(size);
|
||||
|
||||
do {
|
||||
for (;;) {
|
||||
/*
|
||||
* To make this work for more than one map,
|
||||
* use the map's lock to lock out sleepers/wakers.
|
||||
* Unfortunately, vm_map_find also grabs the map lock.
|
||||
* To make this work for more than one map,
|
||||
* use the map's lock to lock out sleepers/wakers.
|
||||
*/
|
||||
vm_map_lock(map);
|
||||
lock_set_recursive(&map->lock);
|
||||
|
||||
addr = vm_map_min(map);
|
||||
result = vm_map_find(map, NULL, (vm_offset_t) 0,
|
||||
&addr, size, TRUE);
|
||||
|
||||
lock_clear_recursive(&map->lock);
|
||||
if (result != KERN_SUCCESS) {
|
||||
|
||||
if ( (vm_map_max(map) - vm_map_min(map)) < size ) {
|
||||
vm_map_unlock(map);
|
||||
return(0);
|
||||
}
|
||||
|
||||
assert_wait((int)map, TRUE);
|
||||
if (vm_map_findspace(map, 0, size, &addr) == 0)
|
||||
break;
|
||||
/* no space now; see if we can ever get space */
|
||||
if (vm_map_max(map) - vm_map_min(map) < size) {
|
||||
vm_map_unlock(map);
|
||||
thread_wakeup(&vm_pages_needed); /* XXX */
|
||||
thread_block();
|
||||
return (0);
|
||||
}
|
||||
else {
|
||||
vm_map_unlock(map);
|
||||
}
|
||||
|
||||
} while (result != KERN_SUCCESS);
|
||||
|
||||
return(addr);
|
||||
}
|
||||
|
||||
/*
|
||||
* kmem_alloc_wired_wait
|
||||
*
|
||||
* Allocates nonpageable memory from a sub-map of the kernel. If the submap
|
||||
* has no room, the caller sleeps waiting for more memory in the submap.
|
||||
*
|
||||
*/
|
||||
vm_offset_t
|
||||
kmem_alloc_wired_wait(map, size)
|
||||
vm_map_t map;
|
||||
vm_size_t size;
|
||||
{
|
||||
vm_offset_t addr;
|
||||
int result;
|
||||
|
||||
size = round_page(size);
|
||||
|
||||
do {
|
||||
/*
|
||||
* To make this work for more than one map,
|
||||
* use the map's lock to lock out sleepers/wakers.
|
||||
* Unfortunately, vm_map_find also grabs the map lock.
|
||||
*/
|
||||
vm_map_lock(map);
|
||||
lock_set_recursive(&map->lock);
|
||||
|
||||
addr = vm_map_min(map);
|
||||
result = vm_map_find(map, NULL, (vm_offset_t) 0,
|
||||
&addr, size, FALSE);
|
||||
|
||||
lock_clear_recursive(&map->lock);
|
||||
if (result != KERN_SUCCESS) {
|
||||
|
||||
if ( (vm_map_max(map) - vm_map_min(map)) < size ) {
|
||||
vm_map_unlock(map);
|
||||
return(0);
|
||||
}
|
||||
|
||||
assert_wait((int)map, TRUE);
|
||||
vm_map_unlock(map);
|
||||
thread_wakeup(&vm_pages_needed); /* XXX */
|
||||
thread_block();
|
||||
}
|
||||
else {
|
||||
vm_map_unlock(map);
|
||||
}
|
||||
|
||||
} while (result != KERN_SUCCESS);
|
||||
|
||||
return(addr);
|
||||
assert_wait((int)map, TRUE);
|
||||
vm_map_unlock(map);
|
||||
thread_block();
|
||||
}
|
||||
vm_map_insert(map, NULL, (vm_offset_t)0, addr, addr + size);
|
||||
vm_map_unlock(map);
|
||||
return (addr);
|
||||
}
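A pairing sketch for the rewritten allocator above (assumption, not part of the patch): space taken from a submap with kmem_alloc_wait() should be returned with kmem_free_wakeup(), whose thread_wakeup() on the map is what releases a thread sleeping in the allocation loop. pager_map is one such submap.

	vm_offset_t kva;

	kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
	if (kva == 0)
		return (0);		/* request can never be satisfied */
	/* ... use the pageable kernel VA ... */
	kmem_free_wakeup(pager_map, kva, PAGE_SIZE);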
|
||||
|
||||
/*
|
||||
@ -612,30 +432,27 @@ kmem_free_wakeup(map, addr, size)
|
||||
(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
|
||||
thread_wakeup((int)map);
|
||||
vm_map_unlock(map);
|
||||
vm_map_simplify(map, addr);
|
||||
}
|
||||
|
||||
/*
|
||||
* kmem_init:
|
||||
*
|
||||
* Initialize the kernel's virtual memory map, taking
|
||||
* into account all memory allocated up to this time.
|
||||
* Create the kernel map; insert a mapping covering kernel text, data, bss,
|
||||
* and all space allocated thus far (`bootstrap' data). The new map will thus
|
||||
* map the range between VM_MIN_KERNEL_ADDRESS and `start' as allocated, and
|
||||
* the range between `start' and `end' as free.
|
||||
*/
|
||||
void
|
||||
kmem_init(start, end)
|
||||
vm_offset_t start;
|
||||
vm_offset_t end;
|
||||
{
|
||||
vm_offset_t addr;
|
||||
extern vm_map_t kernel_map;
|
||||
register vm_map_t m;
|
||||
|
||||
#ifdef KMEM_DEBUG
|
||||
printf("kmem_init: %x %x %x\n", VM_MIN_KERNEL_ADDRESS, start, end);
|
||||
#endif
|
||||
|
||||
addr = VM_MIN_KERNEL_ADDRESS;
|
||||
kernel_map = vm_map_create(pmap_kernel(), addr, end, FALSE);
|
||||
(void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
|
||||
&addr, (start - VM_MIN_KERNEL_ADDRESS),
|
||||
FALSE);
|
||||
m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end, FALSE);
|
||||
vm_map_lock(m);
|
||||
/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
|
||||
kernel_map = m;
|
||||
(void) vm_map_insert(m, NULL, (vm_offset_t)0,
|
||||
VM_MIN_KERNEL_ADDRESS, start);
|
||||
/* ... and ending with the completion of the above `insert' */
|
||||
vm_map_unlock(m);
|
||||
}
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1991 Regents of the University of California.
|
||||
* All rights reserved.
|
||||
* Copyright (c) 1991, 1993
|
||||
* The Regents of the University of California. All rights reserved.
|
||||
*
|
||||
* This code is derived from software contributed to Berkeley by
|
||||
* The Mach Operating System project at Carnegie-Mellon University.
|
||||
@ -33,8 +33,8 @@
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* from: @(#)vm_kern.h 7.2 (Berkeley) 4/21/91
|
||||
* $Id: vm_kern.h,v 1.6 1994/01/13 04:25:01 cgd Exp $
|
||||
* from: @(#)vm_kern.h 8.1 (Berkeley) 6/11/93
|
||||
* $Id: vm_kern.h,v 1.7 1994/05/23 03:11:42 cgd Exp $
|
||||
*
|
||||
*
|
||||
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
|
||||
@ -63,29 +63,11 @@
|
||||
* rights to redistribute these changes.
|
||||
*/
|
||||
|
||||
#ifndef _VM_VM_KERN_H_
|
||||
#define _VM_VM_KERN_H_
|
||||
/* Kernel memory management definitions. */
|
||||
|
||||
/*
|
||||
* Kernel memory management functions.
|
||||
*/
|
||||
vm_offset_t kmem_alloc_pageable __P((vm_map_t, vm_size_t));
|
||||
vm_offset_t kmem_alloc __P((vm_map_t, vm_size_t));
|
||||
void kmem_free __P((vm_map_t, vm_offset_t, vm_size_t));
|
||||
vm_map_t kmem_suballoc
|
||||
__P((vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t,
|
||||
boolean_t));
|
||||
vm_offset_t kmem_malloc __P((vm_map_t, vm_size_t, boolean_t));
|
||||
vm_offset_t kmem_alloc_wait __P((vm_map_t, vm_size_t));
|
||||
vm_offset_t kmem_alloc_wired_wait __P((vm_map_t, vm_size_t));
|
||||
void kmem_free_wakeup __P((vm_map_t, vm_offset_t, vm_size_t));
|
||||
void kmem_init __P((vm_offset_t, vm_offset_t));
|
||||
|
||||
vm_map_t kernel_map;
|
||||
vm_map_t mb_map;
|
||||
vm_map_t kmem_map;
|
||||
vm_map_t exec_map;
|
||||
vm_map_t phys_map;
|
||||
vm_map_t buffer_map;
|
||||
|
||||
#endif /* !_VM_VM_KERN_H_ */
|
||||
vm_map_t exec_map;
|
||||
vm_map_t kernel_map;
|
||||
vm_map_t kmem_map;
|
||||
vm_map_t mb_map;
|
||||
vm_map_t phys_map;
|
||||
|
404
sys/vm/vm_map.c
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1991 Regents of the University of California.
|
||||
* All rights reserved.
|
||||
* Copyright (c) 1991, 1993
|
||||
* The Regents of the University of California. All rights reserved.
|
||||
*
|
||||
* This code is derived from software contributed to Berkeley by
|
||||
* The Mach Operating System project at Carnegie-Mellon University.
|
||||
@ -33,7 +33,7 @@
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* from: @(#)vm_map.c 7.3 (Berkeley) 4/21/91
|
||||
* from: @(#)vm_map.c 8.3 (Berkeley) 1/12/94
|
||||
* vm_map.c,v 1.6 1993/07/15 14:25:28 cgd Exp
|
||||
*
|
||||
*
|
||||
@ -68,11 +68,12 @@
|
||||
*/
|
||||
|
||||
#include <sys/param.h>
|
||||
#include <sys/malloc.h>
|
||||
#include <sys/systm.h>
|
||||
#include <sys/malloc.h>
|
||||
|
||||
#include <vm/vm.h>
|
||||
#include <vm/vm_page.h>
|
||||
#include <vm/vm_object.h>
|
||||
|
||||
/*
|
||||
* Virtual memory maps provide for the mapping, protection,
|
||||
@ -137,6 +138,9 @@ vm_size_t kentry_data_size;
|
||||
vm_map_entry_t kentry_free;
|
||||
vm_map_t kmap_free;
|
||||
|
||||
static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
|
||||
static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
|
||||
|
||||
void
|
||||
vm_map_startup()
|
||||
{
|
||||
@ -246,7 +250,7 @@ vm_map_create(pmap, min, max, pageable)
|
||||
*/
|
||||
void
|
||||
vm_map_init(map, min, max, pageable)
|
||||
register vm_map_t map;
|
||||
register struct vm_map *map;
|
||||
vm_offset_t min, max;
|
||||
boolean_t pageable;
|
||||
{
|
||||
@ -272,20 +276,28 @@ vm_map_init(map, min, max, pageable)
|
||||
* Allocates a VM map entry for insertion.
|
||||
* No entry fields are filled in. This routine is
|
||||
*/
|
||||
static vm_map_entry_t
|
||||
vm_map_entry_t
|
||||
vm_map_entry_create(map)
|
||||
vm_map_t map;
|
||||
{
|
||||
vm_map_entry_t entry;
|
||||
extern vm_map_t kernel_map, kmem_map, mb_map, pager_map;
|
||||
#ifdef DEBUG
|
||||
extern vm_map_t kernel_map, kmem_map, mb_map, pager_map;
|
||||
boolean_t isspecial;
|
||||
|
||||
if (map == kernel_map || map == kmem_map || map == mb_map
|
||||
|| map == pager_map) {
|
||||
if (entry = kentry_free)
|
||||
kentry_free = kentry_free->next;
|
||||
} else
|
||||
isspecial = (map == kernel_map || map == kmem_map ||
|
||||
map == mb_map || map == pager_map);
|
||||
if (isspecial && map->entries_pageable ||
|
||||
!isspecial && !map->entries_pageable)
|
||||
panic("vm_map_entry_create: bogus map");
|
||||
#endif
|
||||
if (map->entries_pageable) {
|
||||
MALLOC(entry, vm_map_entry_t, sizeof(struct vm_map_entry),
|
||||
M_VMMAPENT, M_WAITOK);
|
||||
} else {
|
||||
if (entry = kentry_free)
|
||||
kentry_free = kentry_free->next;
|
||||
}
|
||||
if (entry == NULL)
|
||||
panic("vm_map_entry_create: out of map entries");
|
||||
|
||||
@ -297,19 +309,27 @@ vm_map_entry_create(map)
|
||||
*
|
||||
* Inverse of vm_map_entry_create.
|
||||
*/
|
||||
static void
|
||||
void
|
||||
vm_map_entry_dispose(map, entry)
|
||||
vm_map_t map;
|
||||
vm_map_entry_t entry;
|
||||
{
|
||||
#ifdef DEBUG
|
||||
extern vm_map_t kernel_map, kmem_map, mb_map, pager_map;
|
||||
boolean_t isspecial;
|
||||
|
||||
if (map == kernel_map || map == kmem_map || map == mb_map
|
||||
|| map == pager_map) {
|
||||
isspecial = (map == kernel_map || map == kmem_map ||
|
||||
map == mb_map || map == pager_map);
|
||||
if (isspecial && map->entries_pageable ||
|
||||
!isspecial && !map->entries_pageable)
|
||||
panic("vm_map_entry_dispose: bogus map");
|
||||
#endif
|
||||
if (map->entries_pageable) {
|
||||
FREE(entry, M_VMMAPENT);
|
||||
} else {
|
||||
entry->next = kentry_free;
|
||||
kentry_free = entry;
|
||||
} else
|
||||
FREE(entry, M_VMMAPENT);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@ -338,7 +358,7 @@ vm_map_entry_dispose(map, entry)
|
||||
* Creates another valid reference to the given map.
|
||||
*
|
||||
*/
|
||||
static void
|
||||
void
|
||||
vm_map_reference(map)
|
||||
register vm_map_t map;
|
||||
{
|
||||
@ -389,7 +409,7 @@ vm_map_deallocate(map)
|
||||
}
|
||||
|
||||
/*
|
||||
* vm_map_insert: [ internal use only ]
|
||||
* vm_map_insert:
|
||||
*
|
||||
* Inserts the given whole VM object into the target
|
||||
* map at the specified address range. The object's
|
||||
@ -605,6 +625,63 @@ vm_map_lookup_entry(map, address, entry)
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
/*
|
||||
* Find sufficient space for `length' bytes in the given map, starting at
|
||||
* `start'. The map must be locked. Returns 0 on success, 1 on no space.
|
||||
*/
|
||||
int
|
||||
vm_map_findspace(map, start, length, addr)
|
||||
register vm_map_t map;
|
||||
register vm_offset_t start;
|
||||
vm_size_t length;
|
||||
vm_offset_t *addr;
|
||||
{
|
||||
register vm_map_entry_t entry, next;
|
||||
register vm_offset_t end;
|
||||
|
||||
if (start < map->min_offset)
|
||||
start = map->min_offset;
|
||||
if (start > map->max_offset)
|
||||
return (1);
|
||||
|
||||
/*
|
||||
* Look for the first possible address; if there's already
|
||||
* something at this address, we have to start after it.
|
||||
*/
|
||||
if (start == map->min_offset) {
|
||||
if ((entry = map->first_free) != &map->header)
|
||||
start = entry->end;
|
||||
} else {
|
||||
vm_map_entry_t tmp;
|
||||
if (vm_map_lookup_entry(map, start, &tmp))
|
||||
start = tmp->end;
|
||||
entry = tmp;
|
||||
}
|
||||
|
||||
/*
|
||||
* Look through the rest of the map, trying to fit a new region in
|
||||
* the gap between existing regions, or after the very last region.
|
||||
*/
|
||||
for (;; start = (entry = next)->end) {
|
||||
/*
|
||||
* Find the end of the proposed new region. Be sure we didn't
|
||||
* go beyond the end of the map, or wrap around the address;
|
||||
* if so, we lose. Otherwise, if this is the last entry, or
|
||||
* if the proposed new region fits before the next entry, we
|
||||
* win.
|
||||
*/
|
||||
end = start + length;
|
||||
if (end > map->max_offset || end < start)
|
||||
return (1);
|
||||
next = entry->next;
|
||||
if (next == &map->header || next->start >= end)
|
||||
break;
|
||||
}
|
||||
SAVE_HINT(map, entry);
|
||||
*addr = start;
|
||||
return (0);
|
||||
}
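Caller-side sketch for vm_map_findspace() (a reader's summary, not additional patch content): the map must already be write-locked, and the space found is claimed with a matching vm_map_insert() before the lock is dropped, exactly as kmem_alloc(), kmem_malloc(), and kmem_alloc_wait() now do.

	vm_map_lock(map);
	if (vm_map_findspace(map, 0, size, &addr)) {
		vm_map_unlock(map);
		return (0);			/* no space */
	}
	vm_map_insert(map, NULL, (vm_offset_t)0, addr, addr + size);
	vm_map_unlock(map);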
|
||||
|
||||
/*
|
||||
* vm_map_find finds an unallocated region in the target address
|
||||
* map with the given length. The search is defined to be
|
||||
@ -621,97 +698,21 @@ vm_map_find(map, object, offset, addr, length, find_space)
|
||||
vm_size_t length;
|
||||
boolean_t find_space;
|
||||
{
|
||||
register vm_map_entry_t entry;
|
||||
register vm_offset_t start;
|
||||
register vm_offset_t end;
|
||||
int result;
|
||||
|
||||
start = *addr;
|
||||
|
||||
vm_map_lock(map);
|
||||
|
||||
if (find_space) {
|
||||
/*
|
||||
* Calculate the first possible address.
|
||||
*/
|
||||
|
||||
if (start < map->min_offset)
|
||||
start = map->min_offset;
|
||||
if (start > map->max_offset) {
|
||||
if (vm_map_findspace(map, start, length, addr)) {
|
||||
vm_map_unlock(map);
|
||||
return (KERN_NO_SPACE);
|
||||
}
|
||||
|
||||
/*
|
||||
* Look for the first possible address;
|
||||
* if there's already something at this
|
||||
* address, we have to start after it.
|
||||
*/
|
||||
|
||||
if (start == map->min_offset) {
|
||||
if ((entry = map->first_free) != &map->header)
|
||||
start = entry->end;
|
||||
} else {
|
||||
vm_map_entry_t tmp_entry;
|
||||
if (vm_map_lookup_entry(map, start, &tmp_entry))
|
||||
start = tmp_entry->end;
|
||||
entry = tmp_entry;
|
||||
}
|
||||
|
||||
/*
|
||||
* In any case, the "entry" always precedes
|
||||
* the proposed new region throughout the
|
||||
* loop:
|
||||
*/
|
||||
|
||||
while (TRUE) {
|
||||
register vm_map_entry_t next;
|
||||
|
||||
/*
|
||||
* Find the end of the proposed new region.
|
||||
* Be sure we didn't go beyond the end, or
|
||||
* wrap around the address.
|
||||
*/
|
||||
|
||||
end = start + length;
|
||||
|
||||
if ((end > map->max_offset) || (end < start)) {
|
||||
vm_map_unlock(map);
|
||||
return (KERN_NO_SPACE);
|
||||
}
|
||||
|
||||
/*
|
||||
* If there are no more entries, we must win.
|
||||
*/
|
||||
|
||||
next = entry->next;
|
||||
if (next == &map->header)
|
||||
break;
|
||||
|
||||
/*
|
||||
* If there is another entry, it must be
|
||||
* after the end of the potential new region.
|
||||
*/
|
||||
|
||||
if (next->start >= end)
|
||||
break;
|
||||
|
||||
/*
|
||||
* Didn't fit -- move to the next entry.
|
||||
*/
|
||||
|
||||
entry = next;
|
||||
start = entry->end;
|
||||
}
|
||||
*addr = start;
|
||||
|
||||
SAVE_HINT(map, entry);
|
||||
start = *addr;
|
||||
}
|
||||
|
||||
result = vm_map_insert(map, object, offset, start, start + length);
|
||||
|
||||
vm_map_unlock(map);
|
||||
return(result);
|
||||
return (result);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -721,14 +722,14 @@ vm_map_find(map, object, offset, addr, length, find_space)
|
||||
* removing extra sharing maps
|
||||
* [XXX maybe later] merging with a neighbor
|
||||
*/
|
||||
static void
|
||||
void
|
||||
vm_map_simplify_entry(map, entry)
|
||||
vm_map_t map;
|
||||
vm_map_entry_t entry;
|
||||
{
|
||||
#ifdef lint
|
||||
map++;
|
||||
#endif lint
|
||||
#endif
|
||||
|
||||
/*
|
||||
* If this entry corresponds to a sharing map, then
|
||||
@ -757,7 +758,7 @@ vm_map_simplify_entry(map, entry)
|
||||
* Later.
|
||||
*/
|
||||
}
|
||||
#endif 0
|
||||
#endif
|
||||
}
|
||||
else {
|
||||
/*
|
||||
@ -840,6 +841,7 @@ _vm_map_clip_start(map, entry, start)
|
||||
* the specified address; if necessary,
|
||||
* it splits the entry into two.
|
||||
*/
|
||||
|
||||
#define vm_map_clip_end(map, entry, endaddr) \
|
||||
{ \
|
||||
if (endaddr < entry->end) \
|
||||
@ -1138,7 +1140,9 @@ vm_map_pageable(map, start, end, new_pageable)
|
||||
register boolean_t new_pageable;
|
||||
{
|
||||
register vm_map_entry_t entry;
|
||||
vm_map_entry_t temp_entry;
|
||||
vm_map_entry_t start_entry;
|
||||
register vm_offset_t failed;
|
||||
int rv;
|
||||
|
||||
vm_map_lock(map);
|
||||
|
||||
@ -1152,13 +1156,11 @@ vm_map_pageable(map, start, end, new_pageable)
|
||||
* for the entire region. We do so before making any changes.
|
||||
*/
|
||||
|
||||
if (vm_map_lookup_entry(map, start, &temp_entry)) {
|
||||
entry = temp_entry;
|
||||
vm_map_clip_start(map, entry, start);
|
||||
if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) {
|
||||
vm_map_unlock(map);
|
||||
return(KERN_INVALID_ADDRESS);
|
||||
}
|
||||
else
|
||||
entry = temp_entry->next;
|
||||
temp_entry = entry;
|
||||
entry = start_entry;
|
||||
|
||||
/*
|
||||
* Actions are rather different for wiring and unwiring,
|
||||
@ -1167,13 +1169,19 @@ vm_map_pageable(map, start, end, new_pageable)
|
||||
|
||||
if (new_pageable) {
|
||||
|
||||
vm_map_clip_start(map, entry, start);
|
||||
|
||||
/*
|
||||
* Unwiring. First ensure that the range to be
|
||||
* unwired is really wired down.
|
||||
* unwired is really wired down and that there
|
||||
* are no holes.
|
||||
*/
|
||||
while ((entry != &map->header) && (entry->start < end)) {
|
||||
|
||||
if (entry->wired_count == 0) {
|
||||
if (entry->wired_count == 0 ||
|
||||
(entry->end < end &&
|
||||
(entry->next == &map->header ||
|
||||
entry->next->start > entry->end))) {
|
||||
vm_map_unlock(map);
|
||||
return(KERN_INVALID_ARGUMENT);
|
||||
}
|
||||
@ -1187,7 +1195,7 @@ vm_map_pageable(map, start, end, new_pageable)
|
||||
*/
|
||||
lock_set_recursive(&map->lock);
|
||||
|
||||
entry = temp_entry;
|
||||
entry = start_entry;
|
||||
while ((entry != &map->header) && (entry->start < end)) {
|
||||
vm_map_clip_end(map, entry, end);
|
||||
|
||||
@ -1204,10 +1212,12 @@ vm_map_pageable(map, start, end, new_pageable)
|
||||
/*
|
||||
* Wiring. We must do this in two passes:
|
||||
*
|
||||
* 1. Holding the write lock, we increment the
|
||||
* wiring count. For any area that is not already
|
||||
* wired, we create any shadow objects that need
|
||||
* to be created.
|
||||
* 1. Holding the write lock, we create any shadow
|
||||
* or zero-fill objects that need to be created.
|
||||
* Then we clip each map entry to the region to be
|
||||
* wired and increment its wiring count. We
|
||||
* create objects before clipping the map entries
|
||||
* to avoid object proliferation.
|
||||
*
|
||||
* 2. We downgrade to a read lock, and call
|
||||
* vm_fault_wire to fault in the pages for any
|
||||
@ -1228,12 +1238,8 @@ vm_map_pageable(map, start, end, new_pageable)
|
||||
/*
|
||||
* Pass 1.
|
||||
*/
|
||||
entry = temp_entry;
|
||||
while ((entry != &map->header) && (entry->start < end)) {
|
||||
vm_map_clip_end(map, entry, end);
|
||||
|
||||
entry->wired_count++;
|
||||
if (entry->wired_count == 1) {
|
||||
if (entry->wired_count == 0) {
|
||||
|
||||
/*
|
||||
* Perform actions of vm_map_lookup that need
|
||||
@ -1263,7 +1269,28 @@ vm_map_pageable(map, start, end, new_pageable)
|
||||
}
|
||||
}
|
||||
}
|
||||
vm_map_clip_start(map, entry, start);
|
||||
vm_map_clip_end(map, entry, end);
|
||||
entry->wired_count++;
|
||||
|
||||
/*
|
||||
* Check for holes
|
||||
*/
|
||||
if (entry->end < end &&
|
||||
(entry->next == &map->header ||
|
||||
entry->next->start > entry->end)) {
|
||||
/*
|
||||
* Found one. Object creation actions
|
||||
* do not need to be undone, but the
|
||||
* wired counts need to be restored.
|
||||
*/
|
||||
while (entry != &map->header && entry->end > start) {
|
||||
entry->wired_count--;
|
||||
entry = entry->prev;
|
||||
}
|
||||
vm_map_unlock(map);
|
||||
return(KERN_INVALID_ARGUMENT);
|
||||
}
|
||||
entry = entry->next;
|
||||
}
|
||||
|
||||
@ -1291,10 +1318,26 @@ vm_map_pageable(map, start, end, new_pageable)
|
||||
lock_write_to_read(&map->lock);
|
||||
}
|
||||
|
||||
entry = temp_entry;
|
||||
rv = 0;
|
||||
entry = start_entry;
|
||||
while (entry != &map->header && entry->start < end) {
|
||||
if (entry->wired_count == 1) {
|
||||
vm_fault_wire(map, entry->start, entry->end);
|
||||
/*
|
||||
* If vm_fault_wire fails for any page we need to
|
||||
* undo what has been done. We decrement the wiring
|
||||
* count for those pages which have not yet been
|
||||
* wired (now) and unwire those that have (later).
|
||||
*
|
||||
* XXX this violates the locking protocol on the map,
|
||||
* needs to be fixed.
|
||||
*/
|
||||
if (rv)
|
||||
entry->wired_count--;
|
||||
else if (entry->wired_count == 1) {
|
||||
rv = vm_fault_wire(map, entry->start, entry->end);
|
||||
if (rv) {
|
||||
failed = entry->start;
|
||||
entry->wired_count--;
|
||||
}
|
||||
}
|
||||
entry = entry->next;
|
||||
}
|
||||
@ -1305,6 +1348,11 @@ vm_map_pageable(map, start, end, new_pageable)
|
||||
else {
|
||||
lock_clear_recursive(&map->lock);
|
||||
}
|
||||
if (rv) {
|
||||
vm_map_unlock(map);
|
||||
(void) vm_map_pageable(map, start, failed, TRUE);
|
||||
return(rv);
|
||||
}
|
||||
}
|
||||
|
||||
vm_map_unlock(map);
|
||||
@ -1312,6 +1360,99 @@ vm_map_pageable(map, start, end, new_pageable)
|
||||
return(KERN_SUCCESS);
|
||||
}
|
||||
|
||||
/*
|
||||
* vm_map_clean
|
||||
*
|
||||
* Push any dirty cached pages in the address range to their pager.
|
||||
* If syncio is TRUE, dirty pages are written synchronously.
|
||||
* If invalidate is TRUE, any cached pages are freed as well.
|
||||
*
|
||||
* Returns an error if any part of the specified range is not mapped.
|
||||
*/
|
||||
int
|
||||
vm_map_clean(map, start, end, syncio, invalidate)
|
||||
vm_map_t map;
|
||||
vm_offset_t start;
|
||||
vm_offset_t end;
|
||||
boolean_t syncio;
|
||||
boolean_t invalidate;
|
||||
{
|
||||
register vm_map_entry_t current;
|
||||
vm_map_entry_t entry;
|
||||
vm_size_t size;
|
||||
vm_object_t object;
|
||||
vm_offset_t offset;
|
||||
|
||||
vm_map_lock_read(map);
|
||||
VM_MAP_RANGE_CHECK(map, start, end);
|
||||
if (!vm_map_lookup_entry(map, start, &entry)) {
|
||||
vm_map_unlock_read(map);
|
||||
return(KERN_INVALID_ADDRESS);
|
||||
}
|
||||
|
||||
/*
|
||||
* Make a first pass to check for holes.
|
||||
*/
|
||||
for (current = entry; current->start < end; current = current->next) {
|
||||
if (current->is_sub_map) {
|
||||
vm_map_unlock_read(map);
|
||||
return(KERN_INVALID_ARGUMENT);
|
||||
}
|
||||
if (end > current->end &&
|
||||
(current->next == &map->header ||
|
||||
current->end != current->next->start)) {
|
||||
vm_map_unlock_read(map);
|
||||
return(KERN_INVALID_ADDRESS);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Make a second pass, cleaning/uncaching pages from the indicated
|
||||
* objects as we go.
|
||||
*/
|
||||
for (current = entry; current->start < end; current = current->next) {
|
||||
offset = current->offset + (start - current->start);
|
||||
size = (end <= current->end ? end : current->end) - start;
|
||||
if (current->is_a_map) {
|
||||
register vm_map_t smap;
|
||||
vm_map_entry_t tentry;
|
||||
vm_size_t tsize;
|
||||
|
||||
smap = current->object.share_map;
|
||||
vm_map_lock_read(smap);
|
||||
(void) vm_map_lookup_entry(smap, offset, &tentry);
|
||||
tsize = tentry->end - offset;
|
||||
if (tsize < size)
|
||||
size = tsize;
|
||||
object = tentry->object.vm_object;
|
||||
offset = tentry->offset + (offset - tentry->start);
|
||||
vm_object_lock(object);
|
||||
vm_map_unlock_read(smap);
|
||||
} else {
|
||||
object = current->object.vm_object;
|
||||
vm_object_lock(object);
|
||||
}
|
||||
/*
|
||||
* Flush pages if writing is allowed.
|
||||
* XXX should we continue on an error?
|
||||
*/
|
||||
if ((current->protection & VM_PROT_WRITE) &&
|
||||
!vm_object_page_clean(object, offset, offset+size,
|
||||
syncio, FALSE)) {
|
||||
vm_object_unlock(object);
|
||||
vm_map_unlock_read(map);
|
||||
return(KERN_FAILURE);
|
||||
}
|
||||
if (invalidate)
|
||||
vm_object_page_remove(object, offset, offset+size);
|
||||
vm_object_unlock(object);
|
||||
start += size;
|
||||
}
|
||||
|
||||
vm_map_unlock_read(map);
|
||||
return(KERN_SUCCESS);
|
||||
}
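Caller sketch for vm_map_clean() (assumption, not part of the patch; sync_region is an illustrative name): an msync(2)-style path pushes a user range to backing store synchronously and optionally invalidates the cached pages.

	int
	sync_region(map, addr, len, invalidate)
		vm_map_t map;
		vm_offset_t addr;
		vm_size_t len;
		boolean_t invalidate;
	{
		return (vm_map_clean(map, trunc_page(addr),
		    round_page(addr + len), TRUE, invalidate)); /* syncio = TRUE */
	}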
|
||||
|
||||
/*
|
||||
* vm_map_entry_unwire: [ internal use only ]
|
||||
*
|
||||
@ -1320,7 +1461,7 @@ vm_map_pageable(map, start, end, new_pageable)
|
||||
* The map in question should be locked.
|
||||
* [This is the reason for this routine's existence.]
|
||||
*/
|
||||
static void
|
||||
void
|
||||
vm_map_entry_unwire(map, entry)
|
||||
vm_map_t map;
|
||||
register vm_map_entry_t entry;
|
||||
@ -1334,7 +1475,7 @@ vm_map_entry_unwire(map, entry)
|
||||
*
|
||||
* Deallocate the given entry from the target map.
|
||||
*/
|
||||
static void
|
||||
void
|
||||
vm_map_entry_delete(map, entry)
|
||||
register vm_map_t map;
|
||||
register vm_map_entry_t entry;
|
||||
@ -1532,7 +1673,7 @@ vm_map_check_protection(map, start, end, protection)
|
||||
* Copies the contents of the source entry to the destination
|
||||
* entry. The entries *must* be aligned properly.
|
||||
*/
|
||||
static void
|
||||
void
|
||||
vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
|
||||
vm_map_t src_map, dst_map;
|
||||
register vm_map_entry_t src_entry, dst_entry;
|
||||
@ -1543,7 +1684,7 @@ vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
|
||||
return;
|
||||
|
||||
if (dst_entry->object.vm_object != NULL &&
|
||||
!(dst_entry->object.vm_object->flags & OBJ_INTERNAL))
|
||||
(dst_entry->object.vm_object->flags & OBJ_INTERNAL) == 0)
|
||||
printf("vm_map_copy_entry: copying over permanent data!\n");
|
||||
|
||||
/*
|
||||
@ -2023,6 +2164,7 @@ vmspace_fork(vm1)
|
||||
new_share_entry =
|
||||
vm_map_entry_create(new_share_map);
|
||||
*new_share_entry = *old_entry;
|
||||
new_share_entry->wired_count = 0;
|
||||
|
||||
/*
|
||||
* Insert the entry into the new sharing
|
||||
@ -2049,6 +2191,7 @@ vmspace_fork(vm1)
|
||||
|
||||
new_entry = vm_map_entry_create(new_map);
|
||||
*new_entry = *old_entry;
|
||||
new_entry->wired_count = 0;
|
||||
vm_map_reference(new_entry->object.share_map);
|
||||
|
||||
/*
|
||||
@ -2429,7 +2572,6 @@ vm_map_simplify(map, start)
|
||||
vm_map_unlock(map);
|
||||
}
|
||||
|
||||
#if defined(DDB) || defined(DEBUG)
|
||||
/*
|
||||
* vm_map_print: [ debug ]
|
||||
*/
|
||||
@ -2447,7 +2589,7 @@ void
|
||||
_vm_map_print(map, full, pr)
|
||||
register vm_map_t map;
|
||||
boolean_t full;
|
||||
int (*pr)();
|
||||
void (*pr) __P((const char *, ...));
|
||||
{
|
||||
register vm_map_entry_t entry;
|
||||
extern int indent;
|
||||
@ -2504,11 +2646,11 @@ _vm_map_print(map, full, pr)
|
||||
(entry->prev->object.vm_object !=
|
||||
entry->object.vm_object)) {
|
||||
indent += 2;
|
||||
_vm_object_print(entry->object.vm_object, full, pr);
|
||||
_vm_object_print(entry->object.vm_object,
|
||||
full, pr);
|
||||
indent -= 2;
|
||||
}
|
||||
}
|
||||
}
|
||||
indent -= 2;
|
||||
}
|
||||
#endif
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1991 Regents of the University of California.
|
||||
* All rights reserved.
|
||||
* Copyright (c) 1991, 1993
|
||||
* The Regents of the University of California. All rights reserved.
|
||||
*
|
||||
* This code is derived from software contributed to Berkeley by
|
||||
* The Mach Operating System project at Carnegie-Mellon University.
|
||||
@ -33,8 +33,8 @@
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* from: @(#)vm_map.h 7.3 (Berkeley) 4/21/91
|
||||
* $Id: vm_map.h,v 1.7 1994/03/17 02:52:11 cgd Exp $
|
||||
* from: @(#)vm_map.h 8.3 (Berkeley) 3/15/94
|
||||
* $Id: vm_map.h,v 1.8 1994/05/23 03:11:47 cgd Exp $
|
||||
*
|
||||
*
|
||||
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
|
||||
@ -176,24 +176,29 @@ typedef struct {
|
||||
#define vm_map_pmap(map) ((map)->pmap)
|
||||
|
||||
/* XXX: number of kernel maps and entries to statically allocate */
|
||||
#define MAX_KMAP 10
|
||||
#define MAX_KMAPENT 1000 /* XXX 250 */
|
||||
#define MAX_KMAP 10
|
||||
#define MAX_KMAPENT 500
|
||||
|
||||
#ifdef KERNEL
|
||||
boolean_t vm_map_check_protection __P((vm_map_t,
|
||||
vm_offset_t, vm_offset_t, vm_prot_t));
|
||||
int vm_map_copy __P((vm_map_t, vm_map_t, vm_offset_t,
|
||||
vm_size_t, vm_offset_t, boolean_t, boolean_t));
|
||||
/* XXX vm_map_copy_entry */
|
||||
void vm_map_copy_entry __P((vm_map_t,
|
||||
vm_map_t, vm_map_entry_t, vm_map_entry_t));
|
||||
struct pmap;
|
||||
vm_map_t vm_map_create __P((struct pmap *,
|
||||
vm_offset_t, vm_offset_t, boolean_t));
|
||||
void vm_map_deallocate __P((vm_map_t));
|
||||
int vm_map_delete __P((vm_map_t, vm_offset_t, vm_offset_t));
|
||||
/* XXX vm_map_entry_* */
|
||||
vm_map_entry_t vm_map_entry_create __P((vm_map_t));
|
||||
void vm_map_entry_delete __P((vm_map_t, vm_map_entry_t));
|
||||
void vm_map_entry_dispose __P((vm_map_t, vm_map_entry_t));
|
||||
void vm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
|
||||
int vm_map_find __P((vm_map_t, vm_object_t,
|
||||
vm_offset_t, vm_offset_t *, vm_size_t, boolean_t));
|
||||
/* XXX vm_map_findspace */
|
||||
int vm_map_findspace __P((vm_map_t,
|
||||
vm_offset_t, vm_size_t, vm_offset_t *));
|
||||
int vm_map_inherit __P((vm_map_t,
|
||||
vm_offset_t, vm_offset_t, vm_inherit_t));
|
||||
void vm_map_init __P((struct vm_map *,
|
||||
@ -208,16 +213,17 @@ boolean_t vm_map_lookup_entry __P((vm_map_t,
vm_offset_t, vm_map_entry_t *));
int vm_map_pageable __P((vm_map_t,
vm_offset_t, vm_offset_t, boolean_t));
/* XXX vm_map_clean */
int vm_map_clean __P((vm_map_t,
vm_offset_t, vm_offset_t, boolean_t, boolean_t));
void vm_map_print __P((vm_map_t, boolean_t));
/* XXX what the hell is this? */
void _vm_map_print __P((vm_map_t, boolean_t, int (*)()));
void _vm_map_print __P((vm_map_t, boolean_t,
void (*)(const char *, ...)));
int vm_map_protect __P((vm_map_t,
vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
/* XXX vm_map_reference */
void vm_map_reference __P((vm_map_t));
int vm_map_remove __P((vm_map_t, vm_offset_t, vm_offset_t));
void vm_map_simplify __P((vm_map_t, vm_offset_t));
/* XXX vm_map_simplify_entry */
void vm_map_simplify_entry __P((vm_map_t, vm_map_entry_t));
void vm_map_startup __P((void));
int vm_map_submap __P((vm_map_t,
vm_offset_t, vm_offset_t, vm_map_t));
762
sys/vm/vm_mmap.c
File diff suppressed because it is too large
@ -1,6 +1,6 @@
/*
* Copyright (c) 1991 Regents of the University of California.
* All rights reserved.
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* The Mach Operating System project at Carnegie-Mellon University.
@ -33,8 +33,8 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)vm_object.c 7.4 (Berkeley) 5/7/91
* $Id: vm_object.c,v 1.21 1994/04/21 07:49:33 cgd Exp $
* from: @(#)vm_object.c 8.5 (Berkeley) 3/22/94
* $Id: vm_object.c,v 1.22 1994/05/23 03:11:50 cgd Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@ -68,19 +68,12 @@
*/

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

static void _vm_object_allocate __P((vm_size_t, vm_object_t));
static void vm_object_terminate __P((vm_object_t));
static void vm_object_deactivate_pages __P((vm_object_t));
static void vm_object_cache_trim __P((void));
static void vm_object_remove __P((vm_pager_t));
static void vm_object_cache_clear __P((void));

/*
* Virtual memory objects maintain the actual data
* associated with allocated virtual memory. A given
@ -118,13 +111,16 @@ struct vm_object_hash_head vm_object_hashtable[VM_OBJECT_HASH_COUNT];
long object_collapses = 0;
long object_bypasses = 0;

static void _vm_object_allocate __P((vm_size_t, vm_object_t));

/*
* vm_object_init:
*
* Initialize the VM objects module.
*/
void
vm_object_init()
vm_object_init(size)
vm_size_t size;
{
register int i;

@ -138,8 +134,7 @@ vm_object_init()
TAILQ_INIT(&vm_object_hashtable[i]);

kernel_object = &kernel_object_store;
_vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
kernel_object);
_vm_object_allocate(size, kernel_object);

kmem_object = &kmem_object_store;
_vm_object_allocate(VM_KMEM_SIZE + VM_MBUF_SIZE, kmem_object);
@ -174,7 +169,7 @@ _vm_object_allocate(size, object)
|
||||
object->ref_count = 1;
|
||||
object->resident_page_count = 0;
|
||||
object->size = size;
|
||||
object->flags &= ~OBJ_CANPERSIST;
|
||||
object->flags = OBJ_INTERNAL; /* vm_allocate_with_pager will reset */
|
||||
object->paging_in_progress = 0;
|
||||
object->copy = NULL;
|
||||
|
||||
@ -183,7 +178,6 @@ _vm_object_allocate(size, object)
|
||||
*/
|
||||
|
||||
object->pager = NULL;
|
||||
object->flags |= OBJ_INTERNAL; /* vm_allocate_with_pager will reset */
|
||||
object->paging_offset = 0;
|
||||
object->shadow = NULL;
|
||||
object->shadow_offset = (vm_offset_t) 0;
|
||||
@ -191,6 +185,7 @@ _vm_object_allocate(size, object)
|
||||
simple_lock(&vm_object_list_lock);
|
||||
TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
|
||||
vm_object_count++;
|
||||
cnt.v_nzfod += atop(size);
|
||||
simple_unlock(&vm_object_list_lock);
|
||||
}
|
||||
|
||||
@ -260,26 +255,6 @@ vm_object_deallocate(object)
|
||||
*/
|
||||
|
||||
if (object->flags & OBJ_CANPERSIST) {
|
||||
#ifdef DIAGNOSTIC
|
||||
register vm_page_t p;
|
||||
|
||||
/*
|
||||
* Check for dirty pages in object
|
||||
* Print warning as this may signify kernel bugs
|
||||
*/
|
||||
for (p = object->memq.tqh_first;
|
||||
p != NULL;
|
||||
p = p->listq.tqe_next) {
|
||||
VM_PAGE_CHECK(p);
|
||||
|
||||
if (pmap_is_modified(VM_PAGE_TO_PHYS(p)) ||
|
||||
!(p->flags & PG_CLEAN)) {
|
||||
|
||||
printf("vm_object_dealloc: persistent object %x isn't clean\n", object);
|
||||
goto cant_persist;
|
||||
}
|
||||
}
|
||||
#endif /* DIAGNOSTIC */
|
||||
|
||||
TAILQ_INSERT_TAIL(&vm_object_cached_list, object,
|
||||
cached_list);
|
||||
@ -292,35 +267,11 @@ vm_object_deallocate(object)
|
||||
vm_object_cache_trim();
|
||||
return;
|
||||
}
|
||||
cant_persist:;
|
||||
|
||||
/*
|
||||
* Make sure no one can look us up now.
|
||||
*/
|
||||
vm_object_remove(object->pager);
|
||||
#ifdef DEBUG
|
||||
/*
|
||||
* Sanity check on the object hash table.
|
||||
*/
|
||||
{
|
||||
register vm_object_hash_entry_t entry;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < VM_OBJECT_HASH_COUNT; i++) {
|
||||
struct vm_object_hash_head *bucket =
|
||||
&vm_object_hashtable[i];
|
||||
|
||||
for (entry = bucket->tqh_first;
|
||||
entry != NULL;
|
||||
entry = entry->hash_links.tqe_next) {
|
||||
if (object == entry->object) {
|
||||
vm_object_print(object,0);
|
||||
panic("object hashtable burb");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
vm_object_cache_unlock();
|
||||
|
||||
temp = object->shadow;
|
||||
@ -337,7 +288,7 @@ vm_object_deallocate(object)
|
||||
*
|
||||
* The object must be locked.
|
||||
*/
|
||||
static void
|
||||
void
|
||||
vm_object_terminate(object)
|
||||
register vm_object_t object;
|
||||
{
|
||||
@ -360,88 +311,52 @@ vm_object_terminate(object)
|
||||
}
|
||||
|
||||
/*
|
||||
* Wait until the pageout daemon is through
|
||||
* with the object.
|
||||
* Wait until the pageout daemon is through with the object.
|
||||
*/
|
||||
|
||||
while (object->paging_in_progress != 0) {
|
||||
vm_object_sleep(object, object);
|
||||
while (object->paging_in_progress) {
|
||||
vm_object_sleep((int)object, object, FALSE);
|
||||
vm_object_lock(object);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* While the paging system is locked,
|
||||
* pull the object's pages off the active
|
||||
* and inactive queues. This keeps the
|
||||
* pageout daemon from playing with them
|
||||
* during vm_pager_deallocate.
|
||||
* If not an internal object clean all the pages, removing them
|
||||
* from paging queues as we go.
|
||||
*
|
||||
* We can't free the pages yet, because the
|
||||
* object's pager may have to write them out
|
||||
* before deallocating the paging space.
|
||||
*
|
||||
* XXX
|
||||
* XXX need to do something in the event of a cleaning error.
|
||||
*/
|
||||
|
||||
for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
|
||||
VM_PAGE_CHECK(p);
|
||||
|
||||
vm_page_lock_queues();
|
||||
if (p->flags & PG_ACTIVE) {
|
||||
TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
|
||||
p->flags &= ~PG_ACTIVE;
|
||||
cnt.v_active_count--;
|
||||
}
|
||||
if (p->flags & PG_INACTIVE) {
|
||||
TAILQ_REMOVE(&vm_page_queue_inactive, p, pageq);
|
||||
p->flags &= ~PG_INACTIVE;
|
||||
cnt.v_inactive_count--;
|
||||
}
|
||||
/* vm_page_free(p); XXX */
|
||||
vm_page_unlock_queues();
|
||||
}
|
||||
|
||||
vm_object_unlock(object);
|
||||
|
||||
if (object->paging_in_progress != 0)
|
||||
panic("vm_object_deallocate: pageout in progress");
|
||||
|
||||
/*
|
||||
* Clean and free the pages, as appropriate.
|
||||
* All references to the object are gone,
|
||||
* so we don't need to lock it.
|
||||
*/
|
||||
|
||||
if (!(object->flags & OBJ_INTERNAL)) {
|
||||
vm_object_lock(object);
|
||||
vm_object_page_clean(object, 0, 0);
|
||||
if ((object->flags & OBJ_INTERNAL) == 0) {
|
||||
(void) vm_object_page_clean(object, 0, 0, TRUE, TRUE);
|
||||
vm_object_unlock(object);
|
||||
}
|
||||
|
||||
/*
|
||||
* Now free the pages.
|
||||
* For internal objects, this also removes them from paging queues.
|
||||
*/
|
||||
while ((p = object->memq.tqh_first) != NULL) {
|
||||
VM_PAGE_CHECK(p);
|
||||
vm_page_lock_queues();
|
||||
vm_page_free(p);
|
||||
cnt.v_pfree++;
|
||||
vm_page_unlock_queues();
|
||||
}
|
||||
if ((object->flags & OBJ_INTERNAL) == 0)
|
||||
vm_object_unlock(object);
|
||||
|
||||
/*
|
||||
* Let the pager know object is dead.
|
||||
* Let the pager know object is dead.
|
||||
*/
|
||||
|
||||
if (object->pager != NULL)
|
||||
vm_pager_deallocate(object->pager);
|
||||
|
||||
|
||||
simple_lock(&vm_object_list_lock);
|
||||
TAILQ_REMOVE(&vm_object_list, object, object_list);
|
||||
vm_object_count--;
|
||||
simple_unlock(&vm_object_list_lock);
|
||||
|
||||
/*
|
||||
* Free the space for the object.
|
||||
* Free the space for the object.
|
||||
*/
|
||||
|
||||
free((caddr_t)object, M_VMOBJ);
|
||||
}
|
||||
|
||||
@ -449,44 +364,139 @@ vm_object_terminate(object)
* vm_object_page_clean
*
* Clean all dirty pages in the specified range of object.
* Leaves page on whatever queue it is currently on.
* If syncio is TRUE, page cleaning is done synchronously.
* If de_queue is TRUE, pages are removed from any paging queue
* they were on, otherwise they are left on whatever queue they
* were on before the cleaning operation began.
*
* Odd semantics: if start == end, we clean everything.
*
* The object must be locked.
*
* Returns TRUE if all was well, FALSE if there was a pager error
* somewhere. We attempt to clean (and dequeue) all pages regardless
* of where an error occurs.
*/
void
vm_object_page_clean(object, start, end)
boolean_t
vm_object_page_clean(object, start, end, syncio, de_queue)
register vm_object_t object;
register vm_offset_t start;
register vm_offset_t end;
boolean_t syncio;
boolean_t de_queue;
{
register vm_page_t p;
int onqueue;
boolean_t noerror = TRUE;
|
||||
|
||||
if (object == NULL)
|
||||
return (TRUE);
|
||||
|
||||
/*
|
||||
* If it is an internal object and there is no pager, attempt to
|
||||
* allocate one. Note that vm_object_collapse may relocate one
|
||||
* from a collapsed object so we must recheck afterward.
|
||||
*/
|
||||
if ((object->flags & OBJ_INTERNAL) && object->pager == NULL) {
|
||||
vm_object_collapse(object);
|
||||
if (object->pager == NULL) {
|
||||
vm_pager_t pager;
|
||||
|
||||
vm_object_unlock(object);
|
||||
pager = vm_pager_allocate(PG_DFLT, (caddr_t)0,
|
||||
object->size, VM_PROT_ALL,
|
||||
(vm_offset_t)0);
|
||||
if (pager)
|
||||
vm_object_setpager(object, pager, 0, FALSE);
|
||||
vm_object_lock(object);
|
||||
}
|
||||
}
|
||||
if (object->pager == NULL)
|
||||
return;
|
||||
return (FALSE);
|
||||
|
||||
again:
|
||||
/*
|
||||
* Wait until the pageout daemon is through with the object.
|
||||
*/
|
||||
while (object->paging_in_progress) {
|
||||
vm_object_sleep((int)object, object, FALSE);
|
||||
vm_object_lock(object);
|
||||
}
|
||||
/*
|
||||
* Loop through the object page list cleaning as necessary.
|
||||
*/
|
||||
for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
|
||||
if ((start == end || p->offset >= start && p->offset < end) &&
|
||||
!(p->flags & PG_FICTITIOUS)) {
|
||||
if ((p->flags & PG_CLEAN) &&
|
||||
pmap_is_modified(VM_PAGE_TO_PHYS(p)))
|
||||
p->flags &= ~PG_CLEAN;
|
||||
pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
|
||||
/*
|
||||
* Remove the page from any paging queue.
|
||||
* This needs to be done if either we have been
|
||||
* explicitly asked to do so or it is about to
|
||||
* be cleaned (see comment below).
|
||||
*/
|
||||
if (de_queue || !(p->flags & PG_CLEAN)) {
|
||||
vm_page_lock_queues();
|
||||
if (p->flags & PG_ACTIVE) {
|
||||
TAILQ_REMOVE(&vm_page_queue_active,
|
||||
p, pageq);
|
||||
p->flags &= ~PG_ACTIVE;
|
||||
cnt.v_active_count--;
|
||||
onqueue = 1;
|
||||
} else if (p->flags & PG_INACTIVE) {
|
||||
TAILQ_REMOVE(&vm_page_queue_inactive,
|
||||
p, pageq);
|
||||
p->flags &= ~PG_INACTIVE;
|
||||
cnt.v_inactive_count--;
|
||||
onqueue = -1;
|
||||
} else
|
||||
onqueue = 0;
|
||||
vm_page_unlock_queues();
|
||||
}
|
||||
/*
|
||||
* To ensure the state of the page doesn't change
|
||||
* during the clean operation we do two things.
|
||||
* First we set the busy bit and write-protect all
|
||||
* mappings to ensure that write accesses to the
|
||||
* page block (in vm_fault). Second, we remove
|
||||
* the page from any paging queue to foil the
|
||||
* pageout daemon (vm_pageout_scan).
|
||||
*/
|
||||
pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ);
|
||||
if (!(p->flags & PG_CLEAN)) {
|
||||
p->flags |= PG_BUSY;
|
||||
object->paging_in_progress++;
|
||||
vm_object_unlock(object);
|
||||
(void) vm_pager_put(object->pager, p, TRUE);
|
||||
/*
|
||||
* XXX if put fails we mark the page as
|
||||
* clean to avoid an infinite loop.
|
||||
* Will loose changes to the page.
|
||||
*/
|
||||
if (vm_pager_put(object->pager, p, syncio)) {
|
||||
printf("%s: pager_put error\n",
|
||||
"vm_object_page_clean");
|
||||
p->flags |= PG_CLEAN;
|
||||
noerror = FALSE;
|
||||
}
|
||||
vm_object_lock(object);
|
||||
object->paging_in_progress--;
|
||||
if (!de_queue && onqueue) {
|
||||
vm_page_lock_queues();
|
||||
if (onqueue > 0)
|
||||
vm_page_activate(p);
|
||||
else
|
||||
vm_page_deactivate(p);
|
||||
vm_page_unlock_queues();
|
||||
}
|
||||
p->flags &= ~PG_BUSY;
|
||||
PAGE_WAKEUP(p);
|
||||
goto again;
|
||||
}
|
||||
}
|
||||
}
|
||||
return (noerror);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -497,7 +507,7 @@ again:
|
||||
*
|
||||
* The object must be locked.
|
||||
*/
|
||||
static void
|
||||
void
|
||||
vm_object_deactivate_pages(object)
|
||||
register vm_object_t object;
|
||||
{
|
||||
@ -506,8 +516,7 @@ vm_object_deactivate_pages(object)
|
||||
for (p = object->memq.tqh_first; p != NULL; p = next) {
|
||||
next = p->listq.tqe_next;
|
||||
vm_page_lock_queues();
|
||||
if (!(p->flags & PG_BUSY))
|
||||
vm_page_deactivate(p);
|
||||
vm_page_deactivate(p);
|
||||
vm_page_unlock_queues();
|
||||
}
|
||||
}
|
||||
@ -515,7 +524,7 @@ vm_object_deactivate_pages(object)
|
||||
/*
|
||||
* Trim the object cache to size.
|
||||
*/
|
||||
static void
|
||||
void
|
||||
vm_object_cache_trim()
|
||||
{
|
||||
register vm_object_t object;
|
||||
@ -851,7 +860,7 @@ vm_object_setpager(object, pager, paging_offset,
|
||||
{
|
||||
#ifdef lint
|
||||
read_only++; /* No longer used */
|
||||
#endif lint
|
||||
#endif
|
||||
|
||||
vm_object_lock(object); /* XXX ? */
|
||||
object->pager = pager;
|
||||
@ -862,6 +871,7 @@ vm_object_setpager(object, pager, paging_offset,
|
||||
/*
|
||||
* vm_object_hash hashes the pager/id pair.
|
||||
*/
|
||||
|
||||
#define vm_object_hash(pager) \
|
||||
(((unsigned)pager)%VM_OBJECT_HASH_COUNT)
|
||||
|
||||
@ -886,7 +896,7 @@ vm_object_lookup(pager)
|
||||
vm_object_lock(object);
|
||||
if (object->ref_count == 0) {
|
||||
TAILQ_REMOVE(&vm_object_cached_list, object,
|
||||
cached_list);
|
||||
cached_list);
|
||||
vm_object_cached--;
|
||||
}
|
||||
object->ref_count++;
|
||||
@ -942,7 +952,7 @@ vm_object_enter(object, pager)
|
||||
* is locked. XXX this should be fixed
|
||||
* by reorganizing vm_object_deallocate.
|
||||
*/
|
||||
static void
|
||||
void
|
||||
vm_object_remove(pager)
|
||||
register vm_pager_t pager;
|
||||
{
|
||||
@ -968,7 +978,7 @@ vm_object_remove(pager)
|
||||
* vm_object_cache_clear removes all objects from the cache.
|
||||
*
|
||||
*/
|
||||
static void
|
||||
void
|
||||
vm_object_cache_clear()
|
||||
{
|
||||
register vm_object_t object;
|
||||
@ -1050,7 +1060,7 @@ vm_object_collapse(object)
|
||||
* The backing object is internal.
|
||||
*/
|
||||
|
||||
if (!(backing_object->flags & OBJ_INTERNAL) ||
|
||||
if ((backing_object->flags & OBJ_INTERNAL) == 0 ||
|
||||
backing_object->paging_in_progress != 0) {
|
||||
vm_object_unlock(backing_object);
|
||||
return;
|
||||
@ -1123,29 +1133,12 @@ vm_object_collapse(object)
|
||||
}
|
||||
else {
|
||||
if (pp) {
|
||||
#if 1
|
||||
/*
|
||||
* This should never happen -- the
|
||||
* parent cannot have ever had an
|
||||
* external memory object, and thus
|
||||
* cannot have absent pages.
|
||||
*/
|
||||
panic("vm_object_collapse: bad case");
|
||||
/* andrew@werple.apana.org.au - from
|
||||
mach 3.0 VM */
|
||||
#else
|
||||
/* may be someone waiting for it */
|
||||
PAGE_WAKEUP(pp);
|
||||
vm_page_lock_queues();
|
||||
vm_page_free(pp);
|
||||
vm_page_unlock_queues();
|
||||
#endif
|
||||
}
|
||||
/*
|
||||
* Parent now has no page.
|
||||
* Move the backing object's page
|
||||
* up.
|
||||
*/
|
||||
vm_page_rename(p, object, new_offset);
|
||||
}
|
||||
}
|
||||
@ -1159,40 +1152,12 @@ vm_object_collapse(object)
|
||||
* unused portion.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Remove backing_object from the object hashtable now.
|
||||
* This is necessary since its pager is going away
|
||||
* and therefore it is not going to be removed from
|
||||
* hashtable in vm_object_deallocate().
|
||||
*
|
||||
* NOTE - backing_object can only get at this stage if
|
||||
* it has an internal pager. It is not normally on the
|
||||
* hashtable unless it was put there by eg. vm_mmap()
|
||||
*
|
||||
* XXX - Need I worry here about *named* ANON pagers ?
|
||||
*/
|
||||
|
||||
if (backing_object->pager) {
|
||||
vm_object_remove(backing_object->pager);
|
||||
object->pager = backing_object->pager;
|
||||
object->paging_offset = backing_offset +
|
||||
backing_object->paging_offset;
|
||||
backing_object->pager = NULL;
|
||||
}
|
||||
object->pager = backing_object->pager;
|
||||
#if 1
|
||||
/* Mach 3.0 code */
|
||||
/* andrew@werple.apana.org.au, 12 Feb 1993 */
|
||||
|
||||
/*
|
||||
* If there is no pager, leave paging-offset alone.
|
||||
*/
|
||||
if (object->pager)
|
||||
object->paging_offset =
|
||||
backing_object->paging_offset +
|
||||
backing_offset;
|
||||
#else
|
||||
/* old VM 2.5 version */
|
||||
object->paging_offset += backing_offset;
|
||||
#endif
|
||||
|
||||
backing_object->pager = NULL;
|
||||
|
||||
/*
|
||||
* Object now shadows whatever backing_object did.
|
||||
@ -1264,9 +1229,10 @@ vm_object_collapse(object)
|
||||
*/
|
||||
|
||||
if (p->offset >= backing_offset &&
|
||||
new_offset <= size &&
|
||||
new_offset < size &&
|
||||
((pp = vm_page_lookup(object, new_offset))
|
||||
== NULL || (pp->flags & PG_FAKE))) {
|
||||
== NULL ||
|
||||
(pp->flags & PG_FAKE))) {
|
||||
/*
|
||||
* Page still needed.
|
||||
* Can't go any further.
|
||||
@ -1283,21 +1249,18 @@ vm_object_collapse(object)
|
||||
* count is at least 2.
|
||||
*/
|
||||
|
||||
vm_object_reference(object->shadow = backing_object->shadow);
|
||||
object->shadow = backing_object->shadow;
|
||||
vm_object_reference(object->shadow);
|
||||
object->shadow_offset += backing_object->shadow_offset;
|
||||
|
||||
#if 1
|
||||
/* Mach 3.0 code */
|
||||
/* andrew@werple.apana.org.au, 12 Feb 1993 */
|
||||
|
||||
/*
|
||||
* Backing object might have had a copy pointer
|
||||
* to us. If it did, clear it.
|
||||
* Backing object might have had a copy pointer
|
||||
* to us. If it did, clear it.
|
||||
*/
|
||||
if (backing_object->copy == object)
|
||||
if (backing_object->copy == object) {
|
||||
backing_object->copy = NULL;
|
||||
#endif
|
||||
|
||||
}
|
||||
|
||||
/* Drop the reference count on backing_object.
|
||||
* Since its ref_count was at least 2, it
|
||||
* will not vanish; so we don't need to call
|
||||
@ -1382,7 +1345,7 @@ vm_object_coalesce(prev_object, next_object,
|
||||
|
||||
#ifdef lint
|
||||
next_offset++;
|
||||
#endif lint
|
||||
#endif
|
||||
|
||||
if (next_object != NULL) {
|
||||
return(FALSE);
|
||||
@ -1437,7 +1400,6 @@ vm_object_coalesce(prev_object, next_object,
|
||||
return(TRUE);
|
||||
}
|
||||
|
||||
#if defined(DDB) || defined(DEBUG)
|
||||
/*
|
||||
* vm_object_print: [ debug ]
|
||||
*/
|
||||
@ -1455,7 +1417,7 @@ void
|
||||
_vm_object_print(object, full, pr)
|
||||
vm_object_t object;
|
||||
boolean_t full;
|
||||
int (*pr)();
|
||||
void (*pr) __P((const char *, ...));
|
||||
{
|
||||
register vm_page_t p;
|
||||
extern indent;
|
||||
@ -1496,4 +1458,3 @@ _vm_object_print(object, full, pr)
|
||||
(*pr)("\n");
|
||||
indent -= 2;
|
||||
}
|
||||
#endif
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1991 Regents of the University of California.
|
||||
* All rights reserved.
|
||||
* Copyright (c) 1991, 1993
|
||||
* The Regents of the University of California. All rights reserved.
|
||||
*
|
||||
* This code is derived from software contributed to Berkeley by
|
||||
* The Mach Operating System project at Carnegie-Mellon University.
|
||||
@ -33,8 +33,8 @@
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* from: @(#)vm_object.h 7.3 (Berkeley) 4/21/91
|
||||
* $Id: vm_object.h,v 1.10 1994/04/15 07:04:54 cgd Exp $
|
||||
* from: @(#)vm_object.h 8.3 (Berkeley) 1/12/94
|
||||
* $Id: vm_object.h,v 1.11 1994/05/23 03:11:52 cgd Exp $
|
||||
*
|
||||
*
|
||||
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
|
||||
@ -63,13 +63,13 @@
|
||||
* rights to redistribute these changes.
|
||||
*/
|
||||
|
||||
#ifndef _VM_VM_OBJECT_H_
|
||||
#define _VM_VM_OBJECT_H_
|
||||
|
||||
/*
|
||||
* Virtual memory object module definitions.
|
||||
*/
|
||||
|
||||
#ifndef _VM_OBJECT_
|
||||
#define _VM_OBJECT_
|
||||
|
||||
#include <vm/vm_page.h>
|
||||
#include <vm/vm_pager.h>
|
||||
|
||||
@ -82,6 +82,9 @@
|
||||
struct vm_object {
|
||||
struct pglist memq; /* Resident memory */
|
||||
TAILQ_ENTRY(vm_object) object_list; /* list of all objects */
|
||||
u_short flags; /* see below */
|
||||
u_short paging_in_progress; /* Paging (in or out) so
|
||||
don't collapse or destroy */
|
||||
simple_lock_data_t Lock; /* Synchronization */
|
||||
int ref_count; /* How many refs?? */
|
||||
vm_size_t size; /* Object size */
|
||||
@ -93,18 +96,14 @@ struct vm_object {
|
||||
vm_offset_t paging_offset; /* Offset into paging space */
|
||||
struct vm_object *shadow; /* My shadow */
|
||||
vm_offset_t shadow_offset; /* Offset in shadow */
|
||||
u_short paging_in_progress;
|
||||
/* Paging (in or out) - don't
|
||||
collapse or destroy */
|
||||
u_short flags; /* object flags; see below */
|
||||
TAILQ_ENTRY(vm_object) cached_list; /* for persistence */
|
||||
};
|
||||
/*
|
||||
* Flags
|
||||
*/
|
||||
#define OBJ_CANPERSIST 0x0001 /* allow to persist */
|
||||
#define OBJ_INTERNAL 0x0002 /* internally created object */
|
||||
#define OBJ_ACTIVE 0x0004 /* used to mark active objects */
|
||||
#define OBJ_CANPERSIST 0x0001 /* allow to persist */
|
||||
#define OBJ_INTERNAL 0x0002 /* internally created object */
|
||||
#define OBJ_ACTIVE 0x0004 /* used to mark active objects */
|
||||
|
||||
TAILQ_HEAD(vm_object_hash_head, vm_object_hash_entry);
|
||||
|
||||
@ -130,57 +129,48 @@ simple_lock_data_t vm_object_list_lock;
|
||||
vm_object_t kernel_object; /* the single kernel object */
|
||||
vm_object_t kmem_object;
|
||||
|
||||
/*
|
||||
* Declare procedures that operate on VM objects.
|
||||
*/
|
||||
void vm_object_init __P((void));
|
||||
vm_object_t vm_object_allocate __P((vm_size_t));
|
||||
void vm_object_reference __P((vm_object_t));
|
||||
void vm_object_deallocate __P((vm_object_t));
|
||||
void vm_object_page_clean
|
||||
__P((vm_object_t, vm_offset_t, vm_offset_t));
|
||||
void vm_object_pmap_copy
|
||||
__P((vm_object_t, vm_offset_t, vm_offset_t));
|
||||
void vm_object_pmap_remove
|
||||
__P((vm_object_t, vm_offset_t, vm_offset_t));
|
||||
void vm_object_copy
|
||||
__P((vm_object_t, vm_offset_t, vm_size_t,
|
||||
vm_object_t *, vm_offset_t *, boolean_t *));
|
||||
void vm_object_shadow
|
||||
__P((vm_object_t *, vm_offset_t *, vm_size_t));
|
||||
void vm_object_setpager
|
||||
__P((vm_object_t, vm_pager_t, vm_offset_t, boolean_t));
|
||||
vm_object_t vm_object_lookup __P((vm_pager_t));
|
||||
void vm_object_enter __P((vm_object_t, vm_pager_t));
|
||||
void vm_object_collapse __P((vm_object_t));
|
||||
void vm_object_page_remove
|
||||
__P((vm_object_t, vm_offset_t, vm_offset_t));
|
||||
boolean_t vm_object_coalesce
|
||||
__P((vm_object_t, vm_object_t, vm_offset_t, vm_offset_t,
|
||||
vm_size_t, vm_size_t));
|
||||
void vm_object_print __P((vm_object_t, boolean_t));
|
||||
void _vm_object_print __P((vm_object_t, boolean_t, int (*)()));
|
||||
#define vm_object_cache_lock() simple_lock(&vm_cache_lock)
|
||||
#define vm_object_cache_unlock() simple_unlock(&vm_cache_lock)
|
||||
#endif /* KERNEL */
|
||||
|
||||
/*
|
||||
* Functions implemented as macros
|
||||
*/
|
||||
#define vm_object_cache_lock() simple_lock(&vm_cache_lock)
|
||||
#define vm_object_cache_unlock() simple_unlock(&vm_cache_lock)
|
||||
#define vm_object_lock_init(object) simple_lock_init(&(object)->Lock)
|
||||
#define vm_object_lock(object) simple_lock(&(object)->Lock)
|
||||
#define vm_object_unlock(object) simple_unlock(&(object)->Lock)
|
||||
#define vm_object_lock_try(object) simple_lock_try(&(object)->Lock)
|
||||
#define vm_object_sleep(event, object, interruptible) \
|
||||
thread_sleep((event), &(object)->Lock, (interruptible))
|
||||
|
||||
#define vm_object_cache(pager) pager_cache(vm_object_lookup(pager),TRUE)
|
||||
#define vm_object_uncache(pager) pager_cache(vm_object_lookup(pager),FALSE)
|
||||
|
||||
#define vm_object_lock_init(object) \
|
||||
simple_lock_init(&(object)->Lock)
|
||||
#define vm_object_lock(object) \
|
||||
simple_lock(&(object)->Lock)
|
||||
#define vm_object_unlock(object) \
|
||||
simple_unlock(&(object)->Lock)
|
||||
#define vm_object_lock_try(object) \
|
||||
simple_lock_try(&(object)->Lock)
|
||||
#define vm_object_sleep(event, object) \
|
||||
thread_sleep((event), &(object)->Lock)
|
||||
|
||||
#endif /* KERNEL */
|
||||
|
||||
#endif /* !_VM_VM_OBJECT_H_ */
|
||||
#ifdef KERNEL
|
||||
vm_object_t vm_object_allocate __P((vm_size_t));
|
||||
void vm_object_cache_clear __P((void));
|
||||
void vm_object_cache_trim __P((void));
|
||||
boolean_t vm_object_coalesce __P((vm_object_t, vm_object_t,
|
||||
vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t));
|
||||
void vm_object_collapse __P((vm_object_t));
|
||||
void vm_object_copy __P((vm_object_t, vm_offset_t, vm_size_t,
|
||||
vm_object_t *, vm_offset_t *, boolean_t *));
|
||||
void vm_object_deactivate_pages __P((vm_object_t));
|
||||
void vm_object_deallocate __P((vm_object_t));
|
||||
void vm_object_enter __P((vm_object_t, vm_pager_t));
|
||||
void vm_object_init __P((vm_size_t));
|
||||
vm_object_t vm_object_lookup __P((vm_pager_t));
|
||||
boolean_t vm_object_page_clean __P((vm_object_t,
|
||||
vm_offset_t, vm_offset_t, boolean_t, boolean_t));
|
||||
void vm_object_page_remove __P((vm_object_t,
|
||||
vm_offset_t, vm_offset_t));
|
||||
void vm_object_pmap_copy __P((vm_object_t,
|
||||
vm_offset_t, vm_offset_t));
|
||||
void vm_object_pmap_remove __P((vm_object_t,
|
||||
vm_offset_t, vm_offset_t));
|
||||
void vm_object_print __P((vm_object_t, boolean_t));
|
||||
void _vm_object_print __P((vm_object_t, boolean_t,
|
||||
void (*)(const char *, ...)));
|
||||
void vm_object_reference __P((vm_object_t));
|
||||
void vm_object_remove __P((vm_pager_t));
|
||||
void vm_object_setpager __P((vm_object_t,
|
||||
vm_pager_t, vm_offset_t, boolean_t));
|
||||
void vm_object_shadow __P((vm_object_t *,
|
||||
vm_offset_t *, vm_size_t));
|
||||
void vm_object_terminate __P((vm_object_t));
|
||||
#endif
|
||||
#endif /* _VM_OBJECT_ */
|
||||
152
sys/vm/vm_page.c
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1991 Regents of the University of California.
|
||||
* All rights reserved.
|
||||
* Copyright (c) 1991, 1993
|
||||
* The Regents of the University of California. All rights reserved.
|
||||
*
|
||||
* This code is derived from software contributed to Berkeley by
|
||||
* The Mach Operating System project at Carnegie-Mellon University.
|
||||
@ -33,8 +33,8 @@
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
|
||||
* $Id: vm_page.c,v 1.14 1994/04/29 08:21:50 mycroft Exp $
|
||||
* from: @(#)vm_page.c 8.3 (Berkeley) 3/21/94
|
||||
* $Id: vm_page.c,v 1.15 1994/05/23 03:11:55 cgd Exp $
|
||||
*
|
||||
*
|
||||
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
|
||||
@ -68,19 +68,15 @@
|
||||
*/
|
||||
|
||||
#include <sys/param.h>
|
||||
#include <sys/systm.h>
|
||||
|
||||
#include <vm/vm.h>
|
||||
#include <vm/vm_map.h>
|
||||
#include <vm/vm_page.h>
|
||||
#include <vm/vm_map.h>
|
||||
#include <vm/vm_pageout.h>
|
||||
|
||||
#include <machine/cpu.h>
|
||||
|
||||
/*
|
||||
* Associated with each page of user-allocatable memory is a
|
||||
* page structure.
|
||||
*/
|
||||
|
||||
#ifdef MACHINE_NONCONTIG
|
||||
/*
|
||||
* These variables record the values returned by vm_page_bootstrap,
|
||||
@ -91,6 +87,11 @@ vm_offset_t virtual_space_start;
|
||||
vm_offset_t virtual_space_end;
|
||||
#endif /* MACHINE_NONCONTIG */
|
||||
|
||||
/*
|
||||
* Associated with page of user-allocatable memory is a
|
||||
* page structure.
|
||||
*/
|
||||
|
||||
struct pglist *vm_page_buckets; /* Array of buckets */
|
||||
int vm_page_bucket_count = 0; /* How big is array? */
|
||||
int vm_page_hash_mask; /* Mask for hash function */
|
||||
@ -102,8 +103,10 @@ struct pglist vm_page_queue_inactive;
|
||||
simple_lock_data_t vm_page_queue_lock;
|
||||
simple_lock_data_t vm_page_queue_free_lock;
|
||||
|
||||
vm_page_t vm_page_array;
|
||||
/* has physical page allocation been initialized? */
|
||||
boolean_t vm_page_startup_initialized;
|
||||
|
||||
vm_page_t vm_page_array;
|
||||
#ifndef MACHINE_NONCONTIG
|
||||
long first_page;
|
||||
long last_page;
|
||||
@ -123,7 +126,7 @@ int page_shift;
|
||||
* size. Must be called before any use of page-size
|
||||
* dependent functions.
|
||||
*
|
||||
* Sets page_shift and page_mask from page_size.
|
||||
* Sets page_shift and page_mask from cnt.v_page_size.
|
||||
*/
|
||||
void
|
||||
vm_set_page_size()
|
||||
@ -265,16 +268,13 @@ vm_page_bootstrap(startp, endp)
|
||||
* Each page cell is initialized and placed on the free list.
|
||||
*/
|
||||
vm_offset_t
|
||||
vm_page_startup(start, end, vaddr)
|
||||
register vm_offset_t start;
|
||||
vm_offset_t end;
|
||||
register vm_offset_t vaddr;
|
||||
vm_page_startup(start, end)
|
||||
register vm_offset_t *start;
|
||||
vm_offset_t *end;
|
||||
{
|
||||
register vm_page_t m;
|
||||
register vm_offset_t mapped;
|
||||
register struct pglist *bucket;
|
||||
vm_size_t npages;
|
||||
register vm_offset_t new_start;
|
||||
int i;
|
||||
vm_offset_t pa;
|
||||
extern vm_offset_t kentry_data;
|
||||
@ -298,7 +298,7 @@ vm_page_startup(start, end, vaddr)
|
||||
TAILQ_INIT(&vm_page_queue_inactive);
|
||||
|
||||
/*
|
||||
* Allocate (and initialize) the hash table buckets.
|
||||
* Calculate the number of hash table buckets.
|
||||
*
|
||||
* The number of buckets MUST BE a power of 2, and
|
||||
* the actual value is the next power of 2 greater
|
||||
@ -308,28 +308,21 @@ vm_page_startup(start, end, vaddr)
|
||||
* This computation can be tweaked if desired.
|
||||
*/
|
||||
|
||||
vm_page_buckets = (struct pglist *) vaddr;
|
||||
if (vm_page_bucket_count == 0) {
|
||||
vm_page_bucket_count = 1;
|
||||
while (vm_page_bucket_count < atop(end - start))
|
||||
while (vm_page_bucket_count < atop(*end - *start))
|
||||
vm_page_bucket_count <<= 1;
|
||||
}
|
||||
|
||||
vm_page_hash_mask = vm_page_bucket_count - 1;
|
||||
|
||||
/*
|
||||
* Validate these addresses.
|
||||
* Allocate (and initialize) the hash table buckets.
|
||||
*/
|
||||
|
||||
new_start = round_page(((struct pglist *)start) + vm_page_bucket_count);
|
||||
mapped = vaddr;
|
||||
vaddr = pmap_map(mapped, start, new_start,
|
||||
VM_PROT_READ|VM_PROT_WRITE);
|
||||
start = new_start;
|
||||
bzero((caddr_t) mapped, vaddr - mapped);
|
||||
mapped = vaddr;
|
||||
|
||||
vm_page_buckets = (struct pglist *)
|
||||
pmap_bootstrap_alloc(vm_page_bucket_count * sizeof(struct pglist));
|
||||
bucket = vm_page_buckets;
|
||||
|
||||
for (i = vm_page_bucket_count; i--;) {
|
||||
TAILQ_INIT(bucket);
|
||||
bucket++;
|
||||
@ -338,10 +331,10 @@ vm_page_startup(start, end, vaddr)
|
||||
simple_lock_init(&bucket_lock);
|
||||
|
||||
/*
|
||||
* round (or truncate) the addresses to our page size.
|
||||
* Truncate the remainder of physical memory to our page size.
|
||||
*/
|
||||
|
||||
end = trunc_page(end);
|
||||
*end = trunc_page(*end);
|
||||
|
||||
/*
|
||||
* Pre-allocate maps and map entries that cannot be dynamically
|
||||
@ -355,21 +348,9 @@ vm_page_startup(start, end, vaddr)
|
||||
* map (they should use their own maps).
|
||||
*/
|
||||
|
||||
kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
|
||||
MAX_KMAPENT * sizeof(struct vm_map_entry);
|
||||
kentry_data_size = round_page(kentry_data_size);
|
||||
kentry_data = (vm_offset_t) vaddr;
|
||||
vaddr += kentry_data_size;
|
||||
|
||||
/*
|
||||
* Validate these zone addresses.
|
||||
*/
|
||||
|
||||
new_start = start + (vaddr - mapped);
|
||||
pmap_map(mapped, start, new_start, VM_PROT_READ|VM_PROT_WRITE);
|
||||
bzero((caddr_t) mapped, (vaddr - mapped));
|
||||
mapped = vaddr;
|
||||
start = new_start;
|
||||
kentry_data_size = round_page(MAX_KMAP*sizeof(struct vm_map) +
|
||||
MAX_KMAPENT*sizeof(struct vm_map_entry));
|
||||
kentry_data = (vm_offset_t) pmap_bootstrap_alloc(kentry_data_size);
|
||||
|
||||
/*
|
||||
* Compute the number of pages of memory that will be
|
||||
@ -377,16 +358,15 @@ vm_page_startup(start, end, vaddr)
|
||||
* of a page structure per page).
|
||||
*/
|
||||
|
||||
cnt.v_free_count = npages =
|
||||
(end - start + sizeof(struct vm_page))/(PAGE_SIZE + sizeof(struct vm_page));
|
||||
cnt.v_free_count = npages = (*end - *start + sizeof(struct vm_page))
|
||||
/ (PAGE_SIZE + sizeof(struct vm_page));
|
||||
|
||||
/*
|
||||
* Initialize the mem entry structures now, and
|
||||
* put them in the free queue.
|
||||
* Record the extent of physical memory that the
|
||||
* virtual memory system manages.
|
||||
*/
|
||||
|
||||
m = vm_page_array = (vm_page_t) vaddr;
|
||||
first_page = start;
|
||||
first_page = *start;
|
||||
first_page += npages*sizeof(struct vm_page);
|
||||
first_page = atop(round_page(first_page));
|
||||
last_page = first_page + npages - 1;
|
||||
@ -394,24 +374,35 @@ vm_page_startup(start, end, vaddr)
|
||||
first_phys_addr = ptoa(first_page);
|
||||
last_phys_addr = ptoa(last_page) + PAGE_MASK;
|
||||
|
||||
/*
|
||||
* Validate these addresses.
|
||||
*/
|
||||
|
||||
new_start = start + (round_page(m + npages) - mapped);
|
||||
mapped = pmap_map(mapped, start, new_start,
|
||||
VM_PROT_READ|VM_PROT_WRITE);
|
||||
start = new_start;
|
||||
|
||||
/*
|
||||
* Clear all of the page structures
|
||||
* Allocate and clear the mem entry structures.
|
||||
*/
|
||||
|
||||
m = vm_page_array = (vm_page_t)
|
||||
pmap_bootstrap_alloc(npages * sizeof(struct vm_page));
|
||||
|
||||
/*
|
||||
* Initialize the mem entry structures now, and
|
||||
* put them in the free queue.
|
||||
*/
|
||||
bzero((caddr_t)m, npages * sizeof(*m));
|
||||
|
||||
pa = first_phys_addr;
|
||||
while (npages--) {
|
||||
m->flags = 0;
|
||||
m->object = NULL;
|
||||
m->phys_addr = pa;
|
||||
#ifdef i386 /* XXX will never be used */
|
||||
if (pmap_isvalidphys(m->phys_addr)) {
|
||||
TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
|
||||
} else {
|
||||
/* perhaps iomem needs it's own type, or dev pager? */
|
||||
m->flags |= PG_FICTITIOUS | PG_BUSY;
|
||||
cnt.v_free_count--;
|
||||
}
|
||||
#else /* i386 */
|
||||
TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
|
||||
#endif /* i386 */
|
||||
m++;
|
||||
pa += PAGE_SIZE;
|
||||
}
|
||||
@ -422,7 +413,8 @@ vm_page_startup(start, end, vaddr)
|
||||
*/
|
||||
simple_lock_init(&vm_pages_needed_lock);
|
||||
|
||||
return(mapped);
|
||||
/* from now on, pmap_bootstrap_alloc can't be used */
|
||||
vm_page_startup_initialized = TRUE;
|
||||
}
|
||||
#endif /* MACHINE_NONCONTIG */
|
||||
|
||||
@ -604,6 +596,7 @@ vm_page_insert(mem, object, offset)
|
||||
|
||||
/*
|
||||
* vm_page_remove: [ internal use only ]
|
||||
* NOTE: used by device pager as well -wfj
|
||||
*
|
||||
* Removes the given mem entry from the object/offset-page
|
||||
* table and the object page list.
|
||||
@ -761,11 +754,11 @@ vm_page_alloc(object, offset)
|
||||
* it doesn't really matter.
|
||||
*/
|
||||
|
||||
if ((cnt.v_free_count < cnt.v_free_min) ||
|
||||
((cnt.v_free_count < cnt.v_free_target) &&
|
||||
(cnt.v_inactive_count < cnt.v_inactive_target)))
|
||||
thread_wakeup(&vm_pages_needed);
|
||||
return(mem);
|
||||
if (cnt.v_free_count < cnt.v_free_min ||
|
||||
(cnt.v_free_count < cnt.v_free_target &&
|
||||
cnt.v_inactive_count < cnt.v_inactive_target))
|
||||
thread_wakeup((int)&vm_pages_needed);
|
||||
return (mem);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -781,12 +774,12 @@ vm_page_free(mem)
|
||||
register vm_page_t mem;
|
||||
{
|
||||
vm_page_remove(mem);
|
||||
|
||||
if (mem->flags & PG_ACTIVE) {
|
||||
TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
|
||||
mem->flags &= ~PG_ACTIVE;
|
||||
cnt.v_active_count--;
|
||||
}
|
||||
|
||||
if (mem->flags & PG_INACTIVE) {
|
||||
TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
|
||||
mem->flags &= ~PG_INACTIVE;
|
||||
@ -878,22 +871,15 @@ vm_page_deactivate(m)
|
||||
/*
|
||||
* Only move active pages -- ignore locked or already
|
||||
* inactive ones.
|
||||
*
|
||||
* XXX: sometimes we get pages which aren't wired down
|
||||
* or on any queue - we need to put them on the inactive
|
||||
* queue also, otherwise we lose track of them.
|
||||
* Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
|
||||
*/
|
||||
|
||||
if (!(m->flags & PG_INACTIVE) && m->wire_count == 0) {
|
||||
if (m->flags & PG_ACTIVE) {
|
||||
pmap_clear_reference(VM_PAGE_TO_PHYS(m));
|
||||
if (m->flags & PG_ACTIVE) {
|
||||
TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
|
||||
m->flags &= ~PG_ACTIVE;
|
||||
cnt.v_active_count--;
|
||||
}
|
||||
TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
|
||||
TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
|
||||
m->flags &= ~PG_ACTIVE;
|
||||
m->flags |= PG_INACTIVE;
|
||||
cnt.v_active_count--;
|
||||
cnt.v_inactive_count++;
|
||||
if (pmap_is_modified(VM_PAGE_TO_PHYS(m)))
|
||||
m->flags &= ~PG_CLEAN;
|
||||
@ -945,6 +931,7 @@ vm_page_zero_fill(m)
|
||||
{
|
||||
VM_PAGE_CHECK(m);
|
||||
|
||||
m->flags &= ~PG_CLEAN;
|
||||
pmap_zero_page(VM_PAGE_TO_PHYS(m));
|
||||
return(TRUE);
|
||||
}
|
||||
@ -962,5 +949,6 @@ vm_page_copy(src_m, dest_m)
|
||||
VM_PAGE_CHECK(src_m);
|
||||
VM_PAGE_CHECK(dest_m);
|
||||
|
||||
dest_m->flags &= ~PG_CLEAN;
|
||||
pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
|
||||
}
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1991 Regents of the University of California.
|
||||
* All rights reserved.
|
||||
* Copyright (c) 1991, 1993
|
||||
* The Regents of the University of California. All rights reserved.
|
||||
*
|
||||
* This code is derived from software contributed to Berkeley by
|
||||
* The Mach Operating System project at Carnegie-Mellon University.
|
||||
@ -34,7 +34,7 @@
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* from: @(#)vm_page.h 7.3 (Berkeley) 4/21/91
|
||||
* $Id: vm_page.h,v 1.11 1994/04/29 08:21:52 mycroft Exp $
|
||||
* $Id: vm_page.h,v 1.12 1994/05/23 03:11:57 cgd Exp $
|
||||
*
|
||||
*
|
||||
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
|
||||
@ -286,7 +286,7 @@ vm_page_t vm_page_lookup __P((vm_object_t, vm_offset_t));
void vm_page_remove __P((vm_page_t));
void vm_page_rename __P((vm_page_t, vm_object_t, vm_offset_t));
#ifndef MACHINE_NONCONTIG
vm_offset_t vm_page_startup __P((vm_offset_t, vm_offset_t, vm_offset_t));
vm_offset_t vm_page_startup __P((vm_offset_t *, vm_offset_t *));
#endif
void vm_page_unwire __P((vm_page_t));
void vm_page_wire __P((vm_page_t));
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1991 Regents of the University of California.
|
||||
* All rights reserved.
|
||||
* Copyright (c) 1991, 1993
|
||||
* The Regents of the University of California. All rights reserved.
|
||||
*
|
||||
* This code is derived from software contributed to Berkeley by
|
||||
* The Mach Operating System project at Carnegie-Mellon University.
|
||||
@ -33,8 +33,8 @@
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* from: @(#)vm_pageout.c 7.4 (Berkeley) 5/7/91
|
||||
* $Id: vm_pageout.c,v 1.13 1994/05/21 04:00:13 cgd Exp $
|
||||
* from: @(#)vm_pageout.c 8.5 (Berkeley) 2/14/94
|
||||
* $Id: vm_pageout.c,v 1.14 1994/05/23 03:11:58 cgd Exp $
|
||||
*
|
||||
*
|
||||
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
|
||||
@ -73,12 +73,25 @@
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>
#ifndef VM_PAGE_FREE_MIN
#define VM_PAGE_FREE_MIN (cnt.v_free_count / 20)
#endif
|
||||
|
||||
int vm_pages_needed; /* Event on which pageout daemon sleeps */
|
||||
int vm_pageout_free_min = 0; /* Stop pageout to wait for pagers at this free level */
|
||||
#ifndef VM_PAGE_FREE_TARGET
|
||||
#define VM_PAGE_FREE_TARGET ((cnt.v_free_min * 4) / 3)
|
||||
#endif
|
||||
|
||||
int vm_page_free_min_sanity = 40;
|
||||
int vm_page_free_min_min = 16 * 1024;
|
||||
int vm_page_free_min_max = 256 * 1024;
|
||||
|
||||
int vm_pages_needed; /* Event on which pageout daemon sleeps */
|
||||
|
||||
int vm_page_max_wired = 0; /* XXX max # of wired pages system-wide */
|
||||
|
||||
#ifdef CLUSTERED_PAGEOUT
|
||||
#define MAXPOCLUSTER (MAXPHYS/NBPG) /* XXX */
|
||||
int doclustered_pageout = 1;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* vm_pageout_scan does the dirty work for the pageout daemon.
|
||||
@ -97,6 +110,8 @@ vm_pageout_scan()
|
||||
* Only continue when we want more pages to be "free"
|
||||
*/
|
||||
|
||||
cnt.v_rev++;
|
||||
|
||||
s = splimp();
|
||||
simple_lock(&vm_page_queue_free_lock);
|
||||
free = cnt.v_free_count;
|
||||
@ -104,7 +119,7 @@ vm_pageout_scan()
|
||||
splx(s);
|
||||
|
||||
if (free < cnt.v_free_target) {
|
||||
#ifdef OMIT
|
||||
#ifdef OMIT /* XXX */
|
||||
swapout_threads();
|
||||
#endif /* OMIT*/
|
||||
|
||||
@ -183,6 +198,12 @@ vm_pageout_scan()
|
||||
if (!vm_object_lock_try(object))
|
||||
continue;
|
||||
cnt.v_pageouts++;
|
||||
#ifdef CLUSTERED_PAGEOUT
|
||||
if (object->pager &&
|
||||
vm_pager_cancluster(object->pager, PG_CLUSTERPUT))
|
||||
vm_pageout_cluster(m, object);
|
||||
else
|
||||
#endif
|
||||
vm_pageout_page(m, object);
|
||||
thread_wakeup((int) object);
|
||||
vm_object_unlock(object);
|
||||
@ -294,7 +315,20 @@ vm_pageout_page(m, object)
|
||||
m->flags |= PG_CLEAN;
|
||||
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
|
||||
break;
|
||||
case VM_PAGER_AGAIN:
|
||||
{
|
||||
extern int lbolt;
|
||||
|
||||
/*
|
||||
* FAIL on a write is interpreted to mean a resource
|
||||
* shortage, so we put pause for awhile and try again.
|
||||
* XXX could get stuck here.
|
||||
*/
|
||||
(void) tsleep((caddr_t)&lbolt, PZERO|PCATCH, "pageout", 0);
|
||||
break;
|
||||
}
|
||||
case VM_PAGER_FAIL:
|
||||
case VM_PAGER_ERROR:
|
||||
/*
|
||||
* If page couldn't be paged out, then reactivate
|
||||
* the page so it doesn't clog the inactive list.
|
||||
@ -320,6 +354,156 @@ vm_pageout_page(m, object)
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CLUSTERED_PAGEOUT
|
||||
#define PAGEOUTABLE(p) \
|
||||
((((p)->flags & (PG_INACTIVE|PG_CLEAN|PG_LAUNDRY)) == \
|
||||
(PG_INACTIVE|PG_LAUNDRY)) && !pmap_is_referenced(VM_PAGE_TO_PHYS(p)))
|
||||
|
||||
/*
|
||||
* Attempt to pageout as many contiguous (to ``m'') dirty pages as possible
|
||||
* from ``object''. Using information returned from the pager, we assemble
|
||||
* a sorted list of contiguous dirty pages and feed them to the pager in one
|
||||
* chunk. Called with paging queues and object locked. Also, object must
|
||||
* already have a pager.
|
||||
*/
|
||||
void
|
||||
vm_pageout_cluster(m, object)
|
||||
vm_page_t m;
|
||||
vm_object_t object;
|
||||
{
|
||||
vm_offset_t offset, loff, hoff;
|
||||
vm_page_t plist[MAXPOCLUSTER], *plistp, p;
|
||||
int postatus, ix, count;
|
||||
|
||||
/*
|
||||
* Determine the range of pages that can be part of a cluster
|
||||
* for this object/offset. If it is only our single page, just
|
||||
* do it normally.
|
||||
*/
|
||||
vm_pager_cluster(object->pager, m->offset, &loff, &hoff);
|
||||
if (hoff - loff == PAGE_SIZE) {
|
||||
vm_pageout_page(m, object);
|
||||
return;
|
||||
}
|
||||
|
||||
plistp = plist;
|
||||
|
||||
/*
|
||||
* Target page is always part of the cluster.
|
||||
*/
|
||||
pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
|
||||
m->flags |= PG_BUSY;
|
||||
plistp[atop(m->offset - loff)] = m;
|
||||
count = 1;
|
||||
|
||||
/*
|
||||
* Backup from the given page til we find one not fulfilling
|
||||
* the pageout criteria or we hit the lower bound for the
|
||||
* cluster. For each page determined to be part of the
|
||||
* cluster, unmap it and busy it out so it won't change.
|
||||
*/
|
||||
ix = atop(m->offset - loff);
|
||||
offset = m->offset;
|
||||
while (offset > loff && count < MAXPOCLUSTER-1) {
|
||||
p = vm_page_lookup(object, offset - PAGE_SIZE);
|
||||
if (p == NULL || !PAGEOUTABLE(p))
|
||||
break;
|
||||
pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
|
||||
p->flags |= PG_BUSY;
|
||||
plistp[--ix] = p;
|
||||
offset -= PAGE_SIZE;
|
||||
count++;
|
||||
}
|
||||
plistp += atop(offset - loff);
|
||||
loff = offset;
|
||||
|
||||
/*
|
||||
* Now do the same moving forward from the target.
|
||||
*/
|
||||
ix = atop(m->offset - loff) + 1;
|
||||
offset = m->offset + PAGE_SIZE;
|
||||
while (offset < hoff && count < MAXPOCLUSTER) {
|
||||
p = vm_page_lookup(object, offset);
|
||||
if (p == NULL || !PAGEOUTABLE(p))
|
||||
break;
|
||||
pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
|
||||
p->flags |= PG_BUSY;
|
||||
plistp[ix++] = p;
|
||||
offset += PAGE_SIZE;
|
||||
count++;
|
||||
}
|
||||
hoff = offset;
|
||||
|
||||
/*
|
||||
* Pageout the page.
|
||||
* Unlock everything and do a wakeup prior to the pager call
|
||||
* in case it blocks.
|
||||
*/
|
||||
vm_page_unlock_queues();
|
||||
object->paging_in_progress++;
|
||||
vm_object_unlock(object);
|
||||
again:
|
||||
thread_wakeup((int) &cnt.v_free_count);
|
||||
postatus = vm_pager_put_pages(object->pager, plistp, count, FALSE);
|
||||
/*
|
||||
* XXX rethink this
|
||||
*/
|
||||
if (postatus == VM_PAGER_AGAIN) {
|
||||
extern int lbolt;
|
||||
|
||||
(void) tsleep((caddr_t)&lbolt, PZERO|PCATCH, "pageout", 0);
|
||||
goto again;
|
||||
} else if (postatus == VM_PAGER_BAD)
|
||||
panic("vm_pageout_cluster: VM_PAGER_BAD");
|
||||
vm_object_lock(object);
|
||||
vm_page_lock_queues();
|
||||
|
||||
/*
|
||||
* Loop through the affected pages, reflecting the outcome of
|
||||
* the operation.
|
||||
*/
|
||||
for (ix = 0; ix < count; ix++) {
|
||||
p = *plistp++;
|
||||
switch (postatus) {
|
||||
case VM_PAGER_OK:
|
||||
case VM_PAGER_PEND:
|
||||
cnt.v_pgpgout++;
|
||||
p->flags &= ~PG_LAUNDRY;
|
||||
break;
|
||||
case VM_PAGER_FAIL:
|
||||
case VM_PAGER_ERROR:
|
||||
/*
|
||||
* Pageout failed, reactivate the target page so it
|
||||
* doesn't clog the inactive list. Other pages are
|
||||
* left as they are.
|
||||
*/
|
||||
if (p == m) {
|
||||
vm_page_activate(p);
|
||||
cnt.v_reactivated++;
|
||||
}
|
||||
break;
|
||||
}
|
||||
pmap_clear_reference(VM_PAGE_TO_PHYS(p));
|
||||
/*
|
||||
* If the operation is still going, leave the page busy
|
||||
* to block all other accesses.
|
||||
*/
|
||||
if (postatus != VM_PAGER_PEND) {
|
||||
p->flags &= ~PG_BUSY;
|
||||
PAGE_WAKEUP(p);
|
||||
|
||||
}
|
||||
}
|
||||
/*
|
||||
* If the operation is still going, leave the paging in progress
|
||||
* indicator set so that we don't attempt an object collapse.
|
||||
*/
|
||||
if (postatus != VM_PAGER_PEND)
|
||||
object->paging_in_progress--;
|
||||
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* vm_pageout is the high level pageout daemon.
|
||||
*/
|
||||
@ -334,25 +518,24 @@ vm_pageout()
|
||||
*/
|
||||
|
||||
if (cnt.v_free_min == 0) {
|
||||
cnt.v_free_min = cnt.v_free_count / 20;
|
||||
if (cnt.v_free_min < 3)
|
||||
cnt.v_free_min = 3;
|
||||
|
||||
if (cnt.v_free_min > vm_page_free_min_sanity)
|
||||
cnt.v_free_min = vm_page_free_min_sanity;
|
||||
cnt.v_free_min = VM_PAGE_FREE_MIN;
|
||||
vm_page_free_min_min /= cnt.v_page_size;
|
||||
vm_page_free_min_max /= cnt.v_page_size;
|
||||
if (cnt.v_free_min < vm_page_free_min_min)
|
||||
cnt.v_free_min = vm_page_free_min_min;
|
||||
if (cnt.v_free_min > vm_page_free_min_max)
|
||||
cnt.v_free_min = vm_page_free_min_max;
|
||||
}
|
||||
|
||||
if (cnt.v_free_target == 0)
|
||||
cnt.v_free_target = (cnt.v_free_min * 4) / 3;
|
||||
|
||||
if (cnt.v_inactive_target == 0)
|
||||
cnt.v_inactive_target = cnt.v_free_min * 2;
|
||||
cnt.v_free_target = VM_PAGE_FREE_TARGET;
|
||||
|
||||
if (cnt.v_free_target <= cnt.v_free_min)
|
||||
cnt.v_free_target = cnt.v_free_min + 1;
|
||||
|
||||
if (cnt.v_inactive_target <= cnt.v_free_target)
|
||||
cnt.v_inactive_target = cnt.v_free_target + 1;
|
||||
/* XXX does not really belong here */
|
||||
if (vm_page_max_wired == 0)
|
||||
vm_page_max_wired = cnt.v_free_count / 3;
|
||||
|
||||
/*
|
||||
* The pageout daemon is never done, so loop
|
||||
@ -361,9 +544,26 @@ vm_pageout()
|
||||
|
||||
simple_lock(&vm_pages_needed_lock);
|
||||
while (TRUE) {
|
||||
thread_sleep((int) &vm_pages_needed, &vm_pages_needed_lock);
|
||||
cnt.v_scan++;
|
||||
vm_pageout_scan();
|
||||
thread_sleep((int) &vm_pages_needed, &vm_pages_needed_lock,
|
||||
FALSE);
|
||||
/*
|
||||
* Compute the inactive target for this scan.
|
||||
* We need to keep a reasonable amount of memory in the
|
||||
* inactive list to better simulate LRU behavior.
|
||||
*/
|
||||
cnt.v_inactive_target =
|
||||
(cnt.v_active_count + cnt.v_inactive_count) / 3;
|
||||
if (cnt.v_inactive_target <= cnt.v_free_target)
|
||||
cnt.v_inactive_target = cnt.v_free_target + 1;
|
||||
|
||||
/*
|
||||
* Only make a scan if we are likely to do something.
|
||||
* Otherwise we might have been awakened by a pager
|
||||
* to clean up async pageouts.
|
||||
*/
|
||||
if (cnt.v_free_count < cnt.v_free_target ||
|
||||
cnt.v_inactive_count < cnt.v_inactive_target)
|
||||
vm_pageout_scan();
|
||||
vm_pager_sync();
|
||||
simple_lock(&vm_pages_needed_lock);
|
||||
thread_wakeup((int) &cnt.v_free_count);
|
||||
|
@ -34,7 +34,7 @@
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* from: @(#)vm_pageout.h 8.2 (Berkeley) 1/12/94
|
||||
* $Id: vm_pageout.h,v 1.6 1994/05/05 20:35:12 mycroft Exp $
|
||||
* $Id: vm_pageout.h,v 1.7 1994/05/23 03:12:00 cgd Exp $
|
||||
*
|
||||
*
|
||||
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
|
||||
@ -63,9 +63,6 @@
|
||||
* rights to redistribute these changes.
|
||||
*/
|
||||
|
||||
#ifndef _VM_VM_PAGEOUT_H_
|
||||
#define _VM_VM_PAGEOUT_H_
|
||||
|
||||
/*
|
||||
* Header file for pageout daemon.
|
||||
*/
|
||||
@ -90,7 +87,7 @@ simple_lock_data_t vm_pages_needed_lock;
|
||||
simple_lock(&vm_pages_needed_lock); \
|
||||
thread_wakeup((int)&vm_pages_needed); \
|
||||
thread_sleep((int)&cnt.v_free_count, \
|
||||
&vm_pages_needed_lock); \
|
||||
&vm_pages_needed_lock, FALSE); \
|
||||
}
|
||||
#ifdef KERNEL
|
||||
void vm_pageout __P((void));
|
||||
@ -98,5 +95,3 @@ void vm_pageout_scan __P((void));
|
||||
void vm_pageout_page __P((vm_page_t, vm_object_t));
|
||||
void vm_pageout_cluster __P((vm_page_t, vm_object_t));
|
||||
#endif
|
||||
|
||||
#endif /* !_VM_VM_PAGEOUT_H_ */
|
||||
|
@ -33,8 +33,8 @@
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* from: @(#)vm_pager.c 8.1 (Berkeley) 6/11/93
|
||||
* $Id: vm_pager.c,v 1.11 1994/04/15 07:05:02 cgd Exp $
|
||||
* from: @(#)vm_pager.c 8.6 (Berkeley) 1/12/94
|
||||
* $Id: vm_pager.c,v 1.12 1994/05/23 03:12:02 cgd Exp $
|
||||
*
|
||||
*
|
||||
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
|
||||
@ -112,9 +112,15 @@ struct pagerops *dfltpagerops = NULL; /* default pager */
/*
* Kernel address space for mapping pages.
* Used by pagers where KVAs are needed for IO.
*
* XXX needs to be large enough to support the number of pending async
* cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
* (MAXPHYS == 64k) if you want to get the most efficiency.
*/
#define PAGER_MAP_SIZE (256 * PAGE_SIZE)
#define PAGER_MAP_SIZE (4 * 1024 * 1024)
|
||||
|
||||
vm_map_t pager_map;
|
||||
boolean_t pager_map_wanted;
|
||||
vm_offset_t pager_sva, pager_eva;
|
||||
|
||||
void
|
||||
@ -131,7 +137,7 @@ vm_pager_init()
|
||||
* Initialize known pagers
|
||||
*/
|
||||
for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
|
||||
if (*pgops != NULL)
|
||||
if (pgops)
|
||||
(*(*pgops)->pgo_init)();
|
||||
if (dfltpagerops == NULL)
|
||||
panic("no default pager");
|
||||
@ -150,14 +156,12 @@ vm_pager_allocate(type, handle, size, prot, off)
vm_prot_t prot;
vm_offset_t off;
{
vm_pager_t pager;
struct pagerops *ops;

ops = (type == PG_DFLT) ? dfltpagerops : pagertab[type];
if (ops == NULL)
return NULL; /* not compiled in; punt */
else
return((*ops->pgo_alloc)(handle, size, prot, off));
if (ops)
return ((*ops->pgo_alloc)(handle, size, prot, off));
return (NULL);
}

void
@ -167,31 +171,41 @@ vm_pager_deallocate(pager)
if (pager == NULL)
panic("vm_pager_deallocate: null pager");

VM_PAGER_DEALLOC(pager);
(*pager->pg_ops->pgo_dealloc)(pager);
}

int
vm_pager_get(pager, m, sync)
vm_pager_get_pages(pager, mlist, npages, sync)
vm_pager_t pager;
vm_page_t m;
vm_page_t *mlist;
int npages;
boolean_t sync;
{
extern boolean_t vm_page_zero_fill();
int rv;

if (pager == NULL)
return(vm_page_zero_fill(m) ? VM_PAGER_OK : VM_PAGER_FAIL);
return(VM_PAGER_GET(pager, m, sync));
if (pager == NULL) {
rv = VM_PAGER_OK;
while (npages--)
if (!vm_page_zero_fill(*mlist)) {
rv = VM_PAGER_FAIL;
break;
} else
mlist++;
return (rv);
}
return ((*pager->pg_ops->pgo_getpages)(pager, mlist, npages, sync));
}

int
vm_pager_put(pager, m, sync)
vm_pager_put_pages(pager, mlist, npages, sync)
vm_pager_t pager;
vm_page_t m;
vm_page_t *mlist;
int npages;
boolean_t sync;
{
if (pager == NULL)
panic("vm_pager_put: null pager");
return(VM_PAGER_PUT(pager, m, sync));
panic("vm_pager_put_pages: null pager");
return ((*pager->pg_ops->pgo_putpages)(pager, mlist, npages, sync));
}

boolean_t
@ -200,8 +214,8 @@ vm_pager_has_page(pager, offset)
vm_offset_t offset;
{
if (pager == NULL)
panic("vm_pager_has_page");
return(VM_PAGER_HASPAGE(pager, offset));
panic("vm_pager_has_page: null pager");
return ((*pager->pg_ops->pgo_haspage)(pager, offset));
}

/*
@ -214,49 +228,118 @@ vm_pager_sync()
struct pagerops **pgops;

for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
if (*pgops != NULL)
(*(*pgops)->pgo_putpage)(NULL, NULL, FALSE);
}

vm_offset_t
vm_pager_map_page(m)
vm_page_t m;
{
vm_offset_t kva;

#ifdef DEBUG
if (!(m->flags & PG_BUSY))
panic("vm_pager_map_page: page not busy");
if (m->flags & PG_PAGEROWNED)
printf("vm_pager_map_page: page %x already in pager\n", m);
#endif
kva = kmem_alloc_wait(pager_map, PAGE_SIZE);
#ifdef DEBUG
m->flags |= PG_PAGEROWNED;
#endif
pmap_enter(vm_map_pmap(pager_map), kva, VM_PAGE_TO_PHYS(m),
VM_PROT_DEFAULT, TRUE);
return(kva);
if (pgops)
(*(*pgops)->pgo_putpages)(NULL, NULL, 0, FALSE);
}

void
vm_pager_unmap_page(kva)
vm_offset_t kva;
vm_pager_cluster(pager, offset, loff, hoff)
vm_pager_t pager;
vm_offset_t offset;
vm_offset_t *loff;
vm_offset_t *hoff;
{
#ifdef DEBUG
if (pager == NULL)
panic("vm_pager_cluster: null pager");
return ((*pager->pg_ops->pgo_cluster)(pager, offset, loff, hoff));
}

void
vm_pager_clusternull(pager, offset, loff, hoff)
vm_pager_t pager;
vm_offset_t offset;
vm_offset_t *loff;
vm_offset_t *hoff;
{
panic("vm_pager_nullcluster called");
}

vm_offset_t
vm_pager_map_pages(mlist, npages, canwait)
vm_page_t *mlist;
int npages;
boolean_t canwait;
{
vm_offset_t kva, va;
vm_size_t size;
vm_page_t m;

m = PHYS_TO_VM_PAGE(pmap_extract(vm_map_pmap(pager_map), kva));
#endif
pmap_remove(vm_map_pmap(pager_map), kva, kva + PAGE_SIZE);
kmem_free_wakeup(pager_map, kva, PAGE_SIZE);
/*
* Allocate space in the pager map, if none available return 0.
* This is basically an expansion of kmem_alloc_wait with optional
* blocking on no space.
*/
size = npages * PAGE_SIZE;
vm_map_lock(pager_map);
while (vm_map_findspace(pager_map, 0, size, &kva)) {
if (!canwait) {
vm_map_unlock(pager_map);
return (0);
}
pager_map_wanted = TRUE;
vm_map_unlock(pager_map);
(void) tsleep(pager_map, PVM, "pager_map", 0);
vm_map_lock(pager_map);
}
vm_map_insert(pager_map, NULL, 0, kva, kva + size);
vm_map_unlock(pager_map);

for (va = kva; npages--; va += PAGE_SIZE) {
m = *mlist++;
#ifdef DEBUG
if (m->flags & PG_PAGEROWNED)
m->flags &= ~PG_PAGEROWNED;
else
printf("vm_pager_unmap_page: page %x(%x/%x) not owned\n",
m, kva, VM_PAGE_TO_PHYS(m));
if ((m->flags & PG_BUSY) == 0)
panic("vm_pager_map_pages: page not busy");
if (m->flags & PG_PAGEROWNED)
panic("vm_pager_map_pages: page already in pager");
#endif
#ifdef DEBUG
m->flags |= PG_PAGEROWNED;
#endif
pmap_enter(vm_map_pmap(pager_map), va, VM_PAGE_TO_PHYS(m),
VM_PROT_DEFAULT, TRUE);
}
return (kva);
}

void
vm_pager_unmap_pages(kva, npages)
vm_offset_t kva;
int npages;
{
vm_size_t size = npages * PAGE_SIZE;

#ifdef DEBUG
vm_offset_t va;
vm_page_t m;
int np = npages;

for (va = kva; np--; va += PAGE_SIZE) {
m = vm_pager_atop(va);
if (m->flags & PG_PAGEROWNED)
m->flags &= ~PG_PAGEROWNED;
else
printf("vm_pager_unmap_pages: %x(%x/%x) not owned\n",
m, va, VM_PAGE_TO_PHYS(m));
}
#endif
pmap_remove(vm_map_pmap(pager_map), kva, kva + size);
vm_map_lock(pager_map);
(void) vm_map_delete(pager_map, kva, kva + size);
if (pager_map_wanted)
wakeup(pager_map);
vm_map_unlock(pager_map);
}

vm_page_t
vm_pager_atop(kva)
vm_offset_t kva;
{
vm_offset_t pa;

pa = pmap_extract(vm_map_pmap(pager_map), kva);
if (pa == 0)
panic("vm_pager_atop");
return (PHYS_TO_VM_PAGE(pa));
}

vm_pager_t
@ -282,7 +365,7 @@ pager_cache(object, should_cache)
boolean_t should_cache;
{
if (object == NULL)
return(KERN_INVALID_ARGUMENT);
return (KERN_INVALID_ARGUMENT);

vm_object_cache_lock();
vm_object_lock(object);
@ -295,5 +378,5 @@ pager_cache(object, should_cache)

vm_object_deallocate(object);

return(KERN_SUCCESS);
return (KERN_SUCCESS);
}
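The pager == NULL branch of vm_pager_get_pages() above falls back to zero-filling every page in the list and aborts on the first failure. Below is a minimal user-space sketch of that loop; the types and the always-succeeding vm_page_zero_fill() stub are stand-ins for the kernel interfaces, not the real thing.

#include <stdio.h>

#define VM_PAGER_OK	0
#define VM_PAGER_FAIL	2

typedef int boolean_t;
typedef struct vm_page { int zeroed; } *vm_page_t;

/* Stub: pretend the page can always be cleared. */
static boolean_t
vm_page_zero_fill(vm_page_t m)
{
	m->zeroed = 1;
	return (1);
}

/* Same control flow as the pager == NULL case above. */
static int
get_pages_default(vm_page_t *mlist, int npages)
{
	int rv = VM_PAGER_OK;

	while (npages--)
		if (!vm_page_zero_fill(*mlist)) {
			rv = VM_PAGER_FAIL;
			break;
		} else
			mlist++;
	return (rv);
}

int
main(void)
{
	struct vm_page a, b;
	vm_page_t list[2] = { &a, &b };

	printf("rv = %d\n", get_pages_default(list, 2));
	return (0);
}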
@ -35,8 +35,8 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)vm_pager.h 8.2 (Berkeley) 11/10/93
* $Id: vm_pager.h,v 1.5 1994/04/15 08:14:29 cgd Exp $
* from: @(#)vm_pager.h 8.4 (Berkeley) 1/12/94
* $Id: vm_pager.h,v 1.6 1994/05/23 03:12:03 cgd Exp $
*/

/*
@ -47,12 +47,13 @@
#ifndef _VM_PAGER_
#define _VM_PAGER_

TAILQ_HEAD(pagerlst, pager_struct);
TAILQ_HEAD(pagerlst, pager_struct);

struct pager_struct {
TAILQ_ENTRY(pager_struct) pg_list; /* links for list management */
caddr_t pg_handle; /* ext. handle (vp, dev, fp) */
int pg_type; /* type of pager */
int pg_flags; /* flags */
struct pagerops *pg_ops; /* pager operations */
void *pg_data; /* private pager data */
};
@ -63,6 +64,10 @@ struct pager_struct {
#define PG_VNODE 1
#define PG_DEVICE 2

/* flags */
#define PG_CLUSTERGET 1
#define PG_CLUSTERPUT 2

struct pagerops {
void (*pgo_init) /* Initialize pager. */
__P((void));
@ -70,12 +75,15 @@ struct pagerops {
__P((caddr_t, vm_size_t, vm_prot_t, vm_offset_t));
void (*pgo_dealloc) /* Disassociate. */
__P((vm_pager_t));
int (*pgo_getpage) /* Get (read) page. */
__P((vm_pager_t, vm_page_t, boolean_t));
int (*pgo_putpage) /* Put (write) page. */
__P((vm_pager_t, vm_page_t, boolean_t));
int (*pgo_getpages) /* Get (read) page. */
__P((vm_pager_t, vm_page_t *, int, boolean_t));
int (*pgo_putpages) /* Put (write) page. */
__P((vm_pager_t, vm_page_t *, int, boolean_t));
boolean_t (*pgo_haspage) /* Does pager have page? */
__P((vm_pager_t, vm_offset_t));
void (*pgo_cluster) /* Return range of cluster. */
__P((vm_pager_t, vm_offset_t,
vm_offset_t *, vm_offset_t *));
};

/*
@ -85,37 +93,56 @@ struct pagerops {
* FAIL specified data was in range, but doesn't exist
* PEND operations was initiated but not completed
* ERROR error while accessing data that is in range and exists
* AGAIN temporary resource shortage prevented operation from happening
*/
#define VM_PAGER_OK 0
#define VM_PAGER_BAD 1
#define VM_PAGER_FAIL 2
#define VM_PAGER_PEND 3
#ifdef notyet /* XXX */
#define VM_PAGER_ERROR 4
#else /* notdef */
#define VM_PAGER_ERROR VM_PAGER_FAIL
#endif /* notdef */

#define VM_PAGER_ALLOC(h, s, p, o) (*(pg)->pg_ops->pgo_alloc)(h, s, p, o)
#define VM_PAGER_DEALLOC(pg) (*(pg)->pg_ops->pgo_dealloc)(pg)
#define VM_PAGER_GET(pg, m, s) (*(pg)->pg_ops->pgo_getpage)(pg, m, s)
#define VM_PAGER_PUT(pg, m, s) (*(pg)->pg_ops->pgo_putpage)(pg, m, s)
#define VM_PAGER_HASPAGE(pg, o) (*(pg)->pg_ops->pgo_haspage)(pg, o)
#define VM_PAGER_AGAIN 5

#ifdef KERNEL
extern struct pagerops *dfltpagerops;

vm_pager_t vm_pager_allocate
__P((int, caddr_t, vm_size_t, vm_prot_t, vm_offset_t));
vm_page_t vm_pager_atop __P((vm_offset_t));
void vm_pager_cluster
__P((vm_pager_t, vm_offset_t,
vm_offset_t *, vm_offset_t *));
void vm_pager_clusternull
__P((vm_pager_t, vm_offset_t,
vm_offset_t *, vm_offset_t *));
void vm_pager_deallocate __P((vm_pager_t));
int vm_pager_get __P((vm_pager_t, vm_page_t, boolean_t));
int vm_pager_get_pages
__P((vm_pager_t, vm_page_t *, int, boolean_t));
boolean_t vm_pager_has_page __P((vm_pager_t, vm_offset_t));
void vm_pager_init __P((void));
vm_pager_t vm_pager_lookup __P((struct pagerlst *, caddr_t));
vm_offset_t vm_pager_map_page __P((vm_page_t));
int vm_pager_put __P((vm_pager_t, vm_page_t, boolean_t));
vm_offset_t vm_pager_map_pages __P((vm_page_t *, int, boolean_t));
int vm_pager_put_pages
__P((vm_pager_t, vm_page_t *, int, boolean_t));
void vm_pager_sync __P((void));
void vm_pager_unmap_page __P((vm_offset_t));
void vm_pager_unmap_pages __P((vm_offset_t, int));

#define vm_pager_cancluster(p, b) ((p)->pg_flags & (b))

/*
* XXX compat with old interface
*/
#define vm_pager_get(p, m, s) \
({ \
vm_page_t ml[1]; \
ml[0] = (m); \
vm_pager_get_pages(p, ml, 1, s); \
})
#define vm_pager_put(p, m, s) \
({ \
vm_page_t ml[1]; \
ml[0] = (m); \
vm_pager_put_pages(p, ml, 1, s); \
})
#endif

#endif /* _VM_PAGER_ */
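The vm_pager_get/vm_pager_put compatibility macros above wrap a single page in a one-element list so that old call sites keep compiling against the new multi-page entry points. The following is a small stand-alone sketch of the same trick; it relies on GCC statement expressions, as the header does, and vm_pager_get_pages() here is a printing stub, not the kernel routine.

#include <stdio.h>

typedef struct vm_page { int index; } *vm_page_t;
typedef struct pager { int type; } *vm_pager_t;

/* Stub standing in for the real multi-page entry point. */
static int
vm_pager_get_pages(vm_pager_t pager, vm_page_t *mlist, int npages, int sync)
{
	printf("get_pages: %d page(s), sync = %d\n", npages, sync);
	return (0);	/* VM_PAGER_OK */
}

/* Compat shim: a single-page call becomes a one-element list. */
#define vm_pager_get(p, m, s) \
({ \
	vm_page_t ml[1]; \
	ml[0] = (m); \
	vm_pager_get_pages(p, ml, 1, s); \
})

int
main(void)
{
	struct vm_page pg = { 0 };
	struct pager pgr = { 0 };

	/* An old-style single-page call site compiles unchanged. */
	return (vm_pager_get(&pgr, &pg, 1));
}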
@ -1,6 +1,6 @@
/*
* Copyright (c) 1991 Regents of the University of California.
* All rights reserved.
* Copyright (c) 1991, 1993
*	The Regents of the University of California.  All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* The Mach Operating System project at Carnegie-Mellon University.
@ -33,8 +33,8 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)vm_param.h 7.2 (Berkeley) 4/21/91
* $Id: vm_param.h,v 1.6 1994/05/06 22:44:22 cgd Exp $
* from: @(#)vm_param.h 8.1 (Berkeley) 6/11/93
* $Id: vm_param.h,v 1.7 1994/05/23 03:12:04 cgd Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@ -63,18 +63,14 @@
* rights to redistribute these changes.
*/

#ifndef _VM_VM_PARAM_H_
#define _VM_VM_PARAM_H_

/*
* Machine independent virtual memory parameters.
*/

#ifdef KERNEL
#include "machine/vmparam.h"
#else
#ifndef _VM_PARAM_
#define _VM_PARAM_

#include <machine/vmparam.h>
#endif

/*
* This belongs in types.h, but breaks too many existing programs.
@ -94,7 +90,6 @@ typedef int boolean_t;
* or PAGE_SHIFT.  The fact they are variables is hidden here so that
* we can easily make them constant if we so desire.
*/

#define PAGE_SIZE cnt.v_page_size /* size of page */
#define PAGE_MASK page_mask /* size of page - 1 */
#define PAGE_SHIFT page_shift /* bits to shift for pages */
@ -109,7 +104,7 @@ extern int page_shift;
#define VM_METER 1 /* struct vmmeter */
#define VM_LOADAVG 2 /* struct loadavg */
#define VM_MAXID 3 /* number of valid vm ids */

#define CTL_VM_NAMES { \
{ 0, 0 }, \
{ "vmmeter", CTLTYPE_STRUCT }, \
@ -129,40 +124,37 @@ extern int page_shift;
#define KERN_NOT_RECEIVER 7
#define KERN_NO_ACCESS 8

#ifdef ASSEMBLER
#else /* ASSEMBLER */
#ifndef ASSEMBLER
/*
* Convert addresses to pages and vice versa.
* No rounding is used.
*/

#ifdef KERNEL
#ifdef KERNEL
#define atop(x) (((unsigned)(x)) >> PAGE_SHIFT)
#define ptoa(x) ((vm_offset_t)((x) << PAGE_SHIFT))
#endif /* KERNEL */

/*
* Round off or truncate to the nearest page.  These will work
* for either addresses or counts.  (i.e. 1 byte rounds to 1 page
* bytes.
* Round off or truncate to the nearest page.  These will work
* for either addresses or counts (i.e., 1 byte rounds to 1 page).
*/

#ifdef KERNEL
#define round_page(x) ((vm_offset_t)((((vm_offset_t)(x)) + PAGE_MASK) & ~PAGE_MASK))
#define trunc_page(x) ((vm_offset_t)(((vm_offset_t)(x)) & ~PAGE_MASK))
#define round_page(x) \
((vm_offset_t)((((vm_offset_t)(x)) + PAGE_MASK) & ~PAGE_MASK))
#define trunc_page(x) \
((vm_offset_t)(((vm_offset_t)(x)) & ~PAGE_MASK))
#define num_pages(x) \
((vm_offset_t)((((vm_offset_t)(x)) + PAGE_MASK) >> PAGE_SHIFT))
#else /* KERNEL */
#define round_page(x) ((((vm_offset_t)(x) + (vm_page_size - 1)) / vm_page_size) * vm_page_size)
#define trunc_page(x) ((((vm_offset_t)(x)) / vm_page_size) * vm_page_size)
#endif /* KERNEL */

#ifdef KERNEL
extern vm_size_t mem_size; /* size of physical memory (bytes) */
extern vm_offset_t first_addr; /* first physical page */
extern vm_offset_t last_addr; /* last physical page */
#endif /* KERNEL */

#endif /* ASSEMBLER */
#else
/* out-of-kernel versions of round_page and trunc_page */
#define round_page(x) \
((((vm_offset_t)(x) + (vm_page_size - 1)) / vm_page_size) * vm_page_size)
#define trunc_page(x) \
((((vm_offset_t)(x)) / vm_page_size) * vm_page_size)

#endif /* !_VM_VM_PARAM_H_ */
#endif /* KERNEL */
#endif /* ASSEMBLER */
#endif /* _VM_PARAM_ */
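The kernel round_page()/trunc_page() macros above are plain mask arithmetic on PAGE_MASK (page size minus one). A tiny sketch with a fixed 4096-byte page follows, for illustration only; in the kernel PAGE_MASK is the run-time variable page_mask.

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL			/* assumed, illustration only */
#define PAGE_MASK	(PAGE_SIZE - 1)

#define round_page(x)	((((unsigned long)(x)) + PAGE_MASK) & ~PAGE_MASK)
#define trunc_page(x)	(((unsigned long)(x)) & ~PAGE_MASK)

int
main(void)
{
	assert(round_page(1) == 4096);		/* 1 byte rounds to 1 page */
	assert(round_page(4096) == 4096);	/* aligned values unchanged */
	assert(trunc_page(8191) == 4096);	/* truncate to page start */
	printf("round_page(1) = %lu, trunc_page(8191) = %lu\n",
	    round_page(1), trunc_page(8191));
	return (0);
}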
@ -1,6 +1,6 @@
/*
* Copyright (c) 1991 Regents of the University of California.
* All rights reserved.
* Copyright (c) 1991, 1993
*	The Regents of the University of California.  All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* The Mach Operating System project at Carnegie-Mellon University.
@ -33,8 +33,8 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)vm_prot.h 7.2 (Berkeley) 4/21/91
* $Id: vm_prot.h,v 1.3 1993/05/20 03:59:45 cgd Exp $
* from: @(#)vm_prot.h 8.1 (Berkeley) 6/11/93
* $Id: vm_prot.h,v 1.4 1994/05/23 03:12:06 cgd Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@ -63,13 +63,13 @@
* rights to redistribute these changes.
*/

#ifndef _VM_VM_PROT_H_
#define _VM_VM_PROT_H_

/*
* Virtual memory protection definitions.
*/

#ifndef _VM_PROT_
#define _VM_PROT_

/*
* Types defined:
*
@ -100,4 +100,4 @@ typedef int vm_prot_t;

#define VM_PROT_ALL (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)

#endif /* !_VM_VM_PROT_H_ */
#endif /* _VM_PROT_ */
@ -30,8 +30,8 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)vm_swap.c 8.1 (Berkeley) 6/11/93
* $Id: vm_swap.c,v 1.18 1994/04/25 23:53:53 cgd Exp $
* from: @(#)vm_swap.c 8.5 (Berkeley) 2/17/94
* $Id: vm_swap.c,v 1.19 1994/05/23 03:12:08 cgd Exp $
*/

#include <sys/param.h>
@ -46,7 +46,6 @@
#include <sys/file.h>

#include <miscfs/specfs/specdev.h>
#include <vm/vm.h>

/*
* Indirect driver for multi-controller paging.
@ -1,7 +1,7 @@
/*
* Copyright (c) 1988 University of Utah.
* Copyright (c) 1991 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 1991, 1993
*	The Regents of the University of California.  All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* the Systems Programming Group of the University of Utah Computer
@ -35,9 +35,10 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: Utah Hdr: vm_unix.c 1.1 89/11/07
* from: @(#)vm_unix.c 7.2 (Berkeley) 4/20/91
* $Id: vm_unix.c,v 1.9 1994/01/08 04:17:33 mycroft Exp $
* from: Utah $Hdr: vm_unix.c 1.1 89/11/07$
*
* from: @(#)vm_unix.c 8.1 (Berkeley) 6/11/93
* $Id: vm_unix.c,v 1.10 1994/05/23 03:12:09 cgd Exp $
*/

/*
@ -49,12 +50,10 @@
#include <sys/resourcevar.h>

#include <vm/vm.h>
#include <vm/vm_user.h>

struct obreak_args {
char *nsiz;
};

/* ARGSUSED */
int
obreak(p, uap, retval)
@ -96,6 +95,7 @@ obreak(p, uap, retval)
* Enlarge the "stack segment" to include the specified
* stack pointer for the process.
*/
int
grow(p, sp)
struct proc *p;
unsigned sp;
@ -126,7 +126,6 @@ grow(p, sp)
struct ovadvise_args {
int anom;
};

/* ARGSUSED */
int
ovadvise(p, uap, retval)
sys/vm/vm_user.c

@ -1,6 +1,6 @@
/*
* Copyright (c) 1991 Regents of the University of California.
* All rights reserved.
* Copyright (c) 1991, 1993
*	The Regents of the University of California.  All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* The Mach Operating System project at Carnegie-Mellon University.
@ -33,8 +33,8 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)vm_user.c 7.3 (Berkeley) 4/21/91
* $Id: vm_user.c,v 1.7 1994/01/08 04:17:35 mycroft Exp $
* from: @(#)vm_user.c 8.2 (Berkeley) 1/12/94
* $Id: vm_user.c,v 1.8 1994/05/23 03:12:10 cgd Exp $
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@ -72,8 +72,6 @@
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_user.h>

simple_lock_data_t vm_alloc_lock; /* XXX */

@ -88,7 +86,6 @@ struct svm_allocate_args {
vm_size_t size;
boolean_t anywhere;
};

/* ARGSUSED */
int
svm_allocate(p, uap, retval)
@ -117,7 +114,6 @@ struct svm_deallocate_args {
vm_offset_t addr;
vm_size_t size;
};

/* ARGSUSED */
int
svm_deallocate(p, uap, retval)
@ -138,7 +134,6 @@ struct svm_inherit_args {
vm_size_t size;
vm_inherit_t inherit;
};

/* ARGSUSED */
int
svm_inherit(p, uap, retval)
@ -160,7 +155,6 @@ struct svm_protect_args {
boolean_t setmax;
vm_prot_t prot;
};

/* ARGSUSED */
int
svm_protect(p, uap, retval)
@ -174,6 +168,42 @@ svm_protect(p, uap, retval)
rv = vm_protect(uap->map, uap->addr, uap->size, uap->setmax, uap->prot);
return((int)rv);
}

/*
* vm_inherit sets the inheritence of the specified range in the
* specified map.
*/
int
vm_inherit(map, start, size, new_inheritance)
register vm_map_t map;
vm_offset_t start;
vm_size_t size;
vm_inherit_t new_inheritance;
{
if (map == NULL)
return(KERN_INVALID_ARGUMENT);

return(vm_map_inherit(map, trunc_page(start), round_page(start+size), new_inheritance));
}

/*
* vm_protect sets the protection of the specified range in the
* specified map.
*/

int
vm_protect(map, start, size, set_maximum, new_protection)
register vm_map_t map;
vm_offset_t start;
vm_size_t size;
boolean_t set_maximum;
vm_prot_t new_protection;
{
if (map == NULL)
return(KERN_INVALID_ARGUMENT);

return(vm_map_protect(map, trunc_page(start), round_page(start+size), new_protection, set_maximum));
}
#endif

/*
@ -202,8 +232,7 @@ vm_allocate(map, addr, size, anywhere)
*addr = trunc_page(*addr);
size = round_page(size);

result = vm_map_find(map, NULL, (vm_offset_t) 0, addr,
size, anywhere);
result = vm_map_find(map, NULL, (vm_offset_t) 0, addr, size, anywhere);

return(result);
}
@ -228,36 +257,57 @@ vm_deallocate(map, start, size)
}

/*
* vm_inherit sets the inheritence of the specified range in the
* specified map.
* Similar to vm_allocate but assigns an explicit pager.
*/
int
vm_inherit(map, start, size, new_inheritance)
vm_allocate_with_pager(map, addr, size, anywhere, pager, poffset, internal)
register vm_map_t map;
vm_offset_t start;
vm_size_t size;
vm_inherit_t new_inheritance;
register vm_offset_t *addr;
register vm_size_t size;
boolean_t anywhere;
vm_pager_t pager;
vm_offset_t poffset;
boolean_t internal;
{
register vm_object_t object;
register int result;

if (map == NULL)
return(KERN_INVALID_ARGUMENT);

return(vm_map_inherit(map, trunc_page(start), round_page(start+size), new_inheritance));
}
*addr = trunc_page(*addr);
size = round_page(size);

/*
* vm_protect sets the protection of the specified range in the
* specified map.
*/
int
vm_protect(map, start, size, set_maximum, new_protection)
register vm_map_t map;
vm_offset_t start;
vm_size_t size;
boolean_t set_maximum;
vm_prot_t new_protection;
{
if (map == NULL)
return(KERN_INVALID_ARGUMENT);
/*
* Lookup the pager/paging-space in the object cache.
* If it's not there, then create a new object and cache
* it.
*/
object = vm_object_lookup(pager);
cnt.v_lookups++;
if (object == NULL) {
object = vm_object_allocate(size);
/*
* From Mike Hibler: "unnamed anonymous objects should never
* be on the hash list ... For now you can just change
* vm_allocate_with_pager to not do vm_object_enter if this
* is an internal object ..."
*/
if (!internal)
vm_object_enter(object, pager);
} else
cnt.v_hits++;
if (internal)
object->flags |= OBJ_INTERNAL;
else {
object->flags &= ~OBJ_INTERNAL;
cnt.v_nzfod -= atop(size);
}

return(vm_map_protect(map, trunc_page(start), round_page(start+size), new_protection, set_maximum));
result = vm_map_find(map, object, poffset, addr, size, anywhere);
if (result != KERN_SUCCESS)
vm_object_deallocate(object);
else if (pager != NULL)
vm_object_setpager(object, pager, (vm_offset_t) 0, TRUE);
return(result);
}
@ -35,8 +35,8 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 8.1 (Berkeley) 6/11/93
* $Id: vnode_pager.c,v 1.8 1994/04/21 07:49:36 cgd Exp $
* from: @(#)vnode_pager.c 8.8 (Berkeley) 2/13/94
* $Id: vnode_pager.c,v 1.9 1994/05/23 03:12:12 cgd Exp $
*/

/*
@ -73,15 +73,19 @@ int vpagerdebug = 0x00;

static vm_pager_t vnode_pager_alloc
__P((caddr_t, vm_size_t, vm_prot_t, vm_offset_t));
static void vnode_pager_cluster
__P((vm_pager_t, vm_offset_t,
vm_offset_t *, vm_offset_t *));
static void vnode_pager_dealloc __P((vm_pager_t));
static int vnode_pager_getpage
__P((vm_pager_t, vm_page_t, boolean_t));
__P((vm_pager_t, vm_page_t *, int, boolean_t));
static boolean_t vnode_pager_haspage __P((vm_pager_t, vm_offset_t));
static void vnode_pager_init __P((void));
static int vnode_pager_io
__P((vn_pager_t, vm_page_t, enum uio_rw));
static int vnode_pager_putpage
__P((vm_pager_t, vm_page_t, boolean_t));
__P((vn_pager_t, vm_page_t *, int,
boolean_t, enum uio_rw));
static boolean_t vnode_pager_putpage
__P((vm_pager_t, vm_page_t *, int, boolean_t));

struct pagerops vnodepagerops = {
vnode_pager_init,
@ -89,7 +93,8 @@ struct pagerops vnodepagerops = {
vnode_pager_dealloc,
vnode_pager_getpage,
vnode_pager_putpage,
vnode_pager_haspage
vnode_pager_haspage,
vnode_pager_cluster
};

static void
@ -170,6 +175,7 @@ vnode_pager_alloc(handle, size, prot, foff)
TAILQ_INSERT_TAIL(&vnode_pager_list, pager, pg_list);
pager->pg_handle = handle;
pager->pg_type = PG_VNODE;
pager->pg_flags = 0;
pager->pg_ops = &vnodepagerops;
pager->pg_data = vnp;
vp->v_vmdata = (caddr_t)pager;
@ -197,7 +203,9 @@ vnode_pager_dealloc(pager)
{
register vn_pager_t vnp = (vn_pager_t)pager->pg_data;
register struct vnode *vp;
#ifdef NOTDEF
struct proc *p = curproc; /* XXX */
#endif

#ifdef DEBUG
if (vpagerdebug & VDB_FOLLOW)
@ -206,7 +214,7 @@ vnode_pager_dealloc(pager)
if (vp = vnp->vnp_vp) {
vp->v_vmdata = NULL;
vp->v_flag &= ~VTEXT;
#if 0
#if NOTDEF
/* can hang if done at reboot on NFS FS */
(void) VOP_FSYNC(vp, p->p_ucred, p);
#endif
@ -218,37 +226,49 @@ vnode_pager_dealloc(pager)
}

static int
vnode_pager_getpage(pager, m, sync)
vnode_pager_getpage(pager, mlist, npages, sync)
vm_pager_t pager;
vm_page_t m;
vm_page_t *mlist;
int npages;
boolean_t sync;
{

#ifdef DEBUG
if (vpagerdebug & VDB_FOLLOW)
printf("vnode_pager_getpage(%x, %x)\n", pager, m);
printf("vnode_pager_getpage(%x, %x, %x, %x)\n",
pager, mlist, npages, sync);
#endif
return(vnode_pager_io((vn_pager_t)pager->pg_data, m, UIO_READ));
return(vnode_pager_io((vn_pager_t)pager->pg_data,
mlist, npages, sync, UIO_READ));
}

static int
vnode_pager_putpage(pager, m, sync)
static boolean_t
vnode_pager_putpage(pager, mlist, npages, sync)
vm_pager_t pager;
vm_page_t m;
vm_page_t *mlist;
int npages;
boolean_t sync;
{
int err;

#ifdef DEBUG
if (vpagerdebug & VDB_FOLLOW)
printf("vnode_pager_putpage(%x, %x)\n", pager, m);
printf("vnode_pager_putpage(%x, %x, %x, %x)\n",
pager, mlist, npages, sync);
#endif
if (pager == NULL)
return (VM_PAGER_OK); /* ??? */
err = vnode_pager_io((vn_pager_t)pager->pg_data, m, UIO_WRITE);
return (FALSE); /* ??? */
err = vnode_pager_io((vn_pager_t)pager->pg_data,
mlist, npages, sync, UIO_WRITE);
/*
* If the operation was successful, mark the pages clean.
*/
if (err == VM_PAGER_OK) {
m->flags |= PG_CLEAN; /* XXX - wrong place */
pmap_clear_modify(VM_PAGE_TO_PHYS(m)); /* XXX - wrong place */
while (npages--) {
(*mlist)->flags |= PG_CLEAN;
pmap_clear_modify(VM_PAGE_TO_PHYS(*mlist));
mlist++;
}
}
return(err);
}
@ -269,8 +289,12 @@ vnode_pager_haspage(pager, offset)

/*
* Offset beyond end of file, do not have the page
* Lock the vnode first to make sure we have the most recent
* version of the size.
*/
VOP_LOCK(vnp->vnp_vp);
if (offset >= vnp->vnp_size) {
VOP_UNLOCK(vnp->vnp_vp);
#ifdef DEBUG
if (vpagerdebug & (VDB_FAIL|VDB_SIZE))
printf("vnode_pager_haspage: pg %x, off %x, size %x\n",
@ -295,6 +319,7 @@ vnode_pager_haspage(pager, offset)
offset / vnp->vnp_vp->v_mount->mnt_stat.f_iosize,
(struct vnode **)0, &bn);
#endif /* notyet */
VOP_UNLOCK(vnp->vnp_vp);
if (err) {
#ifdef DEBUG
if (vpagerdebug & VDB_FAIL)
@ -306,6 +331,38 @@ vnode_pager_haspage(pager, offset)
return((long)bn < 0 ? FALSE : TRUE);
}

static void
vnode_pager_cluster(pager, offset, loffset, hoffset)
vm_pager_t pager;
vm_offset_t offset;
vm_offset_t *loffset;
vm_offset_t *hoffset;
{
vn_pager_t vnp = (vn_pager_t)pager->pg_data;
vm_offset_t loff, hoff;

#ifdef DEBUG
if (vpagerdebug & VDB_FOLLOW)
printf("vnode_pager_cluster(%x, %x) ", pager, offset);
#endif
loff = offset;
if (loff >= vnp->vnp_size)
panic("vnode_pager_cluster: bad offset");
/*
* XXX could use VOP_BMAP to get maxcontig value
*/
hoff = loff + MAXBSIZE;
if (hoff > round_page(vnp->vnp_size))
hoff = round_page(vnp->vnp_size);

*loffset = loff;
*hoffset = hoff;
#ifdef DEBUG
if (vpagerdebug & VDB_FOLLOW)
printf("returns [%x-%x]\n", loff, hoff);
#endif
}

/*
* (XXX)
* Lets the VM system know about a change in size for a file.
@ -375,31 +432,35 @@ vnode_pager_umount(mp)
register vm_pager_t pager, npager;
struct vnode *vp;

for (pager = vnode_pager_list.tqh_first; pager != NULL;
pager = npager){
for (pager = vnode_pager_list.tqh_first; pager != NULL; pager = npager){
/*
* Save the next pointer now since uncaching may
* terminate the object and render pager invalid
*/
npager = pager->pg_list.tqe_next;
vp = ((vn_pager_t)pager->pg_data)->vnp_vp;
if (mp == (struct mount *)0 || vp->v_mount == mp)
if (mp == (struct mount *)0 || vp->v_mount == mp) {
VOP_LOCK(vp);
(void) vnode_pager_uncache(vp);
VOP_UNLOCK(vp);
}
}
}

/*
* Remove vnode associated object from the object cache.
*
* Note: this routine may be invoked as a result of a pager put
* operation (possibly at object termination time), so we must be careful.
* XXX unlock the vnode if it is currently locked.
* We must do this since uncaching the object may result in its
* destruction which may initiate paging activity which may necessitate
* re-locking the vnode.
*/
boolean_t
vnode_pager_uncache(vp)
register struct vnode *vp;
{
register vm_object_t object;
boolean_t uncached, locked;
boolean_t uncached;
vm_pager_t pager;

/*
@ -408,15 +469,19 @@ vnode_pager_uncache(vp)
pager = (vm_pager_t)vp->v_vmdata;
if (pager == NULL)
return (TRUE);
/*
* Unlock the vnode if it is currently locked.
* We do this since uncaching the object may result
* in its destruction which may initiate paging
* activity which may necessitate locking the vnode.
*/
locked = VOP_ISLOCKED(vp);
if (locked)
VOP_UNLOCK(vp);
#ifdef DEBUG
if (!VOP_ISLOCKED(vp)) {
#warning notyet should go away soon
#ifdef notyet /* XXX XXX */
#ifdef NFSCLIENT
extern int (**nfsv2_vnodeop_p)();

if (vp->v_op != nfsv2_vnodeop_p)
#endif
#endif
panic("vnode_pager_uncache: vnode not locked!");
}
#endif
/*
* Must use vm_object_lookup() as it actually removes
* the object from the cache list.
@ -424,18 +489,20 @@ vnode_pager_uncache(vp)
object = vm_object_lookup(pager);
if (object) {
uncached = (object->ref_count <= 1);
VOP_UNLOCK(vp);
pager_cache(object, FALSE);
VOP_LOCK(vp);
} else
uncached = TRUE;
if (locked)
VOP_LOCK(vp);
return(uncached);
}

static int
vnode_pager_io(vnp, m, rw)
vnode_pager_io(vnp, mlist, npages, sync, rw)
register vn_pager_t vnp;
vm_page_t m;
vm_page_t *mlist;
int npages;
boolean_t sync;
enum uio_rw rw;
{
struct uio auio;
@ -444,6 +511,13 @@ vnode_pager_io(vnp, m, rw)
int error, size;
struct proc *p = curproc; /* XXX */

/* XXX */
vm_page_t m;
if (npages != 1)
panic("vnode_pager_io: cannot handle multiple pages");
m = *mlist;
/* XXX */

#ifdef DEBUG
if (vpagerdebug & VDB_FOLLOW)
printf("vnode_pager_io(%x, %x, %c): vnode %x\n",
@ -451,9 +525,22 @@ vnode_pager_io(vnp, m, rw)
#endif
foff = m->offset + m->object->paging_offset;
/*
* Return failure if beyond current EOF
* Allocate a kernel virtual address and initialize so that
* we can use VOP_READ/WRITE routines.
*/
kva = vm_pager_map_pages(mlist, npages, sync);
if (kva == NULL)
return(VM_PAGER_AGAIN);
/*
* After all of the potentially blocking operations have been
* performed, we can do the size checks:
*	read beyond EOF (returns error)
*	short read
*/
VOP_LOCK(vnp->vnp_vp);
if (foff >= vnp->vnp_size) {
VOP_UNLOCK(vnp->vnp_vp);
vm_pager_unmap_pages(kva, npages);
#ifdef DEBUG
if (vpagerdebug & VDB_SIZE)
printf("vnode_pager_io: vp %x, off %d size %d\n",
@ -465,11 +552,6 @@ vnode_pager_io(vnp, m, rw)
size = vnp->vnp_size - foff;
else
size = PAGE_SIZE;
/*
* Allocate a kernel virtual address and initialize so that
* we can use VOP_READ/WRITE routines.
*/
kva = vm_pager_map_page(m);
aiov.iov_base = (caddr_t)kva;
aiov.iov_len = size;
auio.uio_iov = &aiov;
@ -488,6 +570,7 @@ vnode_pager_io(vnp, m, rw)
error = VOP_READ(vnp->vnp_vp, &auio, 0, p->p_ucred);
else
error = VOP_WRITE(vnp->vnp_vp, &auio, 0, p->p_ucred);
VOP_UNLOCK(vnp->vnp_vp);
#ifdef DEBUG
if (vpagerdebug & VDB_IO) {
if (error || auio.uio_resid)
@ -504,6 +587,6 @@ vnode_pager_io(vnp, m, rw)
else if (count != PAGE_SIZE && rw == UIO_READ)
bzero((void *)(kva + count), PAGE_SIZE - count);
}
vm_pager_unmap_page(kva);
vm_pager_unmap_pages(kva, npages);
return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}
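vnode_pager_cluster() above returns the window [offset, offset + MAXBSIZE) clamped to the page-rounded file size. The arithmetic in isolation, as a small sketch: the 4096-byte page and the 64KB MAXBSIZE are assumed constants here, where the kernel uses its real run-time parameters.

#include <stdio.h>

#define PAGE_MASK	4095UL			/* assumed 4096-byte page */
#define MAXBSIZE	65536UL			/* assumed 64KB window */
#define round_page(x)	((((unsigned long)(x)) + PAGE_MASK) & ~PAGE_MASK)

/* Same clamping as vnode_pager_cluster(): never run past the file. */
static void
cluster(unsigned long offset, unsigned long vnp_size,
    unsigned long *loff, unsigned long *hoff)
{
	*loff = offset;
	*hoff = *loff + MAXBSIZE;
	if (*hoff > round_page(vnp_size))
		*hoff = round_page(vnp_size);
}

int
main(void)
{
	unsigned long lo, hi;

	cluster(0, 10000, &lo, &hi);		/* short file: clamp to 12288 */
	printf("[%lu, %lu)\n", lo, hi);
	cluster(0, 1UL << 20, &lo, &hi);	/* big file: full 64KB window */
	printf("[%lu, %lu)\n", lo, hi);
	return (0);
}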