Tell uvm_pagermapin() the direction of the I/O so that it can map with only the protection that it needs.
thorpej 2000-05-19 03:45:04 +00:00
parent fa5c89d64a
commit 655b21e17d
4 changed files with 53 additions and 29 deletions
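The core of the change is small: the old single wait-flag argument becomes a flag word that also encodes the I/O direction, and the direction determines the mapping protection. The following is a minimal stand-alone C sketch of that idea, not the kernel code itself; the MAPIN_* and PROT_* constants are hypothetical stand-ins for the UVMPAGER_MAPIN_* and VM_PROT_* values defined in the diff below.

#include <stdio.h>

/* Hypothetical stand-ins for the UVMPAGER_MAPIN_* flags. */
#define MAPIN_WAITOK 0x01 /* it's okay to wait */
#define MAPIN_READ   0x02 /* host <- device: the device fills the pages */
#define MAPIN_WRITE  0x00 /* device -> host: pseudo flag, read-only map */

/* Hypothetical stand-ins for VM_PROT_* bits. */
#define PROT_READ  0x1
#define PROT_WRITE 0x2

/*
 * Mirrors the commit's protection computation: outgoing I/O
 * (pageout) only needs read access to the pages, while incoming
 * I/O (pagein) needs the mapping writable so data can land there.
 */
static int
mapin_prot(int flags)
{
	int prot = PROT_READ;

	if (flags & MAPIN_READ)
		prot |= PROT_WRITE;
	return prot;
}

int
main(void)
{
	printf("pageout prot = 0x%x\n", mapin_prot(MAPIN_WRITE));               /* 0x1 */
	printf("pagein  prot = 0x%x\n", mapin_prot(MAPIN_READ | MAPIN_WAITOK)); /* 0x3 */
	return 0;
}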

sys/uvm/uvm_pager.c

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_pager.c,v 1.28 2000/04/03 08:09:02 chs Exp $ */
+/* $NetBSD: uvm_pager.c,v 1.29 2000/05/19 03:45:04 thorpej Exp $ */
 
 /*
  *
@@ -114,31 +114,39 @@ uvm_pager_init()
  *
  * we basically just map in a blank map entry to reserve the space in the
  * map and then use pmap_enter() to put the mappings in by hand.
- *
- * XXX It would be nice to know the direction of the I/O, so that we can
- * XXX map only what is necessary.
  */
 vaddr_t
-uvm_pagermapin(pps, npages, aiop, waitf)
+uvm_pagermapin(pps, npages, aiop, flags)
 	struct vm_page **pps;
 	int npages;
 	struct uvm_aiodesc **aiop;	/* OUT */
-	int waitf;
+	int flags;
 {
 	vsize_t size;
 	vaddr_t kva;
 	struct uvm_aiodesc *aio;
 	vaddr_t cva;
 	struct vm_page *pp;
+	vm_prot_t prot;
 	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);
-	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d, aiop=0x%x, waitf=%d)",
-		pps, npages, aiop, waitf);
+	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d, aiop=0x%x, flags=0x%x)",
+		pps, npages, aiop, flags);
+	/*
+	 * compute protection.  outgoing I/O only needs read
+	 * access to the page, whereas incoming needs read/write.
+	 */
+	prot = VM_PROT_READ;
+	if (flags & UVMPAGER_MAPIN_READ)
+		prot |= VM_PROT_WRITE;
 ReStart:
 	if (aiop) {
-		MALLOC(aio, struct uvm_aiodesc *, sizeof(*aio), M_TEMP, waitf);
+		MALLOC(aio, struct uvm_aiodesc *, sizeof(*aio), M_TEMP,
+		    (flags & UVMPAGER_MAPIN_WAITOK));
 		if (aio == NULL)
 			return(0);
 		*aiop = aio;
@@ -147,15 +155,15 @@ ReStart:
 	}
 	size = npages << PAGE_SHIFT;
-	kva = NULL;		/* let system choose VA */
+	kva = 0;		/* let system choose VA */
 	if (uvm_map(pager_map, &kva, size, NULL,
 	    UVM_UNKNOWN_OFFSET, UVM_FLAG_NOMERGE) != KERN_SUCCESS) {
-		if (waitf == M_NOWAIT) {
+		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
 			if (aio)
 				FREE(aio, M_TEMP);
 			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
-			return(NULL);
+			return(0);
 		}
 		simple_lock(&pager_map_wanted_lock);
 		pager_map_wanted = TRUE;
@@ -174,12 +182,17 @@ ReStart:
 #endif
 		/*
-		 * XXX VM_PROT_DEFAULT includes VM_PROT_EXEC; is that
-		 * XXX really necessary?  It could lead to unnecessary
-		 * XXX instruction cache flushes.
+		 * XXX We used to use VM_PROT_DEFAULT here, but
+		 * XXX we don't since we know the direction of
+		 * XXX the I/O now.  However, VM_PROT_DEFAULT
+		 * XXX included VM_PROT_EXECUTE.  While that could
+		 * XXX lead to unnecessary I-cache flushes, something
+		 * XXX in the path might rely on that being done,
+		 * XXX so we still include it, for now.
+		 * XXX DOUBLE CHECK THIS!
 		 */
 		pmap_enter(vm_map_pmap(pager_map), cva, VM_PAGE_TO_PHYS(pp),
-		    VM_PROT_DEFAULT, PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
+		    prot | VM_PROT_EXECUTE, PMAP_WIRED | prot);
 	}
 	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
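One subtlety in the new pmap_enter() call: the mapping protection keeps VM_PROT_EXECUTE (to preserve whatever I-cache flushing the old VM_PROT_DEFAULT triggered), while the access type passed alongside PMAP_WIRED is just the computed prot. A tiny stand-alone C sketch of that split; all constants here are hypothetical stand-ins, not the kernel's values:

#include <stdio.h>

#define PROT_READ  0x1 /* stand-in for VM_PROT_READ */
#define PROT_WRITE 0x2 /* stand-in for VM_PROT_WRITE */
#define PROT_EXEC  0x4 /* stand-in for VM_PROT_EXECUTE */
#define PMAP_WIRED 0x8 /* stand-in for the pmap wired flag */

int
main(void)
{
	int prot = PROT_READ | PROT_WRITE;	/* e.g. a pagein mapping */

	/* The two arguments the patched call now constructs: */
	int map_prot    = prot | PROT_EXEC;	/* mapping protection */
	int access_type = PMAP_WIRED | prot;	/* wired + actual access */

	printf("map_prot=0x%x access_type=0x%x\n", map_prot, access_type);
	return 0;
}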

sys/uvm/uvm_pager.h

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_pager.h,v 1.14 2000/04/03 08:09:02 chs Exp $ */
+/* $NetBSD: uvm_pager.h,v 1.15 2000/05/19 03:45:04 thorpej Exp $ */
 
 /*
  *
@@ -146,6 +146,11 @@ struct vm_page **uvm_mk_pcluster __P((struct uvm_object *, struct vm_page **,
 			       int *, struct vm_page *, int,
 			       voff_t, voff_t));
+/* Flags to uvm_pagermapin() */
+#define UVMPAGER_MAPIN_WAITOK	0x01	/* it's okay to wait */
+#define UVMPAGER_MAPIN_READ	0x02	/* host <- device */
+#define UVMPAGER_MAPIN_WRITE	0x00	/* device -> host (pseudo flag) */
 #endif /* _KERNEL */
 #endif /* _UVM_UVM_PAGER_H_ */
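The READ/WRITE names follow the direction of the I/O as seen from the host, so a B_READ-style input operation maps to UVMPAGER_MAPIN_READ. Below is a minimal user-space C sketch of the translation the callers in the next two files perform; the constants are hypothetical stand-ins, not the kernel's actual values:

#include <stdio.h>

/* Hypothetical stand-ins for the buffer-cache flags. */
#define B_READ  0x1 /* read from device into memory */
#define B_ASYNC 0x2 /* don't sleep waiting for the I/O */

/* Hypothetical stand-ins for the UVMPAGER_MAPIN_* flags. */
#define MAPIN_WAITOK 0x01
#define MAPIN_READ   0x02
#define MAPIN_WRITE  0x00 /* pseudo flag */

/* Mirrors the mapinflags computation in uvm_swap_io() below. */
static int
buf_to_mapinflags(int bflags)
{
	int mapinflags = (bflags & B_READ) ? MAPIN_READ : MAPIN_WRITE;

	if ((bflags & B_ASYNC) == 0)
		mapinflags |= MAPIN_WAITOK;	/* synchronous I/O may sleep */
	return mapinflags;
}

int
main(void)
{
	printf("sync write: 0x%x\n", buf_to_mapinflags(0));                 /* 0x01 */
	printf("async read: 0x%x\n", buf_to_mapinflags(B_READ | B_ASYNC)); /* 0x02 */
	return 0;
}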

sys/uvm/uvm_swap.c

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_swap.c,v 1.36 2000/04/15 18:08:14 mrg Exp $ */
+/* $NetBSD: uvm_swap.c,v 1.37 2000/05/19 03:45:04 thorpej Exp $ */
 
 /*
  * Copyright (c) 1995, 1996, 1997 Matthew R. Green
@@ -1731,7 +1731,7 @@ uvm_swap_io(pps, startslot, npages, flags)
 	struct swapbuf *sbp;
 	struct buf *bp;
 	vaddr_t kva;
-	int	result, s, waitf, pflag;
+	int	result, s, mapinflags, pflag;
 	UVMHIST_FUNC("uvm_swap_io"); UVMHIST_CALLED(pdhist);
 	UVMHIST_LOG(pdhist, "<- called, startslot=%d, npages=%d, flags=%d",
@@ -1748,9 +1748,12 @@ uvm_swap_io(pps, startslot, npages, flags)
 	 * an aiodesc structure because we don't want to chance a malloc.
 	 * we've got our own pool of aiodesc structures (in swapbuf).
 	 */
-	waitf = (flags & B_ASYNC) ? M_NOWAIT : M_WAITOK;
-	kva = uvm_pagermapin(pps, npages, NULL, waitf);
-	if (kva == NULL)
+	mapinflags = (flags & B_READ) ? UVMPAGER_MAPIN_READ :
+	    UVMPAGER_MAPIN_WRITE;
+	if ((flags & B_ASYNC) == 0)
+		mapinflags |= UVMPAGER_MAPIN_WAITOK;
+	kva = uvm_pagermapin(pps, npages, NULL, mapinflags);
+	if (kva == 0)
 		return (VM_PAGER_AGAIN);
 	/*
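The uvm_vnode.c caller in the next file uses a two-step pattern: try the map-in without UVMPAGER_MAPIN_WAITOK while the object lock is held, and only retry with WAITOK after the lock has been dropped. A rough user-space C sketch of that shape; the stub and constants are hypothetical stand-ins, not the kernel's functions:

#include <stdio.h>

#define MAPIN_WAITOK 0x01 /* stand-in for UVMPAGER_MAPIN_WAITOK */
#define MAPIN_READ   0x02 /* stand-in for UVMPAGER_MAPIN_READ */

/* Hypothetical stand-in: fails when asked not to wait, succeeds otherwise. */
static unsigned long
pagermapin_stub(int flags)
{
	return (flags & MAPIN_WAITOK) ? 0xdead0000UL : 0;
}

int
main(void)
{
	int mapinflags = MAPIN_READ;
	unsigned long kva;

	/* Step 1: object lock held -- must not sleep, so no WAITOK. */
	kva = pagermapin_stub(mapinflags);

	/* ... mark pages busy, bump the I/O count, drop the object lock ... */

	/* Step 2: lock dropped -- now it is safe to sleep for KVA. */
	if (kva == 0)
		kva = pagermapin_stub(mapinflags | MAPIN_WAITOK);

	printf("kva=0x%lx\n", kva);
	return 0;
}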

sys/uvm/uvm_vnode.c

@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_vnode.c,v 1.32 2000/04/03 07:35:24 chs Exp $ */
+/* $NetBSD: uvm_vnode.c,v 1.33 2000/05/19 03:45:05 thorpej Exp $ */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -1594,7 +1594,7 @@ uvn_io(uvn, pps, npages, flags, rw)
 	struct iovec iov;
 	vaddr_t kva;
 	off_t file_offset;
-	int waitf, result;
+	int waitf, result, mapinflags;
 	size_t got, wanted;
 	UVMHIST_FUNC("uvn_io"); UVMHIST_CALLED(maphist);
@@ -1638,8 +1638,11 @@ uvn_io(uvn, pps, npages, flags, rw)
 	 * first try and map the pages in (without waiting)
 	 */
-	kva = uvm_pagermapin(pps, npages, NULL, M_NOWAIT);
-	if (kva == NULL && waitf == M_NOWAIT) {
+	mapinflags = (rw == UIO_READ) ?
+	    UVMPAGER_MAPIN_READ : UVMPAGER_MAPIN_WRITE;
+	kva = uvm_pagermapin(pps, npages, NULL, mapinflags);
+	if (kva == 0 && waitf == M_NOWAIT) {
 		simple_unlock(&uvn->u_obj.vmobjlock);
 		UVMHIST_LOG(maphist,"<- mapin failed (try again)",0,0,0,0);
 		return(VM_PAGER_AGAIN);
@@ -1654,9 +1657,9 @@
 	uvn->u_nio++;		/* we have an I/O in progress! */
 	simple_unlock(&uvn->u_obj.vmobjlock);
 	/* NOTE: object now unlocked */
-	if (kva == NULL) {
-		kva = uvm_pagermapin(pps, npages, NULL, M_WAITOK);
-	}
+	if (kva == 0)
+		kva = uvm_pagermapin(pps, npages, NULL,
+		    mapinflags | UVMPAGER_MAPIN_WAITOK);
 	/*
 	 * ok, mapped in.  our pages are PG_BUSY so they are not going to