diff --git a/sys/miscfs/genfs/genfs_vnops.c b/sys/miscfs/genfs/genfs_vnops.c
index 8278507050d9..512e5d6fc116 100644
--- a/sys/miscfs/genfs/genfs_vnops.c
+++ b/sys/miscfs/genfs/genfs_vnops.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: genfs_vnops.c,v 1.99 2005/07/16 03:54:08 yamt Exp $	*/
+/*	$NetBSD: genfs_vnops.c,v 1.100 2005/07/17 09:13:35 yamt Exp $	*/
 
 /*
  * Copyright (c) 1982, 1986, 1989, 1993
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: genfs_vnops.c,v 1.99 2005/07/16 03:54:08 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: genfs_vnops.c,v 1.100 2005/07/17 09:13:35 yamt Exp $");
 
 #if defined(_KERNEL_OPT)
 #include "opt_nfsserver.h"
@@ -496,6 +496,7 @@ genfs_getpages(void *v)
 	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
 	boolean_t sawhole = FALSE;
 	boolean_t overwrite = (flags & PGO_OVERWRITE) != 0;
+	boolean_t blockalloc = write && (flags & PGO_NOBLOCKALLOC) == 0;
 	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);
 
 	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
@@ -606,7 +607,7 @@ genfs_getpages(void *v)
 			struct vm_page *pg1 = pgs[ridx + i];
 
 			if ((pg1->flags & PG_FAKE) ||
-			    (write && (pg1->flags & PG_RDONLY))) {
+			    (blockalloc && (pg1->flags & PG_RDONLY))) {
 				break;
 			}
 		}
@@ -711,7 +712,7 @@ genfs_getpages(void *v)
 	 * now loop over the pages, reading as needed.
 	 */
 
-	if (write) {
+	if (blockalloc) {
 		lockmgr(&gp->g_glock, LK_EXCLUSIVE, NULL);
 	} else {
 		lockmgr(&gp->g_glock, LK_SHARED, NULL);
@@ -727,10 +728,13 @@ genfs_getpages(void *v)
 		 */
 
 		pidx = (offset - startoffset) >> PAGE_SHIFT;
-		while ((pgs[pidx]->flags & (PG_FAKE|PG_RDONLY)) == 0) {
+		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
 			size_t b;
 
 			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
+			if ((pgs[pidx]->flags & PG_RDONLY)) {
+				sawhole = TRUE;
+			}
 			b = MIN(PAGE_SIZE, bytes);
 			offset += b;
 			bytes -= b;
@@ -778,8 +782,8 @@ genfs_getpages(void *v)
 
 		/*
 		 * if this block isn't allocated, zero it instead of
-		 * reading it.  if this is a read access, mark the
-		 * pages we zeroed PG_RDONLY.
+		 * reading it.  unless we are going to allocate blocks,
+		 * mark the pages we zeroed PG_RDONLY.
 		 */
 
 		if (blkno < 0) {
@@ -795,7 +799,8 @@ genfs_getpages(void *v)
 			for (i = 0; i < holepages; i++) {
 				if (write) {
 					pgs[pidx + i]->flags &= ~PG_CLEAN;
-				} else {
+				}
+				if (!blockalloc) {
 					pgs[pidx + i]->flags |= PG_RDONLY;
 				}
 			}
@@ -881,18 +886,21 @@ loopdone:
 	 * the page is completely allocated while the pages are locked.
 	 */
 
-	if (!error && sawhole && write) {
-		for (i = 0; i < npages; i++) {
-			if (pgs[i] == NULL) {
-				continue;
-			}
-			pgs[i]->flags &= ~PG_CLEAN;
-			UVMHIST_LOG(ubchist, "mark dirty pg %p", pgs[i],0,0,0);
-		}
+	if (!error && sawhole && blockalloc) {
 		error = GOP_ALLOC(vp, startoffset, npages << PAGE_SHIFT, 0,
 		    cred);
 		UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
 		    startoffset, npages << PAGE_SHIFT, error,0);
+		if (!error) {
+			for (i = 0; i < npages; i++) {
+				if (pgs[i] == NULL) {
+					continue;
+				}
+				pgs[i]->flags &= ~(PG_CLEAN|PG_RDONLY);
+				UVMHIST_LOG(ubchist, "mark dirty pg %p",
+				    pgs[i],0,0,0);
+			}
+		}
 	}
 	lockmgr(&gp->g_glock, LK_RELEASE, NULL);
 	simple_lock(&uobj->vmobjlock);
@@ -974,9 +982,7 @@ out:
 			pg->flags &= ~(PG_FAKE);
 			pmap_clear_modify(pgs[i]);
 		}
-		if (write) {
-			pg->flags &= ~(PG_RDONLY);
-		}
+		KASSERT(!write || !blockalloc || (pg->flags & PG_RDONLY) == 0);
 		if (i < ridx || i >= ridx + orignpages || async) {
 			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
 			    pg, pg->offset,0,0);
diff --git a/sys/ufs/ufs/ufs_inode.c b/sys/ufs/ufs/ufs_inode.c
index e70c71fc7ae4..7570266e14c7 100644
--- a/sys/ufs/ufs/ufs_inode.c
+++ b/sys/ufs/ufs/ufs_inode.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: ufs_inode.c,v 1.49 2005/07/10 01:08:52 thorpej Exp $	*/
+/*	$NetBSD: ufs_inode.c,v 1.50 2005/07/17 09:13:35 yamt Exp $	*/
 
 /*
  * Copyright (c) 1991, 1993
@@ -37,7 +37,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ufs_inode.c,v 1.49 2005/07/10 01:08:52 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ufs_inode.c,v 1.50 2005/07/17 09:13:35 yamt Exp $");
 
 #if defined(_KERNEL_OPT)
 #include "opt_ffs.h"
@@ -235,7 +235,7 @@ ufs_balloc_range(struct vnode *vp, off_t off, off_t len, struct ucred *cred,
 	memset(pgs, 0, npages * sizeof(struct vm_page *));
 	simple_lock(&uobj->vmobjlock);
 	error = VOP_GETPAGES(vp, pagestart, pgs, &npages, 0,
-	    VM_PROT_READ, 0, PGO_SYNCIO|PGO_PASTEOF);
+	    VM_PROT_WRITE, 0, PGO_SYNCIO|PGO_PASTEOF|PGO_NOBLOCKALLOC);
 	if (error) {
 		return error;
 	}
@@ -273,9 +273,11 @@ ufs_balloc_range(struct vnode *vp, off_t off, off_t len, struct ucred *cred,
 
 	simple_lock(&uobj->vmobjlock);
 	for (i = 0; i < npages; i++) {
-		pgs[i]->flags &= ~PG_RDONLY;
 		if (error) {
 			pgs[i]->flags |= PG_RELEASED;
+		} else if (off <= pagestart + (i << PAGE_SHIFT) &&
+		    pagestart + ((i + 1) << PAGE_SHIFT) <= off + len) {
+			pgs[i]->flags &= ~PG_RDONLY;
 		}
 	}
 	if (error) {
diff --git a/sys/uvm/uvm_bio.c b/sys/uvm/uvm_bio.c
index 113678685546..32fcbb16fdf9 100644
--- a/sys/uvm/uvm_bio.c
+++ b/sys/uvm/uvm_bio.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_bio.c,v 1.39 2005/06/27 02:19:48 thorpej Exp $	*/
+/*	$NetBSD: uvm_bio.c,v 1.40 2005/07/17 09:13:35 yamt Exp $	*/
 
 /*
  * Copyright (c) 1998 Chuck Silvers.
@@ -34,7 +34,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.39 2005/06/27 02:19:48 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.40 2005/07/17 09:13:35 yamt Exp $");
 
 #include "opt_uvmhist.h"
 
@@ -276,7 +276,7 @@ again:
 	    uobj, umap->offset + slot_offset, npages, 0);
 
 	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
-	    &npages, 0, access_type, 0, flags);
+	    &npages, 0, access_type, 0, flags | PGO_NOBLOCKALLOC);
 	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages, 0,
 	    0);
 
@@ -295,6 +295,8 @@ again:
 	simple_lock(&uobj->vmobjlock);
 	uvm_lock_pageq();
 	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
+		boolean_t rdonly;
+		vm_prot_t mask;
 
 		/*
 		 * for virtually-indexed, virtually-tagged caches we should
@@ -340,11 +342,21 @@ again:
 				continue; /* will re-fault */
 			}
 		}
-		KASSERT(access_type == VM_PROT_READ ||
-		    (pg->flags & PG_RDONLY) == 0);
+
+		/*
+		 * note that a page whose backing store is partially allocated
+		 * is marked as PG_RDONLY.
+		 */
+
+		rdonly = (access_type & VM_PROT_WRITE) == 0 &&
+		    (pg->flags & PG_RDONLY) != 0;
+		KASSERT((pg->flags & PG_RDONLY) == 0 ||
+		    (access_type & VM_PROT_WRITE) == 0 ||
+		    pg->offset < umap->writeoff ||
+		    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);
+		mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;
 		pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
-		    (pg->flags & PG_RDONLY) ? prot & ~VM_PROT_WRITE : prot,
-		    access_type);
+		    prot & mask, access_type & mask);
 		uvm_pageactivate(pg);
 		pg->flags &= ~(PG_BUSY);
 		UVM_PAGE_OWN(pg, NULL);
@@ -456,7 +468,8 @@ again:
 	if (flags & UBC_FAULTBUSY) {
 		int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
 		struct vm_page *pgs[npages];
-		int gpflags = PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF;
+		int gpflags =
+		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC;
 		int i;
 
 		KDASSERT(flags & UBC_WRITE);
diff --git a/sys/uvm/uvm_pager.h b/sys/uvm/uvm_pager.h
index 9967456702f0..442e54b6b123 100644
--- a/sys/uvm/uvm_pager.h
+++ b/sys/uvm/uvm_pager.h
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_pager.h,v 1.28 2004/03/24 07:55:01 junyoung Exp $	*/
+/*	$NetBSD: uvm_pager.h,v 1.29 2005/07/17 09:13:35 yamt Exp $	*/
 
 /*
  *
@@ -160,6 +160,7 @@ struct uvm_pagerops {
 #define PGO_BUSYFAIL	0x080	/* fail if a page is busy [put] */
#define PGO_OVERWRITE	0x200	/* pages will be overwritten before unlocked */
 #define PGO_PASTEOF	0x400	/* allow allocation of pages past EOF */
+#define PGO_NOBLOCKALLOC 0x800	/* backing block allocation is not needed */
 
 /* page we are not interested in getting */
 #define PGO_DONTCARE ((struct vm_page *) -1L)	/* [get only] */
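
---

Reviewer note: a minimal usage sketch of the new PGO_NOBLOCKALLOC flag, not part of
this change. The helper name example_getpages_noalloc() is hypothetical; the
VOP_GETPAGES() call mirrors the ufs_balloc_range() call site above. With the flag
set on a write fault, genfs_getpages() takes g_glock shared instead of exclusive,
zero-fills holes without calling GOP_ALLOC(), and returns pages whose backing
blocks are unallocated with PG_RDONLY set; the caller then allocates the blocks
itself and clears PG_RDONLY, as ufs_balloc_range() does.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <uvm/uvm_extern.h>

/*
 * hypothetical sketch: get pages for a write region whose backing
 * blocks the caller will allocate itself.
 */
static int
example_getpages_noalloc(struct vnode *vp, voff_t pagestart,
    struct vm_page **pgs, int *npagesp)
{
	struct uvm_object *uobj = &vp->v_uobj;
	int error;

	memset(pgs, 0, *npagesp * sizeof(struct vm_page *));

	/* the pager expects uobj locked on entry and unlocks it itself */
	simple_lock(&uobj->vmobjlock);
	error = VOP_GETPAGES(vp, pagestart, pgs, npagesp, 0,
	    VM_PROT_WRITE, 0, PGO_SYNCIO|PGO_PASTEOF|PGO_NOBLOCKALLOC);

	/*
	 * on success, any page backed by a hole came back with PG_RDONLY
	 * set; the caller must allocate its blocks (e.g. via GOP_ALLOC())
	 * and clear PG_RDONLY before permitting writes to it.
	 */
	return error;
}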