- in genfs_getpages, take g_glock earlier so that a truncate cannot
  intervene; this also fixes a deadlock (g_glock vs. page-lock ordering).
- uvm_vnp_setsize: modify v_size while holding v_interlock.
reviewed by Chuck Silvers.
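
The genfs_getpages() half of the change is a lock-ordering and retry pattern: take the per-node range lock (g_glock) before the page/object lock, then re-check v_size and start over if a truncate got in first. Below is a minimal sketch of that pattern as a stand-alone user-space model, not the kernel code: pthread locks stand in for lockmgr()/simple_lock(), and the model_* names are invented for illustration.

/*
 * Minimal user-space model of the new ordering in genfs_getpages().
 * A pthread rwlock stands in for the genfs node's g_glock, a mutex for
 * v_interlock/vmobjlock.  The pager takes g_glock before the object lock
 * and rechecks the size afterwards, restarting if a truncate won the race.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct model_vnode {
	pthread_rwlock_t g_glock;	/* range lock, held across truncate */
	pthread_mutex_t  interlock;	/* models v_interlock/vmobjlock */
	int64_t          size;		/* models vp->v_size */
};

static void
model_getpages(struct model_vnode *vp, int64_t offset, int blockalloc)
{
	int64_t origvsize;

startover:
	/* snapshot the size before we start working on the page array */
	pthread_mutex_lock(&vp->interlock);
	origvsize = vp->size;
	pthread_mutex_unlock(&vp->interlock);

	/* take the range lock first: exclusive if we may allocate blocks */
	if (blockalloc)
		pthread_rwlock_wrlock(&vp->g_glock);
	else
		pthread_rwlock_rdlock(&vp->g_glock);

	pthread_mutex_lock(&vp->interlock);
	if (vp->size < origvsize) {
		/* a truncate slipped in before we got g_glock: retry */
		pthread_mutex_unlock(&vp->interlock);
		pthread_rwlock_unlock(&vp->g_glock);
		goto startover;
	}
	printf("getpages: offset %lld, size %lld\n",
	    (long long)offset, (long long)vp->size);
	pthread_mutex_unlock(&vp->interlock);
	pthread_rwlock_unlock(&vp->g_glock);
}

static void
model_truncate(struct model_vnode *vp, int64_t newsize)
{
	/* truncation takes the same two locks in the same order */
	pthread_rwlock_wrlock(&vp->g_glock);
	pthread_mutex_lock(&vp->interlock);
	vp->size = newsize;		/* size only changes under the interlock */
	pthread_mutex_unlock(&vp->interlock);
	pthread_rwlock_unlock(&vp->g_glock);
}

int
main(void)
{
	struct model_vnode vn = {
		PTHREAD_RWLOCK_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, 65536
	};

	model_getpages(&vn, 4096, 0);	/* read fault while the file is large */
	model_truncate(&vn, 0);		/* truncate to zero */
	model_getpages(&vn, 4096, 1);	/* write fault now sees the new size */
	return 0;
}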
parent f94a2b9563
commit f9458a6ba1
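
The uvm_vnp_setsize() half simply reorders the size update: v_size now changes while the interlock is still held, before the pages beyond the new end of file are flushed, so code that samples v_size under the interlock (as genfs_getpages now does) cannot observe a stale value. A small sketch of that ordering under the same user-space modelling assumptions; flush_pages_past() here plays the role of the uvn_put(..., PGO_FREE | PGO_SYNCIO) call, which is entered with the lock held and returns with it released.

/*
 * User-space model of the new uvm_vnp_setsize() ordering, not kernel code:
 * "interlock" stands in for v_interlock/vmobjlock, flush_pages_past() for
 * uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO).
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t interlock = PTHREAD_MUTEX_INITIALIZER;
static int64_t v_size = 65536;

/* called with the interlock held; drops it once the flush is done */
static void
flush_pages_past(int64_t pgend)
{
	printf("freeing cached pages at and beyond offset %lld\n",
	    (long long)pgend);
	pthread_mutex_unlock(&interlock);
}

static void
model_setsize(int64_t newsize)
{
	int64_t oldsize;
	int64_t pgend = (newsize + 4095) & ~(int64_t)4095;	/* round_page() */

	pthread_mutex_lock(&interlock);
	oldsize = v_size;
	v_size = newsize;		/* changed while the interlock is held */
	if (oldsize > pgend)
		flush_pages_past(pgend);	/* drops the interlock */
	else
		pthread_mutex_unlock(&interlock);
}

int
main(void)
{
	model_setsize(0);	/* truncate: pages past the new EOF are tossed */
	model_setsize(8192);	/* grow: nothing to free, just drop the lock */
	return 0;
}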

--- genfs_vnops.c	(revision 1.125)
+++ genfs_vnops.c	(revision 1.126)
@@ -1,4 +1,4 @@
-/* $NetBSD: genfs_vnops.c,v 1.125 2006/05/14 21:31:52 elad Exp $ */
+/* $NetBSD: genfs_vnops.c,v 1.126 2006/07/22 08:47:56 yamt Exp $ */
 
 /*
  * Copyright (c) 1982, 1986, 1989, 1993
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: genfs_vnops.c,v 1.125 2006/05/14 21:31:52 elad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: genfs_vnops.c,v 1.126 2006/07/22 08:47:56 yamt Exp $");
 
 #if defined(_KERNEL_OPT)
 #include "opt_nfsserver.h"
@@ -460,6 +460,7 @@ genfs_getpages(void *v)
 	boolean_t sawhole = FALSE;
 	boolean_t overwrite = (flags & PGO_OVERWRITE) != 0;
 	boolean_t blockalloc = write && (flags & PGO_NOBLOCKALLOC) == 0;
+	voff_t origvsize;
 	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);
 
 	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
@@ -473,7 +474,9 @@ genfs_getpages(void *v)
 		panic("genfs_getpages: too many pages");
 	}
 
+startover:
 	error = 0;
+	origvsize = vp->v_size;
 	origoffset = ap->a_offset;
 	orignpages = *ap->a_count;
 	GOP_SIZE(vp, vp->v_size, &diskeof, 0);
@@ -539,6 +542,7 @@ genfs_getpages(void *v)
 
 		return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
 	}
+	simple_unlock(&uobj->vmobjlock);
 
 	/*
 	 * find the requested pages and make some simple checks.
@@ -566,9 +570,9 @@ genfs_getpages(void *v)
 	pgs_size = sizeof(struct vm_page *) *
 	    ((endoffset - startoffset) >> PAGE_SHIFT);
 	if (pgs_size > sizeof(pgs_onstack)) {
-		pgs = malloc(pgs_size, M_DEVBUF, M_NOWAIT | M_ZERO);
+		pgs = malloc(pgs_size, M_DEVBUF,
+		    (async ? M_NOWAIT : M_WAITOK) | M_ZERO);
 		if (pgs == NULL) {
-			simple_unlock(&uobj->vmobjlock);
 			return (ENOMEM);
 		}
 	} else {
@@ -577,8 +581,29 @@ genfs_getpages(void *v)
 	}
 	UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
 	    ridx, npages, startoffset, endoffset);
+
+	/*
+	 * hold g_glock to prevent a race with truncate.
+	 *
+	 * check if our idea of v_size is still valid.
+	 */
+
+	if (blockalloc) {
+		lockmgr(&gp->g_glock, LK_EXCLUSIVE, NULL);
+	} else {
+		lockmgr(&gp->g_glock, LK_SHARED, NULL);
+	}
+	simple_lock(&uobj->vmobjlock);
+	if (vp->v_size < origvsize) {
+		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
+		if (pgs != pgs_onstack)
+			free(pgs, M_DEVBUF);
+		goto startover;
+	}
+
 	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
 	    async ? UFP_NOWAIT : UFP_ALL) != orignpages) {
+		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
 		KASSERT(async != 0);
 		genfs_rel_pages(&pgs[ridx], orignpages);
 		simple_unlock(&uobj->vmobjlock);
@@ -600,6 +625,7 @@ genfs_getpages(void *v)
 		}
 	}
 	if (i == npages) {
+		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
 		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
 		npages += ridx;
 		goto out;
@@ -610,6 +636,7 @@ genfs_getpages(void *v)
 	 */
 
 	if (overwrite) {
+		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
 		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);
 
 		for (i = 0; i < npages; i++) {
@@ -644,6 +671,7 @@ genfs_getpages(void *v)
 		npgs = npages;
 		if (uvn_findpages(uobj, startoffset, &npgs, pgs,
 		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
+			lockmgr(&gp->g_glock, LK_RELEASE, NULL);
 			KASSERT(async != 0);
 			genfs_rel_pages(pgs, npages);
 			simple_unlock(&uobj->vmobjlock);
@@ -700,12 +728,6 @@ genfs_getpages(void *v)
 	 * now loop over the pages, reading as needed.
 	 */
 
-	if (blockalloc) {
-		lockmgr(&gp->g_glock, LK_EXCLUSIVE, NULL);
-	} else {
-		lockmgr(&gp->g_glock, LK_SHARED, NULL);
-	}
-
 	bp = NULL;
 	for (offset = startoffset;
 	    bytes > 0;

--- uvm_vnode.c	(revision 1.71)
+++ uvm_vnode.c	(revision 1.72)
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_vnode.c,v 1.71 2006/07/21 16:48:54 ad Exp $ */
+/* $NetBSD: uvm_vnode.c,v 1.72 2006/07/22 08:47:56 yamt Exp $ */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -50,7 +50,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.71 2006/07/21 16:48:54 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.72 2006/07/22 08:47:56 yamt Exp $");
 
 #include "fs_nfs.h"
 #include "opt_uvmhist.h"
@@ -482,6 +482,7 @@ uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
 {
 	struct uvm_object *uobj = &vp->v_uobj;
 	voff_t pgend = round_page(newsize);
+	voff_t oldsize;
 	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);
 
 	simple_lock(&uobj->vmobjlock);
@@ -493,12 +494,13 @@ uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
 	 * toss some pages...
 	 */
 
-	if (vp->v_size > pgend && vp->v_size != VSIZENOTSET) {
+	oldsize = vp->v_size;
+	vp->v_size = newsize;
+	if (oldsize > pgend && oldsize != VSIZENOTSET) {
 		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
 	} else {
 		simple_unlock(&uobj->vmobjlock);
 	}
-	vp->v_size = newsize;
 }
 
 /*