in genfs_getpages(), mark the vnode dirty (i.e. add it to the syncer
worklist and set VI_WRMAPDIRTY) after we have busied the pages rather
than before.  this prevents other threads calling genfs_do_putpages()
from marking the vnode clean again while we're in the process of
creating new writable mappings, since such threads will wait for the
page(s) to become unbusy before proceeding.
fixes the problem recently reported by hannken@ on tech-kern.
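
To illustrate why the ordering matters, here is a minimal userland
sketch, not kernel code: page_busy, vnode_dirty, getpages_thread() and
putpages_thread() are invented stand-ins for the page's busy state, the
syncer-worklist/VI_WRMAPDIRTY state, genfs_getpages() and
genfs_do_putpages().  Because the cleaner waits for busy pages before
it will mark anything clean, busying the page before setting the dirty
mark closes the race window:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t interlock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t unbusy_cv = PTHREAD_COND_INITIALIZER;
static bool page_busy = false;
static bool vnode_dirty = false;

/* stands in for genfs_getpages() creating a new writable mapping */
static void *
getpages_thread(void *arg)
{

	(void)arg;
	pthread_mutex_lock(&interlock);
	page_busy = true;	/* busy the page first ... */
	vnode_dirty = true;	/* ... only then mark the vnode dirty */
	pthread_mutex_unlock(&interlock);

	/* ... enter the page into a writable mapping here ... */

	pthread_mutex_lock(&interlock);
	page_busy = false;	/* unbusy: the cleaner may proceed now */
	pthread_cond_broadcast(&unbusy_cv);
	pthread_mutex_unlock(&interlock);
	return NULL;
}

/* stands in for genfs_do_putpages() trying to mark the vnode clean */
static void *
putpages_thread(void *arg)
{

	(void)arg;
	pthread_mutex_lock(&interlock);
	while (page_busy)	/* must wait for busy pages, so it */
		pthread_cond_wait(&unbusy_cv, &interlock); /* can't slip in */
	if (vnode_dirty) {
		/* flush; only now is it safe to mark the vnode clean */
		vnode_dirty = false;
	}
	pthread_mutex_unlock(&interlock);
	return NULL;
}

int
main(void)
{
	pthread_t g, p;

	pthread_create(&g, NULL, getpages_thread, NULL);
	pthread_create(&p, NULL, putpages_thread, NULL);
	pthread_join(g, NULL);
	pthread_join(p, NULL);
	printf("vnode dirty at exit: %d\n", vnode_dirty);
	return 0;
}

With the old ordering, the dirty mark was set before the pages were
busied (and the interlock was dropped in between), so the cleaner could
flush and mark the vnode clean again before the writable mapping
existed.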
chs 2010-08-08 18:17:11 +00:00
parent 0c9fb0464e
commit e15697fcb4
1 changed file with 37 additions and 21 deletions

sys/miscfs/genfs/genfs_io.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: genfs_io.c,v 1.37 2010/07/29 10:54:50 hannken Exp $	*/
+/*	$NetBSD: genfs_io.c,v 1.38 2010/08/08 18:17:11 chs Exp $	*/
 
 /*
  * Copyright (c) 1982, 1986, 1989, 1993
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.37 2010/07/29 10:54:50 hannken Exp $");
+__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.38 2010/08/08 18:17:11 chs Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -62,11 +62,12 @@ static void genfs_dio_iodone(struct buf *);
 static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
     void (*)(struct buf *));
-static inline void genfs_rel_pages(struct vm_page **, int);
+static void genfs_rel_pages(struct vm_page **, int);
+static void genfs_markdirty(struct vnode *);
 
 int genfs_maxdio = MAXPHYS;
 
-static inline void
+static void
 genfs_rel_pages(struct vm_page **pgs, int npages)
 {
 	int i;
@@ -85,6 +86,21 @@ genfs_rel_pages(struct vm_page **pgs, int npages)
 	mutex_exit(&uvm_pageqlock);
 }
 
+static void
+genfs_markdirty(struct vnode *vp)
+{
+	struct genfs_node * const gp = VTOG(vp);
+
+	KASSERT(mutex_owned(&vp->v_interlock));
+	gp->g_dirtygen++;
+	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
+		vn_syncer_add_to_worklist(vp, filedelay);
+	}
+	if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
+		vp->v_iflag |= VI_WRMAPDIRTY;
+	}
+}
+
 /*
  * generic VM getpages routine.
  * Return PG_BUSY pages for the given range,
@@ -186,16 +202,6 @@ startover:
 		}
 	}
 
-	if (memwrite) {
-		gp->g_dirtygen++;
-		if ((vp->v_iflag & VI_ONWORKLST) == 0) {
-			vn_syncer_add_to_worklist(vp, filedelay);
-		}
-		if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
-			vp->v_iflag |= VI_WRMAPDIRTY;
-		}
-	}
-
 	/*
 	 * For PGO_LOCKED requests, just return whatever's in memory.
 	 */
@@ -236,6 +242,9 @@ startover:
 			genfs_node_unlock(vp);
 		}
 		error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
+		if (error == 0 && memwrite) {
+			genfs_markdirty(vp);
+		}
 		goto out_err;
 	}
 	mutex_exit(&uobj->vmobjlock);
@@ -696,6 +705,9 @@ out:
 		}
 	}
 	mutex_exit(&uvm_pageqlock);
+	if (memwrite) {
+		genfs_markdirty(vp);
+	}
 	mutex_exit(&uobj->vmobjlock);
 	if (ap->a_m != NULL) {
 		memcpy(ap->a_m, &pgs[ridx],
@@ -708,7 +720,7 @@ out_err_free:
 out_err:
 	if (has_trans)
 		fstrans_done(vp->v_mount);
-	return (error);
+	return error;
 }
 
 /*
@@ -1467,18 +1479,19 @@ genfs_compat_getpages(void *v)
 	orignpages = *ap->a_count;
 	pgs = ap->a_m;
 
-	if (memwrite && (vp->v_iflag & VI_ONWORKLST) == 0) {
-		vn_syncer_add_to_worklist(vp, filedelay);
-	}
 	if (ap->a_flags & PGO_LOCKED) {
 		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
 		    UFP_NOWAIT|UFP_NOALLOC| (memwrite ? UFP_NORDONLY : 0));
 
-		return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
+		error = ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
+		if (error == 0 && memwrite) {
+			genfs_markdirty(vp);
+		}
+		return error;
 	}
 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
 		mutex_exit(&uobj->vmobjlock);
-		return (EINVAL);
+		return EINVAL;
 	}
 	if ((ap->a_flags & PGO_SYNCIO) == 0) {
 		mutex_exit(&uobj->vmobjlock);
@@ -1527,8 +1540,11 @@ genfs_compat_getpages(void *v)
 		uvm_page_unbusy(pgs, npages);
 	}
 	mutex_exit(&uvm_pageqlock);
+	if (error == 0 && memwrite) {
+		genfs_markdirty(vp);
+	}
 	mutex_exit(&uobj->vmobjlock);
-	return (error);
+	return error;
 }
 
 int