tmpfs has two layers of uvm objects (vnode->uobj and the anon object
in tmpfs_node), so when playing with pages make sure we lock the
uvm object the pages belong to instead of the vnode's uvm object.

per test from Nicolas Joly (which I'm sure he will commit soon ;)
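
The reason the vnode's lock is not enough: with tmpfs mounted under a
rump kernel, the pages that pgo_get() hands back can be owned by the
node's anon object rather than by vp->v_uobj. A sketch of the two
layers (field names follow NetBSD's <fs/tmpfs/tmpfs.h> of this era;
illustration only, not part of the change):

    struct tmpfs_node *tn = vp->v_data;
    struct uvm_object *vobj = &vp->v_uobj;                /* layer 1 */
    struct uvm_object *aobj = tn->tn_spec.tn_reg.tn_aobj; /* layer 2 */

    /*
     * A page returned by the pager may satisfy pg->uobject == aobj,
     * in which case vobj->vmobjlock does not protect it; the lock to
     * take is pg->uobject->vmobjlock.
     */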
pooka 2011-02-27 13:37:39 +00:00
parent 23092b13ed
commit 9bd36d4186

vm_vfs.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: vm_vfs.c,v 1.25 2011/02/22 20:17:37 pooka Exp $	*/
+/*	$NetBSD: vm_vfs.c,v 1.26 2011/02/27 13:37:39 pooka Exp $	*/
 
 /*
  * Copyright (c) 2008 Antti Kantee.  All Rights Reserved.
@@ -29,7 +29,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vm_vfs.c,v 1.25 2011/02/22 20:17:37 pooka Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vm_vfs.c,v 1.26 2011/02/27 13:37:39 pooka Exp $");
 
 #include <sys/param.h>
@@ -109,6 +109,7 @@ uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
 {
 	struct uvm_object *uobj = &vp->v_uobj;
 	struct vm_page **pgs;
+	struct uvm_object *pguobj;
 	int maxpages = MIN(32, round_page(len) >> PAGE_SHIFT);
 	int rv, npages, i;
 
@@ -125,7 +126,7 @@ uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
 		    0, PAGERFLAGS | PGO_PASTEOF);
 		KASSERT(npages > 0);
 
-		for (i = 0; i < npages; i++) {
+		for (i = 0, pguobj = NULL; i < npages; i++) {
 			struct vm_page *pg;
 			uint8_t *start;
 			size_t chunkoff, chunklen;
@@ -133,6 +134,9 @@ uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
 			pg = pgs[i];
 			if (pg == NULL)
 				break;
+			if (pguobj == NULL)
+				pguobj = pg->uobject;
+			KASSERT(pguobj == pg->uobject);
 
 			chunkoff = off & PAGE_MASK;
 			chunklen = MIN(PAGE_SIZE - chunkoff, len);
@@ -144,8 +148,12 @@ uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
 			off += chunklen;
 			len -= chunklen;
 		}
-		mutex_enter(&uobj->vmobjlock);
+		mutex_enter(&pguobj->vmobjlock);
 		uvm_page_unbusy(pgs, npages);
+		if (pguobj != uobj) {
+			mutex_exit(&pguobj->vmobjlock);
+			mutex_enter(&uobj->vmobjlock);
+		}
 	}
 	mutex_exit(&uobj->vmobjlock);
 	kmem_free(pgs, maxpages * sizeof(pgs));
@@ -161,6 +169,7 @@ ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo,
     int advice, int flags)
 {
 	struct vm_page **pgs;
+	struct uvm_object *pguobj;
 	int npages = len2npages(uio->uio_offset, todo);
 	size_t pgalloc;
 	int i, rv, pagerflags;
@@ -184,7 +193,7 @@ ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo,
 		if (rv)
 			goto out;
 
-		for (i = 0; i < npages; i++) {
+		for (i = 0, pguobj = NULL; i < npages; i++) {
 			struct vm_page *pg;
 			size_t xfersize;
 			off_t pageoff;
@@ -192,6 +201,9 @@ ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo,
 			pg = pgs[i];
 			if (pg == NULL)
 				break;
+			if (pguobj == NULL)
+				pguobj = pg->uobject;
+			KASSERT(pguobj == pg->uobject);
 
 			pageoff = uio->uio_offset & PAGE_MASK;
 			xfersize = MIN(MIN(todo, PAGE_SIZE), PAGE_SIZE-pageoff);
@@ -199,17 +211,21 @@ ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo,
			rv = uiomove((uint8_t *)pg->uanon + pageoff,
 			    xfersize, uio);
 			if (rv) {
-				mutex_enter(&uobj->vmobjlock);
+				mutex_enter(&pguobj->vmobjlock);
 				uvm_page_unbusy(pgs, npages);
-				mutex_exit(&uobj->vmobjlock);
+				mutex_exit(&pguobj->vmobjlock);
 				goto out;
 			}
 
 			if (uio->uio_rw == UIO_WRITE)
 				pg->flags &= ~(PG_CLEAN | PG_FAKE);
 			todo -= xfersize;
 		}
-		mutex_enter(&uobj->vmobjlock);
+		mutex_enter(&pguobj->vmobjlock);
 		uvm_page_unbusy(pgs, npages);
+		if (pguobj != uobj) {
+			mutex_exit(&pguobj->vmobjlock);
+			mutex_enter(&uobj->vmobjlock);
+		}
 	} while (todo);
 	mutex_exit(&uobj->vmobjlock);
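
The unbusy sequence that both functions now share can be read as a
small helper (hypothetical name; assumes the kmutex_t vmobjlock
embedded in struct uvm_object, as in the diff above):

    static void
    unbusy_and_relock(struct uvm_object *uobj, struct uvm_object *pguobj,
        struct vm_page **pgs, int npages)
    {
        /* unbusy under the lock of the object that owns the pages */
        mutex_enter(&pguobj->vmobjlock);
        uvm_page_unbusy(pgs, npages);
        if (pguobj != uobj) {
            /* hand the lock back to the vnode's object */
            mutex_exit(&pguobj->vmobjlock);
            mutex_enter(&uobj->vmobjlock);
        }
        /* either way, uobj->vmobjlock is held on return */
    }

Returning with uobj->vmobjlock held keeps the loops' invariant intact:
the next pgo_get() call and the final mutex_exit(&uobj->vmobjlock) both
expect the vnode's object to be locked.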