* move rump_vopwrite_fault() into history - we now support the file
  system faulting in pages if it does e.g. fragment reallocation
* get rid of rumpvm_findpage() and always use uvm_pagelookup()
* determine a vnode's cleanness by flagging it as being on the work
  list if we "take" a write fault and removing it from the worklist
  once pages are flushed.  There is no work list here, but at least
  there is symmetry with the kernel.
This commit is contained in:
pooka 2007-08-11 17:52:12 +00:00
parent a7fddb6456
commit 643e56125f
4 changed files with 37 additions and 65 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: p2k.c,v 1.7 2007/08/09 11:59:17 pooka Exp $ */
/* $NetBSD: p2k.c,v 1.8 2007/08/11 17:52:12 pooka Exp $ */
/*
* Copyright (c) 2007 Antti Kantee. All Rights Reserved.
@ -531,9 +531,6 @@ p2k_node_write(struct puffs_cc *pcc, void *opc, uint8_t *buf, off_t offset,
uio.uio_rw = UIO_WRITE;
uio.uio_vmspace = UIO_VMSPACE_SYS;
rv = rump_vopwrite_fault(opc, offset, *resid, NULL);
if (rv)
return rv;
rv = VOP_WRITE(opc, &uio, ioflag, NULL);
if (rv == 0)
*resid = uio.uio_resid;

View File

@ -1,4 +1,4 @@
/* $NetBSD: genfs.c,v 1.11 2007/08/09 13:53:36 pooka Exp $ */
/* $NetBSD: genfs.c,v 1.12 2007/08/11 17:52:12 pooka Exp $ */
/*
* Copyright (c) 2007 Antti Kantee. All Rights Reserved.
@ -120,18 +120,15 @@ genfs_getpages(void *v)
int count = *ap->a_count;
int i, error;
/* we'll allocate pages using other means */
if (ap->a_flags & PGO_NOBLOCKALLOC) {
*ap->a_count = 0;
return 0;
}
if (ap->a_centeridx != 0)
panic("%s: centeridx != not supported", __func__);
if (ap->a_access_type & VM_PROT_WRITE)
vp->v_flag |= VONWORKLST;
curoff = ap->a_offset & ~PAGE_MASK;
for (i = 0; i < count; i++, curoff += PAGE_SIZE) {
pg = rumpvm_findpage(&vp->v_uobj, curoff);
pg = uvm_pagelookup(&vp->v_uobj, curoff);
if (pg == NULL)
break;
ap->a_m[i] = pg;
@ -197,14 +194,16 @@ genfs_getpages(void *v)
printf("first page offset 0x%x\n", (int)(curoff + bufoff));
for (i = 0; i < count; i++, bufoff += PAGE_SIZE) {
/* past our prime? */
if (curoff + bufoff >= endoff)
break;
pg = rumpvm_findpage(&vp->v_uobj, curoff + bufoff);
pg = uvm_pagelookup(&vp->v_uobj, curoff + bufoff);
printf("got page %p (off 0x%x)\n", pg, (int)(curoff+bufoff));
if (pg == NULL) {
pg = rumpvm_makepage(&vp->v_uobj, curoff + bufoff);
memcpy((void *)pg->uanon, tmpbuf+bufoff, PAGE_SIZE);
RUMPVM_CLEANPAGE(pg);
pg->flags |= PG_CLEAN;
}
ap->a_m[i] = pg;
}
@ -244,10 +243,13 @@ genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff, int flags,
struct vm_page *pg, *pg_next;
voff_t smallest;
voff_t curoff, bufoff;
off_t eof;
size_t xfersize;
int bshift = vp->v_mount->mnt_fs_bshift;
int bsize = 1 << bshift;
GOP_SIZE(vp, vp->v_writesize, &eof, 0);
restart:
/* check if all pages are clean */
smallest = -1;
@ -263,19 +265,21 @@ genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff, int flags,
}
/* all done? */
if (TAILQ_EMPTY(&uobj->memq))
if (TAILQ_EMPTY(&uobj->memq)) {
vp->v_flag &= ~VONWORKLST;
return 0;
}
/* we need to flush */
for (curoff = smallest; curoff < vp->v_writesize; curoff += PAGE_SIZE) {
if (curoff - smallest > MAXPHYS)
for (curoff = smallest; curoff < eof; curoff += PAGE_SIZE) {
if (curoff - smallest >= MAXPHYS)
break;
pg = rumpvm_findpage(uobj, curoff);
pg = uvm_pagelookup(uobj, curoff);
if (pg == NULL)
break;
memcpy(databuf + (curoff-smallest),
(void *)pg->uanon, PAGE_SIZE);
RUMPVM_CLEANPAGE(pg);
pg->flags |= PG_CLEAN;
}
assert(curoff > smallest);
@ -295,9 +299,8 @@ genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff, int flags,
/* only write max what we are allowed to write */
buf.b_bcount = xfersize;
if (smallest + bufoff + xfersize > vp->v_writesize)
buf.b_bcount -= (smallest+bufoff+xfersize)
- vp->v_writesize;
if (smallest + bufoff + xfersize > eof)
buf.b_bcount -= (smallest+bufoff+xfersize) - eof;
buf.b_bcount = (buf.b_bcount + DEV_BSIZE-1) & ~(DEV_BSIZE-1);
KASSERT(buf.b_bcount > 0);
@ -306,7 +309,7 @@ genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff, int flags,
printf("putpages writing from %x to %x (vp size %x)\n",
(int)(smallest + bufoff),
(int)(smallest + bufoff + buf.b_bcount),
(int)vp->v_writesize);
(int)eof);
buf.b_lblkno = 0;
buf.b_blkno = bn + (((smallest+bufoff)&(bsize-1))>>DEV_BSHIFT);

View File

@ -1,4 +1,4 @@
/* $NetBSD: rump.h,v 1.5 2007/08/08 14:09:07 pooka Exp $ */
/* $NetBSD: rump.h,v 1.6 2007/08/11 17:52:12 pooka Exp $ */
/*
* Copyright (c) 2007 Antti Kantee. All Rights Reserved.
@ -65,7 +65,6 @@ void rump_putnode(struct vnode *);
int rump_recyclenode(struct vnode *);
int rump_ubc_magic_uiomove(size_t, struct uio *);
int rump_vopwrite_fault(struct vnode *, voff_t, size_t, kauth_cred_t);
void rump_getvninfo(struct vnode *, enum vtype *, voff_t *, dev_t *);
@ -74,7 +73,6 @@ int rump_fakeblk_find(const char *);
void rump_fakeblk_deregister(const char *);
void rumpvm_init(void);
struct vm_page *rumpvm_findpage(struct uvm_object *, voff_t);
struct vm_page *rumpvm_makepage(struct uvm_object *, voff_t);
void rumpvm_freepage(struct vm_page *);

View File

@ -1,4 +1,4 @@
/* $NetBSD: vm.c,v 1.9 2007/08/09 20:57:23 pooka Exp $ */
/* $NetBSD: vm.c,v 1.10 2007/08/11 17:52:12 pooka Exp $ */
/*
* Copyright (c) 2007 Antti Kantee. All Rights Reserved.
@ -79,19 +79,6 @@ struct vm_map rump_vmmap;
* vm pages
*/
/* XXX: we could be smarter about this */
struct vm_page *
rumpvm_findpage(struct uvm_object *uobj, voff_t off)
{
struct vm_page *pg;
TAILQ_FOREACH(pg, &uobj->memq, listq)
if (pg->offset == off)
return pg;
return NULL;
}
struct vm_page *
rumpvm_makepage(struct uvm_object *uobj, voff_t off)
{
@ -124,24 +111,6 @@ rumpvm_freepage(struct vm_page *pg)
* vnode pager
*/
int
rump_vopwrite_fault(struct vnode *vp, voff_t offset, size_t len,
kauth_cred_t cred)
{
int npages = len2npages(offset, len);
struct vm_page *pgs[npages];
int rv;
if (trunc_page(offset) >= vp->v_size)
return 0;
rv = VOP_GETPAGES(vp, trunc_page(offset), pgs, &npages, 0, 0, 0, 0);
if (rv)
return rv;
return 0;
}
static int
vn_get(struct uvm_object *uobj, voff_t off, struct vm_page **pgs,
int *npages, int centeridx, vm_prot_t access_type,
@ -179,7 +148,7 @@ ao_get(struct uvm_object *uobj, voff_t off, struct vm_page **pgs,
/* loop over pages */
off = trunc_page(off);
for (i = 0; i < *npages; i++) {
pg = rumpvm_findpage(uobj, off + (i << PAGE_SHIFT));
pg = uvm_pagelookup(uobj, off + (i << PAGE_SHIFT));
if (pg) {
pgs[i] = pg;
} else {
@ -242,6 +211,7 @@ rump_ubc_magic_uiomove(size_t n, struct uio *uio)
if (ubc_winvalid == 0)
panic("%s: ubc window not allocated", __func__);
memset(pgs, 0, sizeof(pgs));
rv = ubc_uobj->pgops->pgo_get(ubc_uobj, ubc_offset,
pgs, &npages, 0, 0, 0, 0);
if (rv)
@ -255,7 +225,7 @@ rump_ubc_magic_uiomove(size_t n, struct uio *uio)
xfersize = MIN(MIN(n, PAGE_SIZE), PAGE_SIZE-pageoff);
uiomove((uint8_t *)pgs[i]->uanon + pageoff, xfersize, uio);
if (uio->uio_rw == UIO_WRITE)
RUMPVM_SOILPAGE(pgs[i]);
pgs[i]->flags &= ~PG_CLEAN;
ubc_offset += xfersize;
n -= xfersize;
}
@ -366,8 +336,13 @@ uvm_pagermapin(struct vm_page **pps, int npages, int flags)
struct vm_page *
uvm_pagelookup(struct uvm_object *uobj, voff_t off)
{
struct vm_page *pg;
return rumpvm_findpage(uobj, off);
TAILQ_FOREACH(pg, &uobj->memq, listq)
if (pg->offset == off)
return pg;
return NULL;
}
void
@ -417,6 +392,7 @@ uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
while (len) {
npages = MIN(maxpages, round_page(len) >> PAGE_SHIFT);
memset(pgs, 0, npages * sizeof(struct vm_page *));
rv = uobj->pgops->pgo_get(uobj, off, pgs, &npages, 0, 0, 0, 0);
assert(npages > 0);
@ -442,11 +418,9 @@ uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
bool
uvn_clean_p(struct uvm_object *uobj)
{
struct vnode *vp = (void *)uobj;
printf("pages %d\n", (int)uobj->uo_npages);
if (uobj->uo_npages < 0)
panic("%s: uo_npages < 0", __func__);
return uobj->uo_npages == 0;
return (vp->v_flag & VONWORKLST) == 0;
}
/*