- get pages to loan out in uvm_loanuobjpages() rather than
  having the caller (nfsd, in this case) do so.
- tweak locking so that nfs loaned READ works on layered filesystems.
This commit is contained in:
yamt 2004-01-07 12:17:10 +00:00
parent e0d82d7dce
commit 59afac32fe
3 changed files with 139 additions and 77 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: nfs_serv.c,v 1.86 2003/11/05 10:18:38 hannken Exp $ */
/* $NetBSD: nfs_serv.c,v 1.87 2004/01/07 12:17:10 yamt Exp $ */
/*
* Copyright (c) 1989, 1993
@ -55,7 +55,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_serv.c,v 1.86 2003/11/05 10:18:38 hannken Exp $");
__KERNEL_RCSID(0, "$NetBSD: nfs_serv.c,v 1.87 2004/01/07 12:17:10 yamt Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -569,13 +569,6 @@ out:
nfsm_srvdone;
}
/*
* XXX UBC temp limit
* maximum number of pages we can do VOP_GETPAGES and loan-out at once.
* should be <= MAX_READ_AHEAD in genfs_vnops.c
*/
#define NFSD_READ_GETPAGES_CHUNK 16
/*
* nfs read service
*/
@ -671,81 +664,48 @@ nfsrv_read(nfsd, slp, procp, mrq)
if (nfsd_use_loan) {
struct vm_page **pgpp;
voff_t pgoff = trunc_page(off);
int orignpages, nleftpages;
vaddr_t lva, curlva;
int npages;
vaddr_t lva;
orignpages = (round_page(off + cnt) - pgoff)
>> PAGE_SHIFT;
KASSERT(orignpages <= M_EXT_MAXPAGES); /* XXX */
npages = (round_page(off + cnt) - pgoff) >> PAGE_SHIFT;
KASSERT(npages <= M_EXT_MAXPAGES); /* XXX */
lva = sokvaalloc(orignpages << PAGE_SHIFT, slp->ns_so);
/* allocate kva for mbuf data */
lva = sokvaalloc(npages << PAGE_SHIFT, slp->ns_so);
if (lva == 0) {
/* fall back to VOP_READ */
goto loan_fail;
}
/* allocate mbuf */
m = m_get(M_WAIT, MT_DATA);
MCLAIM(m, &nfs_mowner);
pgpp = m->m_ext.ext_pgs;
curlva = lva;
nleftpages = orignpages;
while (nleftpages > 0) {
int npages = nleftpages;
if (npages > NFSD_READ_GETPAGES_CHUNK)
npages = NFSD_READ_GETPAGES_CHUNK;
again:
simple_lock(&vp->v_interlock);
error = VOP_GETPAGES(vp, pgoff, pgpp, &npages,
0, VM_PROT_READ, 0, PGO_SYNCIO);
if (error == EAGAIN) {
tsleep(&lbolt, PVM, "nfsread", 0);
goto again;
}
if (error) {
uvm_unloan(m->m_ext.ext_pgs,
orignpages - nleftpages,
UVM_LOAN_TOPAGE);
sokvafree(lva,
orignpages << PAGE_SHIFT);
m_free(m);
goto read_error;
}
/* loan and unbusy pages */
simple_lock(&vp->v_interlock);
for (i = 0; i < npages; i++) {
if (pgpp[i]->flags & PG_RELEASED) {
uvm_lock_pageq();
uvm_page_unbusy(pgpp, npages);
uvm_unlock_pageq();
simple_unlock(&vp->v_interlock);
continue;
}
}
uvm_loanuobjpages(pgpp, npages);
simple_unlock(&vp->v_interlock);
/* map pages */
for (i = 0; i < npages; i++) {
pmap_kenter_pa(curlva,
VM_PAGE_TO_PHYS(pgpp[i]),
VM_PROT_READ);
curlva += PAGE_SIZE;
}
nleftpages -= npages;
pgpp += npages;
pgoff += npages << PAGE_SHIFT;
/* loan pages */
error = uvm_loanuobjpages(&vp->v_uobj, pgoff, npages,
pgpp);
if (error) {
sokvafree(lva, npages << PAGE_SHIFT);
m_free(m);
goto read_error;
}
lva += off & PAGE_MASK;
MCLAIM(m, &nfs_mowner);
MEXTADD(m, (void *)lva, cnt, M_MBUF, soloanfree,
slp->ns_so);
/* associate kva to mbuf */
MEXTADD(m, (void *)(lva + ((vaddr_t)off & PAGE_MASK)),
cnt, M_MBUF, soloanfree, slp->ns_so);
m->m_flags |= M_EXT_PAGES | M_EXT_ROMAP;
m->m_len = cnt;
/* map pages */
for (i = 0; i < npages; i++) {
pmap_kenter_pa(lva, VM_PAGE_TO_PHYS(pgpp[i]),
VM_PROT_READ);
lva += PAGE_SIZE;
}
pmap_update(pmap_kernel());
mb->m_next = m;
mb = m;
error = 0;

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_loan.c,v 1.44 2003/10/27 12:47:33 yamt Exp $ */
/* $NetBSD: uvm_loan.c,v 1.45 2004/01/07 12:17:10 yamt Exp $ */
/*
*
@ -39,7 +39,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.44 2003/10/27 12:47:33 yamt Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.45 2004/01/07 12:17:10 yamt Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -115,6 +115,7 @@ static int uvm_loanuobj __P((struct uvm_faultinfo *, void ***,
static int uvm_loanzero __P((struct uvm_faultinfo *, void ***, int));
static void uvm_unloananon __P((struct vm_anon **, int));
static void uvm_unloanpage __P((struct vm_page **, int));
static void uvm_loanpage __P((struct vm_page **, int));
/*
@ -419,13 +420,13 @@ uvm_loananon(ufi, output, flags, anon)
}
/*
* uvm_loanuobjpages: loan pages from a uobj out (O->K)
* uvm_loanpage: loan out pages to kernel (->K)
*
* => called with uobj locked.
* => page's owner should be locked.
* => caller should own the pages.
*/
void
uvm_loanuobjpages(pgpp, npages)
static void
uvm_loanpage(pgpp, npages)
struct vm_page **pgpp;
int npages;
{
@ -454,6 +455,106 @@ uvm_loanuobjpages(pgpp, npages)
}
}
/*
* XXX UBC temp limit
* number of pages to get at once.
* should be <= MAX_READ_AHEAD in genfs_vnops.c
*/
#define UVM_LOAN_GET_CHUNK 16
/*
* uvm_loanuobjpages: loan pages from a uobj out (O->K)
*
* => uobj shouldn't be locked.
*/
int
uvm_loanuobjpages(uobj, pgoff, orignpages, origpgpp)
struct uvm_object *uobj;
voff_t pgoff;
int orignpages;
struct vm_page **origpgpp;
{
int ndone;
struct vm_page **pgpp;
int error;
int i;
struct simplelock *slock;
pgpp = origpgpp;
for (ndone = 0; ndone < orignpages; ) {
int npages;
int npendloan = 0xdead; /* XXX gcc */
reget:
npages = MIN(UVM_LOAN_GET_CHUNK, orignpages - ndone);
simple_lock(&uobj->vmobjlock);
error = (*uobj->pgops->pgo_get)(uobj,
pgoff + (ndone << PAGE_SHIFT), pgpp, &npages, 0,
VM_PROT_READ, 0, PGO_SYNCIO);
if (error == EAGAIN) {
tsleep(&lbolt, PVM, "nfsread", 0);
continue;
}
if (error) {
uvm_unloan(origpgpp, ndone, UVM_LOAN_TOPAGE);
return error;
}
KASSERT(npages > 0);
/* loan and unbusy pages */
slock = NULL;
for (i = 0; i < npages; i++) {
struct simplelock *nextslock; /* slock for next page */
struct vm_page *pg = *pgpp;
/* XXX assuming that the page is owned by uobj */
KASSERT(pg->uobject != NULL);
nextslock = &pg->uobject->vmobjlock;
if (slock != nextslock) {
if (slock) {
KASSERT(npendloan > 0);
uvm_loanpage(pgpp - npendloan,
npendloan);
simple_unlock(slock);
}
slock = nextslock;
simple_lock(slock);
npendloan = 0;
}
if (pg->flags & PG_RELEASED) {
/*
* release pages and try again.
*/
simple_unlock(slock);
for (; i < npages; i++) {
pg = pgpp[i];
slock = &pg->uobject->vmobjlock;
simple_lock(slock);
uvm_lock_pageq();
uvm_page_unbusy(&pg, 1);
uvm_unlock_pageq();
simple_unlock(slock);
}
goto reget;
}
npendloan++;
pgpp++;
ndone++;
KASSERT(pgpp - origpgpp == ndone);
}
KASSERT(slock != NULL);
KASSERT(npendloan > 0);
uvm_loanpage(pgpp - npendloan, npendloan);
simple_unlock(slock);
}
return 0;
}
/*
* uvm_loanuobj: loan a page from a uobj out
*
@ -581,7 +682,7 @@ uvm_loanuobj(ufi, output, flags, va)
*/
if ((flags & UVM_LOAN_TOANON) == 0) {
uvm_loanuobjpages(&pg, 1);
uvm_loanpage(&pg, 1);
**output = pg;
(*output)++;
return (1);

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_loan.h,v 1.11 2003/05/03 17:54:32 yamt Exp $ */
/* $NetBSD: uvm_loan.h,v 1.12 2004/01/07 12:17:10 yamt Exp $ */
/*
*
@ -53,7 +53,8 @@
void uvm_loan_init __P((void));
int uvm_loan __P((struct vm_map *, vaddr_t, vsize_t, void *, int));
void uvm_unloan __P((void *, int, int));
void uvm_loanuobjpages __P((struct vm_page **, int));
int uvm_loanuobjpages __P((struct uvm_object *, voff_t, int,
struct vm_page **));
struct vm_page *uvm_loanbreak __P((struct vm_page *));
#endif /* _KERNEL */