Fix the following pathological scenario:

* User allocates ZFOD region, but does not actually touch the buffer
  to fault in the pages.
* In a loop, user writes this buffer to a network socket, triggering
  sosend_loan().
* uvm_loan() calls uvm_loanzero() once for each page in the loaned
  region (since the pages have not yet faulted in).  This causes a
  page to be allocated and zero'd.  The result is the kernel spends
  a lot of time allocating and zero'ing pages.

This fix creates a special object which owns a single zero'd page.
This single zero'd page is used to satisfy all loans of non-resident
ZFOD mappings.

Thanks to Allen Briggs for discovering the problem and for providing
an initial patch.
This commit is contained in:
thorpej 2003-03-04 06:18:54 +00:00
parent 57fdc950ca
commit d3f54e81dd
3 changed files with 79 additions and 47 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_init.c,v 1.15 2001/11/10 07:37:00 lukem Exp $ */
/* $NetBSD: uvm_init.c,v 1.16 2003/03/04 06:18:54 thorpej Exp $ */
/*
*
@ -39,7 +39,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_init.c,v 1.15 2001/11/10 07:37:00 lukem Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_init.c,v 1.16 2003/03/04 06:18:54 thorpej Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -142,6 +142,12 @@ uvm_init()
amap_init(); /* init amap module */
uvm_anon_init(); /* allocate initial anons */
/*
* step 9: init the uvm_loan() facility.
*/
uvm_loan_init();
/*
* the VM system is now up! now that malloc is up we can resize the
* <obj,off> => <page> hash table for general use and enable paging

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_loan.c,v 1.39 2002/07/14 23:53:41 chs Exp $ */
/* $NetBSD: uvm_loan.c,v 1.40 2003/03/04 06:18:54 thorpej Exp $ */
/*
*
@ -39,7 +39,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.39 2002/07/14 23:53:41 chs Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.40 2003/03/04 06:18:54 thorpej Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -619,7 +619,7 @@ uvm_loanuobj(ufi, output, flags, va)
}
/*
* uvm_loanzero: "loan" a zero-fill page out
* uvm_loanzero: loan a zero-fill page out
*
* => called with map, amap, uobj locked
* => return value:
@ -629,6 +629,8 @@ uvm_loanuobj(ufi, output, flags, va)
* 1 = got it, everything still locked
*/
static struct uvm_object uvm_loanzero_object;
static int
uvm_loanzero(ufi, output, flags)
struct uvm_faultinfo *ufi;
@ -640,11 +642,19 @@ uvm_loanzero(ufi, output, flags)
struct uvm_object *uobj = ufi->entry->object.uvm_obj;
struct vm_amap *amap = ufi->entry->aref.ar_amap;
if ((flags & UVM_LOAN_TOANON) == 0) { /* loaning to kernel-page */
while ((pg = uvm_pagealloc(NULL, 0, NULL,
UVM_PGA_ZERO)) == NULL) {
simple_lock(&uvm_loanzero_object.vmobjlock);
/*
* first, get ahold of our single zero page.
*/
if (__predict_false((pg =
TAILQ_FIRST(&uvm_loanzero_object.memq)) == NULL)) {
while ((pg = uvm_pagealloc(&uvm_loanzero_object, 0, NULL,
UVM_PGA_ZERO)) == NULL) {
simple_unlock(&uvm_loanzero_object.vmobjlock);
uvmfault_unlockall(ufi, amap, uobj, NULL);
uvm_wait("loanzero1");
uvm_wait("loanzero");
if (!uvmfault_relock(ufi)) {
return (0);
}
@ -654,57 +664,60 @@ uvm_loanzero(ufi, output, flags)
if (uobj) {
simple_lock(&uobj->vmobjlock);
}
simple_lock(&uvm_loanzero_object.vmobjlock);
}
/* got a zero'd page; return */
pg->flags &= ~(PG_WANTED|PG_BUSY);
/* got a zero'd page. */
pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
pg->flags |= PG_RDONLY;
UVM_PAGE_OWN(pg, NULL);
}
if ((flags & UVM_LOAN_TOANON) == 0) { /* loaning to kernel-page */
uvm_lock_pageq();
pg->loan_count++;
uvm_unlock_pageq();
simple_unlock(&uvm_loanzero_object.vmobjlock);
**output = pg;
(*output)++;
pg->loan_count = 1;
return (1);
}
/* loaning to an anon */
while ((anon = uvm_analloc()) == NULL ||
(pg = uvm_pagealloc(NULL, 0, anon, UVM_PGA_ZERO)) == NULL) {
uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, anon);
/*
* loaning to an anon. check to see if there is already an anon
* associated with this page. if so, then just return a reference
* to this object.
*/
/* out of swap causes us to fail */
if (anon == NULL) {
return (-1);
}
/*
* drop our reference; we're the only one,
* so it's okay that the anon isn't locked
* here.
*/
anon->an_ref--;
uvm_anfree(anon);
uvm_wait("loanzero2"); /* wait for pagedaemon */
if (!uvmfault_relock(ufi)) {
/* map changed while unlocked, need relookup */
return (0);
}
/* relock everything else */
if (amap) {
amap_lock(amap);
}
if (uobj) {
simple_lock(&uobj->vmobjlock);
}
if (pg->uanon) {
anon = pg->uanon;
simple_lock(&anon->an_lock);
anon->an_ref++;
simple_unlock(&anon->an_lock);
simple_unlock(&uvm_loanzero_object.vmobjlock);
**output = anon;
(*output)++;
return (1);
}
/* got a zero'd page; return */
pg->flags &= ~(PG_BUSY|PG_FAKE);
UVM_PAGE_OWN(pg, NULL);
/*
* need to allocate a new anon
*/
anon = uvm_analloc();
if (anon == NULL) {
/* out of swap causes us to fail */
simple_unlock(&uvm_loanzero_object.vmobjlock);
uvmfault_unlockall(ufi, amap, uobj, NULL);
return (-1);
}
anon->u.an_page = pg;
pg->uanon = anon;
uvm_lock_pageq();
pg->loan_count++;
uvm_pageactivate(pg);
uvm_unlock_pageq();
simple_unlock(&uvm_loanzero_object.vmobjlock);
**output = anon;
(*output)++;
return (1);
@ -827,3 +840,15 @@ uvm_unloan(void *v, int npages, int flags)
uvm_unloanpage(v, npages);
}
}
/*
* uvm_loan_init(): initialize the uvm_loan() facility.
*/
/*
 * uvm_loan_init: one-time setup for the uvm_loan() facility.
 *
 * Prepares the private uvm_loanzero_object so that uvm_loanzero() can
 * lazily attach a single shared zero-filled page to it (that page is
 * then used to satisfy all loans of non-resident ZFOD mappings).
 *
 * => called once from uvm_init() during VM bootstrap, before any
 *    loan activity can occur.
 */
void
uvm_loan_init(void)
{
/* lock protecting the object; taken in uvm_loanzero() before memq use */
simple_lock_init(&uvm_loanzero_object.vmobjlock);
/* page queue starts empty; first ZFOD loan allocates the zero page */
TAILQ_INIT(&uvm_loanzero_object.memq);
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_loan.h,v 1.9 2001/11/06 08:07:51 chs Exp $ */
/* $NetBSD: uvm_loan.h,v 1.10 2003/03/04 06:18:54 thorpej Exp $ */
/*
*
@ -50,6 +50,7 @@
* loan prototypes
*/
void uvm_loan_init __P((void));
int uvm_loan __P((struct vm_map *, vaddr_t, vsize_t, void *, int));
void uvm_unloan __P((void *, int, int));