Detangle the pagedaemon from uvm_fpageqlock:

- Have a single lock (uvmpd_lock) to protect pagedaemon state that was
  previously covered by uvmpd_pool_drain_lock plus uvm_fpageqlock.
- Don't require any locks to be held when calling uvm_kick_pdaemon().
- Use uvm_free() to read the free page count instead of accessing uvmexp.free directly.
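
The effect is easiest to see in uvm_kick_pdaemon(): the free page check is now
made with no locks held, using uvm_free(), and uvmpd_lock is taken only around
the wakeup itself. A condensed sketch of the resulting pattern (paraphrasing
the uvm_pdaemon.c hunk below; all names are as they appear in the NetBSD
sources):

	void
	uvm_kick_pdaemon(void)
	{
		/* Sample the free page count without holding any lock. */
		int fpages = uvm_free();

		if (fpages + uvmexp.paging < uvmexp.freemin ||
		    (fpages + uvmexp.paging < uvmexp.freetarg &&
		     uvmpdpol_needsscan_p()) ||
		    uvm_km_va_starved_p()) {
			/* uvmpd_lock covers only the pagedaemon wakeup. */
			mutex_spin_enter(&uvmpd_lock);
			wakeup(&uvm.pagedaemon);
			mutex_spin_exit(&uvmpd_lock);
		}
	}

Because the check no longer needs uvm_fpageqlock, callers such as
vmem_kick_pdaemon() in subr_vmem.c and the pglist allocators can kick the
daemon after dropping the free page queue lock, or without taking it at all.
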
commit 479b92623d (parent a7b92da9a8)
ad, 2019-12-21 14:50:34 +00:00
4 changed files with 48 additions and 56 deletions

subr_vmem.c

@@ -1,4 +1,4 @@
/* $NetBSD: subr_vmem.c,v 1.99 2019/10/16 18:29:49 christos Exp $ */
/* $NetBSD: subr_vmem.c,v 1.100 2019/12/21 14:50:34 ad Exp $ */
/*-
* Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
@@ -46,7 +46,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.99 2019/10/16 18:29:49 christos Exp $");
__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.100 2019/12/21 14:50:34 ad Exp $");
#if defined(_KERNEL) && defined(_KERNEL_OPT)
#include "opt_ddb.h"
@@ -198,9 +198,7 @@ static void
vmem_kick_pdaemon(void)
{
#if defined(_KERNEL)
mutex_spin_enter(&uvm_fpageqlock);
uvm_kick_pdaemon();
mutex_spin_exit(&uvm_fpageqlock);
#endif
}

uvm_page.c

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_page.c,v 1.209 2019/12/21 14:41:44 ad Exp $ */
/* $NetBSD: uvm_page.c,v 1.210 2019/12/21 14:50:34 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.209 2019/12/21 14:41:44 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.210 2019/12/21 14:50:34 ad Exp $");
#include "opt_ddb.h"
#include "opt_uvm.h"
@@ -878,8 +878,6 @@ uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
KASSERT(anon == NULL || anon->an_lock == NULL ||
mutex_owned(anon->an_lock));
mutex_spin_enter(&uvm_fpageqlock);
/*
* This implements a global round-robin page coloring
* algorithm.
@@ -909,6 +907,7 @@ uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
* we make kernel reserve pages available if called by a
* kernel thread or a realtime thread.
*/
mutex_spin_enter(&uvm_fpageqlock);
l = curlwp;
if (__predict_true(l != NULL) && lwp_eprio(l) >= PRI_KTHREAD) {
flags |= UVM_PGA_USERESERVE;

uvm_pdaemon.c

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_pdaemon.c,v 1.116 2019/12/21 11:41:18 ad Exp $ */
/* $NetBSD: uvm_pdaemon.c,v 1.117 2019/12/21 14:50:34 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.116 2019/12/21 11:41:18 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.117 2019/12/21 14:50:34 ad Exp $");
#include "opt_uvmhist.h"
#include "opt_readahead.h"
@@ -112,7 +112,7 @@ static void uvmpd_pool_drain_wakeup(void);
static unsigned int uvm_pagedaemon_waiters;
/* State for the pool drainer thread */
static kmutex_t uvmpd_pool_drain_lock __cacheline_aligned;
static kmutex_t uvmpd_lock __cacheline_aligned;
static kcondvar_t uvmpd_pool_drain_cv;
static bool uvmpd_pool_drain_run = false;
@@ -136,7 +136,7 @@ uvm_wait(const char *wmsg)
if (uvm.pagedaemon_lwp == NULL)
panic("out of memory before the pagedaemon thread exists");
mutex_spin_enter(&uvm_fpageqlock);
mutex_spin_enter(&uvmpd_lock);
/*
* check for page daemon going to sleep (waiting for itself)
@@ -170,27 +170,26 @@ uvm_wait(const char *wmsg)
uvm_pagedaemon_waiters++;
wakeup(&uvm.pagedaemon); /* wake the daemon! */
UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm_fpageqlock, false, wmsg, timo);
UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvmpd_lock, false, wmsg, timo);
}
/*
* uvm_kick_pdaemon: perform checks to determine if we need to
* give the pagedaemon a nudge, and do so if necessary.
*
* => called with uvm_fpageqlock held.
*/
void
uvm_kick_pdaemon(void)
{
int fpages = uvm_free();
KASSERT(mutex_owned(&uvm_fpageqlock));
if (uvmexp.free + uvmexp.paging < uvmexp.freemin ||
(uvmexp.free + uvmexp.paging < uvmexp.freetarg &&
if (fpages + uvmexp.paging < uvmexp.freemin ||
(fpages + uvmexp.paging < uvmexp.freetarg &&
uvmpdpol_needsscan_p()) ||
uvm_km_va_starved_p()) {
mutex_spin_enter(&uvmpd_lock);
wakeup(&uvm.pagedaemon);
mutex_spin_exit(&uvmpd_lock);
}
}
@@ -241,12 +240,14 @@ uvm_pageout(void *arg)
{
int npages = 0;
int extrapages = 0;
int fpages;
UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);
UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);
mutex_init(&uvmpd_pool_drain_lock, MUTEX_DEFAULT, IPL_VM);
mutex_init(&uvmpd_lock, MUTEX_DEFAULT, IPL_VM);
mutex_init(&uvmpd_lock, MUTEX_DEFAULT, IPL_VM);
cv_init(&uvmpd_pool_drain_cv, "pooldrain");
/* Create the pool drainer kernel thread. */
@@ -271,16 +272,16 @@ uvm_pageout(void *arg)
kmem_va_starved = uvm_km_va_starved_p();
mutex_spin_enter(&uvm_fpageqlock);
mutex_spin_enter(&uvmpd_lock);
if ((uvm_pagedaemon_waiters == 0 || uvmexp.paging > 0) &&
!kmem_va_starved) {
UVMHIST_LOG(pdhist," <<SLEEPING>>",0,0,0,0);
UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
&uvm_fpageqlock, false, "pgdaemon", 0);
&uvmpd_lock, false, "pgdaemon", 0);
uvmexp.pdwoke++;
UVMHIST_LOG(pdhist," <<WOKE UP>>",0,0,0,0);
} else {
mutex_spin_exit(&uvm_fpageqlock);
mutex_spin_exit(&uvmpd_lock);
}
/*
@@ -290,9 +291,7 @@ uvm_pageout(void *arg)
if (npages != uvmexp.npages || extrapages != uvm_extrapages) {
npages = uvmexp.npages;
extrapages = uvm_extrapages;
mutex_spin_enter(&uvm_fpageqlock);
uvmpd_tune();
mutex_spin_exit(&uvm_fpageqlock);
}
uvmpdpol_tune();
@@ -301,33 +300,30 @@ uvm_pageout(void *arg)
* Estimate a hint. Note that bufmem are returned to
* system only when entire pool page is empty.
*/
mutex_spin_enter(&uvm_fpageqlock);
fpages = uvm_free();
UVMHIST_LOG(pdhist," free/ftarg=%jd/%jd",
uvmexp.free, uvmexp.freetarg, 0,0);
fpages, uvmexp.freetarg, 0,0);
needsfree = uvmexp.free + uvmexp.paging < uvmexp.freetarg;
needsfree = fpages + uvmexp.paging < uvmexp.freetarg;
needsscan = needsfree || uvmpdpol_needsscan_p();
/*
* scan if needed
*/
if (needsscan) {
mutex_spin_exit(&uvm_fpageqlock);
uvmpd_scan();
mutex_spin_enter(&uvm_fpageqlock);
}
/*
* if there's any free memory to be had,
* wake up any waiters.
*/
if (uvmexp.free > uvmexp.reserve_kernel ||
uvmexp.paging == 0) {
if (uvm_free() > uvmexp.reserve_kernel || uvmexp.paging == 0) {
mutex_spin_enter(&uvmpd_lock);
wakeup(&uvmexp.free);
uvm_pagedaemon_waiters = 0;
mutex_spin_exit(&uvmpd_lock);
}
mutex_spin_exit(&uvm_fpageqlock);
/*
* scan done. if we don't need free memory, we're done.
@@ -382,14 +378,14 @@ uvm_pageout_done(int npages)
* wake up either of pagedaemon or LWPs waiting for it.
*/
mutex_spin_enter(&uvm_fpageqlock);
if (uvmexp.free <= uvmexp.reserve_kernel) {
mutex_spin_enter(&uvmpd_lock);
if (uvm_free() <= uvmexp.reserve_kernel) {
wakeup(&uvm.pagedaemon);
} else {
} else if (uvm_pagedaemon_waiters != 0) {
wakeup(&uvmexp.free);
uvm_pagedaemon_waiters = 0;
}
mutex_spin_exit(&uvm_fpageqlock);
mutex_spin_exit(&uvmpd_lock);
}
/*
@@ -714,7 +710,7 @@ uvmpd_scan_queue(void)
* see if we've met the free target.
*/
if (uvmexp.free + uvmexp.paging
if (uvm_free() + uvmexp.paging
#if defined(VMSWAP)
+ swapcluster_nused(&swc)
#endif /* defined(VMSWAP) */
@@ -843,7 +839,7 @@ uvmpd_scan_queue(void)
* free target when all the current pageouts complete.
*/
if (uvmexp.free + uvmexp.paging > uvmexp.freetarg << 2) {
if (uvm_free() + uvmexp.paging > uvmexp.freetarg << 2) {
mutex_exit(slock);
continue;
}
@@ -926,7 +922,7 @@ uvmpd_scan_queue(void)
static void
uvmpd_scan(void)
{
int swap_shortage, pages_freed;
int swap_shortage, pages_freed, fpages;
UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);
uvmexp.pdrevs++;
@@ -950,11 +946,12 @@ uvmpd_scan(void)
*/
swap_shortage = 0;
if (uvmexp.free < uvmexp.freetarg &&
fpages = uvm_free();
if (fpages < uvmexp.freetarg &&
uvmexp.swpginuse >= uvmexp.swpgavail &&
!uvm_swapisfull() &&
pages_freed == 0) {
swap_shortage = uvmexp.freetarg - uvmexp.free;
swap_shortage = uvmexp.freetarg - fpages;
}
uvmpdpol_balancequeue(swap_shortage);
@@ -964,7 +961,7 @@ uvmpd_scan(void)
* modules.
*/
if (uvmexp.free < uvmexp.freemin) {
if (uvm_free() < uvmexp.freemin) {
module_thread_kick();
}
}
@@ -1038,19 +1035,17 @@ uvmpd_pool_drain_thread(void *arg)
int bufcnt;
for (;;) {
mutex_enter(&uvmpd_pool_drain_lock);
mutex_enter(&uvmpd_lock);
if (!uvmpd_pool_drain_run) {
cv_wait(&uvmpd_pool_drain_cv, &uvmpd_pool_drain_lock);
cv_wait(&uvmpd_pool_drain_cv, &uvmpd_lock);
}
uvmpd_pool_drain_run = false;
mutex_exit(&uvmpd_pool_drain_lock);
mutex_exit(&uvmpd_lock);
/*
* kill unused metadata buffers.
*/
mutex_spin_enter(&uvm_fpageqlock);
bufcnt = uvmexp.freetarg - uvmexp.free;
mutex_spin_exit(&uvm_fpageqlock);
bufcnt = uvmexp.freetarg - uvm_free();
if (bufcnt < 0)
bufcnt = 0;
@@ -1070,8 +1065,8 @@ static void
uvmpd_pool_drain_wakeup(void)
{
mutex_enter(&uvmpd_pool_drain_lock);
mutex_enter(&uvmpd_lock);
uvmpd_pool_drain_run = true;
cv_signal(&uvmpd_pool_drain_cv);
mutex_exit(&uvmpd_pool_drain_lock);
mutex_exit(&uvmpd_lock);
}

uvm_pglist.c

@@ -1,4 +1,4 @@
/* $NetBSD: uvm_pglist.c,v 1.76 2019/12/21 14:41:44 ad Exp $ */
/* $NetBSD: uvm_pglist.c,v 1.77 2019/12/21 14:50:34 ad Exp $ */
/*-
* Copyright (c) 1997 The NetBSD Foundation, Inc.
@@ -35,7 +35,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.76 2019/12/21 14:41:44 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.77 2019/12/21 14:50:34 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -352,8 +352,8 @@ out:
* the pagedaemon.
*/
uvm_kick_pdaemon();
mutex_spin_exit(&uvm_fpageqlock);
uvm_kick_pdaemon();
return (error);
}
@@ -493,8 +493,8 @@ out:
* the pagedaemon.
*/
uvm_kick_pdaemon();
mutex_spin_exit(&uvm_fpageqlock);
uvm_kick_pdaemon();
if (error) {
if (waitok) {