Now that pool_cache_invalidate() is synchronous and can handle per-CPU
caches, merge pool_drain_start() and pool_drain_end() into a single function: bool pool_drain(struct pool **ppp). The "bool" return value indicates whether reclaiming was fully completed (true) or not (false). "ppp", if non-NULL, will receive a pointer to the pool that was drained. See http://mail-index.netbsd.org/tech-kern/2012/06/04/msg013287.html
This commit is contained in:
parent
288782db86
commit
57d7988f76
|
@ -1,4 +1,4 @@
|
|||
/* $NetBSD: misc.c,v 1.3 2011/03/10 19:35:24 pooka Exp $ */
|
||||
/* $NetBSD: misc.c,v 1.4 2012/06/05 22:51:47 jym Exp $ */
|
||||
|
||||
/*-
|
||||
* Copyright (c) 2009 The NetBSD Foundation, Inc.
|
||||
|
@ -133,15 +133,8 @@ void
|
|||
kmem_reap(void)
|
||||
{
|
||||
int bufcnt;
|
||||
uint64_t where;
|
||||
struct pool *pp;
|
||||
|
||||
/*
|
||||
* start draining pool resources now that we're not
|
||||
* holding any locks.
|
||||
*/
|
||||
pool_drain_start(&pp, &where);
|
||||
|
||||
bufcnt = uvmexp.freetarg - uvmexp.free;
|
||||
if (bufcnt < 0)
|
||||
bufcnt = 0;
|
||||
|
@ -153,9 +146,9 @@ kmem_reap(void)
|
|||
mutex_exit(&bufcache_lock);
|
||||
|
||||
/*
|
||||
* complete draining the pools.
|
||||
* drain the pools.
|
||||
*/
|
||||
pool_drain_end(pp, where);
|
||||
pool_drain(&pp);
|
||||
// printf("XXXNETBSD kmem_reap called, write me\n");
|
||||
}
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* $NetBSD: subr_pool.c,v 1.196 2012/06/05 22:28:11 jym Exp $ */
|
||||
/* $NetBSD: subr_pool.c,v 1.197 2012/06/05 22:51:47 jym Exp $ */
|
||||
|
||||
/*-
|
||||
* Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010
|
||||
|
@ -32,7 +32,7 @@
|
|||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.196 2012/06/05 22:28:11 jym Exp $");
|
||||
__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.197 2012/06/05 22:51:47 jym Exp $");
|
||||
|
||||
#include "opt_ddb.h"
|
||||
#include "opt_lockdebug.h"
|
||||
|
@ -1300,7 +1300,7 @@ pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
|
|||
/*
|
||||
* Release all complete pages that have not been used recently.
|
||||
*
|
||||
* Might be called from interrupt context.
|
||||
* Must not be called from interrupt context.
|
||||
*/
|
||||
int
|
||||
pool_reclaim(struct pool *pp)
|
||||
|
@ -1311,9 +1311,7 @@ pool_reclaim(struct pool *pp)
|
|||
bool klock;
|
||||
int rv;
|
||||
|
||||
if (cpu_intr_p() || cpu_softintr_p()) {
|
||||
KASSERT(pp->pr_ipl != IPL_NONE);
|
||||
}
|
||||
KASSERT(!cpu_intr_p() && !cpu_softintr_p());
|
||||
|
||||
if (pp->pr_drain_hook != NULL) {
|
||||
/*
|
||||
|
@ -1387,17 +1385,14 @@ pool_reclaim(struct pool *pp)
|
|||
}
|
||||
|
||||
/*
|
||||
* Drain pools, one at a time. This is a two stage process;
|
||||
* drain_start kicks off a cross call to drain CPU-level caches
|
||||
* if the pool has an associated pool_cache. drain_end waits
|
||||
* for those cross calls to finish, and then drains the cache
|
||||
* (if any) and pool.
|
||||
* Drain pools, one at a time. The drained pool is returned within ppp.
|
||||
*
|
||||
* Note, must never be called from interrupt context.
|
||||
*/
|
||||
void
|
||||
pool_drain_start(struct pool **ppp, uint64_t *wp)
|
||||
bool
|
||||
pool_drain(struct pool **ppp)
|
||||
{
|
||||
bool reclaimed;
|
||||
struct pool *pp;
|
||||
|
||||
KASSERT(!TAILQ_EMPTY(&pool_head));
|
||||
|
@ -1422,28 +1417,6 @@ pool_drain_start(struct pool **ppp, uint64_t *wp)
|
|||
pp->pr_refcnt++;
|
||||
mutex_exit(&pool_head_lock);
|
||||
|
||||
/* If there is a pool_cache, drain CPU level caches. */
|
||||
*ppp = pp;
|
||||
if (pp->pr_cache != NULL) {
|
||||
*wp = xc_broadcast(0, (xcfunc_t)pool_cache_transfer,
|
||||
pp->pr_cache, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
bool
|
||||
pool_drain_end(struct pool *pp, uint64_t where)
|
||||
{
|
||||
bool reclaimed;
|
||||
|
||||
if (pp == NULL)
|
||||
return false;
|
||||
|
||||
KASSERT(pp->pr_refcnt > 0);
|
||||
|
||||
/* Wait for remote draining to complete. */
|
||||
if (pp->pr_cache != NULL)
|
||||
xc_wait(where);
|
||||
|
||||
/* Drain the cache (if any) and pool.. */
|
||||
reclaimed = pool_reclaim(pp);
|
||||
|
||||
|
@ -1453,6 +1426,9 @@ pool_drain_end(struct pool *pp, uint64_t where)
|
|||
cv_broadcast(&pool_busy);
|
||||
mutex_exit(&pool_head_lock);
|
||||
|
||||
if (ppp != NULL)
|
||||
*ppp = pp;
|
||||
|
||||
return reclaimed;
|
||||
}
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* $NetBSD: memalloc.c,v 1.15 2012/04/29 20:27:32 dsl Exp $ */
|
||||
/* $NetBSD: memalloc.c,v 1.16 2012/06/05 22:51:47 jym Exp $ */
|
||||
|
||||
/*
|
||||
* Copyright (c) 2009 Antti Kantee. All Rights Reserved.
|
||||
|
@ -26,7 +26,7 @@
|
|||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__KERNEL_RCSID(0, "$NetBSD: memalloc.c,v 1.15 2012/04/29 20:27:32 dsl Exp $");
|
||||
__KERNEL_RCSID(0, "$NetBSD: memalloc.c,v 1.16 2012/06/05 22:51:47 jym Exp $");
|
||||
|
||||
#include <sys/param.h>
|
||||
#include <sys/kmem.h>
|
||||
|
@ -285,15 +285,8 @@ pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
|
|||
pc->pc_pool.pr_drain_hook_arg = arg;
|
||||
}
|
||||
|
||||
void
|
||||
pool_drain_start(struct pool **ppp, uint64_t *wp)
|
||||
{
|
||||
|
||||
/* nada */
|
||||
}
|
||||
|
||||
bool
|
||||
pool_drain_end(struct pool *pp, uint64_t w)
|
||||
pool_drain(struct pool **ppp)
|
||||
{
|
||||
|
||||
/* can't reclaim anything in this model */
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* $NetBSD: vm.c,v 1.126 2012/05/23 14:59:21 martin Exp $ */
|
||||
/* $NetBSD: vm.c,v 1.127 2012/06/05 22:51:47 jym Exp $ */
|
||||
|
||||
/*
|
||||
* Copyright (c) 2007-2011 Antti Kantee. All Rights Reserved.
|
||||
|
@ -41,7 +41,7 @@
|
|||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.126 2012/05/23 14:59:21 martin Exp $");
|
||||
__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.127 2012/06/05 22:51:47 jym Exp $");
|
||||
|
||||
#include <sys/param.h>
|
||||
#include <sys/atomic.h>
|
||||
|
@ -989,7 +989,6 @@ uvm_pageout(void *arg)
|
|||
{
|
||||
struct vm_page *pg;
|
||||
struct pool *pp, *pp_first;
|
||||
uint64_t where;
|
||||
int cleaned, skip, skipped;
|
||||
int waspaging;
|
||||
bool succ;
|
||||
|
@ -1094,19 +1093,15 @@ uvm_pageout(void *arg)
|
|||
/*
|
||||
* And then drain the pools. Wipe them out ... all of them.
|
||||
*/
|
||||
|
||||
pool_drain_start(&pp_first, &where);
|
||||
pp = pp_first;
|
||||
for (;;) {
|
||||
for (pp_first = NULL;;) {
|
||||
rump_vfs_drainbufs(10 /* XXX: estimate better */);
|
||||
succ = pool_drain_end(pp, where);
|
||||
if (succ)
|
||||
|
||||
succ = pool_drain(&pp);
|
||||
if (succ || pp == pp_first)
|
||||
break;
|
||||
pool_drain_start(&pp, &where);
|
||||
if (pp == pp_first) {
|
||||
succ = pool_drain_end(pp, where);
|
||||
break;
|
||||
}
|
||||
|
||||
if (pp_first == NULL)
|
||||
pp_first = pp;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* $NetBSD: pool.h,v 1.74 2012/05/05 19:15:10 rmind Exp $ */
|
||||
/* $NetBSD: pool.h,v 1.75 2012/06/05 22:51:47 jym Exp $ */
|
||||
|
||||
/*-
|
||||
* Copyright (c) 1997, 1998, 1999, 2000, 2007 The NetBSD Foundation, Inc.
|
||||
|
@ -263,8 +263,7 @@ int pool_prime(struct pool *, int);
|
|||
void pool_setlowat(struct pool *, int);
|
||||
void pool_sethiwat(struct pool *, int);
|
||||
void pool_sethardlimit(struct pool *, int, const char *, int);
|
||||
void pool_drain_start(struct pool **, uint64_t *);
|
||||
bool pool_drain_end(struct pool *, uint64_t);
|
||||
bool pool_drain(struct pool **);
|
||||
|
||||
/*
|
||||
* Debugging and diagnostic aides.
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
/* $NetBSD: uvm_pdaemon.c,v 1.105 2012/02/01 23:43:49 para Exp $ */
|
||||
/* $NetBSD: uvm_pdaemon.c,v 1.106 2012/06/05 22:51:47 jym Exp $ */
|
||||
|
||||
/*
|
||||
* Copyright (c) 1997 Charles D. Cranor and Washington University.
|
||||
|
@ -66,7 +66,7 @@
|
|||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.105 2012/02/01 23:43:49 para Exp $");
|
||||
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.106 2012/06/05 22:51:47 jym Exp $");
|
||||
|
||||
#include "opt_uvmhist.h"
|
||||
#include "opt_readahead.h"
|
||||
|
@ -228,7 +228,6 @@ uvm_pageout(void *arg)
|
|||
int bufcnt, npages = 0;
|
||||
int extrapages = 0;
|
||||
struct pool *pp;
|
||||
uint64_t where;
|
||||
|
||||
UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);
|
||||
|
||||
|
@ -327,12 +326,6 @@ uvm_pageout(void *arg)
|
|||
if (!needsfree && !kmem_va_starved)
|
||||
continue;
|
||||
|
||||
/*
|
||||
* start draining pool resources now that we're not
|
||||
* holding any locks.
|
||||
*/
|
||||
pool_drain_start(&pp, &where);
|
||||
|
||||
/*
|
||||
* kill unused metadata buffers.
|
||||
*/
|
||||
|
@ -341,9 +334,9 @@ uvm_pageout(void *arg)
|
|||
mutex_exit(&bufcache_lock);
|
||||
|
||||
/*
|
||||
* complete draining the pools.
|
||||
* drain the pools.
|
||||
*/
|
||||
pool_drain_end(pp, where);
|
||||
pool_drain(&pp);
|
||||
}
|
||||
/*NOTREACHED*/
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue