Add pool_cache_invalidate_local() to the pool_cache(9) API, to permit invalidation of the per-CPU objects cached in a pool cache.
See http://mail-index.netbsd.org/tech-kern/2009/10/05/msg006206.html for the discussion. Reviewed by bouyer@. Thanks!
parent 4515588628
commit 31629a1342
share/man/man9/Makefile
@@ -1,4 +1,4 @@
-# $NetBSD: Makefile,v 1.294 2009/10/05 23:44:10 rmind Exp $
+# $NetBSD: Makefile,v 1.295 2009/10/08 21:54:45 jym Exp $
 
 # Makefile for section 9 (kernel function and variable) manual pages.
 
@@ -543,7 +543,8 @@ MLINKS+=pool_cache.9 pool_cache_init.9 \
 	pool_cache.9 pool_cache_put_paddr.9 \
 	pool_cache.9 pool_cache_put.9 \
 	pool_cache.9 pool_cache_destruct_object.9 \
-	pool_cache.9 pool_cache_invalidate.9
+	pool_cache.9 pool_cache_invalidate.9 \
+	pool_cache.9 pool_cache_invalidate_local.9
 MLINKS+=powerhook_establish.9 powerhook_disestablish.9
 MLINKS+=preempt.9 yield.9
 MLINKS+=ras.9 ras_lookup.9 \
share/man/man9/pool_cache.9
@@ -1,4 +1,4 @@
-.\" $NetBSD: pool_cache.9,v 1.10 2009/09/06 19:46:24 jym Exp $
+.\" $NetBSD: pool_cache.9,v 1.11 2009/10/08 21:54:45 jym Exp $
 .\"
 .\" Copyright (c)2003 YAMAMOTO Takashi,
 .\" All rights reserved.
@@ -67,6 +67,7 @@
 .Nm pool_cache_put ,
 .Nm pool_cache_destruct_object ,
 .Nm pool_cache_invalidate ,
+.Nm pool_cache_invalidate_local ,
 .Nm pool_cache_sethiwat ,
 .Nm pool_cache_setlowat
 .Nd resource-pool cache manager
@@ -108,6 +109,10 @@
 .Fn pool_cache_invalidate \
 "pool_cache_t pc"
+.Ft void
+.Fn pool_cache_invalidate_local \
+"pool_cache_t pc"
+.\" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 .Ft void
 .Fn pool_cache_sethiwat \
 "pool_cache_t pc" "int nitems"
 .Ft void
@@ -280,6 +285,17 @@ is still possible to allocate "stale" items from the cache.
 If relevant, the user must check for this condition when allocating
 items.
+.\" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+.It Fn pool_cache_invalidate_local "pc"
+.Pp
+Invalidate the local, per-CPU cache of pool cache
+.Fa pc .
+Destruct and release all objects cached by the calling CPU.
+Only the per-CPU cache associated with the CPU calling the routine
+is invalidated, meaning that stale items can still be allocated from
+the caches of other CPUs or from the global cache.
+It is the caller's responsibility to ensure that such conditions do
+not occur.
 .\" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 .It Fn pool_cache_sethiwat "pc" "nitems"
 .Pp
 A pool will attempt to increase its resource usage to keep up with the demand
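The manual page's caveat is worth spelling out: pool_cache_invalidate_local() only drains the cache of the CPU it runs on, so a caller that wants a system-wide flush still has to reach the other CPUs itself. The sketch below shows one way to do that, assuming the xcall(9) interface (xc_broadcast()/xc_wait()) to run the local invalidation on every CPU before draining the global groups; the helper names are hypothetical and this pattern is not part of this commit.

#include <sys/types.h>
#include <sys/pool.h>
#include <sys/xcall.h>

/*
 * Hypothetical helper (illustration only): run the per-CPU
 * invalidation on every CPU via a low-priority cross call, then
 * drain the global cache groups as well.
 */
static void
flush_local_cache_xc(void *arg1, void *arg2)
{
	pool_cache_t pc = arg1;

	/* Executes once on each CPU; drains that CPU's local cache. */
	pool_cache_invalidate_local(pc);
}

static void
flush_whole_cache(pool_cache_t pc)
{
	uint64_t where;

	/* Broadcast the local invalidation and wait for completion. */
	where = xc_broadcast(0, flush_local_cache_xc, pc, NULL);
	xc_wait(where);

	/* No per-CPU objects remain; now drain the global groups. */
	pool_cache_invalidate(pc);
}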
sys/kern/subr_pool.c
@@ -1,4 +1,4 @@
-/* $NetBSD: subr_pool.c,v 1.174 2009/09/13 18:45:11 pooka Exp $ */
+/* $NetBSD: subr_pool.c,v 1.175 2009/10/08 21:54:45 jym Exp $ */
 
 /*-
  * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008 The NetBSD Foundation, Inc.
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.174 2009/09/13 18:45:11 pooka Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.175 2009/10/08 21:54:45 jym Exp $");
 
 #include "opt_ddb.h"
 #include "opt_pool.h"
@@ -188,6 +188,7 @@ static bool pool_cache_get_slow(pool_cache_cpu_t *, int,
 			    void **, paddr_t *, int);
 static void	pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
 static void	pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
+static void	pool_cache_invalidate_cpu(pool_cache_t, u_int);
 static void	pool_cache_xcall(pool_cache_t);
 
 static int	pool_catchup(struct pool *);
@@ -2122,9 +2123,7 @@ void
 pool_cache_destroy(pool_cache_t pc)
 {
 	struct pool *pp = &pc->pc_pool;
-	pool_cache_cpu_t *cc;
-	pcg_t *pcg;
-	int i;
+	u_int i;
 
 	/* Remove it from the global list. */
 	mutex_enter(&pool_head_lock);
@@ -2142,20 +2141,8 @@ pool_cache_destroy(pool_cache_t pc)
 	mutex_exit(&pp->pr_lock);
 
 	/* Destroy per-CPU data */
-	for (i = 0; i < MAXCPUS; i++) {
-		if ((cc = pc->pc_cpus[i]) == NULL)
-			continue;
-		if ((pcg = cc->cc_current) != &pcg_dummy) {
-			pcg->pcg_next = NULL;
-			pool_cache_invalidate_groups(pc, pcg);
-		}
-		if ((pcg = cc->cc_previous) != &pcg_dummy) {
-			pcg->pcg_next = NULL;
-			pool_cache_invalidate_groups(pc, pcg);
-		}
-		if (cc != &pc->pc_cpu0)
-			pool_put(&cache_cpu_pool, cc);
-	}
+	for (i = 0; i < MAXCPUS; i++)
+		pool_cache_invalidate_cpu(pc, i);
 
 	/* Finally, destroy it. */
 	mutex_destroy(&pc->pc_lock);
@@ -2325,6 +2312,54 @@ pool_cache_invalidate(pool_cache_t pc)
 		pool_cache_invalidate_groups(pc, part);
 }
 
+/*
+ * pool_cache_invalidate_local:
+ *
+ *	Invalidate all local ('current CPU') cached objects in
+ *	the pool cache.
+ *	It is the caller's responsibility to ensure that no operation is
+ *	taking place on this pool cache while doing the local invalidation.
+ */
+void
+pool_cache_invalidate_local(pool_cache_t pc)
+{
+	pool_cache_invalidate_cpu(pc, curcpu()->ci_index);
+}
+
+/*
+ * pool_cache_invalidate_cpu:
+ *
+ *	Invalidate all CPU-bound cached objects in the pool cache, the CPU
+ *	being identified by its associated index.
+ *	It is the caller's responsibility to ensure that no operation is
+ *	taking place on this pool cache while doing this invalidation.
+ *	WARNING: as no inter-CPU locking is enforced, trying to invalidate
+ *	pool cached objects from a CPU different from the one currently
+ *	running may result in undefined behaviour.
+ */
+static void
+pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
+{
+
+	pool_cache_cpu_t *cc;
+	pcg_t *pcg;
+
+	if ((cc = pc->pc_cpus[index]) == NULL)
+		return;
+
+	if ((pcg = cc->cc_current) != &pcg_dummy) {
+		pcg->pcg_next = NULL;
+		pool_cache_invalidate_groups(pc, pcg);
+	}
+	if ((pcg = cc->cc_previous) != &pcg_dummy) {
+		pcg->pcg_next = NULL;
+		pool_cache_invalidate_groups(pc, pcg);
+	}
+	if (cc != &pc->pc_cpu0)
+		pool_put(&cache_cpu_pool, cc);
+
+}
+
 void
 pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
 {
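The WARNING in pool_cache_invalidate_cpu() is why pool_cache_invalidate_local() must run on the CPU whose cache it drains: there is no inter-CPU locking, and the target is resolved with curcpu(). A minimal sketch of a safe call site follows; it assumes kpreempt_disable()/kpreempt_enable() are visible through <sys/cpu.h>, and the helper name is made up for illustration.

#include <sys/pool.h>
#include <sys/cpu.h>	/* assumed: curcpu(), kpreempt_disable(), kpreempt_enable() */

/*
 * Hypothetical call site (illustration only): pin the thread to the
 * current CPU for the duration of the call so that curcpu() inside
 * pool_cache_invalidate_local() keeps naming the same CPU.
 */
static void
drain_this_cpus_cache(pool_cache_t pc)
{

	kpreempt_disable();
	pool_cache_invalidate_local(pc);
	kpreempt_enable();
}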
sys/sys/pool.h
@@ -1,4 +1,4 @@
-/* $NetBSD: pool.h,v 1.65 2009/09/13 18:45:12 pooka Exp $ */
+/* $NetBSD: pool.h,v 1.66 2009/10/08 21:54:45 jym Exp $ */
 
 /*-
  * Copyright (c) 1997, 1998, 1999, 2000, 2007 The NetBSD Foundation, Inc.
@@ -321,6 +321,7 @@ void *pool_cache_get_paddr(pool_cache_t, int, paddr_t *);
 void	pool_cache_put_paddr(pool_cache_t, void *, paddr_t);
 void	pool_cache_destruct_object(pool_cache_t, void *);
 void	pool_cache_invalidate(pool_cache_t);
+void	pool_cache_invalidate_local(pool_cache_t);
 bool	pool_cache_reclaim(pool_cache_t);
 void	pool_cache_set_drain_hook(pool_cache_t,
 		void (*)(void *, int), void *);
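With the prototype exported from <sys/pool.h>, a subsystem can call the new routine directly. A minimal usage sketch follows; struct foo, foo_cache and the foo_* functions are hypothetical names, and the per-CPU teardown path is assumed to run on the CPU whose cache it empties.

#include <sys/pool.h>
#include <sys/intr.h>	/* IPL_NONE */

struct foo {
	int	f_val;
};

static pool_cache_t foo_cache;

/* Create the cache at subsystem attach time. */
static void
foo_init(void)
{
	foo_cache = pool_cache_init(sizeof(struct foo), 0, 0, 0,
	    "foocache", NULL, IPL_NONE, NULL, NULL, NULL);
}

/* Per-CPU teardown: drop only this CPU's cached objects. */
static void
foo_cpu_detach(void)
{
	pool_cache_invalidate_local(foo_cache);
}

/* Full teardown: invalidate everything, then destroy the cache. */
static void
foo_fini(void)
{
	pool_cache_invalidate(foo_cache);
	pool_cache_destroy(foo_cache);
}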