optimize pool_caches similarly to how I optimized pools before:

split the single list of pool cache groups into three lists:
completely full, partially full, and completely empty.
use LIST instead of TAILQ where appropriate.
chs 2005-10-02 17:29:31 +00:00
parent 18b5fc9e5c
commit 9c47d944e4
3 changed files with 208 additions and 169 deletions
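To make the new scheme concrete, here is a minimal userland sketch of the three-list bookkeeping this commit introduces. The names (cache_get, cache_put, cg_*, NOBJECTS), the use of calloc instead of the pcgpool, and the absence of locking and statistics are simplifications for illustration, not the kernel's pool_cache API. The point is that a get or put only ever looks at the head of the appropriate list and moves a group between the empty, partial and full lists as its occupancy changes, instead of walking one long group list and maintaining the pc_allocfrom/pc_freeto hint pointers.

/*
 * Illustrative sketch only: a stripped-down object cache with
 * full/partial/empty group lists.  Not the kernel's pool_cache code.
 */
#include <sys/queue.h>
#include <stdlib.h>

#define NOBJECTS 16				/* objects per group; the kernel's PCG_NOBJECTS */

struct cache_group {
	LIST_ENTRY(cache_group) cg_list;	/* link on exactly one of the three lists */
	unsigned int cg_avail;			/* # of cached objects held in this group */
	void *cg_objects[NOBJECTS];
};

LIST_HEAD(grouplist, cache_group);

struct cache {
	struct grouplist c_emptygroups;		/* groups with cg_avail == 0 */
	struct grouplist c_partgroups;		/* groups with 0 < cg_avail < NOBJECTS */
	struct grouplist c_fullgroups;		/* groups with cg_avail == NOBJECTS */
};

static void
cache_init(struct cache *c)
{
	LIST_INIT(&c->c_emptygroups);
	LIST_INIT(&c->c_partgroups);
	LIST_INIT(&c->c_fullgroups);
}

/* Return a cached object, or NULL on a miss (the caller then goes to the pool). */
static void *
cache_get(struct cache *c)
{
	struct cache_group *cg;
	void *obj;

	if ((cg = LIST_FIRST(&c->c_partgroups)) == NULL) {
		/* No partial group; promote a full one, if any. */
		if ((cg = LIST_FIRST(&c->c_fullgroups)) == NULL)
			return NULL;
		LIST_REMOVE(cg, cg_list);
		LIST_INSERT_HEAD(&c->c_partgroups, cg, cg_list);
	}
	obj = cg->cg_objects[--cg->cg_avail];
	if (cg->cg_avail == 0) {
		/* Group drained; park it on the empty list. */
		LIST_REMOVE(cg, cg_list);
		LIST_INSERT_HEAD(&c->c_emptygroups, cg, cg_list);
	}
	return obj;
}

/* Stash an object; returns 0, or -1 if no group can be found or allocated. */
static int
cache_put(struct cache *c, void *obj)
{
	struct cache_group *cg;

	if ((cg = LIST_FIRST(&c->c_partgroups)) == NULL) {
		/* No partial group; reuse an empty one, or allocate a fresh group. */
		if ((cg = LIST_FIRST(&c->c_emptygroups)) != NULL)
			LIST_REMOVE(cg, cg_list);
		else if ((cg = calloc(1, sizeof(*cg))) == NULL)
			return -1;		/* caller frees the object back to the pool */
		LIST_INSERT_HEAD(&c->c_partgroups, cg, cg_list);
	}
	cg->cg_objects[cg->cg_avail++] = obj;
	if (cg->cg_avail == NOBJECTS) {
		/* Group is now full; move it over to the full list. */
		LIST_REMOVE(cg, cg_list);
		LIST_INSERT_HEAD(&c->c_fullgroups, cg, cg_list);
	}
	return 0;
}

Because groups are only ever taken from and inserted at the head of a list, the lighter LIST from <sys/queue.h> (whose head carries no tail pointer) is sufficient, which is the second half of the change: the remaining TAILQs for the global pool list, the per-pool cache list, the cache group lists and the per-page item lists are all converted to LISTs in the diff below.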

sys/kern/subr_pool.c

@ -1,4 +1,4 @@
/* $NetBSD: subr_pool.c,v 1.101 2005/06/18 01:34:03 thorpej Exp $ */
/* $NetBSD: subr_pool.c,v 1.102 2005/10/02 17:29:31 chs Exp $ */
/*-
* Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
@ -38,7 +38,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.101 2005/06/18 01:34:03 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.102 2005/10/02 17:29:31 chs Exp $");
#include "opt_pool.h"
#include "opt_poollog.h"
@ -70,7 +70,7 @@ __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.101 2005/06/18 01:34:03 thorpej Exp
*/
/* List of all pools */
TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
LIST_HEAD(,pool) pool_head = LIST_HEAD_INITIALIZER(pool_head);
/* Private pool for page header structures */
#define PHPOOL_MAX 8
@ -112,7 +112,7 @@ struct pool_item_header {
union {
/* !PR_NOTOUCH */
struct {
TAILQ_HEAD(, pool_item)
LIST_HEAD(, pool_item)
phu_itemlist; /* chunk list for this page */
} phu_normal;
/* PR_NOTOUCH */
@ -139,7 +139,7 @@ struct pool_item {
#endif
#define PI_MAGIC 0xdeadbeefU
/* Other entries use only this list entry */
TAILQ_ENTRY(pool_item) pi_list;
LIST_ENTRY(pool_item) pi_list;
};
#define POOL_NEEDS_CATCHUP(pp) \
@ -174,7 +174,9 @@ struct pool_item {
/* The cache group pool. */
static struct pool pcgpool;
static void pool_cache_reclaim(struct pool_cache *, struct pool_pagelist *);
static void pool_cache_reclaim(struct pool_cache *, struct pool_pagelist *,
struct pool_cache_grouplist *);
static void pcg_grouplist_free(struct pool_cache_grouplist *);
static int pool_catchup(struct pool *);
static void pool_prime_page(struct pool *, caddr_t,
@ -537,7 +539,7 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
LIST_INIT(&pp->pr_emptypages);
LIST_INIT(&pp->pr_fullpages);
LIST_INIT(&pp->pr_partpages);
TAILQ_INIT(&pp->pr_cachelist);
LIST_INIT(&pp->pr_cachelist);
pp->pr_curpage = NULL;
pp->pr_npages = 0;
pp->pr_minitems = 0;
@ -687,7 +689,7 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
/* Insert into the list of all pools. */
simple_lock(&pool_head_slock);
TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
LIST_INSERT_HEAD(&pool_head, pp, pr_poollist);
simple_unlock(&pool_head_slock);
/* Insert this into the list of pools using this allocator. */
@ -710,7 +712,7 @@ pool_destroy(struct pool *pp)
/* Remove from global pool list */
simple_lock(&pool_head_slock);
TAILQ_REMOVE(&pool_head, pp, pr_poollist);
LIST_REMOVE(pp, pr_poollist);
if (drainpp == pp)
drainpp = NULL;
simple_unlock(&pool_head_slock);
@ -725,7 +727,7 @@ pool_destroy(struct pool *pp)
s = splvm();
simple_lock(&pp->pr_slock);
KASSERT(TAILQ_EMPTY(&pp->pr_cachelist));
KASSERT(LIST_EMPTY(&pp->pr_cachelist));
#ifdef DIAGNOSTIC
if (pp->pr_nout != 0) {
@ -808,11 +810,12 @@ pool_get(struct pool *pp, int flags)
(flags & PR_WAITOK) != 0))
panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
#endif /* DIAGNOSTIC */
#ifdef LOCKDEBUG
if (flags & PR_WAITOK)
simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
SCHED_ASSERT_UNLOCKED();
#endif
#endif /* DIAGNOSTIC */
simple_lock(&pp->pr_slock);
pr_enter(pp, file, line);
@ -959,7 +962,7 @@ pool_get(struct pool *pp, int flags)
pr_log(pp, v, PRLOG_GET, file, line);
#endif
} else {
v = pi = TAILQ_FIRST(&ph->ph_itemlist);
v = pi = LIST_FIRST(&ph->ph_itemlist);
if (__predict_false(v == NULL)) {
pr_leave(pp);
simple_unlock(&pp->pr_slock);
@ -991,7 +994,7 @@ pool_get(struct pool *pp, int flags)
/*
* Remove from item list.
*/
TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
LIST_REMOVE(pi, pi_list);
}
pp->pr_nitems--;
pp->pr_nout++;
@ -1013,7 +1016,7 @@ pool_get(struct pool *pp, int flags)
if (ph->ph_nmissing == pp->pr_itemsperpage) {
#ifdef DIAGNOSTIC
if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
!TAILQ_EMPTY(&ph->ph_itemlist))) {
!LIST_EMPTY(&ph->ph_itemlist))) {
pr_leave(pp);
simple_unlock(&pp->pr_slock);
panic("pool_get: %s: nmissing inconsistent",
@ -1060,6 +1063,7 @@ pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
int s;
LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
SCHED_ASSERT_UNLOCKED();
page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
@ -1102,7 +1106,7 @@ pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
}
#endif
TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
}
KDASSERT(ph->ph_nmissing != 0);
ph->ph_nmissing--;
@ -1192,8 +1196,7 @@ _pool_put(struct pool *pp, void *v, const char *file, long line)
pr_leave(pp);
simple_unlock(&pp->pr_slock);
if (! LIST_EMPTY(&pq))
pr_pagelist_free(pp, &pq);
pr_pagelist_free(pp, &pq);
}
#undef pool_put
#endif /* POOL_DIAGNOSTIC */
@ -1209,8 +1212,7 @@ pool_put(struct pool *pp, void *v)
pool_do_put(pp, v, &pq);
simple_unlock(&pp->pr_slock);
if (! LIST_EMPTY(&pq))
pr_pagelist_free(pp, &pq);
pr_pagelist_free(pp, &pq);
}
#ifdef POOL_DIAGNOSTIC
@ -1283,7 +1285,7 @@ pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
* Insert page header.
*/
LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
TAILQ_INIT(&ph->ph_itemlist);
LIST_INIT(&ph->ph_itemlist);
ph->ph_page = storage;
ph->ph_nmissing = 0;
s = splclock();
@ -1329,7 +1331,7 @@ pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
/* Insert on page list */
TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
#ifdef DIAGNOSTIC
pi->pi_magic = PI_MAGIC;
#endif
@ -1470,9 +1472,9 @@ pool_reclaim(struct pool *pp)
{
struct pool_item_header *ph, *phnext;
struct pool_cache *pc;
struct timeval curtime;
struct pool_pagelist pq;
struct timeval diff;
struct pool_cache_grouplist pcgl;
struct timeval curtime, diff;
int s;
if (pp->pr_drain_hook != NULL) {
@ -1487,12 +1489,13 @@ pool_reclaim(struct pool *pp)
pr_enter(pp, file, line);
LIST_INIT(&pq);
LIST_INIT(&pcgl);
/*
* Reclaim items from the pool's caches.
*/
TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
pool_cache_reclaim(pc, &pq);
LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
pool_cache_reclaim(pc, &pq, &pcgl);
s = splclock();
curtime = mono_time;
@ -1523,10 +1526,11 @@ pool_reclaim(struct pool *pp)
pr_leave(pp);
simple_unlock(&pp->pr_slock);
if (LIST_EMPTY(&pq))
return (0);
if (LIST_EMPTY(&pq) && LIST_EMPTY(&pcgl))
return 0;
pr_pagelist_free(pp, &pq);
pcg_grouplist_free(&pcgl);
return (1);
}
@ -1545,11 +1549,11 @@ pool_drain(void *arg)
s = splvm();
simple_lock(&pool_head_slock);
if (drainpp == NULL) {
drainpp = TAILQ_FIRST(&pool_head);
drainpp = LIST_FIRST(&pool_head);
}
if (drainpp) {
pp = drainpp;
drainpp = TAILQ_NEXT(pp, pr_poollist);
drainpp = LIST_NEXT(pp, pr_poollist);
}
simple_unlock(&pool_head_slock);
pool_reclaim(pp);
@ -1622,7 +1626,7 @@ pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
(u_long)ph->ph_time.tv_usec);
#ifdef DIAGNOSTIC
if (!(pp->pr_roflags & PR_NOTOUCH)) {
TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
if (pi->pi_magic != PI_MAGIC) {
(*pr)("\t\t\titem %p, magic 0x%x\n",
pi, pi->pi_magic);
@ -1697,27 +1701,35 @@ pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
if (print_cache == 0)
goto skip_cache;
TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
(*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
pc->pc_allocfrom, pc->pc_freeto);
#define PR_GROUPLIST(pcg) \
(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
for (i = 0; i < PCG_NOBJECTS; i++) { \
if (pcg->pcg_objects[i].pcgo_pa != \
POOL_PADDR_INVALID) { \
(*pr)("\t\t\t%p, 0x%llx\n", \
pcg->pcg_objects[i].pcgo_va, \
(unsigned long long) \
pcg->pcg_objects[i].pcgo_pa); \
} else { \
(*pr)("\t\t\t%p\n", \
pcg->pcg_objects[i].pcgo_va); \
} \
}
LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
(*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
for (i = 0; i < PCG_NOBJECTS; i++) {
if (pcg->pcg_objects[i].pcgo_pa !=
POOL_PADDR_INVALID) {
(*pr)("\t\t\t%p, 0x%llx\n",
pcg->pcg_objects[i].pcgo_va,
(unsigned long long)
pcg->pcg_objects[i].pcgo_pa);
} else {
(*pr)("\t\t\t%p\n",
pcg->pcg_objects[i].pcgo_va);
}
}
}
(*pr)("\t full groups:\n");
LIST_FOREACH(pcg, &pc->pc_fullgroups, pcg_list)
PR_GROUPLIST(pcg);
(*pr)("\t partial groups:\n");
LIST_FOREACH(pcg, &pc->pc_partgroups, pcg_list)
PR_GROUPLIST(pcg);
(*pr)("\t empty groups:\n");
LIST_FOREACH(pcg, &pc->pc_emptygroups, pcg_list)
PR_GROUPLIST(pcg);
}
#undef PR_GROUPLIST
skip_cache:
pr_enter_check(pp, pr);
@ -1745,9 +1757,9 @@ pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
if ((pp->pr_roflags & PR_NOTOUCH) != 0)
return 0;
for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
pi != NULL;
pi = TAILQ_NEXT(pi,pi_list), n++) {
pi = LIST_NEXT(pi,pi_list), n++) {
#ifdef DIAGNOSTIC
if (pi->pi_magic != PI_MAGIC) {
@ -1824,11 +1836,11 @@ pool_cache_init(struct pool_cache *pc, struct pool *pp,
void *arg)
{
TAILQ_INIT(&pc->pc_grouplist);
LIST_INIT(&pc->pc_emptygroups);
LIST_INIT(&pc->pc_fullgroups);
LIST_INIT(&pc->pc_partgroups);
simple_lock_init(&pc->pc_slock);
pc->pc_allocfrom = NULL;
pc->pc_freeto = NULL;
pc->pc_pool = pp;
pc->pc_ctor = ctor;
@ -1843,7 +1855,7 @@ pool_cache_init(struct pool_cache *pc, struct pool *pp,
pc->pc_nitems = 0;
simple_lock(&pp->pr_slock);
TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
LIST_INSERT_HEAD(&pp->pr_cachelist, pc, pc_poollist);
simple_unlock(&pp->pr_slock);
}
@ -1862,7 +1874,7 @@ pool_cache_destroy(struct pool_cache *pc)
/* ...and remove it from the pool's cache list. */
simple_lock(&pp->pr_slock);
TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist);
LIST_REMOVE(pc, pc_poollist);
simple_unlock(&pp->pr_slock);
}
@ -1898,6 +1910,20 @@ pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
pcg->pcg_objects[idx].pcgo_pa = pa;
}
static void
pcg_grouplist_free(struct pool_cache_grouplist *pcgl)
{
struct pool_cache_group *pcg;
int s;
s = splvm();
while ((pcg = LIST_FIRST(pcgl)) != NULL) {
LIST_REMOVE(pcg, pcg_list);
pool_put(&pcgpool, pcg);
}
splx(s);
}
/*
* pool_cache_get{,_paddr}:
*
@ -1917,13 +1943,15 @@ pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
simple_lock(&pc->pc_slock);
if ((pcg = pc->pc_allocfrom) == NULL) {
TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
if (pcg->pcg_avail != 0) {
pc->pc_allocfrom = pcg;
goto have_group;
}
pcg = LIST_FIRST(&pc->pc_partgroups);
if (pcg == NULL) {
pcg = LIST_FIRST(&pc->pc_fullgroups);
if (pcg != NULL) {
LIST_REMOVE(pcg, pcg_list);
LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
}
}
if (pcg == NULL) {
/*
* No groups with any available objects. Allocate
@ -1950,14 +1978,14 @@ pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
return (object);
}
have_group:
pc->pc_hits++;
pc->pc_nitems--;
object = pcg_get(pcg, pap);
if (pcg->pcg_avail == 0)
pc->pc_allocfrom = NULL;
if (pcg->pcg_avail == 0) {
LIST_REMOVE(pcg, pcg_list);
LIST_INSERT_HEAD(&pc->pc_emptygroups, pcg, pcg_list);
}
simple_unlock(&pc->pc_slock);
return (object);
@ -1977,13 +2005,15 @@ pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
simple_lock(&pc->pc_slock);
if ((pcg = pc->pc_freeto) == NULL) {
TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
if (pcg->pcg_avail != PCG_NOBJECTS) {
pc->pc_freeto = pcg;
goto have_group;
}
pcg = LIST_FIRST(&pc->pc_partgroups);
if (pcg == NULL) {
pcg = LIST_FIRST(&pc->pc_emptygroups);
if (pcg != NULL) {
LIST_REMOVE(pcg, pcg_list);
LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
}
}
if (pcg == NULL) {
/*
* No empty groups to free the object to. Attempt to
@ -1993,31 +2023,28 @@ pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
s = splvm();
pcg = pool_get(&pcgpool, PR_NOWAIT);
splx(s);
if (pcg != NULL) {
memset(pcg, 0, sizeof(*pcg));
simple_lock(&pc->pc_slock);
pc->pc_ngroups++;
TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
if (pc->pc_freeto == NULL)
pc->pc_freeto = pcg;
goto have_group;
}
if (pcg == NULL) {
/*
* Unable to allocate a cache group; destruct the object
* and free it back to the pool.
*/
pool_cache_destruct_object(pc, object);
return;
/*
* Unable to allocate a cache group; destruct the object
* and free it back to the pool.
*/
pool_cache_destruct_object(pc, object);
return;
}
memset(pcg, 0, sizeof(*pcg));
simple_lock(&pc->pc_slock);
pc->pc_ngroups++;
LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
}
have_group:
pc->pc_nitems++;
pcg_put(pcg, object, pa);
if (pcg->pcg_avail == PCG_NOBJECTS)
pc->pc_freeto = NULL;
if (pcg->pcg_avail == PCG_NOBJECTS) {
LIST_REMOVE(pcg, pcg_list);
LIST_INSERT_HEAD(&pc->pc_fullgroups, pcg, pcg_list);
}
simple_unlock(&pc->pc_slock);
}
@ -2036,6 +2063,36 @@ pool_cache_destruct_object(struct pool_cache *pc, void *object)
pool_put(pc->pc_pool, object);
}
static void
pool_do_cache_invalidate(struct pool_cache *pc, struct pool_pagelist *pq,
struct pool_cache_grouplist *pcgl)
{
struct pool_cache_group *pcg, *npcg;
void *object;
boolean_t firstpass = TRUE;
LOCK_ASSERT(simple_lock_held(&pc->pc_slock));
LOCK_ASSERT(simple_lock_held(&pc->pc_pool->pr_slock));
for (pcg = LIST_FIRST(&pc->pc_partgroups); pcg != NULL;
pcg = npcg) {
npcg = LIST_NEXT(pcg, pcg_list);
if (npcg == NULL && firstpass) {
npcg = LIST_FIRST(&pc->pc_fullgroups);
firstpass = FALSE;
}
while (pcg->pcg_avail != 0) {
pc->pc_nitems--;
object = pcg_get(pcg, NULL);
if (pc->pc_dtor != NULL)
(*pc->pc_dtor)(pc->pc_arg, object);
pool_do_put(pc->pc_pool, object, pq);
}
LIST_REMOVE(pcg, pcg_list);
LIST_INSERT_HEAD(pcgl, pcg, pcg_list);
}
}
/*
* pool_cache_invalidate:
*
@ -2046,33 +2103,21 @@ void
pool_cache_invalidate(struct pool_cache *pc)
{
struct pool_pagelist pq;
struct pool_cache_group *pcg, *npcg;
void *object;
struct pool_cache_grouplist pcgl;
LIST_INIT(&pq);
LIST_INIT(&pcgl);
simple_lock(&pc->pc_slock);
simple_lock(&pc->pc_pool->pr_slock);
for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
pcg = npcg) {
npcg = TAILQ_NEXT(pcg, pcg_list);
while (pcg->pcg_avail != 0) {
pc->pc_nitems--;
object = pcg_get(pcg, NULL);
if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
pc->pc_allocfrom = NULL;
if (pc->pc_dtor != NULL)
(*pc->pc_dtor)(pc->pc_arg, object);
pool_do_put(pc->pc_pool, object, &pq);
}
}
pool_do_cache_invalidate(pc, &pq, &pcgl);
simple_unlock(&pc->pc_pool->pr_slock);
simple_unlock(&pc->pc_slock);
if (! LIST_EMPTY(&pq))
pr_pagelist_free(pc->pc_pool, &pq);
pr_pagelist_free(pc->pc_pool, &pq);
pcg_grouplist_free(&pcgl);
}
/*
@ -2081,11 +2126,9 @@ pool_cache_invalidate(struct pool_cache *pc)
* Reclaim a pool cache for pool_reclaim().
*/
static void
pool_cache_reclaim(struct pool_cache *pc, struct pool_pagelist *pq)
pool_cache_reclaim(struct pool_cache *pc, struct pool_pagelist *pq,
struct pool_cache_grouplist *pcgl)
{
struct pool_cache_group *pcg, *npcg;
void *object;
int s;
/*
* We're locking in the wrong order (normally pool_cache -> pool,
@ -2096,26 +2139,7 @@ pool_cache_reclaim(struct pool_cache *pc, struct pool_pagelist *pq)
if (simple_lock_try(&pc->pc_slock) == 0)
return;
for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
pcg = npcg) {
npcg = TAILQ_NEXT(pcg, pcg_list);
while (pcg->pcg_avail != 0) {
pc->pc_nitems--;
object = pcg_get(pcg, NULL);
if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
pc->pc_allocfrom = NULL;
if (pc->pc_dtor != NULL)
(*pc->pc_dtor)(pc->pc_arg, object);
pool_do_put(pc->pc_pool, object, pq);
}
pc->pc_ngroups--;
TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
if (pc->pc_freeto == pcg)
pc->pc_freeto = NULL;
s = splvm();
pool_put(&pcgpool, pcg);
splx(s);
}
pool_do_cache_invalidate(pc, pq, pcgl);
simple_unlock(&pc->pc_slock);
}

sys/sys/pool.h

@ -1,4 +1,4 @@
/* $NetBSD: pool.h,v 1.45 2005/01/01 21:04:39 yamt Exp $ */
/* $NetBSD: pool.h,v 1.46 2005/10/02 17:29:31 chs Exp $ */
/*-
* Copyright (c) 1997, 1998, 1999, 2000 The NetBSD Foundation, Inc.
@ -62,7 +62,7 @@
#ifdef __POOL_EXPOSE
/* The pool cache group. */
struct pool_cache_group {
TAILQ_ENTRY(pool_cache_group)
LIST_ENTRY(pool_cache_group)
pcg_list; /* link in the pool cache's group list */
u_int pcg_avail; /* # available objects */
/* pointers to the objects */
@ -72,15 +72,16 @@ struct pool_cache_group {
} pcg_objects[PCG_NOBJECTS];
};
LIST_HEAD(pool_cache_grouplist,pool_cache_group);
struct pool_cache {
TAILQ_ENTRY(pool_cache)
LIST_ENTRY(pool_cache)
pc_poollist; /* entry on pool's group list */
TAILQ_HEAD(, pool_cache_group)
pc_grouplist; /* Cache group list */
struct pool_cache_group
*pc_allocfrom; /* group to allocate from */
struct pool_cache_group
*pc_freeto; /* grop to free to */
struct pool_cache_grouplist
pc_emptygroups; /* list of empty cache groups */
struct pool_cache_grouplist
pc_fullgroups; /* list of full cache groups */
struct pool_cache_grouplist
pc_partgroups; /* list of partial cache groups */
struct pool *pc_pool; /* parent pool */
struct simplelock pc_slock; /* mutex */
@ -115,7 +116,7 @@ struct pool_allocator {
LIST_HEAD(pool_pagelist,pool_item_header);
struct pool {
TAILQ_ENTRY(pool)
LIST_ENTRY(pool)
pr_poollist;
struct pool_pagelist
pr_emptypages; /* Empty pages */
@ -125,7 +126,7 @@ struct pool {
pr_partpages; /* Partially-allocated pages */
struct pool_item_header *pr_curpage;
struct pool *pr_phpool; /* Pool item header pool */
TAILQ_HEAD(,pool_cache)
LIST_HEAD(,pool_cache)
pr_cachelist; /* Caches for this pool */
unsigned int pr_size; /* Size of item */
unsigned int pr_align; /* Requested alignment, must be 2^n */

usr.bin/vmstat/vmstat.c

@ -1,4 +1,4 @@
/* $NetBSD: vmstat.c,v 1.136 2005/08/07 12:23:20 blymn Exp $ */
/* $NetBSD: vmstat.c,v 1.137 2005/10/02 17:29:31 chs Exp $ */
/*-
* Copyright (c) 1998, 2000, 2001 The NetBSD Foundation, Inc.
@ -77,7 +77,7 @@ __COPYRIGHT("@(#) Copyright (c) 1980, 1986, 1991, 1993\n\
#if 0
static char sccsid[] = "@(#)vmstat.c 8.2 (Berkeley) 3/1/95";
#else
__RCSID("$NetBSD: vmstat.c,v 1.136 2005/08/07 12:23:20 blymn Exp $");
__RCSID("$NetBSD: vmstat.c,v 1.137 2005/10/02 17:29:31 chs Exp $");
#endif
#endif /* not lint */
@ -1155,17 +1155,17 @@ dopool(int verbose, int wide)
int first, ovflw;
void *addr;
long total, inuse, this_total, this_inuse;
TAILQ_HEAD(,pool) pool_head;
LIST_HEAD(,pool) pool_head;
struct pool pool, *pp = &pool;
struct pool_allocator pa;
char name[32], maxp[32];
kread(namelist, X_POOLHEAD, &pool_head, sizeof(pool_head));
addr = TAILQ_FIRST(&pool_head);
addr = LIST_FIRST(&pool_head);
total = inuse = 0;
for (first = 1; addr != NULL; addr = TAILQ_NEXT(pp, pr_poollist) ) {
for (first = 1; addr != NULL; addr = LIST_NEXT(pp, pr_poollist) ) {
deref_kptr(addr, pp, sizeof(*pp), "pool chain trashed");
deref_kptr(pp->pr_alloc, &pa, sizeof(pa),
"pool allocator trashed");
@ -1281,36 +1281,50 @@ dopoolcache(struct pool *pp, int verbose)
if (verbose < 1)
return;
for (addr = TAILQ_FIRST(&pp->pr_cachelist); addr != NULL;
addr = TAILQ_NEXT(pc, pc_poollist)) {
#define PR_GROUPLIST \
deref_kptr(pcg_addr, pcg, sizeof(*pcg), \
"pool cache group trashed"); \
printf("\t\tgroup %p: avail %d\n", pcg_addr, \
pcg->pcg_avail); \
for (i = 0; i < PCG_NOBJECTS; i++) { \
if (pcg->pcg_objects[i].pcgo_pa != \
POOL_PADDR_INVALID) { \
printf("\t\t\t%p, 0x%llx\n", \
pcg->pcg_objects[i].pcgo_va, \
(unsigned long long) \
pcg->pcg_objects[i].pcgo_pa); \
} else { \
printf("\t\t\t%p\n", \
pcg->pcg_objects[i].pcgo_va); \
} \
}
for (addr = LIST_FIRST(&pp->pr_cachelist); addr != NULL;
addr = LIST_NEXT(pc, pc_poollist)) {
deref_kptr(addr, pc, sizeof(*pc), "pool cache trashed");
printf("\tcache %p: allocfrom %p freeto %p\n", addr,
pc->pc_allocfrom, pc->pc_freeto);
printf("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
if (verbose < 2)
continue;
for (pcg_addr = TAILQ_FIRST(&pc->pc_grouplist);
pcg_addr != NULL; pcg_addr = TAILQ_NEXT(pcg, pcg_list)) {
deref_kptr(pcg_addr, pcg, sizeof(*pcg),
"pool cache group trashed");
printf("\t\tgroup %p: avail %d\n", pcg_addr,
pcg->pcg_avail);
for (i = 0; i < PCG_NOBJECTS; i++) {
if (pcg->pcg_objects[i].pcgo_pa !=
POOL_PADDR_INVALID) {
printf("\t\t\t%p, 0x%llx\n",
pcg->pcg_objects[i].pcgo_va,
(unsigned long long)
pcg->pcg_objects[i].pcgo_pa);
} else {
printf("\t\t\t%p\n",
pcg->pcg_objects[i].pcgo_va);
}
}
printf("\t full groups:\n");
for (pcg_addr = LIST_FIRST(&pc->pc_fullgroups);
pcg_addr != NULL; pcg_addr = LIST_NEXT(pcg, pcg_list)) {
PR_GROUPLIST;
}
printf("\t partial groups:\n");
for (pcg_addr = LIST_FIRST(&pc->pc_partgroups);
pcg_addr != NULL; pcg_addr = LIST_NEXT(pcg, pcg_list)) {
PR_GROUPLIST;
}
printf("\t empty groups:\n");
for (pcg_addr = LIST_FIRST(&pc->pc_emptygroups);
pcg_addr != NULL; pcg_addr = LIST_NEXT(pcg, pcg_list)) {
PR_GROUPLIST;
}
}
#undef PR_GROUPLIST
}
enum hashtype { /* from <sys/systm.h> */