vfs_cache:

- Don't use goto in critical paths; it can confuse the compiler.
- Sprinkle some branch hints.
- Make namecache stats per-CPU and collate once per second (see the sketch below).
- Use vtryget().
ad 2008-06-03 15:50:22 +00:00
parent bd8a810c44
commit 1b23b70818
3 changed files with 143 additions and 100 deletions
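
The per-CPU statistics change follows a standard pattern: each CPU counts into a private copy of the stats under its own lock, so the hot path never touches a shared cache line, and a periodic pass folds the private copies into the global totals. A minimal userspace sketch of the pattern, with pthreads standing in for kernel mutexes (all names here are illustrative, not from the NetBSD source):

```c
#include <pthread.h>

#define NCPU	4

struct stats { long hits; long misses; };	/* stand-in for nchstats */

static struct stats global;			/* collated totals */
static struct {
	pthread_mutex_t lock;			/* stand-in for cpu_lock */
	struct stats s;				/* stand-in for cpu_stats */
} percpu[NCPU];

static void
stats_init(void)
{
	for (int i = 0; i < NCPU; i++)
		pthread_mutex_init(&percpu[i].lock, NULL);
}

/* Hot path: only this CPU's copy is touched; no shared counter. */
static void
count_hit(int cpu)
{
	pthread_mutex_lock(&percpu[cpu].lock);
	percpu[cpu].s.hits++;
	pthread_mutex_unlock(&percpu[cpu].lock);
}

/* Cold path, run periodically: fold each private copy into the
 * global totals and zero it, as cache_lock_cpus() does below. */
static void
collate(void)
{
	for (int i = 0; i < NCPU; i++) {
		pthread_mutex_lock(&percpu[i].lock);
		global.hits += percpu[i].s.hits;
		global.misses += percpu[i].s.misses;
		percpu[i].s.hits = 0;
		percpu[i].s.misses = 0;
		pthread_mutex_unlock(&percpu[i].lock);
	}
}
```

The per-CPU lock is cheap because it is almost never contended: only the owning CPU and the once-a-second collation pass ever take it.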

vfs_cache.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: vfs_cache.c,v 1.76 2008/05/05 17:11:17 ad Exp $	*/
+/*	$NetBSD: vfs_cache.c,v 1.77 2008/06/03 15:50:22 ad Exp $	*/
 
 /*-
  * Copyright (c) 2008 The NetBSD Foundation, Inc.
@@ -58,7 +58,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.76 2008/05/05 17:11:17 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.77 2008/06/03 15:50:22 ad Exp $");
 
 #include "opt_ddb.h"
 #include "opt_revcache.h"
@@ -100,6 +100,14 @@ __KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.76 2008/05/05 17:11:17 ad Exp $");
  * number has changed while waiting for the lock.
  */
 
+/*
+ * Per-cpu namecache data.
+ */
+struct nchcpu {
+	kmutex_t	cpu_lock;
+	struct nchstats	cpu_stats;
+};
+
 /*
  * Structures associated with name cacheing.
  */
@@ -118,7 +126,7 @@ static void *cache_gcqueue; /* garbage collection queue */
 TAILQ_HEAD(, namecache) nclruhead =	/* LRU chain */
     TAILQ_HEAD_INITIALIZER(nclruhead);
-#define	COUNT(x)	nchstats.x++
+#define	COUNT(c,x)	(c.x++)
 
 struct nchstats nchstats;	/* cache effectiveness statistics */
 
 static pool_cache_t namecache_cache;
@@ -208,9 +216,21 @@ cache_lock_cpus(void)
 {
 	CPU_INFO_ITERATOR cii;
 	struct cpu_info *ci;
+	struct nchcpu *cpup;
+	long *s, *d, *m;
 
 	for (CPU_INFO_FOREACH(cii, ci)) {
-		mutex_enter(ci->ci_data.cpu_cachelock);
+		cpup = ci->ci_data.cpu_nch;
+		mutex_enter(&cpup->cpu_lock);
+
+		/* Collate statistics. */
+		d = (long *)&nchstats;
+		s = (long *)&cpup->cpu_stats;
+		m = s + sizeof(nchstats) / sizeof(long);
+		for (; s < m; s++, d++) {
+			*d += *s;
+			*s = 0;
+		}
 	}
 }
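
The collation loop above walks struct nchstats as a flat array of longs; that is only safe because every member of the struct is a long. A sketch of the same idiom against a hypothetical counter struct, with the C11 compile-time guard one would want alongside it:

```c
#include <assert.h>

/* Hypothetical counter struct; the pointer walk below assumes it is
 * laid out as nothing but longs, exactly like struct nchstats. */
struct xstats {
	long ncs_goodhits;
	long ncs_neghits;
	long ncs_badhits;
};

/* If a differently sized member or padding ever sneaks in, the walk
 * would misattribute counts; this catches the size half of that. */
static_assert(sizeof(struct xstats) % sizeof(long) == 0,
    "struct xstats must be an array of longs");

static void
fold(struct xstats *dst, struct xstats *src)
{
	long *d = (long *)dst;
	long *s = (long *)src;
	long *m = s + sizeof(*src) / sizeof(long);

	for (; s < m; s++, d++) {
		*d += *s;	/* accumulate into the global copy */
		*s = 0;		/* reset the per-CPU copy */
	}
}
```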
@@ -222,9 +242,11 @@ cache_unlock_cpus(void)
 {
 	CPU_INFO_ITERATOR cii;
 	struct cpu_info *ci;
+	struct nchcpu *cpup;
 
 	for (CPU_INFO_FOREACH(cii, ci)) {
-		mutex_exit(ci->ci_data.cpu_cachelock);
+		cpup = ci->ci_data.cpu_nch;
+		mutex_exit(&cpup->cpu_lock);
 	}
 }
@@ -246,7 +268,7 @@ cache_lookup_entry(const struct vnode *dvp, const struct componentname *cnp)
 		    memcmp(ncp->nc_name, cnp->cn_nameptr, (u_int)ncp->nc_nlen))
 			continue;
 		mutex_enter(&ncp->nc_lock);
-		if (ncp->nc_dvp == dvp) {
+		if (__predict_true(ncp->nc_dvp == dvp)) {
 			ncp->nc_hittime = hardclock_ticks;
 			return ncp;
 		}
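
The __predict_true() and __predict_false() annotations used here and below are NetBSD's wrappers around GCC's __builtin_expect(): they tell the compiler which way a branch usually goes, so the expected path is laid out as the straight-line fall-through and the unlikely path is moved out of the hot instruction stream. The wrapper definitions in this sketch are the conventional ones from <sys/cdefs.h>, quoted from memory:

```c
/* Conventional NetBSD definitions (see <sys/cdefs.h>). */
#define	__predict_true(exp)	__builtin_expect((exp) != 0, 1)
#define	__predict_false(exp)	__builtin_expect((exp) != 0, 0)

/* Usage: mark the cache-miss branch unlikely so the hit path
 * falls straight through. */
int
lookup_hot_path(int cached)
{
	if (__predict_false(!cached))
		return -1;	/* miss: rarely taken, placed out of line */
	return 0;		/* hit: fall-through */
}
```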
@@ -278,53 +300,90 @@ cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
 {
 	struct namecache *ncp;
 	struct vnode *vp;
-	kmutex_t *cpulock;
+	struct nchcpu *cpup;
 	int error;
 
-	if (!doingcache) {
+	if (__predict_false(!doingcache)) {
 		cnp->cn_flags &= ~MAKEENTRY;
 		*vpp = NULL;
-		return (-1);
+		return -1;
 	}
 
-	if (cnp->cn_namelen > NCHNAMLEN) {
-		/* Unlocked, but only for stats. */
-		COUNT(ncs_long);
+	cpup = curcpu()->ci_data.cpu_nch;
+	mutex_enter(&cpup->cpu_lock);
+	if (__predict_false(cnp->cn_namelen > NCHNAMLEN)) {
+		COUNT(cpup->cpu_stats, ncs_long);
 		cnp->cn_flags &= ~MAKEENTRY;
-		goto fail;
+		mutex_exit(&cpup->cpu_lock);
+		*vpp = NULL;
+		return -1;
 	}
-	cpulock = curcpu()->ci_data.cpu_cachelock;
-	mutex_enter(cpulock);
 	ncp = cache_lookup_entry(dvp, cnp);
-	if (ncp == NULL) {
-		COUNT(ncs_miss);
-		goto fail_wlock;
+	if (__predict_false(ncp == NULL)) {
+		COUNT(cpup->cpu_stats, ncs_miss);
+		mutex_exit(&cpup->cpu_lock);
+		*vpp = NULL;
+		return -1;
 	}
 	if ((cnp->cn_flags & MAKEENTRY) == 0) {
-		COUNT(ncs_badhits);
-		goto remove;
+		COUNT(cpup->cpu_stats, ncs_badhits);
+		/*
+		 * Last component and we are renaming or deleting,
+		 * the cache entry is invalid, or otherwise don't
+		 * want cache entry to exist.
+		 */
+		cache_invalidate(ncp);
+		mutex_exit(&ncp->nc_lock);
+		mutex_exit(&cpup->cpu_lock);
+		*vpp = NULL;
+		return -1;
 	} else if (ncp->nc_vp == NULL) {
 		/*
 		 * Restore the ISWHITEOUT flag saved earlier.
 		 */
 		cnp->cn_flags |= ncp->nc_flags;
-		if (cnp->cn_nameiop != CREATE ||
-		    (cnp->cn_flags & ISLASTCN) == 0) {
-			COUNT(ncs_neghits);
+		if (__predict_true(cnp->cn_nameiop != CREATE ||
+		    (cnp->cn_flags & ISLASTCN) == 0)) {
+			COUNT(cpup->cpu_stats, ncs_neghits);
 			mutex_exit(&ncp->nc_lock);
-			mutex_exit(cpulock);
-			return (ENOENT);
+			mutex_exit(&cpup->cpu_lock);
+			return ENOENT;
 		} else {
-			COUNT(ncs_badhits);
-			goto remove;
+			COUNT(cpup->cpu_stats, ncs_badhits);
+			/*
+			 * Last component and we are renaming or
+			 * deleting, the cache entry is invalid,
+			 * or otherwise don't want cache entry to
+			 * exist.
+			 */
+			cache_invalidate(ncp);
+			mutex_exit(&ncp->nc_lock);
+			mutex_exit(&cpup->cpu_lock);
+			*vpp = NULL;
+			return -1;
 		}
 	}
 
 	vp = ncp->nc_vp;
-	mutex_enter(&vp->v_interlock);
-	mutex_exit(&ncp->nc_lock);
-	mutex_exit(cpulock);
-	error = vget(vp, LK_NOWAIT | LK_INTERLOCK);
+	if (vtryget(vp)) {
+		mutex_exit(&ncp->nc_lock);
+		mutex_exit(&cpup->cpu_lock);
+	} else {
+		mutex_enter(&vp->v_interlock);
+		mutex_exit(&ncp->nc_lock);
+		mutex_exit(&cpup->cpu_lock);
+		error = vget(vp, LK_NOWAIT | LK_INTERLOCK);
+		if (error) {
+			KASSERT(error == EBUSY);
+			/*
+			 * This vnode is being cleaned out.
+			 * XXX badhits?
+			 */
+			COUNT(cpup->cpu_stats, ncs_falsehits);
+			*vpp = NULL;
+			return -1;
+		}
+	}
 
 #ifdef DEBUG
 	/*
@@ -334,15 +393,6 @@ cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
 	ncp = NULL;
 #endif /* DEBUG */
 
-	if (error) {
-		KASSERT(error == EBUSY);
-		/*
-		 * this vnode is being cleaned out.
-		 */
-		COUNT(ncs_falsehits);	/* XXX badhits? */
-		goto fail;
-	}
-
 	if (vp == dvp) {	/* lookup on "." */
 		error = 0;
 	} else if (cnp->cn_flags & ISDOTDOT) {
@@ -358,29 +408,15 @@ cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
 	 */
 	if (error) {
 		/* Unlocked, but only for stats. */
-		COUNT(ncs_badhits);
+		COUNT(cpup->cpu_stats, ncs_badhits);
 		*vpp = NULL;
-		return (-1);
+		return -1;
 	}
 
 	/* Unlocked, but only for stats. */
-	COUNT(ncs_goodhits);
+	COUNT(cpup->cpu_stats, ncs_goodhits);
 	*vpp = vp;
-	return (0);
-
- remove:
-	/*
-	 * Last component and we are renaming or deleting,
-	 * the cache entry is invalid, or otherwise don't
-	 * want cache entry to exist.
-	 */
-	cache_invalidate(ncp);
-	mutex_exit(&ncp->nc_lock);
- fail_wlock:
-	mutex_exit(cpulock);
- fail:
-	*vpp = NULL;
-	return (-1);
+	return 0;
 }
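
vtryget() is the new fast path for taking a vnode reference: it succeeds with a simple atomic update when the vnode is already active, so the common cache hit no longer needs to acquire v_interlock and call vget(). The real routine lives in the vnode code; the sketch below shows only the general shape of such a try-get, with illustrative names and a GCC builtin standing in for the kernel's atomic compare-and-swap:

```c
#include <stdbool.h>

struct vnode_sketch {
	volatile unsigned v_usecount;	/* illustrative; the real
					 * struct vnode has far more */
};

/*
 * Try to take a reference without the interlock: succeed only while
 * the use count is non-zero, using compare-and-swap so a concurrent
 * drop to zero (reclamation) cannot be missed.
 */
static bool
vtryget_sketch(struct vnode_sketch *vp)
{
	unsigned use, seen;

	for (use = vp->v_usecount; use != 0; use = seen) {
		seen = __sync_val_compare_and_swap(&vp->v_usecount,
		    use, use + 1);
		if (seen == use)
			return true;	/* reference taken locklessly */
	}
	return false;	/* caller falls back to v_interlock + vget() */
}
```

When the try fails, the code above reverts to the old sequence: take v_interlock, drop the name cache locks, and let vget() deal with a vnode that may be in the middle of being reclaimed.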
int
@@ -389,27 +425,30 @@ cache_lookup_raw(struct vnode *dvp, struct vnode **vpp,
 {
 	struct namecache *ncp;
 	struct vnode *vp;
-	kmutex_t *cpulock;
+	struct nchcpu *cpup;
 	int error;
 
-	if (!doingcache) {
+	if (__predict_false(!doingcache)) {
 		cnp->cn_flags &= ~MAKEENTRY;
 		*vpp = NULL;
 		return (-1);
 	}
 
-	if (cnp->cn_namelen > NCHNAMLEN) {
-		/* Unlocked, but only for stats. */
-		COUNT(ncs_long);
+	cpup = curcpu()->ci_data.cpu_nch;
+	mutex_enter(&cpup->cpu_lock);
+	if (__predict_false(cnp->cn_namelen > NCHNAMLEN)) {
+		COUNT(cpup->cpu_stats, ncs_long);
 		cnp->cn_flags &= ~MAKEENTRY;
-		goto fail;
+		mutex_exit(&cpup->cpu_lock);
+		*vpp = NULL;
+		return -1;
 	}
-	cpulock = curcpu()->ci_data.cpu_cachelock;
-	mutex_enter(cpulock);
 	ncp = cache_lookup_entry(dvp, cnp);
-	if (ncp == NULL) {
-		COUNT(ncs_miss);
-		goto fail_wlock;
+	if (__predict_false(ncp == NULL)) {
+		COUNT(cpup->cpu_stats, ncs_miss);
+		mutex_exit(&cpup->cpu_lock);
+		*vpp = NULL;
+		return -1;
 	}
 	vp = ncp->nc_vp;
 	if (vp == NULL) {
@@ -417,34 +456,33 @@ cache_lookup_raw(struct vnode *dvp, struct vnode **vpp,
 		 * Restore the ISWHITEOUT flag saved earlier.
 		 */
 		cnp->cn_flags |= ncp->nc_flags;
-		COUNT(ncs_neghits);
+		COUNT(cpup->cpu_stats, ncs_neghits);
 		mutex_exit(&ncp->nc_lock);
-		mutex_exit(cpulock);
-		return (ENOENT);
+		mutex_exit(&cpup->cpu_lock);
+		return ENOENT;
 	}
 
-	mutex_enter(&vp->v_interlock);
-	mutex_exit(&ncp->nc_lock);
-	mutex_exit(cpulock);
-	error = vget(vp, LK_NOWAIT | LK_INTERLOCK);
-	if (error) {
-		KASSERT(error == EBUSY);
-		/*
-		 * this vnode is being cleaned out.
-		 */
-		COUNT(ncs_falsehits);	/* XXX badhits? */
-		goto fail;
+	if (vtryget(vp)) {
+		mutex_exit(&ncp->nc_lock);
+		mutex_exit(&cpup->cpu_lock);
+	} else {
+		mutex_enter(&vp->v_interlock);
+		mutex_exit(&ncp->nc_lock);
+		mutex_exit(&cpup->cpu_lock);
+		error = vget(vp, LK_NOWAIT | LK_INTERLOCK);
+		if (error) {
+			KASSERT(error == EBUSY);
+			/*
+			 * This vnode is being cleaned out.
+			 * XXX badhits?
+			 */
+			COUNT(cpup->cpu_stats, ncs_falsehits);
+			*vpp = NULL;
+			return -1;
+		}
 	}
 
 	*vpp = vp;
 	return 0;
-
- fail_wlock:
-	mutex_exit(cpulock);
- fail:
-	*vpp = NULL;
-	return -1;
 }
 
 /*
@@ -489,7 +527,7 @@ cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp)
 			    ncp->nc_name[1] == '.')
 				panic("cache_revlookup: found entry for ..");
 #endif
-			COUNT(ncs_revhits);
+			COUNT(nchstats, ncs_revhits);
 
 			if (bufp) {
 				bp = *bpp;
@@ -512,7 +550,7 @@ cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp)
 		}
 		mutex_exit(&ncp->nc_lock);
 	}
-	COUNT(ncs_revmiss);
+	COUNT(nchstats, ncs_revmiss);
 	mutex_exit(namecache_lock);
  out:
 	*dvpp = NULL;
@@ -686,8 +724,14 @@ cache_dtor(void *arg, void *obj)
 void
 cache_cpu_init(struct cpu_info *ci)
 {
+	struct nchcpu *cpup;
+	size_t sz;
 
-	ci->ci_data.cpu_cachelock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
+	sz = roundup2(sizeof(*cpup), coherency_unit) + coherency_unit;
+	cpup = kmem_zalloc(sz, KM_SLEEP);
+	cpup = (void *)roundup2((uintptr_t)cpup, coherency_unit);
+	mutex_init(&cpup->cpu_lock, MUTEX_DEFAULT, IPL_NONE);
+	ci->ci_data.cpu_nch = cpup;
 }
 
 /*
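
cache_cpu_init() above deliberately over-allocates by one coherency unit and rounds the pointer up, so that each nchcpu begins on its own cache line; without this, two CPUs' locks and counters could share a line and bounce it between caches (false sharing), defeating the point of per-CPU data. A userspace sketch of the same trick, assuming a 64-byte line where the kernel uses coherency_unit:

```c
#include <stdint.h>
#include <stdlib.h>

#define COHERENCY_UNIT	64	/* assumed cache line size */

/* Round up to a power-of-two boundary, like the kernel's roundup2(). */
#define ROUNDUP2(x, align) \
	(((x) + ((align) - 1)) & ~((uintptr_t)(align) - 1))

struct nch_sketch {		/* stand-in for struct nchcpu */
	long counters[8];
};

static struct nch_sketch *
alloc_line_aligned(void)
{
	/* Pad by one unit so the pointer can be slid forward to a
	 * boundary.  The raw pointer is never freed here, mirroring
	 * the kernel, where per-CPU data lives for the system's life. */
	size_t sz = ROUNDUP2(sizeof(struct nch_sketch), COHERENCY_UNIT)
	    + COHERENCY_UNIT;
	uintptr_t raw = (uintptr_t)calloc(1, sz);

	if (raw == 0)
		return NULL;
	return (struct nch_sketch *)ROUNDUP2(raw, COHERENCY_UNIT);
}
```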

rump.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: rump.c,v 1.46 2008/05/20 19:02:36 ad Exp $	*/
+/*	$NetBSD: rump.c,v 1.47 2008/06/03 15:50:22 ad Exp $	*/
 
 /*
  * Copyright (c) 2007 Antti Kantee. All Rights Reserved.
@@ -102,9 +102,7 @@ rump_init()
 		desiredvnodes = 1<<16;
 	}
 
-	rump_cpu.ci_data.cpu_cachelock = mutex_obj_alloc(MUTEX_DEFAULT,
-	    IPL_NONE);
+	cache_cpu_init(&rump_cpu);
 	rw_init(&rump_cwdi.cwdi_lock);
 
 	l = &lwp0;
 	p = &proc0;

cpu_data.h

@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu_data.h,v 1.26 2008/06/01 21:24:15 ad Exp $	*/
+/*	$NetBSD: cpu_data.h,v 1.27 2008/06/03 15:50:22 ad Exp $	*/
 
 /*-
  * Copyright (c) 2004, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -86,11 +86,12 @@ struct cpu_data {
 	u_int		cpu_nsyscall;	/* syscall counter */
 	u_int		cpu_ntrap;	/* trap counter */
 	u_int		cpu_nswtch;	/* context switch counter */
+	void		*cpu_uvm;	/* uvm per-cpu data */
 	void		*cpu_softcpu;	/* soft interrupt table */
 	TAILQ_HEAD(,buf) cpu_biodone;	/* finished block xfers */
 	percpu_cpu_t	cpu_percpu;	/* per-cpu data */
 	struct selcpu	*cpu_selcpu;	/* per-CPU select() info */
-	void		*cpu_cachelock;	/* per-cpu vfs_cache lock */
+	void		*cpu_nch;	/* per-cpu vfs_cache data */
 	_TAILQ_HEAD(,struct lockdebug,volatile) cpu_ld_locks;	/* !: lockdebug */
 	__cpu_simple_lock_t cpu_ld_lock;	/* lockdebug */
 	uint64_t	cpu_cc_freq;	/* cycle counter frequency */