- Extend the per-CPU counters matt@ did to include all of the hot counters
  in UVM, excluding uvmexp.free, which needs special treatment and will be
  done with a separate commit.  Cuts system time for a build by 20-25% on
  a 48 CPU machine w/DIAGNOSTIC.

- Avoid 64-bit integer divide on every fault (for rnd_add_uint32).
ad 2019-12-16 22:47:54 +00:00
parent df1f230d9f
commit a98966d3dc
21 changed files with 480 additions and 269 deletions
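
In outline: hot uvmexp statistics that were previously updated with atomics or unlocked stores on shared cache lines move into a per-CPU ci_counts[] array and are summed only when a reader asks. A minimal sketch of how the primitives added below compose (illustrative fragment, not part of the diff; all names are from this commit):

	/* Writer, hot path: bump this CPU's private slot only. */
	kpreempt_disable();
	CPU_COUNT(CPU_COUNT_NSYSCALL, 1);
	kpreempt_enable();

	/* Readers: a cheap, possibly stale value ... */
	int64_t cached = cpu_count_get(CPU_COUNT_NSYSCALL);
	/* ... or an expensive, freshly summed cross-CPU total. */
	int64_t fresh = cpu_count_sync(CPU_COUNT_NSYSCALL);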


@ -1,4 +1,4 @@
/* $NetBSD: kern_cpu.c,v 1.81 2019/12/04 09:34:13 wiz Exp $ */
/* $NetBSD: kern_cpu.c,v 1.82 2019/12/16 22:47:54 ad Exp $ */
/*-
* Copyright (c) 2007, 2008, 2009, 2010, 2012, 2019 The NetBSD Foundation, Inc.
@ -56,9 +56,11 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.81 2019/12/04 09:34:13 wiz Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.82 2019/12/16 22:47:54 ad Exp $");
#ifdef _KERNEL_OPT
#include "opt_cpu_ucode.h"
#endif
#include <sys/param.h>
#include <sys/systm.h>
@ -120,6 +122,7 @@ int ncpu __read_mostly;
int ncpuonline __read_mostly;
bool mp_online __read_mostly;
static bool cpu_topology_present __read_mostly;
int64_t cpu_counts[CPU_COUNT_MAX];
/* An array of CPUs. There are ncpu entries. */
struct cpu_info **cpu_infos __read_mostly;
@ -305,6 +308,7 @@ cpuctl_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
return error;
}
#ifndef _RUMPKERNEL
struct cpu_info *
cpu_lookup(u_int idx)
{
@ -327,6 +331,7 @@ cpu_lookup(u_int idx)
return ci;
}
#endif
static void
cpu_xc_offline(struct cpu_info *ci, void *unused)
@ -830,3 +835,86 @@ err0:
return error;
}
#endif
/*
* Adjust one count, for a counter that's NOT updated from interrupt
* context. Hardly worth making an inline due to preemption stuff.
*/
void
cpu_count(enum cpu_count idx, int64_t delta)
{
lwp_t *l = curlwp;
KPREEMPT_DISABLE(l);
l->l_cpu->ci_counts[idx] += delta;
KPREEMPT_ENABLE(l);
}
/*
* Fetch fresh sum total for all counts. Expensive - don't call often.
*/
void
cpu_count_sync_all(void)
{
CPU_INFO_ITERATOR cii;
struct cpu_info *ci;
int64_t sum[CPU_COUNT_MAX], *ptr;
enum cpu_count i;
int s;
KASSERT(sizeof(ci->ci_counts) == sizeof(cpu_counts));
if (__predict_true(mp_online)) {
memset(sum, 0, sizeof(sum));
/*
* We want this to be reasonably quick, so any value we get
* isn't totally out of whack, so don't let the current LWP
* get preempted.
*/
s = splvm();
curcpu()->ci_counts[CPU_COUNT_SYNC_ALL]++;
for (CPU_INFO_FOREACH(cii, ci)) {
ptr = ci->ci_counts;
for (i = 0; i < CPU_COUNT_MAX; i += 8) {
sum[i+0] += ptr[i+0];
sum[i+1] += ptr[i+1];
sum[i+2] += ptr[i+2];
sum[i+3] += ptr[i+3];
sum[i+4] += ptr[i+4];
sum[i+5] += ptr[i+5];
sum[i+6] += ptr[i+6];
sum[i+7] += ptr[i+7];
}
KASSERT(i == CPU_COUNT_MAX);
}
memcpy(cpu_counts, sum, sizeof(cpu_counts));
splx(s);
} else {
memcpy(cpu_counts, curcpu()->ci_counts, sizeof(cpu_counts));
}
}
/*
* Fetch a fresh sum total for one single count. Expensive - don't call often.
*/
int64_t
cpu_count_sync(enum cpu_count count)
{
CPU_INFO_ITERATOR cii;
struct cpu_info *ci;
int64_t sum;
int s;
if (__predict_true(mp_online)) {
s = splvm();
curcpu()->ci_counts[CPU_COUNT_SYNC_ONE]++;
sum = 0;
for (CPU_INFO_FOREACH(cii, ci)) {
sum += ci->ci_counts[count];
}
splx(s);
} else {
/* XXX Early boot, iterator might not be available. */
sum = curcpu()->ci_counts[count];
}
return cpu_counts[count] = sum;
}


@ -1,4 +1,4 @@
/* $NetBSD: kern_fork.c,v 1.216 2019/11/23 19:42:52 ad Exp $ */
/* $NetBSD: kern_fork.c,v 1.217 2019/12/16 22:47:54 ad Exp $ */
/*-
* Copyright (c) 1999, 2001, 2004, 2006, 2007, 2008, 2019
@ -68,7 +68,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_fork.c,v 1.216 2019/11/23 19:42:52 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_fork.c,v 1.217 2019/12/16 22:47:54 ad Exp $");
#include "opt_ktrace.h"
#include "opt_dtrace.h"
@ -96,8 +96,6 @@ __KERNEL_RCSID(0, "$NetBSD: kern_fork.c,v 1.216 2019/11/23 19:42:52 ad Exp $");
#include <sys/sdt.h>
#include <sys/ptrace.h>
#include <uvm/uvm_extern.h>
/*
* DTrace SDT provider definitions
*/
@ -525,11 +523,13 @@ fork1(struct lwp *l1, int flags, int exitsig, void *stack, size_t stacksize,
/*
* Update stats now that we know the fork was successful.
*/
uvmexp.forks++;
KPREEMPT_DISABLE(l1);
CPU_COUNT(CPU_COUNT_FORKS, 1);
if (flags & FORK_PPWAIT)
uvmexp.forks_ppwait++;
CPU_COUNT(CPU_COUNT_FORKS_PPWAIT, 1);
if (flags & FORK_SHAREVM)
uvmexp.forks_sharevm++;
CPU_COUNT(CPU_COUNT_FORKS_SHAREVM, 1);
KPREEMPT_ENABLE(l1);
if (ktrpoint(KTR_EMUL))
p2->p_traceflag |= KTRFAC_TRC_EMUL;


@ -1,4 +1,4 @@
/* $NetBSD: kern_softint.c,v 1.55 2019/12/06 21:36:10 ad Exp $ */
/* $NetBSD: kern_softint.c,v 1.56 2019/12/16 22:47:54 ad Exp $ */
/*-
* Copyright (c) 2007, 2008, 2019 The NetBSD Foundation, Inc.
@ -170,7 +170,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.55 2019/12/06 21:36:10 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.56 2019/12/16 22:47:54 ad Exp $");
#include <sys/param.h>
#include <sys/proc.h>
@ -607,11 +607,7 @@ softint_execute(softint_t *si, lwp_t *l, int s)
KERNEL_UNLOCK_ONE(l);
}
/*
* Unlocked, but only for statistics.
* Should be per-CPU to prevent cache ping-pong.
*/
curcpu()->ci_data.cpu_nsoft++;
CPU_COUNT(CPU_COUNT_NSOFT, 1);
KASSERT(si->si_cpu == curcpu());
KASSERT(si->si_lwp->l_wchan == NULL);


@ -1,4 +1,4 @@
/* $NetBSD: vfs_vnode.c,v 1.104 2019/12/01 13:56:29 ad Exp $ */
/* $NetBSD: vfs_vnode.c,v 1.105 2019/12/16 22:47:54 ad Exp $ */
/*-
* Copyright (c) 1997-2011, 2019 The NetBSD Foundation, Inc.
@ -146,7 +146,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.104 2019/12/01 13:56:29 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.105 2019/12/16 22:47:54 ad Exp $");
#include <sys/param.h>
#include <sys/kernel.h>
@ -792,10 +792,8 @@ vrelel(vnode_t *vp, int flags)
/* Take care of space accounting. */
if ((vp->v_iflag & VI_EXECMAP) != 0 &&
vp->v_uobj.uo_npages != 0) {
atomic_add_int(&uvmexp.execpages,
-vp->v_uobj.uo_npages);
atomic_add_int(&uvmexp.filepages,
vp->v_uobj.uo_npages);
cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
cpu_count(CPU_COUNT_FILEPAGES, vp->v_uobj.uo_npages);
}
vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
vp->v_vflag &= ~VV_MAPPED;
@ -1565,8 +1563,8 @@ vcache_reclaim(vnode_t *vp)
*/
VSTATE_CHANGE(vp, VS_LOADED, VS_RECLAIMING);
if ((vp->v_iflag & VI_EXECMAP) != 0 && vp->v_uobj.uo_npages != 0) {
atomic_add_int(&uvmexp.execpages, -vp->v_uobj.uo_npages);
atomic_add_int(&uvmexp.filepages, vp->v_uobj.uo_npages);
cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
cpu_count(CPU_COUNT_FILEPAGES, vp->v_uobj.uo_npages);
}
vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
mutex_exit(vp->v_interlock);


@ -1,4 +1,4 @@
/* $NetBSD: vfs_vnops.c,v 1.203 2019/12/01 13:56:29 ad Exp $ */
/* $NetBSD: vfs_vnops.c,v 1.204 2019/12/16 22:47:54 ad Exp $ */
/*-
* Copyright (c) 2009 The NetBSD Foundation, Inc.
@ -66,7 +66,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnops.c,v 1.203 2019/12/01 13:56:29 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: vfs_vnops.c,v 1.204 2019/12/16 22:47:54 ad Exp $");
#include "veriexec.h"
@ -341,8 +341,8 @@ vn_markexec(struct vnode *vp)
mutex_enter(vp->v_interlock);
if ((vp->v_iflag & VI_EXECMAP) == 0) {
atomic_add_int(&uvmexp.filepages, -vp->v_uobj.uo_npages);
atomic_add_int(&uvmexp.execpages, vp->v_uobj.uo_npages);
cpu_count(CPU_COUNT_FILEPAGES, -vp->v_uobj.uo_npages);
cpu_count(CPU_COUNT_EXECPAGES, vp->v_uobj.uo_npages);
vp->v_iflag |= VI_EXECMAP;
}
mutex_exit(vp->v_interlock);
@ -368,8 +368,8 @@ vn_marktext(struct vnode *vp)
return (ETXTBSY);
}
if ((vp->v_iflag & VI_EXECMAP) == 0) {
atomic_add_int(&uvmexp.filepages, -vp->v_uobj.uo_npages);
atomic_add_int(&uvmexp.execpages, vp->v_uobj.uo_npages);
cpu_count(CPU_COUNT_FILEPAGES, -vp->v_uobj.uo_npages);
cpu_count(CPU_COUNT_EXECPAGES, vp->v_uobj.uo_npages);
}
vp->v_iflag |= (VI_TEXT | VI_EXECMAP);
mutex_exit(vp->v_interlock);


@ -1,4 +1,4 @@
/* $NetBSD: procfs_linux.c,v 1.76 2019/09/07 19:08:28 chs Exp $ */
/* $NetBSD: procfs_linux.c,v 1.77 2019/12/16 22:47:55 ad Exp $ */
/*
* Copyright (c) 2001 Wasabi Systems, Inc.
@ -36,7 +36,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: procfs_linux.c,v 1.76 2019/09/07 19:08:28 chs Exp $");
__KERNEL_RCSID(0, "$NetBSD: procfs_linux.c,v 1.77 2019/12/16 22:47:55 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -137,9 +137,16 @@ procfs_domeminfo(struct lwp *curl, struct proc *p,
char *bf;
int len;
int error = 0;
long filepg, anonpg, execpg, freepg;
bf = malloc(LBFSZ, M_TEMP, M_WAITOK);
cpu_count_sync_all();
freepg = uvmexp.free;
filepg = (long)cpu_count_get(CPU_COUNT_FILEPAGES);
anonpg = (long)cpu_count_get(CPU_COUNT_ANONPAGES);
execpg = (long)cpu_count_get(CPU_COUNT_EXECPAGES);
len = snprintf(bf, LBFSZ,
" total: used: free: shared: buffers: cached:\n"
"Mem: %8lu %8lu %8lu %8lu %8lu %8lu\n"
@ -152,19 +159,19 @@ procfs_domeminfo(struct lwp *curl, struct proc *p,
"SwapTotal: %8lu kB\n"
"SwapFree: %8lu kB\n",
PGTOB(uvmexp.npages),
PGTOB(uvmexp.npages - uvmexp.free),
PGTOB(uvmexp.free),
PGTOB(uvmexp.npages - freepg),
PGTOB(freepg),
0L,
PGTOB(uvmexp.filepages),
PGTOB(uvmexp.anonpages + uvmexp.filepages + uvmexp.execpages),
PGTOB(filepg),
PGTOB(anonpg + filepg + execpg),
PGTOB(uvmexp.swpages),
PGTOB(uvmexp.swpginuse),
PGTOB(uvmexp.swpages - uvmexp.swpginuse),
PGTOKB(uvmexp.npages),
PGTOKB(uvmexp.free),
PGTOKB(freepg),
0L,
PGTOKB(uvmexp.filepages),
PGTOKB(uvmexp.anonpages + uvmexp.filepages + uvmexp.execpages),
PGTOKB(filepg),
PGTOKB(anonpg + filepg + execpg),
PGTOKB(uvmexp.swpages),
PGTOKB(uvmexp.swpages - uvmexp.swpginuse));
@ -253,8 +260,6 @@ procfs_docpustat(struct lwp *curl, struct proc *p,
CPU_INFO_ITERATOR cii;
#endif
int i;
uint64_t nintr;
uint64_t nswtch;
error = ENAMETOOLONG;
bf = malloc(LBFSZ, M_TEMP, M_WAITOK);
@ -277,8 +282,6 @@ procfs_docpustat(struct lwp *curl, struct proc *p,
#endif
i = 0;
nintr = 0;
nswtch = 0;
for (ALLCPUS) {
len += snprintf(&bf[len], LBFSZ - len,
"cpu%d %" PRIu64 " %" PRIu64 " %" PRIu64 " %" PRIu64
@ -290,21 +293,21 @@ procfs_docpustat(struct lwp *curl, struct proc *p,
if (len >= LBFSZ)
goto out;
i += 1;
nintr += CPUNAME->ci_data.cpu_nintr;
nswtch += CPUNAME->ci_data.cpu_nswtch;
}
cpu_count_sync_all();
len += snprintf(&bf[len], LBFSZ - len,
"disk 0 0 0 0\n"
"page %u %u\n"
"swap %u %u\n"
"intr %"PRIu64"\n"
"ctxt %"PRIu64"\n"
"intr %"PRId64"\n"
"ctxt %"PRId64"\n"
"btime %"PRId64"\n",
uvmexp.pageins, uvmexp.pdpageouts,
uvmexp.pgswapin, uvmexp.pgswapout,
nintr,
nswtch,
cpu_count_get(CPU_COUNT_NINTR),
cpu_count_get(CPU_COUNT_NSWTCH),
boottime.tv_sec);
if (len >= LBFSZ)
goto out;


@ -4,3 +4,6 @@ include "conf/files"
include "rump/dev/files.rump"
mainbus0 at root
pseudo-device cpuctl


@ -1,4 +1,4 @@
# $NetBSD: Makefile.rumpkern,v 1.179 2019/12/15 21:11:35 ad Exp $
# $NetBSD: Makefile.rumpkern,v 1.180 2019/12/16 22:47:55 ad Exp $
#
IOCONFDIR:= ${.PARSEDIR}
@ -69,6 +69,7 @@ SRCS+= init_sysctl_base.c \
kern_auth.c \
kern_cfglock.c \
kern_clock.c \
kern_cpu.c \
kern_descrip.c \
kern_event.c \
kern_hook.c \


@ -1,4 +1,4 @@
/* $NetBSD: emul.c,v 1.192 2019/09/26 17:52:50 bad Exp $ */
/* $NetBSD: emul.c,v 1.193 2019/12/16 22:47:55 ad Exp $ */
/*
* Copyright (c) 2007-2011 Antti Kantee. All Rights Reserved.
@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: emul.c,v 1.192 2019/09/26 17:52:50 bad Exp $");
__KERNEL_RCSID(0, "$NetBSD: emul.c,v 1.193 2019/12/16 22:47:55 ad Exp $");
#include <sys/param.h>
#include <sys/cprng.h>
@ -64,7 +64,6 @@ struct vnode *rootvp;
dev_t rootdev = NODEV;
const int schedppq = 1;
bool mp_online = false;
struct timespec boottime;
int cold = 1;
int boothowto = AB_SILENT;
@ -431,10 +430,3 @@ cpu_reboot(int howto, char *bootstr)
rump_sysproxy_fini(finiarg);
rumpuser_exit(ruhow);
}
const char *
cpu_getmodel(void)
{
return "rumpcore (virtual)";
}


@ -1,4 +1,4 @@
/* $NetBSD: intr.c,v 1.54 2016/01/26 23:12:17 pooka Exp $ */
/* $NetBSD: intr.c,v 1.55 2019/12/16 22:47:55 ad Exp $ */
/*
* Copyright (c) 2008-2010, 2015 Antti Kantee. All Rights Reserved.
@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.54 2016/01/26 23:12:17 pooka Exp $");
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.55 2019/12/16 22:47:55 ad Exp $");
#include <sys/param.h>
#include <sys/atomic.h>
@ -474,10 +474,3 @@ cpu_intr_p(void)
return false;
}
bool
cpu_softintr_p(void)
{
return curlwp->l_pflag & LP_INTR;
}


@ -1,4 +1,4 @@
/* $NetBSD: scheduler.c,v 1.47 2019/12/01 19:21:13 ad Exp $ */
/* $NetBSD: scheduler.c,v 1.48 2019/12/16 22:47:55 ad Exp $ */
/*
* Copyright (c) 2010, 2011 Antti Kantee. All Rights Reserved.
@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scheduler.c,v 1.47 2019/12/01 19:21:13 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: scheduler.c,v 1.48 2019/12/16 22:47:55 ad Exp $");
#include <sys/param.h>
#include <sys/atomic.h>
@ -76,11 +76,6 @@ cpuinfo_to_rumpcpu(struct cpu_info *ci)
}
struct cpu_info rump_bootcpu;
kcpuset_t *kcpuset_attached = NULL;
kcpuset_t *kcpuset_running = NULL;
int ncpu, ncpuonline;
kmutex_t cpu_lock;
#define RCPULWP_BUSY ((void *)-1)
#define RCPULWP_WANTED ((void *)-2)
@ -143,10 +138,9 @@ rump_cpus_bootstrap(int *nump)
num = MAXCPUS;
}
mutex_init(&cpu_lock, MUTEX_DEFAULT, IPL_NONE);
cpu_setmodel("rumpcore (virtual)");
kcpuset_create(&kcpuset_attached, true);
kcpuset_create(&kcpuset_running, true);
mi_cpu_init();
/* attach first cpu for bootstrap */
rump_cpu_attach(&rump_bootcpu);


@ -1,4 +1,4 @@
/* $NetBSD: cpu_data.h,v 1.43 2019/12/03 22:28:41 ad Exp $ */
/* $NetBSD: cpu_data.h,v 1.44 2019/12/16 22:47:55 ad Exp $ */
/*-
* Copyright (c) 2004, 2006, 2007, 2008, 2019 The NetBSD Foundation, Inc.
@ -45,6 +45,59 @@ struct lwp;
#include <sys/kcpuset.h>
#include <sys/ipi.h>
/* Per-CPU counters. New elements must be added in blocks of 8. */
enum cpu_count {
CPU_COUNT_NFAULT, /* 0 */
CPU_COUNT_NSWTCH,
CPU_COUNT_NSYSCALL,
CPU_COUNT_NTRAP,
CPU_COUNT_NINTR,
CPU_COUNT_NSOFT,
CPU_COUNT_FORKS,
CPU_COUNT_FORKS_PPWAIT,
CPU_COUNT_FORKS_SHAREVM, /* 8 */
CPU_COUNT_ANONPAGES,
CPU_COUNT_COLORHIT,
CPU_COUNT_COLORMISS,
CPU_COUNT_CPUHIT,
CPU_COUNT_CPUMISS,
CPU_COUNT_BUCKETMISS,
CPU_COUNT_EXECPAGES,
CPU_COUNT_FILEPAGES, /* 16 */
CPU_COUNT_PGA_ZEROHIT,
CPU_COUNT_PGA_ZEROMISS,
CPU_COUNT_ZEROPAGES,
CPU_COUNT_ZEROABORTS,
CPU_COUNT_FREE,
CPU_COUNT_SYNC_ONE,
CPU_COUNT_SYNC_ALL,
CPU_COUNT_FLT_ACOW, /* 24 */
CPU_COUNT_FLT_ANON,
CPU_COUNT_FLT_OBJ,
CPU_COUNT_FLT_PRCOPY,
CPU_COUNT_FLT_PRZERO,
CPU_COUNT_FLTAMCOPY,
CPU_COUNT_FLTANGET,
CPU_COUNT_FLTANRETRY,
CPU_COUNT_FLTGET, /* 32 */
CPU_COUNT_FLTLGET,
CPU_COUNT_FLTNAMAP,
CPU_COUNT_FLTNOMAP,
CPU_COUNT_FLTNOANON,
CPU_COUNT_FLTNORAM,
CPU_COUNT_FLTPGRELE,
CPU_COUNT_FLTPGWAIT,
CPU_COUNT_FLTRELCK, /* 40 */
CPU_COUNT_FLTRELCKOK,
CPU_COUNT_PAGEINS,
CPU_COUNT__SPARE1,
CPU_COUNT__SPARE2,
CPU_COUNT__SPARE3,
CPU_COUNT__SPARE4,
CPU_COUNT__SPARE5,
CPU_COUNT_MAX /* 48 */
};
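
The blocks-of-8 rule above exists because cpu_count_sync_all() (kern_cpu.c, earlier in this commit) sums the array in a loop unrolled by 8, so CPU_COUNT_MAX must stay a multiple of 8; with the spare slots it is 48. A hypothetical compile-time guard, not part of the commit, could pin that down:

/* Hypothetical guard (not in the commit): the sync loop is unrolled by 8. */
CTASSERT((CPU_COUNT_MAX & 7) == 0);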
/*
* MI per-cpu data
*
@ -97,13 +150,8 @@ struct cpu_data {
u_int cpu_spin_locks2; /* # of spin locks held XXX */
u_int cpu_lkdebug_recurse; /* LOCKDEBUG recursion */
u_int cpu_softints; /* pending (slow) softints */
uint64_t cpu_nsyscall; /* syscall counter */
uint64_t cpu_ntrap; /* trap counter */
uint64_t cpu_nswtch; /* context switch counter */
uint64_t cpu_nintr; /* interrupt count */
uint64_t cpu_nsoft; /* soft interrupt count */
uint64_t cpu_nfault; /* pagefault counter */
struct uvm_cpu *cpu_uvm; /* uvm per-cpu data */
u_int cpu_faultrng; /* counter for fault rng */
void *cpu_callout; /* per-CPU callout state */
void *cpu_softcpu; /* soft interrupt table */
TAILQ_HEAD(,buf) cpu_biodone; /* finished block xfers */
@ -116,8 +164,8 @@ struct cpu_data {
int64_t cpu_cc_skew; /* counter skew vs cpu0 */
char cpu_name[8]; /* eg, "cpu4" */
kcpuset_t *cpu_kcpuset; /* kcpuset_t of this cpu only */
struct lwp * volatile cpu_pcu_curlwp[PCU_UNIT_COUNT];
int64_t cpu_counts[CPU_COUNT_MAX];/* per-CPU counts */
};
#define ci_schedstate ci_data.cpu_schedstate
@ -140,8 +188,44 @@ struct cpu_data {
#define ci_smt_id ci_data.cpu_smt_id
#define ci_nsibling ci_data.cpu_nsibling
#define ci_sibling ci_data.cpu_sibling
#define ci_faultrng ci_data.cpu_faultrng
#define ci_counts ci_data.cpu_counts
#define cpu_nsyscall cpu_counts[CPU_COUNT_NSYSCALL]
#define cpu_ntrap cpu_counts[CPU_COUNT_NTRAP]
#define cpu_nswtch cpu_counts[CPU_COUNT_NSWTCH]
#define cpu_nintr cpu_counts[CPU_COUNT_NINTR]
#define cpu_nsoft cpu_counts[CPU_COUNT_NSOFT]
#define cpu_nfault cpu_counts[CPU_COUNT_NFAULT]
void mi_cpu_init(void);
int mi_cpu_attach(struct cpu_info *);
/*
* Adjust a count with preemption already disabled. If the counter being
* adjusted can be updated from interrupt context, SPL must be raised.
*/
#define CPU_COUNT(idx, d) \
do { \
extern bool kpreempt_disabled(void); \
KASSERT(kpreempt_disabled()); \
KASSERT((unsigned)idx < CPU_COUNT_MAX); \
curcpu()->ci_counts[(idx)] += (d); \
} while (/* CONSTCOND */ 0)
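
Concretely, the rule stated above the macro means that a counter which an interrupt handler can also bump must be updated with SPL raised as well. A hedged sketch, assuming the interrupts involved are masked by splvm() (the level cpu_count_sync_all() itself uses); illustrative only:

	int s;

	kpreempt_disable();
	s = splvm();		/* fence off interrupt updaters on this CPU */
	CPU_COUNT(CPU_COUNT_NINTR, 1);
	splx(s);
	kpreempt_enable();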
/*
* Fetch a potentially stale count - cheap, use as often as you like.
*/
static inline int64_t
cpu_count_get(enum cpu_count idx)
{
extern int64_t cpu_counts[];
return cpu_counts[idx];
}
void cpu_count(enum cpu_count, int64_t);
int64_t cpu_count_sync(enum cpu_count);
void cpu_count_sync_all(void);
#endif /* _SYS_CPU_DATA_H_ */


@ -1,4 +1,4 @@
/* $NetBSD: uvm_extern.h,v 1.213 2018/05/28 21:04:35 chs Exp $ */
/* $NetBSD: uvm_extern.h,v 1.214 2019/12/16 22:47:55 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -346,12 +346,12 @@ struct uvmexp {
int nswget; /* number of times fault calls uvm_swap_get() */
/* stat counters. XXX: should be 64-bit counters */
int _unused_faults; /* page fault count */
int _unused_traps; /* trap count */
int _unused_intrs; /* interrupt count */
int _unused_swtch; /* context switch count */
int _unused_softs; /* software interrupt count */
int _unused_syscalls; /* system calls */
int faults; /* page fault count */
int traps; /* trap count */
int intrs; /* interrupt count */
int swtch; /* context switch count */
int softs; /* software interrupt count */
int syscalls; /* system calls */
int pageins; /* pagein operation count */
/* pageouts are in pdpageouts below */
int _unused1;
@ -451,7 +451,7 @@ struct uvmexp_sysctl {
int64_t pageins;
int64_t swapins; /* unused */
int64_t swapouts; /* unused */
int64_t pgswapin;
int64_t pgswapin; /* unused */
int64_t pgswapout;
int64_t forks;
int64_t forks_ppwait;
@ -497,6 +497,8 @@ struct uvmexp_sysctl {
int64_t ncolors;
int64_t bootpages;
int64_t poolpages;
int64_t countsyncone;
int64_t countsyncall;
};
#ifdef _KERNEL
@ -709,6 +711,7 @@ void uvm_pctparam_init(struct uvm_pctparam *, int,
int (*)(struct uvm_pctparam *, int));
int uvm_pctparam_createsysctlnode(struct uvm_pctparam *,
const char *, const char *);
void uvm_update_uvmexp(void);
/* uvm_mmap.c */
int uvm_mmap_dev(struct proc *, void **, size_t, dev_t,


@ -1,4 +1,4 @@
/* $NetBSD: uvm_fault.c,v 1.212 2019/12/13 20:10:22 ad Exp $ */
/* $NetBSD: uvm_fault.c,v 1.213 2019/12/16 22:47:55 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.212 2019/12/13 20:10:22 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.213 2019/12/16 22:47:55 ad Exp $");
#include "opt_uvmhist.h"
@ -281,8 +281,7 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
KASSERT(anon->an_lock == amap->am_lock);
/* Increment the counters.*/
atomic_store_relaxed(&uvmexp.fltanget,
atomic_load_relaxed(&uvmexp.fltanget) + 1);
cpu_count(CPU_COUNT_FLTANGET, 1);
if (anon->an_page) {
curlwp->l_ru.ru_minflt++;
} else {
@ -329,8 +328,7 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
return 0;
}
pg->flags |= PG_WANTED;
atomic_store_relaxed(&uvmexp.fltpgwait,
atomic_load_relaxed(&uvmexp.fltpgwait) + 1);
cpu_count(CPU_COUNT_FLTPGWAIT, 1);
/*
* The last unlock must be an atomic unlock and wait
@ -365,8 +363,7 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
if (pg == NULL) {
/* Out of memory. Wait a little. */
uvmfault_unlockall(ufi, amap, NULL);
atomic_store_relaxed(&uvmexp.fltnoram,
atomic_load_relaxed(&uvmexp.fltnoram) + 1);
cpu_count(CPU_COUNT_FLTNORAM, 1);
UVMHIST_LOG(maphist, " noram -- UVM_WAIT",0,
0,0,0);
if (!uvm_reclaimable()) {
@ -385,8 +382,7 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
* to read an_swslot here, because we hold
* PG_BUSY on the page.
*/
atomic_store_relaxed(&uvmexp.pageins,
atomic_load_relaxed(&uvmexp.pageins) + 1);
cpu_count(CPU_COUNT_PAGEINS, 1);
error = uvm_swap_get(pg, anon->an_swslot,
PGO_SYNCIO);
@ -523,8 +519,7 @@ released:
* Retry..
*/
atomic_store_relaxed(&uvmexp.fltanretry,
atomic_load_relaxed(&uvmexp.fltanretry) + 1);
cpu_count(CPU_COUNT_FLTANRETRY, 1);
continue;
}
/*NOTREACHED*/
@ -625,15 +620,13 @@ uvmfault_promote(struct uvm_faultinfo *ufi,
uvmfault_unlockall(ufi, amap, uobj);
if (!uvm_reclaimable()) {
UVMHIST_LOG(maphist, "out of VM", 0,0,0,0);
atomic_store_relaxed(&uvmexp.fltnoanon,
atomic_load_relaxed(&uvmexp.fltnoanon) + 1);
cpu_count(CPU_COUNT_FLTNOANON, 1);
error = ENOMEM;
goto done;
}
UVMHIST_LOG(maphist, "out of RAM, waiting for more", 0,0,0,0);
atomic_store_relaxed(&uvmexp.fltnoram,
atomic_load_relaxed(&uvmexp.fltnoram) + 1);
cpu_count(CPU_COUNT_FLTNORAM, 1);
uvm_wait("flt_noram5");
error = ERESTART;
goto done;
@ -811,8 +804,6 @@ int
uvm_fault_internal(struct vm_map *orig_map, vaddr_t vaddr,
vm_prot_t access_type, int fault_flag)
{
struct cpu_data *cd;
struct uvm_cpu *ucpu;
struct uvm_faultinfo ufi;
struct uvm_faultctx flt = {
.access_type = access_type,
@ -834,26 +825,24 @@ uvm_fault_internal(struct vm_map *orig_map, vaddr_t vaddr,
UVMHIST_LOG(maphist, "(map=%#jx, vaddr=%#jx, at=%jd, ff=%jd)",
(uintptr_t)orig_map, vaddr, access_type, fault_flag);
cd = &(curcpu()->ci_data);
cd->cpu_nfault++;
ucpu = cd->cpu_uvm;
/* Don't flood RNG subsystem with samples. */
if (cd->cpu_nfault % 503)
goto norng;
/* Don't count anything until user interaction is possible */
kpreempt_disable();
if (__predict_true(start_init_exec)) {
kpreempt_disable();
rnd_add_uint32(&ucpu->rs,
sizeof(vaddr_t) == sizeof(uint32_t) ?
(uint32_t)vaddr : sizeof(vaddr_t) ==
sizeof(uint64_t) ?
(uint32_t)(vaddr & 0x00000000ffffffff) :
(uint32_t)(cd->cpu_nfault & 0x00000000ffffffff));
kpreempt_enable();
struct cpu_info *ci = curcpu();
CPU_COUNT(CPU_COUNT_NFAULT, 1);
/* Don't flood RNG subsystem with samples. */
if (++(ci->ci_faultrng) == 503) {
ci->ci_faultrng = 0;
rnd_add_uint32(&curcpu()->ci_data.cpu_uvm->rs,
sizeof(vaddr_t) == sizeof(uint32_t) ?
(uint32_t)vaddr : sizeof(vaddr_t) ==
sizeof(uint64_t) ?
(uint32_t)vaddr :
(uint32_t)ci->ci_counts[CPU_COUNT_NFAULT]);
}
}
norng:
kpreempt_enable();
/*
* init the IN parameters in the ufi
*/
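
The hunk above is the second item from the commit message: the old cd->cpu_nfault % 503 test performed a 64-bit integer divide on every fault, while the new per-CPU ci_faultrng countdown costs an increment and a compare. The pattern in isolation (illustrative; sample stands in for the value actually fed to the RNG):

	if (++(ci->ci_faultrng) == 503) {	/* no divide on the hot path */
		ci->ci_faultrng = 0;
		rnd_add_uint32(&ci->ci_data.cpu_uvm->rs, sample);
	}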
@ -1026,8 +1015,7 @@ uvm_fault_check(
" need to clear needs_copy and refault",0,0,0,0);
uvmfault_unlockmaps(ufi, false);
uvmfault_amapcopy(ufi);
atomic_store_relaxed(&uvmexp.fltamcopy,
atomic_load_relaxed(&uvmexp.fltamcopy) + 1);
cpu_count(CPU_COUNT_FLTAMCOPY, 1);
return ERESTART;
} else {
@ -1268,8 +1256,7 @@ uvm_fault_upper_neighbor(
UVMHIST_LOG(maphist,
" MAPPING: n anon: pm=%#jx, va=%#jx, pg=%#jx",
(uintptr_t)ufi->orig_map->pmap, currva, (uintptr_t)pg, 0);
atomic_store_relaxed(&uvmexp.fltnamap,
atomic_load_relaxed(&uvmexp.fltnamap) + 1);
cpu_count(CPU_COUNT_FLTNAMAP, 1);
/*
* Since this page isn't the page that's actually faulting,
@ -1465,8 +1452,7 @@ uvm_fault_upper_promote(
UVMHIST_FUNC("uvm_fault_upper_promote"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist, " case 1B: COW fault",0,0,0,0);
atomic_store_relaxed(&uvmexp.flt_acow,
atomic_load_relaxed(&uvmexp.flt_acow) + 1);
cpu_count(CPU_COUNT_FLT_ACOW, 1);
error = uvmfault_promote(ufi, oanon, PGO_DONTCARE, &anon,
&flt->anon_spare);
@ -1513,8 +1499,7 @@ uvm_fault_upper_direct(
struct vm_page *pg;
UVMHIST_FUNC("uvm_fault_upper_direct"); UVMHIST_CALLED(maphist);
atomic_store_relaxed(&uvmexp.flt_anon,
atomic_load_relaxed(&uvmexp.flt_anon) + 1);
cpu_count(CPU_COUNT_FLT_ANON, 1);
pg = anon->an_page;
if (anon->an_ref > 1) /* disallow writes to ref > 1 anons */
flt->enter_prot = flt->enter_prot & ~VM_PROT_WRITE;
@ -1780,8 +1765,7 @@ uvm_fault_lower_lookup(
mutex_enter(uobj->vmobjlock);
/* Locked: maps(read), amap(if there), uobj */
atomic_store_relaxed(&uvmexp.fltlget,
atomic_load_relaxed(&uvmexp.fltlget) + 1);
cpu_count(CPU_COUNT_FLTLGET, 1);
gotpages = flt->npages;
(void) uobj->pgops->pgo_get(uobj,
ufi->entry->offset + flt->startva - ufi->entry->start,
@ -1853,8 +1837,7 @@ uvm_fault_lower_neighbor(
UVMHIST_LOG(maphist,
" MAPPING: n obj: pm=%#jx, va=%#jx, pg=%#jx",
(uintptr_t)ufi->orig_map->pmap, currva, (uintptr_t)pg, 0);
atomic_store_relaxed(&uvmexp.fltnomap,
atomic_load_relaxed(&uvmexp.fltnomap) + 1);
cpu_count(CPU_COUNT_FLTNOMAP, 1);
/*
* Since this page isn't the page that's actually faulting,
@ -1920,8 +1903,7 @@ uvm_fault_lower_io(
/* Locked: uobj */
KASSERT(uobj == NULL || mutex_owned(uobj->vmobjlock));
atomic_store_relaxed(&uvmexp.fltget,
atomic_load_relaxed(&uvmexp.fltget) + 1);
cpu_count(CPU_COUNT_FLTGET, 1);
gotpages = 1;
pg = NULL;
error = uobj->pgops->pgo_get(uobj, uoff, &pg, &gotpages,
@ -2002,8 +1984,7 @@ uvm_fault_lower_io(
pg->flags &= ~(PG_BUSY | PG_WANTED);
UVM_PAGE_OWN(pg, NULL);
} else {
atomic_store_relaxed(&uvmexp.fltpgrele,
atomic_load_relaxed(&uvmexp.fltpgrele) + 1);
cpu_count(CPU_COUNT_FLTPGRELE, 1);
uvm_pagefree(pg);
}
mutex_exit(uobj->vmobjlock);
@ -2047,8 +2028,7 @@ uvm_fault_lower_direct(
* set "pg" to the page we want to map in (uobjpage, usually)
*/
atomic_store_relaxed(&uvmexp.flt_obj,
atomic_load_relaxed(&uvmexp.flt_obj) + 1);
cpu_count(CPU_COUNT_FLT_OBJ, 1);
if (UVM_ET_ISCOPYONWRITE(ufi->entry) ||
UVM_OBJ_NEEDS_WRITEFAULT(uobjpage->uobject))
flt->enter_prot &= ~VM_PROT_WRITE;
@ -2111,8 +2091,7 @@ uvm_fault_lower_direct_loan(
UVMHIST_LOG(maphist,
" out of RAM breaking loan, waiting",
0,0,0,0);
atomic_store_relaxed(&uvmexp.fltnoram,
atomic_load_relaxed(&uvmexp.fltnoram) + 1);
cpu_count(CPU_COUNT_FLTNORAM, 1);
uvm_wait("flt_noram4");
return ERESTART;
}
@ -2166,7 +2145,7 @@ uvm_fault_lower_promote(
KASSERT(uobj == NULL || (uobjpage->flags & PG_BUSY) != 0);
if (uobjpage != PGO_DONTCARE) {
uvmexp.flt_prcopy++;
cpu_count(CPU_COUNT_FLT_PRCOPY, 1);
/*
* promote to shared amap? make sure all sharing
@ -2197,7 +2176,7 @@ uvm_fault_lower_promote(
(uintptr_t)uobjpage, (uintptr_t)anon, (uintptr_t)pg, 0);
} else {
uvmexp.flt_przero++;
cpu_count(CPU_COUNT_FLT_PRZERO, 1);
/*
* Page is zero'd and marked dirty by


@ -1,4 +1,4 @@
/* $NetBSD: uvm_fault_i.h,v 1.31 2018/05/08 19:33:57 christos Exp $ */
/* $NetBSD: uvm_fault_i.h,v 1.32 2019/12/16 22:47:55 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -183,7 +183,7 @@ uvmfault_relock(struct uvm_faultinfo *ufi)
return true;
}
uvmexp.fltrelck++;
cpu_count(CPU_COUNT_FLTRELCK, 1);
/*
* relock map. fail if version mismatch (in which case nothing
@ -196,7 +196,7 @@ uvmfault_relock(struct uvm_faultinfo *ufi)
return(false);
}
uvmexp.fltrelckok++;
cpu_count(CPU_COUNT_FLTRELCKOK, 1);
return(true);
}


@ -1,4 +1,4 @@
/* $NetBSD: uvm_glue.c,v 1.170 2019/11/21 17:47:53 ad Exp $ */
/* $NetBSD: uvm_glue.c,v 1.171 2019/12/16 22:47:55 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -62,7 +62,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.170 2019/11/21 17:47:53 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.171 2019/12/16 22:47:55 ad Exp $");
#include "opt_kgdb.h"
#include "opt_kstack.h"
@ -501,6 +501,8 @@ uvm_scheduler(void)
lwp_unlock(l);
for (;;) {
/* Update legacy stats for post-mortem debugging. */
uvm_update_uvmexp();
sched_pstats();
(void)kpause("uvm", false, hz, NULL);
}


@ -1,4 +1,4 @@
/* $NetBSD: uvm_meter.c,v 1.69 2019/01/07 22:48:01 jdolecek Exp $ */
/* $NetBSD: uvm_meter.c,v 1.70 2019/12/16 22:47:55 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -36,7 +36,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.69 2019/01/07 22:48:01 jdolecek Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.70 2019/12/16 22:47:55 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -81,6 +81,8 @@ sysctl_vm_uvmexp(SYSCTLFN_ARGS)
{
struct sysctlnode node;
uvm_update_uvmexp();
node = *rnode;
if (oldlenp)
node.sysctl_size = uimin(*oldlenp, node.sysctl_size);
@ -94,9 +96,8 @@ sysctl_vm_uvmexp2(SYSCTLFN_ARGS)
struct sysctlnode node;
struct uvmexp_sysctl u;
int active, inactive;
CPU_INFO_ITERATOR cii;
struct cpu_info *ci;
cpu_count_sync_all();
uvm_estimatepageable(&active, &inactive);
memset(&u, 0, sizeof(u));
@ -111,7 +112,7 @@ sysctl_vm_uvmexp2(SYSCTLFN_ARGS)
u.inactive = inactive;
u.paging = uvmexp.paging;
u.wired = uvmexp.wired;
u.zeropages = uvmexp.zeropages;
u.zeropages = cpu_count_get(CPU_COUNT_ZEROPAGES);
u.reserve_pagedaemon = uvmexp.reserve_pagedaemon;
u.reserve_kernel = uvmexp.reserve_kernel;
u.freemin = uvmexp.freemin;
@ -123,43 +124,41 @@ sysctl_vm_uvmexp2(SYSCTLFN_ARGS)
u.swpginuse = uvmexp.swpginuse;
u.swpgonly = uvmexp.swpgonly;
u.nswget = uvmexp.nswget;
u.cpuhit = uvmexp.cpuhit;
u.cpumiss = uvmexp.cpumiss;
for (CPU_INFO_FOREACH(cii, ci)) {
u.faults += ci->ci_data.cpu_nfault;
u.traps += ci->ci_data.cpu_ntrap;
u.intrs += ci->ci_data.cpu_nintr;
u.swtch += ci->ci_data.cpu_nswtch;
u.softs += ci->ci_data.cpu_nsoft;
u.syscalls += ci->ci_data.cpu_nsyscall;
}
u.pageins = uvmexp.pageins;
u.pgswapin = uvmexp.pgswapin;
u.cpuhit = cpu_count_get(CPU_COUNT_CPUHIT);
u.cpumiss = cpu_count_get(CPU_COUNT_CPUMISS);
u.faults = cpu_count_get(CPU_COUNT_NFAULT);
u.traps = cpu_count_get(CPU_COUNT_NTRAP);
u.intrs = cpu_count_get(CPU_COUNT_NINTR);
u.swtch = cpu_count_get(CPU_COUNT_NSWTCH);
u.softs = cpu_count_get(CPU_COUNT_NSOFT);
u.syscalls = cpu_count_get(CPU_COUNT_NSYSCALL);
u.pageins = cpu_count_get(CPU_COUNT_PAGEINS);
u.pgswapin = 0; /* unused */
u.pgswapout = uvmexp.pgswapout;
u.forks = uvmexp.forks;
u.forks_ppwait = uvmexp.forks_ppwait;
u.forks_sharevm = uvmexp.forks_sharevm;
u.pga_zerohit = uvmexp.pga_zerohit;
u.pga_zeromiss = uvmexp.pga_zeromiss;
u.zeroaborts = uvmexp.zeroaborts;
u.fltnoram = uvmexp.fltnoram;
u.fltnoanon = uvmexp.fltnoanon;
u.fltpgwait = uvmexp.fltpgwait;
u.fltpgrele = uvmexp.fltpgrele;
u.fltrelck = uvmexp.fltrelck;
u.fltrelckok = uvmexp.fltrelckok;
u.fltanget = uvmexp.fltanget;
u.fltanretry = uvmexp.fltanretry;
u.fltamcopy = uvmexp.fltamcopy;
u.fltnamap = uvmexp.fltnamap;
u.fltnomap = uvmexp.fltnomap;
u.fltlget = uvmexp.fltlget;
u.fltget = uvmexp.fltget;
u.flt_anon = uvmexp.flt_anon;
u.flt_acow = uvmexp.flt_acow;
u.flt_obj = uvmexp.flt_obj;
u.flt_prcopy = uvmexp.flt_prcopy;
u.flt_przero = uvmexp.flt_przero;
u.forks = cpu_count_get(CPU_COUNT_FORKS);
u.forks_ppwait = cpu_count_get(CPU_COUNT_FORKS_PPWAIT);
u.forks_sharevm = cpu_count_get(CPU_COUNT_FORKS_SHAREVM);
u.pga_zerohit = cpu_count_get(CPU_COUNT_PGA_ZEROHIT);
u.pga_zeromiss = cpu_count_get(CPU_COUNT_PGA_ZEROMISS);
u.zeroaborts = cpu_count_get(CPU_COUNT_ZEROABORTS);
u.fltnoram = cpu_count_get(CPU_COUNT_FLTNORAM);
u.fltnoanon = cpu_count_get(CPU_COUNT_FLTNOANON);
u.fltpgwait = cpu_count_get(CPU_COUNT_FLTPGWAIT);
u.fltpgrele = cpu_count_get(CPU_COUNT_FLTPGRELE);
u.fltrelck = cpu_count_get(CPU_COUNT_FLTRELCK);
u.fltrelckok = cpu_count_get(CPU_COUNT_FLTRELCKOK);
u.fltanget = cpu_count_get(CPU_COUNT_FLTANGET);
u.fltanretry = cpu_count_get(CPU_COUNT_FLTANRETRY);
u.fltamcopy = cpu_count_get(CPU_COUNT_FLTAMCOPY);
u.fltnamap = cpu_count_get(CPU_COUNT_FLTNAMAP);
u.fltnomap = cpu_count_get(CPU_COUNT_FLTNOMAP);
u.fltlget = cpu_count_get(CPU_COUNT_FLTLGET);
u.fltget = cpu_count_get(CPU_COUNT_FLTGET);
u.flt_anon = cpu_count_get(CPU_COUNT_FLT_ANON);
u.flt_acow = cpu_count_get(CPU_COUNT_FLT_ACOW);
u.flt_obj = cpu_count_get(CPU_COUNT_FLT_OBJ);
u.flt_prcopy = cpu_count_get(CPU_COUNT_FLT_PRCOPY);
u.flt_przero = cpu_count_get(CPU_COUNT_FLT_PRZERO);
u.pdwoke = uvmexp.pdwoke;
u.pdrevs = uvmexp.pdrevs;
u.pdfreed = uvmexp.pdfreed;
@ -171,14 +170,16 @@ sysctl_vm_uvmexp2(SYSCTLFN_ARGS)
u.pdpageouts = uvmexp.pdpageouts;
u.pdpending = uvmexp.pdpending;
u.pddeact = uvmexp.pddeact;
u.anonpages = uvmexp.anonpages;
u.filepages = uvmexp.filepages;
u.execpages = uvmexp.execpages;
u.colorhit = uvmexp.colorhit;
u.colormiss = uvmexp.colormiss;
u.anonpages = cpu_count_get(CPU_COUNT_ANONPAGES);
u.filepages = cpu_count_get(CPU_COUNT_FILEPAGES);
u.execpages = cpu_count_get(CPU_COUNT_EXECPAGES);
u.colorhit = cpu_count_get(CPU_COUNT_COLORHIT);
u.colormiss = cpu_count_get(CPU_COUNT_COLORMISS);
u.ncolors = uvmexp.ncolors;
u.bootpages = uvmexp.bootpages;
u.poolpages = pool_totalpages();
u.countsyncone = cpu_count_get(CPU_COUNT_SYNC_ONE);
u.countsyncall = cpu_count_get(CPU_COUNT_SYNC_ALL);
node = *rnode;
node.sysctl_data = &u;
@ -436,3 +437,54 @@ uvm_pctparam_createsysctlnode(struct uvm_pctparam *pct, const char *name,
CTLTYPE_INT, name, SYSCTL_DESCR(desc),
uvm_sysctlpctparam, 0, (void *)pct, 0, CTL_VM, CTL_CREATE, CTL_EOL);
}
/*
* Update uvmexp with aggregate values from the per-CPU counters.
*/
void
uvm_update_uvmexp(void)
{
cpu_count_sync_all();
/* uvmexp.free = (int)cpu_count_get(CPU_COUNT_FREE); */
uvmexp.zeropages = (int)cpu_count_get(CPU_COUNT_ZEROPAGES);
uvmexp.cpuhit = (int)cpu_count_get(CPU_COUNT_CPUHIT);
uvmexp.cpumiss = (int)cpu_count_get(CPU_COUNT_CPUMISS);
uvmexp.faults = (int)cpu_count_get(CPU_COUNT_NFAULT);
uvmexp.traps = (int)cpu_count_get(CPU_COUNT_NTRAP);
uvmexp.intrs = (int)cpu_count_get(CPU_COUNT_NINTR);
uvmexp.swtch = (int)cpu_count_get(CPU_COUNT_NSWTCH);
uvmexp.softs = (int)cpu_count_get(CPU_COUNT_NSOFT);
uvmexp.syscalls = (int)cpu_count_get(CPU_COUNT_NSYSCALL);
uvmexp.pageins = (int)cpu_count_get(CPU_COUNT_PAGEINS);
uvmexp.forks = (int)cpu_count_get(CPU_COUNT_FORKS);
uvmexp.forks_ppwait = (int)cpu_count_get(CPU_COUNT_FORKS_PPWAIT);
uvmexp.forks_sharevm = (int)cpu_count_get(CPU_COUNT_FORKS_SHAREVM);
uvmexp.pga_zerohit = (int)cpu_count_get(CPU_COUNT_PGA_ZEROHIT);
uvmexp.pga_zeromiss = (int)cpu_count_get(CPU_COUNT_PGA_ZEROMISS);
uvmexp.fltnoram = (int)cpu_count_get(CPU_COUNT_FLTNORAM);
uvmexp.fltnoanon = (int)cpu_count_get(CPU_COUNT_FLTNOANON);
uvmexp.fltpgwait = (int)cpu_count_get(CPU_COUNT_FLTPGWAIT);
uvmexp.fltpgrele = (int)cpu_count_get(CPU_COUNT_FLTPGRELE);
uvmexp.fltrelck = (int)cpu_count_get(CPU_COUNT_FLTRELCK);
uvmexp.fltrelckok = (int)cpu_count_get(CPU_COUNT_FLTRELCKOK);
uvmexp.fltanget = (int)cpu_count_get(CPU_COUNT_FLTANGET);
uvmexp.fltanretry = (int)cpu_count_get(CPU_COUNT_FLTANRETRY);
uvmexp.fltamcopy = (int)cpu_count_get(CPU_COUNT_FLTAMCOPY);
uvmexp.fltnamap = (int)cpu_count_get(CPU_COUNT_FLTNAMAP);
uvmexp.fltnomap = (int)cpu_count_get(CPU_COUNT_FLTNOMAP);
uvmexp.fltlget = (int)cpu_count_get(CPU_COUNT_FLTLGET);
uvmexp.fltget = (int)cpu_count_get(CPU_COUNT_FLTGET);
uvmexp.flt_anon = (int)cpu_count_get(CPU_COUNT_FLT_ANON);
uvmexp.flt_acow = (int)cpu_count_get(CPU_COUNT_FLT_ACOW);
uvmexp.flt_obj = (int)cpu_count_get(CPU_COUNT_FLT_OBJ);
uvmexp.flt_prcopy = (int)cpu_count_get(CPU_COUNT_FLT_PRCOPY);
uvmexp.flt_przero = (int)cpu_count_get(CPU_COUNT_FLT_PRZERO);
uvmexp.anonpages = (int)cpu_count_get(CPU_COUNT_ANONPAGES);
uvmexp.filepages = (int)cpu_count_get(CPU_COUNT_FILEPAGES);
uvmexp.execpages = (int)cpu_count_get(CPU_COUNT_EXECPAGES);
uvmexp.colorhit = (int)cpu_count_get(CPU_COUNT_COLORHIT);
uvmexp.colormiss = (int)cpu_count_get(CPU_COUNT_COLORMISS);
uvmexp.zeroaborts = (int)cpu_count_get(CPU_COUNT_ZEROABORTS);
}


@ -1,4 +1,4 @@
/* $NetBSD: uvm_page.c,v 1.204 2019/12/16 18:30:18 ad Exp $ */
/* $NetBSD: uvm_page.c,v 1.205 2019/12/16 22:47:55 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -66,7 +66,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.204 2019/12/16 18:30:18 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.205 2019/12/16 22:47:55 ad Exp $");
#include "opt_ddb.h"
#include "opt_uvm.h"
@ -181,12 +181,12 @@ uvm_pageinsert_object(struct uvm_object *uobj, struct vm_page *pg)
vholdl(vp);
}
if (UVM_OBJ_IS_VTEXT(uobj)) {
atomic_inc_uint(&uvmexp.execpages);
cpu_count(CPU_COUNT_EXECPAGES, 1);
} else {
atomic_inc_uint(&uvmexp.filepages);
cpu_count(CPU_COUNT_FILEPAGES, 1);
}
} else if (UVM_OBJ_IS_AOBJ(uobj)) {
atomic_inc_uint(&uvmexp.anonpages);
cpu_count(CPU_COUNT_ANONPAGES, 1);
}
pg->flags |= PG_TABLED;
uobj->uo_npages++;
@ -242,12 +242,12 @@ uvm_pageremove_object(struct uvm_object *uobj, struct vm_page *pg)
holdrelel(vp);
}
if (UVM_OBJ_IS_VTEXT(uobj)) {
atomic_dec_uint(&uvmexp.execpages);
cpu_count(CPU_COUNT_EXECPAGES, -1);
} else {
atomic_dec_uint(&uvmexp.filepages);
cpu_count(CPU_COUNT_FILEPAGES, -1);
}
} else if (UVM_OBJ_IS_AOBJ(uobj)) {
atomic_dec_uint(&uvmexp.anonpages);
cpu_count(CPU_COUNT_ANONPAGES, -1);
}
/* object should be locked */
@ -805,7 +805,7 @@ uvm_pagealloc_pgfl(struct uvm_cpu *ucpu, int flist, int try1, int try2,
KASSERT(try1 == PGFL_UNKNOWN || (pg->flags & PG_ZERO));
KASSERT(ucpu == VM_FREE_PAGE_TO_CPU(pg));
VM_FREE_PAGE_TO_CPU(pg)->pages[try1]--;
uvmexp.cpuhit++;
CPU_COUNT(CPU_COUNT_CPUHIT, 1);
goto gotit;
}
/* global, try1 */
@ -816,7 +816,7 @@ uvm_pagealloc_pgfl(struct uvm_cpu *ucpu, int flist, int try1, int try2,
KASSERT(try1 == PGFL_UNKNOWN || (pg->flags & PG_ZERO));
KASSERT(ucpu != VM_FREE_PAGE_TO_CPU(pg));
VM_FREE_PAGE_TO_CPU(pg)->pages[try1]--;
uvmexp.cpumiss++;
CPU_COUNT(CPU_COUNT_CPUMISS, 1);
goto gotit;
}
/* cpu, try2 */
@ -827,7 +827,7 @@ uvm_pagealloc_pgfl(struct uvm_cpu *ucpu, int flist, int try1, int try2,
KASSERT(try2 == PGFL_UNKNOWN || (pg->flags & PG_ZERO));
KASSERT(ucpu == VM_FREE_PAGE_TO_CPU(pg));
VM_FREE_PAGE_TO_CPU(pg)->pages[try2]--;
uvmexp.cpuhit++;
CPU_COUNT(CPU_COUNT_CPUHIT, 1);
goto gotit;
}
/* global, try2 */
@ -838,7 +838,7 @@ uvm_pagealloc_pgfl(struct uvm_cpu *ucpu, int flist, int try1, int try2,
KASSERT(try2 == PGFL_UNKNOWN || (pg->flags & PG_ZERO));
KASSERT(ucpu != VM_FREE_PAGE_TO_CPU(pg));
VM_FREE_PAGE_TO_CPU(pg)->pages[try2]--;
uvmexp.cpumiss++;
CPU_COUNT(CPU_COUNT_CPUMISS, 1);
goto gotit;
}
color = (color + 1) & uvmexp.colormask;
@ -853,12 +853,12 @@ uvm_pagealloc_pgfl(struct uvm_cpu *ucpu, int flist, int try1, int try2,
/* update zero'd page count */
if (pg->flags & PG_ZERO)
uvmexp.zeropages--;
CPU_COUNT(CPU_COUNT_ZEROPAGES, -1);
if (color == trycolor)
uvmexp.colorhit++;
CPU_COUNT(CPU_COUNT_COLORHIT, 1);
else {
uvmexp.colormiss++;
CPU_COUNT(CPU_COUNT_COLORMISS, 1);
*trycolorp = color;
}
@ -1009,10 +1009,10 @@ uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
if (flags & UVM_PGA_ZERO) {
if (pg->flags & PG_ZERO) {
uvmexp.pga_zerohit++;
CPU_COUNT(CPU_COUNT_PGA_ZEROHIT, 1);
zeroit = 0;
} else {
uvmexp.pga_zeromiss++;
CPU_COUNT(CPU_COUNT_PGA_ZEROMISS, 1);
zeroit = 1;
}
if (ucpu->pages[PGFL_ZEROS] < ucpu->pages[PGFL_UNKNOWN]) {
@ -1041,7 +1041,7 @@ uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
if (anon) {
anon->an_page = pg;
pg->flags |= PG_ANON;
atomic_inc_uint(&uvmexp.anonpages);
cpu_count(CPU_COUNT_ANONPAGES, 1);
} else if (obj) {
error = uvm_pageinsert(obj, pg);
if (error != 0) {
@ -1224,7 +1224,7 @@ uvm_pagefree(struct vm_page *pg)
pg->loan_count--;
} else {
pg->flags &= ~PG_ANON;
atomic_dec_uint(&uvmexp.anonpages);
cpu_count(CPU_COUNT_ANONPAGES, -1);
}
pg->uanon->an_page = NULL;
pg->uanon = NULL;
@ -1260,7 +1260,7 @@ uvm_pagefree(struct vm_page *pg)
} else if (pg->uanon != NULL) {
pg->uanon->an_page = NULL;
pg->uanon = NULL;
atomic_dec_uint(&uvmexp.anonpages);
cpu_count(CPU_COUNT_ANONPAGES, -1);
}
/*
@ -1308,7 +1308,7 @@ uvm_pagefree(struct vm_page *pg)
LIST_INSERT_HEAD(pgfl, pg, pageq.list);
uvmexp.free++;
if (iszero) {
uvmexp.zeropages++;
CPU_COUNT(CPU_COUNT_ZEROPAGES, 1);
}
/* per-cpu list */
@ -1488,7 +1488,7 @@ uvm_pageidlezero(void)
PGFL_UNKNOWN], pg, listq.list);
ucpu->pages[PGFL_UNKNOWN]++;
uvmexp.free++;
uvmexp.zeroaborts++;
CPU_COUNT(CPU_COUNT_ZEROABORTS, 1);
goto quit;
}
#else
@ -1509,7 +1509,7 @@ uvm_pageidlezero(void)
pg, listq.list);
ucpu->pages[PGFL_ZEROS]++;
uvmexp.free++;
uvmexp.zeropages++;
CPU_COUNT(CPU_COUNT_ZEROPAGES, 1);
}
}
if (ucpu->pages[PGFL_UNKNOWN] < uvmexp.ncolors) {


@ -1,4 +1,4 @@
/* $NetBSD: uvm_pdpolicy_clock.c,v 1.19 2019/12/16 19:18:26 ad Exp $ */
/* $NetBSD: uvm_pdpolicy_clock.c,v 1.20 2019/12/16 22:47:55 ad Exp $ */
/* NetBSD: uvm_pdaemon.c,v 1.72 2006/01/05 10:47:33 yamt Exp $ */
/*
@ -69,7 +69,7 @@
#else /* defined(PDSIM) */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clock.c,v 1.19 2019/12/16 19:18:26 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clock.c,v 1.20 2019/12/16 22:47:55 ad Exp $");
#include <sys/param.h>
#include <sys/proc.h>
@ -147,20 +147,27 @@ uvmpdpol_scaninit(void)
bool anonunder, fileunder, execunder;
bool anonover, fileover, execover;
bool anonreact, filereact, execreact;
int64_t freepg, anonpg, filepg, execpg;
/*
* decide which types of pages we want to reactivate instead of freeing
* to keep usage within the minimum and maximum usage limits.
*/
cpu_count_sync_all();
freepg = uvmexp.free;
anonpg = cpu_count_get(CPU_COUNT_ANONPAGES);
filepg = cpu_count_get(CPU_COUNT_FILEPAGES);
execpg = cpu_count_get(CPU_COUNT_EXECPAGES);
mutex_enter(&s->lock);
t = s->s_active + s->s_inactive + uvmexp.free;
anonunder = uvmexp.anonpages <= UVM_PCTPARAM_APPLY(&s->s_anonmin, t);
fileunder = uvmexp.filepages <= UVM_PCTPARAM_APPLY(&s->s_filemin, t);
execunder = uvmexp.execpages <= UVM_PCTPARAM_APPLY(&s->s_execmin, t);
anonover = uvmexp.anonpages > UVM_PCTPARAM_APPLY(&s->s_anonmax, t);
fileover = uvmexp.filepages > UVM_PCTPARAM_APPLY(&s->s_filemax, t);
execover = uvmexp.execpages > UVM_PCTPARAM_APPLY(&s->s_execmax, t);
t = s->s_active + s->s_inactive + freepg;
anonunder = anonpg <= UVM_PCTPARAM_APPLY(&s->s_anonmin, t);
fileunder = filepg <= UVM_PCTPARAM_APPLY(&s->s_filemin, t);
execunder = execpg <= UVM_PCTPARAM_APPLY(&s->s_execmin, t);
anonover = anonpg > UVM_PCTPARAM_APPLY(&s->s_anonmax, t);
fileover = filepg > UVM_PCTPARAM_APPLY(&s->s_filemax, t);
execover = execpg > UVM_PCTPARAM_APPLY(&s->s_execmax, t);
anonreact = anonunder || (!anonover && (fileover || execover));
filereact = fileunder || (!fileover && (anonover || execover));
execreact = execunder || (!execover && (anonover || fileover));


@ -1,4 +1,4 @@
/* $NetBSD: uvm_pglist.c,v 1.73 2019/12/13 20:10:22 ad Exp $ */
/* $NetBSD: uvm_pglist.c,v 1.74 2019/12/16 22:47:55 ad Exp $ */
/*-
* Copyright (c) 1997 The NetBSD Foundation, Inc.
@ -35,7 +35,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.73 2019/12/13 20:10:22 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.74 2019/12/16 22:47:55 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -105,7 +105,7 @@ uvm_pglist_add(struct vm_page *pg, struct pglist *rlist)
LIST_REMOVE(pg, listq.list); /* cpu */
uvmexp.free--;
if (pg->flags & PG_ZERO)
uvmexp.zeropages--;
CPU_COUNT(CPU_COUNT_ZEROPAGES, -1);
VM_FREE_PAGE_TO_CPU(pg)->pages[pgflidx]--;
pg->flags = PG_CLEAN;
pg->uobject = NULL;
@ -592,7 +592,7 @@ uvm_pglistfree(struct pglist *list)
pgfl_queues[queue], pg, listq.list);
uvmexp.free++;
if (iszero)
uvmexp.zeropages++;
CPU_COUNT(CPU_COUNT_ZEROPAGES, 1);
ucpu->pages[queue]++;
STAT_DECR(uvm_pglistalloc_npages);
}


@ -1,4 +1,4 @@
/* $NetBSD: uvm_stat.c,v 1.40 2019/05/09 08:16:15 skrll Exp $ */
/* $NetBSD: uvm_stat.c,v 1.41 2019/12/16 22:47:55 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_stat.c,v 1.40 2019/05/09 08:16:15 skrll Exp $");
__KERNEL_RCSID(0, "$NetBSD: uvm_stat.c,v 1.41 2019/12/16 22:47:55 ad Exp $");
#include "opt_readahead.h"
#include "opt_ddb.h"
@ -57,50 +57,66 @@ uvmexp_print(void (*pr)(const char *, ...)
{
int active, inactive;
int poolpages;
CPU_INFO_ITERATOR cii;
struct cpu_info *ci;
uvm_estimatepageable(&active, &inactive);
poolpages = pool_totalpages_locked();
cpu_count_sync_all();
(*pr)("Current UVM status:\n");
(*pr)(" pagesize=%d (0x%x), pagemask=0x%x, pageshift=%d, ncolors=%d\n",
uvmexp.pagesize, uvmexp.pagesize, uvmexp.pagemask,
uvmexp.pageshift, uvmexp.ncolors);
(*pr)(" %d VM pages: %d active, %d inactive, %d wired, %d free\n",
uvmexp.npages, active, inactive, uvmexp.wired,
uvmexp.free);
(*pr)(" pages %d anon, %d file, %d exec\n",
uvmexp.anonpages, uvmexp.filepages, uvmexp.execpages);
uvmexp.npages, active, inactive, uvmexp.wired, uvmexp.free);
(*pr)(" pages %" PRId64 " anon, %" PRId64 " file, %" PRId64 " exec\n",
cpu_count_get(CPU_COUNT_ANONPAGES),
cpu_count_get(CPU_COUNT_FILEPAGES),
cpu_count_get(CPU_COUNT_EXECPAGES));
(*pr)(" freemin=%d, free-target=%d, wired-max=%d\n",
uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax);
(*pr)(" resv-pg=%d, resv-kernel=%d, zeropages=%d\n",
uvmexp.reserve_pagedaemon, uvmexp.reserve_kernel, uvmexp.zeropages);
(*pr)(" resv-pg=%d, resv-kernel=%d, zeropages=%" PRId64 "\n",
uvmexp.reserve_pagedaemon, uvmexp.reserve_kernel,
cpu_count_get(CPU_COUNT_ZEROPAGES));
(*pr)(" bootpages=%d, poolpages=%d\n",
uvmexp.bootpages, poolpages);
for (CPU_INFO_FOREACH(cii, ci)) {
(*pr)(" cpu%u:\n", cpu_index(ci));
(*pr)(" faults=%" PRIu64 ", traps=%" PRIu64 ", "
"intrs=%" PRIu64 ", ctxswitch=%" PRIu64 "\n",
ci->ci_data.cpu_nfault, ci->ci_data.cpu_ntrap,
ci->ci_data.cpu_nintr, ci->ci_data.cpu_nswtch);
(*pr)(" softint=%" PRIu64 ", syscalls=%" PRIu64 "\n",
ci->ci_data.cpu_nsoft, ci->ci_data.cpu_nsyscall);
}
(*pr)(" faults=%" PRId64 ", traps=%" PRId64 ", "
"intrs=%" PRId64 ", ctxswitch=%" PRId64 "\n",
cpu_count_get(CPU_COUNT_NFAULT),
cpu_count_get(CPU_COUNT_NTRAP),
cpu_count_get(CPU_COUNT_NINTR),
cpu_count_get(CPU_COUNT_NSWTCH));
(*pr)(" softint=%" PRId64 ", syscalls=%" PRId64 "\n",
cpu_count_get(CPU_COUNT_NSOFT),
cpu_count_get(CPU_COUNT_NSYSCALL));
(*pr)(" fault counts:\n");
(*pr)(" noram=%d, noanon=%d, pgwait=%d, pgrele=%d\n",
uvmexp.fltnoram, uvmexp.fltnoanon, uvmexp.fltpgwait,
uvmexp.fltpgrele);
(*pr)(" ok relocks(total)=%d(%d), anget(retrys)=%d(%d), "
"amapcopy=%d\n", uvmexp.fltrelckok, uvmexp.fltrelck,
uvmexp.fltanget, uvmexp.fltanretry, uvmexp.fltamcopy);
(*pr)(" neighbor anon/obj pg=%d/%d, gets(lock/unlock)=%d/%d\n",
uvmexp.fltnamap, uvmexp.fltnomap, uvmexp.fltlget, uvmexp.fltget);
(*pr)(" cases: anon=%d, anoncow=%d, obj=%d, prcopy=%d, przero=%d\n",
uvmexp.flt_anon, uvmexp.flt_acow, uvmexp.flt_obj, uvmexp.flt_prcopy,
uvmexp.flt_przero);
(*pr)(" noram=%" PRId64 ", noanon=%" PRId64 ", pgwait=%" PRId64
", pgrele=%" PRId64 "\n",
cpu_count_get(CPU_COUNT_FLTNORAM),
cpu_count_get(CPU_COUNT_FLTNOANON),
cpu_count_get(CPU_COUNT_FLTPGWAIT),
cpu_count_get(CPU_COUNT_FLTPGRELE));
(*pr)(" ok relocks(total)=%" PRId64 "(%" PRId64 "), "
"anget(retrys)=%" PRId64 "(%" PRId64 "), amapcopy=%" PRId64 "\n",
cpu_count_get(CPU_COUNT_FLTRELCKOK),
cpu_count_get(CPU_COUNT_FLTRELCK),
cpu_count_get(CPU_COUNT_FLTANGET),
cpu_count_get(CPU_COUNT_FLTANRETRY),
cpu_count_get(CPU_COUNT_FLTAMCOPY));
(*pr)(" neighbor anon/obj pg=%" PRId64 "/%" PRId64
", gets(lock/unlock)=%" PRId64 "/%" PRId64 "\n",
cpu_count_get(CPU_COUNT_FLTNAMAP),
cpu_count_get(CPU_COUNT_FLTNOMAP),
cpu_count_get(CPU_COUNT_FLTLGET),
cpu_count_get(CPU_COUNT_FLTGET));
(*pr)(" cases: anon=%" PRId64 ", anoncow=%" PRId64 ", obj=%" PRId64
", prcopy=%" PRId64 ", przero=%" PRId64 "\n",
cpu_count_get(CPU_COUNT_FLT_ANON),
cpu_count_get(CPU_COUNT_FLT_ACOW),
cpu_count_get(CPU_COUNT_FLT_OBJ),
cpu_count_get(CPU_COUNT_FLT_PRCOPY),
cpu_count_get(CPU_COUNT_FLT_PRZERO));
(*pr)(" daemon and swap counts:\n");
(*pr)(" woke=%d, revs=%d, scans=%d, obscans=%d, anscans=%d\n",