/*	$NetBSD: kern_clock.c,v 1.141 2020/05/08 22:10:09 ad Exp $	*/

/*-
 * Copyright (c) 2000, 2004, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_clock.c,v 1.141 2020/05/08 22:10:09 ad Exp $");

#ifdef _KERNEL_OPT
#include "opt_dtrace.h"
#include "opt_gprof.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/timex.h>
#include <sys/sched.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/cpu.h>
#include <sys/atomic.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

cyclic_clock_func_t	cyclic_clock_func[MAXCPUS];
#endif

static int sysctl_kern_clockrate(SYSCTLFN_PROTO);

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.  The main clock, running hz times per second, is used to keep
 * track of real time.  The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the CPU
 * just before its quantum expires.  Otherwise, it would never accumulate
 * CPU ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 */
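
/*
 * Worked example (hypothetical values, for illustration only): with
 * stathz = 128 and profhz = 1024, psratio is 8.  While any process is
 * being profiled the statistics hardware is sped up to profhz, and only
 * every 8th tick is counted for statistics, keeping the effective
 * statistics rate at stathz.
 */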

int	stathz;			/* statistics clock's rate (0 if none) */
int	profhz;			/* profiling clock's rate */
int	profsrc;		/* profiling source */
int	schedhz;		/* scheduler clock's rate (0 if none) */
int	profprocs;		/* number of processes being profiled */
int	hardclock_ticks;	/* number of hardclock ticks */
static int hardscheddiv;	/* hard => sched divider (used if schedhz == 0) */
static int psdiv;		/* prof => stat divider */
int	psratio;		/* ratio: prof / stat */

static u_int get_intr_timecount(struct timecounter *);

static struct timecounter intr_timecounter = {
	.tc_get_timecount = get_intr_timecount,
	.tc_poll_pps = NULL,
	.tc_counter_mask = ~0u,
	.tc_frequency = 0,
	.tc_name = "clockinterrupt",
	/* quality - minimum implementation level for a clock */
	.tc_quality = 0,
	.tc_priv = NULL,
};

static u_int
get_intr_timecount(struct timecounter *tc)
{

	return (u_int)getticks();
}

int
getticks(void)
{
	return atomic_load_relaxed(&hardclock_ticks);
}
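
/*
 * Example use (hypothetical caller, sketch only): getticks() is a
 * lockless, coarse tick count, so elapsed time can be bounded without
 * reading the timecounter:
 *
 *	int start = getticks();
 *	... do some work ...
 *	if (getticks() - start >= hz)
 *		printf("roughly a second or more elapsed\n");
 */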

/*
 * Initialize clock frequencies and start both clocks running.
 */
void
initclocks(void)
{
	static struct sysctllog *clog;
	int i;

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	psdiv = 1;
	/*
	 * Provide a minimum default time counter; it runs only at
	 * interrupt (hz) resolution.
	 */
	intr_timecounter.tc_frequency = hz;
	tc_init(&intr_timecounter);
	cpu_initclocks();

	/*
	 * Compute profhz and stathz, fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
	if (schedhz == 0) {
		/* 16Hz is best */
		hardscheddiv = hz / 16;
		if (hardscheddiv <= 0)
			panic("hardscheddiv");
	}

	sysctl_createv(&clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRUCT, "clockrate",
	    SYSCTL_DESCR("Kernel clock rates"),
	    sysctl_kern_clockrate, 0, NULL,
	    sizeof(struct clockinfo),
	    CTL_KERN, KERN_CLOCKRATE, CTL_EOL);
	sysctl_createv(&clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_INT, "hardclock_ticks",
	    SYSCTL_DESCR("Number of hardclock ticks"),
	    NULL, 0, &hardclock_ticks, sizeof(hardclock_ticks),
	    CTL_KERN, KERN_HARDCLOCK_TICKS, CTL_EOL);
}
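
/*
 * For instance (hypothetical configuration): with hz = 100 and no
 * separate statistics clock (stathz = 0), i = 100, profhz defaults to
 * 100 and psratio to 1; hardscheddiv becomes 100 / 16 = 6, so the
 * schedclock() path in hardclock() fires about every 6 ticks, i.e.
 * at roughly 16 Hz.
 */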

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(struct clockframe *frame)
{
	struct lwp *l;
	struct cpu_info *ci;

	ci = curcpu();
	l = ci->ci_onproc;

	timer_tick(l, CLKF_USERMODE(frame));

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);
	/*
	 * If no separate schedclock is provided, call it here
	 * at about 16 Hz.
	 */
	if (schedhz == 0) {
		if ((int)(--ci->ci_schedstate.spc_schedticks) <= 0) {
			schedclock(l);
			ci->ci_schedstate.spc_schedticks = hardscheddiv;
		}
	}
	if ((--ci->ci_schedstate.spc_ticks) <= 0)
		sched_tick(ci);

	if (CPU_IS_PRIMARY(ci)) {
		atomic_store_relaxed(&hardclock_ticks,
		    atomic_load_relaxed(&hardclock_ticks) + 1);
		tc_ticktock();
	}

	/*
	 * Update real-time timeout queue.
	 */
	callout_hardclock();
}

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0, which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{

	KASSERT(mutex_owned(&p->p_stmutex));

	if ((p->p_stflag & PST_PROFIL) == 0) {
		p->p_stflag |= PST_PROFIL;
		/*
		 * This is only necessary if using the clock as the
		 * profiling source.
		 */
		if (++profprocs == 1 && stathz != 0)
			psdiv = psratio;
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(struct proc *p)
{

	KASSERT(mutex_owned(&p->p_stmutex));

	if (p->p_stflag & PST_PROFIL) {
		p->p_stflag &= ~PST_PROFIL;
		/*
		 * This is only necessary if using the clock as the
		 * profiling source.
		 */
		if (--profprocs == 0 && stathz != 0)
			psdiv = 1;
	}
}
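
/*
 * Per the KASSERTs above, a caller brackets both routines with the
 * process's p_stmutex, e.g. (illustrative sketch only):
 *
 *	mutex_spin_enter(&p->p_stmutex);
 *	startprofclock(p);
 *	mutex_spin_exit(&p->p_stmutex);
 */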

/*
 * Scheduler clock.  Called at about 16 Hz from hardclock(), or at
 * schedhz if the platform provides a separate clock for it; feeds
 * the scheduler's per-LWP accounting.
 */
void
schedclock(struct lwp *l)
{
	if ((l->l_flag & LW_IDLE) != 0)
		return;

	sched_schedclock(l);
}

/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 */
void
statclock(struct clockframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	intptr_t i;
#endif
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct proc *p;
	struct lwp *l;

	/*
	 * Notice changes in divisor frequency, and adjust clock
	 * frequency accordingly.
	 */
	if (spc->spc_psdiv != psdiv) {
		spc->spc_psdiv = psdiv;
		spc->spc_pscnt = psdiv;
		if (psdiv == 1) {
			setstatclockrate(stathz);
		} else {
			setstatclockrate(profhz);
		}
	}
	l = ci->ci_onproc;
	if ((l->l_flag & LW_IDLE) != 0) {
		/*
		 * Don't account idle LWPs as swapper.
		 */
		p = NULL;
	} else {
		p = l->l_proc;
		mutex_spin_enter(&p->p_stmutex);
	}

	if (CLKF_USERMODE(frame)) {
		KASSERT(p != NULL);
		if ((p->p_stflag & PST_PROFIL) && profsrc == PROFSRC_CLOCK)
			addupc_intr(l, CLKF_PC(frame));
		if (--spc->spc_pscnt > 0) {
			mutex_spin_exit(&p->p_stmutex);
			return;
		}

		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled, record the tick.
		 */
		p->p_uticks++;
		if (p->p_nice > NZERO)
			spc->spc_cp_time[CP_NICE]++;
		else
			spc->spc_cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (profsrc == PROFSRC_CLOCK && g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
#ifdef LWP_PC
		if (p != NULL && profsrc == PROFSRC_CLOCK &&
		    (p->p_stflag & PST_PROFIL)) {
			addupc_intr(l, LWP_PC(l));
		}
#endif
		if (--spc->spc_pscnt > 0) {
			if (p != NULL)
				mutex_spin_exit(&p->p_stmutex);
			return;
		}
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if (CLKF_INTR(frame) || (curlwp->l_pflag & LP_INTR) != 0) {
			if (p != NULL) {
				p->p_iticks++;
			}
			spc->spc_cp_time[CP_INTR]++;
		} else if (p != NULL) {
			p->p_sticks++;
			spc->spc_cp_time[CP_SYS]++;
		} else {
			spc->spc_cp_time[CP_IDLE]++;
		}
	}
	spc->spc_pscnt = psdiv;

	if (p != NULL) {
		atomic_inc_uint(&l->l_cpticks);
		mutex_spin_exit(&p->p_stmutex);
	}

#ifdef KDTRACE_HOOKS
	cyclic_clock_func_t func = cyclic_clock_func[cpu_index(ci)];
	if (func) {
		(*func)((struct clockframe *)frame);
	}
#endif
}

/*
 * sysctl helper routine for kern.clockrate.  Assembles a struct on
 * the fly to be returned to the caller.
 */
static int
sysctl_kern_clockrate(SYSCTLFN_ARGS)
{
	struct clockinfo clkinfo;
	struct sysctlnode node;

	clkinfo.tick = tick;
	clkinfo.tickadj = tickadj;
	clkinfo.hz = hz;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;

	node = *rnode;
	node.sysctl_data = &clkinfo;
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}
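
/*
 * A userland consumer reads this node with sysctl(3), e.g.
 * (illustrative sketch, error handling elided):
 *
 *	struct clockinfo clk;
 *	size_t len = sizeof(clk);
 *	int mib[2] = { CTL_KERN, KERN_CLOCKRATE };
 *
 *	if (sysctl(mib, 2, &clk, &len, NULL, 0) == 0)
 *		printf("hz=%d stathz=%d profhz=%d\n",
 *		    clk.hz, clk.stathz, clk.profhz);
 */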