2011-04-08 14:35:37 +04:00
|
|
|
/* $NetBSD: kern_time.c,v 1.168 2011/04/08 10:35:37 yamt Exp $ */
|
2000-02-04 02:04:45 +03:00
|
|
|
|
|
|
|
/*-
|
2009-01-31 02:11:27 +03:00
|
|
|
* Copyright (c) 2000, 2004, 2005, 2007, 2008, 2009 The NetBSD Foundation, Inc.
|
2000-02-04 02:04:45 +03:00
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* This code is derived from software contributed to The NetBSD Foundation
|
2009-01-31 02:11:27 +03:00
|
|
|
* by Christopher G. Demetriou, and by Andrew Doran.
|
2000-02-04 02:04:45 +03:00
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
|
|
|
|
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
|
|
|
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
|
|
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
|
|
|
|
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
|
|
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
|
|
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
|
|
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
|
|
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
|
|
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
|
|
* POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
1994-06-29 10:29:24 +04:00
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
/*
|
1994-05-20 11:24:51 +04:00
|
|
|
* Copyright (c) 1982, 1986, 1989, 1993
|
|
|
|
* The Regents of the University of California. All rights reserved.
|
1993-03-21 12:45:37 +03:00
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
2003-08-07 20:26:28 +04:00
|
|
|
* 3. Neither the name of the University nor the names of its contributors
|
1993-03-21 12:45:37 +03:00
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
1998-03-01 05:20:01 +03:00
|
|
|
* @(#)kern_time.c 8.4 (Berkeley) 5/26/95
|
1993-03-21 12:45:37 +03:00
|
|
|
*/
|
|
|
|
|
2001-11-12 18:25:01 +03:00
|
|
|
#include <sys/cdefs.h>
|
2011-04-08 14:35:37 +04:00
|
|
|
__KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.168 2011/04/08 10:35:37 yamt Exp $");
|
1998-02-19 03:47:02 +03:00
|
|
|
|
1993-12-18 06:59:02 +03:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/resourcevar.h>
|
|
|
|
#include <sys/kernel.h>
|
1994-05-20 11:24:51 +04:00
|
|
|
#include <sys/systm.h>
|
1993-12-18 06:59:02 +03:00
|
|
|
#include <sys/proc.h>
|
1994-05-20 11:24:51 +04:00
|
|
|
#include <sys/vnode.h>
|
1996-02-04 05:15:01 +03:00
|
|
|
#include <sys/signalvar.h>
|
1997-01-15 04:28:28 +03:00
|
|
|
#include <sys/syslog.h>
|
2006-06-08 02:33:33 +04:00
|
|
|
#include <sys/timetc.h>
|
2008-04-21 16:56:30 +04:00
|
|
|
#include <sys/timex.h>
|
2006-05-15 01:15:11 +04:00
|
|
|
#include <sys/kauth.h>
|
1994-10-20 07:22:35 +03:00
|
|
|
#include <sys/mount.h>
|
2008-10-15 10:51:17 +04:00
|
|
|
#include <sys/sa.h>
|
|
|
|
#include <sys/savar.h>
|
1994-10-20 07:22:35 +03:00
|
|
|
#include <sys/syscallargs.h>
|
2008-04-21 16:56:30 +04:00
|
|
|
#include <sys/cpu.h>
|
1994-10-20 07:22:35 +03:00
|
|
|
|
1999-06-08 02:33:53 +04:00
|
|
|
#include <uvm/uvm_extern.h>
|
|
|
|
|
2008-10-15 10:51:17 +04:00
|
|
|
#include "opt_sa.h"
|
|
|
|
|
2008-04-21 04:13:46 +04:00
|
|
|
static void timer_intr(void *);
|
|
|
|
static void itimerfire(struct ptimer *);
|
|
|
|
static void itimerfree(struct ptimers *, int);
|
|
|
|
|
|
|
|
kmutex_t timer_lock;
|
|
|
|
|
|
|
|
static void *timer_sih;
|
|
|
|
static TAILQ_HEAD(, ptimer) timer_queue;
|
2007-11-15 23:12:04 +03:00
|
|
|
|
2009-09-13 22:45:10 +04:00
|
|
|
struct pool ptimer_pool, ptimers_pool;
|
2005-11-26 08:26:33 +03:00
|
|
|
|
2011-04-08 14:35:37 +04:00
|
|
|
#define CLOCK_VIRTUAL_P(clockid) \
|
|
|
|
((clockid) == CLOCK_VIRTUAL || (clockid) == CLOCK_PROF)
|
|
|
|
|
|
|
|
CTASSERT(ITIMER_REAL == CLOCK_REALTIME);
|
|
|
|
CTASSERT(ITIMER_VIRTUAL == CLOCK_VIRTUAL);
|
|
|
|
CTASSERT(ITIMER_PROF == CLOCK_PROF);
|
|
|
|
|
2007-11-15 23:12:04 +03:00
|
|
|
/*
|
|
|
|
* Initialize timekeeping.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
time_init(void)
|
|
|
|
{
|
|
|
|
|
2009-09-13 22:45:10 +04:00
|
|
|
pool_init(&ptimer_pool, sizeof(struct ptimer), 0, 0, 0, "ptimerpl",
|
|
|
|
&pool_allocator_nointr, IPL_NONE);
|
|
|
|
pool_init(&ptimers_pool, sizeof(struct ptimers), 0, 0, 0, "ptimerspl",
|
|
|
|
&pool_allocator_nointr, IPL_NONE);
|
2007-11-15 23:12:04 +03:00
|
|
|
}
|
|
|
|
|
2008-04-21 04:13:46 +04:00
|
|
|
void
|
|
|
|
time_init2(void)
|
|
|
|
{
|
|
|
|
|
|
|
|
TAILQ_INIT(&timer_queue);
|
|
|
|
mutex_init(&timer_lock, MUTEX_DEFAULT, IPL_SCHED);
|
|
|
|
timer_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
|
|
|
|
timer_intr, NULL);
|
|
|
|
}
|
|
|
|
|
2003-01-18 13:06:22 +03:00
|
|
|
/* Time of day and interval timer support.
|
1993-03-21 12:45:37 +03:00
|
|
|
*
|
|
|
|
* These routines provide the kernel entry points to get and set
|
|
|
|
* the time-of-day and per-process interval timers. Subroutines
|
|
|
|
* here provide support for adding and subtracting timeval structures
|
|
|
|
* and decrementing interval timers, optionally reloading the interval
|
|
|
|
* timers when they expire.
|
|
|
|
*/
|
|
|
|
|
1996-11-16 01:44:25 +03:00
|
|
|
/* This function is used by clock_settime and settimeofday */
|
2007-11-25 03:35:26 +03:00
|
|
|
static int
|
2009-01-11 05:45:45 +03:00
|
|
|
settime1(struct proc *p, const struct timespec *ts, bool check_kauth)
|
1996-11-16 01:44:25 +03:00
|
|
|
{
|
2009-01-11 05:45:45 +03:00
|
|
|
struct timespec delta, now;
|
2007-10-09 00:06:17 +04:00
|
|
|
int s;
|
1996-11-16 01:44:25 +03:00
|
|
|
|
|
|
|
/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
|
2007-10-09 00:06:17 +04:00
|
|
|
s = splclock();
|
2009-01-11 05:45:45 +03:00
|
|
|
nanotime(&now);
|
|
|
|
timespecsub(ts, &now, &delta);
|
2007-11-25 03:35:26 +03:00
|
|
|
|
2007-12-08 16:31:03 +03:00
|
|
|
if (check_kauth && kauth_authorize_system(kauth_cred_get(),
|
2009-01-11 05:45:45 +03:00
|
|
|
KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_SYSTEM, __UNCONST(ts),
|
|
|
|
&delta, KAUTH_ARG(check_kauth ? false : true)) != 0) {
|
2007-10-09 00:06:17 +04:00
|
|
|
splx(s);
|
1997-04-27 01:22:57 +04:00
|
|
|
return (EPERM);
|
2001-06-11 11:07:12 +04:00
|
|
|
}
|
2007-11-25 03:35:26 +03:00
|
|
|
|
1997-04-27 01:22:57 +04:00
|
|
|
#ifdef notyet
|
2006-10-21 02:22:48 +04:00
|
|
|
if ((delta.tv_sec < 86400) && securelevel > 0) { /* XXX elad - notyet */
|
2007-10-09 00:06:17 +04:00
|
|
|
splx(s);
|
1997-04-27 01:22:57 +04:00
|
|
|
return (EPERM);
|
2001-06-11 11:07:12 +04:00
|
|
|
}
|
1997-04-27 01:22:57 +04:00
|
|
|
#endif
|
2006-07-15 02:44:28 +04:00
|
|
|
|
2009-01-11 05:45:45 +03:00
|
|
|
tc_setclock(ts);
|
2006-07-15 02:44:28 +04:00
|
|
|
|
2009-01-11 05:45:45 +03:00
|
|
|
timespecadd(&boottime, &delta, &boottime);
|
2006-07-15 02:44:28 +04:00
|
|
|
|
1996-11-16 01:44:25 +03:00
|
|
|
resettodr();
|
2007-10-09 00:06:17 +04:00
|
|
|
splx(s);
|
|
|
|
|
1997-04-27 01:22:57 +04:00
|
|
|
return (0);
|
1996-11-16 01:44:25 +03:00
|
|
|
}
|
|
|
|
|
2007-11-25 03:35:26 +03:00
|
|
|
int
|
|
|
|
settime(struct proc *p, struct timespec *ts)
|
|
|
|
{
|
|
|
|
return (settime1(p, ts, true));
|
|
|
|
}
|
|
|
|
|
1996-11-16 01:44:25 +03:00
|
|
|
/* ARGSUSED */
|
|
|
|
int
|
2009-01-11 05:45:45 +03:00
|
|
|
sys___clock_gettime50(struct lwp *l,
|
|
|
|
const struct sys___clock_gettime50_args *uap, register_t *retval)
|
1996-11-16 01:44:25 +03:00
|
|
|
{
|
2007-12-21 02:02:38 +03:00
|
|
|
/* {
|
1996-11-16 01:44:25 +03:00
|
|
|
syscallarg(clockid_t) clock_id;
|
1996-11-16 02:53:32 +03:00
|
|
|
syscallarg(struct timespec *) tp;
|
2007-12-21 02:02:38 +03:00
|
|
|
} */
|
2010-04-08 15:51:13 +04:00
|
|
|
int error;
|
1996-11-16 01:44:25 +03:00
|
|
|
struct timespec ats;
|
|
|
|
|
2010-04-08 15:51:13 +04:00
|
|
|
error = clock_gettime1(SCARG(uap, clock_id), &ats);
|
|
|
|
if (error != 0)
|
|
|
|
return error;
|
|
|
|
|
|
|
|
return copyout(&ats, SCARG(uap, tp), sizeof(ats));
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
clock_gettime1(clockid_t clock_id, struct timespec *ts)
|
|
|
|
{
|
|
|
|
|
2002-01-31 03:13:07 +03:00
|
|
|
switch (clock_id) {
|
|
|
|
case CLOCK_REALTIME:
|
2010-04-08 15:51:13 +04:00
|
|
|
nanotime(ts);
|
2002-01-31 03:13:07 +03:00
|
|
|
break;
|
|
|
|
case CLOCK_MONOTONIC:
|
2010-04-08 15:51:13 +04:00
|
|
|
nanouptime(ts);
|
2002-01-31 03:13:07 +03:00
|
|
|
break;
|
|
|
|
default:
|
2010-04-08 15:51:13 +04:00
|
|
|
return EINVAL;
|
2002-01-31 03:13:07 +03:00
|
|
|
}
|
1996-11-16 01:44:25 +03:00
|
|
|
|
2010-04-08 15:51:13 +04:00
|
|
|
return 0;
|
1996-11-16 01:44:25 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/* ARGSUSED */
|
|
|
|
int
|
2009-01-11 05:45:45 +03:00
|
|
|
sys___clock_settime50(struct lwp *l,
|
|
|
|
const struct sys___clock_settime50_args *uap, register_t *retval)
|
1996-11-16 01:44:25 +03:00
|
|
|
{
|
2007-12-21 02:02:38 +03:00
|
|
|
/* {
|
1996-11-16 01:44:25 +03:00
|
|
|
syscallarg(clockid_t) clock_id;
|
1996-11-16 02:53:32 +03:00
|
|
|
syscallarg(const struct timespec *) tp;
|
2007-12-21 02:02:38 +03:00
|
|
|
} */
|
2009-01-11 05:45:45 +03:00
|
|
|
int error;
|
|
|
|
struct timespec ats;
|
1996-11-16 01:44:25 +03:00
|
|
|
|
2009-01-11 05:45:45 +03:00
|
|
|
if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
|
|
|
|
return error;
|
|
|
|
|
|
|
|
return clock_settime1(l->l_proc, SCARG(uap, clock_id), &ats, true);
|
2001-09-16 10:50:06 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
int
|
2007-11-25 03:35:26 +03:00
|
|
|
clock_settime1(struct proc *p, clockid_t clock_id, const struct timespec *tp,
|
|
|
|
bool check_kauth)
|
2001-09-16 10:50:06 +04:00
|
|
|
{
|
|
|
|
int error;
|
|
|
|
|
2002-01-31 03:13:07 +03:00
|
|
|
switch (clock_id) {
|
|
|
|
case CLOCK_REALTIME:
|
2009-01-11 05:45:45 +03:00
|
|
|
if ((error = settime1(p, tp, check_kauth)) != 0)
|
2002-01-31 03:13:07 +03:00
|
|
|
return (error);
|
|
|
|
break;
|
|
|
|
case CLOCK_MONOTONIC:
|
|
|
|
return (EINVAL); /* read-only clock */
|
|
|
|
default:
|
2001-09-16 10:50:06 +04:00
|
|
|
return (EINVAL);
|
2002-01-31 03:13:07 +03:00
|
|
|
}
|
1996-11-16 01:44:25 +03:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2009-01-11 05:45:45 +03:00
|
|
|
sys___clock_getres50(struct lwp *l, const struct sys___clock_getres50_args *uap,
|
2008-02-19 17:21:56 +03:00
|
|
|
register_t *retval)
|
1996-11-16 01:44:25 +03:00
|
|
|
{
|
2007-12-21 02:02:38 +03:00
|
|
|
/* {
|
1996-11-16 01:44:25 +03:00
|
|
|
syscallarg(clockid_t) clock_id;
|
1996-11-16 02:53:32 +03:00
|
|
|
syscallarg(struct timespec *) tp;
|
2007-12-21 02:02:38 +03:00
|
|
|
} */
|
1996-11-16 01:44:25 +03:00
|
|
|
struct timespec ts;
|
|
|
|
int error = 0;
|
|
|
|
|
2010-04-03 21:20:05 +04:00
|
|
|
if ((error = clock_getres1(SCARG(uap, clock_id), &ts)) != 0)
|
|
|
|
return error;
|
|
|
|
|
|
|
|
if (SCARG(uap, tp))
|
|
|
|
error = copyout(&ts, SCARG(uap, tp), sizeof(ts));
|
|
|
|
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
clock_getres1(clockid_t clock_id, struct timespec *ts)
|
|
|
|
{
|
|
|
|
|
2002-01-31 03:13:07 +03:00
|
|
|
switch (clock_id) {
|
|
|
|
case CLOCK_REALTIME:
|
|
|
|
case CLOCK_MONOTONIC:
|
2010-04-03 21:20:05 +04:00
|
|
|
ts->tv_sec = 0;
|
2006-07-08 16:10:33 +04:00
|
|
|
if (tc_getfrequency() > 1000000000)
|
2010-04-03 21:20:05 +04:00
|
|
|
ts->tv_nsec = 1;
|
2006-07-08 16:10:33 +04:00
|
|
|
else
|
2010-04-03 21:20:05 +04:00
|
|
|
ts->tv_nsec = 1000000000 / tc_getfrequency();
|
2002-01-31 03:13:07 +03:00
|
|
|
break;
|
|
|
|
default:
|
2010-04-03 21:20:05 +04:00
|
|
|
return EINVAL;
|
2002-01-31 03:13:07 +03:00
|
|
|
}
|
1996-11-16 01:44:25 +03:00
|
|
|
|
2010-04-03 21:20:05 +04:00
|
|
|
return 0;
|
1996-11-16 01:44:25 +03:00
|
|
|
}
|
|
|
|
|
1997-04-16 18:41:28 +04:00
|
|
|
/* ARGSUSED */
|
|
|
|
int
|
2009-01-11 05:45:45 +03:00
|
|
|
sys___nanosleep50(struct lwp *l, const struct sys___nanosleep50_args *uap,
|
2008-02-19 17:21:56 +03:00
|
|
|
register_t *retval)
|
1997-04-16 18:41:28 +04:00
|
|
|
{
|
2007-12-21 02:02:38 +03:00
|
|
|
/* {
|
2006-06-08 02:33:33 +04:00
|
|
|
syscallarg(struct timespec *) rqtp;
|
|
|
|
syscallarg(struct timespec *) rmtp;
|
2007-12-21 02:02:38 +03:00
|
|
|
} */
|
2006-06-08 02:33:33 +04:00
|
|
|
struct timespec rmt, rqt;
|
2007-05-13 14:34:25 +04:00
|
|
|
int error, error1;
|
2006-06-08 02:33:33 +04:00
|
|
|
|
|
|
|
error = copyin(SCARG(uap, rqtp), &rqt, sizeof(struct timespec));
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
|
2007-05-13 14:34:25 +04:00
|
|
|
error = nanosleep1(l, &rqt, SCARG(uap, rmtp) ? &rmt : NULL);
|
|
|
|
if (SCARG(uap, rmtp) == NULL || (error != 0 && error != EINTR))
|
|
|
|
return error;
|
|
|
|
|
|
|
|
error1 = copyout(&rmt, SCARG(uap, rmtp), sizeof(rmt));
|
|
|
|
return error1 ? error1 : error;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
nanosleep1(struct lwp *l, struct timespec *rqt, struct timespec *rmt)
|
|
|
|
{
|
2008-02-25 15:25:03 +03:00
|
|
|
struct timespec rmtstart;
|
2007-05-13 14:34:25 +04:00
|
|
|
int error, timo;
|
|
|
|
|
2009-03-29 23:21:19 +04:00
|
|
|
if ((error = itimespecfix(rqt)) != 0)
|
|
|
|
return error;
|
2006-06-08 02:33:33 +04:00
|
|
|
|
2007-05-13 14:34:25 +04:00
|
|
|
timo = tstohz(rqt);
|
2006-06-08 02:33:33 +04:00
|
|
|
/*
|
|
|
|
* Avoid inadvertantly sleeping forever
|
|
|
|
*/
|
|
|
|
if (timo == 0)
|
|
|
|
timo = 1;
|
2008-02-25 15:25:03 +03:00
|
|
|
getnanouptime(&rmtstart);
|
|
|
|
again:
|
|
|
|
error = kpause("nanoslp", true, timo, NULL);
|
|
|
|
if (rmt != NULL || error == 0) {
|
|
|
|
struct timespec rmtend;
|
|
|
|
struct timespec t0;
|
|
|
|
struct timespec *t;
|
2006-06-08 02:33:33 +04:00
|
|
|
|
2008-02-25 15:25:03 +03:00
|
|
|
getnanouptime(&rmtend);
|
|
|
|
t = (rmt != NULL) ? rmt : &t0;
|
|
|
|
timespecsub(&rmtend, &rmtstart, t);
|
|
|
|
timespecsub(rqt, t, t);
|
|
|
|
if (t->tv_sec < 0)
|
|
|
|
timespecclear(t);
|
|
|
|
if (error == 0) {
|
|
|
|
timo = tstohz(t);
|
|
|
|
if (timo > 0)
|
|
|
|
goto again;
|
|
|
|
}
|
|
|
|
}
|
2006-07-16 23:23:11 +04:00
|
|
|
|
2006-06-08 02:33:33 +04:00
|
|
|
if (error == ERESTART)
|
|
|
|
error = EINTR;
|
|
|
|
if (error == EWOULDBLOCK)
|
|
|
|
error = 0;
|
|
|
|
|
|
|
|
return error;
|
1997-04-16 18:41:28 +04:00
|
|
|
}
|
1996-11-16 01:44:25 +03:00
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
/* ARGSUSED */
|
1993-06-27 10:01:27 +04:00
|
|
|
int
|
2009-01-11 05:45:45 +03:00
|
|
|
sys___gettimeofday50(struct lwp *l, const struct sys___gettimeofday50_args *uap,
|
2008-02-19 17:21:56 +03:00
|
|
|
register_t *retval)
|
1995-09-20 01:40:36 +04:00
|
|
|
{
|
2007-12-21 02:02:38 +03:00
|
|
|
/* {
|
1994-10-20 07:22:35 +03:00
|
|
|
syscallarg(struct timeval *) tp;
|
2007-12-21 02:02:38 +03:00
|
|
|
syscallarg(void *) tzp; really "struct timezone *";
|
|
|
|
} */
|
1993-03-21 12:45:37 +03:00
|
|
|
struct timeval atv;
|
|
|
|
int error = 0;
|
1997-01-15 04:28:28 +03:00
|
|
|
struct timezone tzfake;
|
1993-03-21 12:45:37 +03:00
|
|
|
|
1994-10-20 07:22:35 +03:00
|
|
|
if (SCARG(uap, tp)) {
|
1993-03-21 12:45:37 +03:00
|
|
|
microtime(&atv);
|
1998-08-01 02:50:48 +04:00
|
|
|
error = copyout(&atv, SCARG(uap, tp), sizeof(atv));
|
1996-02-04 05:15:01 +03:00
|
|
|
if (error)
|
1993-03-21 12:45:37 +03:00
|
|
|
return (error);
|
|
|
|
}
|
1997-01-15 04:28:28 +03:00
|
|
|
if (SCARG(uap, tzp)) {
|
|
|
|
/*
|
1998-02-20 10:22:14 +03:00
|
|
|
* NetBSD has no kernel notion of time zone, so we just
|
1997-01-15 04:28:28 +03:00
|
|
|
* fake up a timezone struct and return it if demanded.
|
|
|
|
*/
|
|
|
|
tzfake.tz_minuteswest = 0;
|
|
|
|
tzfake.tz_dsttime = 0;
|
1998-08-01 02:50:48 +04:00
|
|
|
error = copyout(&tzfake, SCARG(uap, tzp), sizeof(tzfake));
|
1997-01-15 04:28:28 +03:00
|
|
|
}
|
1993-03-21 12:45:37 +03:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ARGSUSED */
|
1993-06-27 10:01:27 +04:00
|
|
|
int
|
2009-01-11 05:45:45 +03:00
|
|
|
sys___settimeofday50(struct lwp *l, const struct sys___settimeofday50_args *uap,
|
2008-02-19 17:21:56 +03:00
|
|
|
register_t *retval)
|
1995-09-20 01:40:36 +04:00
|
|
|
{
|
2007-12-21 02:02:38 +03:00
|
|
|
/* {
|
1996-12-22 13:21:06 +03:00
|
|
|
syscallarg(const struct timeval *) tv;
|
2008-02-19 17:21:56 +03:00
|
|
|
syscallarg(const void *) tzp; really "const struct timezone *";
|
2007-12-21 02:02:38 +03:00
|
|
|
} */
|
2001-12-09 19:10:43 +03:00
|
|
|
|
2007-05-13 00:27:13 +04:00
|
|
|
return settimeofday1(SCARG(uap, tv), true, SCARG(uap, tzp), l, true);
|
2001-12-09 19:10:43 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2007-05-13 00:27:13 +04:00
|
|
|
settimeofday1(const struct timeval *utv, bool userspace,
|
|
|
|
const void *utzp, struct lwp *l, bool check_kauth)
|
2001-12-09 19:10:43 +03:00
|
|
|
{
|
1996-11-16 01:44:25 +03:00
|
|
|
struct timeval atv;
|
2005-12-05 03:16:33 +03:00
|
|
|
struct timespec ts;
|
1996-11-16 01:44:25 +03:00
|
|
|
int error;
|
1993-03-21 12:45:37 +03:00
|
|
|
|
1994-05-20 11:24:51 +04:00
|
|
|
/* Verify all parameters before changing time. */
|
2007-05-13 00:27:13 +04:00
|
|
|
|
1997-01-15 04:28:28 +03:00
|
|
|
/*
|
1998-02-20 10:22:14 +03:00
|
|
|
* NetBSD has no kernel notion of time zone, and only an
|
1997-01-15 04:28:28 +03:00
|
|
|
* obsolete program would try to set it, so we log a warning.
|
|
|
|
*/
|
2005-12-05 03:16:33 +03:00
|
|
|
if (utzp)
|
1997-01-15 04:28:28 +03:00
|
|
|
log(LOG_WARNING, "pid %d attempted to set the "
|
2007-05-13 00:27:13 +04:00
|
|
|
"(obsolete) kernel time zone\n", l->l_proc->p_pid);
|
2005-12-05 03:16:33 +03:00
|
|
|
|
|
|
|
if (utv == NULL)
|
|
|
|
return 0;
|
|
|
|
|
2007-05-13 00:27:13 +04:00
|
|
|
if (userspace) {
|
|
|
|
if ((error = copyin(utv, &atv, sizeof(atv))) != 0)
|
|
|
|
return error;
|
|
|
|
utv = &atv;
|
|
|
|
}
|
|
|
|
|
|
|
|
TIMEVAL_TO_TIMESPEC(utv, &ts);
|
2007-11-25 11:43:11 +03:00
|
|
|
return settime1(l->l_proc, &ts, check_kauth);
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
|
|
|
|
2003-04-17 01:34:15 +04:00
|
|
|
int time_adjusted; /* set if an adjustment is made */
|
1993-03-21 12:45:37 +03:00
|
|
|
|
|
|
|
/* ARGSUSED */
|
1993-06-27 10:01:27 +04:00
|
|
|
int
|
2009-01-11 05:45:45 +03:00
|
|
|
sys___adjtime50(struct lwp *l, const struct sys___adjtime50_args *uap,
|
2008-02-19 17:21:56 +03:00
|
|
|
register_t *retval)
|
1995-09-20 01:40:36 +04:00
|
|
|
{
|
2007-12-21 02:02:38 +03:00
|
|
|
/* {
|
1996-12-22 13:21:06 +03:00
|
|
|
syscallarg(const struct timeval *) delta;
|
1994-10-20 07:22:35 +03:00
|
|
|
syscallarg(struct timeval *) olddelta;
|
2007-12-21 02:02:38 +03:00
|
|
|
} */
|
2009-01-11 05:45:45 +03:00
|
|
|
int error = 0;
|
|
|
|
struct timeval atv, oldatv;
|
1993-03-21 12:45:37 +03:00
|
|
|
|
First take at security model abstraction.
- Add a few scopes to the kernel: system, network, and machdep.
- Add a few more actions/sub-actions (requests), and start using them as
opposed to the KAUTH_GENERIC_ISSUSER place-holders.
- Introduce a basic set of listeners that implement our "traditional"
security model, called "bsd44". This is the default (and only) model we
have at the moment.
- Update all relevant documentation.
- Add some code and docs to help folks who want to actually use this stuff:
* There's a sample overlay model, sitting on-top of "bsd44", for
fast experimenting with tweaking just a subset of an existing model.
This is pretty cool because it's *really* straightforward to do stuff
you had to use ugly hacks for until now...
* And of course, documentation describing how to do the above for quick
reference, including code samples.
All of these changes were tested for regressions using a Python-based
testsuite that will be (I hope) available soon via pkgsrc. Information
about the tests, and how to write new ones, can be found on:
http://kauth.linbsd.org/kauthwiki
NOTE FOR DEVELOPERS: *PLEASE* don't add any code that does any of the
following:
- Uses a KAUTH_GENERIC_ISSUSER kauth(9) request,
- Checks 'securelevel' directly,
- Checks a uid/gid directly.
(or if you feel you have to, contact me first)
This is still work in progress; It's far from being done, but now it'll
be a lot easier.
Relevant mailing list threads:
http://mail-index.netbsd.org/tech-security/2006/01/25/0011.html
http://mail-index.netbsd.org/tech-security/2006/03/24/0001.html
http://mail-index.netbsd.org/tech-security/2006/04/18/0000.html
http://mail-index.netbsd.org/tech-security/2006/05/15/0000.html
http://mail-index.netbsd.org/tech-security/2006/08/01/0000.html
http://mail-index.netbsd.org/tech-security/2006/08/25/0000.html
Many thanks to YAMAMOTO Takashi, Matt Thomas, and Christos Zoulas for help
stablizing kauth(9).
Full credit for the regression tests, making sure these changes didn't break
anything, goes to Matt Fleming and Jaime Fournier.
Happy birthday Randi! :)
2006-09-09 00:58:56 +04:00
|
|
|
if ((error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_TIME,
|
|
|
|
KAUTH_REQ_SYSTEM_TIME_ADJTIME, NULL, NULL, NULL)) != 0)
|
2009-01-11 05:45:45 +03:00
|
|
|
return error;
|
1996-02-04 05:15:01 +03:00
|
|
|
|
2009-01-11 05:45:45 +03:00
|
|
|
if (SCARG(uap, delta)) {
|
|
|
|
error = copyin(SCARG(uap, delta), &atv,
|
|
|
|
sizeof(*SCARG(uap, delta)));
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
adjtime1(SCARG(uap, delta) ? &atv : NULL,
|
|
|
|
SCARG(uap, olddelta) ? &oldatv : NULL, l->l_proc);
|
|
|
|
if (SCARG(uap, olddelta))
|
|
|
|
error = copyout(&oldatv, SCARG(uap, olddelta),
|
|
|
|
sizeof(*SCARG(uap, olddelta)));
|
|
|
|
return error;
|
2001-09-16 10:50:06 +04:00
|
|
|
}
|
|
|
|
|
2009-01-11 05:45:45 +03:00
|
|
|
void
|
2006-11-01 13:17:58 +03:00
|
|
|
adjtime1(const struct timeval *delta, struct timeval *olddelta, struct proc *p)
|
2001-09-16 10:50:06 +04:00
|
|
|
{
|
2006-06-08 02:33:33 +04:00
|
|
|
extern int64_t time_adjtime; /* in kern_ntptime.c */
|
|
|
|
|
|
|
|
if (olddelta) {
|
2008-04-21 16:56:30 +04:00
|
|
|
mutex_spin_enter(&timecounter_lock);
|
2009-01-11 05:45:45 +03:00
|
|
|
olddelta->tv_sec = time_adjtime / 1000000;
|
|
|
|
olddelta->tv_usec = time_adjtime % 1000000;
|
|
|
|
if (olddelta->tv_usec < 0) {
|
|
|
|
olddelta->tv_usec += 1000000;
|
|
|
|
olddelta->tv_sec--;
|
2006-06-08 02:33:33 +04:00
|
|
|
}
|
2009-01-11 18:57:29 +03:00
|
|
|
mutex_spin_exit(&timecounter_lock);
|
2006-06-08 02:33:33 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
if (delta) {
|
2008-04-21 16:56:30 +04:00
|
|
|
mutex_spin_enter(&timecounter_lock);
|
2009-01-11 18:57:29 +03:00
|
|
|
time_adjtime = delta->tv_sec * 1000000 + delta->tv_usec;
|
2009-01-11 05:45:45 +03:00
|
|
|
|
2008-04-21 16:56:30 +04:00
|
|
|
if (time_adjtime) {
|
2006-06-08 02:33:33 +04:00
|
|
|
/* We need to save the system time during shutdown */
|
|
|
|
time_adjusted |= 1;
|
2008-04-21 16:56:30 +04:00
|
|
|
}
|
|
|
|
mutex_spin_exit(&timecounter_lock);
|
2006-06-08 02:33:33 +04:00
|
|
|
}
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2003-01-18 13:06:22 +03:00
|
|
|
* Interval timer support. Both the BSD getitimer() family and the POSIX
|
|
|
|
* timer_*() family of routines are supported.
|
1993-03-21 12:45:37 +03:00
|
|
|
*
|
2003-01-18 13:06:22 +03:00
|
|
|
* All timers are kept in an array pointed to by p_timers, which is
|
|
|
|
* allocated on demand - many processes don't use timers at all. The
|
|
|
|
* first three elements in this array are reserved for the BSD timers:
|
|
|
|
* element 0 is ITIMER_REAL, element 1 is ITIMER_VIRTUAL, and element
|
|
|
|
* 2 is ITIMER_PROF. The rest may be allocated by the timer_create()
|
|
|
|
* syscall.
|
1993-03-21 12:45:37 +03:00
|
|
|
*
|
2003-01-18 13:06:22 +03:00
|
|
|
* Realtime timers are kept in the ptimer structure as an absolute
|
|
|
|
* time; virtual time timers are kept as a linked list of deltas.
|
1993-03-21 12:45:37 +03:00
|
|
|
* Virtual time timers are processed in the hardclock() routine of
|
2003-01-18 13:06:22 +03:00
|
|
|
* kern_clock.c. The real time timer is processed by a callout
|
|
|
|
* routine, called from the softclock() routine. Since a callout may
|
|
|
|
* be delayed in real time due to interrupt processing in the system,
|
|
|
|
* it is possible for the real time timeout routine (realtimeexpire,
|
|
|
|
* given below), to be delayed in real time past when it is supposed
|
|
|
|
* to occur. It does not suffice, therefore, to reload the real timer
|
|
|
|
* .it_value from the real time timers .it_interval. Rather, we
|
|
|
|
* compute the next time in absolute time the timer should go off. */
|
|
|
|
|
|
|
|
/* Allocate a POSIX realtime timer. */
|
1993-06-27 10:01:27 +04:00
|
|
|
int
|
2008-02-19 17:21:56 +03:00
|
|
|
sys_timer_create(struct lwp *l, const struct sys_timer_create_args *uap,
|
|
|
|
register_t *retval)
|
1995-09-20 01:40:36 +04:00
|
|
|
{
|
2007-12-21 02:02:38 +03:00
|
|
|
/* {
|
2003-01-18 13:06:22 +03:00
|
|
|
syscallarg(clockid_t) clock_id;
|
|
|
|
syscallarg(struct sigevent *) evp;
|
|
|
|
syscallarg(timer_t *) timerid;
|
2007-12-21 02:02:38 +03:00
|
|
|
} */
|
2005-07-23 22:54:07 +04:00
|
|
|
|
|
|
|
return timer_create1(SCARG(uap, timerid), SCARG(uap, clock_id),
|
2006-07-24 02:06:03 +04:00
|
|
|
SCARG(uap, evp), copyin, l);
|
2005-07-23 22:54:07 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
timer_create1(timer_t *tid, clockid_t id, struct sigevent *evp,
|
2006-07-24 02:06:03 +04:00
|
|
|
copyin_t fetch_event, struct lwp *l)
|
2005-07-23 22:54:07 +04:00
|
|
|
{
|
2003-02-04 18:46:39 +03:00
|
|
|
int error;
|
2005-07-23 22:54:07 +04:00
|
|
|
timer_t timerid;
|
2008-04-21 04:13:46 +04:00
|
|
|
struct ptimers *pts;
|
2005-07-23 22:54:07 +04:00
|
|
|
struct ptimer *pt;
|
2006-07-24 02:06:03 +04:00
|
|
|
struct proc *p;
|
|
|
|
|
|
|
|
p = l->l_proc;
|
2003-01-18 13:06:22 +03:00
|
|
|
|
2011-04-08 14:35:37 +04:00
|
|
|
if (id != CLOCK_REALTIME && id != CLOCK_VIRTUAL &&
|
|
|
|
id != CLOCK_PROF && id != CLOCK_MONOTONIC)
|
2003-01-18 13:06:22 +03:00
|
|
|
return (EINVAL);
|
|
|
|
|
2008-04-21 04:13:46 +04:00
|
|
|
if ((pts = p->p_timers) == NULL)
|
|
|
|
pts = timers_alloc(p);
|
2003-01-18 13:06:22 +03:00
|
|
|
|
|
|
|
pt = pool_get(&ptimer_pool, PR_WAITOK);
|
2008-04-21 04:13:46 +04:00
|
|
|
if (evp != NULL) {
|
2003-01-18 13:06:22 +03:00
|
|
|
if (((error =
|
2005-07-23 22:54:07 +04:00
|
|
|
(*fetch_event)(evp, &pt->pt_ev, sizeof(pt->pt_ev))) != 0) ||
|
2003-01-18 13:06:22 +03:00
|
|
|
((pt->pt_ev.sigev_notify < SIGEV_NONE) ||
|
2009-12-10 15:39:12 +03:00
|
|
|
(pt->pt_ev.sigev_notify > SIGEV_SA)) ||
|
|
|
|
(pt->pt_ev.sigev_notify == SIGEV_SIGNAL &&
|
|
|
|
(pt->pt_ev.sigev_signo <= 0 ||
|
|
|
|
pt->pt_ev.sigev_signo >= NSIG))) {
|
2003-01-18 13:06:22 +03:00
|
|
|
pool_put(&ptimer_pool, pt);
|
|
|
|
return (error ? error : EINVAL);
|
|
|
|
}
|
2008-04-21 04:13:46 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Find a free timer slot, skipping those reserved for setitimer(). */
|
|
|
|
mutex_spin_enter(&timer_lock);
|
|
|
|
for (timerid = 3; timerid < TIMER_MAX; timerid++)
|
|
|
|
if (pts->pts_timers[timerid] == NULL)
|
|
|
|
break;
|
|
|
|
if (timerid == TIMER_MAX) {
|
|
|
|
mutex_spin_exit(&timer_lock);
|
|
|
|
pool_put(&ptimer_pool, pt);
|
|
|
|
return EAGAIN;
|
|
|
|
}
|
|
|
|
if (evp == NULL) {
|
2003-01-18 13:06:22 +03:00
|
|
|
pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
|
|
|
|
switch (id) {
|
|
|
|
case CLOCK_REALTIME:
|
2011-04-08 14:35:37 +04:00
|
|
|
case CLOCK_MONOTONIC:
|
2003-01-18 13:06:22 +03:00
|
|
|
pt->pt_ev.sigev_signo = SIGALRM;
|
|
|
|
break;
|
|
|
|
case CLOCK_VIRTUAL:
|
|
|
|
pt->pt_ev.sigev_signo = SIGVTALRM;
|
|
|
|
break;
|
|
|
|
case CLOCK_PROF:
|
|
|
|
pt->pt_ev.sigev_signo = SIGPROF;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
pt->pt_ev.sigev_value.sival_int = timerid;
|
|
|
|
}
|
2003-09-07 02:03:09 +04:00
|
|
|
pt->pt_info.ksi_signo = pt->pt_ev.sigev_signo;
|
|
|
|
pt->pt_info.ksi_errno = 0;
|
|
|
|
pt->pt_info.ksi_code = 0;
|
|
|
|
pt->pt_info.ksi_pid = p->p_pid;
|
2006-07-24 02:06:03 +04:00
|
|
|
pt->pt_info.ksi_uid = kauth_cred_getuid(l->l_cred);
|
2007-05-21 19:35:47 +04:00
|
|
|
pt->pt_info.ksi_value = pt->pt_ev.sigev_value;
|
2003-01-18 13:06:22 +03:00
|
|
|
pt->pt_type = id;
|
|
|
|
pt->pt_proc = p;
|
|
|
|
pt->pt_overruns = 0;
|
|
|
|
pt->pt_poverruns = 0;
|
2003-02-04 02:39:40 +03:00
|
|
|
pt->pt_entry = timerid;
|
2008-04-21 04:13:46 +04:00
|
|
|
pt->pt_queued = false;
|
2008-07-15 20:18:08 +04:00
|
|
|
timespecclear(&pt->pt_time.it_value);
|
2011-04-08 14:35:37 +04:00
|
|
|
if (!CLOCK_VIRTUAL_P(id))
|
|
|
|
callout_init(&pt->pt_ch, CALLOUT_MPSAFE);
|
2008-07-09 00:53:02 +04:00
|
|
|
else
|
|
|
|
pt->pt_active = 0;
|
|
|
|
|
2008-04-21 04:13:46 +04:00
|
|
|
pts->pts_timers[timerid] = pt;
|
|
|
|
mutex_spin_exit(&timer_lock);
|
2003-01-18 13:06:22 +03:00
|
|
|
|
2005-07-23 22:54:07 +04:00
|
|
|
return copyout(&timerid, tid, sizeof(timerid));
|
2003-01-18 13:06:22 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Delete a POSIX realtime timer */
|
|
|
|
int
|
2008-02-19 17:21:56 +03:00
|
|
|
sys_timer_delete(struct lwp *l, const struct sys_timer_delete_args *uap,
|
|
|
|
register_t *retval)
|
2003-01-18 13:06:22 +03:00
|
|
|
{
|
2007-12-21 02:02:38 +03:00
|
|
|
/* {
|
2003-01-18 13:06:22 +03:00
|
|
|
syscallarg(timer_t) timerid;
|
2007-12-21 02:02:38 +03:00
|
|
|
} */
|
2003-01-18 13:06:22 +03:00
|
|
|
struct proc *p = l->l_proc;
|
2003-02-04 18:46:39 +03:00
|
|
|
timer_t timerid;
|
2008-04-21 04:13:46 +04:00
|
|
|
struct ptimers *pts;
|
2003-01-18 13:06:22 +03:00
|
|
|
struct ptimer *pt, *ptn;
|
1993-03-21 12:45:37 +03:00
|
|
|
|
2003-01-18 13:06:22 +03:00
|
|
|
timerid = SCARG(uap, timerid);
|
2008-04-21 04:13:46 +04:00
|
|
|
pts = p->p_timers;
|
|
|
|
|
|
|
|
if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
|
1993-03-21 12:45:37 +03:00
|
|
|
return (EINVAL);
|
2003-01-18 13:06:22 +03:00
|
|
|
|
2008-04-21 04:13:46 +04:00
|
|
|
mutex_spin_enter(&timer_lock);
|
|
|
|
if ((pt = pts->pts_timers[timerid]) == NULL) {
|
|
|
|
mutex_spin_exit(&timer_lock);
|
|
|
|
return (EINVAL);
|
|
|
|
}
|
2011-04-08 14:35:37 +04:00
|
|
|
if (CLOCK_VIRTUAL_P(pt->pt_type)) {
|
2008-07-09 00:53:02 +04:00
|
|
|
if (pt->pt_active) {
|
|
|
|
ptn = LIST_NEXT(pt, pt_list);
|
|
|
|
LIST_REMOVE(pt, pt_list);
|
|
|
|
for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
|
2008-07-15 20:18:08 +04:00
|
|
|
timespecadd(&pt->pt_time.it_value,
|
2008-07-09 00:53:02 +04:00
|
|
|
&ptn->pt_time.it_value,
|
|
|
|
&ptn->pt_time.it_value);
|
|
|
|
pt->pt_active = 0;
|
|
|
|
}
|
2003-01-18 13:06:22 +03:00
|
|
|
}
|
2008-04-21 04:13:46 +04:00
|
|
|
itimerfree(pts, timerid);
|
2003-01-18 13:06:22 +03:00
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2003-03-11 00:49:56 +03:00
|
|
|
* Set up the given timer. The value in pt->pt_time.it_value is taken
|
2011-04-08 14:35:37 +04:00
|
|
|
* to be an absolute time for CLOCK_REALTIME/CLOCK_MONOTONIC timers and
|
|
|
|
* a relative time for CLOCK_VIRTUAL/CLOCK_PROF timers.
|
2003-01-18 13:06:22 +03:00
|
|
|
*/
|
|
|
|
void
|
|
|
|
timer_settime(struct ptimer *pt)
|
|
|
|
{
|
|
|
|
struct ptimer *ptn, *pptn;
|
|
|
|
struct ptlist *ptl;
|
|
|
|
|
2008-04-21 04:13:46 +04:00
|
|
|
KASSERT(mutex_owned(&timer_lock));
|
|
|
|
|
2011-04-08 14:35:37 +04:00
|
|
|
if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
|
|
|
|
callout_halt(&pt->pt_ch, &timer_lock);
|
2008-07-15 20:18:08 +04:00
|
|
|
if (timespecisset(&pt->pt_time.it_value)) {
|
2003-01-18 13:06:22 +03:00
|
|
|
/*
|
2008-07-15 20:18:08 +04:00
|
|
|
* Don't need to check tshzto() return value, here.
|
2003-01-18 13:06:22 +03:00
|
|
|
* callout_reset() does it for us.
|
|
|
|
*/
|
2008-07-15 20:18:08 +04:00
|
|
|
callout_reset(&pt->pt_ch, tshzto(&pt->pt_time.it_value),
|
2003-01-18 13:06:22 +03:00
|
|
|
realtimerexpire, pt);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (pt->pt_active) {
|
|
|
|
ptn = LIST_NEXT(pt, pt_list);
|
|
|
|
LIST_REMOVE(pt, pt_list);
|
|
|
|
for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
|
2008-07-15 20:18:08 +04:00
|
|
|
timespecadd(&pt->pt_time.it_value,
|
2003-01-18 13:06:22 +03:00
|
|
|
&ptn->pt_time.it_value,
|
|
|
|
&ptn->pt_time.it_value);
|
|
|
|
}
|
2008-07-15 20:18:08 +04:00
|
|
|
if (timespecisset(&pt->pt_time.it_value)) {
|
2003-01-18 13:06:22 +03:00
|
|
|
if (pt->pt_type == CLOCK_VIRTUAL)
|
|
|
|
ptl = &pt->pt_proc->p_timers->pts_virtual;
|
|
|
|
else
|
|
|
|
ptl = &pt->pt_proc->p_timers->pts_prof;
|
|
|
|
|
|
|
|
for (ptn = LIST_FIRST(ptl), pptn = NULL;
|
2008-07-15 20:18:08 +04:00
|
|
|
ptn && timespeccmp(&pt->pt_time.it_value,
|
2003-01-18 13:06:22 +03:00
|
|
|
&ptn->pt_time.it_value, >);
|
|
|
|
pptn = ptn, ptn = LIST_NEXT(ptn, pt_list))
|
2008-07-15 20:18:08 +04:00
|
|
|
timespecsub(&pt->pt_time.it_value,
|
2003-01-18 13:06:22 +03:00
|
|
|
&ptn->pt_time.it_value,
|
|
|
|
&pt->pt_time.it_value);
|
|
|
|
|
|
|
|
if (pptn)
|
|
|
|
LIST_INSERT_AFTER(pptn, pt, pt_list);
|
|
|
|
else
|
|
|
|
LIST_INSERT_HEAD(ptl, pt, pt_list);
|
|
|
|
|
|
|
|
for ( ; ptn ; ptn = LIST_NEXT(ptn, pt_list))
|
2008-07-15 20:18:08 +04:00
|
|
|
timespecsub(&ptn->pt_time.it_value,
|
2003-01-18 13:06:22 +03:00
|
|
|
&pt->pt_time.it_value,
|
|
|
|
&ptn->pt_time.it_value);
|
|
|
|
|
|
|
|
pt->pt_active = 1;
|
|
|
|
} else
|
|
|
|
pt->pt_active = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2008-07-15 20:18:08 +04:00
|
|
|
timer_gettime(struct ptimer *pt, struct itimerspec *aits)
|
2003-01-18 13:06:22 +03:00
|
|
|
{
|
2008-07-15 20:18:08 +04:00
|
|
|
struct timespec now;
|
2003-01-18 13:06:22 +03:00
|
|
|
struct ptimer *ptn;
|
|
|
|
|
2008-04-21 04:13:46 +04:00
|
|
|
KASSERT(mutex_owned(&timer_lock));
|
|
|
|
|
2008-07-15 20:18:08 +04:00
|
|
|
*aits = pt->pt_time;
|
2011-04-08 14:35:37 +04:00
|
|
|
if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
|
1993-03-21 12:45:37 +03:00
|
|
|
/*
|
1994-12-11 21:06:07 +03:00
|
|
|
* Convert from absolute to relative time in .it_value
|
2003-01-18 13:06:22 +03:00
|
|
|
* part of real time timer. If time for real time
|
|
|
|
* timer has passed return 0, else return difference
|
|
|
|
* between current time and time for the timer to go
|
|
|
|
* off.
|
1993-03-21 12:45:37 +03:00
|
|
|
*/
|
2008-07-15 20:18:08 +04:00
|
|
|
if (timespecisset(&aits->it_value)) {
|
2011-04-08 14:35:37 +04:00
|
|
|
if (pt->pt_type == CLOCK_REALTIME) {
|
|
|
|
getnanotime(&now);
|
|
|
|
} else { /* CLOCK_MONOTONIC */
|
|
|
|
getnanouptime(&now);
|
|
|
|
}
|
2008-07-15 20:18:08 +04:00
|
|
|
if (timespeccmp(&aits->it_value, &now, <))
|
|
|
|
timespecclear(&aits->it_value);
|
2006-06-08 02:33:33 +04:00
|
|
|
else
|
2008-07-15 20:18:08 +04:00
|
|
|
timespecsub(&aits->it_value, &now,
|
|
|
|
&aits->it_value);
|
1998-08-18 10:27:01 +04:00
|
|
|
}
|
2003-01-18 13:06:22 +03:00
|
|
|
} else if (pt->pt_active) {
|
|
|
|
if (pt->pt_type == CLOCK_VIRTUAL)
|
|
|
|
ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_virtual);
|
|
|
|
else
|
|
|
|
ptn = LIST_FIRST(&pt->pt_proc->p_timers->pts_prof);
|
|
|
|
for ( ; ptn && ptn != pt; ptn = LIST_NEXT(ptn, pt_list))
|
2008-07-15 20:18:08 +04:00
|
|
|
timespecadd(&aits->it_value,
|
|
|
|
&ptn->pt_time.it_value, &aits->it_value);
|
2003-01-18 13:06:22 +03:00
|
|
|
KASSERT(ptn != NULL); /* pt should be findable on the list */
|
1993-03-21 12:45:37 +03:00
|
|
|
} else
|
2008-07-15 20:18:08 +04:00
|
|
|
timespecclear(&aits->it_value);
|
2003-01-18 13:06:22 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* Set and arm a POSIX realtime timer */
|
|
|
|
int
|
2009-01-11 05:45:45 +03:00
|
|
|
sys___timer_settime50(struct lwp *l,
|
|
|
|
const struct sys___timer_settime50_args *uap,
|
2008-02-19 17:21:56 +03:00
|
|
|
register_t *retval)
|
2003-01-18 13:06:22 +03:00
|
|
|
{
|
2007-12-21 02:02:38 +03:00
|
|
|
/* {
|
2003-01-18 13:06:22 +03:00
|
|
|
syscallarg(timer_t) timerid;
|
|
|
|
syscallarg(int) flags;
|
|
|
|
syscallarg(const struct itimerspec *) value;
|
|
|
|
syscallarg(struct itimerspec *) ovalue;
|
2007-12-21 02:02:38 +03:00
|
|
|
} */
|
2005-07-23 22:54:07 +04:00
|
|
|
int error;
|
|
|
|
struct itimerspec value, ovalue, *ovp = NULL;
|
|
|
|
|
|
|
|
if ((error = copyin(SCARG(uap, value), &value,
|
|
|
|
sizeof(struct itimerspec))) != 0)
|
|
|
|
return (error);
|
|
|
|
|
|
|
|
if (SCARG(uap, ovalue))
|
|
|
|
ovp = &ovalue;
|
|
|
|
|
|
|
|
if ((error = dotimer_settime(SCARG(uap, timerid), &value, ovp,
|
|
|
|
SCARG(uap, flags), l->l_proc)) != 0)
|
|
|
|
return error;
|
|
|
|
|
|
|
|
if (ovp)
|
|
|
|
return copyout(&ovalue, SCARG(uap, ovalue),
|
|
|
|
sizeof(struct itimerspec));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
dotimer_settime(int timerid, struct itimerspec *value,
|
|
|
|
struct itimerspec *ovalue, int flags, struct proc *p)
|
|
|
|
{
|
2008-07-15 20:18:08 +04:00
|
|
|
struct timespec now;
|
|
|
|
struct itimerspec val, oval;
|
2008-04-21 04:13:46 +04:00
|
|
|
struct ptimers *pts;
|
2003-01-18 13:06:22 +03:00
|
|
|
struct ptimer *pt;
|
2009-03-29 23:21:19 +04:00
|
|
|
int error;
|
2003-01-18 13:06:22 +03:00
|
|
|
|
2008-04-21 04:13:46 +04:00
|
|
|
pts = p->p_timers;
|
2003-01-18 13:06:22 +03:00
|
|
|
|
2008-04-21 04:13:46 +04:00
|
|
|
if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
|
|
|
|
return EINVAL;
|
2008-07-15 20:18:08 +04:00
|
|
|
val = *value;
|
2009-03-29 23:21:19 +04:00
|
|
|
if ((error = itimespecfix(&val.it_value)) != 0 ||
|
|
|
|
(error = itimespecfix(&val.it_interval)) != 0)
|
|
|
|
return error;
|
2003-01-18 13:06:22 +03:00
|
|
|
|
2008-04-21 04:13:46 +04:00
|
|
|
mutex_spin_enter(&timer_lock);
|
|
|
|
if ((pt = pts->pts_timers[timerid]) == NULL) {
|
|
|
|
mutex_spin_exit(&timer_lock);
|
2008-07-15 20:18:08 +04:00
|
|
|
return EINVAL;
|
2008-04-21 04:13:46 +04:00
|
|
|
}
|
|
|
|
|
2003-01-18 13:06:22 +03:00
|
|
|
oval = pt->pt_time;
|
|
|
|
pt->pt_time = val;
|
|
|
|
|
2003-03-11 00:49:56 +03:00
|
|
|
/*
|
|
|
|
* If we've been passed a relative time for a realtime timer,
|
|
|
|
* convert it to absolute; if an absolute time for a virtual
|
|
|
|
* timer, convert it to relative and make sure we don't set it
|
|
|
|
* to zero, which would cancel the timer, or let it go
|
|
|
|
* negative, which would confuse the comparison tests.
|
|
|
|
*/
|
2008-07-15 20:18:08 +04:00
|
|
|
if (timespecisset(&pt->pt_time.it_value)) {
|
2011-04-08 14:35:37 +04:00
|
|
|
if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
|
2006-06-08 02:33:33 +04:00
|
|
|
if ((flags & TIMER_ABSTIME) == 0) {
|
2011-04-08 14:35:37 +04:00
|
|
|
if (pt->pt_type == CLOCK_REALTIME) {
|
|
|
|
getnanotime(&now);
|
|
|
|
} else { /* CLOCK_MONOTONIC */
|
|
|
|
getnanouptime(&now);
|
|
|
|
}
|
2008-07-15 20:18:08 +04:00
|
|
|
timespecadd(&pt->pt_time.it_value, &now,
|
2006-06-08 02:33:33 +04:00
|
|
|
&pt->pt_time.it_value);
|
|
|
|
}
|
2003-03-11 00:49:56 +03:00
|
|
|
} else {
|
2005-07-23 22:54:07 +04:00
|
|
|
if ((flags & TIMER_ABSTIME) != 0) {
|
2008-07-15 20:18:08 +04:00
|
|
|
getnanotime(&now);
|
|
|
|
timespecsub(&pt->pt_time.it_value, &now,
|
2006-06-08 02:33:33 +04:00
|
|
|
&pt->pt_time.it_value);
|
2008-07-15 20:18:08 +04:00
|
|
|
if (!timespecisset(&pt->pt_time.it_value) ||
|
2003-03-11 00:49:56 +03:00
|
|
|
pt->pt_time.it_value.tv_sec < 0) {
|
|
|
|
pt->pt_time.it_value.tv_sec = 0;
|
2008-07-15 20:18:08 +04:00
|
|
|
pt->pt_time.it_value.tv_nsec = 1;
|
2003-03-11 00:49:56 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2003-01-18 13:06:22 +03:00
|
|
|
timer_settime(pt);
|
2008-04-21 04:13:46 +04:00
|
|
|
mutex_spin_exit(&timer_lock);
|
2003-01-18 13:06:22 +03:00
|
|
|
|
2008-07-15 20:18:08 +04:00
|
|
|
if (ovalue)
|
|
|
|
*ovalue = oval;
|
2003-01-18 13:06:22 +03:00
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Return the time remaining until a POSIX timer fires. */
|
|
|
|
int
|
2009-01-11 05:45:45 +03:00
|
|
|
sys___timer_gettime50(struct lwp *l,
|
|
|
|
const struct sys___timer_gettime50_args *uap, register_t *retval)
|
2003-01-18 13:06:22 +03:00
|
|
|
{
|
2007-12-21 02:02:38 +03:00
|
|
|
/* {
|
2003-01-18 13:06:22 +03:00
|
|
|
syscallarg(timer_t) timerid;
|
|
|
|
syscallarg(struct itimerspec *) value;
|
2007-12-21 02:02:38 +03:00
|
|
|
} */
|
2003-01-18 13:06:22 +03:00
|
|
|
struct itimerspec its;
|
2005-07-23 22:54:07 +04:00
|
|
|
int error;
|
2003-01-18 13:06:22 +03:00
|
|
|
|
2005-07-23 22:54:07 +04:00
|
|
|
if ((error = dotimer_gettime(SCARG(uap, timerid), l->l_proc,
|
|
|
|
&its)) != 0)
|
|
|
|
return error;
|
|
|
|
|
|
|
|
return copyout(&its, SCARG(uap, value), sizeof(its));
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
dotimer_gettime(int timerid, struct proc *p, struct itimerspec *its)
|
|
|
|
{
|
|
|
|
struct ptimer *pt;
|
2008-04-21 04:13:46 +04:00
|
|
|
struct ptimers *pts;
|
2003-01-18 13:06:22 +03:00
|
|
|
|
2008-04-21 04:13:46 +04:00
|
|
|
pts = p->p_timers;
|
|
|
|
if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
|
2003-01-18 13:06:22 +03:00
|
|
|
return (EINVAL);
|
2008-04-21 04:13:46 +04:00
|
|
|
mutex_spin_enter(&timer_lock);
|
|
|
|
if ((pt = pts->pts_timers[timerid]) == NULL) {
|
|
|
|
mutex_spin_exit(&timer_lock);
|
|
|
|
return (EINVAL);
|
|
|
|
}
|
2008-07-15 20:18:08 +04:00
|
|
|
timer_gettime(pt, its);
|
2008-04-21 04:13:46 +04:00
|
|
|
mutex_spin_exit(&timer_lock);
|
2003-01-18 13:06:22 +03:00
|
|
|
|
2005-07-23 22:54:07 +04:00
|
|
|
return 0;
|
2003-01-18 13:06:22 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return the count of the number of times a periodic timer expired
|
|
|
|
* while a notification was already pending. The counter is reset when
|
|
|
|
* a timer expires and a notification can be posted.
|
|
|
|
*/
|
|
|
|
int
|
2008-02-19 17:21:56 +03:00
|
|
|
sys_timer_getoverrun(struct lwp *l, const struct sys_timer_getoverrun_args *uap,
|
|
|
|
register_t *retval)
|
2003-01-18 13:06:22 +03:00
|
|
|
{
|
2007-12-21 02:02:38 +03:00
|
|
|
/* {
|
2003-01-18 13:06:22 +03:00
|
|
|
syscallarg(timer_t) timerid;
|
2007-12-21 02:02:38 +03:00
|
|
|
} */
|
2003-01-18 13:06:22 +03:00
|
|
|
struct proc *p = l->l_proc;
|
2008-04-21 04:13:46 +04:00
|
|
|
struct ptimers *pts;
|
2003-01-18 13:06:22 +03:00
|
|
|
int timerid;
|
|
|
|
struct ptimer *pt;
|
|
|
|
|
|
|
|
timerid = SCARG(uap, timerid);
|
|
|
|
|
2008-04-21 04:13:46 +04:00
|
|
|
pts = p->p_timers;
|
|
|
|
if (pts == NULL || timerid < 2 || timerid >= TIMER_MAX)
|
2003-01-18 13:06:22 +03:00
|
|
|
return (EINVAL);
|
2008-04-21 04:13:46 +04:00
|
|
|
mutex_spin_enter(&timer_lock);
|
|
|
|
if ((pt = pts->pts_timers[timerid]) == NULL) {
|
|
|
|
mutex_spin_exit(&timer_lock);
|
|
|
|
return (EINVAL);
|
|
|
|
}
|
2003-01-18 13:06:22 +03:00
|
|
|
*retval = pt->pt_poverruns;
|
2008-04-21 04:13:46 +04:00
|
|
|
mutex_spin_exit(&timer_lock);
|
2003-01-18 13:06:22 +03:00
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2008-10-15 10:51:17 +04:00
|
|
|
#ifdef KERN_SA
|
|
|
|
/* Glue function that triggers an upcall; called from userret(). */
|
|
|
|
void
|
|
|
|
timerupcall(struct lwp *l)
|
|
|
|
{
|
|
|
|
struct ptimers *pt = l->l_proc->p_timers;
|
|
|
|
struct proc *p = l->l_proc;
|
|
|
|
unsigned int i, fired, done;
|
|
|
|
|
|
|
|
KDASSERT(l->l_proc->p_sa);
|
|
|
|
/* Bail out if we do not own the virtual processor */
|
|
|
|
if (l->l_savp->savp_lwp != l)
|
|
|
|
return ;
|
|
|
|
|
|
|
|
mutex_enter(p->p_lock);
|
|
|
|
|
|
|
|
fired = pt->pts_fired;
|
|
|
|
done = 0;
|
|
|
|
while ((i = ffs(fired)) != 0) {
|
|
|
|
siginfo_t *si;
|
|
|
|
int mask = 1 << --i;
|
|
|
|
int f;
|
|
|
|
|
|
|
|
f = ~l->l_pflag & LP_SA_NOBLOCK;
|
|
|
|
l->l_pflag |= LP_SA_NOBLOCK;
|
|
|
|
si = siginfo_alloc(PR_WAITOK);
|
|
|
|
si->_info = pt->pts_timers[i]->pt_info.ksi_info;
|
|
|
|
if (sa_upcall(l, SA_UPCALL_SIGEV | SA_UPCALL_DEFER, NULL, l,
|
|
|
|
sizeof(*si), si, siginfo_free) != 0) {
|
|
|
|
siginfo_free(si);
|
|
|
|
/* XXX What do we do here?? */
|
|
|
|
} else
|
|
|
|
done |= mask;
|
|
|
|
fired &= ~mask;
|
|
|
|
l->l_pflag ^= f;
|
|
|
|
}
|
|
|
|
pt->pts_fired &= ~done;
|
|
|
|
if (pt->pts_fired == 0)
|
|
|
|
l->l_proc->p_timerpend = 0;
|
|
|
|
|
|
|
|
mutex_exit(p->p_lock);
|
|
|
|
}
|
|
|
|
#endif /* KERN_SA */
|
|
|
|
|
2003-01-18 13:06:22 +03:00
|
|
|
/*
|
|
|
|
* Real interval timer expired:
|
|
|
|
* send process whose timer expired an alarm signal.
|
|
|
|
* If time is not set up to reload, then just return.
|
|
|
|
* Else compute next time timer should go off which is > current time.
|
|
|
|
* This is where delay in processing this timeout causes multiple
|
|
|
|
* SIGALRM calls to be compressed into one.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
realtimerexpire(void *arg)
|
|
|
|
{
|
2010-12-18 01:08:18 +03:00
|
|
|
uint64_t last_val, next_val, interval, now_ns;
|
2008-07-15 20:18:08 +04:00
|
|
|
struct timespec now, next;
|
2003-01-18 13:06:22 +03:00
|
|
|
struct ptimer *pt;
|
2008-05-29 19:27:51 +04:00
|
|
|
int backwards;
|
2003-01-18 13:06:22 +03:00
|
|
|
|
2008-04-21 04:13:46 +04:00
|
|
|
pt = arg;
|
2003-01-18 13:06:22 +03:00
|
|
|
|
2008-04-21 04:13:46 +04:00
|
|
|
mutex_spin_enter(&timer_lock);
|
2003-01-18 13:06:22 +03:00
|
|
|
itimerfire(pt);
|
|
|
|
|
2008-07-15 20:18:08 +04:00
|
|
|
if (!timespecisset(&pt->pt_time.it_interval)) {
|
|
|
|
timespecclear(&pt->pt_time.it_value);
|
2008-04-21 04:13:46 +04:00
|
|
|
mutex_spin_exit(&timer_lock);
|
2003-01-18 13:06:22 +03:00
|
|
|
return;
|
|
|
|
}
|
2008-05-29 19:27:51 +04:00
|
|
|
|
2008-07-15 20:18:08 +04:00
|
|
|
getnanotime(&now);
|
|
|
|
backwards = (timespeccmp(&pt->pt_time.it_value, &now, >));
|
|
|
|
timespecadd(&pt->pt_time.it_value, &pt->pt_time.it_interval, &next);
|
2008-05-29 19:27:51 +04:00
|
|
|
/* Handle the easy case of non-overflown timers first. */
|
2008-07-15 20:18:08 +04:00
|
|
|
if (!backwards && timespeccmp(&next, &now, >)) {
|
2008-05-29 19:27:51 +04:00
|
|
|
pt->pt_time.it_value = next;
|
|
|
|
} else {
|
2010-12-18 01:08:18 +03:00
|
|
|
now_ns = timespec2ns(&now);
|
2008-07-15 20:18:08 +04:00
|
|
|
last_val = timespec2ns(&pt->pt_time.it_value);
|
|
|
|
interval = timespec2ns(&pt->pt_time.it_interval);
|
2008-05-29 19:27:51 +04:00
|
|
|
|
2010-12-18 01:08:18 +03:00
|
|
|
next_val = now_ns +
|
|
|
|
(now_ns - last_val + interval - 1) % interval;
|
2008-05-29 19:27:51 +04:00
|
|
|
|
|
|
|
if (backwards)
|
|
|
|
next_val += interval;
|
|
|
|
else
|
2010-12-18 01:08:18 +03:00
|
|
|
pt->pt_overruns += (now_ns - last_val) / interval;
|
2008-05-29 19:27:51 +04:00
|
|
|
|
2008-07-15 20:18:08 +04:00
|
|
|
pt->pt_time.it_value.tv_sec = next_val / 1000000000;
|
|
|
|
pt->pt_time.it_value.tv_nsec = next_val % 1000000000;
|
2006-06-08 02:33:33 +04:00
|
|
|
}
|
2008-05-29 19:27:51 +04:00
|
|
|
|
|
|
|
/*
|
2008-07-15 20:18:08 +04:00
|
|
|
* Don't need to check tshzto() return value, here.
|
2008-05-29 19:27:51 +04:00
|
|
|
* callout_reset() does it for us.
|
|
|
|
*/
|
2008-07-15 20:18:08 +04:00
|
|
|
callout_reset(&pt->pt_ch, tshzto(&pt->pt_time.it_value),
|
2008-05-29 19:27:51 +04:00
|
|
|
realtimerexpire, pt);
|
|
|
|
mutex_spin_exit(&timer_lock);
|
2003-01-18 13:06:22 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/* BSD routine to get the value of an interval timer. */
|
|
|
|
/* ARGSUSED */
|
|
|
|
int
|
2009-01-11 05:45:45 +03:00
|
|
|
sys___getitimer50(struct lwp *l, const struct sys___getitimer50_args *uap,
|
2008-02-19 17:21:56 +03:00
|
|
|
register_t *retval)
|
2003-01-18 13:06:22 +03:00
|
|
|
{
|
2007-12-21 02:02:38 +03:00
|
|
|
/* {
|
2003-01-18 13:06:22 +03:00
|
|
|
syscallarg(int) which;
|
|
|
|
syscallarg(struct itimerval *) itv;
|
2007-12-21 02:02:38 +03:00
|
|
|
} */
|
2003-01-18 13:06:22 +03:00
|
|
|
struct proc *p = l->l_proc;
|
|
|
|
struct itimerval aitv;
|
2005-07-11 23:50:42 +04:00
|
|
|
int error;
|
|
|
|
|
|
|
|
error = dogetitimer(p, SCARG(uap, which), &aitv);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
return (copyout(&aitv, SCARG(uap, itv), sizeof(struct itimerval)));
|
|
|
|
}
|
2003-01-18 13:06:22 +03:00
|
|
|
|
2005-07-11 23:50:42 +04:00
|
|
|
int
|
|
|
|
dogetitimer(struct proc *p, int which, struct itimerval *itvp)
|
|
|
|
{
|
2008-04-21 04:13:46 +04:00
|
|
|
struct ptimers *pts;
|
|
|
|
struct ptimer *pt;
|
2008-07-15 20:18:08 +04:00
|
|
|
struct itimerspec its;
|
2003-01-18 13:06:22 +03:00
|
|
|
|
|
|
|
if ((u_int)which > ITIMER_PROF)
|
|
|
|
return (EINVAL);
|
|
|
|
|
2008-04-21 04:13:46 +04:00
|
|
|
mutex_spin_enter(&timer_lock);
|
|
|
|
pts = p->p_timers;
|
|
|
|
if (pts == NULL || (pt = pts->pts_timers[which]) == NULL) {
|
2005-07-11 23:50:42 +04:00
|
|
|
timerclear(&itvp->it_value);
|
|
|
|
timerclear(&itvp->it_interval);
|
2008-07-15 20:18:08 +04:00
|
|
|
} else {
|
|
|
|
timer_gettime(pt, &its);
|
2008-08-08 20:44:52 +04:00
|
|
|
TIMESPEC_TO_TIMEVAL(&itvp->it_value, &its.it_value);
|
|
|
|
TIMESPEC_TO_TIMEVAL(&itvp->it_interval, &its.it_interval);
|
2008-07-15 20:18:08 +04:00
|
|
|
}
|
2008-04-21 04:13:46 +04:00
|
|
|
mutex_spin_exit(&timer_lock);
|
2003-01-18 13:06:22 +03:00
|
|
|
|
2005-07-11 23:50:42 +04:00
|
|
|
return 0;
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
|
|
|
|
2003-01-18 13:06:22 +03:00
|
|
|
/* BSD routine to set/arm an interval timer. */
|
1993-03-21 12:45:37 +03:00
|
|
|
/* ARGSUSED */
|
1993-06-27 10:01:27 +04:00
|
|
|
int
|
2009-01-11 05:45:45 +03:00
|
|
|
sys___setitimer50(struct lwp *l, const struct sys___setitimer50_args *uap,
|
2008-02-19 17:21:56 +03:00
|
|
|
register_t *retval)
|
1995-09-20 01:40:36 +04:00
|
|
|
{
|
2007-12-21 02:02:38 +03:00
|
|
|
/* {
|
1997-10-15 21:03:52 +04:00
|
|
|
syscallarg(int) which;
|
1996-12-22 13:21:06 +03:00
|
|
|
syscallarg(const struct itimerval *) itv;
|
1994-10-20 07:22:35 +03:00
|
|
|
syscallarg(struct itimerval *) oitv;
|
2007-12-21 02:02:38 +03:00
|
|
|
} */
|
2003-01-18 13:06:22 +03:00
|
|
|
struct proc *p = l->l_proc;
|
1997-10-15 21:03:52 +04:00
|
|
|
int which = SCARG(uap, which);
|
2009-01-11 05:45:45 +03:00
|
|
|
struct sys___getitimer50_args getargs;
|
2000-03-30 13:27:11 +04:00
|
|
|
const struct itimerval *itvp;
|
2005-07-11 23:50:42 +04:00
|
|
|
struct itimerval aitv;
|
|
|
|
int error;
|
1993-03-21 12:45:37 +03:00
|
|
|
|
1997-10-15 21:03:52 +04:00
|
|
|
if ((u_int)which > ITIMER_PROF)
|
1993-03-21 12:45:37 +03:00
|
|
|
return (EINVAL);
|
1994-10-20 07:22:35 +03:00
|
|
|
itvp = SCARG(uap, itv);
|
2003-01-18 13:06:22 +03:00
|
|
|
if (itvp &&
|
2001-09-16 10:50:06 +04:00
|
|
|
(error = copyin(itvp, &aitv, sizeof(struct itimerval)) != 0))
|
1993-03-21 12:45:37 +03:00
|
|
|
return (error);
|
1996-10-24 08:35:33 +04:00
|
|
|
if (SCARG(uap, oitv) != NULL) {
|
1997-10-15 21:03:52 +04:00
|
|
|
SCARG(&getargs, which) = which;
|
1996-10-24 08:35:33 +04:00
|
|
|
SCARG(&getargs, itv) = SCARG(uap, oitv);
|
2009-01-11 05:45:45 +03:00
|
|
|
if ((error = sys___getitimer50(l, &getargs, retval)) != 0)
|
1996-10-24 08:35:33 +04:00
|
|
|
return (error);
|
|
|
|
}
|
1993-03-21 12:45:37 +03:00
|
|
|
if (itvp == 0)
|
|
|
|
return (0);
|
2005-07-11 23:50:42 +04:00
|
|
|
|
|
|
|
return dosetitimer(p, which, &aitv);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
dosetitimer(struct proc *p, int which, struct itimerval *itvp)
|
|
|
|
{
|
2008-07-15 20:18:08 +04:00
|
|
|
struct timespec now;
|
2008-04-21 04:13:46 +04:00
|
|
|
struct ptimers *pts;
|
|
|
|
struct ptimer *pt, *spare;
|
2005-07-11 23:50:42 +04:00
|
|
|
|
2011-04-08 14:35:37 +04:00
|
|
|
KASSERT(which == CLOCK_REALTIME || which == CLOCK_VIRTUAL ||
|
|
|
|
which == CLOCK_PROF);
|
2005-07-11 23:50:42 +04:00
|
|
|
if (itimerfix(&itvp->it_value) || itimerfix(&itvp->it_interval))
|
1993-03-21 12:45:37 +03:00
|
|
|
return (EINVAL);
|
2003-01-18 13:06:22 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Don't bother allocating data structures if the process just
|
|
|
|
* wants to clear the timer.
|
|
|
|
*/
|
2008-04-21 04:13:46 +04:00
|
|
|
spare = NULL;
|
|
|
|
pts = p->p_timers;
|
|
|
|
retry:
|
|
|
|
if (!timerisset(&itvp->it_value) && (pts == NULL ||
|
|
|
|
pts->pts_timers[which] == NULL))
|
2003-01-18 13:06:22 +03:00
|
|
|
return (0);
|
2008-04-21 04:13:46 +04:00
|
|
|
if (pts == NULL)
|
|
|
|
pts = timers_alloc(p);
|
|
|
|
mutex_spin_enter(&timer_lock);
|
|
|
|
pt = pts->pts_timers[which];
|
|
|
|
if (pt == NULL) {
|
|
|
|
if (spare == NULL) {
|
|
|
|
mutex_spin_exit(&timer_lock);
|
|
|
|
spare = pool_get(&ptimer_pool, PR_WAITOK);
|
|
|
|
goto retry;
|
|
|
|
}
|
|
|
|
pt = spare;
|
|
|
|
spare = NULL;
|
2003-01-18 13:06:22 +03:00
|
|
|
pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
|
2003-09-14 10:56:22 +04:00
|
|
|
pt->pt_ev.sigev_value.sival_int = which;
|
2003-01-18 13:06:22 +03:00
|
|
|
pt->pt_overruns = 0;
|
|
|
|
pt->pt_proc = p;
|
|
|
|
pt->pt_type = which;
|
2003-02-04 02:39:40 +03:00
|
|
|
pt->pt_entry = which;
|
2008-04-21 04:13:46 +04:00
|
|
|
pt->pt_queued = false;
|
2008-07-09 00:53:02 +04:00
|
|
|
if (pt->pt_type == CLOCK_REALTIME)
|
|
|
|
callout_init(&pt->pt_ch, CALLOUT_MPSAFE);
|
|
|
|
else
|
|
|
|
pt->pt_active = 0;
|
|
|
|
|
2003-01-18 13:06:22 +03:00
|
|
|
switch (which) {
|
|
|
|
case ITIMER_REAL:
|
|
|
|
pt->pt_ev.sigev_signo = SIGALRM;
|
|
|
|
break;
|
|
|
|
case ITIMER_VIRTUAL:
|
|
|
|
pt->pt_ev.sigev_signo = SIGVTALRM;
|
|
|
|
break;
|
|
|
|
case ITIMER_PROF:
|
|
|
|
pt->pt_ev.sigev_signo = SIGPROF;
|
|
|
|
break;
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
2008-04-21 04:13:46 +04:00
|
|
|
pts->pts_timers[which] = pt;
|
|
|
|
}
|
2003-01-18 13:06:22 +03:00
|
|
|
|
2008-07-15 20:18:08 +04:00
|
|
|
TIMEVAL_TO_TIMESPEC(&itvp->it_value, &pt->pt_time.it_value);
|
|
|
|
TIMEVAL_TO_TIMESPEC(&itvp->it_interval, &pt->pt_time.it_interval);
|
|
|
|
|
|
|
|
if ((which == ITIMER_REAL) && timespecisset(&pt->pt_time.it_value)) {
|
2003-03-11 00:49:56 +03:00
|
|
|
/* Convert to absolute time */
|
2006-06-08 02:33:33 +04:00
|
|
|
/* XXX need to wrap in splclock for timecounters case? */
|
2008-07-15 20:18:08 +04:00
|
|
|
getnanotime(&now);
|
|
|
|
timespecadd(&pt->pt_time.it_value, &now, &pt->pt_time.it_value);
|
2003-03-11 00:49:56 +03:00
|
|
|
}
|
2003-01-18 13:06:22 +03:00
|
|
|
timer_settime(pt);
|
2008-04-21 04:13:46 +04:00
|
|
|
mutex_spin_exit(&timer_lock);
|
|
|
|
if (spare != NULL)
|
|
|
|
pool_put(&ptimer_pool, spare);
|
2003-01-18 13:06:22 +03:00
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2003-01-18 13:06:22 +03:00
|
|
|
/* Utility routines to manage the array of pointers to timers. */
|
2008-04-21 04:13:46 +04:00
|
|
|
struct ptimers *
|
2003-01-18 13:06:22 +03:00
|
|
|
timers_alloc(struct proc *p)
|
|
|
|
{
|
|
|
|
struct ptimers *pts;
|
2008-04-21 04:13:46 +04:00
|
|
|
int i;
|
2003-01-18 13:06:22 +03:00
|
|
|
|
2006-05-18 14:09:12 +04:00
|
|
|
pts = pool_get(&ptimers_pool, PR_WAITOK);
|
2003-01-18 13:06:22 +03:00
|
|
|
LIST_INIT(&pts->pts_virtual);
|
|
|
|
LIST_INIT(&pts->pts_prof);
|
|
|
|
for (i = 0; i < TIMER_MAX; i++)
|
|
|
|
pts->pts_timers[i] = NULL;
|
2003-02-04 02:39:40 +03:00
|
|
|
pts->pts_fired = 0;
|
2008-04-21 04:13:46 +04:00
|
|
|
mutex_spin_enter(&timer_lock);
|
|
|
|
if (p->p_timers == NULL) {
|
|
|
|
p->p_timers = pts;
|
|
|
|
mutex_spin_exit(&timer_lock);
|
|
|
|
return pts;
|
|
|
|
}
|
|
|
|
mutex_spin_exit(&timer_lock);
|
|
|
|
pool_put(&ptimers_pool, pts);
|
|
|
|
return p->p_timers;
|
2003-01-18 13:06:22 +03:00
|
|
|
}
|
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
/*
|
2003-01-18 13:06:22 +03:00
|
|
|
* Clean up the per-process timers. If "which" is set to TIMERS_ALL,
|
|
|
|
* then clean up all timers and free all the data structures. If
|
|
|
|
* "which" is set to TIMERS_POSIX, only clean up the timers allocated
|
|
|
|
* by timer_create(), not the BSD setitimer() timers, and only free the
|
|
|
|
* structure if none of those remain.
|
1993-03-21 12:45:37 +03:00
|
|
|
*/
|
1993-06-27 10:01:27 +04:00
|
|
|
void
|
2003-01-18 13:06:22 +03:00
|
|
|
timers_free(struct proc *p, int which)
|
1993-03-21 12:45:37 +03:00
|
|
|
{
|
2003-01-18 13:06:22 +03:00
|
|
|
struct ptimers *pts;
|
2008-04-21 04:13:46 +04:00
|
|
|
struct ptimer *ptn;
|
2008-07-15 20:18:08 +04:00
|
|
|
struct timespec ts;
|
2008-04-21 04:13:46 +04:00
|
|
|
int i;
|
2003-01-18 13:06:22 +03:00
|
|
|
|
2008-04-21 04:13:46 +04:00
|
|
|
if (p->p_timers == NULL)
|
|
|
|
return;
|
2003-01-18 13:06:22 +03:00
|
|
|
|
2008-04-21 04:13:46 +04:00
|
|
|
pts = p->p_timers;
|
|
|
|
mutex_spin_enter(&timer_lock);
|
|
|
|
if (which == TIMERS_ALL) {
|
|
|
|
p->p_timers = NULL;
|
|
|
|
i = 0;
|
|
|
|
} else {
|
2008-07-15 20:18:08 +04:00
|
|
|
timespecclear(&ts);
|
2008-04-21 04:13:46 +04:00
|
|
|
for (ptn = LIST_FIRST(&pts->pts_virtual);
|
|
|
|
ptn && ptn != pts->pts_timers[ITIMER_VIRTUAL];
|
2008-07-09 00:53:02 +04:00
|
|
|
ptn = LIST_NEXT(ptn, pt_list)) {
|
2011-04-08 14:35:37 +04:00
|
|
|
KASSERT(ptn->pt_type == CLOCK_VIRTUAL);
|
2008-07-15 20:18:08 +04:00
|
|
|
timespecadd(&ts, &ptn->pt_time.it_value, &ts);
|
2008-07-09 00:53:02 +04:00
|
|
|
}
|
2008-04-21 04:13:46 +04:00
|
|
|
LIST_FIRST(&pts->pts_virtual) = NULL;
|
|
|
|
if (ptn) {
|
2011-04-08 14:35:37 +04:00
|
|
|
KASSERT(ptn->pt_type == CLOCK_VIRTUAL);
|
2008-07-15 20:18:08 +04:00
|
|
|
timespecadd(&ts, &ptn->pt_time.it_value,
|
2008-04-21 04:13:46 +04:00
|
|
|
&ptn->pt_time.it_value);
|
|
|
|
LIST_INSERT_HEAD(&pts->pts_virtual, ptn, pt_list);
|
2003-01-18 13:06:22 +03:00
|
|
|
}
|
2008-07-15 20:18:08 +04:00
|
|
|
timespecclear(&ts);
|
2008-04-21 04:13:46 +04:00
|
|
|
for (ptn = LIST_FIRST(&pts->pts_prof);
|
|
|
|
ptn && ptn != pts->pts_timers[ITIMER_PROF];
|
2008-07-09 00:53:02 +04:00
|
|
|
ptn = LIST_NEXT(ptn, pt_list)) {
|
2011-04-08 14:35:37 +04:00
|
|
|
KASSERT(ptn->pt_type == CLOCK_PROF);
|
2008-07-15 20:18:08 +04:00
|
|
|
timespecadd(&ts, &ptn->pt_time.it_value, &ts);
|
2008-07-09 00:53:02 +04:00
|
|
|
}
|
2008-04-21 04:13:46 +04:00
|
|
|
LIST_FIRST(&pts->pts_prof) = NULL;
|
|
|
|
if (ptn) {
|
2011-04-08 14:35:37 +04:00
|
|
|
KASSERT(ptn->pt_type == CLOCK_PROF);
|
2008-07-15 20:18:08 +04:00
|
|
|
timespecadd(&ts, &ptn->pt_time.it_value,
|
2008-04-21 04:13:46 +04:00
|
|
|
&ptn->pt_time.it_value);
|
|
|
|
LIST_INSERT_HEAD(&pts->pts_prof, ptn, pt_list);
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
2008-04-21 04:13:46 +04:00
|
|
|
i = 3;
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
2008-04-21 04:13:46 +04:00
|
|
|
for ( ; i < TIMER_MAX; i++) {
|
|
|
|
if (pts->pts_timers[i] != NULL) {
|
|
|
|
itimerfree(pts, i);
|
|
|
|
mutex_spin_enter(&timer_lock);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (pts->pts_timers[0] == NULL && pts->pts_timers[1] == NULL &&
|
|
|
|
pts->pts_timers[2] == NULL) {
|
|
|
|
p->p_timers = NULL;
|
|
|
|
mutex_spin_exit(&timer_lock);
|
|
|
|
pool_put(&ptimers_pool, pts);
|
|
|
|
} else
|
|
|
|
mutex_spin_exit(&timer_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
itimerfree(struct ptimers *pts, int index)
|
|
|
|
{
|
|
|
|
struct ptimer *pt;
|
|
|
|
|
|
|
|
KASSERT(mutex_owned(&timer_lock));
|
|
|
|
|
|
|
|
pt = pts->pts_timers[index];
|
|
|
|
pts->pts_timers[index] = NULL;
|
2011-04-08 14:35:37 +04:00
|
|
|
if (!CLOCK_VIRTUAL_P(pt->pt_type))
|
2008-04-22 16:04:22 +04:00
|
|
|
callout_halt(&pt->pt_ch, &timer_lock);
|
2011-04-05 04:27:35 +04:00
|
|
|
if (pt->pt_queued)
|
2008-04-21 04:13:46 +04:00
|
|
|
TAILQ_REMOVE(&timer_queue, pt, pt_chain);
|
2008-04-22 16:04:22 +04:00
|
|
|
mutex_spin_exit(&timer_lock);
|
2011-04-08 14:35:37 +04:00
|
|
|
if (!CLOCK_VIRTUAL_P(pt->pt_type))
|
2008-07-09 00:53:02 +04:00
|
|
|
callout_destroy(&pt->pt_ch);
|
2008-04-21 04:13:46 +04:00
|
|
|
pool_put(&ptimer_pool, pt);
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Decrement an interval timer by a specified number
|
2008-09-23 20:44:49 +04:00
|
|
|
* of nanoseconds, which must be less than a second,
|
|
|
|
* i.e. < 1000000000. If the timer expires, then reload
|
|
|
|
* it. In this case, carry over (nsec - old value) to
|
1994-05-20 11:24:51 +04:00
|
|
|
* reduce the value reloaded into the timer so that
|
1993-03-21 12:45:37 +03:00
|
|
|
* the timer does not drift. This routine assumes
|
|
|
|
* that it is called in a context where the timers
|
|
|
|
* on which it is operating cannot change in value.
|
|
|
|
*/
|
2008-04-21 04:13:46 +04:00
|
|
|
static int
|
2008-09-23 20:44:49 +04:00
|
|
|
itimerdecr(struct ptimer *pt, int nsec)
|
1993-03-21 12:45:37 +03:00
|
|
|
{
|
2008-07-15 20:18:08 +04:00
|
|
|
struct itimerspec *itp;
|
1993-03-21 12:45:37 +03:00
|
|
|
|
2008-04-21 04:13:46 +04:00
|
|
|
KASSERT(mutex_owned(&timer_lock));
|
2011-04-08 14:35:37 +04:00
|
|
|
KASSERT(CLOCK_VIRTUAL_P(pt->pt_type));
|
2008-04-21 04:13:46 +04:00
|
|
|
|
2003-01-18 13:06:22 +03:00
|
|
|
itp = &pt->pt_time;
|
2008-07-15 20:18:08 +04:00
|
|
|
if (itp->it_value.tv_nsec < nsec) {
|
1993-03-21 12:45:37 +03:00
|
|
|
if (itp->it_value.tv_sec == 0) {
|
|
|
|
/* expired, and already in next interval */
|
2008-07-15 20:18:08 +04:00
|
|
|
nsec -= itp->it_value.tv_nsec;
|
1993-03-21 12:45:37 +03:00
|
|
|
goto expire;
|
|
|
|
}
|
2008-07-15 20:18:08 +04:00
|
|
|
itp->it_value.tv_nsec += 1000000000;
|
1993-03-21 12:45:37 +03:00
|
|
|
itp->it_value.tv_sec--;
|
|
|
|
}
|
2008-09-23 20:44:49 +04:00
|
|
|
itp->it_value.tv_nsec -= nsec;
|
|
|
|
nsec = 0;
|
2008-07-15 20:18:08 +04:00
|
|
|
if (timespecisset(&itp->it_value))
|
1993-03-21 12:45:37 +03:00
|
|
|
return (1);
|
|
|
|
/* expired, exactly at end of interval */
|
|
|
|
expire:
|
2008-07-15 20:18:08 +04:00
|
|
|
if (timespecisset(&itp->it_interval)) {
|
1993-03-21 12:45:37 +03:00
|
|
|
itp->it_value = itp->it_interval;
|
2008-07-15 20:18:08 +04:00
|
|
|
itp->it_value.tv_nsec -= nsec;
|
|
|
|
if (itp->it_value.tv_nsec < 0) {
|
|
|
|
itp->it_value.tv_nsec += 1000000000;
|
1993-03-21 12:45:37 +03:00
|
|
|
itp->it_value.tv_sec--;
|
|
|
|
}
|
2003-01-18 13:06:22 +03:00
|
|
|
timer_settime(pt);
|
1993-03-21 12:45:37 +03:00
|
|
|
} else
|
2008-07-15 20:18:08 +04:00
|
|
|
itp->it_value.tv_nsec = 0; /* sec is already 0 */
|
1993-03-21 12:45:37 +03:00
|
|
|
return (0);
|
|
|
|
}
|
2000-02-04 02:04:45 +03:00
|
|
|
|
2008-04-21 04:13:46 +04:00
|
|
|
static void
|
2003-01-18 13:06:22 +03:00
|
|
|
itimerfire(struct ptimer *pt)
|
|
|
|
{
|
2003-11-02 19:26:10 +03:00
|
|
|
|
2008-04-21 04:13:46 +04:00
|
|
|
KASSERT(mutex_owned(&timer_lock));
|
|
|
|
|
|
|
|
/*
|
|
|
|
* XXX Can overrun, but we don't do signal queueing yet, anyway.
|
|
|
|
* XXX Relying on the clock interrupt is stupid.
|
|
|
|
*/
|
2008-10-15 10:51:17 +04:00
|
|
|
if ((pt->pt_ev.sigev_notify == SIGEV_SA && pt->pt_proc->p_sa == NULL) ||
|
|
|
|
(pt->pt_ev.sigev_notify != SIGEV_SIGNAL &&
|
|
|
|
pt->pt_ev.sigev_notify != SIGEV_SA) || pt->pt_queued)
|
2008-04-21 04:13:46 +04:00
|
|
|
return;
|
|
|
|
TAILQ_INSERT_TAIL(&timer_queue, pt, pt_chain);
|
|
|
|
pt->pt_queued = true;
|
|
|
|
softint_schedule(timer_sih);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
timer_tick(lwp_t *l, bool user)
|
|
|
|
{
|
|
|
|
struct ptimers *pts;
|
|
|
|
struct ptimer *pt;
|
|
|
|
proc_t *p;
|
|
|
|
|
|
|
|
p = l->l_proc;
|
|
|
|
if (p->p_timers == NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
mutex_spin_enter(&timer_lock);
|
|
|
|
if ((pts = l->l_proc->p_timers) != NULL) {
|
2003-01-18 13:06:22 +03:00
|
|
|
/*
|
2008-04-21 04:13:46 +04:00
|
|
|
* Run current process's virtual and profile time, as needed.
|
2003-01-18 13:06:22 +03:00
|
|
|
*/
|
2008-04-21 04:13:46 +04:00
|
|
|
if (user && (pt = LIST_FIRST(&pts->pts_virtual)) != NULL)
|
2008-09-23 20:44:49 +04:00
|
|
|
if (itimerdecr(pt, tick * 1000) == 0)
|
2008-04-21 04:13:46 +04:00
|
|
|
itimerfire(pt);
|
|
|
|
if ((pt = LIST_FIRST(&pts->pts_prof)) != NULL)
|
2008-09-23 20:44:49 +04:00
|
|
|
if (itimerdecr(pt, tick * 1000) == 0)
|
2008-04-21 04:13:46 +04:00
|
|
|
itimerfire(pt);
|
|
|
|
}
|
|
|
|
mutex_spin_exit(&timer_lock);
|
|
|
|
}
|
|
|
|
|
2008-10-15 10:51:17 +04:00
|
|
|
#ifdef KERN_SA
|
|
|
|
/*
|
|
|
|
* timer_sa_intr:
|
|
|
|
*
|
|
|
|
* SIGEV_SA handling for timer_intr(). We are called (and return)
|
|
|
|
* with the timer lock held. We know that the process had SA enabled
|
|
|
|
* when this timer was enqueued. As timer_intr() is a soft interrupt
|
|
|
|
* handler, SA should still be enabled by the time we get here.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
timer_sa_intr(struct ptimer *pt, proc_t *p)
|
|
|
|
{
|
Adjust locking on the sadata::sa_vps list. The main time we
walk the list, we're looking for a vp to do something with. We do
this in the signal code and in the timer code. The signal code already
runs with proc::p_lock held, so it's a very natural lock to use. The
timer code, however, calls into the sa timer code with a spinlock held.
Since proc::p_lock is an adaptable mutex, we can sleep to get it. Sleeping
with a spinlock is BAD. So proc::p_lock is _not_ the right lock there,
and something like sadata::sa_mutex would be best.
Address this difficulty by noting that both uses actually just read
the list. Changing the list of VPs is rare - once one's added, it stays
until the process ends. So make the locking protocol that to write the
list you have to hold both proc::p_lock and sadata::sa_mutex (taken
in that order). Thus holding either one individually grants read access.
This removes a case where we could sleep with timer_lock, a spinlock at
IPL_SCHED (!!), while trying to get p_lock. If that ever happened, we'd
pretty much be dead. So don't do that!
This fixes a merge botch from how I handled our gaining p_lock - p_lock
should not have simply replaced p_smutex.
While here, tweak the sa_unblock_userret() code for the case
when the blessed vp is actually running (on another CPU). Make its
resched RESCHED_IMMED so we whack the CPU. Addresses a hang I've
observed in starting firefox on occasion when I see one thread running
in userland and another thread sitting in lwpublk, which means it's on
the list of threads for which we need an unblocked upcall. This list is
one on which things should NOT linger.
2008-10-16 22:21:45 +04:00
|
|
|
unsigned int i;
|
|
|
|
struct sadata *sa;
|
|
|
|
struct sadata_vp *vp;
|
2008-10-15 10:51:17 +04:00
|
|
|
|
|
|
|
/* Cause the process to generate an upcall when it returns. */
|
|
|
|
if (!p->p_timerpend) {
|
|
|
|
/*
|
|
|
|
* XXX stop signals can be processed inside tsleep,
|
|
|
|
* which can be inside sa_yield's inner loop, which
|
|
|
|
* makes testing for sa_idle alone insuffucent to
|
|
|
|
* determine if we really should call setrunnable.
|
|
|
|
*/
|
|
|
|
pt->pt_poverruns = pt->pt_overruns;
|
|
|
|
pt->pt_overruns = 0;
|
|
|
|
i = 1 << pt->pt_entry;
|
|
|
|
p->p_timers->pts_fired = i;
|
|
|
|
p->p_timerpend = 1;
|
|
|
|
|
Adjust locking on the sadata::sa_vps list. The main time we
walk the list, we're looking for a vp to do something with. We do
this in the signal code and in the timer code. The signal code already
runs with proc::p_lock held, so it's a very natural lock to use. The
timer code, however, calls into the sa timer code with a spinlock held.
Since proc::p_lock is an adaptable mutex, we can sleep to get it. Sleeping
with a spinlock is BAD. So proc::p_lock is _not_ the right lock there,
and something like sadata::sa_mutex would be best.
Address this difficulty by noting that both uses actually just read
the list. Changing the list of VPs is rare - once one's added, it stays
until the process ends. So make the locking protocol that to write the
list you have to hold both proc::p_lock and sadata::sa_mutex (taken
in that order). Thus holding either one individually grants read access.
This removes a case where we could sleep with timer_lock, a spinlock at
IPL_SCHED (!!), while trying to get p_lock. If that ever happened, we'd
pretty much be dead. So don't do that!
This fixes a merge botch from how I handled our gaining p_lock - p_lock
should not have simply replaced p_smutex.
While here, tweak the sa_unblock_userret() code for the case
when the blessed vp is actually running (on another CPU). Make its
resched RESCHED_IMMED so we whack the CPU. Addresses a hang I've
observed in starting firefox on occasion when I see one thread running
in userland and another thread sitting in lwpublk, which means it's on
the list of threads for which we need an unblocked upcall. This list is
one on which things should NOT linger.
2008-10-16 22:21:45 +04:00
|
|
|
sa = p->p_sa;
|
|
|
|
mutex_enter(&sa->sa_mutex);
|
|
|
|
SLIST_FOREACH(vp, &sa->sa_vps, savp_next) {
|
|
|
|
struct lwp *vp_lwp = vp->savp_lwp;
|
|
|
|
lwp_lock(vp_lwp);
|
|
|
|
lwp_need_userret(vp_lwp);
|
|
|
|
if (vp_lwp->l_flag & LW_SA_IDLE) {
|
|
|
|
vp_lwp->l_flag &= ~LW_SA_IDLE;
|
|
|
|
lwp_unsleep(vp_lwp, true);
|
2008-10-15 10:51:17 +04:00
|
|
|
break;
|
|
|
|
}
|
Adjust locking on the sadata::sa_vps list. The main time we
walk the list, we're looking for a vp to do something with. We do
this in the signal code and in the timer code. The signal code already
runs with proc::p_lock held, so it's a very natural lock to use. The
timer code, however, calls into the sa timer code with a spinlock held.
Since proc::p_lock is an adaptable mutex, we can sleep to get it. Sleeping
with a spinlock is BAD. So proc::p_lock is _not_ the right lock there,
and something like sadata::sa_mutex would be best.
Address this difficulty by noting that both uses actually just read
the list. Changing the list of VPs is rare - once one's added, it stays
until the process ends. So make the locking protocol that to write the
list you have to hold both proc::p_lock and sadata::sa_mutex (taken
in that order). Thus holding either one individually grants read access.
This removes a case where we could sleep with timer_lock, a spinlock at
IPL_SCHED (!!), while trying to get p_lock. If that ever happened, we'd
pretty much be dead. So don't do that!
This fixes a merge botch from how I handled our gaining p_lock - p_lock
should not have simply replaced p_smutex.
While here, tweak the sa_unblock_userret() code for the case
when the blessed vp is actually running (on another CPU). Make its
resched RESCHED_IMMED so we whack the CPU. Addresses a hang I've
observed in starting firefox on occasion when I see one thread running
in userland and another thread sitting in lwpublk, which means it's on
the list of threads for which we need an unblocked upcall. This list is
one on which things should NOT linger.
2008-10-16 22:21:45 +04:00
|
|
|
lwp_unlock(vp_lwp);
|
2008-10-15 10:51:17 +04:00
|
|
|
}
|
Adjust locking on the sadata::sa_vps list. The main time we
walk the list, we're looking for a vp to do something with. We do
this in the signal code and in the timer code. The signal code already
runs with proc::p_lock held, so it's a very natural lock to use. The
timer code, however, calls into the sa timer code with a spinlock held.
Since proc::p_lock is an adaptable mutex, we can sleep to get it. Sleeping
with a spinlock is BAD. So proc::p_lock is _not_ the right lock there,
and something like sadata::sa_mutex would be best.
Address this difficulty by noting that both uses actually just read
the list. Changing the list of VPs is rare - once one's added, it stays
until the process ends. So make the locking protocol that to write the
list you have to hold both proc::p_lock and sadata::sa_mutex (taken
in that order). Thus holding either one individually grants read access.
This removes a case where we could sleep with timer_lock, a spinlock at
IPL_SCHED (!!), while trying to get p_lock. If that ever happened, we'd
pretty much be dead. So don't do that!
This fixes a merge botch from how I handled our gaining p_lock - p_lock
should not have simply replaced p_smutex.
While here, tweak the sa_unblock_userret() code for the case
when the blessed vp is actually running (on another CPU). Make its
resched RESCHED_IMMED so we whack the CPU. Addresses a hang I've
observed in starting firefox on occasion when I see one thread running
in userland and another thread sitting in lwpublk, which means it's on
the list of threads for which we need an unblocked upcall. This list is
one on which things should NOT linger.
2008-10-16 22:21:45 +04:00
|
|
|
mutex_exit(&sa->sa_mutex);
|
2008-10-15 10:51:17 +04:00
|
|
|
} else {
|
|
|
|
i = 1 << pt->pt_entry;
|
|
|
|
if ((p->p_timers->pts_fired & i) == 0) {
|
|
|
|
pt->pt_poverruns = pt->pt_overruns;
|
|
|
|
pt->pt_overruns = 0;
|
|
|
|
p->p_timers->pts_fired |= i;
|
|
|
|
} else
|
|
|
|
pt->pt_overruns++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif /* KERN_SA */
|
|
|
|
|
2008-04-21 04:13:46 +04:00
|
|
|
static void
|
|
|
|
timer_intr(void *cookie)
|
|
|
|
{
|
|
|
|
ksiginfo_t ksi;
|
|
|
|
struct ptimer *pt;
|
|
|
|
proc_t *p;
|
|
|
|
|
2009-01-31 02:11:27 +03:00
|
|
|
mutex_enter(proc_lock);
|
2008-04-21 04:13:46 +04:00
|
|
|
mutex_spin_enter(&timer_lock);
|
|
|
|
while ((pt = TAILQ_FIRST(&timer_queue)) != NULL) {
|
|
|
|
TAILQ_REMOVE(&timer_queue, pt, pt_chain);
|
|
|
|
KASSERT(pt->pt_queued);
|
|
|
|
pt->pt_queued = false;
|
|
|
|
|
|
|
|
if (pt->pt_proc->p_timers == NULL) {
|
|
|
|
/* Process is dying. */
|
|
|
|
continue;
|
|
|
|
}
|
2008-10-15 10:51:17 +04:00
|
|
|
p = pt->pt_proc;
|
|
|
|
#ifdef KERN_SA
|
|
|
|
if (pt->pt_ev.sigev_notify == SIGEV_SA) {
|
|
|
|
timer_sa_intr(pt, p);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
#endif /* KERN_SA */
|
|
|
|
if (pt->pt_ev.sigev_notify != SIGEV_SIGNAL)
|
|
|
|
continue;
|
2008-04-21 04:13:46 +04:00
|
|
|
if (sigismember(&p->p_sigpend.sp_set, pt->pt_ev.sigev_signo)) {
|
2003-01-18 13:06:22 +03:00
|
|
|
pt->pt_overruns++;
|
2008-04-21 04:13:46 +04:00
|
|
|
continue;
|
2003-02-04 02:39:40 +03:00
|
|
|
}
|
2008-04-21 04:13:46 +04:00
|
|
|
|
|
|
|
KSI_INIT(&ksi);
|
|
|
|
ksi.ksi_signo = pt->pt_ev.sigev_signo;
|
|
|
|
ksi.ksi_code = SI_TIMER;
|
|
|
|
ksi.ksi_value = pt->pt_ev.sigev_value;
|
|
|
|
pt->pt_poverruns = pt->pt_overruns;
|
|
|
|
pt->pt_overruns = 0;
|
|
|
|
mutex_spin_exit(&timer_lock);
|
|
|
|
kpsignal(p, &ksi, NULL);
|
|
|
|
mutex_spin_enter(&timer_lock);
|
2003-01-18 13:06:22 +03:00
|
|
|
}
|
2008-04-21 04:13:46 +04:00
|
|
|
mutex_spin_exit(&timer_lock);
|
2009-01-31 02:11:27 +03:00
|
|
|
mutex_exit(proc_lock);
|
2003-01-18 13:06:22 +03:00
|
|
|
}
|
2009-10-04 00:48:42 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Check if the time will wrap if set to ts.
|
|
|
|
*
|
|
|
|
* ts - timespec describing the new time
|
|
|
|
* delta - the delta between the current time and ts
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
time_wraps(struct timespec *ts, struct timespec *delta)
|
|
|
|
{
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Don't allow the time to be set forward so far it
|
|
|
|
* will wrap and become negative, thus allowing an
|
|
|
|
* attacker to bypass the next check below. The
|
|
|
|
* cutoff is 1 year before rollover occurs, so even
|
|
|
|
* if the attacker uses adjtime(2) to move the time
|
|
|
|
* past the cutoff, it will take a very long time
|
|
|
|
* to get to the wrap point.
|
|
|
|
*/
|
|
|
|
if ((ts->tv_sec > LLONG_MAX - 365*24*60*60) ||
|
|
|
|
(delta->tv_sec < 0 || delta->tv_nsec < 0))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|