2023-10-17 13:27:34 +03:00
|
|
|
/* $NetBSD: kern_ktrace.c,v 1.184 2023/10/17 10:27:34 riastradh Exp $ */
|
2007-08-15 16:07:23 +04:00
|
|
|
|
|
|
|
/*-
|
2020-03-14 21:08:38 +03:00
|
|
|
* Copyright (c) 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
|
2007-08-15 16:07:23 +04:00
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* This code is derived from software contributed to The NetBSD Foundation
|
|
|
|
* by Andrew Doran.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
|
|
|
|
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
|
|
|
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
|
|
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
|
|
|
|
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
|
|
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
|
|
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
|
|
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
|
|
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
|
|
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
|
|
* POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
1994-06-29 10:29:24 +04:00
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
/*
|
1994-05-18 09:12:10 +04:00
|
|
|
* Copyright (c) 1989, 1993
|
|
|
|
* The Regents of the University of California. All rights reserved.
|
1993-03-21 12:45:37 +03:00
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
2003-08-07 20:26:28 +04:00
|
|
|
* 3. Neither the name of the University nor the names of its contributors
|
1993-03-21 12:45:37 +03:00
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
1998-03-01 05:20:01 +03:00
|
|
|
* @(#)kern_ktrace.c 8.5 (Berkeley) 5/14/95
|
1993-03-21 12:45:37 +03:00
|
|
|
*/
|
|
|
|
|
2001-11-12 18:25:01 +03:00
|
|
|
#include <sys/cdefs.h>
|
2023-10-17 13:27:34 +03:00
|
|
|
__KERNEL_RCSID(0, "$NetBSD: kern_ktrace.c,v 1.184 2023/10/17 10:27:34 riastradh Exp $");
|
1998-06-26 01:17:15 +04:00
|
|
|
|
1993-12-18 06:59:02 +03:00
|
|
|
#include <sys/param.h>
|
2023-10-17 13:27:34 +03:00
|
|
|
|
|
|
|
#include <sys/callout.h>
|
|
|
|
#include <sys/cpu.h>
|
1993-12-18 06:59:02 +03:00
|
|
|
#include <sys/file.h>
|
1998-05-02 22:33:19 +04:00
|
|
|
#include <sys/filedesc.h>
|
2000-05-27 04:40:29 +04:00
|
|
|
#include <sys/ioctl.h>
|
2006-05-15 01:15:11 +04:00
|
|
|
#include <sys/kauth.h>
|
2023-10-17 13:27:34 +03:00
|
|
|
#include <sys/kernel.h>
|
|
|
|
#include <sys/kmem.h>
|
|
|
|
#include <sys/kthread.h>
|
|
|
|
#include <sys/ktrace.h>
|
1994-10-20 07:22:35 +03:00
|
|
|
#include <sys/mount.h>
|
2023-10-17 13:27:34 +03:00
|
|
|
#include <sys/proc.h>
|
|
|
|
#include <sys/syncobj.h>
|
1994-10-20 07:22:35 +03:00
|
|
|
#include <sys/syscallargs.h>
|
2023-10-17 13:27:34 +03:00
|
|
|
#include <sys/syslog.h>
|
|
|
|
#include <sys/systm.h>
|
1994-10-20 07:22:35 +03:00
|
|
|
|
2004-09-23 02:15:03 +04:00
|
|
|
/*
|
2007-03-29 21:37:13 +04:00
|
|
|
* TODO:
|
2004-09-23 02:15:03 +04:00
|
|
|
* - need better error reporting?
|
|
|
|
* - userland utility to sort ktrace.out by timestamp.
|
|
|
|
* - keep minimum information in ktrace_entry when rest of alloc failed.
|
|
|
|
* - per trace control of configurable parameters.
|
|
|
|
*/
|
|
|
|
|
|
|
|
struct ktrace_entry {
|
|
|
|
TAILQ_ENTRY(ktrace_entry) kte_list;
|
2007-02-10 00:55:00 +03:00
|
|
|
struct ktr_header kte_kth;
|
|
|
|
void *kte_buf;
|
2016-07-07 09:55:38 +03:00
|
|
|
size_t kte_bufsz;
|
2007-02-10 00:55:00 +03:00
|
|
|
#define KTE_SPACE 32
|
2011-09-01 22:24:19 +04:00
|
|
|
uint8_t kte_space[KTE_SPACE] __aligned(sizeof(register_t));
|
2004-09-23 02:15:03 +04:00
|
|
|
};
|
|
|
|
|
|
|
|
struct ktr_desc {
|
|
|
|
TAILQ_ENTRY(ktr_desc) ktd_list;
|
|
|
|
int ktd_flags;
|
|
|
|
#define KTDF_WAIT 0x0001
|
|
|
|
#define KTDF_DONE 0x0002
|
|
|
|
#define KTDF_BLOCKING 0x0004
|
|
|
|
#define KTDF_INTERACTIVE 0x0008
|
|
|
|
int ktd_error;
|
|
|
|
#define KTDE_ENOMEM 0x0001
|
|
|
|
#define KTDE_ENOSPC 0x0002
|
|
|
|
int ktd_errcnt;
|
|
|
|
int ktd_ref; /* # of reference */
|
|
|
|
int ktd_qcount; /* # of entry in the queue */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Params to control behaviour.
|
|
|
|
*/
|
|
|
|
int ktd_delayqcnt; /* # of entry allowed to delay */
|
|
|
|
int ktd_wakedelay; /* delay of wakeup in *tick* */
|
|
|
|
int ktd_intrwakdl; /* ditto, but when interactive */
|
|
|
|
|
2008-03-22 00:54:58 +03:00
|
|
|
file_t *ktd_fp; /* trace output file */
|
2007-08-15 16:07:23 +04:00
|
|
|
lwp_t *ktd_lwp; /* our kernel thread */
|
2004-09-23 02:15:03 +04:00
|
|
|
TAILQ_HEAD(, ktrace_entry) ktd_queue;
|
2007-07-10 00:51:58 +04:00
|
|
|
callout_t ktd_wakch; /* delayed wakeup */
|
2007-02-10 00:55:00 +03:00
|
|
|
kcondvar_t ktd_sync_cv;
|
|
|
|
kcondvar_t ktd_cv;
|
2004-09-23 02:15:03 +04:00
|
|
|
};
|
|
|
|
|
|
|
|
static void ktrwrite(struct ktr_desc *, struct ktrace_entry *);
|
2007-08-15 16:07:23 +04:00
|
|
|
static int ktrops(lwp_t *, struct proc *, int, int,
|
2004-09-23 02:15:03 +04:00
|
|
|
struct ktr_desc *);
|
2007-08-15 16:07:23 +04:00
|
|
|
static int ktrsetchildren(lwp_t *, struct proc *, int, int,
|
2004-09-23 02:15:03 +04:00
|
|
|
struct ktr_desc *);
|
2007-08-15 16:07:23 +04:00
|
|
|
static int ktrcanset(lwp_t *, struct proc *);
|
2008-03-22 00:54:58 +03:00
|
|
|
static int ktrsamefile(file_t *, file_t *);
|
2007-08-15 16:07:23 +04:00
|
|
|
static void ktr_kmem(lwp_t *, int, const void *, size_t);
|
|
|
|
static void ktr_io(lwp_t *, int, enum uio_rw, struct iovec *, size_t);
|
2004-09-23 02:15:03 +04:00
|
|
|
|
|
|
|
static struct ktr_desc *
|
2008-03-22 00:54:58 +03:00
|
|
|
ktd_lookup(file_t *);
|
2004-09-23 02:15:03 +04:00
|
|
|
static void ktdrel(struct ktr_desc *);
|
|
|
|
static void ktdref(struct ktr_desc *);
|
|
|
|
static void ktefree(struct ktrace_entry *);
|
|
|
|
static void ktd_logerrl(struct ktr_desc *, int);
|
|
|
|
static void ktrace_thread(void *);
|
2007-02-10 00:55:00 +03:00
|
|
|
static int ktrderefall(struct ktr_desc *, int);
|
2004-09-23 02:15:03 +04:00
|
|
|
|
|
|
|
/*
|
2020-02-05 12:59:50 +03:00
|
|
|
* Default values.
|
2004-09-23 02:15:03 +04:00
|
|
|
*/
|
|
|
|
#define KTD_MAXENTRY 1000 /* XXX: tune */
|
|
|
|
#define KTD_TIMEOUT 5 /* XXX: tune */
|
|
|
|
#define KTD_DELAYQCNT 100 /* XXX: tune */
|
|
|
|
#define KTD_WAKEDELAY 5000 /* XXX: tune */
|
|
|
|
#define KTD_INTRWAKDL 100 /* XXX: tune */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Patchable variables.
|
|
|
|
*/
|
|
|
|
int ktd_maxentry = KTD_MAXENTRY; /* max # of entry in the queue */
|
|
|
|
int ktd_timeout = KTD_TIMEOUT; /* timeout in seconds */
|
|
|
|
int ktd_delayqcnt = KTD_DELAYQCNT; /* # of entry allowed to delay */
|
|
|
|
int ktd_wakedelay = KTD_WAKEDELAY; /* delay of wakeup in *ms* */
|
|
|
|
int ktd_intrwakdl = KTD_INTRWAKDL; /* ditto, but when interactive */
|
|
|
|
|
2007-08-15 16:07:23 +04:00
|
|
|
kmutex_t ktrace_lock;
|
|
|
|
int ktrace_on;
|
2004-09-23 02:15:03 +04:00
|
|
|
static TAILQ_HEAD(, ktr_desc) ktdq = TAILQ_HEAD_INITIALIZER(ktdq);
|
2008-05-27 21:48:27 +04:00
|
|
|
static pool_cache_t kte_cache;
|
2004-09-23 02:15:03 +04:00
|
|
|
|
2009-10-03 01:47:35 +04:00
|
|
|
static kauth_listener_t ktrace_listener;
|
|
|
|
|
2007-03-29 21:37:13 +04:00
|
|
|
/*
 * Wake the descriptor's writer immediately: cancel any pending
 * delayed-wakeup callout and signal ktd_cv.
 */
static void
ktd_wakeup(struct ktr_desc *ktd)
{

	/* Cancel the delayed wakeup; we are waking up right now. */
	callout_stop(&ktd->ktd_wakch);
	cv_signal(&ktd->ktd_cv);
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
ktd_callout(void *arg)
|
|
|
|
{
|
|
|
|
|
2007-12-04 12:08:58 +03:00
|
|
|
mutex_enter(&ktrace_lock);
|
2007-03-29 21:37:13 +04:00
|
|
|
ktd_wakeup(arg);
|
2007-12-04 12:08:58 +03:00
|
|
|
mutex_exit(&ktrace_lock);
|
2004-09-23 02:15:03 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
ktd_logerrl(struct ktr_desc *ktd, int error)
|
|
|
|
{
|
|
|
|
|
|
|
|
ktd->ktd_error |= error;
|
|
|
|
ktd->ktd_errcnt++;
|
|
|
|
}
|
|
|
|
|
2007-02-10 00:55:00 +03:00
|
|
|
#if 0
|
2004-09-23 02:15:03 +04:00
|
|
|
static void
|
|
|
|
ktd_logerr(struct proc *p, int error)
|
|
|
|
{
|
2007-02-10 00:55:00 +03:00
|
|
|
struct ktr_desc *ktd;
|
|
|
|
|
2007-08-15 16:07:23 +04:00
|
|
|
KASSERT(mutex_owned(&ktrace_lock));
|
2004-09-23 02:15:03 +04:00
|
|
|
|
2007-02-10 00:55:00 +03:00
|
|
|
ktd = p->p_tracep;
|
2004-09-23 02:15:03 +04:00
|
|
|
if (ktd == NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
ktd_logerrl(ktd, error);
|
2007-02-10 00:55:00 +03:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2009-10-03 01:47:35 +04:00
|
|
|
/*
 * kauth(9) listener for KAUTH_PROCESS_KTRACE requests on the process
 * scope.  Allows unprivileged ktrace of a target process only when the
 * requester's credentials match the target's real and saved ids and
 * the target is neither persistently traced nor set-id.
 */
static int
ktrace_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    void *arg0, void *arg1, void *arg2, void *arg3)
{
	struct proc *p;
	int result;
	enum kauth_process_req req;

	result = KAUTH_RESULT_DEFER;
	p = arg0;	/* target process */

	/* Only KTRACE requests are of interest here. */
	if (action != KAUTH_PROCESS_KTRACE)
		return result;

	req = (enum kauth_process_req)(uintptr_t)arg1;

	/* Privileged; secmodel should handle these. */
	if (req == KAUTH_REQ_PROCESS_KTRACE_PERSISTENT)
		return result;

	/*
	 * Defer (i.e. do not allow here) for persistently-traced or
	 * set-id targets.
	 */
	if ((p->p_traceflag & KTRFAC_PERSISTENT) ||
	    (p->p_flag & PK_SUGID))
		return result;

	/*
	 * Allow only when the requester's user/group ids match the
	 * target's real and saved credentials.
	 */
	if (kauth_cred_geteuid(cred) == kauth_cred_getuid(p->p_cred) &&
	    kauth_cred_getuid(cred) == kauth_cred_getsvuid(p->p_cred) &&
	    kauth_cred_getgid(cred) == kauth_cred_getgid(p->p_cred) &&
	    kauth_cred_getgid(cred) == kauth_cred_getsvgid(p->p_cred))
		result = KAUTH_RESULT_ALLOW;

	return result;
}
|
|
|
|
|
2007-02-10 00:55:00 +03:00
|
|
|
/*
 * Initialise the ktrace system: the global lock, the trace-entry pool
 * cache, and the kauth listener used for authorizing trace requests.
 */
void
ktrinit(void)
{

	mutex_init(&ktrace_lock, MUTEX_DEFAULT, IPL_NONE);
	kte_cache = pool_cache_init(sizeof(struct ktrace_entry), 0, 0, 0,
	    "ktrace", &pool_allocator_nointr, IPL_NONE, NULL, NULL, NULL);

	ktrace_listener = kauth_listen_scope(KAUTH_SCOPE_PROCESS,
	    ktrace_listener_cb, NULL);
}
|
|
|
|
|
|
|
|
/*
 * Release a reference.  Called with ktrace_lock held.
 * Dropping the last reference marks the descriptor done and signals
 * ktd_cv so its writer can notice and terminate.
 */
static void
ktdrel(struct ktr_desc *ktd)
{

	KASSERT(mutex_owned(&ktrace_lock));

	KDASSERT(ktd->ktd_ref != 0);
	KASSERT(ktd->ktd_ref > 0);
	KASSERT(ktrace_on > 0);
	/* Global count of outstanding descriptor references. */
	ktrace_on--;
	if (--ktd->ktd_ref <= 0) {
		ktd->ktd_flags |= KTDF_DONE;
		cv_signal(&ktd->ktd_cv);
	}
}
|
|
|
|
|
2021-02-27 16:02:42 +03:00
|
|
|
/*
 * Acquire a reference on a trace descriptor.  Called with ktrace_lock
 * held.  Also bumps the global ktrace_on reference count.
 */
static void
ktdref(struct ktr_desc *ktd)
{

	KASSERT(mutex_owned(&ktrace_lock));

	ktd->ktd_ref++;
	ktrace_on++;
}
|
|
|
|
|
2021-02-27 16:02:42 +03:00
|
|
|
static struct ktr_desc *
|
2008-03-22 00:54:58 +03:00
|
|
|
ktd_lookup(file_t *fp)
|
2004-09-23 02:15:03 +04:00
|
|
|
{
|
|
|
|
struct ktr_desc *ktd;
|
|
|
|
|
2007-08-15 16:07:23 +04:00
|
|
|
KASSERT(mutex_owned(&ktrace_lock));
|
2007-02-10 00:55:00 +03:00
|
|
|
|
2004-09-23 02:15:03 +04:00
|
|
|
for (ktd = TAILQ_FIRST(&ktdq); ktd != NULL;
|
|
|
|
ktd = TAILQ_NEXT(ktd, ktd_list)) {
|
|
|
|
if (ktrsamefile(ktd->ktd_fp, fp)) {
|
2007-08-15 16:07:23 +04:00
|
|
|
ktdref(ktd);
|
2004-09-23 02:15:03 +04:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2007-02-10 00:55:00 +03:00
|
|
|
|
2004-09-23 02:15:03 +04:00
|
|
|
return (ktd);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Queue a completed trace entry on the process's trace descriptor and
 * wake (or schedule a delayed wakeup of) the descriptor's writer.
 * Consumes `kte': it is either queued or freed on failure.  Balances
 * the caller's ktrenter() with ktrexit() on every path.
 */
void
ktraddentry(lwp_t *l, struct ktrace_entry *kte, int flags)
{
	struct proc *p = l->l_proc;
	struct ktr_desc *ktd;
#ifdef DEBUG
	struct timeval t1, t2;
#endif

	mutex_enter(&ktrace_lock);

	if (p->p_traceflag & KTRFAC_TRC_EMUL) {
		/* Add emulation trace before first entry for this process */
		p->p_traceflag &= ~KTRFAC_TRC_EMUL;
		/*
		 * ktremul() allocates its own entry, so drop the lock
		 * and the recursion guard around it.
		 */
		mutex_exit(&ktrace_lock);
		ktrexit(l);
		ktremul();
		(void)ktrenter(l);
		mutex_enter(&ktrace_lock);
	}

	/* Tracing may have been cancelled. */
	ktd = p->p_tracep;
	if (ktd == NULL)
		goto freekte;

	/*
	 * Bump reference count so that the object will remain while
	 * we are here.  Note that the trace is controlled by other
	 * process.
	 */
	ktdref(ktd);

	if (ktd->ktd_flags & KTDF_DONE)
		goto relktd;

	if (ktd->ktd_qcount > ktd_maxentry) {
		/* Queue is full; record the overflow and drop the entry. */
		ktd_logerrl(ktd, KTDE_ENOSPC);
		goto relktd;
	}
	TAILQ_INSERT_TAIL(&ktd->ktd_queue, kte, kte_list);
	ktd->ktd_qcount++;
	if (ktd->ktd_flags & KTDF_BLOCKING)
		goto skip_sync;

	if (flags & KTA_WAITOK &&
	    (/* flags & KTA_LARGE */0 || ktd->ktd_flags & KTDF_WAIT ||
	    ktd->ktd_qcount > ktd_maxentry >> 1))
		/*
		 * Sync with writer thread since we're requesting rather
		 * big one or many requests are pending.
		 */
		do {
			ktd->ktd_flags |= KTDF_WAIT;
			ktd_wakeup(ktd);
#ifdef DEBUG
			getmicrouptime(&t1);
#endif
			if (cv_timedwait(&ktd->ktd_sync_cv, &ktrace_lock,
			    ktd_timeout * hz) != 0) {
				ktd->ktd_flags |= KTDF_BLOCKING;
				/*
				 * Maybe the writer thread is blocking
				 * completely for some reason, but
				 * don't stop target process forever.
				 */
				log(LOG_NOTICE, "ktrace timeout\n");
				break;
			}
#ifdef DEBUG
			getmicrouptime(&t2);
			timersub(&t2, &t1, &t2);
			if (t2.tv_sec > 0)
				log(LOG_NOTICE,
				    "ktrace long wait: %lld.%06ld\n",
				    (long long)t2.tv_sec, (long)t2.tv_usec);
#endif
		} while (p->p_tracep == ktd &&
		    (ktd->ktd_flags & (KTDF_WAIT | KTDF_DONE)) == KTDF_WAIT);
	else {
		/* Schedule delayed wakeup */
		if (ktd->ktd_qcount > ktd->ktd_delayqcnt)
			ktd_wakeup(ktd);	/* Wakeup now */
		else if (!callout_pending(&ktd->ktd_wakch))
			callout_reset(&ktd->ktd_wakch,
			    ktd->ktd_flags & KTDF_INTERACTIVE ?
			    ktd->ktd_intrwakdl : ktd->ktd_wakedelay,
			    ktd_callout, ktd);
	}

skip_sync:
	ktdrel(ktd);
	mutex_exit(&ktrace_lock);
	ktrexit(l);
	return;

relktd:
	ktdrel(ktd);

freekte:
	mutex_exit(&ktrace_lock);
	ktefree(kte);
	ktrexit(l);
}
|
|
|
|
|
2021-02-27 16:02:42 +03:00
|
|
|
static void
|
2004-09-23 02:15:03 +04:00
|
|
|
ktefree(struct ktrace_entry *kte)
|
|
|
|
{
|
|
|
|
|
2007-02-10 00:55:00 +03:00
|
|
|
if (kte->kte_buf != kte->kte_space)
|
|
|
|
kmem_free(kte->kte_buf, kte->kte_bufsz);
|
2008-05-27 21:48:27 +04:00
|
|
|
pool_cache_put(kte_cache, kte);
|
2004-09-23 02:15:03 +04:00
|
|
|
}
|
2000-05-30 02:04:11 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* "deep" compare of two files for the purposes of clearing a trace.
|
|
|
|
* Returns true if they're the same open file, or if they point at the
|
|
|
|
* same underlying vnode/socket.
|
|
|
|
*/
|
|
|
|
|
2021-02-27 16:02:42 +03:00
|
|
|
static int
|
2008-03-22 00:54:58 +03:00
|
|
|
ktrsamefile(file_t *f1, file_t *f2)
|
2000-05-30 02:04:11 +04:00
|
|
|
{
|
2004-02-26 00:40:40 +03:00
|
|
|
|
2000-05-30 02:04:11 +04:00
|
|
|
return ((f1 == f2) ||
|
2000-05-30 02:29:01 +04:00
|
|
|
((f1 != NULL) && (f2 != NULL) &&
|
|
|
|
(f1->f_type == f2->f_type) &&
|
2000-05-30 02:04:11 +04:00
|
|
|
(f1->f_data == f2->f_data)));
|
|
|
|
}
|
1996-02-04 05:15:01 +03:00
|
|
|
|
1998-05-02 22:33:19 +04:00
|
|
|
/*
 * Detach process `p' from its trace descriptor: clear its trace flags,
 * drop the descriptor reference, and wake anyone synced on the
 * descriptor.  Called with ktrace_lock held.
 */
void
ktrderef(struct proc *p)
{
	struct ktr_desc *ktd = p->p_tracep;

	KASSERT(mutex_owned(&ktrace_lock));

	p->p_traceflag = 0;
	if (ktd == NULL)
		return;
	p->p_tracep = NULL;

	/* Release anyone sleeping in ktraddentry()'s sync wait. */
	cv_broadcast(&ktd->ktd_sync_cv);
	ktdrel(ktd);
}
|
|
|
|
|
|
|
|
/*
 * Take an additional reference on the trace descriptor of process
 * `p'.  Called with ktrace_lock held.
 */
void
ktradref(struct proc *p)
{
	struct ktr_desc *ktd = p->p_tracep;

	KASSERT(mutex_owned(&ktrace_lock));

	ktdref(ktd);
}
|
|
|
|
|
2021-02-27 16:02:42 +03:00
|
|
|
/*
 * Detach every process currently tracing to descriptor `ktd'.  When
 * `auth' is set each process is first checked with ktrcanset(); any
 * process that fails the check is left attached and EPERM is returned
 * (the remaining processes are still detached).
 */
static int
ktrderefall(struct ktr_desc *ktd, int auth)
{
	lwp_t *curl = curlwp;
	struct proc *p;
	int error = 0;

	mutex_enter(&proc_lock);
	PROCLIST_FOREACH(p, &allproc) {
		/* Cheap unlocked pre-filter; re-checked under the locks. */
		if (p->p_tracep != ktd)
			continue;
		mutex_enter(p->p_lock);
		mutex_enter(&ktrace_lock);
		if (p->p_tracep == ktd) {
			if (!auth || ktrcanset(curl, p))
				ktrderef(p);
			else
				error = EPERM;
		}
		mutex_exit(&ktrace_lock);
		mutex_exit(p->p_lock);
	}
	mutex_exit(&proc_lock);

	return error;
}
|
|
|
|
|
|
|
|
/*
 * Allocate and initialize a trace entry with a payload of `sz' bytes.
 * Payloads no larger than kte_space are stored inline; bigger ones use
 * a kmem allocation.  The common ktr_header fields (length, type, pid,
 * command name, version, lwp id, timestamp) are filled in here.
 *
 * Returns 0 with *ktep set to the entry and *bufp to its payload
 * buffer, or EAGAIN if this LWP is already inside ktrace (ktrenter()
 * refused re-entry).  On success the entry is normally handed to
 * ktraddentry(), which also performs the matching ktrexit().
 */
int
ktealloc(struct ktrace_entry **ktep, void **bufp, lwp_t *l, int type,
    size_t sz)
{
	struct proc *p = l->l_proc;
	struct ktrace_entry *kte;
	struct ktr_header *kth;
	void *buf;

	if (ktrenter(l))
		return EAGAIN;

	kte = pool_cache_get(kte_cache, PR_WAITOK);
	if (sz > sizeof(kte->kte_space)) {
		buf = kmem_alloc(sz, KM_SLEEP);
	} else
		buf = kte->kte_space;

	kte->kte_bufsz = sz;
	kte->kte_buf = buf;

	kth = &kte->kte_kth;
	(void)memset(kth, 0, sizeof(*kth));
	kth->ktr_len = sz;
	kth->ktr_type = type;
	kth->ktr_pid = p->p_pid;
	memcpy(kth->ktr_comm, p->p_comm, MAXCOMLEN);
	kth->ktr_version = KTRFAC_VERSION(p->p_traceflag);
	kth->ktr_lid = l->l_lid;
	nanotime(&kth->ktr_ts);

	*ktep = kte;
	*bufp = buf;

	return 0;
}
|
|
|
|
|
2016-09-13 10:39:45 +03:00
|
|
|
/*
 * Override the payload length recorded in the entry's ktr_header
 * (presumably for callers that fill less than the allocated size --
 * see ktealloc(), which initializes ktr_len to the full size).
 */
void
ktesethdrlen(struct ktrace_entry *kte, size_t l)
{
	kte->kte_kth.ktr_len = l;
}
|
|
|
|
|
2004-09-23 02:15:03 +04:00
|
|
|
/*
 * Record a KTR_SYSCALL event for the current LWP: the syscall code
 * followed by its `narg' register_t arguments, stored inline after the
 * ktr_syscall header.
 */
void
ktr_syscall(register_t code, const register_t args[], int narg)
{
	lwp_t *l = curlwp;
	struct proc *p = l->l_proc;
	struct ktrace_entry *kte;
	struct ktr_syscall *ktp;
	register_t *argp;
	size_t len;
	u_int i;

	if (!KTRPOINT(p, KTR_SYSCALL))
		return;

	len = sizeof(struct ktr_syscall) + narg * sizeof argp[0];

	if (ktealloc(&kte, (void *)&ktp, l, KTR_SYSCALL, len))
		return;

	ktp->ktr_code = code;
	ktp->ktr_argsize = narg * sizeof argp[0];
	/* Arguments are copied immediately after the header. */
	argp = (register_t *)(ktp + 1);
	for (i = 0; i < narg; i++)
		*argp++ = args[i];

	ktraddentry(l, kte, KTA_WAITOK);
}
|
|
|
|
|
2004-09-23 02:15:03 +04:00
|
|
|
/*
 * Record a KTR_SYSRET event for the current LWP: syscall code, error,
 * and -- only on success with a non-NULL retval -- the two return
 * value registers.
 */
void
ktr_sysret(register_t code, int error, register_t *retval)
{
	lwp_t *l = curlwp;
	struct ktrace_entry *kte;
	struct ktr_sysret *ktp;

	if (!KTRPOINT(l->l_proc, KTR_SYSRET))
		return;

	if (ktealloc(&kte, (void *)&ktp, l, KTR_SYSRET,
	    sizeof(struct ktr_sysret)))
		return;

	ktp->ktr_code = code;
	ktp->ktr_eosys = 0;			/* XXX unused */
	ktp->ktr_error = error;
	/* Return values are meaningful only when the syscall succeeded. */
	ktp->ktr_retval = retval && error == 0 ? retval[0] : 0;
	ktp->ktr_retval_1 = retval && error == 0 ? retval[1] : 0;

	ktraddentry(l, kte, KTA_WAITOK);
}
|
|
|
|
|
2004-09-23 02:15:03 +04:00
|
|
|
void
|
2007-08-15 16:07:23 +04:00
|
|
|
ktr_namei(const char *path, size_t pathlen)
|
2007-04-26 20:27:32 +04:00
|
|
|
{
|
2007-08-15 16:07:23 +04:00
|
|
|
lwp_t *l = curlwp;
|
|
|
|
|
|
|
|
if (!KTRPOINT(l->l_proc, KTR_NAMEI))
|
|
|
|
return;
|
|
|
|
|
|
|
|
ktr_kmem(l, KTR_NAMEI, path, pathlen);
|
2007-04-26 20:27:32 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Record a KTR_NAMEI event whose payload is the emulation root prefix
 * (`eroot') immediately followed by the looked-up path.
 */
void
ktr_namei2(const char *eroot, size_t erootlen,
    const char *path, size_t pathlen)
{
	lwp_t *l = curlwp;
	struct ktrace_entry *kte;
	void *buf;

	if (!KTRPOINT(l->l_proc, KTR_NAMEI))
		return;

	if (ktealloc(&kte, &buf, l, KTR_NAMEI, erootlen + pathlen))
		return;
	/* Concatenate the two strings into the payload buffer. */
	memcpy(buf, eroot, erootlen);
	buf = (char *)buf + erootlen;
	memcpy(buf, path, pathlen);
	ktraddentry(l, kte, KTA_WAITOK);
}
|
|
|
|
|
2004-09-23 02:15:03 +04:00
|
|
|
void
|
2007-08-15 16:07:23 +04:00
|
|
|
ktr_emul(void)
|
1995-07-19 19:19:08 +04:00
|
|
|
{
|
2007-08-15 16:07:23 +04:00
|
|
|
lwp_t *l = curlwp;
|
2005-12-11 15:16:03 +03:00
|
|
|
const char *emul = l->l_proc->p_emul->e_name;
|
1995-07-19 19:19:08 +04:00
|
|
|
|
2007-08-15 16:07:23 +04:00
|
|
|
if (!KTRPOINT(l->l_proc, KTR_EMUL))
|
|
|
|
return;
|
|
|
|
|
|
|
|
ktr_kmem(l, KTR_EMUL, emul, strlen(emul));
|
1995-07-19 19:19:08 +04:00
|
|
|
}
|
|
|
|
|
2004-09-23 02:15:03 +04:00
|
|
|
void
|
2007-08-15 16:07:23 +04:00
|
|
|
ktr_execarg(const void *bf, size_t len)
|
|
|
|
{
|
|
|
|
lwp_t *l = curlwp;
|
|
|
|
|
|
|
|
if (!KTRPOINT(l->l_proc, KTR_EXEC_ARG))
|
|
|
|
return;
|
|
|
|
|
|
|
|
ktr_kmem(l, KTR_EXEC_ARG, bf, len);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ktr_execenv(const void *bf, size_t len)
|
|
|
|
{
|
|
|
|
lwp_t *l = curlwp;
|
|
|
|
|
|
|
|
if (!KTRPOINT(l->l_proc, KTR_EXEC_ENV))
|
|
|
|
return;
|
|
|
|
|
|
|
|
ktr_kmem(l, KTR_EXEC_ENV, bf, len);
|
|
|
|
}
|
|
|
|
|
2011-06-02 01:24:59 +04:00
|
|
|
/*
 * Record a KTR_EXEC_FD event: a file descriptor involved in exec and
 * its descriptor type.
 */
void
ktr_execfd(int fd, u_int dtype)
{
	struct ktrace_entry *kte;
	struct ktr_execfd* ktp;

	lwp_t *l = curlwp;

	if (!KTRPOINT(l->l_proc, KTR_EXEC_FD))
		return;

	if (ktealloc(&kte, (void *)&ktp, l, KTR_EXEC_FD, sizeof(*ktp)))
		return;

	ktp->ktr_fd = fd;
	ktp->ktr_dtype = dtype;
	ktraddentry(l, kte, KTA_WAITOK);
}
|
|
|
|
|
2007-08-15 16:07:23 +04:00
|
|
|
static void
|
|
|
|
ktr_kmem(lwp_t *l, int type, const void *bf, size_t len)
|
2003-07-17 02:42:47 +04:00
|
|
|
{
|
2004-09-23 02:15:03 +04:00
|
|
|
struct ktrace_entry *kte;
|
2007-02-10 00:55:00 +03:00
|
|
|
void *buf;
|
2004-09-23 02:15:03 +04:00
|
|
|
|
2007-02-10 00:55:00 +03:00
|
|
|
if (ktealloc(&kte, &buf, l, type, len))
|
|
|
|
return;
|
|
|
|
memcpy(buf, bf, len);
|
2005-12-11 15:16:03 +03:00
|
|
|
ktraddentry(l, kte, KTA_WAITOK);
|
2003-07-17 02:42:47 +04:00
|
|
|
}
|
|
|
|
|
2007-08-15 16:07:23 +04:00
|
|
|
static void
|
|
|
|
ktr_io(lwp_t *l, int fd, enum uio_rw rw, struct iovec *iov, size_t len)
|
1993-03-21 12:45:37 +03:00
|
|
|
{
|
2004-09-23 02:15:03 +04:00
|
|
|
struct ktrace_entry *kte;
|
1998-05-02 22:33:19 +04:00
|
|
|
struct ktr_genio *ktp;
|
2007-08-15 16:07:23 +04:00
|
|
|
size_t resid = len, cnt, buflen;
|
2009-08-05 23:53:42 +04:00
|
|
|
char *cp;
|
2000-04-19 23:14:17 +04:00
|
|
|
|
2007-02-10 00:55:00 +03:00
|
|
|
next:
|
Rename min/max -> uimin/uimax for better honesty.
These functions are defined on unsigned int. The generic name
min/max should not silently truncate to 32 bits on 64-bit systems.
This is purely a name change -- no functional change intended.
HOWEVER! Some subsystems have
#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))
even though our standard name for that is MIN/MAX. Although these
may invite multiple evaluation bugs, these do _not_ cause integer
truncation.
To avoid `fixing' these cases, I first changed the name in libkern,
and then compile-tested every file where min/max occurred in order to
confirm that it failed -- and thus confirm that nothing shadowed
min/max -- before changing it.
I have left a handful of bootloaders that are too annoying to
compile-test, and some dead code:
cobalt ews4800mips hp300 hppa ia64 luna68k vax
acorn32/if_ie.c (not included in any kernels)
macppc/if_gm.c (superseded by gem(4))
It should be easy to fix the fallout once identified -- this way of
doing things fails safe, and the goal here, after all, is to _avoid_
silent integer truncations, not introduce them.
Maybe one day we can reintroduce min/max as type-generic things that
never silently truncate. But we should avoid doing that for a while,
so that existing code has a chance to be detected by the compiler for
conversion to uimin/uimax without changing the semantics until we can
properly audit it all. (Who knows, maybe in some cases integer
truncation is actually intended!)
2018-09-03 19:29:22 +03:00
|
|
|
buflen = uimin(PAGE_SIZE, resid + sizeof(struct ktr_genio));
|
2004-09-23 02:15:03 +04:00
|
|
|
|
2007-02-10 00:55:00 +03:00
|
|
|
if (ktealloc(&kte, (void *)&ktp, l, KTR_GENIO, buflen))
|
|
|
|
return;
|
2000-04-19 23:14:17 +04:00
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
ktp->ktr_fd = fd;
|
|
|
|
ktp->ktr_rw = rw;
|
2000-04-19 23:14:17 +04:00
|
|
|
|
2007-03-04 08:59:00 +03:00
|
|
|
cp = (void *)(ktp + 1);
|
2000-04-19 23:14:17 +04:00
|
|
|
buflen -= sizeof(struct ktr_genio);
|
2007-02-10 00:55:00 +03:00
|
|
|
kte->kte_kth.ktr_len = sizeof(struct ktr_genio);
|
2004-09-23 02:15:03 +04:00
|
|
|
|
|
|
|
while (buflen > 0) {
|
Rename min/max -> uimin/uimax for better honesty.
These functions are defined on unsigned int. The generic name
min/max should not silently truncate to 32 bits on 64-bit systems.
This is purely a name change -- no functional change intended.
HOWEVER! Some subsystems have
#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))
even though our standard name for that is MIN/MAX. Although these
may invite multiple evaluation bugs, these do _not_ cause integer
truncation.
To avoid `fixing' these cases, I first changed the name in libkern,
and then compile-tested every file where min/max occurred in order to
confirm that it failed -- and thus confirm that nothing shadowed
min/max -- before changing it.
I have left a handful of bootloaders that are too annoying to
compile-test, and some dead code:
cobalt ews4800mips hp300 hppa ia64 luna68k vax
acorn32/if_ie.c (not included in any kernels)
macppc/if_gm.c (superseded by gem(4))
It should be easy to fix the fallout once identified -- this way of
doing things fails safe, and the goal here, after all, is to _avoid_
silent integer truncations, not introduce them.
Maybe one day we can reintroduce min/max as type-generic things that
never silently truncate. But we should avoid doing that for a while,
so that existing code has a chance to be detected by the compiler for
conversion to uimin/uimax without changing the semantics until we can
properly audit it all. (Who knows, maybe in some cases integer
truncation is actually intended!)
2018-09-03 19:29:22 +03:00
|
|
|
cnt = uimin(iov->iov_len, buflen);
|
2004-09-23 02:15:03 +04:00
|
|
|
if (copyin(iov->iov_base, cp, cnt) != 0)
|
|
|
|
goto out;
|
2007-02-10 00:55:00 +03:00
|
|
|
kte->kte_kth.ktr_len += cnt;
|
2009-08-05 23:53:42 +04:00
|
|
|
cp += cnt;
|
2004-09-23 02:15:03 +04:00
|
|
|
buflen -= cnt;
|
|
|
|
resid -= cnt;
|
|
|
|
iov->iov_len -= cnt;
|
|
|
|
if (iov->iov_len == 0)
|
|
|
|
iov++;
|
|
|
|
else
|
2007-03-04 08:59:00 +03:00
|
|
|
iov->iov_base = (char *)iov->iov_base + cnt;
|
2004-09-23 02:15:03 +04:00
|
|
|
}
|
2000-04-19 23:14:17 +04:00
|
|
|
|
2004-09-23 02:15:03 +04:00
|
|
|
/*
|
|
|
|
	 * Don't push so many entries at once.  It will cause kmem map
|
|
|
|
* shortage.
|
|
|
|
*/
|
2005-12-11 15:16:03 +03:00
|
|
|
ktraddentry(l, kte, KTA_WAITOK | KTA_LARGE);
|
2004-09-23 02:15:03 +04:00
|
|
|
if (resid > 0) {
|
2020-03-14 21:08:38 +03:00
|
|
|
if (preempt_needed()) {
|
2007-02-10 00:55:00 +03:00
|
|
|
(void)ktrenter(l);
|
|
|
|
preempt();
|
|
|
|
ktrexit(l);
|
|
|
|
}
|
2000-04-19 23:14:17 +04:00
|
|
|
|
2004-09-23 02:15:03 +04:00
|
|
|
goto next;
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
|
|
|
|
2004-09-23 02:15:03 +04:00
|
|
|
return;
|
|
|
|
|
|
|
|
out:
|
|
|
|
ktefree(kte);
|
2007-02-10 00:55:00 +03:00
|
|
|
ktrexit(l);
|
1993-03-21 12:45:37 +03:00
|
|
|
}
|
|
|
|
|
2004-09-23 02:15:03 +04:00
|
|
|
void
|
2007-08-15 16:07:23 +04:00
|
|
|
ktr_genio(int fd, enum uio_rw rw, const void *addr, size_t len, int error)
|
|
|
|
{
|
|
|
|
lwp_t *l = curlwp;
|
|
|
|
struct iovec iov;
|
|
|
|
|
|
|
|
if (!KTRPOINT(l->l_proc, KTR_GENIO) || error != 0)
|
|
|
|
return;
|
|
|
|
iov.iov_base = __UNCONST(addr);
|
|
|
|
iov.iov_len = len;
|
|
|
|
ktr_io(l, fd, rw, &iov, len);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ktr_geniov(int fd, enum uio_rw rw, struct iovec *iov, size_t len, int error)
|
|
|
|
{
|
|
|
|
lwp_t *l = curlwp;
|
|
|
|
|
|
|
|
if (!KTRPOINT(l->l_proc, KTR_GENIO) || error != 0)
|
|
|
|
return;
|
|
|
|
ktr_io(l, fd, rw, iov, len);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ktr_mibio(int fd, enum uio_rw rw, const void *addr, size_t len, int error)
|
|
|
|
{
|
|
|
|
lwp_t *l = curlwp;
|
|
|
|
struct iovec iov;
|
|
|
|
|
|
|
|
if (!KTRPOINT(l->l_proc, KTR_MIB) || error != 0)
|
|
|
|
return;
|
|
|
|
iov.iov_base = __UNCONST(addr);
|
|
|
|
iov.iov_len = len;
|
|
|
|
ktr_io(l, fd, rw, &iov, len);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Record a signal-delivery event (KTR_PSIG) for the current LWP.
 *
 * The record is a struct ktr_psig optionally followed by a siginfo_t:
 * when ksi is non-NULL the full siginfo is appended and ktr_len covers
 * both parts; when ksi is NULL only the fixed-size ktr_psig portion is
 * emitted.  Silently returns if KTR_PSIG tracing is not enabled or the
 * trace entry cannot be allocated.
 */
void
ktr_psig(int sig, sig_t action, const sigset_t *mask,
    const ksiginfo_t *ksi)
{
	struct ktrace_entry *kte;
	lwp_t *l = curlwp;
	/* On-wire layout: fixed header immediately followed by siginfo. */
	struct {
		struct ktr_psig kp;
		siginfo_t si;
	} *kbuf;

	if (!KTRPOINT(l->l_proc, KTR_PSIG))
		return;

	/* Allocate an entry sized for the largest (with-siginfo) form. */
	if (ktealloc(&kte, (void *)&kbuf, l, KTR_PSIG, sizeof(*kbuf)))
		return;

	/* Zero first so padding does not leak kernel stack/heap contents. */
	memset(&kbuf->kp, 0, sizeof(kbuf->kp));
	kbuf->kp.signo = (char)sig;
	kbuf->kp.action = action;
	kbuf->kp.mask = *mask;

	if (ksi) {
		kbuf->kp.code = KSI_TRAPCODE(ksi);
		(void)memset(&kbuf->si, 0, sizeof(kbuf->si));
		kbuf->si._info = ksi->ksi_info;
		/* Full record: header plus siginfo. */
		kte->kte_kth.ktr_len = sizeof(*kbuf);
	} else {
		kbuf->kp.code = 0;
		/* Short record: header only, siginfo omitted. */
		kte->kte_kth.ktr_len = sizeof(struct ktr_psig);
	}

	ktraddentry(l, kte, KTA_WAITOK);
}
|
|
|
|
|
2004-09-23 02:15:03 +04:00
|
|
|
/*
 * Record a context-switch event (KTR_CSW) for the current LWP.
 *
 * On the way out (out != 0) we cannot allocate or sleep, so we only
 * stash a timestamp and flags on the LWP; the actual records are
 * emitted later, on the way back in (out == 0): first a deferred
 * "out" record built from the saved state, then the "in" record.
 */
void
ktr_csw(int out, int user, const struct syncobj *syncobj)
{
	lwp_t *l = curlwp;
	struct proc *p = l->l_proc;
	struct ktrace_entry *kte;
	struct ktr_csw *kc;

	if (!KTRPOINT(p, KTR_CSW))
		return;

	/*
	 * Don't record context switches resulting from blocking on
	 * locks; the results are not useful, and the mutex may be in a
	 * softint, which would lead us to ktealloc in softint context,
	 * which is forbidden.
	 */
	if (syncobj == &mutex_syncobj || syncobj == &rw_syncobj)
		return;
	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	/*
	 * We can't sleep if we're already going to sleep (if original
	 * condition is met during sleep, we hang up).
	 *
	 * XXX This is not ideal: it would be better to maintain a pool
	 * of ktes and actually push this to the kthread when context
	 * switch happens, however given the points where we are called
	 * from that is difficult to do.
	 */
	if (out) {
		/* Re-entrancy guard: bail if already inside ktrace code. */
		if (ktrenter(l))
			return;

		/* Defer: remember when and in what mode we went to sleep. */
		nanotime(&l->l_ktrcsw);
		l->l_pflag |= LP_KTRCSW;
		if (user)
			l->l_pflag |= LP_KTRCSWUSER;
		else
			l->l_pflag &= ~LP_KTRCSWUSER;

		ktrexit(l);
		return;
	}

	/*
	 * On the way back in, we need to record twice: once for entry, and
	 * once for exit.
	 */
	if ((l->l_pflag & LP_KTRCSW) != 0) {
		struct timespec *ts;
		l->l_pflag &= ~LP_KTRCSW;

		if (ktealloc(&kte, (void *)&kc, l, KTR_CSW, sizeof(*kc)))
			return;

		/* Deferred "went to sleep" record from the saved state. */
		kc->out = 1;
		kc->user = ((l->l_pflag & LP_KTRCSWUSER) != 0);

		/*
		 * Back-date the entry's header timestamp to the moment we
		 * actually blocked, in whichever header format the trace
		 * version in use calls for.
		 */
		ts = &l->l_ktrcsw;
		switch (KTRFAC_VERSION(p->p_traceflag)) {
		case 0:
			/* v0: struct timeval, microsecond resolution. */
			kte->kte_kth.ktr_otv.tv_sec = ts->tv_sec;
			kte->kte_kth.ktr_otv.tv_usec = ts->tv_nsec / 1000;
			break;
		case 1:
			/* v1: old-layout struct timespec. */
			kte->kte_kth.ktr_ots.tv_sec = ts->tv_sec;
			kte->kte_kth.ktr_ots.tv_nsec = ts->tv_nsec;
			break;
		case 2:
			/* v2: current struct timespec field. */
			kte->kte_kth.ktr_ts.tv_sec = ts->tv_sec;
			kte->kte_kth.ktr_ts.tv_nsec = ts->tv_nsec;
			break;
		default:
			break;
		}

		ktraddentry(l, kte, KTA_WAITOK);
	}

	/* The "woke up / switched back in" record. */
	if (ktealloc(&kte, (void *)&kc, l, KTR_CSW, sizeof(*kc)))
		return;

	kc->out = 0;
	kc->user = user;

	ktraddentry(l, kte, KTA_WAITOK);
}
|
|
|
|
|
2007-08-15 16:07:23 +04:00
|
|
|
bool
|
2007-08-27 17:33:45 +04:00
|
|
|
ktr_point(int fac_bit)
|
2007-08-15 16:07:23 +04:00
|
|
|
{
|
2007-08-27 17:33:45 +04:00
|
|
|
return curlwp->l_proc->p_traceflag & fac_bit;
|
2007-08-15 16:07:23 +04:00
|
|
|
}
|
|
|
|
|
2006-10-22 22:19:49 +04:00
|
|
|
/*
 * Record a user-generated trace event (KTR_USER) from the utrace(2)
 * path: an identifier string plus up to KTR_USER_MAXLEN bytes of
 * user-space data.
 *
 * id   - record identifier; user-space string if ustr, kernel string
 *        otherwise.
 * addr - user-space address of the payload to copy in.
 * len  - payload length; ENOSPC if it exceeds KTR_USER_MAXLEN.
 * ustr - nonzero when id must be fetched with copyinstr().
 *
 * Returns 0 on success or an errno.  Note that on copyin() failure the
 * entry is still queued (with ktr_len forced to 0) so the trace stream
 * stays consistent, and the copyin error is returned to the caller.
 */
int
ktruser(const char *id, void *addr, size_t len, int ustr)
{
	struct ktrace_entry *kte;
	struct ktr_user *ktp;
	lwp_t *l = curlwp;
	void *user_dta;
	int error;

	if (!KTRPOINT(l->l_proc, KTR_USER))
		return 0;

	if (len > KTR_USER_MAXLEN)
		return ENOSPC;

	/* Entry payload is the ktr_user header plus the user data. */
	error = ktealloc(&kte, (void *)&ktp, l, KTR_USER, sizeof(*ktp) + len);
	if (error != 0)
		return error;

	if (ustr) {
		/* Bad user string degrades to an empty identifier. */
		if (copyinstr(id, ktp->ktr_id, KTR_USER_MAXIDLEN, NULL) != 0)
			ktp->ktr_id[0] = '\0';
	} else
		strncpy(ktp->ktr_id, id, KTR_USER_MAXIDLEN);
	/* strncpy() may not terminate; force NUL in the last byte. */
	ktp->ktr_id[KTR_USER_MAXIDLEN-1] = '\0';

	/* Payload follows the header in the same allocation. */
	user_dta = (void *)(ktp + 1);
	if ((error = copyin(addr, user_dta, len)) != 0)
		kte->kte_kth.ktr_len = 0;

	ktraddentry(l, kte, KTA_WAITOK);
	return error;
}
|
|
|
|
|
2007-06-02 00:24:21 +04:00
|
|
|
/*
 * Kernel-internal variant of ktruser(): record a KTR_USER event whose
 * identifier and payload both live in kernel space, so no copyin is
 * needed and failures are silently ignored.
 */
void
ktr_kuser(const char *id, const void *addr, size_t len)
{
	struct ktrace_entry *kte;
	struct ktr_user *ktp;
	lwp_t *l = curlwp;
	int error;

	if (!KTRPOINT(l->l_proc, KTR_USER))
		return;

	/* Oversized payloads are dropped, matching ktruser()'s limit. */
	if (len > KTR_USER_MAXLEN)
		return;

	error = ktealloc(&kte, (void *)&ktp, l, KTR_USER, sizeof(*ktp) + len);
	if (error != 0)
		return;

	/* Copy the identifier, always NUL-terminating the fixed field. */
	strncpy(ktp->ktr_id, id, KTR_USER_MAXIDLEN - 1);
	ktp->ktr_id[KTR_USER_MAXIDLEN - 1] = '\0';

	/* Payload follows the header in the same allocation. */
	memcpy(ktp + 1, addr, len);

	ktraddentry(l, kte, KTA_WAITOK);
}
|
|
|
|
|
2006-09-24 02:01:04 +04:00
|
|
|
void
|
2007-08-15 16:07:23 +04:00
|
|
|
ktr_mib(const int *name, u_int namelen)
|
2006-09-24 02:01:04 +04:00
|
|
|
{
|
|
|
|
struct ktrace_entry *kte;
|
|
|
|
int *namep;
|
|
|
|
size_t size;
|
2007-08-15 16:07:23 +04:00
|
|
|
lwp_t *l = curlwp;
|
|
|
|
|
|
|
|
if (!KTRPOINT(l->l_proc, KTR_MIB))
|
|
|
|
return;
|
2006-09-24 02:01:04 +04:00
|
|
|
|
|
|
|
size = namelen * sizeof(*name);
|
|
|
|
|
2007-02-10 00:55:00 +03:00
|
|
|
if (ktealloc(&kte, (void *)&namep, l, KTR_MIB, size))
|
|
|
|
return;
|
|
|
|
|
|
|
|
(void)memcpy(namep, name, namelen * sizeof(*name));
|
2006-09-24 02:01:04 +04:00
|
|
|
|
|
|
|
ktraddentry(l, kte, KTA_WAITOK);
|
|
|
|
}
|
|
|
|
|
1993-03-21 12:45:37 +03:00
|
|
|
/* Interface and common routines */
|
|
|
|
|
1998-05-02 22:33:19 +04:00
|
|
|
/*
 * Common backend for ktrace(2) and fktrace(2).
 *
 * ops encodes the operation (KTROP_SET / KTROP_CLEAR / KTROP_CLEARFILE)
 * plus flags; facs is the facility mask; pid selects a process (>= 0)
 * or a process group (< 0); *fpp is the trace file.
 *
 * On return *fpp is cleared whenever ownership of the file has passed
 * to the ktrace worker thread (or the request failed after that
 * hand-off), so the caller (see sys_fktrace) can decide whether it
 * still must release the file itself.
 */
int
ktrace_common(lwp_t *curl, int ops, int facs, int pid, file_t **fpp)
{
	struct proc *p;
	struct pgrp *pg;
	struct ktr_desc *ktd = NULL, *nktd;
	file_t *fp = *fpp;
	int ret = 0;
	int error = 0;
	int descend;

	descend = ops & KTRFLAG_DESCEND;
	/* Userland may not request the persistent bit directly. */
	facs = facs & ~((unsigned) KTRFAC_PERSISTENT);

	/* Block recursive tracing of our own trace activity. */
	(void)ktrenter(curl);

	switch (KTROP(ops)) {

	case KTROP_CLEARFILE:
		/*
		 * Clear all uses of the tracefile
		 */
		mutex_enter(&ktrace_lock);
		ktd = ktd_lookup(fp);
		mutex_exit(&ktrace_lock);
		if (ktd == NULL)
			goto done;
		error = ktrderefall(ktd, 1);
		goto done;

	case KTROP_SET:
		mutex_enter(&ktrace_lock);
		ktd = ktd_lookup(fp);
		mutex_exit(&ktrace_lock);
		if (ktd == NULL) {
			/*
			 * No descriptor for this file yet: build a new one
			 * off-lock, then race to install it below.
			 */
			nktd = kmem_alloc(sizeof(*nktd), KM_SLEEP);
			TAILQ_INIT(&nktd->ktd_queue);
			callout_init(&nktd->ktd_wakch, CALLOUT_MPSAFE);
			cv_init(&nktd->ktd_cv, "ktrwait");
			cv_init(&nktd->ktd_sync_cv, "ktrsync");
			nktd->ktd_flags = 0;
			nktd->ktd_qcount = 0;
			nktd->ktd_error = 0;
			nktd->ktd_errcnt = 0;
			nktd->ktd_delayqcnt = ktd_delayqcnt;
			nktd->ktd_wakedelay = mstohz(ktd_wakedelay);
			nktd->ktd_intrwakdl = mstohz(ktd_intrwakdl);
			nktd->ktd_ref = 0;
			nktd->ktd_fp = fp;
			mutex_enter(&ktrace_lock);
			ktdref(nktd);
			mutex_exit(&ktrace_lock);

			/*
			 * XXX: not correct. needs a way to detect
			 * whether ktruss or ktrace.
			 */
			if (fp->f_type == DTYPE_PIPE)
				nktd->ktd_flags |= KTDF_INTERACTIVE;

			/* Extra file reference held by the worker thread. */
			mutex_enter(&fp->f_lock);
			fp->f_count++;
			mutex_exit(&fp->f_lock);
			error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
			    ktrace_thread, nktd, &nktd->ktd_lwp, "ktrace");
			if (error != 0) {
				/* Undo: free descriptor, drop file ref. */
				kmem_free(nktd, sizeof(*nktd));
				nktd = NULL;
				mutex_enter(&fp->f_lock);
				fp->f_count--;
				mutex_exit(&fp->f_lock);
				goto done;
			}

			/*
			 * Someone may have installed a descriptor for the
			 * same file while we were unlocked; if so keep
			 * theirs and release ours.
			 */
			mutex_enter(&ktrace_lock);
			ktd = ktd_lookup(fp);
			if (ktd != NULL) {
				ktdrel(nktd);
				nktd = NULL;
			} else {
				TAILQ_INSERT_TAIL(&ktdq, nktd, ktd_list);
				ktd = nktd;
			}
			mutex_exit(&ktrace_lock);
		}
		break;

	case KTROP_CLEAR:
		break;
	}

	/*
	 * need something to (un)trace (XXX - why is this here?)
	 */
	if (!facs) {
		error = EINVAL;
		*fpp = NULL;
		goto done;
	}

	/*
	 * do it
	 */
	mutex_enter(&proc_lock);
	if (pid < 0) {
		/*
		 * by process group
		 */
		pg = pgrp_find(-pid);
		if (pg == NULL)
			error = ESRCH;
		else {
			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
				if (descend)
					ret |= ktrsetchildren(curl, p, ops,
					    facs, ktd);
				else
					ret |= ktrops(curl, p, ops, facs,
					    ktd);
			}
		}

	} else {
		/*
		 * by pid
		 */
		p = proc_find(pid);
		if (p == NULL)
			error = ESRCH;
		else if (descend)
			ret |= ktrsetchildren(curl, p, ops, facs, ktd);
		else
			ret |= ktrops(curl, p, ops, facs, ktd);
	}
	mutex_exit(&proc_lock);
	/* ret == 0 means no process accepted the request. */
	if (error == 0 && !ret)
		error = EPERM;
	*fpp = NULL;
done:
	if (ktd != NULL) {
		mutex_enter(&ktrace_lock);
		if (error != 0) {
			/*
			 * Wake the worker thread so that it can exit if
			 * we can't trace any process.
			 */
			ktd_wakeup(ktd);
		}
		/* Drop the reference taken by ktd_lookup()/creation above. */
		if (KTROP(ops) == KTROP_SET || KTROP(ops) == KTROP_CLEARFILE)
			ktdrel(ktd);
		mutex_exit(&ktrace_lock);
	}
	ktrexit(curl);
	return (error);
}
|
|
|
|
|
2000-05-27 04:40:29 +04:00
|
|
|
/*
|
2004-09-23 02:15:03 +04:00
|
|
|
* fktrace system call
|
2000-05-27 04:40:29 +04:00
|
|
|
*/
|
|
|
|
/* ARGSUSED */
|
|
|
|
int
|
2020-02-05 12:59:50 +03:00
|
|
|
sys_fktrace(struct lwp *l, const struct sys_fktrace_args *uap,
|
|
|
|
register_t *retval)
|
2000-05-27 04:40:29 +04:00
|
|
|
{
|
2007-12-21 02:02:38 +03:00
|
|
|
/* {
|
2000-05-27 04:40:29 +04:00
|
|
|
syscallarg(int) fd;
|
|
|
|
syscallarg(int) ops;
|
|
|
|
syscallarg(int) facs;
|
|
|
|
syscallarg(int) pid;
|
2007-12-21 02:02:38 +03:00
|
|
|
} */
|
2008-03-22 00:54:58 +03:00
|
|
|
file_t *fp;
|
|
|
|
int error, fd;
|
2000-05-27 04:40:29 +04:00
|
|
|
|
2008-03-22 00:54:58 +03:00
|
|
|
fd = SCARG(uap, fd);
|
|
|
|
if ((fp = fd_getfile(fd)) == NULL)
|
2001-06-15 00:32:41 +04:00
|
|
|
return (EBADF);
|
|
|
|
if ((fp->f_flag & FWRITE) == 0)
|
2003-05-02 16:43:01 +04:00
|
|
|
error = EBADF;
|
|
|
|
else
|
2006-07-24 02:06:03 +04:00
|
|
|
error = ktrace_common(l, SCARG(uap, ops),
|
Avoid panic on DIAGNOSTIC kernels with ktrace -p <not-existing-process>
The old logic was:
error = ktrace_common(, fp);
if (fp)
if (error)
fd_abort(, fp, );
else
fd_abort(, NULL, );
The 'if (fp)' portion really means if the op is not KTROP_CLEAR,
since the logic above always sets up fp otherwise, so change the
code to test this directly.
ktrace_common() can return an error both on the kernel thread
creation failure, which means that we should be calling fd_abort()
with fp, since nobody used the file yet and we should clear it now.
But it can also return an error because later, after the thread
creation if the process or process group was not found. In this
second case, we should be calling fd_abort with NULL, since the fp
is now used by the thread and it is going to clean it later. So
instead of checking the error from ktrace_common() to decide if we
are going to call fd_abort() with a NULL fp or not, let krace_common()
decide for us. So the new logic becomes:
error = ktrace_common(, &fp);
if (op != KTROP_CLEAR)
fd_abort(, fp, );
Since I am here, fix a freed memory access, by setting ktd to FALSE.
2011-12-31 00:33:04 +04:00
|
|
|
SCARG(uap, facs), SCARG(uap, pid), &fp);
|
2008-03-22 00:54:58 +03:00
|
|
|
fd_putfile(fd);
|
2003-05-02 16:43:01 +04:00
|
|
|
return error;
|
2000-05-27 04:40:29 +04:00
|
|
|
}
|
|
|
|
|
2021-02-27 16:02:42 +03:00
|
|
|
/*
 * Apply a single ktrace set/clear request to one process.
 *
 * Called with proc_lock held (by ktrace_common/ktrsetchildren); takes
 * p->p_lock then ktrace_lock for the duration of the flag update.
 *
 * Returns 1 on success, 0 if the caller may not trace p or the
 * requested trace version is invalid (the caller ORs results together
 * to detect "nothing was traced").
 */
static int
ktrops(lwp_t *curl, struct proc *p, int ops, int facs,
    struct ktr_desc *ktd)
{
	int vers = ops & KTRFAC_VER_MASK;
	int error = 0;

	mutex_enter(p->p_lock);
	mutex_enter(&ktrace_lock);

	/* Permission check: may curl trace p at all? */
	if (!ktrcanset(curl, p))
		goto out;

	/* Only known trace-format versions are accepted. */
	switch (vers) {
	case KTRFACv0:
	case KTRFACv1:
	case KTRFACv2:
		break;
	default:
		error = EINVAL;
		goto out;
	}

	if (KTROP(ops) == KTROP_SET) {
		if (p->p_tracep != ktd) {
			/*
			 * if trace file already in use, relinquish
			 */
			ktrderef(p);
			p->p_tracep = ktd;
			ktradref(p);
		}
		p->p_traceflag |= facs;
		/* Privileged callers get tracing that survives setuid exec. */
		if (kauth_authorize_process(curl->l_cred, KAUTH_PROCESS_KTRACE,
		    p, KAUTH_ARG(KAUTH_REQ_PROCESS_KTRACE_PERSISTENT), NULL,
		    NULL) == 0)
			p->p_traceflag |= KTRFAC_PERSISTENT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			ktrderef(p);
		}
	}

	/* Keep the version bits stamped while any facility is active. */
	if (p->p_traceflag)
		p->p_traceflag |= vers;
	/*
	 * Emit an emulation record, every time there is a ktrace
	 * change/attach request.
	 */
	if (KTRPOINT(p, KTR_EMUL))
		p->p_traceflag |= KTRFAC_TRC_EMUL;

	/* Refresh the fast-path "is tracing on" flag used by syscall entry. */
	p->p_trace_enabled = trace_is_enabled(p);
#ifdef __HAVE_SYSCALL_INTERN
	(*p->p_emul->e_syscall_intern)(p);
#endif

out:
	mutex_exit(&ktrace_lock);
	mutex_exit(p->p_lock);

	/* 1 = request applied, 0 = denied/invalid (see callers). */
	return error ? 0 : 1;
}
|
|
|
|
|
2021-02-27 16:02:42 +03:00
|
|
|
/*
 * Apply a ktrace request to `top' and all of its descendants, using an
 * iterative pre-order walk of the process tree (no recursion, so no
 * kernel-stack depth concerns).  Returns the OR of the per-process
 * ktrops() results: nonzero if at least one process was affected.
 *
 * Caller must hold proc_lock, which keeps the tree stable during the
 * walk.
 */
static int
ktrsetchildren(lwp_t *curl, struct proc *top, int ops, int facs,
    struct ktr_desc *ktd)
{
	struct proc *p;
	int ret = 0;

	KASSERT(mutex_owned(&proc_lock));

	p = top;
	for (;;) {
		ret |= ktrops(curl, p, ops, facs, ktd);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (LIST_FIRST(&p->p_children) != NULL) {
			p = LIST_FIRST(&p->p_children);
			continue;
		}
		for (;;) {
			/* Climbed back to the root of the walk: done. */
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling) != NULL) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			/* No more siblings at this level; go up one. */
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}
|
|
|
|
|
2021-02-27 16:02:42 +03:00
|
|
|
/*
 * Flush a chain of trace entries to the trace file.
 *
 * Entries are gathered into at most 64 iovecs (header + optional
 * payload per entry, so up to ~32 entries per writev) and written with
 * a single fo_write call; the loop at `next' picks up where the batch
 * ended.  For old trace versions (0 and 1) the header timestamp/LWP id
 * fields are rewritten in place into the legacy layout and the header
 * is shortened accordingly before being queued for write.
 *
 * On a write error other than EWOULDBLOCK, tracing to this descriptor
 * is torn down entirely.  All entries are freed before returning.
 */
static void
ktrwrite(struct ktr_desc *ktd, struct ktrace_entry *kte)
{
	size_t hlen;
	struct uio auio;
	struct iovec aiov[64], *iov;
	struct ktrace_entry *top = kte;	/* remember chain head for freeing */
	struct ktr_header *kth;
	file_t *fp = ktd->ktd_fp;
	int error;
next:
	/* Start a fresh batch of iovecs. */
	auio.uio_iov = iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_rw = UIO_WRITE;
	auio.uio_resid = 0;
	auio.uio_iovcnt = 0;
	UIO_SETUP_SYSSPACE(&auio);
	do {
		struct timespec ts;
		lwpid_t lid;
		kth = &kte->kte_kth;

		hlen = sizeof(struct ktr_header);
		switch (kth->ktr_version) {
		case 0:
			/* v0 on-disk format: timeval, no LWP id. */
			ts = kth->ktr_time;

			kth->ktr_otv.tv_sec = ts.tv_sec;
			kth->ktr_otv.tv_usec = ts.tv_nsec / 1000;
			kth->ktr_unused = NULL;
			/* Emit only the legacy-sized header. */
			hlen -= sizeof(kth->_v) -
			    MAX(sizeof(kth->_v._v0), sizeof(kth->_v._v1));
			break;
		case 1:
			/* v1 on-disk format: old timespec + LWP id. */
			ts = kth->ktr_time;
			lid = kth->ktr_lid;

			kth->ktr_ots.tv_sec = ts.tv_sec;
			kth->ktr_ots.tv_nsec = ts.tv_nsec;
			kth->ktr_olid = lid;
			hlen -= sizeof(kth->_v) -
			    MAX(sizeof(kth->_v._v0), sizeof(kth->_v._v1));
			break;
		}
		/* Header iovec ... */
		iov->iov_base = (void *)kth;
		iov++->iov_len = hlen;
		auio.uio_resid += hlen;
		auio.uio_iovcnt++;
		/* ... plus payload iovec, when the entry carries data. */
		if (kth->ktr_len > 0) {
			iov->iov_base = kte->kte_buf;
			iov++->iov_len = kth->ktr_len;
			auio.uio_resid += kth->ktr_len;
			auio.uio_iovcnt++;
		}
		/* Stop one pair short of the iovec array's capacity. */
	} while ((kte = TAILQ_NEXT(kte, kte_list)) != NULL &&
	    auio.uio_iovcnt < sizeof(aiov) / sizeof(aiov[0]) - 1);

again:
	error = (*fp->f_ops->fo_write)(fp, &fp->f_offset, &auio,
	    fp->f_cred, FOF_UPDATE_OFFSET);
	switch (error) {

	case 0:
		/* Short write: keep pushing the remainder. */
		if (auio.uio_resid > 0)
			goto again;
		/* More entries left in the chain: build the next batch. */
		if (kte != NULL)
			goto next;
		break;

	case EWOULDBLOCK:
		/* Non-blocking target is full; nap one tick and retry. */
		kpause("ktrzzz", false, 1, NULL);
		goto again;

	default:
		/*
		 * If error encountered, give up tracing on this
		 * vnode. Don't report EPIPE as this can easily
		 * happen with fktrace()/ktruss.
		 */
#ifndef DEBUG
		if (error != EPIPE)
#endif
		log(LOG_NOTICE,
		    "ktrace write failed, errno %d, tracing stopped\n",
		    error);
		(void)ktrderefall(ktd, 0);
	}

	/* Free every entry in the original chain, written or not. */
	while ((kte = top) != NULL) {
		top = TAILQ_NEXT(top, kte_list);
		ktefree(kte);
	}
}
|
|
|
|
|
2021-02-27 16:02:42 +03:00
|
|
|
/*
 * Per-descriptor writer thread: drains the ktr_desc's queue of trace
 * entries and writes them out via ktrwrite(), until the descriptor's
 * reference count drops to zero and the queue is empty.
 *
 * Called with 'arg' pointing at the struct ktr_desc this thread owns.
 * Runs until teardown, then releases the descriptor's resources and
 * exits via kthread_exit().
 */
static void
ktrace_thread(void *arg)
{
	struct ktr_desc *ktd = arg;
	file_t *fp = ktd->ktd_fp;
	struct ktrace_entry *kte;
	int ktrerr, errcnt;

	mutex_enter(&ktrace_lock);
	for (;;) {
		kte = TAILQ_FIRST(&ktd->ktd_queue);
		if (kte == NULL) {
			/*
			 * Queue is empty.  If someone is waiting for a
			 * synchronous flush (KTDF_WAIT), wake them now
			 * that everything queued so far has been written.
			 */
			if (ktd->ktd_flags & KTDF_WAIT) {
				ktd->ktd_flags &= ~(KTDF_WAIT | KTDF_BLOCKING);
				cv_broadcast(&ktd->ktd_sync_cv);
			}
			/* Last reference gone and nothing left to write. */
			if (ktd->ktd_ref == 0)
				break;
			cv_wait(&ktd->ktd_cv, &ktrace_lock);
			continue;
		}
		/*
		 * Detach the whole queue and snapshot/clear the error
		 * state while still holding ktrace_lock, then drop the
		 * lock for the (potentially blocking) file write.
		 */
		TAILQ_INIT(&ktd->ktd_queue);
		ktd->ktd_qcount = 0;
		ktrerr = ktd->ktd_error;
		errcnt = ktd->ktd_errcnt;
		ktd->ktd_error = ktd->ktd_errcnt = 0;
		mutex_exit(&ktrace_lock);

		if (ktrerr) {
			log(LOG_NOTICE,
			    "ktrace failed, fp %p, error 0x%x, total %d\n",
			    fp, ktrerr, errcnt);
		}
		ktrwrite(ktd, kte);
		mutex_enter(&ktrace_lock);
	}

	/*
	 * Unhook the descriptor from the global list, unless a
	 * KTROP_SET collision replaced it there with a newer one
	 * for the same file (then the newer entry stays).
	 */
	if (ktd_lookup(ktd->ktd_fp) == ktd) {
		TAILQ_REMOVE(&ktdq, ktd, ktd_list);
	} else {
		/* nothing, collision in KTROP_SET */
	}

	/*
	 * Stop the wakeup callout before freeing; callout_halt()
	 * drops/reacquires ktrace_lock while waiting for a running
	 * callout to finish.
	 */
	callout_halt(&ktd->ktd_wakch, &ktrace_lock);
	callout_destroy(&ktd->ktd_wakch);
	mutex_exit(&ktrace_lock);

	/*
	 * ktrace file descriptor can't be watched (are not visible to
	 * userspace), so no kqueue stuff here
	 * XXX: The above comment is wrong, because the fktrace file
	 * descriptor is available in userland.
	 */
	closef(fp);

	cv_destroy(&ktd->ktd_sync_cv);
	cv_destroy(&ktd->ktd_cv);

	kmem_free(ktd, sizeof(*ktd));

	kthread_exit(0);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return true if caller has permission to set the ktracing state
|
|
|
|
* of target. Essentially, the target can't possess any
|
2008-02-02 23:42:18 +03:00
|
|
|
* more permissions than the caller. KTRFAC_PERSISTENT signifies that
|
|
|
|
* the tracing will persist on sugid processes during exec; it is only
|
|
|
|
* settable by a process with appropriate credentials.
|
1993-03-21 12:45:37 +03:00
|
|
|
*
|
|
|
|
* TODO: check groups. use caller effective gid.
|
|
|
|
*/
|
2021-02-27 16:02:42 +03:00
|
|
|
static int
|
2007-08-15 16:07:23 +04:00
|
|
|
ktrcanset(lwp_t *calll, struct proc *targetp)
|
1993-03-21 12:45:37 +03:00
|
|
|
{
|
2008-04-24 22:39:20 +04:00
|
|
|
KASSERT(mutex_owned(targetp->p_lock));
|
2007-08-15 16:07:23 +04:00
|
|
|
KASSERT(mutex_owned(&ktrace_lock));
|
2007-02-10 00:55:00 +03:00
|
|
|
|
2008-01-23 20:56:53 +03:00
|
|
|
if (kauth_authorize_process(calll->l_cred, KAUTH_PROCESS_KTRACE,
|
2006-11-28 20:27:09 +03:00
|
|
|
targetp, NULL, NULL, NULL) == 0)
|
1993-03-21 12:45:37 +03:00
|
|
|
return (1);
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
2000-12-28 14:10:15 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Put user defined entry to ktrace records.
|
|
|
|
*/
|
|
|
|
int
|
2007-12-21 02:02:38 +03:00
|
|
|
sys_utrace(struct lwp *l, const struct sys_utrace_args *uap, register_t *retval)
|
2000-12-28 14:10:15 +03:00
|
|
|
{
|
2007-12-21 02:02:38 +03:00
|
|
|
/* {
|
2001-01-06 00:42:08 +03:00
|
|
|
syscallarg(const char *) label;
|
2000-12-28 14:10:15 +03:00
|
|
|
syscallarg(void *) addr;
|
|
|
|
syscallarg(size_t) len;
|
2007-12-21 02:02:38 +03:00
|
|
|
} */
|
2000-12-28 14:10:15 +03:00
|
|
|
|
2007-08-15 16:07:23 +04:00
|
|
|
return ktruser(SCARG(uap, label), SCARG(uap, addr),
|
2006-10-22 22:19:49 +04:00
|
|
|
SCARG(uap, len), 1);
|
2000-12-28 14:10:15 +03:00
|
|
|
}
|