2012-02-20 01:05:51 +04:00
|
|
|
/* $NetBSD: kern_exec.c,v 1.340 2012/02/19 21:06:49 rmind Exp $ */
|
2008-07-02 21:28:54 +04:00
|
|
|
|
|
|
|
/*-
|
|
|
|
* Copyright (c) 2008 The NetBSD Foundation, Inc.
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
|
|
|
|
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
|
|
|
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
|
|
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
|
|
|
|
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
|
|
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
|
|
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
|
|
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
|
|
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
|
|
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
|
|
* POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
1994-06-29 10:29:24 +04:00
|
|
|
|
|
|
|
/*-
|
1996-10-01 03:18:43 +04:00
|
|
|
* Copyright (C) 1993, 1994, 1996 Christopher G. Demetriou
|
1994-06-29 10:29:24 +04:00
|
|
|
* Copyright (C) 1992 Wolfgang Solfrank.
|
|
|
|
* Copyright (C) 1992 TooLs GmbH.
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. All advertising materials mentioning features or use of this software
|
|
|
|
* must display the following acknowledgement:
|
|
|
|
* This product includes software developed by TooLs GmbH.
|
|
|
|
* 4. The name of TooLs GmbH may not be used to endorse or promote products
|
|
|
|
* derived from this software without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
|
|
|
|
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
|
|
|
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
|
|
|
* IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
|
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
|
|
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
|
|
|
|
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
|
|
|
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
|
|
|
|
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
|
|
|
|
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
2001-11-12 18:25:01 +03:00
|
|
|
#include <sys/cdefs.h>
|
2012-02-20 01:05:51 +04:00
|
|
|
__KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.340 2012/02/19 21:06:49 rmind Exp $");
|
2001-11-12 18:25:01 +03:00
|
|
|
|
2011-08-26 23:07:13 +04:00
|
|
|
#include "opt_exec.h"
|
1998-06-26 01:17:15 +04:00
|
|
|
#include "opt_ktrace.h"
|
2009-02-14 01:41:00 +03:00
|
|
|
#include "opt_modular.h"
|
2000-11-21 03:37:49 +03:00
|
|
|
#include "opt_syscall_debug.h"
|
2006-07-26 13:33:57 +04:00
|
|
|
#include "veriexec.h"
|
2006-11-22 05:02:51 +03:00
|
|
|
#include "opt_pax.h"
|
1998-02-10 17:08:44 +03:00
|
|
|
|
1994-06-29 10:29:24 +04:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
|
|
|
#include <sys/filedesc.h>
|
|
|
|
#include <sys/kernel.h>
|
|
|
|
#include <sys/proc.h>
|
|
|
|
#include <sys/mount.h>
|
|
|
|
#include <sys/malloc.h>
|
2008-01-02 22:44:36 +03:00
|
|
|
#include <sys/kmem.h>
|
1994-06-29 10:29:24 +04:00
|
|
|
#include <sys/namei.h>
|
|
|
|
#include <sys/vnode.h>
|
|
|
|
#include <sys/file.h>
|
|
|
|
#include <sys/acct.h>
|
2012-02-12 03:16:15 +04:00
|
|
|
#include <sys/atomic.h>
|
1994-06-29 10:29:24 +04:00
|
|
|
#include <sys/exec.h>
|
|
|
|
#include <sys/ktrace.h>
|
2008-10-11 17:40:57 +04:00
|
|
|
#include <sys/uidinfo.h>
|
1994-06-29 10:29:24 +04:00
|
|
|
#include <sys/wait.h>
|
|
|
|
#include <sys/mman.h>
|
2002-08-28 11:16:33 +04:00
|
|
|
#include <sys/ras.h>
|
1994-06-29 10:29:24 +04:00
|
|
|
#include <sys/signalvar.h>
|
|
|
|
#include <sys/stat.h>
|
2000-11-21 03:37:49 +03:00
|
|
|
#include <sys/syscall.h>
|
2006-05-15 01:15:11 +04:00
|
|
|
#include <sys/kauth.h>
|
2007-11-13 02:11:58 +03:00
|
|
|
#include <sys/lwpctl.h>
|
2007-12-27 01:11:47 +03:00
|
|
|
#include <sys/pax.h>
|
2007-12-31 18:31:24 +03:00
|
|
|
#include <sys/cpu.h>
|
2008-11-19 21:35:57 +03:00
|
|
|
#include <sys/module.h>
|
2009-06-03 03:21:37 +04:00
|
|
|
#include <sys/syscallvar.h>
|
1994-10-20 07:22:35 +03:00
|
|
|
#include <sys/syscallargs.h>
|
2006-07-22 14:34:26 +04:00
|
|
|
#if NVERIEXEC > 0
|
2005-04-20 17:44:45 +04:00
|
|
|
#include <sys/verified_exec.h>
|
2006-07-22 14:34:26 +04:00
|
|
|
#endif /* NVERIEXEC > 0 */
|
2010-03-02 00:10:13 +03:00
|
|
|
#include <sys/sdt.h>
|
2012-02-12 03:16:15 +04:00
|
|
|
#include <sys/spawn.h>
|
|
|
|
#include <sys/prot.h>
|
2011-11-20 02:51:18 +04:00
|
|
|
#include <sys/cprng.h>
|
1994-10-20 07:22:35 +03:00
|
|
|
|
1998-02-05 10:59:28 +03:00
|
|
|
#include <uvm/uvm_extern.h>
|
|
|
|
|
1994-06-29 10:29:24 +04:00
|
|
|
#include <machine/reg.h>
|
|
|
|
|
2007-04-22 12:29:55 +04:00
|
|
|
#include <compat/common/compat_util.h>
|
|
|
|
|
2003-08-24 21:52:28 +04:00
|
|
|
static int exec_sigcode_map(struct proc *, const struct emul *);
|
|
|
|
|
2001-07-16 00:49:40 +04:00
|
|
|
#ifdef DEBUG_EXEC
|
2011-01-18 11:21:03 +03:00
|
|
|
#define DPRINTF(a) printf a
|
2011-03-14 02:44:14 +03:00
|
|
|
#define COPYPRINTF(s, a, b) printf("%s, %d: copyout%s @%p %zu\n", __func__, \
|
|
|
|
__LINE__, (s), (a), (b))
|
2001-07-16 00:49:40 +04:00
|
|
|
#else
|
|
|
|
#define DPRINTF(a)
|
2011-03-14 02:44:14 +03:00
|
|
|
#define COPYPRINTF(s, a, b)
|
2001-07-16 00:49:40 +04:00
|
|
|
#endif /* DEBUG_EXEC */
|
|
|
|
|
2010-03-02 00:10:13 +03:00
|
|
|
/*
|
|
|
|
* DTrace SDT provider definitions
|
|
|
|
*/
|
|
|
|
SDT_PROBE_DEFINE(proc,,,exec,
|
|
|
|
"char *", NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL);
|
|
|
|
SDT_PROBE_DEFINE(proc,,,exec_success,
|
|
|
|
"char *", NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL);
|
|
|
|
SDT_PROBE_DEFINE(proc,,,exec_failure,
|
|
|
|
"int", NULL,
|
|
|
|
NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL);
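/*
 * These probes fire from execve_loadvm() below: proc:::exec when an exec
 * is attempted and proc:::exec_failure when it is abandoned with an error.
 * proc:::exec_success fires once the new image is running (that code is
 * outside this excerpt).
 */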
|
|
|
|
|
2000-12-08 22:42:11 +03:00
|
|
|
/*
|
|
|
|
* Exec function switch:
|
|
|
|
*
|
|
|
|
* Note that each makecmds function is responsible for loading the
|
|
|
|
* exec package with the necessary functions for any exec-type-specific
|
|
|
|
* handling.
|
|
|
|
*
|
|
|
|
* Functions for specific exec types should be defined in their own
|
|
|
|
* header file.
|
|
|
|
*/
|
2001-02-26 23:43:25 +03:00
|
|
|
static const struct execsw **execsw = NULL;
|
|
|
|
static int nexecs;
|
|
|
|
|
2008-11-19 21:35:57 +03:00
|
|
|
u_int exec_maxhdrsz; /* must not be static - used by netbsd32 */
|
2000-12-08 22:42:11 +03:00
|
|
|
|
|
|
|
/* list of dynamically loaded execsw entries */
|
2008-11-19 21:35:57 +03:00
|
|
|
static LIST_HEAD(execlist_head, exec_entry) ex_head =
|
|
|
|
LIST_HEAD_INITIALIZER(ex_head);
|
2000-12-08 22:42:11 +03:00
|
|
|
struct exec_entry {
|
2001-02-26 23:43:25 +03:00
|
|
|
LIST_ENTRY(exec_entry) ex_list;
|
2008-11-19 21:35:57 +03:00
|
|
|
SLIST_ENTRY(exec_entry) ex_slist;
|
|
|
|
const struct execsw *ex_sw;
|
2000-12-08 22:42:11 +03:00
|
|
|
};
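/*
 * Each entry on ex_head describes one dynamically loaded exec format; the
 * execsw[] array consulted by check_exec() is regenerated from this list
 * when formats are registered or removed (those routines are outside this
 * excerpt).
 */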
|
|
|
|
|
2005-07-10 08:20:34 +04:00
|
|
|
#ifndef __HAVE_SYSCALL_INTERN
|
|
|
|
void syscall(void);
|
|
|
|
#endif
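/*
 * Ports without __HAVE_SYSCALL_INTERN use a single syscall() handler;
 * ports that define it supply a per-emulation syscall_intern hook instead,
 * as reflected in the emul_netbsd initializer below.
 */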
|
|
|
|
|
2003-09-10 20:43:34 +04:00
|
|
|
/* NetBSD emul struct */
|
2008-11-19 21:35:57 +03:00
|
|
|
struct emul emul_netbsd = {
|
2009-10-25 04:14:03 +03:00
|
|
|
.e_name = "netbsd",
|
|
|
|
.e_path = NULL,
|
2000-12-11 08:28:59 +03:00
|
|
|
#ifndef __HAVE_MINIMAL_EMUL
|
2009-10-25 04:14:03 +03:00
|
|
|
.e_flags = EMUL_HAS_SYS___syscall,
|
|
|
|
.e_errno = NULL,
|
|
|
|
.e_nosys = SYS_syscall,
|
|
|
|
.e_nsysent = SYS_NSYSENT,
|
2000-12-11 08:28:59 +03:00
|
|
|
#endif
|
2009-10-25 04:14:03 +03:00
|
|
|
.e_sysent = sysent,
|
2000-11-21 03:37:49 +03:00
|
|
|
#ifdef SYSCALL_DEBUG
|
2009-10-25 04:14:03 +03:00
|
|
|
.e_syscallnames = syscallnames,
|
2000-11-21 03:37:49 +03:00
|
|
|
#else
|
2009-10-25 04:14:03 +03:00
|
|
|
.e_syscallnames = NULL,
|
2000-11-21 03:37:49 +03:00
|
|
|
#endif
|
2009-10-25 04:14:03 +03:00
|
|
|
.e_sendsig = sendsig,
|
|
|
|
.e_trapsignal = trapsignal,
|
|
|
|
.e_tracesig = NULL,
|
|
|
|
.e_sigcode = NULL,
|
|
|
|
.e_esigcode = NULL,
|
|
|
|
.e_sigobject = NULL,
|
|
|
|
.e_setregs = setregs,
|
|
|
|
.e_proc_exec = NULL,
|
|
|
|
.e_proc_fork = NULL,
|
|
|
|
.e_proc_exit = NULL,
|
|
|
|
.e_lwp_fork = NULL,
|
|
|
|
.e_lwp_exit = NULL,
|
2000-12-11 08:28:59 +03:00
|
|
|
#ifdef __HAVE_SYSCALL_INTERN
|
2009-10-25 04:14:03 +03:00
|
|
|
.e_syscall_intern = syscall_intern,
|
2000-12-11 08:28:59 +03:00
|
|
|
#else
|
2009-10-25 04:14:03 +03:00
|
|
|
.e_syscall = syscall,
|
2000-12-11 08:28:59 +03:00
|
|
|
#endif
|
2009-10-25 04:14:03 +03:00
|
|
|
.e_sysctlovly = NULL,
|
|
|
|
.e_fault = NULL,
|
|
|
|
.e_vm_default_addr = uvm_default_mapaddr,
|
|
|
|
.e_usertrap = NULL,
|
|
|
|
.e_ucsize = sizeof(ucontext_t),
|
|
|
|
.e_startlwp = startlwp
|
2000-11-21 03:37:49 +03:00
|
|
|
};
|
|
|
|
|
2000-12-08 22:42:11 +03:00
|
|
|
/*
|
|
|
|
* Exec lock. Used to control access to execsw[] structures.
|
|
|
|
* This must not be static so that netbsd32 can access it, too.
|
|
|
|
*/
|
2007-02-10 00:55:00 +03:00
|
|
|
krwlock_t exec_lock;
|
2004-03-05 14:30:50 +03:00
|
|
|
|
2007-12-26 19:01:34 +03:00
|
|
|
static kmutex_t sigobject_lock;
|
|
|
|
|
2012-02-12 03:16:15 +04:00
|
|
|
/*
|
|
|
|
 * Data used between the loadvm and execve parts of an "exec" operation
|
|
|
|
*/
|
|
|
|
struct execve_data {
|
|
|
|
struct exec_package ed_pack;
|
|
|
|
struct pathbuf *ed_pathbuf;
|
|
|
|
struct vattr ed_attr;
|
|
|
|
struct ps_strings ed_arginfo;
|
|
|
|
char *ed_argp;
|
|
|
|
const char *ed_pathstring;
|
|
|
|
char *ed_resolvedpathbuf;
|
|
|
|
size_t ed_ps_strings_sz;
|
|
|
|
int ed_szsigcode;
|
|
|
|
long ed_argc;
|
|
|
|
long ed_envc;
|
|
|
|
};
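/*
 * An execve_data is filled in by execve_loadvm() and later consumed by
 * execve_runproc(); for posix_spawn() it is embedded in the spawn_exec_data
 * below so the prepared exec state can be handed from the parent to the
 * child (the spawn code itself is outside this excerpt).
 */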
|
|
|
|
|
|
|
|
/*
|
|
|
|
* data passed from parent lwp to child during a posix_spawn()
|
|
|
|
*/
|
|
|
|
struct spawn_exec_data {
|
|
|
|
struct execve_data sed_exec;
|
|
|
|
size_t sed_actions_len;
|
|
|
|
struct posix_spawn_file_actions_entry
|
|
|
|
*sed_actions;
|
|
|
|
struct posix_spawnattr *sed_attrs;
|
|
|
|
struct proc *sed_parent;
|
|
|
|
kcondvar_t sed_cv_child_ready;
|
|
|
|
kmutex_t sed_mtx_child;
|
|
|
|
int sed_error;
|
|
|
|
};
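/*
 * sed_mtx_child/sed_cv_child_ready and sed_error presumably let the parent
 * sleep until the child reports whether the spawn succeeded; the code that
 * uses them is outside this excerpt.
 */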
|
|
|
|
|
2008-07-02 21:28:54 +04:00
|
|
|
static void *
|
|
|
|
exec_pool_alloc(struct pool *pp, int flags)
|
|
|
|
{
|
|
|
|
|
|
|
|
return (void *)uvm_km_alloc(kernel_map, NCARGS, 0,
|
|
|
|
UVM_KMF_PAGEABLE | UVM_KMF_WAITVA);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
exec_pool_free(struct pool *pp, void *addr)
|
|
|
|
{
|
|
|
|
|
|
|
|
uvm_km_free(kernel_map, (vaddr_t)addr, NCARGS, UVM_KMF_PAGEABLE);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct pool exec_pool;
|
|
|
|
|
|
|
|
static struct pool_allocator exec_palloc = {
|
|
|
|
.pa_alloc = exec_pool_alloc,
|
|
|
|
.pa_free = exec_pool_free,
|
|
|
|
.pa_pagesz = NCARGS
|
|
|
|
};
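/*
 * exec_pool hands out NCARGS-sized, pageable kernel buffers used to
 * accumulate the argument and environment strings; execve_loadvm() below
 * takes one with pool_get(&exec_pool, PR_WAITOK).  The pool is initialized
 * with this allocator outside this excerpt.
 */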
|
|
|
|
|
1994-06-29 10:29:24 +04:00
|
|
|
/*
|
|
|
|
* check exec:
|
|
|
|
* given an "executable" described in the exec package's namei info,
|
|
|
|
* see what we can do with it.
|
|
|
|
*
|
|
|
|
* ON ENTRY:
|
|
|
|
* exec package with appropriate namei info
|
2005-12-11 15:16:03 +03:00
|
|
|
* lwp pointer of exec'ing lwp
|
1994-06-29 10:29:24 +04:00
|
|
|
* NO SELF-LOCKED VNODES
|
|
|
|
*
|
|
|
|
* ON EXIT:
|
|
|
|
* error: nothing held, etc. exec header still allocated.
|
1996-10-01 03:18:43 +04:00
|
|
|
* ok: filled exec package, executable's vnode (unlocked).
|
1994-06-29 10:29:24 +04:00
|
|
|
*
|
|
|
|
* EXEC SWITCH ENTRY:
|
|
|
|
* Locked vnode to check, exec package, proc.
|
|
|
|
*
|
|
|
|
* EXEC SWITCH EXIT:
|
1996-10-01 03:18:43 +04:00
|
|
|
* ok: return 0, filled exec package, executable's vnode (unlocked).
|
1994-06-29 10:29:24 +04:00
|
|
|
* error: destructive:
|
|
|
|
 * everything deallocated except exec header.
|
1996-09-27 03:34:46 +04:00
|
|
|
* non-destructive:
|
1996-10-01 03:18:43 +04:00
|
|
|
* error code, executable's vnode (unlocked),
|
1996-09-27 03:34:46 +04:00
|
|
|
* exec header unmodified.
|
1994-06-29 10:29:24 +04:00
|
|
|
*/
|
|
|
|
int
|
2005-07-17 02:47:18 +04:00
|
|
|
/*ARGSUSED*/
|
2010-11-19 09:44:33 +03:00
|
|
|
check_exec(struct lwp *l, struct exec_package *epp, struct pathbuf *pb)
|
1994-06-29 10:29:24 +04:00
|
|
|
{
|
2001-02-26 23:43:25 +03:00
|
|
|
int error, i;
|
|
|
|
struct vnode *vp;
|
2010-05-02 09:30:20 +04:00
|
|
|
struct nameidata nd;
|
2001-02-26 23:43:25 +03:00
|
|
|
size_t resid;
|
1994-06-29 10:29:24 +04:00
|
|
|
|
2010-11-30 13:43:01 +03:00
|
|
|
NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, pb);
|
2010-05-02 09:30:20 +04:00
|
|
|
|
1994-06-29 10:29:24 +04:00
|
|
|
/* first get the vnode */
|
2010-05-02 09:30:20 +04:00
|
|
|
if ((error = namei(&nd)) != 0)
|
1994-06-29 10:29:24 +04:00
|
|
|
return error;
|
2010-05-02 09:30:20 +04:00
|
|
|
epp->ep_vp = vp = nd.ni_vp;
|
|
|
|
/* this cannot overflow as both are size PATH_MAX */
|
2010-11-30 13:29:57 +03:00
|
|
|
strcpy(epp->ep_resolvedname, nd.ni_pnbuf);
|
2010-05-02 09:30:20 +04:00
|
|
|
|
2010-05-03 03:22:51 +04:00
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
/* paranoia (take this out once namei stuff stabilizes) */
|
2010-11-30 13:29:57 +03:00
|
|
|
memset(nd.ni_pnbuf, '~', PATH_MAX);
|
2010-05-02 09:30:20 +04:00
|
|
|
#endif
|
1994-06-29 10:29:24 +04:00
|
|
|
|
1997-05-08 20:19:43 +04:00
|
|
|
/* check access and type */
|
1994-06-29 10:29:24 +04:00
|
|
|
if (vp->v_type != VREG) {
|
1997-04-10 23:45:40 +04:00
|
|
|
error = EACCES;
|
1994-06-29 10:29:24 +04:00
|
|
|
goto bad1;
|
|
|
|
}
|
2007-11-26 22:01:26 +03:00
|
|
|
if ((error = VOP_ACCESS(vp, VEXEC, l->l_cred)) != 0)
|
1997-05-08 20:19:43 +04:00
|
|
|
goto bad1;
|
1994-06-29 10:29:24 +04:00
|
|
|
|
|
|
|
/* get attributes */
|
2007-11-26 22:01:26 +03:00
|
|
|
if ((error = VOP_GETATTR(vp, epp->ep_vap, l->l_cred)) != 0)
|
1994-06-29 10:29:24 +04:00
|
|
|
goto bad1;
|
|
|
|
|
|
|
|
/* Check mount point */
|
|
|
|
if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
|
|
|
|
error = EACCES;
|
|
|
|
goto bad1;
|
|
|
|
}
|
2001-06-15 21:24:19 +04:00
|
|
|
if (vp->v_mount->mnt_flag & MNT_NOSUID)
|
1997-05-08 14:19:10 +04:00
|
|
|
epp->ep_vap->va_mode &= ~(S_ISUID | S_ISGID);
|
1994-06-29 10:29:24 +04:00
|
|
|
|
|
|
|
/* try to open it */
|
2007-11-26 22:01:26 +03:00
|
|
|
if ((error = VOP_OPEN(vp, FREAD, l->l_cred)) != 0)
|
1994-06-29 10:29:24 +04:00
|
|
|
goto bad1;
|
|
|
|
|
1999-02-27 02:38:55 +03:00
|
|
|
/* unlock vp, since we need it unlocked from here on out. */
|
2010-06-24 16:58:48 +04:00
|
|
|
VOP_UNLOCK(vp);
|
1996-10-01 03:18:43 +04:00
|
|
|
|
2006-07-22 14:34:26 +04:00
|
|
|
#if NVERIEXEC > 0
|
2010-05-02 09:30:20 +04:00
|
|
|
error = veriexec_verify(l, vp, epp->ep_resolvedname,
|
2006-12-20 14:35:29 +03:00
|
|
|
epp->ep_flags & EXEC_INDIR ? VERIEXEC_INDIRECT : VERIEXEC_DIRECT,
|
2007-02-08 03:26:50 +03:00
|
|
|
NULL);
|
|
|
|
if (error)
|
2006-12-23 20:23:51 +03:00
|
|
|
goto bad2;
|
2006-07-22 14:34:26 +04:00
|
|
|
#endif /* NVERIEXEC > 0 */
|
2002-10-29 15:31:20 +03:00
|
|
|
|
2006-11-22 05:02:51 +03:00
|
|
|
#ifdef PAX_SEGVGUARD
|
2010-05-02 09:30:20 +04:00
|
|
|
error = pax_segvguard(l, vp, epp->ep_resolvedname, false);
|
2006-12-23 20:23:51 +03:00
|
|
|
if (error)
|
|
|
|
goto bad2;
|
2006-11-22 05:02:51 +03:00
|
|
|
#endif /* PAX_SEGVGUARD */
|
|
|
|
|
1994-06-29 10:29:24 +04:00
|
|
|
/* now we have the file, get the exec header */
|
1996-02-04 05:15:01 +03:00
|
|
|
error = vn_rdwr(UIO_READ, vp, epp->ep_hdr, epp->ep_hdrlen, 0,
|
2006-07-24 02:06:03 +04:00
|
|
|
UIO_SYSSPACE, 0, l->l_cred, &resid, NULL);
|
1996-02-04 05:15:01 +03:00
|
|
|
if (error)
|
1994-06-29 10:29:24 +04:00
|
|
|
goto bad2;
|
|
|
|
epp->ep_hdrvalid = epp->ep_hdrlen - resid;
|
|
|
|
|
2001-02-14 21:21:42 +03:00
|
|
|
/*
|
|
|
|
* Set up default address space limits. Can be overridden
|
|
|
|
* by individual exec packages.
|
2004-03-05 14:30:50 +03:00
|
|
|
*
|
2007-02-05 17:34:29 +03:00
|
|
|
* XXX probably should be all done in the exec packages.
|
2001-02-14 21:21:42 +03:00
|
|
|
*/
|
|
|
|
epp->ep_vm_minaddr = VM_MIN_ADDRESS;
|
|
|
|
epp->ep_vm_maxaddr = VM_MAXUSER_ADDRESS;
|
1994-06-29 10:29:24 +04:00
|
|
|
/*
|
|
|
|
* set up the vmcmds for creation of the process
|
|
|
|
* address space
|
|
|
|
*/
|
|
|
|
error = ENOEXEC;
|
2007-04-22 12:29:55 +04:00
|
|
|
for (i = 0; i < nexecs; i++) {
|
1995-05-02 02:36:45 +04:00
|
|
|
int newerror;
|
|
|
|
|
2000-12-08 22:42:11 +03:00
|
|
|
epp->ep_esch = execsw[i];
|
2005-12-11 15:16:03 +03:00
|
|
|
newerror = (*execsw[i]->es_makecmds)(l, epp);
|
2007-04-22 12:29:55 +04:00
|
|
|
|
|
|
|
if (!newerror) {
|
2011-08-25 23:14:07 +04:00
|
|
|
/* Seems ok: check that entry point is not too high */
|
2011-08-26 13:29:16 +04:00
|
|
|
if (epp->ep_entry > epp->ep_vm_maxaddr) {
|
2011-08-26 13:13:08 +04:00
|
|
|
#ifdef DIAGNOSTIC
|
2011-09-17 01:02:28 +04:00
|
|
|
printf("%s: rejecting %p due to "
|
2011-11-24 21:09:14 +04:00
|
|
|
"too high entry address (> %p)\n",
|
|
|
|
__func__, (void *)epp->ep_entry,
|
|
|
|
(void *)epp->ep_vm_maxaddr);
|
2011-08-26 13:13:08 +04:00
|
|
|
#endif
|
2011-08-25 23:14:07 +04:00
|
|
|
error = ENOEXEC;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
/* Seems ok: check that entry point is not too low */
|
2011-08-26 13:29:16 +04:00
|
|
|
if (epp->ep_entry < epp->ep_vm_minaddr) {
|
2011-08-26 13:13:08 +04:00
|
|
|
#ifdef DIAGNOSTIC
|
2011-09-17 01:02:28 +04:00
|
|
|
printf("%s: rejecting %p due to "
|
2011-11-24 21:09:14 +04:00
|
|
|
"too low entry address (< %p)\n",
|
|
|
|
__func__, (void *)epp->ep_entry,
|
|
|
|
(void *)epp->ep_vm_minaddr);
|
2011-08-26 13:13:08 +04:00
|
|
|
#endif
|
2007-04-22 12:29:55 +04:00
|
|
|
error = ENOEXEC;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* check limits */
|
|
|
|
if ((epp->ep_tsize > MAXTSIZ) ||
|
|
|
|
(epp->ep_dsize > (u_quad_t)l->l_proc->p_rlimit
|
|
|
|
[RLIMIT_DATA].rlim_cur)) {
|
2011-08-26 13:13:08 +04:00
|
|
|
#ifdef DIAGNOSTIC
|
2011-08-26 13:29:16 +04:00
|
|
|
printf("%s: rejecting due to "
|
2011-11-24 21:09:14 +04:00
|
|
|
"limits (t=%llu > %llu || d=%llu > %llu)\n",
|
|
|
|
__func__,
|
|
|
|
(unsigned long long)epp->ep_tsize,
|
|
|
|
(unsigned long long)MAXTSIZ,
|
|
|
|
(unsigned long long)epp->ep_dsize,
|
2011-11-24 23:55:22 +04:00
|
|
|
(unsigned long long)
|
|
|
|
l->l_proc->p_rlimit[RLIMIT_DATA].rlim_cur);
|
2011-08-26 13:13:08 +04:00
|
|
|
#endif
|
2007-04-22 12:29:55 +04:00
|
|
|
error = ENOMEM;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (epp->ep_emul_root != NULL) {
|
|
|
|
vrele(epp->ep_emul_root);
|
|
|
|
epp->ep_emul_root = NULL;
|
|
|
|
}
|
|
|
|
if (epp->ep_interp != NULL) {
|
|
|
|
vrele(epp->ep_interp);
|
|
|
|
epp->ep_interp = NULL;
|
|
|
|
}
|
|
|
|
|
1995-05-02 02:36:45 +04:00
|
|
|
/* make sure the first "interesting" error code is saved. */
|
2007-04-22 12:29:55 +04:00
|
|
|
if (error == ENOEXEC)
|
1995-05-02 02:36:45 +04:00
|
|
|
error = newerror;
|
2000-11-21 03:37:49 +03:00
|
|
|
|
2007-04-22 12:29:55 +04:00
|
|
|
if (epp->ep_flags & EXEC_DESTR)
|
|
|
|
/* Error from "#!" code, tidied up by recursive call */
|
1994-06-29 10:29:24 +04:00
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2007-10-02 16:01:17 +04:00
|
|
|
/* not found, error */
|
|
|
|
|
1994-06-29 10:29:24 +04:00
|
|
|
/*
|
|
|
|
* free any vmspace-creation commands,
|
|
|
|
* and release their references
|
|
|
|
*/
|
|
|
|
kill_vmcmds(&epp->ep_vmcmds);
|
|
|
|
|
|
|
|
bad2:
|
|
|
|
/*
|
1999-02-27 02:38:55 +03:00
|
|
|
* close and release the vnode, restore the old one, free the
|
1994-06-29 10:29:24 +04:00
|
|
|
* pathname buf, and punt.
|
|
|
|
*/
|
1999-02-27 02:38:55 +03:00
|
|
|
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
|
2007-11-26 22:01:26 +03:00
|
|
|
VOP_CLOSE(vp, FREAD, l->l_cred);
|
1999-02-27 02:38:55 +03:00
|
|
|
vput(vp);
|
1994-06-29 10:29:24 +04:00
|
|
|
return error;
|
|
|
|
|
|
|
|
bad1:
|
|
|
|
/*
|
|
|
|
* free the namei pathname buffer, and put the vnode
|
|
|
|
* (which we don't yet have open).
|
|
|
|
*/
|
1996-10-01 03:18:43 +04:00
|
|
|
vput(vp); /* was still locked */
|
1994-06-29 10:29:24 +04:00
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2004-07-19 01:29:26 +04:00
|
|
|
#ifdef __MACHINE_STACK_GROWS_UP
|
|
|
|
#define STACK_PTHREADSPACE NBPG
|
|
|
|
#else
|
|
|
|
#define STACK_PTHREADSPACE 0
|
|
|
|
#endif
|
|
|
|
|
2005-07-12 00:15:26 +04:00
|
|
|
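/*
 * Default fetch_element callback for execve1(): copy the index'th
 * argv/envp pointer out of the user-supplied array.  Callers that lay out
 * the user pointers differently (e.g. 32-bit compat code, not part of this
 * excerpt) can pass their own execve_fetch_element_t.
 */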
static int
|
|
|
|
execve_fetch_element(char * const *array, size_t index, char **value)
|
|
|
|
{
|
|
|
|
return copyin(array + index, value, sizeof(*value));
|
|
|
|
}
|
|
|
|
|
1994-06-29 10:29:24 +04:00
|
|
|
/*
|
|
|
|
* exec system call
|
|
|
|
*/
|
1996-02-09 21:59:18 +03:00
|
|
|
int
|
2007-12-21 02:02:38 +03:00
|
|
|
sys_execve(struct lwp *l, const struct sys_execve_args *uap, register_t *retval)
|
1995-09-20 01:40:36 +04:00
|
|
|
{
|
2007-12-21 02:02:38 +03:00
|
|
|
/* {
|
2001-02-26 23:43:25 +03:00
|
|
|
syscallarg(const char *) path;
|
|
|
|
syscallarg(char * const *) argp;
|
|
|
|
syscallarg(char * const *) envp;
|
2007-12-21 02:02:38 +03:00
|
|
|
} */
|
2005-07-12 00:15:26 +04:00
|
|
|
|
|
|
|
return execve1(l, SCARG(uap, path), SCARG(uap, argp),
|
|
|
|
SCARG(uap, envp), execve_fetch_element);
|
|
|
|
}
|
|
|
|
|
2011-08-08 16:08:52 +04:00
|
|
|
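/* fexecve(2) is not implemented here; the stub below always returns ENOSYS. */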
int
|
|
|
|
sys_fexecve(struct lwp *l, const struct sys_fexecve_args *uap,
|
|
|
|
register_t *retval)
|
|
|
|
{
|
|
|
|
/* {
|
|
|
|
syscallarg(int) fd;
|
|
|
|
syscallarg(char * const *) argp;
|
|
|
|
syscallarg(char * const *) envp;
|
|
|
|
} */
|
|
|
|
|
|
|
|
return ENOSYS;
|
|
|
|
}
|
|
|
|
|
2008-11-19 21:35:57 +03:00
|
|
|
/*
|
|
|
|
* Load modules to try and execute an image that we do not understand.
|
|
|
|
* If no execsw entries are present, we load those likely to be needed
|
|
|
|
* in order to run native images only. Otherwise, we autoload all
|
|
|
|
* possible modules that could let us run the binary. XXX lame
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
exec_autoload(void)
|
|
|
|
{
|
|
|
|
#ifdef MODULAR
|
|
|
|
static const char * const native[] = {
|
|
|
|
"exec_elf32",
|
|
|
|
"exec_elf64",
|
|
|
|
"exec_script",
|
|
|
|
NULL
|
|
|
|
};
|
|
|
|
static const char * const compat[] = {
|
|
|
|
"exec_elf32",
|
|
|
|
"exec_elf64",
|
|
|
|
"exec_script",
|
|
|
|
"exec_aout",
|
|
|
|
"exec_coff",
|
|
|
|
"exec_ecoff",
|
|
|
|
"compat_aoutm68k",
|
|
|
|
"compat_freebsd",
|
|
|
|
"compat_ibcs2",
|
|
|
|
"compat_linux",
|
|
|
|
"compat_linux32",
|
|
|
|
"compat_netbsd32",
|
|
|
|
"compat_sunos",
|
|
|
|
"compat_sunos32",
|
|
|
|
"compat_svr4",
|
|
|
|
"compat_svr4_32",
|
|
|
|
"compat_ultrix",
|
|
|
|
NULL
|
|
|
|
};
|
|
|
|
char const * const *list;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
list = (nexecs == 0 ? native : compat);
|
|
|
|
for (i = 0; list[i] != NULL; i++) {
|
|
|
|
if (module_autoload(list[i], MODULE_CLASS_MISC) != 0) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
yield();
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2012-02-12 03:16:15 +04:00
|
|
|
static int
|
|
|
|
execve_loadvm(struct lwp *l, const char *path, char * const *args,
|
|
|
|
char * const *envs, execve_fetch_element_t fetch_element,
|
|
|
|
struct execve_data * restrict data)
|
2005-07-12 00:15:26 +04:00
|
|
|
{
|
2002-08-26 01:18:15 +04:00
|
|
|
int error;
|
2003-01-18 13:06:22 +03:00
|
|
|
struct proc *p;
|
2001-02-26 23:43:25 +03:00
|
|
|
char *dp, *sp;
|
2007-09-21 00:51:38 +04:00
|
|
|
size_t i, len;
|
2008-01-02 22:44:36 +03:00
|
|
|
struct exec_fakearg *tmpfap;
|
2008-11-19 21:35:57 +03:00
|
|
|
u_int modgen;
|
2012-02-12 03:16:15 +04:00
|
|
|
|
|
|
|
KASSERT(data != NULL);
|
2001-02-26 23:43:25 +03:00
|
|
|
|
2003-01-18 13:06:22 +03:00
|
|
|
p = l->l_proc;
|
2008-11-19 21:35:57 +03:00
|
|
|
modgen = 0;
|
2007-02-10 00:55:00 +03:00
|
|
|
|
2010-03-02 00:10:13 +03:00
|
|
|
SDT_PROBE(proc,,,exec, path, 0, 0, 0, 0);
|
|
|
|
|
2008-02-25 00:46:04 +03:00
|
|
|
/*
|
|
|
|
* Check if we have exceeded our number of processes limit.
|
|
|
|
* This is so that we handle the case where a root daemon
|
|
|
|
* forked, ran setuid to become the desired user and is trying
|
|
|
|
* to exec. The obvious place to do the reference counting check
|
|
|
|
* is setuid(), but we don't do the reference counting check there
|
|
|
|
* like other OS's do because then all the programs that use setuid()
|
|
|
|
* must be modified to check the return code of setuid() and exit().
|
|
|
|
* It is dangerous to make setuid() fail, because it fails open and
|
|
|
|
* the program will continue to run as root. If we make it succeed
|
|
|
|
* and return an error code, again we are not enforcing the limit.
|
|
|
|
* The best place to enforce the limit is here, when the process tries
|
|
|
|
* to execute a new image, because eventually the process will need
|
|
|
|
* to call exec in order to do something useful.
|
|
|
|
*/
|
2008-11-19 21:35:57 +03:00
|
|
|
retry:
|
2009-03-25 00:00:05 +03:00
|
|
|
if ((p->p_flag & PK_SUGID) && kauth_authorize_generic(l->l_cred,
|
|
|
|
KAUTH_GENERIC_ISSUSER, NULL) != 0 && chgproccnt(kauth_cred_getuid(
|
|
|
|
l->l_cred), 0) > p->p_rlimit[RLIMIT_NPROC].rlim_cur)
|
2008-02-25 00:46:04 +03:00
|
|
|
return EAGAIN;
|
|
|
|
|
2002-01-12 00:16:27 +03:00
|
|
|
/*
|
2007-02-10 00:55:00 +03:00
|
|
|
* Drain existing references and forbid new ones. The process
|
|
|
|
* should be left alone until we're done here. This is necessary
|
|
|
|
* to avoid race conditions - e.g. in ptrace() - that might allow
|
|
|
|
* a local user to illicitly obtain elevated privileges.
|
2002-01-12 00:16:27 +03:00
|
|
|
*/
|
2007-11-07 03:23:13 +03:00
|
|
|
rw_enter(&p->p_reflock, RW_WRITER);
|
2002-01-12 00:16:27 +03:00
|
|
|
|
2000-12-07 19:14:35 +03:00
|
|
|
/*
|
|
|
|
 * Init the namei data to point at the user's program name.
|
|
|
|
* This is done here rather than in check_exec(), so that it's
|
|
|
|
 * possible to override these settings if any of the makecmd/probe
|
|
|
|
* functions call check_exec() recursively - for example,
|
|
|
|
* see exec_script_makecmds().
|
|
|
|
*/
|
2012-02-12 03:16:15 +04:00
|
|
|
error = pathbuf_copyin(path, &data->ed_pathbuf);
|
2007-09-21 00:51:38 +04:00
|
|
|
if (error) {
|
2011-03-14 02:44:14 +03:00
|
|
|
DPRINTF(("%s: pathbuf_copyin path @%p %d\n", __func__,
|
|
|
|
path, error));
|
2005-06-26 23:58:29 +04:00
|
|
|
goto clrflg;
|
2007-09-21 00:51:38 +04:00
|
|
|
}
|
2012-02-12 03:16:15 +04:00
|
|
|
data->ed_pathstring = pathbuf_stringcopy_get(data->ed_pathbuf);
|
|
|
|
|
|
|
|
data->ed_resolvedpathbuf = PNBUF_GET();
|
2010-05-02 09:30:20 +04:00
|
|
|
#ifdef DIAGNOSTIC
|
2012-02-12 03:16:15 +04:00
|
|
|
strcpy(data->ed_resolvedpathbuf, "/wrong");
|
2010-05-02 09:30:20 +04:00
|
|
|
#endif
|
1994-06-29 10:29:24 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* initialize the fields of the exec package.
|
|
|
|
*/
|
2012-02-12 03:16:15 +04:00
|
|
|
data->ed_pack.ep_name = path;
|
|
|
|
data->ed_pack.ep_kname = data->ed_pathstring;
|
|
|
|
data->ed_pack.ep_resolvedname = data->ed_resolvedpathbuf;
|
|
|
|
data->ed_pack.ep_hdr = kmem_alloc(exec_maxhdrsz, KM_SLEEP);
|
|
|
|
data->ed_pack.ep_hdrlen = exec_maxhdrsz;
|
|
|
|
data->ed_pack.ep_hdrvalid = 0;
|
|
|
|
data->ed_pack.ep_emul_arg = NULL;
|
|
|
|
data->ed_pack.ep_emul_arg_free = NULL;
|
|
|
|
data->ed_pack.ep_vmcmds.evs_cnt = 0;
|
|
|
|
data->ed_pack.ep_vmcmds.evs_used = 0;
|
|
|
|
data->ed_pack.ep_vap = &data->ed_attr;
|
|
|
|
data->ed_pack.ep_flags = 0;
|
|
|
|
data->ed_pack.ep_emul_root = NULL;
|
|
|
|
data->ed_pack.ep_interp = NULL;
|
|
|
|
data->ed_pack.ep_esch = NULL;
|
|
|
|
data->ed_pack.ep_pax_flags = 0;
|
1994-06-29 10:29:24 +04:00
|
|
|
|
2007-02-10 00:55:00 +03:00
|
|
|
rw_enter(&exec_lock, RW_READER);
|
2000-12-08 22:42:11 +03:00
|
|
|
|
1994-06-29 10:29:24 +04:00
|
|
|
/* see if we can run it. */
|
2012-02-12 03:16:15 +04:00
|
|
|
if ((error = check_exec(l, &data->ed_pack, data->ed_pathbuf)) != 0) {
|
2007-12-27 01:49:19 +03:00
|
|
|
if (error != ENOENT) {
|
2011-03-14 02:44:14 +03:00
|
|
|
DPRINTF(("%s: check exec failed %d\n",
|
|
|
|
__func__, error));
|
2007-12-27 01:49:19 +03:00
|
|
|
}
|
1994-06-29 10:29:24 +04:00
|
|
|
goto freehdr;
|
2007-09-21 00:51:38 +04:00
|
|
|
}
|
1994-06-29 10:29:24 +04:00
|
|
|
|
|
|
|
/* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */
|
|
|
|
|
|
|
|
/* allocate an argument buffer */
|
2012-02-12 03:16:15 +04:00
|
|
|
data->ed_argp = pool_get(&exec_pool, PR_WAITOK);
|
|
|
|
KASSERT(data->ed_argp != NULL);
|
|
|
|
dp = data->ed_argp;
|
|
|
|
data->ed_argc = 0;
|
1994-06-29 10:29:24 +04:00
|
|
|
|
|
|
|
/* copy the fake args list, if there's one, freeing it as we go */
|
2012-02-12 03:16:15 +04:00
|
|
|
if (data->ed_pack.ep_flags & EXEC_HASARGL) {
|
|
|
|
tmpfap = data->ed_pack.ep_fa;
|
2008-01-02 22:44:36 +03:00
|
|
|
while (tmpfap->fa_arg != NULL) {
|
|
|
|
const char *cp;
|
1994-06-29 10:29:24 +04:00
|
|
|
|
2008-01-02 22:44:36 +03:00
|
|
|
cp = tmpfap->fa_arg;
|
1994-06-29 10:29:24 +04:00
|
|
|
while (*cp)
|
|
|
|
*dp++ = *cp++;
|
2008-06-24 22:04:52 +04:00
|
|
|
*dp++ = '\0';
|
2009-08-07 01:33:54 +04:00
|
|
|
ktrexecarg(tmpfap->fa_arg, cp - tmpfap->fa_arg);
|
1994-06-29 10:29:24 +04:00
|
|
|
|
2008-01-02 22:44:36 +03:00
|
|
|
kmem_free(tmpfap->fa_arg, tmpfap->fa_len);
|
2012-02-12 03:16:15 +04:00
|
|
|
tmpfap++; data->ed_argc++;
|
1994-06-29 10:29:24 +04:00
|
|
|
}
|
2012-02-12 03:16:15 +04:00
|
|
|
kmem_free(data->ed_pack.ep_fa, data->ed_pack.ep_fa_len);
|
|
|
|
data->ed_pack.ep_flags &= ~EXEC_HASARGL;
|
1994-06-29 10:29:24 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Now get argv & environment */
|
2005-07-12 00:15:26 +04:00
|
|
|
if (args == NULL) {
|
2011-03-14 02:44:14 +03:00
|
|
|
DPRINTF(("%s: null args\n", __func__));
|
1994-06-29 10:29:24 +04:00
|
|
|
error = EINVAL;
|
|
|
|
goto bad;
|
|
|
|
}
|
2005-07-12 00:15:26 +04:00
|
|
|
/* 'i' will index the argp/envp element to be retrieved */
|
|
|
|
i = 0;
|
2012-02-12 03:16:15 +04:00
|
|
|
if (data->ed_pack.ep_flags & EXEC_SKIPARG)
|
2005-07-12 00:15:26 +04:00
|
|
|
i++;
|
1994-06-29 10:29:24 +04:00
|
|
|
|
|
|
|
while (1) {
|
2012-02-12 03:16:15 +04:00
|
|
|
len = data->ed_argp + ARG_MAX - dp;
|
2007-09-21 00:51:38 +04:00
|
|
|
if ((error = (*fetch_element)(args, i, &sp)) != 0) {
|
2011-03-14 02:44:14 +03:00
|
|
|
DPRINTF(("%s: fetch_element args %d\n",
|
2011-03-14 23:12:40 +03:00
|
|
|
__func__, error));
|
1994-06-29 10:29:24 +04:00
|
|
|
goto bad;
|
2007-09-21 00:51:38 +04:00
|
|
|
}
|
1994-06-29 10:29:24 +04:00
|
|
|
if (!sp)
|
|
|
|
break;
|
1996-02-04 05:15:01 +03:00
|
|
|
if ((error = copyinstr(sp, dp, len, &len)) != 0) {
|
2011-03-14 02:44:14 +03:00
|
|
|
DPRINTF(("%s: copyinstr args %d\n", __func__, error));
|
1994-06-29 10:29:24 +04:00
|
|
|
if (error == ENAMETOOLONG)
|
|
|
|
error = E2BIG;
|
|
|
|
goto bad;
|
|
|
|
}
|
2007-08-15 16:07:23 +04:00
|
|
|
ktrexecarg(dp, len - 1);
|
1994-06-29 10:29:24 +04:00
|
|
|
dp += len;
|
2005-07-12 00:15:26 +04:00
|
|
|
i++;
|
2012-02-12 03:16:15 +04:00
|
|
|
data->ed_argc++;
|
1994-06-29 10:29:24 +04:00
|
|
|
}
|
|
|
|
|
2012-02-12 03:16:15 +04:00
|
|
|
data->ed_envc = 0;
|
1996-02-04 05:15:01 +03:00
|
|
|
/* environment need not be there */
|
2005-07-12 00:15:26 +04:00
|
|
|
if (envs != NULL) {
|
|
|
|
i = 0;
|
1994-06-29 10:29:24 +04:00
|
|
|
while (1) {
|
2012-02-12 03:16:15 +04:00
|
|
|
len = data->ed_argp + ARG_MAX - dp;
|
2007-09-21 00:51:38 +04:00
|
|
|
if ((error = (*fetch_element)(envs, i, &sp)) != 0) {
|
2011-03-14 02:44:14 +03:00
|
|
|
DPRINTF(("%s: fetch_element env %d\n",
|
|
|
|
__func__, error));
|
1994-06-29 10:29:24 +04:00
|
|
|
goto bad;
|
2007-09-21 00:51:38 +04:00
|
|
|
}
|
1994-06-29 10:29:24 +04:00
|
|
|
if (!sp)
|
|
|
|
break;
|
1996-02-04 05:15:01 +03:00
|
|
|
if ((error = copyinstr(sp, dp, len, &len)) != 0) {
|
2011-03-14 02:44:14 +03:00
|
|
|
DPRINTF(("%s: copyinstr env %d\n",
|
|
|
|
__func__, error));
|
1994-06-29 10:29:24 +04:00
|
|
|
if (error == ENAMETOOLONG)
|
|
|
|
error = E2BIG;
|
|
|
|
goto bad;
|
|
|
|
}
|
2012-02-12 03:16:15 +04:00
|
|
|
|
2007-08-15 16:07:23 +04:00
|
|
|
ktrexecenv(dp, len - 1);
|
1994-06-29 10:29:24 +04:00
|
|
|
dp += len;
|
2005-07-12 00:15:26 +04:00
|
|
|
i++;
|
2012-02-12 03:16:15 +04:00
|
|
|
data->ed_envc++;
|
1994-06-29 10:29:24 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1995-02-22 04:39:56 +03:00
|
|
|
dp = (char *) ALIGN(dp);
|
|
|
|
|
2012-02-12 03:16:15 +04:00
|
|
|
data->ed_szsigcode = data->ed_pack.ep_esch->es_emul->e_esigcode -
|
|
|
|
data->ed_pack.ep_esch->es_emul->e_sigcode;
|
1995-04-08 02:33:23 +04:00
|
|
|
|
2008-01-20 13:15:50 +03:00
|
|
|
#ifdef __MACHINE_STACK_GROWS_UP
|
|
|
|
/* See big comment lower down */
|
|
|
|
#define RTLD_GAP 32
|
|
|
|
#else
|
|
|
|
#define RTLD_GAP 0
|
|
|
|
#endif
|
|
|
|
|
1994-06-29 10:29:24 +04:00
|
|
|
/* Now check if args & environ fit into new stack */
|
2012-02-12 03:16:15 +04:00
|
|
|
if (data->ed_pack.ep_flags & EXEC_32) {
|
|
|
|
data->ed_ps_strings_sz = sizeof(struct ps_strings32);
|
|
|
|
len = ((data->ed_argc + data->ed_envc + 2 +
|
|
|
|
data->ed_pack.ep_esch->es_arglen) *
|
2008-01-20 13:15:50 +03:00
|
|
|
sizeof(int) + sizeof(int) + dp + RTLD_GAP +
|
2012-02-12 03:16:15 +04:00
|
|
|
data->ed_szsigcode + data->ed_ps_strings_sz + STACK_PTHREADSPACE)
|
|
|
|
- data->ed_argp;
|
2011-03-05 01:25:24 +03:00
|
|
|
} else {
|
2012-02-12 03:16:15 +04:00
|
|
|
data->ed_ps_strings_sz = sizeof(struct ps_strings);
|
|
|
|
len = ((data->ed_argc + data->ed_envc + 2 +
|
|
|
|
data->ed_pack.ep_esch->es_arglen) *
|
2008-01-20 13:15:50 +03:00
|
|
|
sizeof(char *) + sizeof(int) + dp + RTLD_GAP +
|
2012-02-12 03:16:15 +04:00
|
|
|
data->ed_szsigcode + data->ed_ps_strings_sz + STACK_PTHREADSPACE)
|
|
|
|
- data->ed_argp;
|
2011-03-05 01:25:24 +03:00
|
|
|
}
|
1995-04-22 23:42:47 +04:00
|
|
|
|
2007-12-28 20:14:50 +03:00
|
|
|
#ifdef PAX_ASLR
|
|
|
|
if (pax_aslr_active(l))
|
2011-11-20 02:51:18 +04:00
|
|
|
len += (cprng_fast32() % PAGE_SIZE);
|
2007-12-28 20:14:50 +03:00
|
|
|
#endif /* PAX_ASLR */
|
|
|
|
|
2012-01-25 00:03:36 +04:00
|
|
|
/* make the stack "safely" aligned */
|
2012-01-25 22:26:26 +04:00
|
|
|
len = STACK_LEN_ALIGN(len, STACK_ALIGNBYTES);
|
1994-06-29 10:29:24 +04:00
|
|
|
|
2012-02-12 03:16:15 +04:00
|
|
|
if (len > data->ed_pack.ep_ssize) {
|
|
|
|
/* in effect, compare to initial limit */
|
2011-03-14 02:44:14 +03:00
|
|
|
DPRINTF(("%s: stack limit exceeded %zu\n", __func__, len));
|
1994-06-29 10:29:24 +04:00
|
|
|
goto bad;
|
|
|
|
}
|
2012-02-12 03:16:15 +04:00
|
|
|
/* adjust "active stack depth" for process VSZ */
|
|
|
|
data->ed_pack.ep_ssize = len;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
bad:
|
|
|
|
/* free the vmspace-creation commands, and release their references */
|
|
|
|
kill_vmcmds(&data->ed_pack.ep_vmcmds);
|
|
|
|
/* kill any opened file descriptor, if necessary */
|
|
|
|
if (data->ed_pack.ep_flags & EXEC_HASFD) {
|
|
|
|
data->ed_pack.ep_flags &= ~EXEC_HASFD;
|
|
|
|
fd_close(data->ed_pack.ep_fd);
|
|
|
|
}
|
|
|
|
/* close and put the exec'd file */
|
|
|
|
vn_lock(data->ed_pack.ep_vp, LK_EXCLUSIVE | LK_RETRY);
|
|
|
|
VOP_CLOSE(data->ed_pack.ep_vp, FREAD, l->l_cred);
|
|
|
|
vput(data->ed_pack.ep_vp);
|
|
|
|
pool_put(&exec_pool, data->ed_argp);
|
|
|
|
|
|
|
|
freehdr:
|
|
|
|
kmem_free(data->ed_pack.ep_hdr, data->ed_pack.ep_hdrlen);
|
|
|
|
if (data->ed_pack.ep_emul_root != NULL)
|
|
|
|
vrele(data->ed_pack.ep_emul_root);
|
|
|
|
if (data->ed_pack.ep_interp != NULL)
|
|
|
|
vrele(data->ed_pack.ep_interp);
|
|
|
|
|
|
|
|
rw_exit(&exec_lock);
|
|
|
|
|
|
|
|
pathbuf_stringcopy_put(data->ed_pathbuf, data->ed_pathstring);
|
|
|
|
pathbuf_destroy(data->ed_pathbuf);
|
|
|
|
PNBUF_PUT(data->ed_resolvedpathbuf);
|
|
|
|
|
|
|
|
clrflg:
|
|
|
|
rw_exit(&p->p_reflock);
|
|
|
|
|
|
|
|
if (modgen != module_gen && error == ENOEXEC) {
|
|
|
|
modgen = module_gen;
|
|
|
|
exec_autoload();
|
|
|
|
goto retry;
|
|
|
|
}
|
|
|
|
|
|
|
|
SDT_PROBE(proc,,,exec_failure, error, 0, 0, 0, 0);
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
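/*
 * execve_runproc() performs the second half of exec using the state
 * prepared by execve_loadvm(): it replaces the process's address space via
 * uvmspace_exec(), runs the vmcmds to map the new image, copies the
 * arguments, environment and ps_strings onto the new stack, and resets
 * per-process state such as signals, file descriptors and POSIX timers.
 */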
static int
|
|
|
|
execve_runproc(struct lwp *l, struct execve_data * restrict data)
|
|
|
|
{
|
|
|
|
int error = 0;
|
|
|
|
struct proc *p;
|
|
|
|
size_t i;
|
|
|
|
char *stack, *dp;
|
|
|
|
const char *commandname;
|
|
|
|
struct ps_strings32 arginfo32;
|
|
|
|
struct exec_vmcmd *base_vcp;
|
|
|
|
void *aip;
|
|
|
|
struct vmspace *vm;
|
|
|
|
ksiginfo_t ksi;
|
|
|
|
ksiginfoq_t kq;
|
|
|
|
bool proc_is_new;
|
|
|
|
|
|
|
|
KASSERT(rw_lock_held(&exec_lock));
|
|
|
|
KASSERT(data != NULL);
|
|
|
|
if (data == NULL)
|
|
|
|
return (EINVAL);
|
|
|
|
|
|
|
|
p = l->l_proc;
|
|
|
|
proc_is_new = p->p_vmspace == NULL;
|
|
|
|
|
|
|
|
base_vcp = NULL;
|
|
|
|
|
|
|
|
if (data->ed_pack.ep_flags & EXEC_32)
|
|
|
|
aip = &arginfo32;
|
|
|
|
else
|
|
|
|
aip = &data->ed_arginfo;
|
1994-06-29 10:29:24 +04:00
|
|
|
|
2007-02-10 00:55:00 +03:00
|
|
|
/* Get rid of other LWPs. */
|
2012-02-20 01:05:51 +04:00
|
|
|
if (p->p_nlwps > 1) {
|
2008-04-24 22:39:20 +04:00
|
|
|
mutex_enter(p->p_lock);
|
2007-02-10 00:55:00 +03:00
|
|
|
exit_lwps(l);
|
2008-04-24 22:39:20 +04:00
|
|
|
mutex_exit(p->p_lock);
|
2007-02-10 00:55:00 +03:00
|
|
|
}
|
2003-01-18 13:06:22 +03:00
|
|
|
KDASSERT(p->p_nlwps == 1);
|
|
|
|
|
2007-11-13 02:11:58 +03:00
|
|
|
/* Destroy any lwpctl info. */
|
|
|
|
if (p->p_lwpctl != NULL)
|
|
|
|
lwp_ctl_exit();
|
|
|
|
|
2003-01-18 13:06:22 +03:00
|
|
|
/* Remove POSIX timers */
|
|
|
|
timers_free(p, TIMERS_POSIX);
|
|
|
|
|
1997-12-31 10:47:41 +03:00
|
|
|
/*
|
|
|
|
* Do whatever is necessary to prepare the address space
|
|
|
|
* for remapping. Note that this might replace the current
|
|
|
|
* vmspace with another!
|
|
|
|
*/
|
2012-02-12 03:16:15 +04:00
|
|
|
uvmspace_exec(l, data->ed_pack.ep_vm_minaddr, data->ed_pack.ep_vm_maxaddr);
|
1994-06-29 10:29:24 +04:00
|
|
|
|
2004-06-27 04:41:03 +04:00
|
|
|
/* record proc's vnode, for use by procfs and others */
|
|
|
|
if (p->p_textvp)
|
|
|
|
vrele(p->p_textvp);
|
2012-02-12 03:16:15 +04:00
|
|
|
vref(data->ed_pack.ep_vp);
|
|
|
|
p->p_textvp = data->ed_pack.ep_vp;
|
2004-06-27 04:41:03 +04:00
|
|
|
|
1994-06-29 10:29:24 +04:00
|
|
|
/* Now map address space */
|
1997-12-31 10:47:41 +03:00
|
|
|
vm = p->p_vmspace;
|
2012-02-12 03:16:15 +04:00
|
|
|
vm->vm_taddr = (void *)data->ed_pack.ep_taddr;
|
|
|
|
vm->vm_tsize = btoc(data->ed_pack.ep_tsize);
|
|
|
|
vm->vm_daddr = (void*)data->ed_pack.ep_daddr;
|
|
|
|
vm->vm_dsize = btoc(data->ed_pack.ep_dsize);
|
|
|
|
vm->vm_ssize = btoc(data->ed_pack.ep_ssize);
|
2009-03-29 05:02:48 +04:00
|
|
|
vm->vm_issize = 0;
|
2012-02-12 03:16:15 +04:00
|
|
|
vm->vm_maxsaddr = (void *)data->ed_pack.ep_maxsaddr;
|
|
|
|
vm->vm_minsaddr = (void *)data->ed_pack.ep_minsaddr;
|
1994-06-29 10:29:24 +04:00
|
|
|
|
2007-12-27 01:11:47 +03:00
|
|
|
#ifdef PAX_ASLR
|
|
|
|
pax_aslr_init(l, vm);
|
|
|
|
#endif /* PAX_ASLR */
|
|
|
|
|
1994-06-29 10:29:24 +04:00
|
|
|
/* create the new process's VM space by running the vmcmds */
|
|
|
|
#ifdef DIAGNOSTIC
|
2012-02-12 03:16:15 +04:00
|
|
|
if (data->ed_pack.ep_vmcmds.evs_used == 0)
|
2011-03-14 02:44:14 +03:00
|
|
|
panic("%s: no vmcmds", __func__);
|
1994-06-29 10:29:24 +04:00
|
|
|
#endif
|
2011-08-27 21:51:38 +04:00
|
|
|
|
|
|
|
#ifdef DEBUG_EXEC
|
|
|
|
{
|
|
|
|
size_t j;
|
2012-02-12 03:16:15 +04:00
|
|
|
struct exec_vmcmd *vp = &data->ed_pack.ep_vmcmds.evs_cmds[0];
|
|
|
|
DPRINTF(("vmcmds %u\n", data->ed_pack.ep_vmcmds.evs_used));
|
|
|
|
for (j = 0; j < data->ed_pack.ep_vmcmds.evs_used; j++) {
|
2011-08-27 22:11:48 +04:00
|
|
|
DPRINTF(("vmcmd[%zu] = vmcmd_map_%s %#"
|
2011-08-27 21:51:38 +04:00
|
|
|
PRIxVADDR"/%#"PRIxVSIZE" fd@%#"
|
|
|
|
PRIxVSIZE" prot=0%o flags=%d\n", j,
|
|
|
|
vp[j].ev_proc == vmcmd_map_pagedvn ?
|
|
|
|
"pagedvn" :
|
|
|
|
vp[j].ev_proc == vmcmd_map_readvn ?
|
|
|
|
"readvn" :
|
|
|
|
vp[j].ev_proc == vmcmd_map_zero ?
|
|
|
|
"zero" : "*unknown*",
|
|
|
|
vp[j].ev_addr, vp[j].ev_len,
|
|
|
|
vp[j].ev_offset, vp[j].ev_prot,
|
2011-08-27 22:07:10 +04:00
|
|
|
vp[j].ev_flags));
|
2011-08-27 21:51:38 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif /* DEBUG_EXEC */
|
|
|
|
|
2012-02-12 03:16:15 +04:00
|
|
|
for (i = 0; i < data->ed_pack.ep_vmcmds.evs_used && !error; i++) {
|
1994-06-29 10:29:24 +04:00
|
|
|
struct exec_vmcmd *vcp;
|
|
|
|
|
2012-02-12 03:16:15 +04:00
|
|
|
vcp = &data->ed_pack.ep_vmcmds.evs_cmds[i];
|
2000-07-13 05:24:04 +04:00
|
|
|
if (vcp->ev_flags & VMCMD_RELATIVE) {
|
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
if (base_vcp == NULL)
|
2011-03-14 02:44:14 +03:00
|
|
|
panic("%s: relative vmcmd with no base",
|
|
|
|
__func__);
|
2000-07-13 05:24:04 +04:00
|
|
|
if (vcp->ev_flags & VMCMD_BASE)
|
2011-03-14 02:44:14 +03:00
|
|
|
panic("%s: illegal base & relative vmcmd",
|
|
|
|
__func__);
|
2000-07-13 05:24:04 +04:00
|
|
|
#endif
|
|
|
|
vcp->ev_addr += base_vcp->ev_addr;
|
|
|
|
}
|
2005-12-11 15:16:03 +03:00
|
|
|
error = (*vcp->ev_proc)(l, vcp);
|
2001-07-16 00:49:40 +04:00
|
|
|
#ifdef DEBUG_EXEC
|
2000-06-21 09:43:33 +04:00
|
|
|
if (error) {
|
2007-09-21 00:51:38 +04:00
|
|
|
size_t j;
|
2012-02-12 03:16:15 +04:00
|
|
|
struct exec_vmcmd *vp =
|
|
|
|
&data->ed_pack.ep_vmcmds.evs_cmds[0];
|
2011-08-27 22:07:10 +04:00
|
|
|
DPRINTF(("vmcmds %zu/%u, error %d\n", i,
|
2012-02-12 03:16:15 +04:00
|
|
|
data->ed_pack.ep_vmcmds.evs_used, error));
|
|
|
|
for (j = 0; j < data->ed_pack.ep_vmcmds.evs_used; j++) {
|
2011-08-27 22:07:10 +04:00
|
|
|
DPRINTF(("vmcmd[%zu] = vmcmd_map_%s %#"
|
2011-03-04 07:17:12 +03:00
|
|
|
PRIxVADDR"/%#"PRIxVSIZE" fd@%#"
|
|
|
|
PRIxVSIZE" prot=0%o flags=%d\n", j,
|
|
|
|
vp[j].ev_proc == vmcmd_map_pagedvn ?
|
|
|
|
"pagedvn" :
|
|
|
|
vp[j].ev_proc == vmcmd_map_readvn ?
|
|
|
|
"readvn" :
|
|
|
|
vp[j].ev_proc == vmcmd_map_zero ?
|
|
|
|
"zero" : "*unknown*",
|
|
|
|
vp[j].ev_addr, vp[j].ev_len,
|
2001-07-16 00:49:40 +04:00
|
|
|
vp[j].ev_offset, vp[j].ev_prot,
|
2011-08-27 22:07:10 +04:00
|
|
|
vp[j].ev_flags));
|
2011-08-27 21:51:38 +04:00
|
|
|
if (j == i)
|
2011-08-27 22:07:10 +04:00
|
|
|
DPRINTF((" ^--- failed\n"));
|
2011-08-27 21:51:38 +04:00
|
|
|
}
|
2000-06-21 09:43:33 +04:00
|
|
|
}
|
2001-07-16 00:49:40 +04:00
|
|
|
#endif /* DEBUG_EXEC */
|
2000-07-13 05:24:04 +04:00
|
|
|
if (vcp->ev_flags & VMCMD_BASE)
|
|
|
|
base_vcp = vcp;
|
1994-06-29 10:29:24 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/* free the vmspace-creation commands, and release their references */
|
2012-02-12 03:16:15 +04:00
|
|
|
kill_vmcmds(&data->ed_pack.ep_vmcmds);
|
1994-06-29 10:29:24 +04:00
|
|
|
|
2012-02-12 03:16:15 +04:00
|
|
|
vn_lock(data->ed_pack.ep_vp, LK_EXCLUSIVE | LK_RETRY);
|
|
|
|
VOP_CLOSE(data->ed_pack.ep_vp, FREAD, l->l_cred);
|
|
|
|
vput(data->ed_pack.ep_vp);
|
2004-06-27 04:41:03 +04:00
|
|
|
|
1994-06-29 10:29:24 +04:00
|
|
|
/* if an error happened, deallocate and punt */
|
2000-06-21 09:43:33 +04:00
|
|
|
if (error) {
|
2011-03-14 02:44:14 +03:00
|
|
|
DPRINTF(("%s: vmcmd %zu failed: %d\n", __func__, i - 1, error));
|
1994-06-29 10:29:24 +04:00
|
|
|
goto exec_abort;
|
2000-06-21 09:43:33 +04:00
|
|
|
}
|
1994-06-29 10:29:24 +04:00
|
|
|
|
|
|
|
/* remember information about the process */
|
2012-02-12 03:16:15 +04:00
|
|
|
data->ed_arginfo.ps_nargvstr = data->ed_argc;
|
|
|
|
data->ed_arginfo.ps_nenvstr = data->ed_envc;
|
1994-06-29 10:29:24 +04:00
|
|
|
|
2007-12-03 05:06:57 +03:00
|
|
|
/* set command name & other accounting info */
|
2012-02-12 03:16:15 +04:00
|
|
|
commandname = strrchr(data->ed_pack.ep_resolvedname, '/');
|
2010-05-02 09:30:20 +04:00
|
|
|
if (commandname != NULL) {
|
|
|
|
commandname++;
|
|
|
|
} else {
|
2012-02-12 03:16:15 +04:00
|
|
|
commandname = data->ed_pack.ep_resolvedname;
|
2010-05-02 09:30:20 +04:00
|
|
|
}
|
|
|
|
i = min(strlen(commandname), MAXCOMLEN);
|
|
|
|
(void)memcpy(p->p_comm, commandname, i);
|
2007-12-03 05:06:57 +03:00
|
|
|
p->p_comm[i] = '\0';
|
|
|
|
|
|
|
|
dp = PNBUF_GET();
|
|
|
|
/*
|
|
|
|
* If the path starts with /, we don't need to do any work.
|
|
|
|
* This handles the majority of the cases.
|
|
|
|
* In the future perhaps we could canonicalize it?
|
|
|
|
*/
|
2012-02-12 03:16:15 +04:00
|
|
|
if (data->ed_pathstring[0] == '/')
|
|
|
|
(void)strlcpy(data->ed_pack.ep_path = dp, data->ed_pathstring,
|
|
|
|
MAXPATHLEN);
|
2011-12-04 19:12:07 +04:00
|
|
|
#ifdef notyet
|
2007-12-03 05:06:57 +03:00
|
|
|
/*
|
|
|
|
* Although this works most of the time [since the entry was just
|
|
|
|
* entered in the cache] we don't use it because it theoretically
|
|
|
|
* can fail and it is not the cleanest interface, because there
|
|
|
|
* could be races. When the namei cache is re-written, this can
|
|
|
|
* be changed to use the appropriate function.
|
|
|
|
*/
|
|
|
|
else if (!(error = vnode_to_path(dp, MAXPATHLEN, p->p_textvp, l, p)))
|
2012-02-12 03:16:15 +04:00
|
|
|
data->ed_pack.ep_path = dp;
|
2007-12-03 05:06:57 +03:00
|
|
|
#endif
|
|
|
|
else {
|
2011-12-04 19:12:07 +04:00
|
|
|
#ifdef notyet
|
2007-12-03 05:06:57 +03:00
|
|
|
printf("Cannot get path for pid %d [%s] (error %d)",
|
|
|
|
(int)p->p_pid, p->p_comm, error);
|
|
|
|
#endif
|
2012-02-12 03:16:15 +04:00
|
|
|
data->ed_pack.ep_path = NULL;
|
2007-12-03 05:06:57 +03:00
|
|
|
PNBUF_PUT(dp);
|
|
|
|
}
|
|
|
|
|
2002-11-18 01:53:46 +03:00
|
|
|
stack = (char *)STACK_ALLOC(STACK_GROW(vm->vm_minsaddr,
|
2012-02-12 03:16:15 +04:00
|
|
|
STACK_PTHREADSPACE + data->ed_ps_strings_sz + data->ed_szsigcode),
|
|
|
|
data->ed_pack.ep_ssize - (data->ed_ps_strings_sz + data->ed_szsigcode));
|
2008-01-20 13:15:50 +03:00
|
|
|
|
2002-11-18 01:53:46 +03:00
|
|
|
#ifdef __MACHINE_STACK_GROWS_UP
|
|
|
|
/*
|
|
|
|
* The copyargs call always copies into lower addresses
|
|
|
|
* first, moving towards higher addresses, starting with
|
2004-03-05 14:30:50 +03:00
|
|
|
* the stack pointer that we give. When the stack grows
|
|
|
|
* down, this puts argc/argv/envp very shallow on the
|
2008-01-20 13:15:50 +03:00
|
|
|
* stack, right at the first user stack pointer.
|
|
|
|
* When the stack grows up, the situation is reversed.
|
2002-11-18 01:53:46 +03:00
|
|
|
*
|
|
|
|
* Normally, this is no big deal. But the ld_elf.so _rtld()
|
2004-03-05 14:30:50 +03:00
|
|
|
* function expects to be called with a single pointer to
|
|
|
|
* a region that has a few words it can stash values into,
|
2002-11-18 01:53:46 +03:00
|
|
|
* followed by argc/argv/envp. When the stack grows down,
|
|
|
|
* it's easy to decrement the stack pointer a little bit to
|
|
|
|
* allocate the space for these few words and pass the new
|
|
|
|
* stack pointer to _rtld. When the stack grows up, however,
|
2003-08-24 21:52:28 +04:00
|
|
|
* a few words before argc is part of the signal trampoline, XXX
|
2002-11-18 01:53:46 +03:00
|
|
|
* so we have a problem.
|
|
|
|
*
|
2004-03-05 14:30:50 +03:00
|
|
|
* Instead of changing how _rtld works, we take the easy way
|
2008-01-20 13:15:50 +03:00
|
|
|
* out and steal 32 bytes before we call copyargs.
|
2012-02-12 03:16:15 +04:00
|
|
|
* This extra space was allowed for when 'pack.ep_ssize' was calculated.
|
2002-11-18 01:53:46 +03:00
|
|
|
*/
|
2008-01-20 13:15:50 +03:00
|
|
|
stack += RTLD_GAP;
|
2002-11-18 01:53:46 +03:00
|
|
|
#endif /* __MACHINE_STACK_GROWS_UP */
|
2012-02-12 03:16:15 +04:00
|
|
|
|
1995-04-22 23:42:47 +04:00
|
|
|
	/* Now copy argc, args & environ to new stack */
	error = (*data->ed_pack.ep_esch->es_copyargs)(l, &data->ed_pack,
	    &data->ed_arginfo, &stack, data->ed_argp);

	if (data->ed_pack.ep_path) {
		PNBUF_PUT(data->ed_pack.ep_path);
		data->ed_pack.ep_path = NULL;
	}
	if (error) {
		DPRINTF(("%s: copyargs failed %d\n", __func__, error));
		goto exec_abort;
	}

	/* Move the stack back to original point */
	stack = (char *)STACK_GROW(vm->vm_minsaddr, data->ed_pack.ep_ssize);

	/* fill process ps_strings info */
	p->p_psstrp = (vaddr_t)STACK_ALLOC(STACK_GROW(vm->vm_minsaddr,
	    STACK_PTHREADSPACE), data->ed_ps_strings_sz);

	if (data->ed_pack.ep_flags & EXEC_32) {
		arginfo32.ps_argvstr = (vaddr_t)data->ed_arginfo.ps_argvstr;
		arginfo32.ps_nargvstr = data->ed_arginfo.ps_nargvstr;
		arginfo32.ps_envstr = (vaddr_t)data->ed_arginfo.ps_envstr;
		arginfo32.ps_nenvstr = data->ed_arginfo.ps_nenvstr;
	}

	/* copy out the process's ps_strings structure */
	if ((error = copyout(aip, (void *)p->p_psstrp, data->ed_ps_strings_sz))
	    != 0) {
		DPRINTF(("%s: ps_strings copyout %p->%p size %zu failed\n",
		    __func__, aip, (void *)p->p_psstrp, data->ed_ps_strings_sz));
		goto exec_abort;
	}

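	/*
	 * The new image is in place; from here on, tear down state that
	 * must not be inherited from the old image: cwd references,
	 * close-on-exec descriptors, caught signals and the ucontext link.
	 */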
	cwdexec(p);
	fd_closeexec();		/* handle close on exec */

	if (__predict_false(ktrace_on))
		fd_ktrexecfd();

	execsigs(p);		/* reset caught signals */

	l->l_ctxlink = NULL;	/* reset ucontext link */

	p->p_acflag &= ~AFORK;
	mutex_enter(p->p_lock);
	p->p_flag |= PK_EXEC;
	mutex_exit(p->p_lock);

	/*
	 * Stop profiling.
	 */
	if ((p->p_stflag & PST_PROFIL) != 0) {
		mutex_spin_enter(&p->p_stmutex);
		stopprofclock(p);
		mutex_spin_exit(&p->p_stmutex);
	}

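	/*
	 * A vforked child keeps its parent blocked (and shares its address
	 * space) until exec or exit; clearing PL_PPWAIT below is what
	 * finally lets the blocked parent resume.
	 */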
	/*
	 * It's OK to test PL_PPWAIT unlocked here, as other LWPs have
	 * exited and exec()/exit() are the only places it will be cleared.
	 */
	if ((p->p_lflag & PL_PPWAIT) != 0) {
		mutex_enter(proc_lock);
		l->l_lwpctl = NULL; /* was on loan from blocked parent */
		p->p_lflag &= ~PL_PPWAIT;
		cv_broadcast(&p->p_pptr->p_waitcv);
		mutex_exit(proc_lock);
	}

	/*
	 * Deal with set[ug]id. MNT_NOSUID has already been used to disable
	 * s[ug]id. It's OK to check for PSL_TRACED here as we have blocked
	 * out additional references on the process for the moment.
	 */
	if ((p->p_slflag & PSL_TRACED) == 0 &&
	    (((data->ed_attr.va_mode & S_ISUID) != 0 &&
	    kauth_cred_geteuid(l->l_cred) != data->ed_attr.va_uid) ||
	    ((data->ed_attr.va_mode & S_ISGID) != 0 &&
	    kauth_cred_getegid(l->l_cred) != data->ed_attr.va_gid))) {
		/*
		 * Mark the process as SUGID before we do
		 * anything that might block.
		 */
		proc_crmod_enter();
		proc_crmod_leave(NULL, NULL, true);

		/* Make sure file descriptors 0..2 are in use. */
		if ((error = fd_checkstd()) != 0) {
			DPRINTF(("%s: fdcheckstd failed %d\n",
			    __func__, error));
			goto exec_abort;
		}

		/*
		 * Copy the credential so other references don't see our
		 * changes.
		 */
		l->l_cred = kauth_cred_copy(l->l_cred);
#ifdef KTRACE
		/*
		 * If the persistent trace flag isn't set, turn off.
		 */
		if (p->p_tracep) {
			mutex_enter(&ktrace_lock);
			if (!(p->p_traceflag & KTRFAC_PERSISTENT))
				ktrderef(p);
			mutex_exit(&ktrace_lock);
		}
#endif
		if (data->ed_attr.va_mode & S_ISUID)
			kauth_cred_seteuid(l->l_cred, data->ed_attr.va_uid);
		if (data->ed_attr.va_mode & S_ISGID)
			kauth_cred_setegid(l->l_cred, data->ed_attr.va_gid);
	} else {
		if (kauth_cred_geteuid(l->l_cred) ==
		    kauth_cred_getuid(l->l_cred) &&
		    kauth_cred_getegid(l->l_cred) ==
		    kauth_cred_getgid(l->l_cred))
			p->p_flag &= ~PK_SUGID;
	}

	/*
	 * Copy the credential so other references don't see our changes.
	 * Test to see if this is necessary first, since in the common case
	 * we won't need a private reference.
	 */
	if (kauth_cred_geteuid(l->l_cred) != kauth_cred_getsvuid(l->l_cred) ||
	    kauth_cred_getegid(l->l_cred) != kauth_cred_getsvgid(l->l_cred)) {
		l->l_cred = kauth_cred_copy(l->l_cred);
		kauth_cred_setsvuid(l->l_cred, kauth_cred_geteuid(l->l_cred));
		kauth_cred_setsvgid(l->l_cred, kauth_cred_getegid(l->l_cred));
	}

	/* Update the master credentials. */
	if (l->l_cred != p->p_cred) {
		kauth_cred_t ocred;

		kauth_cred_hold(l->l_cred);
		mutex_enter(p->p_lock);
		ocred = p->p_cred;
		p->p_cred = l->l_cred;
		mutex_exit(p->p_lock);
		kauth_cred_free(ocred);
	}

#if defined(__HAVE_RAS)
	/*
	 * Remove all RASs from the address space.
	 */
	ras_purgeall();
#endif

	doexechooks(p);

	/* setup new registers and do misc. setup. */
	(*data->ed_pack.ep_esch->es_emul->e_setregs)(l, &data->ed_pack,
	    (vaddr_t)stack);
	if (data->ed_pack.ep_esch->es_setregs)
		(*data->ed_pack.ep_esch->es_setregs)(l, &data->ed_pack,
		    (vaddr_t)stack);

	/* Provide a consistent LWP private setting */
	(void)lwp_setprivate(l, NULL);

	/* Discard all PCU state; need to start fresh */
	pcu_discard_all(l);

	/* map the process's signal trampoline code */
	if ((error = exec_sigcode_map(p, data->ed_pack.ep_esch->es_emul)) != 0) {
		DPRINTF(("%s: map sigcode failed %d\n", __func__, error));
		goto exec_abort;
	}

	pool_put(&exec_pool, data->ed_argp);

	/* notify others that we exec'd */
	KNOTE(&p->p_klist, NOTE_EXEC);

	kmem_free(data->ed_pack.ep_hdr, data->ed_pack.ep_hdrlen);

	SDT_PROBE(proc,,,exec_success, data->ed_pack.ep_name, 0, 0, 0, 0);

	/* The emulation root will usually have been found when we looked
	 * for the ELF interpreter (or similar), if not look now. */
	if (data->ed_pack.ep_esch->es_emul->e_path != NULL &&
	    data->ed_pack.ep_emul_root == NULL)
		emul_find_root(l, &data->ed_pack);

	/* Any old emulation root got removed by fd_closeexec */
	rw_enter(&p->p_cwdi->cwdi_lock, RW_WRITER);
	p->p_cwdi->cwdi_edir = data->ed_pack.ep_emul_root;
	rw_exit(&p->p_cwdi->cwdi_lock);
	data->ed_pack.ep_emul_root = NULL;
	if (data->ed_pack.ep_interp != NULL)
		vrele(data->ed_pack.ep_interp);

	/*
	 * Call the emulation-specific exec hook. This can set up per-process
	 * p->p_emuldata or do any other per-process stuff an emulation needs.
	 *
	 * If we are executing a process of a different emulation than the
	 * original forked process, call e_proc_exit() of the old emulation
	 * first, then e_proc_exec() of the new emulation. If the emulation
	 * is the same, the exec hook code should deallocate any old emulation
	 * resources held previously by this process.
	 */
	if (p->p_emul && p->p_emul->e_proc_exit
	    && p->p_emul != data->ed_pack.ep_esch->es_emul)
		(*p->p_emul->e_proc_exit)(p);

	/*
	 * This is now LWP 1.
	 */
	mutex_enter(p->p_lock);
	p->p_nlwpid = 1;
	l->l_lid = 1;
	mutex_exit(p->p_lock);

	/*
	 * Call exec hook. Emulation code may NOT store reference to anything
	 * from &pack.
	 */
	if (data->ed_pack.ep_esch->es_emul->e_proc_exec)
		(*data->ed_pack.ep_esch->es_emul->e_proc_exec)(p, &data->ed_pack);

	/* update p_emul, the old value is no longer needed */
	p->p_emul = data->ed_pack.ep_esch->es_emul;

	/* ...and the same for p_execsw */
	p->p_execsw = data->ed_pack.ep_esch;

#ifdef __HAVE_SYSCALL_INTERN
	(*p->p_emul->e_syscall_intern)(p);
#endif
	ktremul();

	/* Allow new references from the debugger/procfs. */
	if (!proc_is_new)
		rw_exit(&p->p_reflock);
	rw_exit(&exec_lock);

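	/*
	 * Under proc_lock: notify an attached debugger with SIGTRAP (unless
	 * syscall tracing is active), and honour PS_STOPEXEC by stopping the
	 * process before it returns to user space.
	 */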
	mutex_enter(proc_lock);

	if ((p->p_slflag & (PSL_TRACED|PSL_SYSCALL)) == PSL_TRACED) {
		KSI_INIT_EMPTY(&ksi);
		ksi.ksi_signo = SIGTRAP;
		ksi.ksi_lid = l->l_lid;
		kpsignal(p, &ksi, NULL);
	}

	if (p->p_sflag & PS_STOPEXEC) {
		KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
		p->p_pptr->p_nstopchild++;
		p->p_pptr->p_waited = 0;
		mutex_enter(p->p_lock);
		ksiginfo_queue_init(&kq);
		sigclearall(p, &contsigmask, &kq);
		lwp_lock(l);
		l->l_stat = LSSTOP;
		p->p_stat = SSTOP;
		p->p_nrlwps--;
		lwp_unlock(l);
		mutex_exit(p->p_lock);
		mutex_exit(proc_lock);
		lwp_lock(l);
		mi_switch(l);
		ksiginfo_queue_drain(&kq);
		KERNEL_LOCK(l->l_biglocks, l);
	} else {
		mutex_exit(proc_lock);
	}

	pathbuf_stringcopy_put(data->ed_pathbuf, data->ed_pathstring);
	pathbuf_destroy(data->ed_pathbuf);
	PNBUF_PUT(data->ed_resolvedpathbuf);
	DPRINTF(("%s finished\n", __func__));
	return (EJUSTRETURN);

 exec_abort:
	SDT_PROBE(proc,,,exec_failure, error, 0, 0, 0, 0);
	rw_exit(&p->p_reflock);
	rw_exit(&exec_lock);

	pathbuf_stringcopy_put(data->ed_pathbuf, data->ed_pathstring);
	pathbuf_destroy(data->ed_pathbuf);
	PNBUF_PUT(data->ed_resolvedpathbuf);

	/*
	 * The old process no longer exists, so exit gracefully.
	 * Get rid of the (new) address space we have created, if any,
	 * get rid of our namei data and vnode, and exit noting failure.
	 */
	uvm_deallocate(&vm->vm_map, VM_MIN_ADDRESS,
	    VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS);
	exec_free_emul_arg(&data->ed_pack);
	pool_put(&exec_pool, data->ed_argp);
	kmem_free(data->ed_pack.ep_hdr, data->ed_pack.ep_hdrlen);
	if (data->ed_pack.ep_emul_root != NULL)
		vrele(data->ed_pack.ep_emul_root);
	if (data->ed_pack.ep_interp != NULL)
		vrele(data->ed_pack.ep_interp);

	/* Acquire the sched-state mutex (exit1() will release it). */
	if (!proc_is_new) {
		mutex_enter(p->p_lock);
		exit1(l, W_EXITCODE(error, SIGABRT));
	}

	/* NOTREACHED */
	return 0;
}

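/*
 * execve1 is the classic one-shot exec interface: it loads the new image
 * and, on success, immediately switches the calling LWP to it. As a rough,
 * illustrative sketch (not the actual syscall stub), a wrapper would invoke
 * it along the lines of:
 *
 *	error = execve1(l, path, argv, envp, execve_fetch_element);
 *
 * where execve_fetch_element copies the argument and environment pointers
 * in from user space.
 */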
int
execve1(struct lwp *l, const char *path, char * const *args,
    char * const *envs, execve_fetch_element_t fetch_element)
{
	struct execve_data data;
	int error;

	error = execve_loadvm(l, path, args, envs, fetch_element, &data);
	if (error)
		return error;
	error = execve_runproc(l, &data);
	return error;
}

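/*
 * copyargs builds the initial user stack image. Starting at *stackp it
 * lays out, in order: argc, the argv[] pointer array (NULL-terminated),
 * the envp[] pointer array (NULL-terminated), any emulation-specific
 * space (es_arglen), and finally the argument/environment strings that
 * the pointers refer to.
 */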
int
copyargs(struct lwp *l, struct exec_package *pack, struct ps_strings *arginfo,
    char **stackp, void *argp)
{
	char **cpp, *dp, *sp;
	size_t len;
	void *nullp;
	long argc, envc;
	int error;

	cpp = (char **)*stackp;
	nullp = NULL;
	argc = arginfo->ps_nargvstr;
	envc = arginfo->ps_nenvstr;
	if ((error = copyout(&argc, cpp++, sizeof(argc))) != 0) {
		COPYPRINTF("", cpp - 1, sizeof(argc));
		return error;
	}

	dp = (char *) (cpp + argc + envc + 2 + pack->ep_esch->es_arglen);
	sp = argp;

	/* XXX don't copy them out, remap them! */
	arginfo->ps_argvstr = cpp; /* remember location of argv for later */

	for (; --argc >= 0; sp += len, dp += len) {
		if ((error = copyout(&dp, cpp++, sizeof(dp))) != 0) {
			COPYPRINTF("", cpp - 1, sizeof(dp));
			return error;
		}
		if ((error = copyoutstr(sp, dp, ARG_MAX, &len)) != 0) {
			COPYPRINTF("str", dp, (size_t)ARG_MAX);
			return error;
		}
	}

	if ((error = copyout(&nullp, cpp++, sizeof(nullp))) != 0) {
		COPYPRINTF("", cpp - 1, sizeof(nullp));
		return error;
	}

	arginfo->ps_envstr = cpp; /* remember location of envp for later */

	for (; --envc >= 0; sp += len, dp += len) {
		if ((error = copyout(&dp, cpp++, sizeof(dp))) != 0) {
			COPYPRINTF("", cpp - 1, sizeof(dp));
			return error;
		}
		if ((error = copyoutstr(sp, dp, ARG_MAX, &len)) != 0) {
			COPYPRINTF("str", dp, (size_t)ARG_MAX);
			return error;
		}
	}

	if ((error = copyout(&nullp, cpp++, sizeof(nullp))) != 0) {
		COPYPRINTF("", cpp - 1, sizeof(nullp));
		return error;
	}

	*stackp = (char *)cpp;
	return 0;
}

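/*
 * exec_add() and exec_remove() below are typically used by loadable
 * emulation/executable-format modules to register and unregister their
 * execsw entries at attach and detach time.
 */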
/*
 * Add execsw[] entries.
 */
int
exec_add(struct execsw *esp, int count)
{
	struct exec_entry *it;
	int i;

	if (count == 0) {
		return 0;
	}

	/* Check for duplicates. */
	rw_enter(&exec_lock, RW_WRITER);
	for (i = 0; i < count; i++) {
		LIST_FOREACH(it, &ex_head, ex_list) {
			/* assume unique (makecmds, probe_func, emulation) */
			if (it->ex_sw->es_makecmds == esp[i].es_makecmds &&
			    it->ex_sw->u.elf_probe_func ==
			    esp[i].u.elf_probe_func &&
			    it->ex_sw->es_emul == esp[i].es_emul) {
				rw_exit(&exec_lock);
				return EEXIST;
			}
		}
	}

	/* Allocate new entries. */
	for (i = 0; i < count; i++) {
		it = kmem_alloc(sizeof(*it), KM_SLEEP);
		it->ex_sw = &esp[i];
		LIST_INSERT_HEAD(&ex_head, it, ex_list);
	}

	/* update execsw[] */
	exec_init(0);
	rw_exit(&exec_lock);
	return 0;
}

/*
 * Remove execsw[] entry.
 */
int
exec_remove(struct execsw *esp, int count)
{
	struct exec_entry *it, *next;
	int i;
	const struct proclist_desc *pd;
	proc_t *p;

	if (count == 0) {
		return 0;
	}

	/* Abort if any are busy. */
	rw_enter(&exec_lock, RW_WRITER);
	for (i = 0; i < count; i++) {
		mutex_enter(proc_lock);
		for (pd = proclists; pd->pd_list != NULL; pd++) {
			PROCLIST_FOREACH(p, pd->pd_list) {
				if (p->p_execsw == &esp[i]) {
					mutex_exit(proc_lock);
					rw_exit(&exec_lock);
					return EBUSY;
				}
			}
		}
		mutex_exit(proc_lock);
	}

	/* None are busy, so remove them all. */
	for (i = 0; i < count; i++) {
		for (it = LIST_FIRST(&ex_head); it != NULL; it = next) {
			next = LIST_NEXT(it, ex_list);
			if (it->ex_sw == &esp[i]) {
				LIST_REMOVE(it, ex_list);
				kmem_free(it, sizeof(*it));
				break;
			}
		}
	}

	/* update execsw[] */
	exec_init(0);
	rw_exit(&exec_lock);
	return 0;
}

/*
 * Initialize exec structures. If init_boot is true, also does necessary
 * one-time initialization (it's called from main() that way).
 * Once system is multiuser, this should be called with exec_lock held,
 * i.e. via exec_{add|remove}().
 */
int
exec_init(int init_boot)
{
	const struct execsw **sw;
	struct exec_entry *ex;
	SLIST_HEAD(,exec_entry) first;
	SLIST_HEAD(,exec_entry) any;
	SLIST_HEAD(,exec_entry) last;
	int i, sz;

	if (init_boot) {
		/* do one-time initializations */
		rw_init(&exec_lock);
		mutex_init(&sigobject_lock, MUTEX_DEFAULT, IPL_NONE);
		pool_init(&exec_pool, NCARGS, 0, 0, PR_NOALIGN|PR_NOTOUCH,
		    "execargs", &exec_palloc, IPL_NONE);
		pool_sethardlimit(&exec_pool, maxexec, "should not happen", 0);
	} else {
		KASSERT(rw_write_held(&exec_lock));
	}

	/* Sort each entry onto the appropriate queue. */
	SLIST_INIT(&first);
	SLIST_INIT(&any);
	SLIST_INIT(&last);
	sz = 0;
	LIST_FOREACH(ex, &ex_head, ex_list) {
		switch(ex->ex_sw->es_prio) {
		case EXECSW_PRIO_FIRST:
			SLIST_INSERT_HEAD(&first, ex, ex_slist);
			break;
		case EXECSW_PRIO_ANY:
			SLIST_INSERT_HEAD(&any, ex, ex_slist);
			break;
		case EXECSW_PRIO_LAST:
			SLIST_INSERT_HEAD(&last, ex, ex_slist);
			break;
		default:
			panic("%s", __func__);
			break;
		}
		sz++;
	}

	/*
	 * Create new execsw[]. Ensure we do not try a zero-sized
	 * allocation.
	 */
	sw = kmem_alloc(sz * sizeof(struct execsw *) + 1, KM_SLEEP);
	i = 0;
	SLIST_FOREACH(ex, &first, ex_slist) {
		sw[i++] = ex->ex_sw;
	}
	SLIST_FOREACH(ex, &any, ex_slist) {
		sw[i++] = ex->ex_sw;
	}
	SLIST_FOREACH(ex, &last, ex_slist) {
		sw[i++] = ex->ex_sw;
	}

	/* Replace old execsw[] and free used memory. */
	if (execsw != NULL) {
		kmem_free(__UNCONST(execsw),
		    nexecs * sizeof(struct execsw *) + 1);
	}
	execsw = sw;
	nexecs = sz;

	/* Figure out the maximum size of an exec header. */
	exec_maxhdrsz = sizeof(int);
	for (i = 0; i < nexecs; i++) {
		if (execsw[i]->es_hdrsz > exec_maxhdrsz)
			exec_maxhdrsz = execsw[i]->es_hdrsz;
	}

	return 0;
}

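/*
 * Map the emulation's signal trampoline (sigcode) into the process,
 * read/execute only, backed by a shared anonymous object so every
 * process using the same emulation maps the same pages.
 */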
static int
exec_sigcode_map(struct proc *p, const struct emul *e)
{
	vaddr_t va;
	vsize_t sz;
	int error;
	struct uvm_object *uobj;

	sz = (vaddr_t)e->e_esigcode - (vaddr_t)e->e_sigcode;

	if (e->e_sigobject == NULL || sz == 0) {
		return 0;
	}

	/*
	 * If we don't have a sigobject for this emulation, create one.
	 *
	 * sigobject is an anonymous memory object (just like SYSV shared
	 * memory) that we keep a permanent reference to and that we map
	 * in all processes that need this sigcode. The creation is simple,
	 * we create an object, add a permanent reference to it, map it in
	 * kernel space, copy out the sigcode to it and unmap it.
	 * We map it with PROT_READ|PROT_EXEC into the process just
	 * the way sys_mmap() would map it.
	 */

	uobj = *e->e_sigobject;
	if (uobj == NULL) {
		mutex_enter(&sigobject_lock);
		if ((uobj = *e->e_sigobject) == NULL) {
			uobj = uao_create(sz, 0);
			(*uobj->pgops->pgo_reference)(uobj);
			va = vm_map_min(kernel_map);
			if ((error = uvm_map(kernel_map, &va, round_page(sz),
			    uobj, 0, 0,
			    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
			    UVM_INH_SHARE, UVM_ADV_RANDOM, 0)))) {
				printf("kernel mapping failed %d\n", error);
				(*uobj->pgops->pgo_detach)(uobj);
				mutex_exit(&sigobject_lock);
				return (error);
			}
			memcpy((void *)va, e->e_sigcode, sz);
#ifdef PMAP_NEED_PROCWR
			pmap_procwr(&proc0, va, sz);
#endif
			uvm_unmap(kernel_map, va, va + round_page(sz));
			*e->e_sigobject = uobj;
		}
		mutex_exit(&sigobject_lock);
	}

	/* Just a hint to uvm_map where to put it. */
	va = e->e_vm_default_addr(p, (vaddr_t)p->p_vmspace->vm_daddr,
	    round_page(sz));

#ifdef __alpha__
	/*
	 * Tru64 puts /sbin/loader at the end of user virtual memory,
	 * which causes the above calculation to put the sigcode at
	 * an invalid address. Put it just below the text instead.
	 */
	if (va == (vaddr_t)vm_map_max(&p->p_vmspace->vm_map)) {
		va = (vaddr_t)p->p_vmspace->vm_taddr - round_page(sz);
	}
#endif

	(*uobj->pgops->pgo_reference)(uobj);
	error = uvm_map(&p->p_vmspace->vm_map, &va, round_page(sz),
	    uobj, 0, 0,
	    UVM_MAPFLAG(UVM_PROT_RX, UVM_PROT_RX, UVM_INH_SHARE,
	    UVM_ADV_RANDOM, 0));
	if (error) {
		DPRINTF(("%s, %d: map %p "
		    "uvm_map %#"PRIxVSIZE"@%#"PRIxVADDR" failed %d\n",
		    __func__, __LINE__, &p->p_vmspace->vm_map, round_page(sz),
		    va, error));
		(*uobj->pgops->pgo_detach)(uobj);
		return (error);
	}
	p->p_sigctx.ps_sigcode = (void *)va;
	return (0);
}

/*
 * A child lwp of a posix_spawn operation starts here and ends up in
 * cpu_spawn_return, dealing with all file descriptor and scheduler
 * manipulations in between.
 */
static void
spawn_return(void *arg)
{
	struct spawn_exec_data *spawn_data = arg;
	struct lwp *l = curlwp;
	int error, newfd;
	size_t i;
	const struct posix_spawn_file_actions_entry *fae;
	register_t retval;

	error = 0;
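	/*
	 * Each file action is applied in order, in the child: FAE_OPEN
	 * closes whatever is already at fae_fildes, opens fae_path and
	 * moves the new descriptor into that slot; FAE_DUP2 and FAE_CLOSE
	 * mirror dup2(2) and close(2).
	 */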
	/* handle posix_spawn_file_actions */
	if (spawn_data->sed_actions != NULL) {
		for (i = 0; i < spawn_data->sed_actions_len; i++) {
			fae = &spawn_data->sed_actions[i];
			switch (fae->fae_action) {
			case FAE_OPEN:
				if (fd_getfile(fae->fae_fildes) != NULL) {
					error = fd_close(fae->fae_fildes);
					if (error)
						break;
				}
				error = fd_open(fae->fae_path, fae->fae_oflag,
				    fae->fae_mode, &newfd);
				if (error)
					break;
				if (newfd != fae->fae_fildes) {
					error = dodup(l, newfd,
					    fae->fae_fildes, 0, &retval);
					if (fd_getfile(newfd) != NULL)
						fd_close(newfd);
				}
				break;
			case FAE_DUP2:
				error = dodup(l, fae->fae_fildes,
				    fae->fae_newfildes, 0, &retval);
				break;
			case FAE_CLOSE:
				if (fd_getfile(fae->fae_fildes) == NULL) {
					error = EBADF;
					break;
				}
				error = fd_close(fae->fae_fildes);
				break;
			}
			if (error)
				goto report_error;
		}
	}

	/* handle posix_spawnattr */
	if (spawn_data->sed_attrs != NULL) {
		struct sigaction sigact;
		sigact._sa_u._sa_handler = SIG_DFL;
		sigact.sa_flags = 0;

		/*
		 * set state to SSTOP so that this proc can be found by pid.
		 * see proc_enterpgrp, do_sched_setparam below
		 */
		l->l_proc->p_stat = SSTOP;

		/* Set process group */
		if (spawn_data->sed_attrs->sa_flags & POSIX_SPAWN_SETPGROUP) {
			pid_t mypid = l->l_proc->p_pid,
			    pgrp = spawn_data->sed_attrs->sa_pgroup;

			if (pgrp == 0)
				pgrp = mypid;

			error = proc_enterpgrp(spawn_data->sed_parent,
			    mypid, pgrp, false);
			if (error)
				goto report_error;
		}

		/* Set scheduler policy */
		if (spawn_data->sed_attrs->sa_flags & POSIX_SPAWN_SETSCHEDULER)
			error = do_sched_setparam(l->l_proc->p_pid, 0,
			    spawn_data->sed_attrs->sa_schedpolicy,
			    &spawn_data->sed_attrs->sa_schedparam);
		else if (spawn_data->sed_attrs->sa_flags
		    & POSIX_SPAWN_SETSCHEDPARAM) {
			error = do_sched_setparam(spawn_data->sed_parent->p_pid, 0,
			    SCHED_NONE, &spawn_data->sed_attrs->sa_schedparam);
		}
		if (error)
			goto report_error;

		/* Reset user ID's */
		if (spawn_data->sed_attrs->sa_flags & POSIX_SPAWN_RESETIDS) {
			error = do_setresuid(l, -1,
			    kauth_cred_getgid(l->l_cred), -1,
			    ID_E_EQ_R | ID_E_EQ_S);
			if (error)
				goto report_error;
			error = do_setresuid(l, -1,
			    kauth_cred_getuid(l->l_cred), -1,
			    ID_E_EQ_R | ID_E_EQ_S);
			if (error)
				goto report_error;
		}

		/* Set signal masks/defaults */
		if (spawn_data->sed_attrs->sa_flags & POSIX_SPAWN_SETSIGMASK) {
			mutex_enter(l->l_proc->p_lock);
			error = sigprocmask1(l, SIG_SETMASK,
			    &spawn_data->sed_attrs->sa_sigmask, NULL);
			mutex_exit(l->l_proc->p_lock);
			if (error)
				goto report_error;
		}

		if (spawn_data->sed_attrs->sa_flags & POSIX_SPAWN_SETSIGDEF) {
			for (i = 1; i <= NSIG; i++) {
				if (sigismember(
				    &spawn_data->sed_attrs->sa_sigdefault, i))
					sigaction1(l, i, &sigact, NULL, NULL,
					    0);
			}
		}
	}

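	/*
	 * The path strings for FAE_OPEN entries were copied in by
	 * sys_posix_spawn(); free them now that all actions have been
	 * applied.
	 */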
	if (spawn_data->sed_actions != NULL) {
		for (i = 0; i < spawn_data->sed_actions_len; i++) {
			fae = &spawn_data->sed_actions[i];
			if (fae->fae_action == FAE_OPEN)
				kmem_free(fae->fae_path,
				    strlen(fae->fae_path)+1);
		}
	}

	/* now do the real exec */
	rw_enter(&exec_lock, RW_READER);
	error = execve_runproc(l, &spawn_data->sed_exec);
	if (error == EJUSTRETURN)
		error = 0;
	else if (error)
		goto report_error;

	/* done, signal parent */
	mutex_enter(&spawn_data->sed_mtx_child);
	cv_signal(&spawn_data->sed_cv_child_ready);
	mutex_exit(&spawn_data->sed_mtx_child);

	/* and finally: leave to userland for the first time */
	cpu_spawn_return(l);

	/* NOTREACHED */
	return;

 report_error:
	if (spawn_data->sed_actions != NULL) {
		for (i = 0; i < spawn_data->sed_actions_len; i++) {
			fae = &spawn_data->sed_actions[i];
			if (fae->fae_action == FAE_OPEN)
				kmem_free(fae->fae_path,
				    strlen(fae->fae_path)+1);
		}
	}

	/*
	 * Set error value for parent to pick up (and take over ownership
	 * of spawn_data again), signal parent and exit this process.
	 */
	mutex_enter(&spawn_data->sed_mtx_child);
	spawn_data->sed_error = error;
	cv_signal(&spawn_data->sed_cv_child_ready);
	mutex_exit(&spawn_data->sed_mtx_child);
	mutex_enter(l->l_proc->p_lock);
	exit1(l, W_EXITCODE(error, SIGABRT));
}

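/*
 * sys_posix_spawn: the parent copies in the file actions and attributes,
 * runs execve_loadvm() on its own behalf to stage the new image, then
 * creates the child process and LWP. The child finishes the job in
 * spawn_return() above (file actions, attributes, execve_runproc()) and
 * reports success or failure back through spawn_data before the parent
 * returns.
 */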
int
sys_posix_spawn(struct lwp *l1, const struct sys_posix_spawn_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(pid_t *) pid;
		syscallarg(const char *) path;
		syscallarg(const struct posix_spawn_file_actions *) file_actions;
		syscallarg(const struct posix_spawnattr *) attrp;
		syscallarg(char *const *) argv;
		syscallarg(char *const *) envp;
	} */

	struct proc *p1, *p2;
	struct plimit *p1_lim;
	struct lwp *l2;
	int error = 0, tnprocs, count, i;
	struct posix_spawn_file_actions *fa = NULL;
	struct posix_spawnattr *sa = NULL;
	struct posix_spawn_file_actions_entry *ufa;
	struct spawn_exec_data *spawn_data;
	uid_t uid;
	vaddr_t uaddr;
	pid_t pid;
	bool have_exec_lock = false;

	p1 = l1->l_proc;
	uid = kauth_cred_getuid(l1->l_cred);
	tnprocs = atomic_inc_uint_nv(&nprocs);

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.
	 */
	if (__predict_false(tnprocs >= maxproc))
		error = -1;
	else
		error = kauth_authorize_process(l1->l_cred,
		    KAUTH_PROCESS_FORK, p1, KAUTH_ARG(tnprocs), NULL, NULL);

	if (error) {
		atomic_dec_uint(&nprocs);
		*retval = EAGAIN;
		return 0;
	}

	/*
	 * Enforce limits.
	 */
	count = chgproccnt(uid, 1);
	if (kauth_authorize_generic(l1->l_cred, KAUTH_GENERIC_ISSUSER, NULL) !=
	    0 && __predict_false(count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur)) {
		error = EAGAIN;
		goto error_exit;
	}

	/* copy in file_actions struct */
	if (SCARG(uap, file_actions) != NULL) {
		fa = kmem_alloc(sizeof(struct posix_spawn_file_actions),
		    KM_SLEEP);
		error = copyin(SCARG(uap, file_actions), fa,
		    sizeof(struct posix_spawn_file_actions));
		if (error) {
			fa->fae = NULL;
			goto error_exit;
		}
		ufa = fa->fae;
		fa->fae = kmem_alloc(fa->len *
		    sizeof(struct posix_spawn_file_actions_entry), KM_SLEEP);
		error = copyin(ufa, fa->fae,
		    fa->len * sizeof(struct posix_spawn_file_actions_entry));
		if (error)
			goto error_exit;
		for (i = 0; i < fa->len; i++) {
			if (fa->fae[i].fae_action == FAE_OPEN) {
				char buf[PATH_MAX];
				error = copyinstr(fa->fae[i].fae_path, buf,
				    sizeof(buf), NULL);
				if (error)
					break;
				fa->fae[i].fae_path = kmem_alloc(strlen(buf)+1,
				    KM_SLEEP);
				if (fa->fae[i].fae_path == NULL) {
					error = ENOMEM;
					break;
				}
				strcpy(fa->fae[i].fae_path, buf);
			}
		}
	}

	/* copyin posix_spawnattr struct */
	sa = NULL;
	if (SCARG(uap, attrp) != NULL) {
		sa = kmem_alloc(sizeof(struct posix_spawnattr), KM_SLEEP);
		error = copyin(SCARG(uap, attrp), sa,
		    sizeof(struct posix_spawnattr));
		if (error)
			goto error_exit;
	}

	/*
	 * Do the first part of the exec now, collect state
	 * in spawn_data.
	 */
	spawn_data = kmem_zalloc(sizeof(*spawn_data), KM_SLEEP);
	error = execve_loadvm(l1, SCARG(uap, path), SCARG(uap, argv),
	    SCARG(uap, envp), execve_fetch_element, &spawn_data->sed_exec);
	if (error == EJUSTRETURN)
		error = 0;
	else if (error)
		goto error_exit;

	have_exec_lock = true;

	/*
	 * Allocate virtual address space for the U-area now, while it
	 * is still easy to abort the fork operation if we're out of
	 * kernel virtual address space.
	 */
	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0)) {
		error = ENOMEM;
		goto error_exit;
	}

	/*
	 * Allocate new proc. Leave its p_vmspace NULL for now.
	 * This is a point of no return, we will have to go through
	 * the child proc to properly clean it up past this point.
	 */
	p2 = proc_alloc();
	pid = p2->p_pid;

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	memset(&p2->p_startzero, 0,
	    (unsigned) ((char *)&p2->p_endzero - (char *)&p2->p_startzero));
	memcpy(&p2->p_startcopy, &p1->p_startcopy,
	    (unsigned) ((char *)&p2->p_endcopy - (char *)&p2->p_startcopy));
	p2->p_vmspace = NULL;

	CIRCLEQ_INIT(&p2->p_sigpend.sp_info);

	LIST_INIT(&p2->p_lwps);
	LIST_INIT(&p2->p_sigwaiters);

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 * Inherit flags we want to keep. The flags related to SIGCHLD
	 * handling are important in order to keep a consistent behaviour
	 * for the child after the fork. If we are a 32-bit process, the
	 * child will be too.
	 */
	p2->p_flag =
	    p1->p_flag & (PK_SUGID | PK_NOCLDWAIT | PK_CLDSIGIGN | PK_32);
	p2->p_emul = p1->p_emul;
	p2->p_execsw = p1->p_execsw;

	mutex_init(&p2->p_stmutex, MUTEX_DEFAULT, IPL_HIGH);
	mutex_init(&p2->p_auxlock, MUTEX_DEFAULT, IPL_NONE);
	rw_init(&p2->p_reflock);
	cv_init(&p2->p_waitcv, "wait");
	cv_init(&p2->p_lwpcv, "lwpwait");

	p2->p_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

	kauth_proc_fork(p1, p2);

	p2->p_raslist = NULL;
	p2->p_fd = fd_copy();

	/* XXX racy */
	p2->p_mqueue_cnt = p1->p_mqueue_cnt;

	p2->p_cwdi = cwdinit();

	/*
	 * Note: p_limit (rlimit stuff) is copy-on-write, so normally
	 * we just need to increase pl_refcnt.
	 */
	p1_lim = p1->p_limit;
	if (!p1_lim->pl_writeable) {
		lim_addref(p1_lim);
		p2->p_limit = p1_lim;
	} else {
		p2->p_limit = lim_copy(p1->p_limit);
	}

	p2->p_lflag = 0;
	p2->p_sflag = 0;
	p2->p_slflag = 0;
	p2->p_pptr = p1;
	p2->p_ppid = p1->p_pid;
	LIST_INIT(&p2->p_children);

	p2->p_aio = NULL;

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		mutex_enter(&ktrace_lock);
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracep = p1->p_tracep) != NULL)
			ktradref(p2);
		mutex_exit(&ktrace_lock);
	}
#endif

	/*
	 * Create signal actions for the child process.
	 */
	p2->p_sigacts = sigactsinit(p1, 0);
	mutex_enter(p1->p_lock);
	p2->p_sflag |=
	    (p1->p_sflag & (PS_STOPFORK | PS_STOPEXEC | PS_NOCLDSTOP));
	sched_proc_fork(p1, p2);
	mutex_exit(p1->p_lock);

	p2->p_stflag = p1->p_stflag;

	/*
	 * p_stats.
	 * Copy parts of p_stats, and zero out the rest.
	 */
	p2->p_stats = pstatscopy(p1->p_stats);

	/* copy over machdep flags to the new proc */
	cpu_proc_fork(p1, p2);

	/*
	 * Prepare remaining parts of spawn data
	 */
	if (fa != NULL) {
		spawn_data->sed_actions_len = fa->len;
		spawn_data->sed_actions = fa->fae;
		kmem_free(fa, sizeof(*fa));
		fa = NULL;
	}
	if (sa != NULL) {
		spawn_data->sed_attrs = sa;
		sa = NULL;
	}

	spawn_data->sed_parent = p1;
	cv_init(&spawn_data->sed_cv_child_ready, "pspawn");
	mutex_init(&spawn_data->sed_mtx_child, MUTEX_DEFAULT, IPL_NONE);
	mutex_enter(&spawn_data->sed_mtx_child);

	/* create LWP */
	lwp_create(l1, p2, uaddr, 0, NULL, 0, spawn_return, spawn_data,
	    &l2, l1->l_class);
	l2->l_ctxlink = NULL;	/* reset ucontext link */

	/*
	 * Copy the credential so other references don't see our changes.
	 * Test to see if this is necessary first, since in the common case
	 * we won't need a private reference.
	 */
	if (kauth_cred_geteuid(l2->l_cred) != kauth_cred_getsvuid(l2->l_cred) ||
	    kauth_cred_getegid(l2->l_cred) != kauth_cred_getsvgid(l2->l_cred)) {
		l2->l_cred = kauth_cred_copy(l2->l_cred);
		kauth_cred_setsvuid(l2->l_cred, kauth_cred_geteuid(l2->l_cred));
		kauth_cred_setsvgid(l2->l_cred, kauth_cred_getegid(l2->l_cred));
	}

	/* Update the master credentials. */
	if (l2->l_cred != p2->p_cred) {
		kauth_cred_t ocred;

		kauth_cred_hold(l2->l_cred);
		mutex_enter(p2->p_lock);
		ocred = p2->p_cred;
		p2->p_cred = l2->l_cred;
		mutex_exit(p2->p_lock);
		kauth_cred_free(ocred);
	}

	/*
	 * It's now safe for the scheduler and other processes to see the
	 * child process.
	 */
	mutex_enter(proc_lock);

	if (p1->p_session->s_ttyvp != NULL && p1->p_lflag & PL_CONTROLT)
		p2->p_lflag |= PL_CONTROLT;

	LIST_INSERT_HEAD(&p1->p_children, p2, p_sibling);
	p2->p_exitsig = SIGCHLD;	/* signal for parent on exit */

	LIST_INSERT_AFTER(p1, p2, p_pglist);
	LIST_INSERT_HEAD(&allproc, p2, p_list);

	p2->p_trace_enabled = trace_is_enabled(p2);
#ifdef __HAVE_SYSCALL_INTERN
	(*p2->p_emul->e_syscall_intern)(p2);
#endif
	rw_exit(&p1->p_reflock);

	/*
	 * Make child runnable, set start time, and add to run queue except
	 * if the parent requested the child to start in SSTOP state.
	 */
	mutex_enter(p2->p_lock);

	getmicrotime(&p2->p_stats->p_start);

	lwp_lock(l2);
	KASSERT(p2->p_nrlwps == 1);
	p2->p_nrlwps = 1;
	p2->p_stat = SACTIVE;
	l2->l_stat = LSRUN;
	sched_enqueue(l2, false);
	lwp_unlock(l2);

	mutex_exit(p2->p_lock);
	mutex_exit(proc_lock);

	cv_wait(&spawn_data->sed_cv_child_ready, &spawn_data->sed_mtx_child);
	mutex_exit(&spawn_data->sed_mtx_child);
	error = spawn_data->sed_error;

	rw_exit(&exec_lock);
	have_exec_lock = false;

	if (spawn_data->sed_actions != NULL)
		kmem_free(spawn_data->sed_actions,
		    spawn_data->sed_actions_len * sizeof(*spawn_data->sed_actions));

	if (spawn_data->sed_attrs != NULL)
		kmem_free(spawn_data->sed_attrs, sizeof(*spawn_data->sed_attrs));

	cv_destroy(&spawn_data->sed_cv_child_ready);
	mutex_destroy(&spawn_data->sed_mtx_child);

	kmem_free(spawn_data, sizeof(*spawn_data));

	if (error == 0 && SCARG(uap, pid) != NULL)
		error = copyout(&pid, SCARG(uap, pid), sizeof(pid));

	*retval = error;
	return 0;

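	/*
	 * error_exit is reached only before the child process has been
	 * created: drop the exec lock if we hold it, free anything copied
	 * in so far and undo the process accounting.
	 */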
 error_exit:
	if (have_exec_lock)
		rw_exit(&exec_lock);

	if (fa != NULL) {
		if (fa->fae != NULL)
			kmem_free(fa->fae, fa->len * sizeof(*fa->fae));
		kmem_free(fa, sizeof(*fa));
	}

	if (sa != NULL)
		kmem_free(sa, sizeof(*sa));

	(void)chgproccnt(uid, -1);
	atomic_dec_uint(&nprocs);

	*retval = error;
	return 0;
}

void
exec_free_emul_arg(struct exec_package *epp)
{
	if (epp->ep_emul_arg_free != NULL) {
		KASSERT(epp->ep_emul_arg != NULL);
		(*epp->ep_emul_arg_free)(epp->ep_emul_arg);
		epp->ep_emul_arg_free = NULL;
		epp->ep_emul_arg = NULL;
	} else {
		KASSERT(epp->ep_emul_arg == NULL);
	}
}