configure: Make NPTL non-optional
Now that all linux-user targets support building with NPTL, we can make it
mandatory. This is a good idea because:
 * NPTL is no longer new and experimental; it is completely standard
 * in practice, linux-user without NPTL is nearly useless for binaries built
   against a non-ancient glibc
 * it allows us to delete the rather untested code for handling the non-NPTL
   configuration

Note that this patch leaves the CONFIG_USE_NPTL ifdefs in the bsd-user
codebase alone. This makes no change for bsd-user: our configure test for
NPTL had a "#include <linux/futex.h>", which means bsd-user would never have
been compiled with CONFIG_USE_NPTL defined, and it still is not.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Riku Voipio <riku.voipio@linaro.org>
parent 2667e71c3d
commit 24cb36a61c
@@ -155,7 +155,6 @@ curl=""
curses=""
docs=""
fdt=""
nptl=""
pixman=""
sdl=""
virtfs=""
@@ -855,10 +854,6 @@ for opt do
;;
--enable-fdt) fdt="yes"
;;
--disable-nptl) nptl="no"
;;
--enable-nptl) nptl="yes"
;;
--enable-mixemu) mixemu="yes"
;;
--disable-linux-aio) linux_aio="no"
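The hunk above drops the --disable-nptl/--enable-nptl option parsing. As a
hedged sketch of the practical effect (the target list below is only an
example, not taken from the patch), configure invocations change roughly
like this:

    # Before this patch, NPTL could be switched off explicitly for linux-user:
    #   ./configure --target-list=arm-linux-user --disable-nptl
    # After it, the options are gone and the NPTL probe further down must
    # succeed for any linux-user build:
    ./configure --target-list=arm-linux-user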
@@ -1096,8 +1091,6 @@ echo " --disable-slirp disable SLIRP userspace network connectivity"
echo " --disable-kvm disable KVM acceleration support"
echo " --enable-kvm enable KVM acceleration support"
echo " --enable-tcg-interpreter enable TCG with bytecode interpreter (TCI)"
echo " --disable-nptl disable usermode NPTL support"
echo " --enable-nptl enable usermode NPTL support"
echo " --enable-system enable all system emulation targets"
echo " --disable-system disable all system emulation targets"
echo " --enable-user enable supported user emulation targets"
@@ -1432,7 +1425,7 @@ fi
##########################################
# NPTL probe

if test "$nptl" != "no" ; then
if test "$linux_user" = "yes"; then
cat > $TMPC <<EOF
#include <sched.h>
#include <linux/futex.h>
@@ -1443,15 +1436,9 @@ int main(void) {
return 0;
}
EOF

if compile_object ; then
nptl=yes
else
if test "$nptl" = "yes" ; then
if ! compile_object ; then
feature_not_found "nptl"
fi
nptl=no
fi
fi

##########################################
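Read as a diff, the two hunks above replace the `"$nptl" != "no"` guard with
a check for linux_user and collapse the compile check. A hedged
reconstruction of how the probe reads after this patch (the test program's
body between the includes and the return is not visible in these hunks and
is elided here):

    if test "$linux_user" = "yes"; then
      cat > $TMPC <<EOF
    #include <sched.h>
    #include <linux/futex.h>
    int main(void) {
        /* feature checks elided; not shown in the hunks above */
        return 0;
    }
    EOF
      if ! compile_object ; then
        feature_not_found "nptl"
      fi
    fi

So a failed probe now reports feature_not_found "nptl" for linux-user builds
instead of silently falling back to nptl=no.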
@@ -3550,7 +3537,6 @@ echo "bluez support $bluez"
echo "Documentation $docs"
[ ! -z "$uname_release" ] && \
echo "uname -r $uname_release"
echo "NPTL support $nptl"
echo "GUEST_BASE $guest_base"
echo "PIE $pie"
echo "vde support $vde"
@@ -4180,7 +4166,6 @@ mkdir -p $target_dir
echo "# Automatically generated by configure - do not modify" > $config_target_mak

bflt="no"
target_nptl="yes"
interp_prefix1=`echo "$interp_prefix" | sed "s/%M/$target_name/g"`
gdb_xml_files=""

@@ -4351,10 +4336,6 @@ fi
if test "$target_user_only" = "yes" -a "$bflt" = "yes"; then
echo "TARGET_HAS_BFLT=y" >> $config_target_mak
fi
if test "$target_user_only" = "yes" \
-a "$nptl" = "yes" -a "$target_nptl" = "yes"; then
echo "CONFIG_USE_NPTL=y" >> $config_target_mak
fi
if test "$target_user_only" = "yes" -a "$guest_base" = "yes"; then
echo "CONFIG_USE_GUEST_BASE=y" >> $config_target_mak
fi
@@ -32,7 +32,7 @@ void gdb_register_coprocessor(CPUArchState *env,

static inline int cpu_index(CPUState *cpu)
{
#if defined(CONFIG_USER_ONLY) && defined(CONFIG_USE_NPTL)
#if defined(CONFIG_USER_ONLY)
return cpu->host_tid;
#else
return cpu->cpu_index + 1;
@@ -92,7 +92,6 @@ int cpu_get_pic_interrupt(CPUX86State *env)
}
#endif

#if defined(CONFIG_USE_NPTL)
/***********************************************************/
/* Helper routines for implementing atomic operations. */

@@ -207,43 +206,6 @@ void cpu_list_unlock(void)
{
pthread_mutex_unlock(&cpu_list_mutex);
}
#else /* if !CONFIG_USE_NPTL */
/* These are no-ops because we are not threadsafe. */
static inline void cpu_exec_start(CPUState *cpu)
{
}

static inline void cpu_exec_end(CPUState *cpu)
{
}

static inline void start_exclusive(void)
{
}

static inline void end_exclusive(void)
{
}

void fork_start(void)
{
}

void fork_end(int child)
{
if (child) {
gdbserver_fork((CPUArchState *)thread_cpu->env_ptr);
}
}

void cpu_list_lock(void)
{
}

void cpu_list_unlock(void)
{
}
#endif


#ifdef TARGET_I386
@@ -3156,12 +3118,7 @@ THREAD CPUState *thread_cpu;
void task_settid(TaskState *ts)
{
if (ts->ts_tid == 0) {
#ifdef CONFIG_USE_NPTL
ts->ts_tid = (pid_t)syscall(SYS_gettid);
#else
/* when no threads are used, tid becomes pid */
ts->ts_tid = getpid();
#endif
}
}
@@ -33,7 +33,6 @@

//#define DEBUG_MMAP

#if defined(CONFIG_USE_NPTL)
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

@@ -66,16 +65,6 @@ void mmap_fork_end(int child)
else
pthread_mutex_unlock(&mmap_mutex);
}
#else
/* We aren't threadsafe to start with, so no need to worry about locking. */
void mmap_lock(void)
{
}

void mmap_unlock(void)
{
}
#endif

/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
@@ -19,11 +19,7 @@
#include "exec/gdbstub.h"
#include "qemu/queue.h"

#if defined(CONFIG_USE_NPTL)
#define THREAD __thread
#else
#define THREAD
#endif

/* This struct is used to hold certain information about the image.
 * Basically, it replicates in user space what would be certain
@@ -116,9 +112,7 @@ typedef struct TaskState {
uint32_t v86flags;
uint32_t v86mask;
#endif
#ifdef CONFIG_USE_NPTL
abi_ulong child_tidptr;
#endif
#ifdef TARGET_M68K
int sim_syscalls;
abi_ulong tp_value;
@@ -268,10 +262,8 @@ void mmap_unlock(void);
abi_ulong mmap_find_vma(abi_ulong, abi_ulong);
void cpu_list_lock(void);
void cpu_list_unlock(void);
#if defined(CONFIG_USE_NPTL)
void mmap_fork_start(void);
void mmap_fork_end(int child);
#endif

/* main.c */
extern unsigned long guest_stack_size;
@@ -449,9 +441,7 @@ static inline void *lock_user_string(abi_ulong guest_addr)
#define unlock_user_struct(host_ptr, guest_addr, copy) \
unlock_user(host_ptr, guest_addr, (copy) ? sizeof(*host_ptr) : 0)

#if defined(CONFIG_USE_NPTL)
#include <pthread.h>
#endif

/* Include target-specific struct and function definitions;
 * they may need access to the target-independent structures
@@ -111,13 +111,8 @@ int __clone2(int (*fn)(void *), void *child_stack_base,

#include "qemu.h"

#if defined(CONFIG_USE_NPTL)
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
#else
/* XXX: Hardcode the above values. */
#define CLONE_NPTL_FLAGS2 0
#endif

//#define DEBUG

@@ -234,12 +229,10 @@ _syscall1(int,exit_group,int,error_code)
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(CONFIG_USE_NPTL)
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
unsigned long *, user_mask_ptr);
@@ -4227,7 +4220,6 @@ abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)

#define NEW_STACK_SIZE 0x40000

#if defined(CONFIG_USE_NPTL)

static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
typedef struct {
@@ -4272,16 +4264,6 @@ static void *clone_func(void *arg)
/* never exits */
return NULL;
}
#else

static int clone_func(void *arg)
{
CPUArchState *env = arg;
cpu_loop(env);
/* never exits */
return 0;
}
#endif

/* do_fork() Must return host values and target errnos (unlike most
do_*() functions). */
@@ -4292,12 +4274,8 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
int ret;
TaskState *ts;
CPUArchState *new_env;
#if defined(CONFIG_USE_NPTL)
unsigned int nptl_flags;
sigset_t sigmask;
#else
uint8_t *new_stack;
#endif

/* Emulate vfork() with fork() */
if (flags & CLONE_VFORK)
@@ -4305,10 +4283,9 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,

if (flags & CLONE_VM) {
TaskState *parent_ts = (TaskState *)env->opaque;
#if defined(CONFIG_USE_NPTL)
new_thread_info info;
pthread_attr_t attr;
#endif

ts = g_malloc0(sizeof(TaskState));
init_task_state(ts);
/* we create a new CPU instance. */
@@ -4321,7 +4298,6 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
new_env->opaque = ts;
ts->bprm = parent_ts->bprm;
ts->info = parent_ts->info;
#if defined(CONFIG_USE_NPTL)
nptl_flags = flags;
flags &= ~CLONE_NPTL_FLAGS2;

@@ -4371,17 +4347,6 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
pthread_cond_destroy(&info.cond);
pthread_mutex_destroy(&info.mutex);
pthread_mutex_unlock(&clone_lock);
#else
if (flags & CLONE_NPTL_FLAGS2)
return -EINVAL;
/* This is probably going to die very quickly, but do it anyway. */
new_stack = g_malloc0 (NEW_STACK_SIZE);
#ifdef __ia64__
ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
#else
ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
#endif
#endif
} else {
/* if no CLONE_VM, we consider it is a fork */
if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
@@ -4392,7 +4357,6 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
/* Child Process. */
cpu_clone_regs(env, newsp);
fork_end(1);
#if defined(CONFIG_USE_NPTL)
/* There is a race condition here. The parent process could
theoretically read the TID in the child process before the child
tid is set. This would require using either ptrace
@@ -4408,7 +4372,6 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
cpu_set_tls (env, newtls);
if (flags & CLONE_CHILD_CLEARTID)
ts->child_tidptr = child_tidptr;
#endif
} else {
fork_end(0);
}
@@ -4834,7 +4797,6 @@ static inline abi_long host_to_target_stat64(void *cpu_env,
}
#endif

#if defined(CONFIG_USE_NPTL)
/* ??? Using host futex calls even when target atomic operations
are not really atomic probably breaks things. However implementing
futexes locally would make futexes shared between multiple processes
@@ -4886,7 +4848,6 @@ static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
return -TARGET_ENOSYS;
}
}
#endif

/* Map host to target signal numbers for the wait family of syscalls.
Assume all other status bits are the same. */
@@ -5132,9 +5093,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
abi_long arg5, abi_long arg6, abi_long arg7,
abi_long arg8)
{
#ifdef CONFIG_USE_NPTL
CPUState *cpu = ENV_GET_CPU(cpu_env);
#endif
abi_long ret;
struct stat st;
struct statfs stfs;
@@ -5148,7 +5107,6 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,

switch(num) {
case TARGET_NR_exit:
#ifdef CONFIG_USE_NPTL
/* In old applications this may be used to implement _exit(2).
However in threaded applictions it is used for thread termination,
and _exit_group is used for application termination.
@@ -5186,7 +5144,6 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
g_free(ts);
pthread_exit(NULL);
}
#endif
#ifdef TARGET_GPROF
_mcleanup();
#endif
@@ -8687,11 +8644,9 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
}
break;
#endif
#if defined(CONFIG_USE_NPTL)
case TARGET_NR_futex:
ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
break;
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
case TARGET_NR_inotify_init:
ret = get_errno(sys_inotify_init());
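A hedged way to double-check the stated intent after applying the patch (a
suggestion, not part of the patch itself): the commit message says the
bsd-user ifdefs are deliberately left alone, so listing the remaining
references to the symbol should show only bsd-user hits.

    # where is CONFIG_USE_NPTL still referenced after this change?
    grep -rn CONFIG_USE_NPTL .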