#ifndef QEMU_H
#define QEMU_H

#include "hostdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#undef DEBUG_REMAP

#include "exec/user/abitypes.h"

#include "exec/user/thunk.h"
#include "syscall_defs.h"
#include "target_syscall.h"
#include "exec/gdbstub.h"

/*
 * This is the size of the host kernel's sigset_t, needed where we make
 * direct system calls that take a sigset_t pointer and a size.
 */
#define SIGSET_T_SIZE (_NSIG / 8)
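
/*
 * Example (sketch): when bypassing libc and invoking such a syscall
 * directly, SIGSET_T_SIZE supplies the kernel's "sigsetsize" argument,
 * e.g. with the raw host rt_sigprocmask syscall:
 *
 *     sigset_t set;
 *     sigfillset(&set);
 *     syscall(SYS_rt_sigprocmask, SIG_SETMASK, &set, NULL, SIGSET_T_SIZE);
 */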

/*
 * This struct is used to hold certain information about the image.
 * Basically, it replicates in user space what would be certain
 * task_struct fields in the kernel.
 */
struct image_info {
    abi_ulong load_bias;
    abi_ulong load_addr;
    abi_ulong start_code;
    abi_ulong end_code;
    abi_ulong start_data;
    abi_ulong end_data;
    abi_ulong start_brk;
    abi_ulong brk;
    abi_ulong reserve_brk;
    abi_ulong start_mmap;
    abi_ulong start_stack;
    abi_ulong stack_limit;
    abi_ulong entry;
    abi_ulong code_offset;
    abi_ulong data_offset;
    abi_ulong saved_auxv;
    abi_ulong auxv_len;
    abi_ulong arg_start;
    abi_ulong arg_end;
    abi_ulong arg_strings;
    abi_ulong env_strings;
    abi_ulong file_string;
    uint32_t elf_flags;
    int personality;
    abi_ulong alignment;

    /* The fields below are used in FDPIC mode. */
    abi_ulong loadmap_addr;
    uint16_t nsegs;
    void *loadsegs;
    abi_ulong pt_dynamic_addr;
    abi_ulong interpreter_loadmap_addr;
    abi_ulong interpreter_pt_dynamic_addr;
    struct image_info *other_info;

    /* For target-specific processing of NT_GNU_PROPERTY_TYPE_0. */
    uint32_t note_flags;

#ifdef TARGET_MIPS
    int fp_abi;
    int interp_fp_abi;
#endif
};

#ifdef TARGET_I386
/* Information about the current linux thread */
struct vm86_saved_state {
    uint32_t eax; /* return code */
    uint32_t ebx;
    uint32_t ecx;
    uint32_t edx;
    uint32_t esi;
    uint32_t edi;
    uint32_t ebp;
    uint32_t esp;
    uint32_t eflags;
    uint32_t eip;
    uint16_t cs, ss, ds, es, fs, gs;
};
#endif

#if defined(TARGET_ARM) && defined(TARGET_ABI32)
/* FPU emulator */
#include "nwfpe/fpa11.h"
#endif

#define MAX_SIGQUEUE_SIZE 1024

struct emulated_sigtable {
    int pending; /* true if signal is pending */
    target_siginfo_t info;
};

/*
 * NOTE: we force a big alignment so that the stack stored after is
 * aligned too
 */
typedef struct TaskState {
    pid_t ts_tid;     /* tid (or pid) of this task */
#ifdef TARGET_ARM
# ifdef TARGET_ABI32
    /* FPA state */
    FPA11 fpa;
# endif
#endif
#if defined(TARGET_ARM) || defined(TARGET_RISCV)
    int swi_errno;
#endif
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
    abi_ulong target_v86;
    struct vm86_saved_state vm86_saved_regs;
    struct target_vm86plus_struct vm86plus;
    uint32_t v86flags;
    uint32_t v86mask;
#endif
    abi_ulong child_tidptr;
#ifdef TARGET_M68K
    abi_ulong tp_value;
#endif
#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_RISCV)
    /* Extra fields for semihosted binaries. */
    abi_ulong heap_base;
    abi_ulong heap_limit;
#endif
    abi_ulong stack_base;
    int used; /* non zero if used */
    struct image_info *info;
    struct linux_binprm *bprm;

    struct emulated_sigtable sync_signal;
    struct emulated_sigtable sigtab[TARGET_NSIG];
    /*
     * This thread's signal mask, as requested by the guest program.
     * The actual signal mask of this thread may differ:
     *  + we don't let SIGSEGV and SIGBUS be blocked while running guest code
     *  + sometimes we block all signals to avoid races
     */
    sigset_t signal_mask;
    /*
     * The signal mask imposed by a guest sigsuspend syscall, if we are
     * currently in the middle of such a syscall
     */
    sigset_t sigsuspend_mask;
    /* Nonzero if we're leaving a sigsuspend and sigsuspend_mask is valid. */
    int in_sigsuspend;

    /*
     * Nonzero if process_pending_signals() needs to do something (either
     * handle a pending signal or unblock signals).
     * This flag is written from a signal handler so should be accessed via
     * the qatomic_read() and qatomic_set() functions. (It is not accessed
     * from multiple threads.)
     */
    int signal_pending;

    /* This thread's sigaltstack, if it has one */
    struct target_sigaltstack sigaltstack_used;
} __attribute__((aligned(16))) TaskState;

extern char *exec_path;
void init_task_state(TaskState *ts);
void task_settid(TaskState *);
void stop_all_tasks(void);
extern const char *qemu_uname_release;
extern unsigned long mmap_min_addr;

/* ??? See if we can avoid exposing so much of the loader internals. */

/*
 * Read a good amount of data initially, to hopefully get all the
 * program headers loaded.
 */
#define BPRM_BUF_SIZE  1024

/*
 * This structure is used to hold the arguments that are
 * used when loading binaries.
 */
struct linux_binprm {
    char buf[BPRM_BUF_SIZE] __attribute__((aligned));
    abi_ulong p;
    int fd;
    int e_uid, e_gid;
    int argc, envc;
    char **argv;
    char **envp;
    char *filename;        /* Name of binary */
    int (*core_dump)(int, const CPUArchState *); /* coredump routine */
};

/*
 * These definitions are shared between syscall.c (which implements the
 * ioctl emulation) and strace.c, so that "-strace" can print the request
 * name and decode the contents of the third ioctl argument using the
 * arg_type description.
 */
typedef struct IOCTLEntry IOCTLEntry;

typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

struct IOCTLEntry {
    int target_cmd;
    unsigned int host_cmd;
    const char *name;
    int access;
    do_ioctl_fn *do_ioctl;
    const argtype arg_type[5];
};

extern IOCTLEntry ioctl_entries[];

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)
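
/*
 * Example (sketch, not the exact strace.c code; it assumes the table is
 * terminated by an entry with target_cmd == 0): looking up the entry for
 * a target ioctl request so its name and argument layout can be printed:
 *
 *     const IOCTLEntry *ie;
 *     for (ie = ioctl_entries; ie->target_cmd != 0; ie++) {
 *         if (ie->target_cmd == cmd) {
 *             break;   // ie->name, ie->access and ie->arg_type describe it
 *         }
 *     }
 */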

void do_init_thread(struct target_pt_regs *regs, struct image_info *infop);
abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp,
                              abi_ulong stringp, int push_ptr);
int loader_exec(int fdexec, const char *filename, char **argv, char **envp,
                struct target_pt_regs *regs, struct image_info *infop,
                struct linux_binprm *);

/*
 * Returns true if the image uses the FDPIC ABI. If this is the case,
 * we have to provide some information (loadmap, pt_dynamic_info) such
 * that the program can be relocated adequately. This is also useful
 * when handling signals.
 */
int info_is_fdpic(struct image_info *info);

uint32_t get_elf_eflags(int fd);
int load_elf_binary(struct linux_binprm *bprm, struct image_info *info);
int load_flt_binary(struct linux_binprm *bprm, struct image_info *info);

abi_long memcpy_to_target(abi_ulong dest, const void *src,
                          unsigned long len);
void target_set_brk(abi_ulong new_brk);
abi_long do_brk(abi_ulong new_brk);
void syscall_init(void);
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8);
extern __thread CPUState *thread_cpu;
void cpu_loop(CPUArchState *env);
const char *target_strerror(int err);
int get_osversion(void);
void init_qemu_uname_release(void);
void fork_start(void);
void fork_end(int child);

/**
 * probe_guest_base:
 * @image_name: the executable being loaded
 * @loaddr: the lowest fixed address in the executable
 * @hiaddr: the highest fixed address in the executable
 *
 * Creates the initial guest address space in the host memory space.
 *
 * If @loaddr == 0, then no address in the executable is fixed,
 * i.e. it is fully relocatable. In that case @hiaddr is the size
 * of the executable.
 *
 * This function will not return if a valid value for guest_base
 * cannot be chosen. On return, the executable loader can expect
 *
 *     target_mmap(loaddr, hiaddr - loaddr, ...)
 *
 * to succeed.
 */
void probe_guest_base(const char *image_name,
                      abi_ulong loaddr, abi_ulong hiaddr);

#include "qemu/log.h"

/* safe_syscall.S */

/**
 * safe_syscall:
 * @int number: number of system call to make
 * ...: arguments to the system call
 *
 * Call a system call if guest signal not pending.
 * This has the same API as the libc syscall() function, except that it
 * may return -1 with errno == TARGET_ERESTARTSYS if a signal was pending.
 *
 * Returns: the system call result, or -1 with an error code in errno
 * (Errnos are host errnos; we rely on TARGET_ERESTARTSYS not clashing
 * with any of the host errno values.)
 */

/*
 * A guide to using safe_syscall() to handle interactions between guest
 * syscalls and guest signals:
 *
 * Guest syscalls come in two flavours:
 *
 * (1) Non-interruptible syscalls
 *
 * These are guest syscalls that never get interrupted by signals and
 * so never return EINTR. They can be implemented straightforwardly in
 * QEMU: just make sure that if the implementation code has to make any
 * blocking calls that those calls are retried if they return EINTR.
 * It's also OK to implement these with safe_syscall, though it will be
 * a little less efficient if a signal is delivered at the 'wrong' moment.
 *
 * Some non-interruptible syscalls need to be handled using block_signals()
 * to block signals for the duration of the syscall. This mainly applies
 * to code which needs to modify the data structures used by the
 * host_signal_handler() function and the functions it calls, including
 * all syscalls which change the thread's signal mask.
 *
 * (2) Interruptible syscalls
 *
 * These are guest syscalls that can be interrupted by signals and
 * for which we need to either return EINTR or arrange for the guest
 * syscall to be restarted. This category includes both syscalls which
 * always restart (and in the kernel return -ERESTARTNOINTR), ones
 * which only restart if there is no handler (kernel returns -ERESTARTNOHAND
 * or -ERESTART_RESTARTBLOCK), and the most common kind which restart
 * if the handler was registered with SA_RESTART (kernel returns
 * -ERESTARTSYS). System calls which are only interruptible in some
 * situations (like 'open') also need to be handled this way.
 *
 * Here it is important that the host syscall is made
 * via this safe_syscall() function, and *not* via the host libc.
 * If the host libc is used then the implementation will appear to work
 * most of the time, but there will be a race condition where a
 * signal could arrive just before we make the host syscall inside libc,
 * and then the guest syscall will not correctly be interrupted.
 * Instead the implementation of the guest syscall can use the safe_syscall
 * function but otherwise just return the result or errno in the usual
 * way; the main loop code will take care of restarting the syscall
 * if appropriate.
 *
 * (If the implementation needs to make multiple host syscalls this is
 * OK; any which might really block must be via safe_syscall(); for those
 * which are only technically blocking (ie which we know in practice won't
 * stay in the host kernel indefinitely) it's OK to use libc if necessary.
 * You must be able to cope with backing out correctly if some safe_syscall
 * you make in the implementation returns either -TARGET_ERESTARTSYS or
 * EINTR though.)
 *
 * block_signals() cannot be used for interruptible syscalls.
 *
 *
 * How and why the safe_syscall implementation works:
 *
 * The basic setup is that we make the host syscall via a known
 * section of host native assembly. If a signal occurs, our signal
 * handler checks the interrupted host PC against the address of that
 * known section. If the PC is before or at the address of the syscall
 * instruction then we change the PC to point at a "return
 * -TARGET_ERESTARTSYS" code path instead, and then exit the signal handler
 * (causing the safe_syscall() call to immediately return that value).
 * Then in the main.c loop if we see this magic return value we adjust
 * the guest PC to wind it back to before the system call, and invoke
 * the guest signal handler as usual.
 *
 * This winding-back will happen in two cases:
 * (1) signal came in just before we took the host syscall (a race);
 *   in this case we'll take the guest signal and have another go
 *   at the syscall afterwards, and this is indistinguishable for the
 *   guest from the timing having been different such that the guest
 *   signal really did win the race
 * (2) signal came in while the host syscall was blocking, and the
 *   host kernel decided the syscall should be restarted;
 *   in this case we want to restart the guest syscall also, and so
 *   rewinding is the right thing. (Note that "restart" semantics mean
 *   "first call the signal handler, then reattempt the syscall".)
 * The other situation to consider is when a signal came in while the
 * host syscall was blocking, and the host kernel decided that the syscall
 * should not be restarted; in this case QEMU's host signal handler will
 * be invoked with the PC pointing just after the syscall instruction,
 * with registers indicating an EINTR return; the special code in the
 * handler will not kick in, and we will return EINTR to the guest as
 * we should.
 *
 * Notice that we can leave the host kernel to make the decision for
 * us about whether to do a restart of the syscall or not; we do not
 * need to check SA_RESTART flags in QEMU or distinguish the various
 * kinds of restartability.
 */
#ifdef HAVE_SAFE_SYSCALL
/* The core part of this function is implemented in assembly */
extern long safe_syscall_base(int *pending, long number, ...);

#define safe_syscall(...)                                                  \
    ({                                                                     \
        long ret_;                                                         \
        int *psp_ = &((TaskState *)thread_cpu->opaque)->signal_pending;    \
        ret_ = safe_syscall_base(psp_, __VA_ARGS__);                       \
        if (is_error(ret_)) {                                              \
            errno = -ret_;                                                 \
            ret_ = -1;                                                     \
        }                                                                  \
        ret_;                                                              \
    })

#else

/*
 * Fallback for architectures which don't yet provide a safe-syscall assembly
 * fragment; note that this is racy!
 * This should go away when all host architectures have been updated.
 */
#define safe_syscall syscall

#endif
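
/*
 * Usage sketch (illustrative; the surrounding code is simplified and
 * get_errno() is assumed to be the syscall.c helper that converts a
 * -1/errno result into a -TARGET_xxx value): an interruptible guest
 * syscall makes its blocking host call through safe_syscall() and
 * simply propagates the result:
 *
 *     void *p = lock_user(VERIFY_WRITE, guest_buf, count, 0);
 *     if (!p) {
 *         return -TARGET_EFAULT;
 *     }
 *     ret = get_errno(safe_syscall(__NR_read, fd, p, count));
 *     unlock_user(p, guest_buf, ret >= 0 ? ret : 0);
 *     return ret;   // may be -TARGET_ERESTARTSYS; the main loop restarts it
 */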

/* syscall.c */
int host_to_target_waitstatus(int status);

/* signal.c */
void process_pending_signals(CPUArchState *cpu_env);
void signal_init(void);
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info);
void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info);
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo);
int target_to_host_signal(int sig);
int host_to_target_signal(int sig);
long do_sigreturn(CPUArchState *env);
long do_rt_sigreturn(CPUArchState *env);
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr,
                        CPUArchState *env);
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset);
abi_long do_swapcontext(CPUArchState *env, abi_ulong uold_ctx,
                        abi_ulong unew_ctx, abi_long ctx_size);
/**
 * block_signals: block all signals while handling this guest syscall
 *
 * Block all signals, and arrange that the signal mask is returned to
 * its correct value for the guest before we resume execution of guest code.
 * If this function returns non-zero, then the caller should immediately
 * return -TARGET_ERESTARTSYS to the main loop, which will take the pending
 * signal and restart execution of the syscall.
 * If block_signals() returns zero, then the caller can continue with
 * emulation of the system call knowing that no signals can be taken
 * (and therefore that no race conditions will result).
 * This should only be called once, because if it is called a second time
 * it will always return non-zero. (Think of it like a mutex that can't
 * be recursively locked.)
 * Signals will be unblocked again by process_pending_signals().
 *
 * Return value: non-zero if there was a pending signal, zero if not.
 */
int block_signals(void); /* Returns non zero if signal pending */
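
/*
 * Typical caller pattern (sketch, following the contract above): a
 * non-interruptible syscall that touches the emulated signal state
 * blocks signals first and bails out if one was already pending:
 *
 *     if (block_signals()) {
 *         return -TARGET_ERESTARTSYS;
 *     }
 *     // ... update the TaskState signal fields race-free ...
 *     // signals are unblocked again by process_pending_signals()
 */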

#ifdef TARGET_I386
/* vm86.c */
void save_v86_state(CPUX86State *env);
void handle_vm86_trap(CPUX86State *env, int trapno);
void handle_vm86_fault(CPUX86State *env);
int do_vm86(CPUX86State *env, long subfunction, abi_ulong v86_addr);
#elif defined(TARGET_SPARC64)
void sparc64_set_context(CPUSPARCState *env);
void sparc64_get_context(CPUSPARCState *env);
#endif

/* mmap.c */
int target_mprotect(abi_ulong start, abi_ulong len, int prot);
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset);
int target_munmap(abi_ulong start, abi_ulong len);
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr);
extern unsigned long last_brk;
extern abi_ulong mmap_next_start;
abi_ulong mmap_find_vma(abi_ulong, abi_ulong, abi_ulong);
void mmap_fork_start(void);
void mmap_fork_end(int child);

/* main.c */
extern unsigned long guest_stack_size;

/* user access */

#define VERIFY_READ  PAGE_READ
#define VERIFY_WRITE (PAGE_READ | PAGE_WRITE)

static inline bool access_ok_untagged(int type, abi_ulong addr, abi_ulong size)
{
    if (size == 0
        ? !guest_addr_valid_untagged(addr)
        : !guest_range_valid_untagged(addr, size)) {
        return false;
    }
    return page_check_range((target_ulong)addr, size, type) == 0;
}

static inline bool access_ok(CPUState *cpu, int type,
                             abi_ulong addr, abi_ulong size)
{
    return access_ok_untagged(type, cpu_untagged_addr(cpu, addr), size);
}
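
/*
 * Example (sketch): checking a guest buffer before touching it, where
 * env_cpu() is assumed to convert the CPUArchState pointer into its
 * CPUState as elsewhere in QEMU:
 *
 *     if (!access_ok(env_cpu(cpu_env), VERIFY_WRITE, guest_addr, len)) {
 *         return -TARGET_EFAULT;
 *     }
 */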

/* NOTE __get_user and __put_user use host pointers and don't check access.
   These are usually used to access struct data members once the struct has
   been locked - usually with lock_user_struct. */

/*
 * Tricky points:
 * - Use __builtin_choose_expr to avoid type promotion from ?:,
 * - Invalid sizes result in a compile time error stemming from
 *   the fact that abort has no parameters.
 * - It's easier to use the endian-specific unaligned load/store
 *   functions than host-endian unaligned load/store plus tswapN.
 * - The pragmas are necessary only to silence a clang false-positive
 *   warning: see https://bugs.llvm.org/show_bug.cgi?id=39113 .
 * - gcc has bugs in its _Pragma() support in some versions, eg
 *   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83256 -- so we only
 *   include the warning-suppression pragmas for clang
 */
#if defined(__clang__) && __has_warning("-Waddress-of-packed-member")
#define PRAGMA_DISABLE_PACKED_WARNING                                   \
    _Pragma("GCC diagnostic push");                                     \
    _Pragma("GCC diagnostic ignored \"-Waddress-of-packed-member\"")

#define PRAGMA_REENABLE_PACKED_WARNING          \
    _Pragma("GCC diagnostic pop")

#else
#define PRAGMA_DISABLE_PACKED_WARNING
#define PRAGMA_REENABLE_PACKED_WARNING
#endif

#define __put_user_e(x, hptr, e)                                            \
    do {                                                                    \
        PRAGMA_DISABLE_PACKED_WARNING;                                      \
        (__builtin_choose_expr(sizeof(*(hptr)) == 1, stb_p,                 \
        __builtin_choose_expr(sizeof(*(hptr)) == 2, stw_##e##_p,            \
        __builtin_choose_expr(sizeof(*(hptr)) == 4, stl_##e##_p,            \
        __builtin_choose_expr(sizeof(*(hptr)) == 8, stq_##e##_p, abort))))  \
            ((hptr), (x)), (void)0);                                        \
        PRAGMA_REENABLE_PACKED_WARNING;                                     \
    } while (0)

#define __get_user_e(x, hptr, e)                                            \
    do {                                                                    \
        PRAGMA_DISABLE_PACKED_WARNING;                                      \
        ((x) = (typeof(*hptr))(                                             \
        __builtin_choose_expr(sizeof(*(hptr)) == 1, ldub_p,                 \
        __builtin_choose_expr(sizeof(*(hptr)) == 2, lduw_##e##_p,           \
        __builtin_choose_expr(sizeof(*(hptr)) == 4, ldl_##e##_p,            \
        __builtin_choose_expr(sizeof(*(hptr)) == 8, ldq_##e##_p, abort))))  \
            (hptr)), (void)0);                                              \
        PRAGMA_REENABLE_PACKED_WARNING;                                     \
    } while (0)


#ifdef TARGET_WORDS_BIGENDIAN
# define __put_user(x, hptr)  __put_user_e(x, hptr, be)
# define __get_user(x, hptr)  __get_user_e(x, hptr, be)
#else
# define __put_user(x, hptr)  __put_user_e(x, hptr, le)
# define __get_user(x, hptr)  __get_user_e(x, hptr, le)
#endif

/* put_user()/get_user() take a guest address and check access */
/* These are usually used to access an atomic data type, such as an int,
 * that has been passed by address. These internally perform locking
 * and unlocking on the data type.
 */
#define put_user(x, gaddr, target_type)                                 \
({                                                                      \
    abi_ulong __gaddr = (gaddr);                                        \
    target_type *__hptr;                                                \
    abi_long __ret = 0;                                                 \
    if ((__hptr = lock_user(VERIFY_WRITE, __gaddr, sizeof(target_type), 0))) { \
        __put_user((x), __hptr);                                        \
        unlock_user(__hptr, __gaddr, sizeof(target_type));              \
    } else                                                              \
        __ret = -TARGET_EFAULT;                                         \
    __ret;                                                              \
})

#define get_user(x, gaddr, target_type)                                 \
({                                                                      \
    abi_ulong __gaddr = (gaddr);                                        \
    target_type *__hptr;                                                \
    abi_long __ret = 0;                                                 \
    if ((__hptr = lock_user(VERIFY_READ, __gaddr, sizeof(target_type), 1))) { \
        __get_user((x), __hptr);                                        \
        unlock_user(__hptr, __gaddr, 0);                                \
    } else {                                                            \
        /* avoid warning */                                             \
        (x) = 0;                                                        \
        __ret = -TARGET_EFAULT;                                         \
    }                                                                   \
    __ret;                                                              \
})

#define put_user_ual(x, gaddr) put_user((x), (gaddr), abi_ulong)
#define put_user_sal(x, gaddr) put_user((x), (gaddr), abi_long)
#define put_user_u64(x, gaddr) put_user((x), (gaddr), uint64_t)
#define put_user_s64(x, gaddr) put_user((x), (gaddr), int64_t)
#define put_user_u32(x, gaddr) put_user((x), (gaddr), uint32_t)
#define put_user_s32(x, gaddr) put_user((x), (gaddr), int32_t)
#define put_user_u16(x, gaddr) put_user((x), (gaddr), uint16_t)
#define put_user_s16(x, gaddr) put_user((x), (gaddr), int16_t)
#define put_user_u8(x, gaddr)  put_user((x), (gaddr), uint8_t)
#define put_user_s8(x, gaddr)  put_user((x), (gaddr), int8_t)

#define get_user_ual(x, gaddr) get_user((x), (gaddr), abi_ulong)
#define get_user_sal(x, gaddr) get_user((x), (gaddr), abi_long)
#define get_user_u64(x, gaddr) get_user((x), (gaddr), uint64_t)
#define get_user_s64(x, gaddr) get_user((x), (gaddr), int64_t)
#define get_user_u32(x, gaddr) get_user((x), (gaddr), uint32_t)
#define get_user_s32(x, gaddr) get_user((x), (gaddr), int32_t)
#define get_user_u16(x, gaddr) get_user((x), (gaddr), uint16_t)
#define get_user_s16(x, gaddr) get_user((x), (gaddr), int16_t)
#define get_user_u8(x, gaddr)  get_user((x), (gaddr), uint8_t)
#define get_user_s8(x, gaddr)  get_user((x), (gaddr), int8_t)
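
/*
 * Example (sketch): fetching and storing a single abi_ulong that the
 * guest passed by address, with fault checking:
 *
 *     abi_ulong val;
 *     if (get_user_ual(val, guest_addr)) {
 *         return -TARGET_EFAULT;
 *     }
 *     val += 1;
 *     if (put_user_ual(val, guest_addr)) {
 *         return -TARGET_EFAULT;
 *     }
 */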

/* copy_from_user() and copy_to_user() are usually used to copy data
 * buffers between the target and host. These internally perform
 * locking/unlocking of the memory.
 */
int copy_from_user(void *hptr, abi_ulong gaddr, ssize_t len);
int copy_to_user(abi_ulong gaddr, void *hptr, ssize_t len);

/* Functions for accessing guest memory. The tget and tput functions
   read/write single values, byteswapping as necessary. The lock_user function
   gets a pointer to a contiguous area of guest memory, but does not perform
   any byteswapping. lock_user may return either a pointer to the guest
   memory, or a temporary buffer. */

/* Lock an area of guest memory into the host. If copy is true then the
   host area will have the same contents as the guest. */
void *lock_user(int type, abi_ulong guest_addr, ssize_t len, bool copy);

/* Unlock an area of guest memory. The first LEN bytes must be
   flushed back to guest memory. host_ptr = NULL is explicitly
   allowed and does nothing. */
#ifndef DEBUG_REMAP
static inline void unlock_user(void *host_ptr, abi_ulong guest_addr,
                               ssize_t len)
{
    /* no-op */
}
#else
void unlock_user(void *host_ptr, abi_ulong guest_addr, ssize_t len);
#endif

/* Return the length of a string in target memory or -TARGET_EFAULT if
   access error. */
ssize_t target_strlen(abi_ulong gaddr);

/* Like lock_user but for null terminated strings. */
void *lock_user_string(abi_ulong guest_addr);

/* Helper macros for locking/unlocking a target struct. */
#define lock_user_struct(type, host_ptr, guest_addr, copy)      \
    (host_ptr = lock_user(type, guest_addr, sizeof(*host_ptr), copy))
#define unlock_user_struct(host_ptr, guest_addr, copy)          \
    unlock_user(host_ptr, guest_addr, (copy) ? sizeof(*host_ptr) : 0)
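
/*
 * Typical usage pattern (sketch; "struct target_foo" and its field are
 * hypothetical, standing in for any target structure the guest passed
 * by address):
 *
 *     struct target_foo *target_p;
 *     if (!lock_user_struct(VERIFY_READ, target_p, guest_addr, 1)) {
 *         return -TARGET_EFAULT;
 *     }
 *     __get_user(host_value, &target_p->field);   // byteswaps as needed
 *     unlock_user_struct(target_p, guest_addr, 0);
 */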

#include <pthread.h>

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

#if TARGET_ABI_BITS == 32
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */

void print_termios(void *arg);

/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    return ((((CPUARMState *)cpu_env)->eabi) == 1);
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/*
 * SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1
 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_HEXAGON)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
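
/*
 * Example (sketch; cpu_env, num, args and arg_idx are placeholders for
 * whatever the calling syscall handler has in scope): a 64-bit argument
 * split across two 32-bit registers is typically reassembled like this,
 * skipping the unused padding slot on ABIs where regpairs_aligned() is true:
 *
 *     if (regpairs_aligned(cpu_env, num)) {
 *         arg_idx += 1;                       // skip the padding register
 *     }
 *     uint64_t offset = target_offset64(args[arg_idx], args[arg_idx + 1]);
 */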

/**
 * preexit_cleanup: housekeeping before the guest exits
 *
 * env: the CPU state
 * code: the exit code
 */
void preexit_cleanup(CPUArchState *env, int code);

/*
 * Include target-specific struct and function definitions;
 * they may need access to the target-independent structures
 * above, so include them last.
 */
#include "target_cpu.h"
#include "target_structs.h"

#endif /* QEMU_H */