2003-02-19 01:55:36 +03:00
|
|
|
/*
|
2003-10-01 00:57:29 +04:00
|
|
|
* qemu user main
|
2007-09-17 01:08:06 +04:00
|
|
|
*
|
2008-01-06 20:21:48 +03:00
|
|
|
* Copyright (c) 2003-2008 Fabrice Bellard
|
2003-02-19 01:55:36 +03:00
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License as published by
|
|
|
|
* the Free Software Foundation; either version 2 of the License, or
|
|
|
|
* (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
2009-07-17 00:47:01 +04:00
|
|
|
* along with this program; if not, see <http://www.gnu.org/licenses/>.
|
2003-02-19 01:55:36 +03:00
|
|
|
*/
|
2016-01-26 21:17:02 +03:00
|
|
|
#include "qemu/osdep.h"
|
2016-06-01 12:44:21 +03:00
|
|
|
#include "qemu-version.h"
|
2009-04-07 10:57:11 +04:00
|
|
|
#include <sys/syscall.h>
|
2010-03-20 00:21:13 +03:00
|
|
|
#include <sys/resource.h>
|
2003-02-19 01:55:36 +03:00
|
|
|
|
2016-06-15 20:27:16 +03:00
|
|
|
#include "qapi/error.h"
|
2003-03-23 23:17:16 +03:00
|
|
|
#include "qemu.h"
|
2016-03-20 20:16:19 +03:00
|
|
|
#include "qemu/path.h"
|
2016-07-15 20:08:38 +03:00
|
|
|
#include "qemu/config-file.h"
|
2016-03-20 20:16:19 +03:00
|
|
|
#include "qemu/cutils.h"
|
|
|
|
#include "qemu/help_option.h"
|
2011-06-20 00:38:22 +04:00
|
|
|
#include "cpu.h"
|
2016-03-15 15:18:37 +03:00
|
|
|
#include "exec/exec-all.h"
|
2010-05-06 19:50:41 +04:00
|
|
|
#include "tcg.h"
|
2012-12-17 21:20:00 +04:00
|
|
|
#include "qemu/timer.h"
|
|
|
|
#include "qemu/envlist.h"
|
2012-03-30 21:02:50 +04:00
|
|
|
#include "elf.h"
|
2016-07-15 20:08:38 +03:00
|
|
|
#include "trace/control.h"
|
2018-02-20 20:33:04 +03:00
|
|
|
#include "target_elf.h"
|
2018-04-11 21:56:33 +03:00
|
|
|
#include "cpu_loop-common.h"
|
2009-01-30 22:59:17 +03:00
|
|
|
|
2009-01-30 23:09:01 +03:00
|
|
|
char *exec_path;
|
|
|
|
|
2009-04-06 00:08:59 +04:00
|
|
|
int singlestep;
|
2015-08-29 10:29:52 +03:00
|
|
|
static const char *filename;
|
|
|
|
static const char *argv0;
|
|
|
|
static int gdbstub_port;
|
|
|
|
static envlist_t *envlist;
|
2013-07-02 20:26:11 +04:00
|
|
|
static const char *cpu_model;
|
2018-02-07 13:40:26 +03:00
|
|
|
static const char *cpu_type;
|
2009-07-17 15:48:08 +04:00
|
|
|
unsigned long mmap_min_addr;
|
|
|
|
unsigned long guest_base;
|
|
|
|
int have_guest_base;
|
2015-11-13 15:20:35 +03:00
|
|
|
|
2011-12-14 03:33:28 +04:00
|
|
|
/*
|
|
|
|
* When running 32-on-64 we should make sure we can fit all of the possible
|
|
|
|
* guest address space into a contiguous chunk of virtual host memory.
|
|
|
|
*
|
|
|
|
* This way we will never overlap with our own libraries or binaries or stack
|
|
|
|
* or anything else that QEMU maps.
|
2017-10-05 17:36:00 +03:00
|
|
|
*
|
|
|
|
* Many cpus reserve the high bit (or more than one for some 64-bit cpus)
|
|
|
|
* of the address for the kernel. Some cpus rely on this and user space
|
|
|
|
* uses the high bit(s) for pointer tagging and the like. For them, we
|
|
|
|
* must preserve the expected address space.
|
2011-12-14 03:33:28 +04:00
|
|
|
*/
|
2017-10-05 17:36:00 +03:00
|
|
|
#ifndef MAX_RESERVED_VA
|
|
|
|
# if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
|
|
|
|
# if TARGET_VIRT_ADDR_SPACE_BITS == 32 && \
|
|
|
|
(TARGET_LONG_BITS == 32 || defined(TARGET_ABI32))
|
|
|
|
/* There are a number of places where we assign reserved_va to a variable
|
|
|
|
of type abi_ulong and expect it to fit. Avoid the last page. */
|
|
|
|
# define MAX_RESERVED_VA (0xfffffffful & TARGET_PAGE_MASK)
|
|
|
|
# else
|
|
|
|
# define MAX_RESERVED_VA (1ul << TARGET_VIRT_ADDR_SPACE_BITS)
|
|
|
|
# endif
|
2013-01-03 17:17:18 +04:00
|
|
|
# else
|
2017-10-05 17:36:00 +03:00
|
|
|
# define MAX_RESERVED_VA 0
|
2013-01-03 17:17:18 +04:00
|
|
|
# endif
|
2017-10-05 17:36:00 +03:00
|
|
|
#endif
|
|
|
|
|
|
|
|
/* That said, reserving *too* much vm space via mmap can run into problems
|
|
|
|
with rlimits, oom due to page table creation, etc. We will still try it,
|
|
|
|
if directed by the command-line option, but not by default. */
|
|
|
|
#if HOST_LONG_BITS == 64 && TARGET_VIRT_ADDR_SPACE_BITS <= 32
|
|
|
|
unsigned long reserved_va = MAX_RESERVED_VA;
|
2011-12-14 03:33:28 +04:00
|
|
|
#else
|
2010-05-29 05:27:35 +04:00
|
|
|
unsigned long reserved_va;
|
2009-07-17 15:48:08 +04:00
|
|
|
#endif
|
2009-04-06 00:08:59 +04:00
|
|
|
|
2015-07-06 21:03:38 +03:00
|
|
|
static void usage(int exitcode);
|
2011-08-06 10:54:12 +04:00
|
|
|
|
2010-05-26 18:08:22 +04:00
|
|
|
static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
|
2014-03-04 06:28:43 +04:00
|
|
|
const char *qemu_uname_release;
|
2003-03-03 18:02:29 +03:00
|
|
|
|
2003-03-23 19:49:39 +03:00
|
|
|
/* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
|
|
|
|
we allocate a bigger stack. Need a better solution, for example
|
|
|
|
by remapping the process stack directly at the right place */
|
2010-03-20 00:21:13 +03:00
|
|
|
unsigned long guest_stack_size = 8 * 1024 * 1024UL;
|
2003-02-19 01:55:36 +03:00
|
|
|
|
|
|
|
/*
 * Log a printf-style message to stderr.
 *
 * Used throughout the user-mode emulator for -strace style diagnostics;
 * output goes to the host's stderr, unbuffered by any QEMU layer.
 */
void gemu_log(const char *fmt, ...)
{
    va_list args;

    va_start(args, fmt);
    vfprintf(stderr, fmt, args);
    va_end(args);
}
|
|
|
|
|
2008-08-18 00:26:25 +04:00
|
|
|
#if defined(TARGET_I386)
|
2012-02-25 06:37:53 +04:00
|
|
|
int cpu_get_pic_interrupt(CPUX86State *env)
|
2003-06-24 17:30:31 +04:00
|
|
|
{
|
|
|
|
return -1;
|
|
|
|
}
|
2008-08-18 00:26:25 +04:00
|
|
|
#endif
|
2003-06-24 17:30:31 +04:00
|
|
|
|
2008-06-08 00:50:51 +04:00
|
|
|
/***********************************************************/
|
|
|
|
/* Helper routines for implementing atomic operations. */
|
|
|
|
|
|
|
|
/* Make sure everything is in a consistent state for calling fork(). */
|
|
|
|
/*
 * Make sure everything is in a consistent state for calling fork().
 *
 * All locks that the child could inherit in a locked state are taken
 * here, in a fixed order, and released/reinitialized in fork_end().
 * The order (exclusive section, mmap lock, tb_lock, cpu list lock)
 * must be the mirror image of the parent-side release in fork_end().
 */
void fork_start(void)
{
    start_exclusive();
    mmap_fork_start();
    qemu_mutex_lock(&tb_ctx.tb_lock);
    cpu_list_lock();
}
|
|
|
|
|
|
|
|
/*
 * Undo fork_start() after fork() returns.
 *
 * @child: non-zero in the child process, zero in the parent.
 *
 * In the parent the locks taken by fork_start() are simply released.
 * In the child the locks were inherited in a locked state by a process
 * with only one thread, so they are reinitialized rather than unlocked,
 * and all CPU state belonging to the parent's other threads is dropped.
 */
void fork_end(int child)
{
    mmap_fork_end(child);
    if (child) {
        CPUState *cpu, *next_cpu;
        /* Child processes created by fork() only have a single thread.
           Discard information about the parent threads.  */
        CPU_FOREACH_SAFE(cpu, next_cpu) {
            if (cpu != thread_cpu) {
                QTAILQ_REMOVE(&cpus, cpu, node);
            }
        }
        /* tb_lock was held across fork(); reinit instead of unlock.  */
        qemu_mutex_init(&tb_ctx.tb_lock);
        qemu_init_cpu_list();
        gdbserver_fork(thread_cpu);
        /* qemu_init_cpu_list() takes care of reinitializing the
         * exclusive state, so we don't need to end_exclusive() here.
         */
    } else {
        qemu_mutex_unlock(&tb_ctx.tb_lock);
        cpu_list_unlock();
        end_exclusive();
    }
}
|
|
|
|
|
2015-08-21 00:36:37 +03:00
|
|
|
#ifdef TARGET_TILEGX
|
|
|
|
|
|
|
|
/*
 * Raise SIGILL/ILL_PRVREG at the current PC for an access to a
 * privileged or unimplemented tilegx register.
 */
static void gen_sigill_reg(CPUTLGState *env)
{
    target_siginfo_t info = {
        .si_signo = TARGET_SIGILL,
        .si_errno = 0,
        .si_code = TARGET_ILL_PRVREG,
        ._sifields._sigfault._addr = env->pc,
    };

    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
}
|
|
|
|
|
2015-09-28 00:26:04 +03:00
|
|
|
/*
 * Queue a fault signal @signo with code @sigcode for the tilegx guest.
 *
 * For SIGSEGV the caller's @sigcode is ignored: the faulting address in
 * env->excaddr is probed and the code is chosen as MAPERR (page not
 * mapped) or ACCERR (mapped but protection denied).  For all signals the
 * fault address defaults to the current PC.
 */
static void do_signal(CPUTLGState *env, int signo, int sigcode)
{
    target_siginfo_t info;

    info.si_signo = signo;
    info.si_errno = 0;
    info._sifields._sigfault._addr = env->pc;

    if (signo == TARGET_SIGSEGV) {
        /* The passed in sigcode is a dummy; check for a page mapping
           and pass either MAPERR or ACCERR.  */
        target_ulong addr = env->excaddr;
        info._sifields._sigfault._addr = addr;
        if (page_check_range(addr, 1, PAGE_VALID) < 0) {
            sigcode = TARGET_SEGV_MAPERR;
        } else {
            sigcode = TARGET_SEGV_ACCERR;
        }
    }
    info.si_code = sigcode;

    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
}
|
|
|
|
|
2015-09-28 00:26:04 +03:00
|
|
|
/*
 * Raise SIGSEGV for a faulting access at @addr.
 * env->excaddr must be set before do_signal() so its SIGSEGV path can
 * probe the right address; the sigcode argument is recomputed there.
 */
static void gen_sigsegv_maperr(CPUTLGState *env, target_ulong addr)
{
    env->excaddr = addr;
    do_signal(env, TARGET_SIGSEGV, 0);
}
|
|
|
|
|
2015-08-24 17:55:47 +03:00
|
|
|
/*
 * Write @val to tilegx register number @reg.
 *
 * Ordinary registers (below TILEGX_R_COUNT) are stored directly.
 * Writes to the SN and ZERO pseudo-registers are silently discarded;
 * writes to the IDN/UDN network registers raise SIGILL, matching the
 * hardware's privileged-register trap.  Any other number is a bug in
 * the translator and aborts.
 */
static void set_regval(CPUTLGState *env, uint8_t reg, uint64_t val)
{
    if (likely(reg < TILEGX_R_COUNT)) {
        env->regs[reg] = val;
        return;
    }

    switch (reg) {
    case TILEGX_R_SN:
    case TILEGX_R_ZERO:
        /* Writes are ignored.  */
        break;
    case TILEGX_R_IDN0:
    case TILEGX_R_IDN1:
    case TILEGX_R_UDN0:
    case TILEGX_R_UDN1:
    case TILEGX_R_UDN2:
    case TILEGX_R_UDN3:
        gen_sigill_reg(env);
        break;
    default:
        g_assert_not_reached();
    }
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Compare the 8-byte contents of the CmpValue SPR with the 8-byte value in
|
|
|
|
* memory at the address held in the first source register. If the values are
|
|
|
|
* not equal, then no memory operation is performed. If the values are equal,
|
|
|
|
* the 8-byte quantity from the second source register is written into memory
|
|
|
|
* at the address held in the first source register. In either case, the result
|
|
|
|
* of the instruction is the value read from memory. The compare and write to
|
|
|
|
* memory are atomic and thus can be used for synchronization purposes. This
|
|
|
|
* instruction only operates for addresses aligned to a 8-byte boundary.
|
|
|
|
* Unaligned memory access causes an Unaligned Data Reference interrupt.
|
|
|
|
*
|
|
|
|
* Functional Description (64-bit)
|
|
|
|
* uint64_t memVal = memoryReadDoubleWord (rf[SrcA]);
|
|
|
|
* rf[Dest] = memVal;
|
|
|
|
* if (memVal == SPR[CmpValueSPR])
|
|
|
|
* memoryWriteDoubleWord (rf[SrcA], rf[SrcB]);
|
|
|
|
*
|
|
|
|
* Functional Description (32-bit)
|
|
|
|
* uint64_t memVal = signExtend32 (memoryReadWord (rf[SrcA]));
|
|
|
|
* rf[Dest] = memVal;
|
|
|
|
* if (memVal == signExtend32 (SPR[CmpValueSPR]))
|
|
|
|
* memoryWriteWord (rf[SrcA], rf[SrcB]);
|
|
|
|
*
|
|
|
|
*
|
|
|
|
* This function also processes exch and exch4 which need not process SPR.
|
|
|
|
*/
|
|
|
|
/*
 * Emulate the tilegx exch/exch4/cmpexch/cmpexch4 instructions inside the
 * exclusive section (no other vCPU runs between start/end_exclusive).
 *
 * @quad: true for the 64-bit forms, false for the 32-bit ("4") forms.
 * @cmp:  true for cmpexch (store only if memory equals the CMPEXCH SPR),
 *        false for unconditional exchange.
 *
 * The destination register always receives the old memory value.  A
 * faulting guest address aborts the operation and raises SIGSEGV.
 */
static void do_exch(CPUTLGState *env, bool quad, bool cmp)
{
    target_ulong addr;
    target_long val, sprval;

    start_exclusive();

    /* Load the old value; it is the instruction result in all cases.  */
    addr = env->atomic_srca;
    if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
        goto sigsegv_maperr;
    }

    if (cmp) {
        if (quad) {
            sprval = env->spregs[TILEGX_SPR_CMPEXCH];
        } else {
            /* 32-bit compare uses the sign-extended low half of the SPR. */
            sprval = sextract64(env->spregs[TILEGX_SPR_CMPEXCH], 0, 32);
        }
    }

    /* Plain exch always stores; cmpexch stores only on a match.  */
    if (!cmp || val == sprval) {
        target_long valb = env->atomic_srcb;
        if (quad ? put_user_u64(valb, addr) : put_user_u32(valb, addr)) {
            goto sigsegv_maperr;
        }
    }

    set_regval(env, env->atomic_dstr, val);
    end_exclusive();
    return;

 sigsegv_maperr:
    /* Leave the exclusive section before queueing the fault.  */
    end_exclusive();
    gen_sigsegv_maperr(env, addr);
}
|
|
|
|
|
|
|
|
/*
 * Emulate the tilegx fetch-and-op instructions (fetchadd, fetchaddgez,
 * fetchand, fetchor, and their 32-bit "4" forms) inside the exclusive
 * section.
 *
 * @trapnr selects which operation to apply; @quad selects 64-bit vs
 * 32-bit operand size.  The destination register receives the old
 * memory value.  fetchaddgez suppresses the store when the result would
 * be negative (in the operand width).  Faulting addresses raise SIGSEGV.
 */
static void do_fetch(CPUTLGState *env, int trapnr, bool quad)
{
    int8_t write = 1;
    target_ulong addr;
    target_long val, valb;

    start_exclusive();

    addr = env->atomic_srca;
    valb = env->atomic_srcb;
    if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
        goto sigsegv_maperr;
    }

    switch (trapnr) {
    case TILEGX_EXCP_OPCODE_FETCHADD:
    case TILEGX_EXCP_OPCODE_FETCHADD4:
        valb += val;
        break;
    case TILEGX_EXCP_OPCODE_FETCHADDGEZ:
        valb += val;
        if (valb < 0) {
            /* Result negative: fetchaddgez does not store.  */
            write = 0;
        }
        break;
    case TILEGX_EXCP_OPCODE_FETCHADDGEZ4:
        valb += val;
        /* 32-bit form tests the sign of the low 32 bits only.  */
        if ((int32_t)valb < 0) {
            write = 0;
        }
        break;
    case TILEGX_EXCP_OPCODE_FETCHAND:
    case TILEGX_EXCP_OPCODE_FETCHAND4:
        valb &= val;
        break;
    case TILEGX_EXCP_OPCODE_FETCHOR:
    case TILEGX_EXCP_OPCODE_FETCHOR4:
        valb |= val;
        break;
    default:
        g_assert_not_reached();
    }

    if (write) {
        if (quad ? put_user_u64(valb, addr) : put_user_u32(valb, addr)) {
            goto sigsegv_maperr;
        }
    }

    set_regval(env, env->atomic_dstr, val);
    end_exclusive();
    return;

 sigsegv_maperr:
    /* Leave the exclusive section before queueing the fault.  */
    end_exclusive();
    gen_sigsegv_maperr(env, addr);
}
|
|
|
|
|
2015-08-21 00:36:37 +03:00
|
|
|
/*
 * Main user-mode execution loop for the tilegx target.
 *
 * Runs translated code via cpu_exec() and dispatches each exit reason:
 * system calls, the emulated atomic opcodes (exch/cmpexch/fetch*),
 * queued guest signals, and illegal network-register accesses.
 * Never returns; unknown trap numbers abort.
 */
void cpu_loop(CPUTLGState *env)
{
    CPUState *cs = CPU(tilegx_env_get_cpu(env));
    int trapnr;

    while (1) {
        cpu_exec_start(cs);
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);
        process_queued_cpu_work(cs);

        switch (trapnr) {
        case TILEGX_EXCP_SYSCALL:
        {
            abi_ulong ret = do_syscall(env, env->regs[TILEGX_R_NR],
                                       env->regs[0], env->regs[1],
                                       env->regs[2], env->regs[3],
                                       env->regs[4], env->regs[5],
                                       env->regs[6], env->regs[7]);
            if (ret == -TARGET_ERESTARTSYS) {
                /* Back up over the syscall bundle so it is re-executed.  */
                env->pc -= 8;
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->regs[TILEGX_R_RE] = ret;
                /* R_ERR carries the positive errno, or 0 on success.  */
                env->regs[TILEGX_R_ERR] = TILEGX_IS_ERRNO(ret) ? -ret : 0;
            }
            break;
        }
        case TILEGX_EXCP_OPCODE_EXCH:
            do_exch(env, true, false);
            break;
        case TILEGX_EXCP_OPCODE_EXCH4:
            do_exch(env, false, false);
            break;
        case TILEGX_EXCP_OPCODE_CMPEXCH:
            do_exch(env, true, true);
            break;
        case TILEGX_EXCP_OPCODE_CMPEXCH4:
            do_exch(env, false, true);
            break;
        case TILEGX_EXCP_OPCODE_FETCHADD:
        case TILEGX_EXCP_OPCODE_FETCHADDGEZ:
        case TILEGX_EXCP_OPCODE_FETCHAND:
        case TILEGX_EXCP_OPCODE_FETCHOR:
            do_fetch(env, trapnr, true);
            break;
        case TILEGX_EXCP_OPCODE_FETCHADD4:
        case TILEGX_EXCP_OPCODE_FETCHADDGEZ4:
        case TILEGX_EXCP_OPCODE_FETCHAND4:
        case TILEGX_EXCP_OPCODE_FETCHOR4:
            do_fetch(env, trapnr, false);
            break;
        case TILEGX_EXCP_SIGNAL:
            do_signal(env, env->signo, env->sigcode);
            break;
        case TILEGX_EXCP_REG_IDN_ACCESS:
        case TILEGX_EXCP_REG_UDN_ACCESS:
            gen_sigill_reg(env);
            break;
        case EXCP_ATOMIC:
            /* Retry the atomic operation with all other vCPUs stopped.  */
            cpu_exec_step_atomic(cs);
            break;
        default:
            fprintf(stderr, "trapnr is %d[0x%x].\n", trapnr, trapnr);
            g_assert_not_reached();
        }
        process_pending_signals(env);
    }
}
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
2018-03-02 15:31:11 +03:00
|
|
|
#ifdef TARGET_RISCV
|
|
|
|
|
|
|
|
/*
 * Main user-mode execution loop for the RISC-V target.
 *
 * Runs translated code via cpu_exec() and dispatches each exit reason:
 * user-mode ecall (system calls, plus the riscv_flush_icache arch
 * syscall which is a no-op here), illegal instructions, breakpoints,
 * page faults, and gdbstub single-stepping.  Faults are converted to
 * queued guest signals; unknown trap numbers terminate the process.
 */
void cpu_loop(CPURISCVState *env)
{
    CPUState *cs = CPU(riscv_env_get_cpu(env));
    int trapnr, signum, sigcode;
    target_ulong sigaddr;
    target_ulong ret;

    for (;;) {
        cpu_exec_start(cs);
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);
        process_queued_cpu_work(cs);

        signum = 0;
        sigcode = 0;
        sigaddr = 0;

        switch (trapnr) {
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_ATOMIC:
            /* Retry the atomic operation with all other vCPUs stopped.  */
            cpu_exec_step_atomic(cs);
            break;
        case RISCV_EXCP_U_ECALL:
            env->pc += 4;
            if (env->gpr[xA7] == TARGET_NR_arch_specific_syscall + 15) {
                /* riscv_flush_icache_syscall is a no-op in QEMU as
                   self-modifying code is automatically detected */
                ret = 0;
            } else {
                ret = do_syscall(env,
                                 env->gpr[xA7],
                                 env->gpr[xA0],
                                 env->gpr[xA1],
                                 env->gpr[xA2],
                                 env->gpr[xA3],
                                 env->gpr[xA4],
                                 env->gpr[xA5],
                                 0, 0);
            }
            if (ret == -TARGET_ERESTARTSYS) {
                /* Back up over the ecall so it is re-executed.  */
                env->pc -= 4;
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->gpr[xA0] = ret;
            }
            if (cs->singlestep_enabled) {
                goto gdbstep;
            }
            break;
        case RISCV_EXCP_ILLEGAL_INST:
            signum = TARGET_SIGILL;
            sigcode = TARGET_ILL_ILLOPC;
            break;
        case RISCV_EXCP_BREAKPOINT:
            signum = TARGET_SIGTRAP;
            sigcode = TARGET_TRAP_BRKPT;
            sigaddr = env->pc;
            break;
        case RISCV_EXCP_INST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_PAGE_FAULT:
        case RISCV_EXCP_STORE_PAGE_FAULT:
            signum = TARGET_SIGSEGV;
            sigcode = TARGET_SEGV_MAPERR;
            break;
        case EXCP_DEBUG:
        gdbstep:
            signum = gdb_handlesig(cs, TARGET_SIGTRAP);
            sigcode = TARGET_TRAP_BRKPT;
            break;
        default:
            EXCP_DUMP(env, "\nqemu: unhandled CPU exception %#x - aborting\n",
                     trapnr);
            exit(EXIT_FAILURE);
        }

        if (signum) {
            target_siginfo_t info = {
                .si_signo = signum,
                .si_errno = 0,
                .si_code = sigcode,
                ._sifields._sigfault._addr = sigaddr
            };
            /* These are all faults with a meaningful _sigfault layout
             * (si_code + fault address), so queue them as QEMU_SI_FAULT;
             * QEMU_SI_KILL would copy the _kill fields instead and the
             * address/code would never reach the guest handler.  This
             * matches every other target's cpu_loop in this file.
             */
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
        }

        process_pending_signals(env);
    }
}
|
|
|
|
|
|
|
|
#endif /* TARGET_RISCV */
|
|
|
|
|
2016-12-15 21:01:00 +03:00
|
|
|
#ifdef TARGET_HPPA
|
|
|
|
|
|
|
|
/*
 * Emulate the HPPA light-weight syscall (LWS) gateway used by glibc for
 * atomic compare-and-swap.
 *
 * Arguments follow the LWS ABI in the caller's registers:
 *   gr[20] = which LWS (0 = 32-bit cmpxchg, 2 = sized "new" cmpxchg),
 *   gr[26] = address, gr[25] = old value (or pointer for LWS 2),
 *   gr[24] = new value (or pointer), gr[23] = size code for LWS 2.
 * The cmpxchg result status is returned in gr[28]; the function result
 * is 0 on success or a negative target errno for bad requests.
 */
static abi_ulong hppa_lws(CPUHPPAState *env)
{
    uint32_t which = env->gr[20];
    abi_ulong addr = env->gr[26];
    abi_ulong old = env->gr[25];
    abi_ulong new = env->gr[24];
    abi_ulong size, ret;

    switch (which) {
    default:
        return -TARGET_ENOSYS;

    case 0: /* elf32 atomic 32bit cmpxchg */
        if ((addr & 3) || !access_ok(VERIFY_WRITE, addr, 4)) {
            return -TARGET_EFAULT;
        }
        /* Swap to host order so the host cmpxchg compares correctly.  */
        old = tswap32(old);
        new = tswap32(new);
        ret = atomic_cmpxchg((uint32_t *)g2h(addr), old, new);
        ret = tswap32(ret);
        break;

    case 2: /* elf32 atomic "new" cmpxchg */
        size = env->gr[23];
        if (size >= 4) {
            return -TARGET_ENOSYS;
        }
        /* addr/old/new must all be aligned to the operand size and
           readable/writable guest memory.  */
        if (((addr | old | new) & ((1 << size) - 1))
            || !access_ok(VERIFY_WRITE, addr, 1 << size)
            || !access_ok(VERIFY_READ, old, 1 << size)
            || !access_ok(VERIFY_READ, new, 1 << size)) {
            return -TARGET_EFAULT;
        }
        /* Note that below we use host-endian loads so that the cmpxchg
           can be host-endian as well.  */
        switch (size) {
        case 0:
            old = *(uint8_t *)g2h(old);
            new = *(uint8_t *)g2h(new);
            ret = atomic_cmpxchg((uint8_t *)g2h(addr), old, new);
            /* LWS 2 returns 0 on success, non-zero on mismatch.  */
            ret = ret != old;
            break;
        case 1:
            old = *(uint16_t *)g2h(old);
            new = *(uint16_t *)g2h(new);
            ret = atomic_cmpxchg((uint16_t *)g2h(addr), old, new);
            ret = ret != old;
            break;
        case 2:
            old = *(uint32_t *)g2h(old);
            new = *(uint32_t *)g2h(new);
            ret = atomic_cmpxchg((uint32_t *)g2h(addr), old, new);
            ret = ret != old;
            break;
        case 3:
            {
                uint64_t o64, n64, r64;
                o64 = *(uint64_t *)g2h(old);
                n64 = *(uint64_t *)g2h(new);
#ifdef CONFIG_ATOMIC64
                r64 = atomic_cmpxchg__nocheck((uint64_t *)g2h(addr), o64, n64);
                ret = r64 != o64;
#else
                /* No host 64-bit cmpxchg: fall back to the exclusive
                   section to make the compare-and-store atomic.  */
                start_exclusive();
                r64 = *(uint64_t *)g2h(addr);
                ret = 1;
                if (r64 == o64) {
                    *(uint64_t *)g2h(addr) = n64;
                    ret = 0;
                }
                end_exclusive();
#endif
            }
            break;
        }
        break;
    }

    env->gr[28] = ret;
    return 0;
}
|
|
|
|
|
|
|
|
/*
 * Main user-mode execution loop for the HPPA target.
 *
 * Runs translated code via cpu_exec() and dispatches each exit reason:
 * system calls (entered via the faked gateway page), the LWS atomic
 * gateway, memory faults, illegal/privileged instructions, arithmetic
 * traps, gdbstub debug stops, and interrupts.
 */
void cpu_loop(CPUHPPAState *env)
{
    CPUState *cs = CPU(hppa_env_get_cpu(env));
    target_siginfo_t info;
    abi_ulong ret;
    int trapnr;

    while (1) {
        cpu_exec_start(cs);
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);
        process_queued_cpu_work(cs);

        switch (trapnr) {
        case EXCP_SYSCALL:
            ret = do_syscall(env, env->gr[20],
                             env->gr[26], env->gr[25],
                             env->gr[24], env->gr[23],
                             env->gr[22], env->gr[21], 0, 0);
            switch (ret) {
            default:
                env->gr[28] = ret;
                /* We arrived here by faking the gateway page.  Return.  */
                env->iaoq_f = env->gr[31];
                env->iaoq_b = env->gr[31] + 4;
                break;
            case -TARGET_ERESTARTSYS:
            case -TARGET_QEMU_ESIGRETURN:
                /* Leave iaoq alone: the syscall will be restarted or the
                   sigreturn already rewrote the PC queue.  */
                break;
            }
            break;
        case EXCP_SYSCALL_LWS:
            env->gr[21] = hppa_lws(env);
            /* We arrived here by faking the gateway page.  Return.  */
            env->iaoq_f = env->gr[31];
            env->iaoq_b = env->gr[31] + 4;
            break;
        case EXCP_ITLB_MISS:
        case EXCP_DTLB_MISS:
        case EXCP_NA_ITLB_MISS:
        case EXCP_NA_DTLB_MISS:
        case EXCP_IMP:
        case EXCP_DMP:
        case EXCP_DMB:
        case EXCP_PAGE_REF:
        case EXCP_DMAR:
        case EXCP_DMPI:
            /* All memory faults map to SIGSEGV; CR_IOR holds the
               offending address.  */
            info.si_signo = TARGET_SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_ACCERR;
            info._sifields._sigfault._addr = env->cr[CR_IOR];
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP_UNALIGN:
            info.si_signo = TARGET_SIGBUS;
            info.si_errno = 0;
            info.si_code = 0;
            info._sifields._sigfault._addr = env->cr[CR_IOR];
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP_ILL:
        case EXCP_PRIV_OPR:
        case EXCP_PRIV_REG:
            info.si_signo = TARGET_SIGILL;
            info.si_errno = 0;
            info.si_code = TARGET_ILL_ILLOPN;
            info._sifields._sigfault._addr = env->iaoq_f;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP_OVERFLOW:
        case EXCP_COND:
        case EXCP_ASSIST:
            info.si_signo = TARGET_SIGFPE;
            info.si_errno = 0;
            info.si_code = 0;
            info._sifields._sigfault._addr = env->iaoq_f;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP_DEBUG:
            /* gdb_handlesig() returns the signal to deliver, or 0 if
               gdb consumed the stop.  */
            trapnr = gdb_handlesig(cs, TARGET_SIGTRAP);
            if (trapnr) {
                info.si_signo = trapnr;
                info.si_errno = 0;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, trapnr, QEMU_SI_FAULT, &info);
            }
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        default:
            g_assert_not_reached();
        }
        process_pending_signals(env);
    }
}
|
|
|
|
|
|
|
|
#endif /* TARGET_HPPA */
|
|
|
|
|
2017-01-25 21:54:11 +03:00
|
|
|
#ifdef TARGET_XTENSA
|
|
|
|
|
|
|
|
/*
 * Common tail of the RFWO/RFWU instructions: restore the saved window
 * base (OWB) and return to the instruction that raised the window
 * exception (EPC1).
 */
static void xtensa_rfw(CPUXtensaState *env)
{
    xtensa_restore_owb(env);
    env->pc = env->sregs[EPC1];
}
|
|
|
|
|
|
|
|
/*
 * RFWU: return from a window-underflow handler.  Mark the current
 * window frame live again in WINDOW_START, then do the common return.
 */
static void xtensa_rfwu(CPUXtensaState *env)
{
    env->sregs[WINDOW_START] |= (1 << env->sregs[WINDOW_BASE]);
    xtensa_rfw(env);
}
|
|
|
|
|
|
|
|
/*
 * RFWO: return from a window-overflow handler.  Clear the spilled
 * frame's bit in WINDOW_START, then do the common return.
 */
static void xtensa_rfwo(CPUXtensaState *env)
{
    env->sregs[WINDOW_START] &= ~(1 << env->sregs[WINDOW_BASE]);
    xtensa_rfw(env);
}
|
|
|
|
|
|
|
|
/*
 * Window overflow for a call4 frame: spill a0-a3 to the 4-register
 * save area just below the caller's stack pointer (a5 of the rotated
 * window), per the Xtensa windowed ABI, then return with RFWO.
 */
static void xtensa_overflow4(CPUXtensaState *env)
{
    put_user_ual(env->regs[0], env->regs[5] - 16);
    put_user_ual(env->regs[1], env->regs[5] - 12);
    put_user_ual(env->regs[2], env->regs[5] - 8);
    put_user_ual(env->regs[3], env->regs[5] - 4);
    xtensa_rfwo(env);
}
|
|
|
|
|
|
|
|
/*
 * Window underflow for a call4 frame: reload a0-a3 from the 4-register
 * save area below the stack pointer (a5), the inverse of
 * xtensa_overflow4(), then return with RFWU.
 */
static void xtensa_underflow4(CPUXtensaState *env)
{
    get_user_ual(env->regs[0], env->regs[5] - 16);
    get_user_ual(env->regs[1], env->regs[5] - 12);
    get_user_ual(env->regs[2], env->regs[5] - 8);
    get_user_ual(env->regs[3], env->regs[5] - 4);
    xtensa_rfwu(env);
}
|
|
|
|
|
|
|
|
/*
 * Window overflow for a call8 frame: spill a0-a3 below the caller's SP
 * (a9) and a4-a7 into the extended save area below the grand-caller's
 * SP, which is fetched from the stack (the load into a0 from a1-12)
 * before a0 itself is overwritten.  The statement order is load/store
 * order mandated by the windowed ABI - do not reorder.
 */
static void xtensa_overflow8(CPUXtensaState *env)
{
    put_user_ual(env->regs[0], env->regs[9] - 16);
    /* Fetch the grand-caller's SP from the current frame's save slot.  */
    get_user_ual(env->regs[0], env->regs[1] - 12);
    put_user_ual(env->regs[1], env->regs[9] - 12);
    put_user_ual(env->regs[2], env->regs[9] - 8);
    put_user_ual(env->regs[3], env->regs[9] - 4);
    put_user_ual(env->regs[4], env->regs[0] - 32);
    put_user_ual(env->regs[5], env->regs[0] - 28);
    put_user_ual(env->regs[6], env->regs[0] - 24);
    put_user_ual(env->regs[7], env->regs[0] - 20);
    xtensa_rfwo(env);
}
|
|
|
|
|
|
|
|
/*
 * Window underflow for a call8 frame: reload a0-a3 from below the SP
 * (a9) and a4-a7 from the extended save area located via the
 * grand-caller's SP, which is first staged in a7 (loaded from a1-12)
 * and overwritten last.  The statement order is ABI-mandated - do not
 * reorder.
 */
static void xtensa_underflow8(CPUXtensaState *env)
{
    get_user_ual(env->regs[0], env->regs[9] - 16);
    get_user_ual(env->regs[1], env->regs[9] - 12);
    get_user_ual(env->regs[2], env->regs[9] - 8);
    /* Stage the grand-caller's SP in a7 before using it as a base.  */
    get_user_ual(env->regs[7], env->regs[1] - 12);
    get_user_ual(env->regs[3], env->regs[9] - 4);
    get_user_ual(env->regs[4], env->regs[7] - 32);
    get_user_ual(env->regs[5], env->regs[7] - 28);
    get_user_ual(env->regs[6], env->regs[7] - 24);
    get_user_ual(env->regs[7], env->regs[7] - 20);
    xtensa_rfwu(env);
}
|
|
|
|
|
|
|
|
/*
 * Window overflow for a call12 frame: spill a0-a3 below the caller's SP
 * (a13) and a4-a11 into the extended save area below the grand-caller's
 * SP, fetched into a0 from the stack before a0 is overwritten.  The
 * statement order is ABI-mandated - do not reorder.
 */
static void xtensa_overflow12(CPUXtensaState *env)
{
    put_user_ual(env->regs[0], env->regs[13] - 16);
    /* Fetch the grand-caller's SP from the current frame's save slot.  */
    get_user_ual(env->regs[0], env->regs[1] - 12);
    put_user_ual(env->regs[1], env->regs[13] - 12);
    put_user_ual(env->regs[2], env->regs[13] - 8);
    put_user_ual(env->regs[3], env->regs[13] - 4);
    put_user_ual(env->regs[4], env->regs[0] - 48);
    put_user_ual(env->regs[5], env->regs[0] - 44);
    put_user_ual(env->regs[6], env->regs[0] - 40);
    put_user_ual(env->regs[7], env->regs[0] - 36);
    put_user_ual(env->regs[8], env->regs[0] - 32);
    put_user_ual(env->regs[9], env->regs[0] - 28);
    put_user_ual(env->regs[10], env->regs[0] - 24);
    put_user_ual(env->regs[11], env->regs[0] - 20);
    xtensa_rfwo(env);
}
|
|
|
|
|
|
|
|
/*
 * Window underflow for a call12 frame: reload a0-a3 from below the SP
 * (a13) and a4-a11 from the extended save area located via the
 * grand-caller's SP, staged in a11 (loaded from a1-12) and overwritten
 * last.  The statement order is ABI-mandated - do not reorder.
 */
static void xtensa_underflow12(CPUXtensaState *env)
{
    get_user_ual(env->regs[0], env->regs[13] - 16);
    get_user_ual(env->regs[1], env->regs[13] - 12);
    get_user_ual(env->regs[2], env->regs[13] - 8);
    /* Stage the grand-caller's SP in a11 before using it as a base.  */
    get_user_ual(env->regs[11], env->regs[1] - 12);
    get_user_ual(env->regs[3], env->regs[13] - 4);
    get_user_ual(env->regs[4], env->regs[11] - 48);
    get_user_ual(env->regs[5], env->regs[11] - 44);
    get_user_ual(env->regs[6], env->regs[11] - 40);
    get_user_ual(env->regs[7], env->regs[11] - 36);
    get_user_ual(env->regs[8], env->regs[11] - 32);
    get_user_ual(env->regs[9], env->regs[11] - 28);
    get_user_ual(env->regs[10], env->regs[11] - 24);
    get_user_ual(env->regs[11], env->regs[11] - 20);
    xtensa_rfwu(env);
}
|
|
|
|
|
|
|
|
/*
 * Main user-mode execution loop for the Xtensa target.
 *
 * Runs translated code via cpu_exec() and dispatches each exit reason:
 * register-window overflow/underflow exceptions, EXC_USER causes
 * (illegal instructions, syscalls, MOVSP/alloca window rotation,
 * divide-by-zero, load/store protection faults), and gdbstub debug
 * stops.  Unknown traps abort.
 */
void cpu_loop(CPUXtensaState *env)
{
    CPUState *cs = CPU(xtensa_env_get_cpu(env));
    target_siginfo_t info;
    abi_ulong ret;
    int trapnr;

    while (1) {
        cpu_exec_start(cs);
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);
        process_queued_cpu_work(cs);

        /* Leave exception mode before handling the cause.  */
        env->sregs[PS] &= ~PS_EXCM;
        switch (trapnr) {
        case EXCP_INTERRUPT:
            break;

        case EXC_WINDOW_OVERFLOW4:
            xtensa_overflow4(env);
            break;
        case EXC_WINDOW_UNDERFLOW4:
            xtensa_underflow4(env);
            break;
        case EXC_WINDOW_OVERFLOW8:
            xtensa_overflow8(env);
            break;
        case EXC_WINDOW_UNDERFLOW8:
            xtensa_underflow8(env);
            break;
        case EXC_WINDOW_OVERFLOW12:
            xtensa_overflow12(env);
            break;
        case EXC_WINDOW_UNDERFLOW12:
            xtensa_underflow12(env);
            break;

        case EXC_USER:
            switch (env->sregs[EXCCAUSE]) {
            case ILLEGAL_INSTRUCTION_CAUSE:
            case PRIVILEGED_CAUSE:
                info.si_signo = TARGET_SIGILL;
                info.si_errno = 0;
                info.si_code =
                    env->sregs[EXCCAUSE] == ILLEGAL_INSTRUCTION_CAUSE ?
                    TARGET_ILL_ILLOPC : TARGET_ILL_PRVOPC;
                info._sifields._sigfault._addr = env->sregs[EPC1];
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                break;

            case SYSCALL_CAUSE:
                /* Advance past the 3-byte syscall instruction.  */
                env->pc += 3;
                ret = do_syscall(env, env->regs[2],
                                 env->regs[6], env->regs[3],
                                 env->regs[4], env->regs[5],
                                 env->regs[8], env->regs[9], 0, 0);
                switch (ret) {
                default:
                    env->regs[2] = ret;
                    break;

                case -TARGET_ERESTARTSYS:
                    /* Back up so the syscall is re-executed.  */
                    env->pc -= 3;
                    break;

                case -TARGET_QEMU_ESIGRETURN:
                    break;
                }
                break;

            case ALLOCA_CAUSE:
                /* MOVSP trap: save the old window base in PS.OWB, then
                   rotate back and refill the frame sized by a0's top
                   two bits (call4/call8/call12).  */
                env->sregs[PS] = deposit32(env->sregs[PS],
                                           PS_OWB_SHIFT,
                                           PS_OWB_LEN,
                                           env->sregs[WINDOW_BASE]);

                switch (env->regs[0] & 0xc0000000) {
                case 0x00000000:
                case 0x40000000:
                    xtensa_rotate_window(env, -1);
                    xtensa_underflow4(env);
                    break;

                case 0x80000000:
                    xtensa_rotate_window(env, -2);
                    xtensa_underflow8(env);
                    break;

                case 0xc0000000:
                    xtensa_rotate_window(env, -3);
                    xtensa_underflow12(env);
                    break;
                }
                break;

            case INTEGER_DIVIDE_BY_ZERO_CAUSE:
                info.si_signo = TARGET_SIGFPE;
                info.si_errno = 0;
                info.si_code = TARGET_FPE_INTDIV;
                info._sifields._sigfault._addr = env->sregs[EPC1];
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                break;

            case LOAD_PROHIBITED_CAUSE:
            case STORE_PROHIBITED_CAUSE:
                info.si_signo = TARGET_SIGSEGV;
                info.si_errno = 0;
                info.si_code = TARGET_SEGV_ACCERR;
                /* EXCVADDR holds the faulting data address.  */
                info._sifields._sigfault._addr = env->sregs[EXCVADDR];
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                break;

            default:
                fprintf(stderr, "exccause = %d\n", env->sregs[EXCCAUSE]);
                g_assert_not_reached();
            }
            break;
        case EXCP_DEBUG:
            /* gdb_handlesig() returns the signal to deliver, or 0 if
               gdb consumed the stop.  */
            trapnr = gdb_handlesig(cs, TARGET_SIGTRAP);
            if (trapnr) {
                info.si_signo = trapnr;
                info.si_errno = 0;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, trapnr, QEMU_SI_FAULT, &info);
            }
            break;
        case EXC_DEBUG:
        default:
            fprintf(stderr, "trapnr = %d\n", trapnr);
            g_assert_not_reached();
        }
        process_pending_signals(env);
    }
}
|
|
|
|
|
|
|
|
#endif /* TARGET_XTENSA */
|
|
|
|
|
2018-02-13 16:22:46 +03:00
|
|
|
/* The CPU executing on the current host thread (one guest CPU per thread). */
__thread CPUState *thread_cpu;
|
2003-06-25 20:18:50 +04:00
|
|
|
|
2016-08-02 20:27:39 +03:00
|
|
|
/* Return true iff @cpu is the guest CPU bound to the calling host thread. */
bool qemu_cpu_is_self(CPUState *cpu)
{
    CPUState *self = thread_cpu;

    return self == cpu;
}
|
|
|
|
|
|
|
|
/*
 * Interrupt a CPU's execution loop.  In user-mode emulation there is no
 * halted state to wake from, so requesting an exit from the TCG
 * execution loop is sufficient.
 */
void qemu_cpu_kick(CPUState *cpu)
{
    cpu_exit(cpu);
}
|
|
|
|
|
2009-04-07 10:57:11 +04:00
|
|
|
/*
 * Record the host thread id in @ts.  Lazy: only the first call for a
 * given TaskState actually queries the kernel; later calls are no-ops.
 */
void task_settid(TaskState *ts)
{
    if (ts->ts_tid != 0) {
        return;
    }
    ts->ts_tid = (pid_t)syscall(SYS_gettid);
}
|
|
|
|
|
|
|
|
/*
 * Bring all other guest threads to a stop, leaving the caller as the
 * single running thread (used e.g. around fork/exec).
 */
void stop_all_tasks(void)
{
    /*
     * We trust that when using NPTL, start_exclusive()
     * handles thread stopping correctly.
     */
    start_exclusive();
}
|
|
|
|
|
2008-06-09 18:02:50 +04:00
|
|
|
/* Assumes contents are already zeroed.  */
/* Mark a (zero-initialized) TaskState slot as in use. */
void init_task_state(TaskState *ts)
{
    ts->used = 1;
}
|
2011-08-06 10:54:12 +04:00
|
|
|
|
2013-07-02 19:43:21 +04:00
|
|
|
/*
 * Duplicate a CPU for a newly cloned guest thread.
 *
 * Creates a fresh CPU of the global cpu_type, resets it, copies the
 * architectural register state wholesale from @env, and clones all
 * break-/watchpoints from the source CPU.  Returns the new CPU's
 * CPUArchState.
 */
CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUState *new_cpu = cpu_create(cpu_type);
    CPUArchState *new_env = new_cpu->env_ptr;
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;

    /* Reset non arch specific state */
    cpu_reset(new_cpu);

    /* Raw copy of the whole arch state; done after reset so the reset
     * values are fully overwritten by the parent's state. */
    memcpy(new_env, env, sizeof(CPUArchState));

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_cpu->breakpoints);
    QTAILQ_INIT(&new_cpu->watchpoints);
    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        cpu_watchpoint_insert(new_cpu, wp->vaddr, wp->len, wp->flags, NULL);
    }

    return new_env;
}
|
|
|
|
|
2011-08-06 10:54:12 +04:00
|
|
|
/* -h/-help: print usage and exit successfully.  @arg is unused. */
static void handle_arg_help(const char *arg)
{
    usage(EXIT_SUCCESS);
}
|
|
|
|
|
|
|
|
/*
 * -d <items>: enable the requested logging categories.  An unknown
 * item list prints the available categories and exits with failure.
 */
static void handle_arg_log(const char *arg)
{
    int log_flags = qemu_str_to_log_mask(arg);

    if (log_flags == 0) {
        qemu_print_log_usage(stdout);
        exit(EXIT_FAILURE);
    }
    qemu_log_needs_buffers();
    qemu_set_log(log_flags);
}
|
|
|
|
|
2017-10-17 13:35:14 +03:00
|
|
|
/* -dfilter <ranges>: restrict -d logging to the given address ranges. */
static void handle_arg_dfilter(const char *arg)
{
    qemu_set_dfilter_ranges(arg, NULL);
}
|
|
|
|
|
2011-11-08 13:46:44 +04:00
|
|
|
/* -D <logfile>: redirect -d logging to a file (aborts on failure). */
static void handle_arg_log_filename(const char *arg)
{
    qemu_set_log_filename(arg, &error_fatal);
}
|
|
|
|
|
2011-08-06 10:54:12 +04:00
|
|
|
static void handle_arg_set_env(const char *arg)
|
|
|
|
{
|
|
|
|
char *r, *p, *token;
|
|
|
|
r = p = strdup(arg);
|
|
|
|
while ((token = strsep(&p, ",")) != NULL) {
|
|
|
|
if (envlist_setenv(envlist, token) != 0) {
|
2015-09-28 16:12:16 +03:00
|
|
|
usage(EXIT_FAILURE);
|
2011-08-06 10:54:12 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
free(r);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void handle_arg_unset_env(const char *arg)
|
|
|
|
{
|
|
|
|
char *r, *p, *token;
|
|
|
|
r = p = strdup(arg);
|
|
|
|
while ((token = strsep(&p, ",")) != NULL) {
|
|
|
|
if (envlist_unsetenv(envlist, token) != 0) {
|
2015-09-28 16:12:16 +03:00
|
|
|
usage(EXIT_FAILURE);
|
2011-08-06 10:54:12 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
free(r);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* -0 <argv0>: override the argv[0] seen by the target process. */
static void handle_arg_argv0(const char *arg)
{
    argv0 = strdup(arg);
}
|
|
|
|
|
|
|
|
static void handle_arg_stack_size(const char *arg)
|
|
|
|
{
|
|
|
|
char *p;
|
|
|
|
guest_stack_size = strtoul(arg, &p, 0);
|
|
|
|
if (guest_stack_size == 0) {
|
2015-09-28 16:12:16 +03:00
|
|
|
usage(EXIT_FAILURE);
|
2011-08-06 10:54:12 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
if (*p == 'M') {
|
|
|
|
guest_stack_size *= 1024 * 1024;
|
|
|
|
} else if (*p == 'k' || *p == 'K') {
|
|
|
|
guest_stack_size *= 1024;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* -L <path>: prefix under which the ELF interpreter and libs are found. */
static void handle_arg_ld_prefix(const char *arg)
{
    interp_prefix = strdup(arg);
}
|
|
|
|
|
|
|
|
static void handle_arg_pagesize(const char *arg)
|
|
|
|
{
|
|
|
|
qemu_host_page_size = atoi(arg);
|
|
|
|
if (qemu_host_page_size == 0 ||
|
|
|
|
(qemu_host_page_size & (qemu_host_page_size - 1)) != 0) {
|
|
|
|
fprintf(stderr, "page size must be a power of two\n");
|
2015-09-28 16:12:16 +03:00
|
|
|
exit(EXIT_FAILURE);
|
2011-08-06 10:54:12 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-10-14 19:18:17 +04:00
|
|
|
/*
 * -seed <n>: seed the libc PRNG used for guest-visible randomness.
 * The seed must parse as an unsigned integer no larger than UINT_MAX.
 */
static void handle_arg_randseed(const char *arg)
{
    unsigned long long value;

    if (parse_uint_full(arg, &value, 0) != 0 || value > UINT_MAX) {
        fprintf(stderr, "Invalid seed number: %s\n", arg);
        exit(EXIT_FAILURE);
    }
    srand((unsigned int)value);
}
|
|
|
|
|
2011-08-06 10:54:12 +04:00
|
|
|
/*
 * -g <port>: TCP port on which to start the gdbstub before running the
 * guest (0, the default, means disabled).
 * NOTE(review): atoi() returns 0 for non-numeric input, which silently
 * disables the gdbstub instead of reporting an error -- consider
 * validating with strtol().
 */
static void handle_arg_gdb(const char *arg)
{
    gdbstub_port = atoi(arg);
}
|
|
|
|
|
|
|
|
/* -r <uname>: fake kernel release string reported to the guest. */
static void handle_arg_uname(const char *arg)
{
    qemu_uname_release = strdup(arg);
}
|
|
|
|
|
|
|
|
/*
 * -cpu <model>: select the CPU model.  "-cpu help" (or an empty model)
 * lists the available models where the target provides cpu_list(),
 * then exits.
 */
static void handle_arg_cpu(const char *arg)
{
    cpu_model = strdup(arg);
    if (cpu_model == NULL || is_help_option(cpu_model)) {
        /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
        cpu_list(stdout, &fprintf);
#endif
        exit(EXIT_FAILURE);
    }
}
|
|
|
|
|
|
|
|
static void handle_arg_guest_base(const char *arg)
|
|
|
|
{
|
|
|
|
guest_base = strtol(arg, NULL, 0);
|
|
|
|
have_guest_base = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * -R <size>: pre-reserve that much host virtual address space for the
 * guest.  Accepts an optional 'k'/'K', 'M' or 'G' suffix; any other
 * trailing text is rejected.  Overflow of the shifted value and sizes
 * beyond MAX_RESERVED_VA are rejected as well.
 */
static void handle_arg_reserved_va(const char *arg)
{
    char *p;
    int shift = 0;
    reserved_va = strtoul(arg, &p, 0);
    switch (*p) {
    case 'k':
    case 'K':
        shift = 10;
        break;
    case 'M':
        shift = 20;
        break;
    case 'G':
        shift = 30;
        break;
    }
    if (shift) {
        /* Keep the pre-shift value so the overflow check below can
         * verify the shift was lossless. */
        unsigned long unshifted = reserved_va;
        p++;
        reserved_va <<= shift;
        if (reserved_va >> shift != unshifted
            || (MAX_RESERVED_VA && reserved_va > MAX_RESERVED_VA)) {
            fprintf(stderr, "Reserved virtual address too big\n");
            exit(EXIT_FAILURE);
        }
    }
    if (*p) {
        fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
        exit(EXIT_FAILURE);
    }
}
|
|
|
|
|
|
|
|
/* -singlestep: translate one guest instruction per TB.  @arg is unused. */
static void handle_arg_singlestep(const char *arg)
{
    singlestep = 1;
}
|
|
|
|
|
|
|
|
/* -strace: log guest system calls, strace(1)-style.  @arg is unused. */
static void handle_arg_strace(const char *arg)
{
    do_strace = 1;
}
|
|
|
|
|
|
|
|
/* -version: print version/copyright banner and exit.  @arg is unused. */
static void handle_arg_version(const char *arg)
{
    printf("qemu-" TARGET_NAME " version " QEMU_FULL_VERSION
           "\n" QEMU_COPYRIGHT "\n");
    exit(EXIT_SUCCESS);
}
|
|
|
|
|
2016-07-15 20:08:38 +03:00
|
|
|
/* Trace output file selected by -trace; consumed later by trace_init_file(). */
static char *trace_file;
/* -trace <opts>: parse trace options; may be given multiple times
 * (the last file= wins, hence the g_free of any previous value). */
static void handle_arg_trace(const char *arg)
{
    g_free(trace_file);
    trace_file = trace_opt_parse(arg);
}
|
|
|
|
|
2011-08-06 10:54:12 +04:00
|
|
|
/* One command-line option understood by parse_args()/usage(). */
struct qemu_argument {
    const char *argv;       /* option name, without the leading '-' */
    const char *env;        /* equivalent environment variable ("" if none) */
    bool has_arg;           /* true if the option consumes a value */
    void (*handle_opt)(const char *arg);    /* callback; arg NULL if !has_arg */
    const char *example;    /* value placeholder shown in -h output */
    const char *help;       /* one-line description for -h output */
};
|
|
|
|
|
2012-05-21 23:56:19 +04:00
|
|
|
/* All recognised options; terminated by an all-NULL sentinel entry,
 * which parse_args() and usage() rely on to end their loops. */
static const struct qemu_argument arg_table[] = {
    {"h",          "",                 false, handle_arg_help,
     "",           "print this help"},
    {"help",       "",                 false, handle_arg_help,
     "",           ""},
    {"g",          "QEMU_GDB",         true,  handle_arg_gdb,
     "port",       "wait gdb connection to 'port'"},
    {"L",          "QEMU_LD_PREFIX",   true,  handle_arg_ld_prefix,
     "path",       "set the elf interpreter prefix to 'path'"},
    {"s",          "QEMU_STACK_SIZE",  true,  handle_arg_stack_size,
     "size",       "set the stack size to 'size' bytes"},
    {"cpu",        "QEMU_CPU",         true,  handle_arg_cpu,
     "model",      "select CPU (-cpu help for list)"},
    {"E",          "QEMU_SET_ENV",     true,  handle_arg_set_env,
     "var=value",  "sets targets environment variable (see below)"},
    {"U",          "QEMU_UNSET_ENV",   true,  handle_arg_unset_env,
     "var",        "unsets targets environment variable (see below)"},
    {"0",          "QEMU_ARGV0",       true,  handle_arg_argv0,
     "argv0",      "forces target process argv[0] to be 'argv0'"},
    {"r",          "QEMU_UNAME",       true,  handle_arg_uname,
     "uname",      "set qemu uname release string to 'uname'"},
    {"B",          "QEMU_GUEST_BASE",  true,  handle_arg_guest_base,
     "address",    "set guest_base address to 'address'"},
    {"R",          "QEMU_RESERVED_VA", true,  handle_arg_reserved_va,
     "size",       "reserve 'size' bytes for guest virtual address space"},
    {"d",          "QEMU_LOG",         true,  handle_arg_log,
     "item[,...]", "enable logging of specified items "
     "(use '-d help' for a list of items)"},
    {"dfilter",    "QEMU_DFILTER",     true,  handle_arg_dfilter,
     "range[,...]","filter logging based on address range"},
    {"D",          "QEMU_LOG_FILENAME", true, handle_arg_log_filename,
     "logfile",    "write logs to 'logfile' (default stderr)"},
    {"p",          "QEMU_PAGESIZE",    true,  handle_arg_pagesize,
     "pagesize",   "set the host page size to 'pagesize'"},
    {"singlestep", "QEMU_SINGLESTEP",  false, handle_arg_singlestep,
     "",           "run in singlestep mode"},
    {"strace",     "QEMU_STRACE",      false, handle_arg_strace,
     "",           "log system calls"},
    {"seed",       "QEMU_RAND_SEED",   true,  handle_arg_randseed,
     "",           "Seed for pseudo-random number generator"},
    {"trace",      "QEMU_TRACE",       true,  handle_arg_trace,
     "",           "[[enable=]<pattern>][,events=<file>][,file=<file>]"},
    {"version",    "QEMU_VERSION",     false, handle_arg_version,
     "",           "display version information and exit"},
    {NULL, NULL, false, NULL, NULL, NULL}
};
|
|
|
|
|
2015-07-06 21:03:38 +03:00
|
|
|
/*
 * Print the full help text (option table, defaults, environment-variable
 * notes) and exit with @exitcode.
 */
static void usage(int exitcode)
{
    const struct qemu_argument *arginfo;
    int maxarglen;
    int maxenvlen;

    printf("usage: qemu-" TARGET_NAME " [options] program [arguments...]\n"
           "Linux CPU emulator (compiled for " TARGET_NAME " emulation)\n"
           "\n"
           "Options and associated environment variables:\n"
           "\n");

    /* Calculate column widths. We must always have at least enough space
     * for the column header.
     */
    maxarglen = strlen("Argument");
    maxenvlen = strlen("Env-variable");

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        int arglen = strlen(arginfo->argv);
        if (arginfo->has_arg) {
            /* +1 for the space between option name and its example value */
            arglen += strlen(arginfo->example) + 1;
        }
        if (strlen(arginfo->env) > maxenvlen) {
            maxenvlen = strlen(arginfo->env);
        }
        if (arglen > maxarglen) {
            maxarglen = arglen;
        }
    }

    printf("%-*s %-*s Description\n", maxarglen+1, "Argument",
            maxenvlen, "Env-variable");

    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        if (arginfo->has_arg) {
            printf("-%s %-*s %-*s %s\n", arginfo->argv,
                   (int)(maxarglen - strlen(arginfo->argv) - 1),
                   arginfo->example, maxenvlen, arginfo->env, arginfo->help);
        } else {
            printf("-%-*s %-*s %s\n", maxarglen, arginfo->argv,
                    maxenvlen, arginfo->env,
                    arginfo->help);
        }
    }

    printf("\n"
           "Defaults:\n"
           "QEMU_LD_PREFIX  = %s\n"
           "QEMU_STACK_SIZE = %ld byte\n",
           interp_prefix,
           guest_stack_size);

    printf("\n"
           "You can use -E and -U options or the QEMU_SET_ENV and\n"
           "QEMU_UNSET_ENV environment variables to set and unset\n"
           "environment variables for the target process.\n"
           "It is possible to provide several variables by separating them\n"
           "by commas in getsubopt(3) style. Additionally it is possible to\n"
           "provide the -E and -U options multiple times.\n"
           "The following lines are equivalent:\n"
           "    -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
           "    -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
           "    QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
           "Note that if you provide several changes to a single variable\n"
           "the last change will stay in effect.\n"
           "\n"
           QEMU_HELP_BOTTOM "\n");

    exit(exitcode);
}
|
|
|
|
|
|
|
|
/*
 * Process environment-variable options, then command-line options, in
 * that order (so command-line flags override the environment).  Stops
 * at the first non-option argument or at "--".  On success sets the
 * globals filename/exec_path to the guest program and returns its index
 * in argv; exits on any error.
 */
static int parse_args(int argc, char **argv)
{
    const char *r;
    int optind;
    const struct qemu_argument *arginfo;

    /* First pass: apply options given via their environment variables. */
    for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
        if (arginfo->env == NULL) {
            continue;
        }

        r = getenv(arginfo->env);
        if (r != NULL) {
            arginfo->handle_opt(r);
        }
    }

    /* Second pass: the command line itself. */
    optind = 1;
    for (;;) {
        if (optind >= argc) {
            break;
        }
        r = argv[optind];
        if (r[0] != '-') {
            break;
        }
        optind++;
        r++;
        if (!strcmp(r, "-")) {
            /* "--" explicitly ends option processing. */
            break;
        }
        /* Treat --foo the same as -foo.  */
        if (r[0] == '-') {
            r++;
        }

        for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
            if (!strcmp(r, arginfo->argv)) {
                if (arginfo->has_arg) {
                    if (optind >= argc) {
                        (void) fprintf(stderr,
                            "qemu: missing argument for option '%s'\n", r);
                        exit(EXIT_FAILURE);
                    }
                    arginfo->handle_opt(argv[optind]);
                    optind++;
                } else {
                    arginfo->handle_opt(NULL);
                }
                break;
            }
        }

        /* no option matched the current argv */
        /* (If the inner loop ran to completion, arginfo now points at the
         * all-NULL sentinel entry of arg_table.) */
        if (arginfo->handle_opt == NULL) {
            (void) fprintf(stderr, "qemu: unknown option '%s'\n", r);
            exit(EXIT_FAILURE);
        }
    }

    if (optind >= argc) {
        (void) fprintf(stderr, "qemu: no user program specified\n");
        exit(EXIT_FAILURE);
    }

    filename = argv[optind];
    exec_path = argv[optind];

    return optind;
}
|
|
|
|
|
2008-12-10 22:18:40 +03:00
|
|
|
/*
 * qemu-user entry point: initialize QEMU subsystems, parse options,
 * load the guest ELF binary, set up the initial CPU register state,
 * optionally start a gdbstub, and enter cpu_loop() (which never
 * returns).
 */
int main(int argc, char **argv, char **envp)
{
    struct target_pt_regs regs1, *regs = &regs1;
    struct image_info info1, *info = &info1;
    struct linux_binprm bprm;
    TaskState *ts;
    CPUArchState *env;
    CPUState *cpu;
    int optind;
    char **target_environ, **wrk;
    char **target_argv;
    int target_argc;
    int i;
    int ret;
    int execfd;

    module_call_init(MODULE_INIT_TRACE);
    qemu_init_cpu_list();
    module_call_init(MODULE_INIT_QOM);

    envlist = envlist_create();

    /* add current environment into the list */
    for (wrk = environ; *wrk != NULL; wrk++) {
        (void) envlist_setenv(envlist, *wrk);
    }

    /* Read the stack limit from the kernel.  If it's "unlimited",
       then we can do little else besides use the default.  */
    {
        struct rlimit lim;
        if (getrlimit(RLIMIT_STACK, &lim) == 0
            && lim.rlim_cur != RLIM_INFINITY
            && lim.rlim_cur == (target_long)lim.rlim_cur) {
            guest_stack_size = lim.rlim_cur;
        }
    }

    cpu_model = NULL;

    /* Default PRNG seed; may be overridden by -seed / QEMU_RAND_SEED. */
    srand(time(NULL));

    qemu_add_opts(&qemu_trace_opts);

    optind = parse_args(argc, argv);

    if (!trace_init_backends()) {
        exit(1);
    }
    trace_init_file(trace_file);

    /* Zero out regs */
    memset(regs, 0, sizeof(struct target_pt_regs));

    /* Zero out image_info */
    memset(info, 0, sizeof(struct image_info));

    memset(&bprm, 0, sizeof (bprm));

    /* Scan interp_prefix dir for replacement files. */
    init_paths(interp_prefix);

    init_qemu_uname_release();

    /* When re-exec'd (e.g. by binfmt_misc) the kernel passes the open
     * executable fd via the auxiliary vector; otherwise open it here. */
    execfd = qemu_getauxval(AT_EXECFD);
    if (execfd == 0) {
        execfd = open(filename, O_RDONLY);
        if (execfd < 0) {
            printf("Error while loading %s: %s\n", filename, strerror(errno));
            _exit(EXIT_FAILURE);
        }
    }

    if (cpu_model == NULL) {
        cpu_model = cpu_get_model(get_elf_eflags(execfd));
    }
    cpu_type = parse_cpu_model(cpu_model);

    tcg_exec_init(0);
    /* NOTE: we need to init the CPU at this stage to get
       qemu_host_page_size */

    cpu = cpu_create(cpu_type);
    env = cpu->env_ptr;
    cpu_reset(cpu);

    thread_cpu = cpu;

    if (getenv("QEMU_STRACE")) {
        do_strace = 1;
    }

    if (getenv("QEMU_RAND_SEED")) {
        handle_arg_randseed(getenv("QEMU_RAND_SEED"));
    }

    target_environ = envlist_to_environ(envlist, NULL);
    envlist_free(envlist);

    /*
     * Now that page sizes are configured in cpu_init() we can do
     * proper page alignment for guest_base.
     */
    guest_base = HOST_PAGE_ALIGN(guest_base);

    if (reserved_va || have_guest_base) {
        guest_base = init_guest_space(guest_base, reserved_va, 0,
                                      have_guest_base);
        if (guest_base == (unsigned long)-1) {
            fprintf(stderr, "Unable to reserve 0x%lx bytes of virtual address "
                    "space for use as guest address space (check your virtual "
                    "memory ulimit setting or reserve less using -R option)\n",
                    reserved_va);
            exit(EXIT_FAILURE);
        }

        if (reserved_va) {
            mmap_next_start = reserved_va;
        }
    }

    /*
     * Read in mmap_min_addr kernel parameter.  This value is used
     * When loading the ELF image to determine whether guest_base
     * is needed.  It is also used in mmap_find_vma.
     */
    {
        FILE *fp;

        if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
            unsigned long tmp;
            if (fscanf(fp, "%lu", &tmp) == 1) {
                mmap_min_addr = tmp;
                qemu_log_mask(CPU_LOG_PAGE, "host mmap_min_addr=0x%lx\n", mmap_min_addr);
            }
            fclose(fp);
        }
    }

    /*
     * Prepare copy of argv vector for target.
     */
    target_argc = argc - optind;
    target_argv = calloc(target_argc + 1, sizeof (char *));
    if (target_argv == NULL) {
        (void) fprintf(stderr, "Unable to allocate memory for target_argv\n");
        exit(EXIT_FAILURE);
    }

    /*
     * If argv0 is specified (using '-0' switch) we replace
     * argv[0] pointer with the given one.
     */
    i = 0;
    if (argv0 != NULL) {
        target_argv[i++] = strdup(argv0);
    }
    for (; i < target_argc; i++) {
        target_argv[i] = strdup(argv[optind + i]);
    }
    target_argv[target_argc] = NULL;

    ts = g_new0(TaskState, 1);
    init_task_state(ts);
    /* build Task State */
    ts->info = info;
    ts->bprm = &bprm;
    cpu->opaque = ts;
    task_settid(ts);

    ret = loader_exec(execfd, filename, target_argv, target_environ, regs,
        info, &bprm);
    if (ret != 0) {
        printf("Error while loading %s: %s\n", filename, strerror(-ret));
        _exit(EXIT_FAILURE);
    }

    /* The loader has copied the environment into the guest image;
     * the host-side copies are no longer needed. */
    for (wrk = target_environ; *wrk; wrk++) {
        g_free(*wrk);
    }

    g_free(target_environ);

    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        qemu_log("guest_base  0x%lx\n", guest_base);
        log_page_dump();

        qemu_log("start_brk   0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
        qemu_log("end_code    0x" TARGET_ABI_FMT_lx "\n", info->end_code);
        qemu_log("start_code  0x" TARGET_ABI_FMT_lx "\n", info->start_code);
        qemu_log("start_data  0x" TARGET_ABI_FMT_lx "\n", info->start_data);
        qemu_log("end_data    0x" TARGET_ABI_FMT_lx "\n", info->end_data);
        qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n", info->start_stack);
        qemu_log("brk         0x" TARGET_ABI_FMT_lx "\n", info->brk);
        qemu_log("entry       0x" TARGET_ABI_FMT_lx "\n", info->entry);
        qemu_log("argv_start  0x" TARGET_ABI_FMT_lx "\n", info->arg_start);
        qemu_log("env_start   0x" TARGET_ABI_FMT_lx "\n",
                 info->arg_end + (abi_ulong)sizeof(abi_ulong));
        qemu_log("auxv_start  0x" TARGET_ABI_FMT_lx "\n", info->saved_auxv);
    }

    target_set_brk(info->brk);
    syscall_init();
    signal_init();

    /* Now that we've loaded the binary, GUEST_BASE is fixed.  Delay
       generating the prologue until now so that the prologue can take
       the real value of GUEST_BASE into account.  */
    tcg_prologue_init(tcg_ctx);
    tcg_region_init();

    target_cpu_copy_regs(env, regs);

    /* Per-target initial register setup not covered by
     * target_cpu_copy_regs() above. */
#if defined(TARGET_RISCV)
    {
        env->pc = regs->sepc;
        env->gpr[xSP] = regs->sp;
    }
#elif defined(TARGET_TILEGX)
    {
        int i;
        for (i = 0; i < TILEGX_R_COUNT; i++) {
            env->regs[i] = regs->regs[i];
        }
        for (i = 0; i < TILEGX_SPR_COUNT; i++) {
            env->spregs[i] = 0;
        }
        env->pc = regs->pc;
    }
#elif defined(TARGET_HPPA)
    {
        int i;
        for (i = 1; i < 32; i++) {
            env->gr[i] = regs->gr[i];
        }
        env->iaoq_f = regs->iaoq[0];
        env->iaoq_b = regs->iaoq[1];
    }
#elif defined(TARGET_XTENSA)
    {
        int i;
        for (i = 0; i < 16; ++i) {
            env->regs[i] = regs->areg[i];
        }
        env->sregs[WINDOW_START] = regs->windowstart;
        env->pc = regs->pc;
    }
#endif

    if (gdbstub_port) {
        if (gdbserver_start(gdbstub_port) < 0) {
            fprintf(stderr, "qemu: could not open gdbserver on port %d\n",
                    gdbstub_port);
            exit(EXIT_FAILURE);
        }
        gdb_handlesig(cpu, 0);
    }
    cpu_loop(env);
    /* never exits */
    return 0;
}
|