Fix translation race condition for user-only.
Fix tcg/i386 encoding for VPSLLVQ, VPSRLVQ.
Fix tcg/arm tcg_out_vec_op signature.
Fix tcg/ppc (32bit) build with clang.
Remove duplicate TCG_KICK_PERIOD definition.
Remove unused tcg_global_reg_new.
Restrict cpu_exec_interrupt and its callees to sysemu.
Cleanups for tcg/arm.

-----BEGIN PGP SIGNATURE-----

iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmFA9+MdHHJpY2hhcmQu
aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV/XYwgAi4mIVpxH+5xuzIKR
ZNFLKv2oB48C/tWd8klRkT1jxewc+T+eE1nBC3s4BbRYf0UNTk+kzXMOfn4GcGdt
6uVEglCvR07ubqIhfrfT7cb23FsY0OytavaiT2LZAeAdlxKHj++LfwFJ166FPAmN
H60QhAPG3aTZdXdFasLzI/y0spV7zepZkUiHmtFdgrXcoiidXbWgxEcOFVQx5Rfr
lIdy8VhZjNDsA0VtISaElePGsmNUjGLgAod3RVtDNQITit2Kn0BhNVk3VXRo6JKv
GJygeQ0v9wTpDIEvyXDjAlMKCfBWgeL3832BK0bQA+scmxidrSVvOpkgZAD4gLVr
uNBpnA==
=xBx2
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/rth-gitlab/tags/pull-tcg-20210914-4' into staging

Fix translation race condition for user-only.
Fix tcg/i386 encoding for VPSLLVQ, VPSRLVQ.
Fix tcg/arm tcg_out_vec_op signature.
Fix tcg/ppc (32bit) build with clang.
Remove duplicate TCG_KICK_PERIOD definition.
Remove unused tcg_global_reg_new.
Restrict cpu_exec_interrupt and its callees to sysemu.
Cleanups for tcg/arm.

# gpg: Signature made Tue 14 Sep 2021 20:28:35 BST
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* remotes/rth-gitlab/tags/pull-tcg-20210914-4: (43 commits)
  tcg/arm: More use of the TCGReg enum
  tcg/arm: More use of the ARMInsn enum
  tcg/arm: Give enum arm_cond_code_e a typedef and use it
  tcg/arm: Drop inline markers
  tcg/arm: Simplify usage of encode_imm
  tcg/arm: Split out tcg_out_ldstm
  tcg/arm: Support armv4t in tcg_out_goto and tcg_out_call
  tcg/arm: Simplify use_armv5t_instructions
  tcg/arm: Standardize on tcg_out_<branch>_{reg,imm}
  tcg/arm: Remove fallback definition of __ARM_ARCH
  accel/tcg/user-exec: Fix read-modify-write of code on s390 hosts
  user: Remove cpu_get_pic_interrupt() stubs
  accel/tcg: Restrict TCGCPUOps::cpu_exec_interrupt() to sysemu
  target/xtensa: Restrict cpu_exec_interrupt() handler to sysemu
  target/rx: Restrict cpu_exec_interrupt() handler to sysemu
  target/sparc: Restrict cpu_exec_interrupt() handler to sysemu
  target/sh4: Restrict cpu_exec_interrupt() handler to sysemu
  target/riscv: Restrict cpu_exec_interrupt() handler to sysemu
  target/ppc: Restrict cpu_exec_interrupt() handler to sysemu
  target/openrisc: Restrict cpu_exec_interrupt() handler to sysemu
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 0b6206b9c6
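The headline fix ("translation race condition for user-only") works by write-protecting guest code pages while their instructions are being translated. A condensed sketch of the new fetch path, assuming the page_protect()/page_protect_end machinery added in the translate-all.c and translator.c hunks below (fetch_insn_protected is a hypothetical name, not a function in this series):

    static uint32_t fetch_insn_protected(CPUArchState *env,
                                         DisasContextBase *db, abi_ptr pc)
    {
        abi_ptr end = pc + sizeof(uint32_t) - 1;

        if (end > db->page_protect_end) {
            /* Make the page read-only under mmap_lock(); a concurrent
             * writer now faults into page_unprotect() and blocks. */
            db->page_protect_end = end | ~TARGET_PAGE_MASK;
            page_protect(end);
        }
        return cpu_ldl_code(env, pc);
    }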
@@ -651,8 +651,8 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
loop */
#if defined(TARGET_I386)
CPUClass *cc = CPU_GET_CLASS(cpu);
cc->tcg_ops->do_interrupt(cpu);
#endif
cc->tcg_ops->fake_user_interrupt(cpu);
#endif /* TARGET_I386 */
*ret = cpu->exception_index;
cpu->exception_index = -1;
return true;
@@ -685,6 +685,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
return false;
}

#ifndef CONFIG_USER_ONLY
/*
* CPU_INTERRUPT_POLL is a virtual event which gets converted into a
* "real" interrupt event later. It does not need to be recorded for
@@ -698,12 +699,11 @@ static inline bool need_replay_interrupt(int interrupt_request)
return true;
#endif
}
#endif /* !CONFIG_USER_ONLY */

static inline bool cpu_handle_interrupt(CPUState *cpu,
TranslationBlock **last_tb)
{
CPUClass *cc = CPU_GET_CLASS(cpu);

/* Clear the interrupt flag now since we're processing
* cpu->interrupt_request and cpu->exit_request.
* Ensure zeroing happens before reading cpu->exit_request or
@@ -725,6 +725,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
qemu_mutex_unlock_iothread();
return true;
}
#if !defined(CONFIG_USER_ONLY)
if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
/* Do nothing */
} else if (interrupt_request & CPU_INTERRUPT_HALT) {
@@ -753,12 +754,14 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
qemu_mutex_unlock_iothread();
return true;
}
#endif
#endif /* !TARGET_I386 */
/* The target hook has 3 exit conditions:
False when the interrupt isn't processed,
True when it is, and we should restart on a new TB,
and via longjmp via cpu_loop_exit. */
else {
CPUClass *cc = CPU_GET_CLASS(cpu);

if (cc->tcg_ops->cpu_exec_interrupt &&
cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
if (need_replay_interrupt(interrupt_request)) {
@@ -777,6 +780,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
* reload the 'interrupt_request' value */
interrupt_request = cpu->interrupt_request;
}
#endif /* !CONFIG_USER_ONLY */
if (interrupt_request & CPU_INTERRUPT_EXITTB) {
cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
/* ensure that no TB jump will be modified as

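The "3 exit conditions" comment above is the contract every per-target cpu_exec_interrupt hook now implements under sysemu. A minimal conforming handler, sketched after the hppa and mips hunks later in this diff (the FOO names are placeholders):

    static bool foo_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
    {
        if (interrupt_request & CPU_INTERRUPT_HARD) {
            FooCPU *cpu = FOO_CPU(cs);

            if (foo_interrupts_enabled(&cpu->env)) {
                cs->exception_index = EXCP_FOO_IRQ;
                foo_cpu_do_interrupt(cs);   /* take the interrupt now... */
                return true;                /* ...and restart on a new TB */
            }
        }
        return false;                       /* interrupt not processed */
    }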
@@ -60,8 +60,6 @@ void rr_kick_vcpu_thread(CPUState *unused)
static QEMUTimer *rr_kick_vcpu_timer;
static CPUState *rr_current_cpu;

#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)

static inline int64_t rr_next_kick_time(void)
{
return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;

@@ -1297,31 +1297,8 @@ static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
if (p->flags & PAGE_WRITE) {
target_ulong addr;
PageDesc *p2;
int prot;

/* force the host page as non writable (writes will have a
page fault + mprotect overhead) */
page_addr &= qemu_host_page_mask;
prot = 0;
for (addr = page_addr; addr < page_addr + qemu_host_page_size;
addr += TARGET_PAGE_SIZE) {

p2 = page_find(addr >> TARGET_PAGE_BITS);
if (!p2) {
continue;
}
prot |= p2->flags;
p2->flags &= ~PAGE_WRITE;
}
mprotect(g2h_untagged(page_addr), qemu_host_page_size,
(prot & PAGE_BITS) & ~PAGE_WRITE);
if (DEBUG_TB_INVALIDATE_GATE) {
printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
}
}
/* translator_loop() must have made all TB pages non-writable */
assert(!(p->flags & PAGE_WRITE));
#else
/* if some code is already present, then the pages are already
protected. So we handle the case where only the first TB is
@@ -2394,6 +2371,38 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
return 0;
}

void page_protect(tb_page_addr_t page_addr)
{
target_ulong addr;
PageDesc *p;
int prot;

p = page_find(page_addr >> TARGET_PAGE_BITS);
if (p && (p->flags & PAGE_WRITE)) {
/*
* Force the host page as non writable (writes will have a page fault +
* mprotect overhead).
*/
page_addr &= qemu_host_page_mask;
prot = 0;
for (addr = page_addr; addr < page_addr + qemu_host_page_size;
addr += TARGET_PAGE_SIZE) {

p = page_find(addr >> TARGET_PAGE_BITS);
if (!p) {
continue;
}
prot |= p->flags;
p->flags &= ~PAGE_WRITE;
}
mprotect(g2h_untagged(page_addr), qemu_host_page_size,
(prot & PAGE_BITS) & ~PAGE_WRITE);
if (DEBUG_TB_INVALIDATE_GATE) {
printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
}
}
}

/* called from signal handler: invalidate the code and unprotect the
* page. Return 0 if the fault was not handled, 1 if it was handled,
* and 2 if it was handled but the caller must cause the TB to be

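The page_unprotect() contract quoted above is consumed from the user-only write-fault path. Roughly (a sketch of the assumed flow, not verbatim QEMU code; deliver_guest_signal is hypothetical):

    static void handle_code_write_fault(target_ulong guest_addr,
                                        uintptr_t host_pc)
    {
        switch (page_unprotect(guest_addr, host_pc)) {
        case 0: /* not a protected code page: a genuine guest fault */
            deliver_guest_signal();
            break;
        case 1: /* page unprotected, stale TBs invalidated: retry write */
            break;
        case 2: /* handled, but the interrupted TB itself was invalidated
                 * and must be regenerated before resuming */
            cpu_loop_exit_noexc(current_cpu);
        }
    }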
@@ -42,6 +42,15 @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
return ((db->pc_first ^ dest) & TARGET_PAGE_MASK) == 0;
}

static inline void translator_page_protect(DisasContextBase *dcbase,
target_ulong pc)
{
#ifdef CONFIG_USER_ONLY
dcbase->page_protect_end = pc | ~TARGET_PAGE_MASK;
page_protect(pc);
#endif
}

void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
CPUState *cpu, TranslationBlock *tb, int max_insns)
{
@@ -56,6 +65,7 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
db->num_insns = 0;
db->max_insns = max_insns;
db->singlestep_enabled = cflags & CF_SINGLE_STEP;
translator_page_protect(db, db->pc_next);

ops->init_disas_context(db, cpu);
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
@@ -137,3 +147,32 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
}
#endif
}

static inline void translator_maybe_page_protect(DisasContextBase *dcbase,
target_ulong pc, size_t len)
{
#ifdef CONFIG_USER_ONLY
target_ulong end = pc + len - 1;

if (end > dcbase->page_protect_end) {
translator_page_protect(dcbase, end);
}
#endif
}

#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \
type fullname ## _swap(CPUArchState *env, DisasContextBase *dcbase, \
abi_ptr pc, bool do_swap) \
{ \
translator_maybe_page_protect(dcbase, pc, sizeof(type)); \
type ret = load_fn(env, pc); \
if (do_swap) { \
ret = swap_fn(ret); \
} \
plugin_insn_append(&ret, sizeof(ret)); \
return ret; \
}

FOR_EACH_TRANSLATOR_LD(GEN_TRANSLATOR_LD)

#undef GEN_TRANSLATOR_LD

@@ -680,18 +680,26 @@ int cpu_signal_handler(int host_signum, void *pinfo,

pc = uc->uc_mcontext.psw.addr;

/* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
of the normal 2 arguments. The 3rd argument contains the "int_code"
from the hardware which does in fact contain the is_write value.
The rt signal handler, as far as I can tell, does not give this value
at all. Not that we could get to it from here even if it were. */
/* ??? This is not even close to complete, since it ignores all
of the read-modify-write instructions. */
/*
* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
* of the normal 2 arguments. The 4th argument contains the "Translation-
* Exception Identification for DAT Exceptions" from the hardware (aka
* "int_parm_long"), which does in fact contain the is_write value.
* The rt signal handler, as far as I can tell, does not give this value
* at all. Not that we could get to it from here even if it were.
* So fall back to parsing instructions. Treat read-modify-write ones as
* writes, which is not fully correct, but for tracking self-modifying code
* this is better than treating them as reads. Checking si_addr page flags
* might be a viable improvement, albeit a racy one.
*/
/* ??? This is not even close to complete. */
pinsn = (uint16_t *)pc;
switch (pinsn[0] >> 8) {
case 0x50: /* ST */
case 0x42: /* STC */
case 0x40: /* STH */
case 0xba: /* CS */
case 0xbb: /* CDS */
is_write = 1;
break;
case 0xc4: /* RIL format insns */
@@ -702,6 +710,12 @@ int cpu_signal_handler(int host_signum, void *pinfo,
is_write = 1;
}
break;
case 0xc8: /* SSF format insns */
switch (pinsn[0] & 0xf) {
case 0x2: /* CSST */
is_write = 1;
}
break;
case 0xe3: /* RXY format insns */
switch (pinsn[2] & 0xff) {
case 0x50: /* STY */
@@ -715,7 +729,27 @@ int cpu_signal_handler(int host_signum, void *pinfo,
is_write = 1;
}
break;
case 0xeb: /* RSY format insns */
switch (pinsn[2] & 0xff) {
case 0x14: /* CSY */
case 0x30: /* CSG */
case 0x31: /* CDSY */
case 0x3e: /* CDSG */
case 0xe4: /* LANG */
case 0xe6: /* LAOG */
case 0xe7: /* LAXG */
case 0xe8: /* LAAG */
case 0xea: /* LAALG */
case 0xf4: /* LAN */
case 0xf6: /* LAO */
case 0xf7: /* LAX */
case 0xfa: /* LAAL */
case 0xf8: /* LAA */
is_write = 1;
}
break;
}

return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}

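The "checking si_addr page flags" aside in the comment above could be prototyped along these lines (hypothetical helper, user-only, and racy for exactly the reason the comment gives):

    /* If the faulting address lies in a page the guest mapped without
     * PAGE_WRITE, an ambiguous read-modify-write insn was almost
     * certainly faulting on its write half. */
    static bool fault_addr_suggests_write(const siginfo_t *info)
    {
        int flags = page_get_flags(h2g((uintptr_t)info->si_addr));

        return flags != 0 && !(flags & PAGE_WRITE);
    }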
@@ -33,11 +33,6 @@ uint64_t cpu_get_tsc(CPUX86State *env)
return cpu_get_host_ticks();
}

int cpu_get_pic_interrupt(CPUX86State *env)
{
return -1;
}

void bsd_i386_write_dt(void *ptr, unsigned long addr, unsigned long limit,
int flags)
{

@@ -33,11 +33,6 @@ uint64_t cpu_get_tsc(CPUX86State *env)
return cpu_get_host_ticks();
}

int cpu_get_pic_interrupt(CPUX86State *env)
{
return -1;
}

void bsd_x86_64_write_dt(void *ptr, unsigned long addr,
unsigned long limit, int flags)
{

@@ -33,6 +33,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end);
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr);

#ifdef CONFIG_USER_ONLY
void page_protect(tb_page_addr_t page_addr);
int page_unprotect(target_ulong address, uintptr_t pc);
#endif

@@ -23,6 +23,7 @@
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/plugin-gen.h"
#include "exec/translate-all.h"
#include "tcg/tcg.h"


@@ -74,6 +75,17 @@ typedef struct DisasContextBase {
int num_insns;
int max_insns;
bool singlestep_enabled;
#ifdef CONFIG_USER_ONLY
/*
* Guest address of the last byte of the last protected page.
*
* Pages containing the translated instructions are made non-writable in
* order to achieve consistency in case another thread is modifying the
* code while translate_insn() fetches the instruction bytes piecemeal.
* Such writer threads are blocked on mmap_lock() in page_unprotect().
*/
target_ulong page_protect_end;
#endif
} DisasContextBase;

/**
@@ -156,27 +168,23 @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest);
*/

#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \
static inline type \
fullname ## _swap(CPUArchState *env, abi_ptr pc, bool do_swap) \
type fullname ## _swap(CPUArchState *env, DisasContextBase *dcbase, \
abi_ptr pc, bool do_swap); \
static inline type fullname(CPUArchState *env, \
DisasContextBase *dcbase, abi_ptr pc) \
{ \
type ret = load_fn(env, pc); \
if (do_swap) { \
ret = swap_fn(ret); \
} \
plugin_insn_append(&ret, sizeof(ret)); \
return ret; \
} \
\
static inline type fullname(CPUArchState *env, abi_ptr pc) \
{ \
return fullname ## _swap(env, pc, false); \
return fullname ## _swap(env, dcbase, pc, false); \
}

GEN_TRANSLATOR_LD(translator_ldub, uint8_t, cpu_ldub_code, /* no swap */)
GEN_TRANSLATOR_LD(translator_ldsw, int16_t, cpu_ldsw_code, bswap16)
GEN_TRANSLATOR_LD(translator_lduw, uint16_t, cpu_lduw_code, bswap16)
GEN_TRANSLATOR_LD(translator_ldl, uint32_t, cpu_ldl_code, bswap32)
GEN_TRANSLATOR_LD(translator_ldq, uint64_t, cpu_ldq_code, bswap64)
#define FOR_EACH_TRANSLATOR_LD(F) \
F(translator_ldub, uint8_t, cpu_ldub_code, /* no swap */) \
F(translator_ldsw, int16_t, cpu_ldsw_code, bswap16) \
F(translator_lduw, uint16_t, cpu_lduw_code, bswap16) \
F(translator_ldl, uint32_t, cpu_ldl_code, bswap32) \
F(translator_ldq, uint64_t, cpu_ldq_code, bswap64)

FOR_EACH_TRANSLATOR_LD(GEN_TRANSLATOR_LD)

#undef GEN_TRANSLATOR_LD

#endif /* EXEC__TRANSLATOR_H */

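Hand-expanding GEN_TRANSLATOR_LD for the ldl case shows the net effect of the hunk above: the swapping variant becomes an out-of-line function (its definition moves to translator.c so it can call translator_maybe_page_protect()), while the non-swapping wrapper stays inline (formatting approximate):

    uint32_t translator_ldl_swap(CPUArchState *env, DisasContextBase *dcbase,
                                 abi_ptr pc, bool do_swap);

    static inline uint32_t translator_ldl(CPUArchState *env,
                                          DisasContextBase *dcbase, abi_ptr pc)
    {
        return translator_ldl_swap(env, dcbase, pc, false);
    }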
@@ -35,16 +35,6 @@ struct TCGCPUOps {
void (*cpu_exec_enter)(CPUState *cpu);
/** @cpu_exec_exit: Callback for cpu_exec cleanup */
void (*cpu_exec_exit)(CPUState *cpu);
/** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
/**
* @do_interrupt: Callback for interrupt handling.
*
* note that this is in general SOFTMMU only, but it actually isn't
* because of an x86 hack (accel/tcg/cpu-exec.c), so we cannot put it
* in the SOFTMMU section in general.
*/
void (*do_interrupt)(CPUState *cpu);
/**
* @tlb_fill: Handle a softmmu tlb miss or user-only address fault
*
@@ -61,7 +51,23 @@ struct TCGCPUOps {
void (*debug_excp_handler)(CPUState *cpu);

#ifdef NEED_CPU_H
#if defined(CONFIG_USER_ONLY) && defined(TARGET_I386)
/**
* @fake_user_interrupt: Callback for 'fake exception' handling.
*
* Simulate 'fake exception' which will be handled outside the
* cpu execution loop (hack for x86 user mode).
*/
void (*fake_user_interrupt)(CPUState *cpu);
#else
/**
* @do_interrupt: Callback for interrupt handling.
*/
void (*do_interrupt)(CPUState *cpu);
#endif /* !CONFIG_USER_ONLY || !TARGET_I386 */
#ifdef CONFIG_SOFTMMU
/** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
/**
* @do_transaction_failed: Callback for handling failed memory transactions
* (ie bus faults or external aborts; not MMU faults)

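Condensing the two hunks above, the interrupt-related hooks end up laid out as follows (a sketch of the relevant members only, not the full struct):

    struct TCGCPUOps {
        /* ... */
    #if defined(CONFIG_USER_ONLY) && defined(TARGET_I386)
        /* user-mode x86 only: 'fake exception' taken outside cpu_exec() */
        void (*fake_user_interrupt)(CPUState *cpu);
    #else
        void (*do_interrupt)(CPUState *cpu);
    #endif
    #ifdef CONFIG_SOFTMMU
        /* now sysemu-only */
        bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
        /* ... */
    #endif
    };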
@@ -843,7 +843,6 @@ static inline void tcg_gen_plugin_cb_end(void)

#if TARGET_LONG_BITS == 32
#define tcg_temp_new() tcg_temp_new_i32()
#define tcg_global_reg_new tcg_global_reg_new_i32
#define tcg_global_mem_new tcg_global_mem_new_i32
#define tcg_temp_local_new() tcg_temp_local_new_i32()
#define tcg_temp_free tcg_temp_free_i32
@@ -851,7 +850,6 @@ static inline void tcg_gen_plugin_cb_end(void)
#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i32
#else
#define tcg_temp_new() tcg_temp_new_i64()
#define tcg_global_reg_new tcg_global_reg_new_i64
#define tcg_global_mem_new tcg_global_mem_new_i64
#define tcg_temp_local_new() tcg_temp_local_new_i64()
#define tcg_temp_free tcg_temp_free_i64

@@ -120,13 +120,6 @@ const char *qemu_uname_release;
by remapping the process stack directly at the right place */
unsigned long guest_stack_size = 8 * 1024 * 1024UL;

#if defined(TARGET_I386)
int cpu_get_pic_interrupt(CPUX86State *env)
{
return -1;
}
#endif

/***********************************************************/
/* Helper routines for implementing atomic operations. */

@@ -218,10 +218,10 @@ static const struct SysemuCPUOps alpha_sysemu_ops = {

static const struct TCGCPUOps alpha_tcg_ops = {
.initialize = alpha_translate_init,
.cpu_exec_interrupt = alpha_cpu_exec_interrupt,
.tlb_fill = alpha_cpu_tlb_fill,

#ifndef CONFIG_USER_ONLY
.cpu_exec_interrupt = alpha_cpu_exec_interrupt,
.do_interrupt = alpha_cpu_do_interrupt,
.do_transaction_failed = alpha_cpu_do_transaction_failed,
.do_unaligned_access = alpha_cpu_do_unaligned_access,

@@ -274,10 +274,10 @@ struct AlphaCPU {

#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_alpha_cpu;
#endif

void alpha_cpu_do_interrupt(CPUState *cpu);
bool alpha_cpu_exec_interrupt(CPUState *cpu, int int_req);
#endif /* !CONFIG_USER_ONLY */
void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags);
hwaddr alpha_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int alpha_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);

@@ -293,7 +293,6 @@ bool alpha_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
prot, mmu_idx, TARGET_PAGE_SIZE);
return true;
}
#endif /* USER_ONLY */

void alpha_cpu_do_interrupt(CPUState *cs)
{
@@ -348,7 +347,6 @@ void alpha_cpu_do_interrupt(CPUState *cs)

cs->exception_index = -1;

#if !defined(CONFIG_USER_ONLY)
switch (i) {
case EXCP_RESET:
i = 0x0000;
@@ -404,7 +402,6 @@ void alpha_cpu_do_interrupt(CPUState *cs)

/* Switch to PALmode. */
env->flags |= ENV_FLAG_PAL_MODE;
#endif /* !USER_ONLY */
}

bool alpha_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
@@ -451,6 +448,8 @@ bool alpha_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return false;
}

#endif /* !CONFIG_USER_ONLY */

void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
static const char linux_reg_names[31][4] = {

@@ -2971,7 +2971,7 @@ static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
CPUAlphaState *env = cpu->env_ptr;
uint32_t insn = translator_ldl(env, ctx->base.pc_next);
uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

ctx->base.pc_next += 4;
ctx->base.is_jmp = translate_one(ctx, insn);

@@ -24,15 +24,15 @@
#include "qemu/bswap.h"

/* Load an instruction and return it in the standard little-endian order */
static inline uint32_t arm_ldl_code(CPUARMState *env, target_ulong addr,
bool sctlr_b)
static inline uint32_t arm_ldl_code(CPUARMState *env, DisasContextBase *s,
target_ulong addr, bool sctlr_b)
{
return translator_ldl_swap(env, addr, bswap_code(sctlr_b));
return translator_ldl_swap(env, s, addr, bswap_code(sctlr_b));
}

/* Ditto, for a halfword (Thumb) instruction */
static inline uint16_t arm_lduw_code(CPUARMState *env, target_ulong addr,
bool sctlr_b)
static inline uint16_t arm_lduw_code(CPUARMState *env, DisasContextBase* s,
target_ulong addr, bool sctlr_b)
{
#ifndef CONFIG_USER_ONLY
/* In big-endian (BE32) mode, adjacent Thumb instructions have been swapped
@@ -41,7 +41,7 @@ static inline uint16_t arm_lduw_code(CPUARMState *env, target_ulong addr,
addr ^= 2;
}
#endif
return translator_lduw_swap(env, addr, bswap_code(sctlr_b));
return translator_lduw_swap(env, s, addr, bswap_code(sctlr_b));
}

#endif

@@ -440,6 +440,8 @@ static void arm_cpu_reset(DeviceState *dev)
arm_rebuild_hflags(env);
}

#ifndef CONFIG_USER_ONLY

static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
unsigned int target_el,
unsigned int cur_el, bool secure,
@@ -556,7 +558,7 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
return unmasked || pstate_unmasked;
}

bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
CPUClass *cc = CPU_GET_CLASS(cs);
CPUARMState *env = cs->env_ptr;
@@ -608,6 +610,7 @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
cc->tcg_ops->do_interrupt(cs);
return true;
}
#endif /* !CONFIG_USER_ONLY */

void arm_cpu_update_virq(ARMCPU *cpu)
{
@@ -2010,11 +2013,11 @@ static const struct SysemuCPUOps arm_sysemu_ops = {
static const struct TCGCPUOps arm_tcg_ops = {
.initialize = arm_translate_init,
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
.cpu_exec_interrupt = arm_cpu_exec_interrupt,
.tlb_fill = arm_cpu_tlb_fill,
.debug_excp_handler = arm_debug_excp_handler,

#if !defined(CONFIG_USER_ONLY)
.cpu_exec_interrupt = arm_cpu_exec_interrupt,
.do_interrupt = arm_cpu_do_interrupt,
.do_transaction_failed = arm_cpu_do_transaction_failed,
.do_unaligned_access = arm_cpu_do_unaligned_access,

@@ -1040,11 +1040,10 @@ uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz);

#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_arm_cpu;
#endif

void arm_cpu_do_interrupt(CPUState *cpu);
void arm_v7m_cpu_do_interrupt(CPUState *cpu);
bool arm_cpu_exec_interrupt(CPUState *cpu, int int_req);
#endif /* !CONFIG_USER_ONLY */

hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
MemTxAttrs *attrs);

@@ -22,7 +22,7 @@
/* CPU models. These are not needed for the AArch64 linux-user build. */
#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)

#ifdef CONFIG_TCG
#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
CPUClass *cc = CPU_GET_CLASS(cs);
@@ -46,7 +46,7 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
return ret;
}
#endif /* CONFIG_TCG */
#endif /* !CONFIG_USER_ONLY && CONFIG_TCG */

static void arm926_initfn(Object *obj)
{
@@ -898,11 +898,11 @@ static void pxa270c5_initfn(Object *obj)
static const struct TCGCPUOps arm_v7m_tcg_ops = {
.initialize = arm_translate_init,
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
.cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt,
.tlb_fill = arm_cpu_tlb_fill,
.debug_excp_handler = arm_debug_excp_handler,

#if !defined(CONFIG_USER_ONLY)
.cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt,
.do_interrupt = arm_v7m_cpu_do_interrupt,
.do_transaction_failed = arm_cpu_do_transaction_failed,
.do_unaligned_access = arm_cpu_do_unaligned_access,

@@ -14772,7 +14772,7 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
}

s->pc_curr = s->base.pc_next;
insn = arm_ldl_code(env, s->base.pc_next, s->sctlr_b);
insn = arm_ldl_code(env, &s->base, s->base.pc_next, s->sctlr_b);
s->insn = insn;
s->base.pc_next += 4;

@@ -9312,7 +9312,7 @@ static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
* boundary, so we cross the page if the first 16 bits indicate
* that this is a 32 bit insn.
*/
uint16_t insn = arm_lduw_code(env, s->base.pc_next, s->sctlr_b);
uint16_t insn = arm_lduw_code(env, &s->base, s->base.pc_next, s->sctlr_b);

return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
}
@@ -9551,7 +9551,7 @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
}

dc->pc_curr = dc->base.pc_next;
insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b);
insn = arm_ldl_code(env, &dc->base, dc->base.pc_next, dc->sctlr_b);
dc->insn = insn;
dc->base.pc_next += 4;
disas_arm_insn(dc, insn);
@@ -9621,11 +9621,12 @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
}

dc->pc_curr = dc->base.pc_next;
insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
insn = arm_lduw_code(env, &dc->base, dc->base.pc_next, dc->sctlr_b);
is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
dc->base.pc_next += 2;
if (!is_16bit) {
uint32_t insn2 = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
uint32_t insn2 = arm_lduw_code(env, &dc->base, dc->base.pc_next,
dc->sctlr_b);

insn = insn << 16 | insn2;
dc->base.pc_next += 2;

@@ -197,10 +197,7 @@ static const struct TCGCPUOps avr_tcg_ops = {
.synchronize_from_tb = avr_cpu_synchronize_from_tb,
.cpu_exec_interrupt = avr_cpu_exec_interrupt,
.tlb_fill = avr_cpu_tlb_fill,

#ifndef CONFIG_USER_ONLY
.do_interrupt = avr_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
};

static void avr_cpu_class_init(ObjectClass *oc, void *data)

@@ -205,20 +205,20 @@ static const struct SysemuCPUOps cris_sysemu_ops = {

static const struct TCGCPUOps crisv10_tcg_ops = {
.initialize = cris_initialize_crisv10_tcg,
.cpu_exec_interrupt = cris_cpu_exec_interrupt,
.tlb_fill = cris_cpu_tlb_fill,

#ifndef CONFIG_USER_ONLY
.cpu_exec_interrupt = cris_cpu_exec_interrupt,
.do_interrupt = crisv10_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
};

static const struct TCGCPUOps crisv32_tcg_ops = {
.initialize = cris_initialize_tcg,
.cpu_exec_interrupt = cris_cpu_exec_interrupt,
.tlb_fill = cris_cpu_tlb_fill,

#ifndef CONFIG_USER_ONLY
.cpu_exec_interrupt = cris_cpu_exec_interrupt,
.do_interrupt = cris_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
};

@@ -185,11 +185,11 @@ struct CRISCPU {

#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_cris_cpu;
#endif

void cris_cpu_do_interrupt(CPUState *cpu);
void crisv10_cpu_do_interrupt(CPUState *cpu);
bool cris_cpu_exec_interrupt(CPUState *cpu, int int_req);
#endif

void cris_cpu_dump_state(CPUState *cs, FILE *f, int flags);

@@ -41,20 +41,6 @@

#if defined(CONFIG_USER_ONLY)

void cris_cpu_do_interrupt(CPUState *cs)
{
CRISCPU *cpu = CRIS_CPU(cs);
CPUCRISState *env = &cpu->env;

cs->exception_index = -1;
env->pregs[PR_ERP] = env->pc;
}

void crisv10_cpu_do_interrupt(CPUState *cs)
{
cris_cpu_do_interrupt(cs);
}

bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
@@ -287,7 +273,6 @@ hwaddr cris_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
D(fprintf(stderr, "%s %x -> %x\n", __func__, addr, phy));
return phy;
}
#endif

bool cris_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
@@ -319,3 +304,5 @@ bool cris_cpu_exec_interrupt(CPUState *cs, int interrupt_request)

return ret;
}

#endif /* !CONFIG_USER_ONLY */

@@ -112,7 +112,8 @@ static int read_packet_words(CPUHexagonState *env, DisasContext *ctx,
memset(words, 0, PACKET_WORDS_MAX * sizeof(uint32_t));
for (nwords = 0; !found_end && nwords < PACKET_WORDS_MAX; nwords++) {
words[nwords] =
translator_ldl(env, ctx->base.pc_next + nwords * sizeof(uint32_t));
translator_ldl(env, &ctx->base,
ctx->base.pc_next + nwords * sizeof(uint32_t));
found_end = is_packet_end(words[nwords]);
}
if (!found_end) {

@@ -144,10 +144,10 @@ static const struct SysemuCPUOps hppa_sysemu_ops = {
static const struct TCGCPUOps hppa_tcg_ops = {
.initialize = hppa_translate_init,
.synchronize_from_tb = hppa_cpu_synchronize_from_tb,
.cpu_exec_interrupt = hppa_cpu_exec_interrupt,
.tlb_fill = hppa_cpu_tlb_fill,

#ifndef CONFIG_USER_ONLY
.cpu_exec_interrupt = hppa_cpu_exec_interrupt,
.do_interrupt = hppa_cpu_do_interrupt,
.do_unaligned_access = hppa_cpu_do_unaligned_access,
#endif /* !CONFIG_USER_ONLY */

@@ -325,13 +325,13 @@ int cpu_hppa_signal_handler(int host_signum, void *pinfo, void *puc);
hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr);
int hppa_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int hppa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void hppa_cpu_do_interrupt(CPUState *cpu);
bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req);
void hppa_cpu_dump_state(CPUState *cs, FILE *f, int);
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
#ifndef CONFIG_USER_ONLY
void hppa_cpu_do_interrupt(CPUState *cpu);
bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req);
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
int type, hwaddr *pphys, int *pprot);
extern const MemoryRegionOps hppa_io_eir_ops;

@@ -88,7 +88,6 @@ void HELPER(write_eiem)(CPUHPPAState *env, target_ureg val)
eval_interrupt(env_archcpu(env));
qemu_mutex_unlock_iothread();
}
#endif /* !CONFIG_USER_ONLY */

void hppa_cpu_do_interrupt(CPUState *cs)
{
@@ -100,7 +99,6 @@ void hppa_cpu_do_interrupt(CPUState *cs)
uint64_t iasq_f = env->iasq_f;
uint64_t iasq_b = env->iasq_b;

#ifndef CONFIG_USER_ONLY
target_ureg old_psw;

/* As documented in pa2.0 -- interruption handling. */
@@ -187,7 +185,6 @@ void hppa_cpu_do_interrupt(CPUState *cs)
env->iaoq_b = env->iaoq_f + 4;
env->iasq_f = 0;
env->iasq_b = 0;
#endif

if (qemu_loglevel_mask(CPU_LOG_INT)) {
static const char * const names[] = {
@@ -248,7 +245,6 @@ void hppa_cpu_do_interrupt(CPUState *cs)

bool hppa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
#ifndef CONFIG_USER_ONLY
HPPACPU *cpu = HPPA_CPU(cs);
CPUHPPAState *env = &cpu->env;

@@ -258,6 +254,7 @@ bool hppa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
hppa_cpu_do_interrupt(cs);
return true;
}
#endif
return false;
}

#endif /* !CONFIG_USER_ONLY */

@@ -34,7 +34,6 @@

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_reg_new
#undef tcg_global_mem_new
#undef tcg_temp_local_new
#undef tcg_temp_free
@@ -59,7 +58,6 @@
#define TCGv_reg TCGv_i64

#define tcg_temp_new tcg_temp_new_i64
#define tcg_global_reg_new tcg_global_reg_new_i64
#define tcg_global_mem_new tcg_global_mem_new_i64
#define tcg_temp_local_new tcg_temp_local_new_i64
#define tcg_temp_free tcg_temp_free_i64
@@ -155,7 +153,6 @@
#else
#define TCGv_reg TCGv_i32
#define tcg_temp_new tcg_temp_new_i32
#define tcg_global_reg_new tcg_global_reg_new_i32
#define tcg_global_mem_new tcg_global_mem_new_i32
#define tcg_temp_local_new tcg_temp_local_new_i32
#define tcg_temp_free tcg_temp_free_i32
@@ -4177,7 +4174,7 @@ static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
/* Always fetch the insn, even if nullified, so that we check
the page permissions for execute. */
uint32_t insn = translator_ldl(env, ctx->base.pc_next);
uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

/* Set up the IA queue for the next insn.
This will be overwritten by a branch. */

@@ -1836,12 +1836,15 @@ int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void x86_cpu_list(void);
int cpu_x86_support_mca_broadcast(CPUX86State *env);

#ifndef CONFIG_USER_ONLY
int cpu_get_pic_interrupt(CPUX86State *s);

/* MSDOS compatibility mode FPU exception support */
void x86_register_ferr_irq(qemu_irq irq);
void fpu_check_raise_ferr_irq(CPUX86State *s);
void cpu_set_ignne(void);
void cpu_clear_ignne(void);
#endif

/* mpx_helper.c */
void cpu_sync_bndcs_hflags(CPUX86State *env);

@@ -38,7 +38,9 @@ QEMU_BUILD_BUG_ON(TCG_PHYS_ADDR_BITS > TARGET_PHYS_ADDR_SPACE_BITS);
* @cpu: vCPU the interrupt is to be handled by.
*/
void x86_cpu_do_interrupt(CPUState *cpu);
#ifndef CONFIG_USER_ONLY
bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req);
#endif

/* helper.c */
bool x86_cpu_tlb_fill(CPUState *cs, vaddr address, int size,

@@ -929,9 +929,7 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
e2);
env->eip = offset;
}
#endif

#ifdef TARGET_X86_64
void helper_sysret(CPUX86State *env, int dflag)
{
int cpl, selector;
@@ -984,7 +982,7 @@ void helper_sysret(CPUX86State *env, int dflag)
DESC_W_MASK | DESC_A_MASK);
}
}
#endif
#endif /* TARGET_X86_64 */

/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
@@ -1112,76 +1110,6 @@ void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
}

bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
int intno;

interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
if (!interrupt_request) {
return false;
}

/* Don't process multiple interrupt requests in a single call.
* This is required to make icount-driven execution deterministic.
*/
switch (interrupt_request) {
#if !defined(CONFIG_USER_ONLY)
case CPU_INTERRUPT_POLL:
cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
apic_poll_irq(cpu->apic_state);
break;
#endif
case CPU_INTERRUPT_SIPI:
do_cpu_sipi(cpu);
break;
case CPU_INTERRUPT_SMI:
cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
#ifdef CONFIG_USER_ONLY
cpu_abort(CPU(cpu), "SMI interrupt: cannot enter SMM in user-mode");
#else
do_smm_enter(cpu);
#endif /* CONFIG_USER_ONLY */
break;
case CPU_INTERRUPT_NMI:
cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
env->hflags2 |= HF2_NMI_MASK;
do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
break;
case CPU_INTERRUPT_MCE:
cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
break;
case CPU_INTERRUPT_HARD:
cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
CPU_INTERRUPT_VIRQ);
intno = cpu_get_pic_interrupt(env);
qemu_log_mask(CPU_LOG_TB_IN_ASM,
"Servicing hardware INT=0x%02x\n", intno);
do_interrupt_x86_hardirq(env, intno, 1);
break;
#if !defined(CONFIG_USER_ONLY)
case CPU_INTERRUPT_VIRQ:
cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
intno = x86_ldl_phys(cs, env->vm_vmcb
+ offsetof(struct vmcb, control.int_vector));
qemu_log_mask(CPU_LOG_TB_IN_ASM,
"Servicing virtual hardware INT=0x%02x\n", intno);
do_interrupt_x86_hardirq(env, intno, 1);
cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
env->int_ctl &= ~V_IRQ_MASK;
break;
#endif
}

/* Ensure that no TB jump will be modified as the program flow was changed. */
return true;
}

void helper_lldt(CPUX86State *env, int selector)
{
SegmentCache *dt;

@@ -125,6 +125,68 @@ void x86_cpu_do_interrupt(CPUState *cs)
}
}

bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
int intno;

interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
if (!interrupt_request) {
return false;
}

/* Don't process multiple interrupt requests in a single call.
* This is required to make icount-driven execution deterministic.
*/
switch (interrupt_request) {
case CPU_INTERRUPT_POLL:
cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
apic_poll_irq(cpu->apic_state);
break;
case CPU_INTERRUPT_SIPI:
do_cpu_sipi(cpu);
break;
case CPU_INTERRUPT_SMI:
cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
do_smm_enter(cpu);
break;
case CPU_INTERRUPT_NMI:
cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
env->hflags2 |= HF2_NMI_MASK;
do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
break;
case CPU_INTERRUPT_MCE:
cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
break;
case CPU_INTERRUPT_HARD:
cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
CPU_INTERRUPT_VIRQ);
intno = cpu_get_pic_interrupt(env);
qemu_log_mask(CPU_LOG_TB_IN_ASM,
"Servicing hardware INT=0x%02x\n", intno);
do_interrupt_x86_hardirq(env, intno, 1);
break;
case CPU_INTERRUPT_VIRQ:
cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
intno = x86_ldl_phys(cs, env->vm_vmcb
+ offsetof(struct vmcb, control.int_vector));
qemu_log_mask(CPU_LOG_TB_IN_ASM,
"Servicing virtual hardware INT=0x%02x\n", intno);
do_interrupt_x86_hardirq(env, intno, 1);
cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
env->int_ctl &= ~V_IRQ_MASK;
break;
}

/* Ensure that no TB jump will be modified as the program flow was changed. */
return true;
}

/* check if Port I/O is allowed in TSS */
void helper_check_io(CPUX86State *env, uint32_t addr, uint32_t size)
{

@@ -72,10 +72,12 @@ static const struct TCGCPUOps x86_tcg_ops = {
.synchronize_from_tb = x86_cpu_synchronize_from_tb,
.cpu_exec_enter = x86_cpu_exec_enter,
.cpu_exec_exit = x86_cpu_exec_exit,
.cpu_exec_interrupt = x86_cpu_exec_interrupt,
.do_interrupt = x86_cpu_do_interrupt,
.tlb_fill = x86_cpu_tlb_fill,
#ifndef CONFIG_USER_ONLY
#ifdef CONFIG_USER_ONLY
.fake_user_interrupt = x86_cpu_do_interrupt,
#else
.do_interrupt = x86_cpu_do_interrupt,
.cpu_exec_interrupt = x86_cpu_exec_interrupt,
.debug_excp_handler = breakpoint_handler,
.debug_check_breakpoint = x86_debug_check_breakpoint,
#endif /* !CONFIG_USER_ONLY */

@@ -2028,28 +2028,28 @@ static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)

static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
{
return translator_ldub(env, advance_pc(env, s, 1));
return translator_ldub(env, &s->base, advance_pc(env, s, 1));
}

static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
{
return translator_ldsw(env, advance_pc(env, s, 2));
return translator_ldsw(env, &s->base, advance_pc(env, s, 2));
}

static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
{
return translator_lduw(env, advance_pc(env, s, 2));
return translator_lduw(env, &s->base, advance_pc(env, s, 2));
}

static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
{
return translator_ldl(env, advance_pc(env, s, 4));
return translator_ldl(env, &s->base, advance_pc(env, s, 4));
}

#ifdef TARGET_X86_64
static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
{
return translator_ldq(env, advance_pc(env, s, 8));
return translator_ldq(env, &s->base, advance_pc(env, s, 8));
}
#endif

@@ -515,10 +515,10 @@ static const struct SysemuCPUOps m68k_sysemu_ops = {

static const struct TCGCPUOps m68k_tcg_ops = {
.initialize = m68k_tcg_init,
.cpu_exec_interrupt = m68k_cpu_exec_interrupt,
.tlb_fill = m68k_cpu_tlb_fill,

#ifndef CONFIG_USER_ONLY
.cpu_exec_interrupt = m68k_cpu_exec_interrupt,
.do_interrupt = m68k_cpu_do_interrupt,
.do_transaction_failed = m68k_cpu_transaction_failed,
#endif /* !CONFIG_USER_ONLY */

@@ -166,8 +166,10 @@ struct M68kCPU {
};


#ifndef CONFIG_USER_ONLY
void m68k_cpu_do_interrupt(CPUState *cpu);
bool m68k_cpu_exec_interrupt(CPUState *cpu, int int_req);
#endif /* !CONFIG_USER_ONLY */
void m68k_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
hwaddr m68k_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int m68k_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);

@@ -24,18 +24,7 @@
#include "semihosting/semihost.h"
#include "tcg/tcg.h"

#if defined(CONFIG_USER_ONLY)

void m68k_cpu_do_interrupt(CPUState *cs)
{
cs->exception_index = -1;
}

static inline void do_interrupt_m68k_hardirq(CPUM68KState *env)
{
}

#else
#if !defined(CONFIG_USER_ONLY)

static void cf_rte(CPUM68KState *env)
{
@@ -516,7 +505,6 @@ void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
cpu_loop_exit(cs);
}
}
#endif

bool m68k_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
@@ -538,6 +526,8 @@ bool m68k_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return false;
}

#endif /* !CONFIG_USER_ONLY */

static void raise_exception_ra(CPUM68KState *env, int tt, uintptr_t raddr)
{
CPUState *cs = env_cpu(env);

@@ -415,7 +415,7 @@ static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
{
uint16_t im;
im = translator_lduw(env, s->pc);
im = translator_lduw(env, &s->base, s->pc);
s->pc += 2;
return im;
}

@@ -365,10 +365,10 @@ static const struct SysemuCPUOps mb_sysemu_ops = {
static const struct TCGCPUOps mb_tcg_ops = {
.initialize = mb_tcg_init,
.synchronize_from_tb = mb_cpu_synchronize_from_tb,
.cpu_exec_interrupt = mb_cpu_exec_interrupt,
.tlb_fill = mb_cpu_tlb_fill,

#ifndef CONFIG_USER_ONLY
.cpu_exec_interrupt = mb_cpu_exec_interrupt,
.do_interrupt = mb_cpu_do_interrupt,
.do_transaction_failed = mb_cpu_transaction_failed,
.do_unaligned_access = mb_cpu_do_unaligned_access,

@@ -355,8 +355,10 @@ struct MicroBlazeCPU {
};


#ifndef CONFIG_USER_ONLY
void mb_cpu_do_interrupt(CPUState *cs);
bool mb_cpu_exec_interrupt(CPUState *cs, int int_req);
#endif /* !CONFIG_USER_ONLY */
void mb_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr);

@@ -26,16 +26,6 @@

#if defined(CONFIG_USER_ONLY)

void mb_cpu_do_interrupt(CPUState *cs)
{
MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
CPUMBState *env = &cpu->env;

cs->exception_index = -1;
env->res_addr = RES_ADDR_NONE;
env->regs[14] = env->pc;
}

bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
@@ -271,7 +261,6 @@ hwaddr mb_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,

return paddr;
}
#endif

bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
@@ -289,6 +278,8 @@ bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return false;
}

#endif /* !CONFIG_USER_ONLY */

void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)

@@ -539,10 +539,10 @@ static const struct SysemuCPUOps mips_sysemu_ops = {
static const struct TCGCPUOps mips_tcg_ops = {
.initialize = mips_tcg_init,
.synchronize_from_tb = mips_cpu_synchronize_from_tb,
.cpu_exec_interrupt = mips_cpu_exec_interrupt,
.tlb_fill = mips_cpu_tlb_fill,

#if !defined(CONFIG_USER_ONLY)
.cpu_exec_interrupt = mips_cpu_exec_interrupt,
.do_interrupt = mips_cpu_do_interrupt,
.do_transaction_failed = mips_cpu_do_transaction_failed,
.do_unaligned_access = mips_cpu_do_unaligned_access,

@@ -86,24 +86,6 @@ void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb)
env->hflags |= tb->flags & MIPS_HFLAG_BMASK;
}

bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
if (interrupt_request & CPU_INTERRUPT_HARD) {
MIPSCPU *cpu = MIPS_CPU(cs);
CPUMIPSState *env = &cpu->env;

if (cpu_mips_hw_interrupts_enabled(env) &&
cpu_mips_hw_interrupts_pending(env)) {
/* Raise it */
cs->exception_index = EXCP_EXT_INTERRUPT;
env->error_code = 0;
mips_cpu_do_interrupt(cs);
return true;
}
}
return false;
}

static const char * const excp_names[EXCP_LAST + 1] = {
[EXCP_RESET] = "reset",
[EXCP_SRESET] = "soft reset",

@@ -1627,7 +1627,7 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx)
uint32_t op, minor, minor2, mips32_op;
uint32_t cond, fmt, cc;

insn = translator_lduw(env, ctx->base.pc_next + 2);
insn = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2);
ctx->opcode = (ctx->opcode << 16) | insn;

rt = (ctx->opcode >> 21) & 0x1f;

@@ -455,7 +455,7 @@ static void decode_i64_mips16(DisasContext *ctx,

static int decode_extended_mips16_opc(CPUMIPSState *env, DisasContext *ctx)
{
int extend = translator_lduw(env, ctx->base.pc_next + 2);
int extend = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2);
int op, rx, ry, funct, sa;
int16_t imm, offset;

@@ -688,7 +688,7 @@ static int decode_ase_mips16e(CPUMIPSState *env, DisasContext *ctx)
/* No delay slot, so just process as a normal instruction */
break;
case M16_OPC_JAL:
offset = translator_lduw(env, ctx->base.pc_next + 2);
offset = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2);
offset = (((ctx->opcode & 0x1f) << 21)
| ((ctx->opcode >> 5) & 0x1f) << 16
| offset) << 2;

@@ -3656,7 +3656,7 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
int offset;
int imm;

insn = translator_lduw(env, ctx->base.pc_next + 2);
insn = translator_lduw(env, &ctx->base, ctx->base.pc_next + 2);
ctx->opcode = (ctx->opcode << 16) | insn;

rt = extract32(ctx->opcode, 21, 5);
@@ -3775,7 +3775,7 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx)
break;
case NM_P48I:
{
insn = translator_lduw(env, ctx->base.pc_next + 4);
insn = translator_lduw(env, &ctx->base, ctx->base.pc_next + 4);
target_long addr_off = extract32(ctx->opcode, 0, 16) | insn << 16;
switch (extract32(ctx->opcode, 16, 5)) {
case NM_LI48:

@@ -1339,6 +1339,24 @@ void mips_cpu_do_interrupt(CPUState *cs)
cs->exception_index = EXCP_NONE;
}

bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
if (interrupt_request & CPU_INTERRUPT_HARD) {
MIPSCPU *cpu = MIPS_CPU(cs);
CPUMIPSState *env = &cpu->env;

if (cpu_mips_hw_interrupts_enabled(env) &&
cpu_mips_hw_interrupts_pending(env)) {
/* Raise it */
cs->exception_index = EXCP_EXT_INTERRUPT;
env->error_code = 0;
mips_cpu_do_interrupt(cs);
return true;
}
}
return false;
}

void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra)
{
CPUState *cs = env_cpu(env);

@@ -18,8 +18,6 @@
void mips_tcg_init(void);

void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
void mips_cpu_do_interrupt(CPUState *cpu);
bool mips_cpu_exec_interrupt(CPUState *cpu, int int_req);
bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
@@ -41,6 +39,9 @@ static inline void QEMU_NORETURN do_raise_exception(CPUMIPSState *env,

#if !defined(CONFIG_USER_ONLY)

void mips_cpu_do_interrupt(CPUState *cpu);
bool mips_cpu_exec_interrupt(CPUState *cpu, int int_req);

void mmu_init(CPUMIPSState *env, const mips_def_t *def);

void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask);

@@ -16041,17 +16041,17 @@ static void mips_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)

is_slot = ctx->hflags & MIPS_HFLAG_BMASK;
if (ctx->insn_flags & ISA_NANOMIPS32) {
ctx->opcode = translator_lduw(env, ctx->base.pc_next);
ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next);
insn_bytes = decode_isa_nanomips(env, ctx);
} else if (!(ctx->hflags & MIPS_HFLAG_M16)) {
ctx->opcode = translator_ldl(env, ctx->base.pc_next);
ctx->opcode = translator_ldl(env, &ctx->base, ctx->base.pc_next);
insn_bytes = 4;
decode_opc(env, ctx);
} else if (ctx->insn_flags & ASE_MICROMIPS) {
ctx->opcode = translator_lduw(env, ctx->base.pc_next);
ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next);
insn_bytes = decode_isa_micromips(env, ctx);
} else if (ctx->insn_flags & ASE_MIPS16) {
ctx->opcode = translator_lduw(env, ctx->base.pc_next);
ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next);
insn_bytes = decode_ase_mips16e(env, ctx);
} else {
gen_reserved_instruction(ctx);

@@ -57,8 +57,3 @@ bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
raise_mmu_exception(env, address, access_type);
do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr);
}

void mips_cpu_do_interrupt(CPUState *cs)
{
cs->exception_index = EXCP_NONE;
}

@ -127,6 +127,7 @@ static void nios2_cpu_realizefn(DeviceState *dev, Error **errp)
ncc->parent_realize(dev, errp);
}

#ifndef CONFIG_USER_ONLY
static bool nios2_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
Nios2CPU *cpu = NIOS2_CPU(cs);
@ -140,7 +141,7 @@ static bool nios2_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
return false;
}

#endif /* !CONFIG_USER_ONLY */

static void nios2_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
{
@ -219,10 +220,10 @@ static const struct SysemuCPUOps nios2_sysemu_ops = {

static const struct TCGCPUOps nios2_tcg_ops = {
.initialize = nios2_tcg_init,
.cpu_exec_interrupt = nios2_cpu_exec_interrupt,
.tlb_fill = nios2_cpu_tlb_fill,

#ifndef CONFIG_USER_ONLY
.cpu_exec_interrupt = nios2_cpu_exec_interrupt,
.do_interrupt = nios2_cpu_do_interrupt,
.do_unaligned_access = nios2_cpu_do_unaligned_access,
#endif /* !CONFIG_USER_ONLY */
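The nios2 hunks above are the template every remaining target follows: the
declaration, the handler, and the TCGCPUOps entry for cpu_exec_interrupt (and
usually do_interrupt) all move under #ifndef CONFIG_USER_ONLY, because only
system emulation ever delivers hardware interrupts. Condensed into one sketch
for a hypothetical target "foo" (names illustrative, not from the patch):

    /* cpu.h: visible only to the sysemu build. */
    #ifndef CONFIG_USER_ONLY
    void foo_cpu_do_interrupt(CPUState *cs);
    bool foo_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
    #endif

    /* cpu.c: the ops table gains the handlers under the same guard. */
    static const struct TCGCPUOps foo_tcg_ops = {
        .initialize = foo_translate_init,
        .tlb_fill = foo_cpu_tlb_fill,
    #ifndef CONFIG_USER_ONLY
        .cpu_exec_interrupt = foo_cpu_exec_interrupt,
        .do_interrupt = foo_cpu_do_interrupt,
    #endif
    };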
@ -186,10 +186,10 @@ static const struct SysemuCPUOps openrisc_sysemu_ops = {

static const struct TCGCPUOps openrisc_tcg_ops = {
.initialize = openrisc_translate_init,
.cpu_exec_interrupt = openrisc_cpu_exec_interrupt,
.tlb_fill = openrisc_cpu_tlb_fill,

#ifndef CONFIG_USER_ONLY
.cpu_exec_interrupt = openrisc_cpu_exec_interrupt,
.do_interrupt = openrisc_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
};

@ -312,8 +312,6 @@ struct OpenRISCCPU {


void cpu_openrisc_list(void);
void openrisc_cpu_do_interrupt(CPUState *cpu);
bool openrisc_cpu_exec_interrupt(CPUState *cpu, int int_req);
void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
@ -331,6 +329,9 @@ int print_insn_or1k(bfd_vma addr, disassemble_info *info);
#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_openrisc_cpu;

void openrisc_cpu_do_interrupt(CPUState *cpu);
bool openrisc_cpu_exec_interrupt(CPUState *cpu, int int_req);

/* hw/openrisc_pic.c */
void cpu_openrisc_pic_init(OpenRISCCPU *cpu);
@ -28,7 +28,6 @@

void openrisc_cpu_do_interrupt(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
CPUOpenRISCState *env = &cpu->env;
int exception = cs->exception_index;
@ -96,7 +95,6 @@ void openrisc_cpu_do_interrupt(CPUState *cs)
} else {
cpu_abort(cs, "Unhandled exception 0x%x\n", exception);
}
#endif

cs->exception_index = -1;
}
@ -9,7 +9,6 @@ openrisc_ss.add(files(
'exception_helper.c',
'fpu_helper.c',
'gdbstub.c',
'interrupt.c',
'interrupt_helper.c',
'mmu.c',
'sys_helper.c',
@ -17,7 +16,10 @@ openrisc_ss.add(files(
))

openrisc_softmmu_ss = ss.source_set()
openrisc_softmmu_ss.add(files('machine.c'))
openrisc_softmmu_ss.add(files(
'interrupt.c',
'machine.c',
))

target_arch += {'openrisc': openrisc_ss}
target_softmmu_arch += {'openrisc': openrisc_softmmu_ss}
@ -1613,7 +1613,7 @@ static void openrisc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
uint32_t insn = translator_ldl(&cpu->env, dc->base.pc_next);
uint32_t insn = translator_ldl(&cpu->env, &dc->base, dc->base.pc_next);

if (!decode(dc, insn)) {
gen_illegal_exception(dc);
@ -1254,8 +1254,6 @@ DECLARE_OBJ_CHECKERS(PPCVirtualHypervisor, PPCVirtualHypervisorClass,
PPC_VIRTUAL_HYPERVISOR, TYPE_PPC_VIRTUAL_HYPERVISOR)
#endif /* CONFIG_USER_ONLY */

void ppc_cpu_do_interrupt(CPUState *cpu);
bool ppc_cpu_exec_interrupt(CPUState *cpu, int int_req);
void ppc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
hwaddr ppc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int ppc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
@ -1271,6 +1269,8 @@ int ppc64_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
int ppc32_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
int cpuid, void *opaque);
#ifndef CONFIG_USER_ONLY
void ppc_cpu_do_interrupt(CPUState *cpu);
bool ppc_cpu_exec_interrupt(CPUState *cpu, int int_req);
void ppc_cpu_do_system_reset(CPUState *cs);
void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector);
extern const VMStateDescription vmstate_ppc_cpu;
@ -9014,10 +9014,10 @@ static const struct SysemuCPUOps ppc_sysemu_ops = {

static const struct TCGCPUOps ppc_tcg_ops = {
.initialize = ppc_translate_init,
.cpu_exec_interrupt = ppc_cpu_exec_interrupt,
.tlb_fill = ppc_cpu_tlb_fill,

#ifndef CONFIG_USER_ONLY
.cpu_exec_interrupt = ppc_cpu_exec_interrupt,
.do_interrupt = ppc_cpu_do_interrupt,
.cpu_exec_enter = ppc_cpu_exec_enter,
.cpu_exec_exit = ppc_cpu_exec_exit,
@ -40,24 +40,8 @@

/*****************************************************************************/
/* Exception processing */
#if defined(CONFIG_USER_ONLY)
void ppc_cpu_do_interrupt(CPUState *cs)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
#if !defined(CONFIG_USER_ONLY)

cs->exception_index = POWERPC_EXCP_NONE;
env->error_code = 0;
}

static void ppc_hw_interrupt(CPUPPCState *env)
{
CPUState *cs = env_cpu(env);

cs->exception_index = POWERPC_EXCP_NONE;
env->error_code = 0;
}
#else /* defined(CONFIG_USER_ONLY) */
static inline void dump_syscall(CPUPPCState *env)
{
qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64
@ -1113,7 +1097,6 @@ void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)

powerpc_set_excp_state(cpu, vector, msr);
}
#endif /* !CONFIG_USER_ONLY */

bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
@ -1130,6 +1113,8 @@ bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return false;
}

#endif /* !CONFIG_USER_ONLY */

#if defined(DEBUG_OP)
static void cpu_dump_rfi(target_ulong RA, target_ulong msr)
{
@ -8585,7 +8585,7 @@ static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
ctx->base.pc_next, ctx->mem_idx, (int)msr_ir);

ctx->cia = pc = ctx->base.pc_next;
insn = translator_ldl_swap(env, pc, need_byteswap(ctx));
insn = translator_ldl_swap(env, dcbase, pc, need_byteswap(ctx));
ctx->base.pc_next = pc += 4;

if (!is_prefix_insn(ctx, insn)) {
@ -8600,7 +8600,8 @@ static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_INSN);
ok = true;
} else {
uint32_t insn2 = translator_ldl_swap(env, pc, need_byteswap(ctx));
uint32_t insn2 = translator_ldl_swap(env, dcbase, pc,
need_byteswap(ctx));
ctx->base.pc_next = pc += 4;
ok = decode_insn64(ctx, deposit64(insn2, 32, 32, insn));
}
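The deposit64() call above is how the two halves of a Power10 prefixed
instruction are fused: the prefix word is deposited into bits 32-63 above the
suffix word. A standalone illustration of that arithmetic (deposit64_demo
mirrors QEMU's bitops deposit64; the instruction words are made up):

    #include <stdint.h>

    /* Replace 'length' bits of 'value' starting at 'start' with 'fieldval'.
     * Assumes 0 < length < 64 and start + length <= 64. */
    static uint64_t deposit64_demo(uint64_t value, int start, int length,
                                   uint64_t fieldval)
    {
        uint64_t mask = (~0ULL >> (64 - length)) << start;
        return (value & ~mask) | ((fieldval << start) & mask);
    }

    /* With a made-up prefix 0x04000000 and suffix 0xe0030000:
     * deposit64_demo(0xe0030000, 32, 32, 0x04000000) == 0x04000000e0030000,
     * the 64-bit value handed to decode_insn64(). */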
@ -644,10 +644,10 @@ static const struct SysemuCPUOps riscv_sysemu_ops = {
static const struct TCGCPUOps riscv_tcg_ops = {
.initialize = riscv_translate_init,
.synchronize_from_tb = riscv_cpu_synchronize_from_tb,
.cpu_exec_interrupt = riscv_cpu_exec_interrupt,
.tlb_fill = riscv_cpu_tlb_fill,

#ifndef CONFIG_USER_ONLY
.cpu_exec_interrupt = riscv_cpu_exec_interrupt,
.do_interrupt = riscv_cpu_do_interrupt,
.do_transaction_failed = riscv_cpu_do_transaction_failed,
.do_unaligned_access = riscv_cpu_do_unaligned_access,
@ -334,7 +334,6 @@ int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
int cpuid, void *opaque);
int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
bool riscv_cpu_fp_enabled(CPURISCVState *env);
bool riscv_cpu_virt_enabled(CPURISCVState *env);
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
@ -362,6 +361,7 @@ void riscv_cpu_list(void);
#define cpu_mmu_index riscv_cpu_mmu_index

#ifndef CONFIG_USER_ONLY
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts);
uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value);
@ -75,11 +75,9 @@ static int riscv_cpu_local_irq_pending(CPURISCVState *env)
return RISCV_EXCP_NONE; /* indicates no pending interrupt */
}
}
#endif

bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
#if !defined(CONFIG_USER_ONLY)
if (interrupt_request & CPU_INTERRUPT_HARD) {
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
@ -90,12 +88,9 @@ bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return true;
}
}
#endif
return false;
}

#if !defined(CONFIG_USER_ONLY)

/* Return true is floating point support is currently enabled */
bool riscv_cpu_fp_enabled(CPURISCVState *env)
{
@ -500,7 +500,8 @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
} else {
uint32_t opcode32 = opcode;
opcode32 = deposit32(opcode32, 16, 16,
translator_lduw(env, ctx->base.pc_next + 2));
translator_lduw(env, &ctx->base,
ctx->base.pc_next + 2));
ctx->pc_succ_insn = ctx->base.pc_next + 4;
if (!decode_insn32(ctx, opcode32)) {
gen_exception_illegal(ctx);
@ -561,7 +562,7 @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
CPURISCVState *env = cpu->env_ptr;
uint16_t opcode16 = translator_lduw(env, ctx->base.pc_next);
uint16_t opcode16 = translator_lduw(env, &ctx->base, ctx->base.pc_next);

decode_opc(env, ctx, opcode16);
ctx->base.pc_next = ctx->pc_succ_insn;
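The two-step fetch above exists because RISC-V instruction length is not known
until the first 16-bit parcel is in hand: if its low two bits are both set,
the instruction is at least 32 bits and the second parcel is fetched and
merged with deposit32; otherwise it is a compressed (RVC) instruction. A
standalone sketch of that rule:

    #include <stdint.h>
    #include <stdbool.h>

    /* Base RISC-V encoding rule: low bits != 0b11 marks a 16-bit RVC insn;
     * 0b11 means 32 bits or more (decode_opc handles these two cases). */
    static bool insn_is_compressed(uint16_t first_parcel)
    {
        return (first_parcel & 0x3) != 0x3;
    }

    /* 0x4501 (c.li a0, 0) -> true; 0x0513, the low half of an addi,
     * -> false, so 16 more bits are fetched. */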
@ -186,10 +186,10 @@ static const struct SysemuCPUOps rx_sysemu_ops = {
static const struct TCGCPUOps rx_tcg_ops = {
.initialize = rx_translate_init,
.synchronize_from_tb = rx_cpu_synchronize_from_tb,
.cpu_exec_interrupt = rx_cpu_exec_interrupt,
.tlb_fill = rx_cpu_tlb_fill,

#ifndef CONFIG_USER_ONLY
.cpu_exec_interrupt = rx_cpu_exec_interrupt,
.do_interrupt = rx_cpu_do_interrupt,
#endif /* !CONFIG_USER_ONLY */
};
@ -124,8 +124,10 @@ typedef RXCPU ArchCPU;
#define CPU_RESOLVING_TYPE TYPE_RX_CPU

const char *rx_crname(uint8_t cr);
#ifndef CONFIG_USER_ONLY
void rx_cpu_do_interrupt(CPUState *cpu);
bool rx_cpu_exec_interrupt(CPUState *cpu, int int_req);
#endif /* !CONFIG_USER_ONLY */
void rx_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
@ -40,6 +40,8 @@ void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte)
env->psw_c = FIELD_EX32(psw, PSW, C);
}

#ifndef CONFIG_USER_ONLY

#define INT_FLAGS (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIR)
void rx_cpu_do_interrupt(CPUState *cs)
{
@ -142,6 +144,8 @@ bool rx_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return false;
}

#endif /* !CONFIG_USER_ONLY */

hwaddr rx_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
return addr;
@ -388,14 +388,16 @@ static void update_cc_op(DisasContext *s)
}
}

static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
uint64_t pc)
{
return (uint64_t)cpu_lduw_code(env, pc);
return (uint64_t)translator_lduw(env, &s->base, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
uint64_t pc)
{
return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}

static int get_mem_index(DisasContext *s)
@ -6273,7 +6275,7 @@ static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
ilen = s->ex_value & 0xf;
op = insn >> 56;
} else {
insn = ld_code2(env, pc);
insn = ld_code2(env, s, pc);
op = (insn >> 8) & 0xff;
ilen = get_ilen(op);
switch (ilen) {
@ -6281,10 +6283,10 @@ static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
insn = insn << 48;
break;
case 4:
insn = ld_code4(env, pc) << 32;
insn = ld_code4(env, s, pc) << 32;
break;
case 6:
insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
break;
default:
g_assert_not_reached();
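ld_code2/ld_code4 can be steered by get_ilen() because s390x encodes the
instruction length architecturally: the two most significant bits of the first
opcode byte give the size. A minimal sketch of that rule (QEMU's get_ilen is
equivalent in spirit; treat this as an approximation of it):

    #include <stdint.h>

    /* 00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6 bytes. */
    static int ilen_from_opcode(uint8_t op)
    {
        switch (op >> 6) {
        case 0:
            return 2;
        case 1:
        case 2:
            return 4;
        default:
            return 6;
        }
    }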
@ -236,10 +236,10 @@ static const struct SysemuCPUOps sh4_sysemu_ops = {
static const struct TCGCPUOps superh_tcg_ops = {
.initialize = sh4_translate_init,
.synchronize_from_tb = superh_cpu_synchronize_from_tb,
.cpu_exec_interrupt = superh_cpu_exec_interrupt,
.tlb_fill = superh_cpu_tlb_fill,

#ifndef CONFIG_USER_ONLY
.cpu_exec_interrupt = superh_cpu_exec_interrupt,
.do_interrupt = superh_cpu_do_interrupt,
.do_unaligned_access = superh_cpu_do_unaligned_access,
.io_recompile_replay_branch = superh_io_recompile_replay_branch,
@ -204,8 +204,6 @@ struct SuperHCPU {
};


void superh_cpu_do_interrupt(CPUState *cpu);
bool superh_cpu_exec_interrupt(CPUState *cpu, int int_req);
void superh_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int superh_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
@ -223,6 +221,8 @@ bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,

void sh4_cpu_list(void);
#if !defined(CONFIG_USER_ONLY)
void superh_cpu_do_interrupt(CPUState *cpu);
bool superh_cpu_exec_interrupt(CPUState *cpu, int int_req);
void cpu_sh4_invalidate_tlb(CPUSH4State *s);
uint32_t cpu_sh4_read_mmaped_itlb_addr(CPUSH4State *s,
hwaddr addr);
@ -45,11 +45,6 @@

#if defined(CONFIG_USER_ONLY)

void superh_cpu_do_interrupt(CPUState *cs)
{
cs->exception_index = -1;
}

int cpu_sh4_is_cached(CPUSH4State *env, target_ulong addr)
{
/* For user mode, only U0 area is cacheable. */
@ -784,8 +779,6 @@ int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr)
return 0;
}

#endif

bool superh_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
if (interrupt_request & CPU_INTERRUPT_HARD) {
@ -803,6 +796,8 @@ bool superh_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return false;
}

#endif /* !CONFIG_USER_ONLY */

bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr)
@ -1907,7 +1907,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)

/* Read all of the insns for the region. */
for (i = 0; i < max_insns; ++i) {
insns[i] = translator_lduw(env, pc + i * 2);
insns[i] = translator_lduw(env, &ctx->base, pc + i * 2);
}

ld_adr = ld_dst = ld_mop = -1;
@ -2307,7 +2307,7 @@ static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
}
#endif

ctx->opcode = translator_lduw(env, ctx->base.pc_next);
ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next);
decode_opc(ctx);
ctx->base.pc_next += 2;
}
@ -77,6 +77,7 @@ static void sparc_cpu_reset(DeviceState *dev)
env->cache_control = 0;
}

#ifndef CONFIG_USER_ONLY
static bool sparc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
if (interrupt_request & CPU_INTERRUPT_HARD) {
@ -96,6 +97,7 @@ static bool sparc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
return false;
}
#endif /* !CONFIG_USER_ONLY */

static void cpu_sparc_disas_set_info(CPUState *cpu, disassemble_info *info)
{
@ -863,10 +865,10 @@ static const struct SysemuCPUOps sparc_sysemu_ops = {
static const struct TCGCPUOps sparc_tcg_ops = {
.initialize = sparc_tcg_init,
.synchronize_from_tb = sparc_cpu_synchronize_from_tb,
.cpu_exec_interrupt = sparc_cpu_exec_interrupt,
.tlb_fill = sparc_cpu_tlb_fill,

#ifndef CONFIG_USER_ONLY
.cpu_exec_interrupt = sparc_cpu_exec_interrupt,
.do_interrupt = sparc_cpu_do_interrupt,
.do_transaction_failed = sparc_cpu_do_transaction_failed,
.do_unaligned_access = sparc_cpu_do_unaligned_access,
@ -5855,7 +5855,7 @@ static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
CPUSPARCState *env = cs->env_ptr;
unsigned int insn;

insn = translator_ldl(env, dc->pc);
insn = translator_ldl(env, &dc->base, dc->pc);
dc->base.pc_next += 4;
disas_sparc_insn(dc, insn);
@ -192,11 +192,11 @@ static const struct SysemuCPUOps xtensa_sysemu_ops = {

static const struct TCGCPUOps xtensa_tcg_ops = {
.initialize = xtensa_translate_init,
.cpu_exec_interrupt = xtensa_cpu_exec_interrupt,
.tlb_fill = xtensa_cpu_tlb_fill,
.debug_excp_handler = xtensa_breakpoint_handler,

#ifndef CONFIG_USER_ONLY
.cpu_exec_interrupt = xtensa_cpu_exec_interrupt,
.do_interrupt = xtensa_cpu_do_interrupt,
.do_transaction_failed = xtensa_cpu_do_transaction_failed,
.do_unaligned_access = xtensa_cpu_do_unaligned_access,
@ -566,12 +566,14 @@ struct XtensaCPU {
bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
#ifndef CONFIG_USER_ONLY
void xtensa_cpu_do_interrupt(CPUState *cpu);
bool xtensa_cpu_exec_interrupt(CPUState *cpu, int interrupt_request);
void xtensa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
unsigned size, MMUAccessType access_type,
int mmu_idx, MemTxAttrs attrs,
MemTxResult response, uintptr_t retaddr);
#endif
void xtensa_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
void xtensa_count_regs(const XtensaConfig *config,
@ -255,11 +255,6 @@ void xtensa_cpu_do_interrupt(CPUState *cs)
}
check_interrupts(env);
}
#else
void xtensa_cpu_do_interrupt(CPUState *cs)
{
}
#endif

bool xtensa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
@ -270,3 +265,5 @@ bool xtensa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
return false;
}

#endif /* !CONFIG_USER_ONLY */
@ -882,7 +882,8 @@ static int arg_copy_compare(const void *a, const void *b)
static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc)
{
xtensa_isa isa = dc->config->isa;
unsigned char b[MAX_INSN_LENGTH] = {translator_ldub(env, dc->pc)};
unsigned char b[MAX_INSN_LENGTH] = {translator_ldub(env, &dc->base,
dc->pc)};
unsigned len = xtensa_op0_insn_len(dc, b[0]);
xtensa_format fmt;
int slot, slots;
@ -907,7 +908,7 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc)

dc->base.pc_next = dc->pc + len;
for (i = 1; i < len; ++i) {
b[i] = translator_ldub(env, dc->pc + i);
b[i] = translator_ldub(env, &dc->base, dc->pc + i);
}
xtensa_insnbuf_from_chars(isa, dc->insnbuf, b, len);
fmt = xtensa_format_decode(isa, dc->insnbuf);
File diff suppressed because it is too large
@ -26,34 +26,9 @@
#ifndef ARM_TCG_TARGET_H
#define ARM_TCG_TARGET_H

/* The __ARM_ARCH define is provided by gcc 4.8. Construct it otherwise. */
#ifndef __ARM_ARCH
# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
|| defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
|| defined(__ARM_ARCH_7EM__)
# define __ARM_ARCH 7
# elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
|| defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \
|| defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6T2__)
# define __ARM_ARCH 6
# elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5E__) \
|| defined(__ARM_ARCH_5T__) || defined(__ARM_ARCH_5TE__) \
|| defined(__ARM_ARCH_5TEJ__)
# define __ARM_ARCH 5
# else
# define __ARM_ARCH 4
# endif
#endif

extern int arm_arch;

#if defined(__ARM_ARCH_5T__) \
|| defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__)
# define use_armv5t_instructions 1
#else
# define use_armv5t_instructions use_armv6_instructions
#endif

#define use_armv5t_instructions (__ARM_ARCH >= 5 || arm_arch >= 5)
#define use_armv6_instructions (__ARM_ARCH >= 6 || arm_arch >= 6)
#define use_armv7_instructions (__ARM_ARCH >= 7 || arm_arch >= 7)
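The rewritten predicates double as compile-time and runtime checks: when the
compiler already targets ARMv7, __ARM_ARCH >= 7 is a constant 1 and the
arm_arch comparison folds away; on older baselines it degrades to a test of
the architecture level probed at startup. A sketch of how a caller benefits
(emit_movw and emit_mov_orr_pair are hypothetical stand-ins, not tcg/arm
functions):

    #ifndef __ARM_ARCH
    #define __ARM_ARCH 4              /* assumption for non-ARM compiles */
    #endif
    #define use_armv7_instructions (__ARM_ARCH >= 7 || arm_arch >= 7)

    extern int arm_arch;              /* runtime-detected architecture */
    void emit_movw(int reg, int imm);         /* hypothetical v7 path */
    void emit_mov_orr_pair(int reg, int imm); /* hypothetical fallback */

    void emit_load_imm16(int reg, int imm)
    {
        if (use_armv7_instructions) {
            /* Constant-folds to this branch when built for ARMv7+. */
            emit_movw(reg, imm);
        } else {
            emit_mov_orr_pair(reg, imm);
        }
    }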
@ -241,8 +241,9 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
#define P_EXT 0x100 /* 0x0f opcode prefix */
#define P_EXT38 0x200 /* 0x0f 0x38 opcode prefix */
#define P_DATA16 0x400 /* 0x66 opcode prefix */
#define P_VEXW 0x1000 /* Set VEX.W = 1 */
#if TCG_TARGET_REG_BITS == 64
# define P_REXW 0x1000 /* Set REX.W = 1 */
# define P_REXW P_VEXW /* Set REX.W = 1; match VEXW */
# define P_REXB_R 0x2000 /* REG field as byte register */
# define P_REXB_RM 0x4000 /* R/M field as byte register */
# define P_GS 0x8000 /* gs segment override */
@ -410,13 +411,13 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
#define OPC_VPBROADCASTW (0x79 | P_EXT38 | P_DATA16)
#define OPC_VPBROADCASTD (0x58 | P_EXT38 | P_DATA16)
#define OPC_VPBROADCASTQ (0x59 | P_EXT38 | P_DATA16)
#define OPC_VPERMQ (0x00 | P_EXT3A | P_DATA16 | P_REXW)
#define OPC_VPERMQ (0x00 | P_EXT3A | P_DATA16 | P_VEXW)
#define OPC_VPERM2I128 (0x46 | P_EXT3A | P_DATA16 | P_VEXL)
#define OPC_VPSLLVD (0x47 | P_EXT38 | P_DATA16)
#define OPC_VPSLLVQ (0x47 | P_EXT38 | P_DATA16 | P_REXW)
#define OPC_VPSLLVQ (0x47 | P_EXT38 | P_DATA16 | P_VEXW)
#define OPC_VPSRAVD (0x46 | P_EXT38 | P_DATA16)
#define OPC_VPSRLVD (0x45 | P_EXT38 | P_DATA16)
#define OPC_VPSRLVQ (0x45 | P_EXT38 | P_DATA16 | P_REXW)
#define OPC_VPSRLVQ (0x45 | P_EXT38 | P_DATA16 | P_VEXW)
#define OPC_VZEROUPPER (0x77 | P_EXT)
#define OPC_XCHG_ax_r32 (0x90)
@ -576,7 +577,7 @@ static void tcg_out_vex_opc(TCGContext *s, int opc, int r, int v,

/* Use the two byte form if possible, which cannot encode
VEX.W, VEX.B, VEX.X, or an m-mmmm field other than P_EXT. */
if ((opc & (P_EXT | P_EXT38 | P_EXT3A | P_REXW)) == P_EXT
if ((opc & (P_EXT | P_EXT38 | P_EXT3A | P_VEXW)) == P_EXT
&& ((rm | index) & 8) == 0) {
/* Two byte VEX prefix. */
tcg_out8(s, 0xc5);
@ -601,7 +602,7 @@ static void tcg_out_vex_opc(TCGContext *s, int opc, int r, int v,
tmp |= (rm & 8 ? 0 : 0x20); /* VEX.B */
tcg_out8(s, tmp);

tmp = (opc & P_REXW ? 0x80 : 0); /* VEX.W */
tmp = (opc & P_VEXW ? 0x80 : 0); /* VEX.W */
}

tmp |= (opc & P_VEXL ? 0x04 : 0); /* VEX.L */
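This is the VPSLLVQ/VPSRLVQ encoding fix from the cover letter: P_REXW was
only defined for 64-bit hosts, so on a 32-bit host the W bit silently stayed
clear and the quadword variable shifts were emitted as their doubleword
counterparts. P_VEXW exists regardless of host word size. For reference, a
standalone sketch of the third byte of a 3-byte VEX prefix, where W lives
(field layout per the Intel SDM; the helper name is made up):

    #include <stdint.h>

    /* bit 7: W (selects e.g. VPSLLVQ over VPSLLVD for opcode 0x47),
     * bits 6-3: second source register, inverted (~vvvv),
     * bit 2: L (256-bit ymm when set), bits 1-0: implied prefix pp. */
    static uint8_t vex3_byte2(int w, int v, int l, int pp)
    {
        return (w ? 0x80 : 0) | ((~v & 15) << 3) | (l ? 0x04 : 0) | (pp & 3);
    }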
@ -25,9 +25,24 @@
#include "elf.h"
#include "../tcg-pool.c.inc"

#if defined _CALL_DARWIN || defined __APPLE__
#define TCG_TARGET_CALL_DARWIN
#endif
/*
* Standardize on the _CALL_FOO symbols used by GCC:
* Apple XCode does not define _CALL_DARWIN.
* Clang defines _CALL_ELF (64-bit) but not _CALL_SYSV (32-bit).
*/
#if !defined(_CALL_SYSV) && \
!defined(_CALL_DARWIN) && \
!defined(_CALL_AIX) && \
!defined(_CALL_ELF)
# if defined(__APPLE__)
# define _CALL_DARWIN
# elif defined(__ELF__) && TCG_TARGET_REG_BITS == 32
# define _CALL_SYSV
# else
# error "Unknown ABI"
# endif
#endif

#ifdef _CALL_SYSV
# define TCG_TARGET_CALL_ALIGN_ARGS 1
#endif
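The detection block above is what fixes the 32-bit clang build: clang defines
neither _CALL_SYSV nor the old TCG_TARGET_CALL_DARWIN cue, so the ABI is now
inferred from __ELF__ and the register width. A standalone probe of the same
idea, meant to be compiled on a PowerPC host (it substitutes
__SIZEOF_POINTER__ for TCG_TARGET_REG_BITS and errors out elsewhere by
design):

    #if !defined(_CALL_SYSV) && !defined(_CALL_DARWIN) && \
        !defined(_CALL_AIX) && !defined(_CALL_ELF)
    # if defined(__APPLE__)
    #  define _CALL_DARWIN
    # elif defined(__ELF__) && __SIZEOF_POINTER__ == 4
    #  define _CALL_SYSV
    # else
    #  error "Unknown ABI"
    # endif
    #endif

    #include <stdio.h>

    int main(void)
    {
    #if defined(_CALL_ELF)
        puts("_CALL_ELF (64-bit ELF: defined by gcc and clang)");
    #elif defined(_CALL_SYSV)
        puts("_CALL_SYSV (32-bit ELF: inferred for clang)");
    #elif defined(_CALL_DARWIN)
        puts("_CALL_DARWIN (inferred under XCode)");
    #else
        puts("_CALL_AIX");
    #endif
        return 0;
    }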
@ -169,7 +184,7 @@ static const int tcg_target_call_oarg_regs[] = {
};

static const int tcg_target_callee_save_regs[] = {
#ifdef TCG_TARGET_CALL_DARWIN
#ifdef _CALL_DARWIN
TCG_REG_R11,
#endif
TCG_REG_R14,
@ -2372,7 +2387,7 @@ static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
# define LINK_AREA_SIZE (6 * SZR)
# define LR_OFFSET (1 * SZR)
# define TCG_TARGET_CALL_STACK_OFFSET (LINK_AREA_SIZE + 8 * SZR)
#elif defined(TCG_TARGET_CALL_DARWIN)
#elif defined(_CALL_DARWIN)
# define LINK_AREA_SIZE (6 * SZR)
# define LR_OFFSET (2 * SZR)
#elif TCG_TARGET_REG_BITS == 64