diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c index 75dbc1e4e3..5fd1ed3422 100644 --- a/accel/tcg/cpu-exec.c +++ b/accel/tcg/cpu-exec.c @@ -588,8 +588,9 @@ static inline void tb_add_jump(TranslationBlock *tb, int n, static inline bool cpu_handle_halt(CPUState *cpu) { +#ifndef CONFIG_USER_ONLY if (cpu->halted) { -#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY) +#if defined(TARGET_I386) if (cpu->interrupt_request & CPU_INTERRUPT_POLL) { X86CPU *x86_cpu = X86_CPU(cpu); qemu_mutex_lock_iothread(); @@ -597,13 +598,14 @@ static inline bool cpu_handle_halt(CPUState *cpu) cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL); qemu_mutex_unlock_iothread(); } -#endif +#endif /* TARGET_I386 */ if (!cpu_has_work(cpu)) { return true; } cpu->halted = 0; } +#endif /* !CONFIG_USER_ONLY */ return false; } diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index 5d1b6d80fb..9d5987ba04 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -662,6 +662,19 @@ static inline tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, } return addr; } + +/** + * cpu_signal_handler + * @signum: host signal number + * @pinfo: host siginfo_t + * @puc: host ucontext_t + * + * To be called from the SIGBUS and SIGSEGV signal handler to inform the + * virtual cpu of exceptions. Returns true if the signal was handled by + * the virtual CPU. + */ +int cpu_signal_handler(int signum, void *pinfo, void *puc); + #else static inline void mmap_lock(void) {} static inline void mmap_unlock(void) {} diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h index 55123cb4d2..6cbe17f2e6 100644 --- a/include/hw/core/tcg-cpu-ops.h +++ b/include/hw/core/tcg-cpu-ops.h @@ -78,10 +78,11 @@ struct TCGCPUOps { MemTxResult response, uintptr_t retaddr); /** * @do_unaligned_access: Callback for unaligned access handling + * The callback must exit via raising an exception. */ void (*do_unaligned_access)(CPUState *cpu, vaddr addr, MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr); + int mmu_idx, uintptr_t retaddr) QEMU_NORETURN; /** * @adjust_watchpoint_address: hack for cpu_check_watchpoint used by ARM diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h index 4e993bd15b..772828cc26 100644 --- a/target/alpha/cpu.h +++ b/target/alpha/cpu.h @@ -283,11 +283,10 @@ hwaddr alpha_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); int alpha_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int alpha_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); void alpha_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, - MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr); + MMUAccessType access_type, int mmu_idx, + uintptr_t retaddr) QEMU_NORETURN; #define cpu_list alpha_cpu_list -#define cpu_signal_handler cpu_alpha_signal_handler typedef CPUAlphaState CPUArchState; typedef AlphaCPU ArchCPU; @@ -440,11 +439,6 @@ void alpha_translate_init(void); #define CPU_RESOLVING_TYPE TYPE_ALPHA_CPU void alpha_cpu_list(void); -/* you can call this signal handler from your SIGBUS and SIGSEGV - signal handlers to inform the virtual CPU of exceptions. non zero - is returned if the signal was handled by the virtual CPU. 
*/ -int cpu_alpha_signal_handler(int host_signum, void *pinfo, - void *puc); bool alpha_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 87235dae63..e33f37b70a 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -1121,12 +1121,6 @@ static inline bool is_a64(CPUARMState *env) return env->aarch64; } -/* you can call this signal handler from your SIGBUS and SIGSEGV - signal handlers to inform the virtual CPU of exceptions. non zero - is returned if the signal was handled by the virtual CPU. */ -int cpu_arm_signal_handler(int host_signum, void *pinfo, - void *puc); - /** * pmu_op_start/finish * @env: CPUARMState @@ -3017,7 +3011,6 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync); #define TYPE_ARM_HOST_CPU "host-" TYPE_ARM_CPU -#define cpu_signal_handler cpu_arm_signal_handler #define cpu_list arm_cpu_list /* ARM has the following "translation regimes" (as the ARM ARM calls them): diff --git a/target/arm/internals.h b/target/arm/internals.h index 777f968764..9fbb364968 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -594,7 +594,7 @@ bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx); /* Raise a data fault alignment exception for the specified virtual address */ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr); + int mmu_idx, uintptr_t retaddr) QEMU_NORETURN; /* arm_cpu_do_transaction_failed: handle a memory system error response * (eg "no device/memory present at address") by raising an external abort diff --git a/target/avr/cpu.h b/target/avr/cpu.h index 93e3faa0a9..dceacf3cd7 100644 --- a/target/avr/cpu.h +++ b/target/avr/cpu.h @@ -175,7 +175,6 @@ static inline void set_avr_feature(CPUAVRState *env, int feature) } #define cpu_list avr_cpu_list -#define cpu_signal_handler cpu_avr_signal_handler #define cpu_mmu_index avr_cpu_mmu_index static inline int avr_cpu_mmu_index(CPUAVRState *env, bool ifetch) @@ -187,7 +186,6 @@ void avr_cpu_tcg_init(void); void avr_cpu_list(void); int cpu_avr_exec(CPUState *cpu); -int cpu_avr_signal_handler(int host_signum, void *pinfo, void *puc); int avr_cpu_memory_rw_debug(CPUState *cs, vaddr address, uint8_t *buf, int len, bool is_write); diff --git a/target/cris/cpu.h b/target/cris/cpu.h index be021899ae..6603565f83 100644 --- a/target/cris/cpu.h +++ b/target/cris/cpu.h @@ -199,12 +199,6 @@ int crisv10_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int cris_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int cris_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); -/* you can call this signal handler from your SIGBUS and SIGSEGV - signal handlers to inform the virtual CPU of exceptions. non zero - is returned if the signal was handled by the virtual CPU. 
*/ -int cpu_cris_signal_handler(int host_signum, void *pinfo, - void *puc); - void cris_initialize_tcg(void); void cris_initialize_crisv10_tcg(void); @@ -250,8 +244,6 @@ enum { #define CRIS_CPU_TYPE_NAME(name) (name CRIS_CPU_TYPE_SUFFIX) #define CPU_RESOLVING_TYPE TYPE_CRIS_CPU -#define cpu_signal_handler cpu_cris_signal_handler - /* MMU modes definitions */ #define MMU_USER_IDX 1 static inline int cpu_mmu_index (CPUCRISState *env, bool ifetch) diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h index 2855dd3881..f7d043865b 100644 --- a/target/hexagon/cpu.h +++ b/target/hexagon/cpu.h @@ -129,9 +129,6 @@ typedef struct HexagonCPU { #include "cpu_bits.h" -#define cpu_signal_handler cpu_hexagon_signal_handler -int cpu_hexagon_signal_handler(int host_signum, void *pinfo, void *puc); - static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, target_ulong *pc, target_ulong *cs_base, uint32_t *flags) { diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c index e8edd189bf..89cba9d7a2 100644 --- a/target/hppa/cpu.c +++ b/target/hppa/cpu.c @@ -72,9 +72,10 @@ static void hppa_cpu_disas_set_info(CPUState *cs, disassemble_info *info) } #ifndef CONFIG_USER_ONLY -static void hppa_cpu_do_unaligned_access(CPUState *cs, vaddr addr, - MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr) +static void QEMU_NORETURN +hppa_cpu_do_unaligned_access(CPUState *cs, vaddr addr, + MMUAccessType access_type, int mmu_idx, + uintptr_t retaddr) { HPPACPU *cpu = HPPA_CPU(cs); CPUHPPAState *env = &cpu->env; diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h index 7854675b90..d3cb7a279f 100644 --- a/target/hppa/cpu.h +++ b/target/hppa/cpu.h @@ -319,9 +319,6 @@ static inline void cpu_hppa_change_prot_id(CPUHPPAState *env) { } void cpu_hppa_change_prot_id(CPUHPPAState *env); #endif -#define cpu_signal_handler cpu_hppa_signal_handler - -int cpu_hppa_signal_handler(int host_signum, void *pinfo, void *puc); hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr); int hppa_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int hppa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); diff --git a/target/i386/cpu.h b/target/i386/cpu.h index 7dd664791a..c2954c71ea 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -1947,12 +1947,6 @@ void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32); void cpu_x86_fxsave(CPUX86State *s, target_ulong ptr); void cpu_x86_fxrstor(CPUX86State *s, target_ulong ptr); -/* you can call this signal handler from your SIGBUS and SIGSEGV - signal handlers to inform the virtual CPU of exceptions. non zero - is returned if the signal was handled by the virtual CPU. */ -int cpu_x86_signal_handler(int host_signum, void *pinfo, - void *puc); - /* cpu.c */ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, uint32_t vendor2, uint32_t vendor3); @@ -2020,7 +2014,6 @@ uint64_t cpu_get_tsc(CPUX86State *env); #define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu32") #endif -#define cpu_signal_handler cpu_x86_signal_handler #define cpu_list x86_cpu_list /* MMU modes definitions */ diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h index 550eb028b6..a3423729ef 100644 --- a/target/m68k/cpu.h +++ b/target/m68k/cpu.h @@ -177,13 +177,6 @@ int m68k_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); void m68k_tcg_init(void); void m68k_cpu_init_gdb(M68kCPU *cpu); -/* - * you can call this signal handler from your SIGBUS and SIGSEGV - * signal handlers to inform the virtual CPU of exceptions. 
non zero - * is returned if the signal was handled by the virtual CPU. - */ -int cpu_m68k_signal_handler(int host_signum, void *pinfo, - void *puc); uint32_t cpu_m68k_get_ccr(CPUM68KState *env); void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t); void cpu_m68k_set_sr(CPUM68KState *env, uint32_t); @@ -563,7 +556,6 @@ enum { #define M68K_CPU_TYPE_NAME(model) model M68K_CPU_TYPE_SUFFIX #define CPU_RESOLVING_TYPE TYPE_M68K_CPU -#define cpu_signal_handler cpu_m68k_signal_handler #define cpu_list m68k_cpu_list /* MMU modes definitions */ diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h index 40401c33b7..b7a848bbae 100644 --- a/target/microblaze/cpu.h +++ b/target/microblaze/cpu.h @@ -361,7 +361,7 @@ bool mb_cpu_exec_interrupt(CPUState *cs, int int_req); #endif /* !CONFIG_USER_ONLY */ void mb_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr); + int mmu_idx, uintptr_t retaddr) QEMU_NORETURN; void mb_cpu_dump_state(CPUState *cpu, FILE *f, int flags); hwaddr mb_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr, MemTxAttrs *attrs); @@ -385,16 +385,9 @@ static inline void mb_cpu_write_msr(CPUMBState *env, uint32_t val) } void mb_tcg_init(void); -/* you can call this signal handler from your SIGBUS and SIGSEGV - signal handlers to inform the virtual CPU of exceptions. non zero - is returned if the signal was handled by the virtual CPU. */ -int cpu_mb_signal_handler(int host_signum, void *pinfo, - void *puc); #define CPU_RESOLVING_TYPE TYPE_MICROBLAZE_CPU -#define cpu_signal_handler cpu_mb_signal_handler - /* MMU modes definitions */ #define MMU_NOMMU_IDX 0 #define MMU_KERNEL_IDX 1 diff --git a/target/mips/cpu.h b/target/mips/cpu.h index 1dfe69c6c0..56b1cbd091 100644 --- a/target/mips/cpu.h +++ b/target/mips/cpu.h @@ -1193,7 +1193,6 @@ struct MIPSCPU { void mips_cpu_list(void); -#define cpu_signal_handler cpu_mips_signal_handler #define cpu_list mips_cpu_list extern void cpu_wrdsp(uint32_t rs, uint32_t mask_num, CPUMIPSState *env); @@ -1277,8 +1276,6 @@ enum { */ #define CPU_INTERRUPT_WAKE CPU_INTERRUPT_TGT_INT_0 -int cpu_mips_signal_handler(int host_signum, void *pinfo, void *puc); - #define MIPS_CPU_TYPE_SUFFIX "-" TYPE_MIPS_CPU #define MIPS_CPU_TYPE_NAME(model) model MIPS_CPU_TYPE_SUFFIX #define CPU_RESOLVING_TYPE TYPE_MIPS_CPU diff --git a/target/mips/internal.h b/target/mips/internal.h index eecdd10116..daddb05fd4 100644 --- a/target/mips/internal.h +++ b/target/mips/internal.h @@ -156,8 +156,6 @@ extern const VMStateDescription vmstate_mips_cpu; #endif /* !CONFIG_USER_ONLY */ -#define cpu_signal_handler cpu_mips_signal_handler - static inline bool cpu_mips_hw_interrupts_enabled(CPUMIPSState *env) { return (env->CP0_Status & (1 << CP0St_IE)) && diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h index c7a77ddccd..bad3deb611 100644 --- a/target/mips/tcg/tcg-internal.h +++ b/target/mips/tcg/tcg-internal.h @@ -22,8 +22,8 @@ bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, - MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr); + MMUAccessType access_type, int mmu_idx, + uintptr_t retaddr) QEMU_NORETURN; const char *mips_exception_name(int32_t exception); diff --git a/target/nios2/cpu.h b/target/nios2/cpu.h index 2ab82fdc71..a80587338a 100644 --- a/target/nios2/cpu.h +++ b/target/nios2/cpu.h @@ -193,20 +193,18 @@ struct Nios2CPU { void 
nios2_tcg_init(void); void nios2_cpu_do_interrupt(CPUState *cs); -int cpu_nios2_signal_handler(int host_signum, void *pinfo, void *puc); void dump_mmu(CPUNios2State *env); void nios2_cpu_dump_state(CPUState *cpu, FILE *f, int flags); hwaddr nios2_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); void nios2_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, - MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr); + MMUAccessType access_type, int mmu_idx, + uintptr_t retaddr) QEMU_NORETURN; void do_nios2_semihosting(CPUNios2State *env); #define CPU_RESOLVING_TYPE TYPE_NIOS2_CPU #define cpu_gen_code cpu_nios2_gen_code -#define cpu_signal_handler cpu_nios2_signal_handler #define CPU_SAVE_VERSION 1 diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h index be6df81a81..187a4a114e 100644 --- a/target/openrisc/cpu.h +++ b/target/openrisc/cpu.h @@ -320,11 +320,9 @@ void openrisc_translate_init(void); bool openrisc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); -int cpu_openrisc_signal_handler(int host_signum, void *pinfo, void *puc); int print_insn_or1k(bfd_vma addr, disassemble_info *info); #define cpu_list cpu_openrisc_list -#define cpu_signal_handler cpu_openrisc_signal_handler #ifndef CONFIG_USER_ONLY extern const VMStateDescription vmstate_openrisc_cpu; diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h index 362e7c4c5c..01d3773bc7 100644 --- a/target/ppc/cpu.h +++ b/target/ppc/cpu.h @@ -1278,12 +1278,6 @@ extern const VMStateDescription vmstate_ppc_cpu; /*****************************************************************************/ void ppc_translate_init(void); -/* - * you can call this signal handler from your SIGBUS and SIGSEGV - * signal handlers to inform the virtual CPU of exceptions. non zero - * is returned if the signal was handled by the virtual CPU. 
- */ -int cpu_ppc_signal_handler(int host_signum, void *pinfo, void *puc); bool ppc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); @@ -1371,7 +1365,6 @@ int ppc_dcr_write(ppc_dcr_t *dcr_env, int dcrn, uint32_t val); #define POWERPC_CPU_TYPE_NAME(model) model POWERPC_CPU_TYPE_SUFFIX #define CPU_RESOLVING_TYPE TYPE_POWERPC_CPU -#define cpu_signal_handler cpu_ppc_signal_handler #define cpu_list ppc_cpu_list /* MMU modes definitions */ diff --git a/target/ppc/internal.h b/target/ppc/internal.h index b71406fa46..55284369f5 100644 --- a/target/ppc/internal.h +++ b/target/ppc/internal.h @@ -213,8 +213,8 @@ void helper_compute_fprf_float128(CPUPPCState *env, float128 arg); /* Raise a data fault alignment exception for the specified virtual address */ void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr addr, - MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr); + MMUAccessType access_type, int mmu_idx, + uintptr_t retaddr) QEMU_NORETURN; /* translate.c */ diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h index e735e53e26..5896aca346 100644 --- a/target/riscv/cpu.h +++ b/target/riscv/cpu.h @@ -344,7 +344,7 @@ int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch); hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr, MMUAccessType access_type, int mmu_idx, - uintptr_t retaddr); + uintptr_t retaddr) QEMU_NORETURN; bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); @@ -356,7 +356,6 @@ void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, char *riscv_isa_string(RISCVCPU *cpu); void riscv_cpu_list(void); -#define cpu_signal_handler riscv_cpu_signal_handler #define cpu_list riscv_cpu_list #define cpu_mmu_index riscv_cpu_mmu_index @@ -372,7 +371,6 @@ void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(uint32_t), void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv); void riscv_translate_init(void); -int riscv_cpu_signal_handler(int host_signum, void *pinfo, void *puc); void QEMU_NORETURN riscv_raise_exception(CPURISCVState *env, uint32_t exception, uintptr_t pc); diff --git a/target/rx/cpu.h b/target/rx/cpu.h index faa3606f52..4ac71aec37 100644 --- a/target/rx/cpu.h +++ b/target/rx/cpu.h @@ -134,13 +134,9 @@ int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); hwaddr rx_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); void rx_translate_init(void); -int cpu_rx_signal_handler(int host_signum, void *pinfo, - void *puc); - void rx_cpu_list(void); void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte); -#define cpu_signal_handler cpu_rx_signal_handler #define cpu_list rx_cpu_list #include "exec/cpu-all.h" diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h index b26ae8fff2..3153d053e9 100644 --- a/target/s390x/cpu.h +++ b/target/s390x/cpu.h @@ -809,13 +809,6 @@ void s390_set_qemu_cpu_model(uint16_t type, uint8_t gen, uint8_t ec_ga, #define S390_CPU_TYPE_NAME(name) (name S390_CPU_TYPE_SUFFIX) #define CPU_RESOLVING_TYPE TYPE_S390_CPU -/* you can call this signal handler from your SIGBUS and SIGSEGV - signal handlers to inform the virtual CPU of exceptions. non zero - is returned if the signal was handled by the virtual CPU. 
*/ -int cpu_s390x_signal_handler(int host_signum, void *pinfo, void *puc); -#define cpu_signal_handler cpu_s390x_signal_handler - - /* interrupt.c */ #define RA_IGNORED 0 void s390_program_interrupt(CPUS390XState *env, uint32_t code, uintptr_t ra); diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h index 7a6aa4dacc..27d4a03ca1 100644 --- a/target/s390x/s390x-internal.h +++ b/target/s390x/s390x-internal.h @@ -274,8 +274,8 @@ bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr, - MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr); + MMUAccessType access_type, int mmu_idx, + uintptr_t retaddr) QEMU_NORETURN; /* fpu_helper.c */ diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h index 017a770214..dc81406646 100644 --- a/target/sh4/cpu.h +++ b/target/sh4/cpu.h @@ -209,12 +209,10 @@ hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); int superh_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int superh_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, - MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr); + MMUAccessType access_type, int mmu_idx, + uintptr_t retaddr) QEMU_NORETURN; void sh4_translate_init(void); -int cpu_sh4_signal_handler(int host_signum, void *pinfo, - void *puc); bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); @@ -250,7 +248,6 @@ void cpu_load_tlb(CPUSH4State * env); #define SUPERH_CPU_TYPE_NAME(model) model SUPERH_CPU_TYPE_SUFFIX #define CPU_RESOLVING_TYPE TYPE_SUPERH_CPU -#define cpu_signal_handler cpu_sh4_signal_handler #define cpu_list sh4_cpu_list /* MMU modes definitions */ diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h index 1f40d768d8..5a7f1ed5d6 100644 --- a/target/sparc/cpu.h +++ b/target/sparc/cpu.h @@ -648,13 +648,11 @@ hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr, int mmu_idx); #endif #endif -int cpu_sparc_signal_handler(int host_signum, void *pinfo, void *puc); #define SPARC_CPU_TYPE_SUFFIX "-" TYPE_SPARC_CPU #define SPARC_CPU_TYPE_NAME(model) model SPARC_CPU_TYPE_SUFFIX #define CPU_RESOLVING_TYPE TYPE_SPARC_CPU -#define cpu_signal_handler cpu_sparc_signal_handler #define cpu_list sparc_cpu_list /* MMU modes definitions */ diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h index 4b61a2c03f..c461387e71 100644 --- a/target/tricore/cpu.h +++ b/target/tricore/cpu.h @@ -362,7 +362,6 @@ void fpu_set_state(CPUTriCoreState *env); void tricore_cpu_list(void); -#define cpu_signal_handler cpu_tricore_signal_handler #define cpu_list tricore_cpu_list static inline int cpu_mmu_index(CPUTriCoreState *env, bool ifetch) @@ -377,7 +376,6 @@ typedef TriCoreCPU ArchCPU; void cpu_state_reset(CPUTriCoreState *s); void tricore_tcg_init(void); -int cpu_tricore_signal_handler(int host_signum, void *pinfo, void *puc); static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, target_ulong *pc, target_ulong *cs_base, uint32_t *flags) diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h index cbb720e7cc..f9a510ca46 100644 --- a/target/xtensa/cpu.h +++ b/target/xtensa/cpu.h @@ -581,10 +581,9 @@ void xtensa_count_regs(const XtensaConfig *config, int xtensa_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int xtensa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, 
int reg); void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, - MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr); + MMUAccessType access_type, int mmu_idx, + uintptr_t retaddr) QEMU_NORETURN; -#define cpu_signal_handler cpu_xtensa_signal_handler #define cpu_list xtensa_cpu_list #define XTENSA_CPU_TYPE_SUFFIX "-" TYPE_XTENSA_CPU @@ -613,7 +612,6 @@ void check_interrupts(CPUXtensaState *s); void xtensa_irq_init(CPUXtensaState *env); qemu_irq *xtensa_get_extints(CPUXtensaState *env); qemu_irq xtensa_get_runstall(CPUXtensaState *env); -int cpu_xtensa_signal_handler(int host_signum, void *pinfo, void *puc); void xtensa_cpu_list(void); void xtensa_sync_window_from_phys(CPUXtensaState *env); void xtensa_sync_phys_from_window(CPUXtensaState *env); diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc index bf0eb84e2d..41ffa28394 100644 --- a/tcg/mips/tcg-target.c.inc +++ b/tcg/mips/tcg-target.c.inc @@ -187,7 +187,7 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, #endif -static inline bool is_p2m1(tcg_target_long val) +static bool is_p2m1(tcg_target_long val) { return val && ((val + 1) & val) == 0; } @@ -361,8 +361,8 @@ typedef enum { /* * Type reg */ -static inline void tcg_out_opc_reg(TCGContext *s, MIPSInsn opc, - TCGReg rd, TCGReg rs, TCGReg rt) +static void tcg_out_opc_reg(TCGContext *s, MIPSInsn opc, + TCGReg rd, TCGReg rs, TCGReg rt) { int32_t inst; @@ -376,8 +376,8 @@ static inline void tcg_out_opc_reg(TCGContext *s, MIPSInsn opc, /* * Type immediate */ -static inline void tcg_out_opc_imm(TCGContext *s, MIPSInsn opc, - TCGReg rt, TCGReg rs, TCGArg imm) +static void tcg_out_opc_imm(TCGContext *s, MIPSInsn opc, + TCGReg rt, TCGReg rs, TCGArg imm) { int32_t inst; @@ -391,8 +391,8 @@ static inline void tcg_out_opc_imm(TCGContext *s, MIPSInsn opc, /* * Type bitfield */ -static inline void tcg_out_opc_bf(TCGContext *s, MIPSInsn opc, TCGReg rt, - TCGReg rs, int msb, int lsb) +static void tcg_out_opc_bf(TCGContext *s, MIPSInsn opc, TCGReg rt, + TCGReg rs, int msb, int lsb) { int32_t inst; @@ -404,8 +404,8 @@ static inline void tcg_out_opc_bf(TCGContext *s, MIPSInsn opc, TCGReg rt, tcg_out32(s, inst); } -static inline void tcg_out_opc_bf64(TCGContext *s, MIPSInsn opc, MIPSInsn opm, - MIPSInsn oph, TCGReg rt, TCGReg rs, +static void tcg_out_opc_bf64(TCGContext *s, MIPSInsn opc, MIPSInsn opm, + MIPSInsn oph, TCGReg rt, TCGReg rs, int msb, int lsb) { if (lsb >= 32) { @@ -422,8 +422,7 @@ static inline void tcg_out_opc_bf64(TCGContext *s, MIPSInsn opc, MIPSInsn opm, /* * Type branch */ -static inline void tcg_out_opc_br(TCGContext *s, MIPSInsn opc, - TCGReg rt, TCGReg rs) +static void tcg_out_opc_br(TCGContext *s, MIPSInsn opc, TCGReg rt, TCGReg rs) { tcg_out_opc_imm(s, opc, rt, rs, 0); } @@ -431,8 +430,8 @@ static inline void tcg_out_opc_br(TCGContext *s, MIPSInsn opc, /* * Type sa */ -static inline void tcg_out_opc_sa(TCGContext *s, MIPSInsn opc, - TCGReg rd, TCGReg rt, TCGArg sa) +static void tcg_out_opc_sa(TCGContext *s, MIPSInsn opc, + TCGReg rd, TCGReg rt, TCGArg sa) { int32_t inst; @@ -479,28 +478,27 @@ static bool tcg_out_opc_jmp(TCGContext *s, MIPSInsn opc, const void *target) return true; } -static inline void tcg_out_nop(TCGContext *s) +static void tcg_out_nop(TCGContext *s) { tcg_out32(s, 0); } -static inline void tcg_out_dsll(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa) +static void tcg_out_dsll(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa) { tcg_out_opc_sa64(s, OPC_DSLL, OPC_DSLL32, rd, rt, sa); } -static inline void 
tcg_out_dsrl(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa) +static void tcg_out_dsrl(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa) { tcg_out_opc_sa64(s, OPC_DSRL, OPC_DSRL32, rd, rt, sa); } -static inline void tcg_out_dsra(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa) +static void tcg_out_dsra(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa) { tcg_out_opc_sa64(s, OPC_DSRA, OPC_DSRA32, rd, rt, sa); } -static inline bool tcg_out_mov(TCGContext *s, TCGType type, - TCGReg ret, TCGReg arg) +static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) { /* Simple reg-reg move, optimising out the 'do nothing' case */ if (ret != arg) { @@ -575,8 +573,10 @@ static void tcg_out_bswap16(TCGContext *s, TCGReg ret, TCGReg arg, int flags) static void tcg_out_bswap_subr(TCGContext *s, const tcg_insn_unit *sub) { - bool ok = tcg_out_opc_jmp(s, OPC_JAL, sub); - tcg_debug_assert(ok); + if (!tcg_out_opc_jmp(s, OPC_JAL, sub)) { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP1, (uintptr_t)sub); + tcg_out_opc_reg(s, OPC_JALR, TCG_REG_RA, TCG_TMP1, 0); + } } static void tcg_out_bswap32(TCGContext *s, TCGReg ret, TCGReg arg, int flags) @@ -612,27 +612,7 @@ static void tcg_out_bswap64(TCGContext *s, TCGReg ret, TCGReg arg) } } -static inline void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg) -{ - if (use_mips32r2_instructions) { - tcg_out_opc_reg(s, OPC_SEB, ret, 0, arg); - } else { - tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24); - tcg_out_opc_sa(s, OPC_SRA, ret, ret, 24); - } -} - -static inline void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg) -{ - if (use_mips32r2_instructions) { - tcg_out_opc_reg(s, OPC_SEH, ret, 0, arg); - } else { - tcg_out_opc_sa(s, OPC_SLL, ret, arg, 16); - tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16); - } -} - -static inline void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg) +static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg) { if (use_mips32r2_instructions) { tcg_out_opc_bf(s, OPC_DEXT, ret, arg, 31, 0); @@ -656,8 +636,8 @@ static void tcg_out_ldst(TCGContext *s, MIPSInsn opc, TCGReg data, tcg_out_opc_imm(s, opc, data, addr, lo); } -static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, - TCGReg arg1, intptr_t arg2) +static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, + TCGReg arg1, intptr_t arg2) { MIPSInsn opc = OPC_LD; if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32) { @@ -666,8 +646,8 @@ static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, tcg_out_ldst(s, opc, arg, arg1, arg2); } -static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, - TCGReg arg1, intptr_t arg2) +static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, + TCGReg arg1, intptr_t arg2) { MIPSInsn opc = OPC_SD; if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32) { @@ -676,8 +656,8 @@ static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, tcg_out_ldst(s, opc, arg, arg1, arg2); } -static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, - TCGReg base, intptr_t ofs) +static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, + TCGReg base, intptr_t ofs) { if (val == 0) { tcg_out_st(s, type, TCG_REG_ZERO, base, ofs); @@ -1637,9 +1617,9 @@ static void tcg_out_clz(TCGContext *s, MIPSInsn opcv2, MIPSInsn opcv6, } } -static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, - const TCGArg args[TCG_MAX_OP_ARGS], - const int const_args[TCG_MAX_OP_ARGS]) +static void tcg_out_op(TCGContext *s, TCGOpcode opc, + const TCGArg args[TCG_MAX_OP_ARGS], + const int 
const_args[TCG_MAX_OP_ARGS]) { MIPSInsn i1, i2; TCGArg a0, a1, a2; @@ -1674,17 +1654,11 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, } break; case INDEX_op_goto_tb: - if (s->tb_jmp_insn_offset) { - /* direct jump method */ - s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s); - /* Avoid clobbering the address during retranslation. */ - tcg_out32(s, OPC_J | (*(uint32_t *)s->code_ptr & 0x3ffffff)); - } else { - /* indirect jump method */ - tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO, - (uintptr_t)(s->tb_jmp_target_addr + a0)); - tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0); - } + /* indirect jump method */ + tcg_debug_assert(s->tb_jmp_insn_offset == 0); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO, + (uintptr_t)(s->tb_jmp_target_addr + a0)); + tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0); tcg_out_nop(s); set_jmp_reset_offset(s, a0); break; @@ -2558,13 +2532,6 @@ static void tcg_target_init(TCGContext *s) tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP); /* global pointer */ } -void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx, - uintptr_t jmp_rw, uintptr_t addr) -{ - qatomic_set((uint32_t *)jmp_rw, deposit32(OPC_J, 0, 26, addr >> 2)); - flush_idcache_range(jmp_rx, jmp_rw, 4); -} - typedef struct { DebugFrameHeader h; uint8_t fde_def_cfa[4]; diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h index 3a62055f04..c366fdf74b 100644 --- a/tcg/mips/tcg-target.h +++ b/tcg/mips/tcg-target.h @@ -39,11 +39,7 @@ #define TCG_TARGET_TLB_DISPLACEMENT_BITS 16 #define TCG_TARGET_NB_REGS 32 -/* - * We have a 256MB branch region, but leave room to make sure the - * main executable is also within that region. - */ -#define MAX_CODE_GEN_BUFFER_SIZE (128 * MiB) +#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1) typedef enum { TCG_REG_ZERO = 0, @@ -136,7 +132,7 @@ extern bool use_mips32r2_instructions; #define TCG_TARGET_HAS_muluh_i32 1 #define TCG_TARGET_HAS_mulsh_i32 1 #define TCG_TARGET_HAS_bswap32_i32 1 -#define TCG_TARGET_HAS_direct_jump 1 +#define TCG_TARGET_HAS_direct_jump 0 #if TCG_TARGET_REG_BITS == 64 #define TCG_TARGET_HAS_add2_i32 0 @@ -207,7 +203,9 @@ extern bool use_mips32r2_instructions; #define TCG_TARGET_DEFAULT_MO (0) #define TCG_TARGET_HAS_MEMORY_BSWAP 1 -void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t); +/* not defined -- call should be eliminated at compile time */ +void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t) + QEMU_ERROR("code path is reachable"); #ifdef CONFIG_SOFTMMU #define TCG_TARGET_NEED_LDST_LABELS diff --git a/tcg/region.c b/tcg/region.c index e64c3ea230..9cc30d4922 100644 --- a/tcg/region.c +++ b/tcg/region.c @@ -467,38 +467,6 @@ static size_t tcg_n_regions(size_t tb_size, unsigned max_cpus) (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \ ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE) -#ifdef __mips__ -/* - * In order to use J and JAL within the code_gen_buffer, we require - * that the buffer not cross a 256MB boundary. - */ -static inline bool cross_256mb(void *addr, size_t size) -{ - return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful; -} - -/* - * We weren't able to allocate a buffer without crossing that boundary, - * so make do with the larger portion of the buffer that doesn't cross. - * Returns the new base and size of the buffer in *obuf and *osize. 
- */ -static inline void split_cross_256mb(void **obuf, size_t *osize, - void *buf1, size_t size1) -{ - void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful); - size_t size2 = buf1 + size1 - buf2; - - size1 = buf2 - buf1; - if (size1 < size2) { - size1 = size2; - buf1 = buf2; - } - - *obuf = buf1; - *osize = size1; -} -#endif - #ifdef USE_STATIC_CODE_GEN_BUFFER static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE] __attribute__((aligned(CODE_GEN_ALIGN))); @@ -526,12 +494,6 @@ static int alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp) size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size); } -#ifdef __mips__ - if (cross_256mb(buf, size)) { - split_cross_256mb(&buf, &size, buf, size); - } -#endif - region.start_aligned = buf; region.total_size = size; @@ -573,39 +535,6 @@ static int alloc_code_gen_buffer_anon(size_t size, int prot, return -1; } -#ifdef __mips__ - if (cross_256mb(buf, size)) { - /* - * Try again, with the original still mapped, to avoid re-acquiring - * the same 256mb crossing. - */ - size_t size2; - void *buf2 = mmap(NULL, size, prot, flags, -1, 0); - switch ((int)(buf2 != MAP_FAILED)) { - case 1: - if (!cross_256mb(buf2, size)) { - /* Success! Use the new buffer. */ - munmap(buf, size); - break; - } - /* Failure. Work with what we had. */ - munmap(buf2, size); - /* fallthru */ - default: - /* Split the original buffer. Free the smaller half. */ - split_cross_256mb(&buf2, &size2, buf, size); - if (buf == buf2) { - munmap(buf + size2, size - size2); - } else { - munmap(buf, size - size2); - } - size = size2; - break; - } - buf = buf2; - } -#endif - region.start_aligned = buf; region.total_size = size; return prot; @@ -620,35 +549,15 @@ static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp) void *buf_rw = NULL, *buf_rx = MAP_FAILED; int fd = -1; -#ifdef __mips__ - /* Find space for the RX mapping, vs the 256MiB regions. */ - if (alloc_code_gen_buffer_anon(size, PROT_NONE, - MAP_PRIVATE | MAP_ANONYMOUS | - MAP_NORESERVE, errp) < 0) { - return false; - } - /* The size of the mapping may have been adjusted. 
*/ - buf_rx = region.start_aligned; - size = region.total_size; -#endif - buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp); if (buf_rw == NULL) { goto fail; } -#ifdef __mips__ - void *tmp = mmap(buf_rx, size, PROT_READ | PROT_EXEC, - MAP_SHARED | MAP_FIXED, fd, 0); - if (tmp != buf_rx) { - goto fail_rx; - } -#else buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0); if (buf_rx == MAP_FAILED) { goto fail_rx; } -#endif close(fd); region.start_aligned = buf_rw; diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc index c16f96b401..dc8d8f1de2 100644 --- a/tcg/riscv/tcg-target.c.inc +++ b/tcg/riscv/tcg-target.c.inc @@ -1130,10 +1130,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) tcg_out_ext32u(s, base, addr_regl); addr_regl = base; } - - if (guest_base == 0) { - tcg_out_opc_reg(s, OPC_ADD, base, addr_regl, TCG_REG_ZERO); - } else { + if (guest_base != 0) { tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl); } tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64); @@ -1199,10 +1196,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) tcg_out_ext32u(s, base, addr_regl); addr_regl = base; } - - if (guest_base == 0) { - tcg_out_opc_reg(s, OPC_ADD, base, addr_regl, TCG_REG_ZERO); - } else { + if (guest_base != 0) { tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl); } tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc index 688827968b..9720d76abd 100644 --- a/tcg/sparc/tcg-target.c.inc +++ b/tcg/sparc/tcg-target.c.inc @@ -294,12 +294,12 @@ static const int tcg_target_call_oarg_regs[] = { bool use_vis3_instructions; #endif -static inline int check_fit_i64(int64_t val, unsigned int bits) +static bool check_fit_i64(int64_t val, unsigned int bits) { return val == sextract64(val, 0, bits); } -static inline int check_fit_i32(int32_t val, unsigned int bits) +static bool check_fit_i32(int32_t val, unsigned int bits) { return val == sextract32(val, 0, bits); } @@ -362,14 +362,19 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct) } } -static inline void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1, - TCGReg rs2, int op) +static void tcg_out_nop(TCGContext *s) +{ + tcg_out32(s, NOP); +} + +static void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1, + TCGReg rs2, int op) { tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2)); } -static inline void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1, - int32_t offset, int op) +static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1, + int32_t offset, int op) { tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset)); } @@ -381,8 +386,7 @@ static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1, | (val2const ? 
INSN_IMM13(val2) : INSN_RS2(val2))); } -static inline bool tcg_out_mov(TCGContext *s, TCGType type, - TCGReg ret, TCGReg arg) +static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) { if (ret != arg) { tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR); @@ -390,12 +394,21 @@ static inline bool tcg_out_mov(TCGContext *s, TCGType type, return true; } -static inline void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg) +static void tcg_out_mov_delay(TCGContext *s, TCGReg ret, TCGReg arg) +{ + if (ret != arg) { + tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR); + } else { + tcg_out_nop(s); + } +} + +static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg) { tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10)); } -static inline void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg) +static void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg) { tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR); } @@ -470,14 +483,14 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, } } -static inline void tcg_out_movi(TCGContext *s, TCGType type, - TCGReg ret, tcg_target_long arg) +static void tcg_out_movi(TCGContext *s, TCGType type, + TCGReg ret, tcg_target_long arg) { tcg_out_movi_int(s, type, ret, arg, false); } -static inline void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1, - TCGReg a2, int op) +static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1, + TCGReg a2, int op) { tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2)); } @@ -494,20 +507,20 @@ static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr, } } -static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, - TCGReg arg1, intptr_t arg2) +static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, + TCGReg arg1, intptr_t arg2) { tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX)); } -static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, - TCGReg arg1, intptr_t arg2) +static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, + TCGReg arg1, intptr_t arg2) { tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX)); } -static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, - TCGReg base, intptr_t ofs) +static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, + TCGReg base, intptr_t ofs) { if (val == 0) { tcg_out_st(s, type, TCG_REG_G0, base, ofs); @@ -527,12 +540,12 @@ static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, const void *arg) tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, (uintptr_t)arg & 0x3ff); } -static inline void tcg_out_sety(TCGContext *s, TCGReg rs) +static void tcg_out_sety(TCGContext *s, TCGReg rs) { tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs)); } -static inline void tcg_out_rdy(TCGContext *s, TCGReg rd) +static void tcg_out_rdy(TCGContext *s, TCGReg rd) { tcg_out32(s, RDY | INSN_RD(rd)); } @@ -552,11 +565,6 @@ static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1, uns ? ARITH_UDIV : ARITH_SDIV); } -static inline void tcg_out_nop(TCGContext *s) -{ - tcg_out32(s, NOP); -} - static const uint8_t tcg_cond_to_bcond[] = { [TCG_COND_EQ] = COND_E, [TCG_COND_NE] = COND_NE, @@ -1350,7 +1358,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, case INDEX_op_goto_ptr: tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL); if (USE_REG_TB) { - tcg_out_arith(s, TCG_REG_TB, a0, TCG_REG_G0, ARITH_OR); + tcg_out_mov_delay(s, TCG_REG_TB, a0); } else { tcg_out_nop(s); }
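
A minimal usage sketch for the cpu_signal_handler() declaration added to include/exec/exec-all.h above (not part of this series): in a user-mode build, a SIGSEGV/SIGBUS handler installed with sigaction(SA_SIGINFO) would forward the host fault to the virtual CPU and only abort if it was not handled. The handler name below is an illustrative assumption, not code from the patch.

    #include <signal.h>
    #include <stdlib.h>
    #include "exec/exec-all.h"    /* declares cpu_signal_handler() */

    static void example_host_fault_handler(int sig, siginfo_t *info, void *ctx)
    {
        /*
         * Give the virtual CPU a chance to handle the host fault as a guest
         * exception; per the comment in exec-all.h, a non-zero return means
         * the signal was handled and we can simply return.
         */
        if (cpu_signal_handler(sig, info, ctx)) {
            return;
        }
        abort();    /* not handled: a genuine host-side fault */
    }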