tcg: Merge GETPC and GETRA
The return address argument to the softmmu template helpers was
confused. In the legacy case, we wanted to indicate that there is
no return address, and so passed in NULL. However, we then
immediately subtracted GETPC_ADJ from NULL, resulting in a non-zero
value, indicating the presence of an (invalid) return address.

Push the GETPC_ADJ subtraction down to the only point it's required:
immediately before use within cpu_restore_state_from_tb, after all
NULL pointer checks have been completed. This makes GETPC and GETRA
identical. Remove GETRA as the lesser used macro, replacing all uses
with GETPC.

Signed-off-by: Richard Henderson <rth@twiddle.net>
parent 85aa80813d
commit 01ecaf438b
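As orientation before the diff, here is a minimal compilable sketch of the arrangement the patch leaves behind: GETPC taken raw, a zero retaddr still meaning "no return address", and GETPC_ADJ applied only at the final consumer. It is condensed from the hunks below; restore_state() and helper() are illustrative stand-ins, not the actual QEMU functions.

#include <stdint.h>
#include <stdio.h>

/* GETPC: the raw host return address of the current function, taken with no
 * adjustment -- after this patch it is identical to what GETRA used to be. */
#define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))

/* Calls are never smaller than 2 bytes on supported hosts, so backing up by
 * 2 is enough to move from the return address into the call insn itself. */
#define GETPC_ADJ 2

/* Stand-in for cpu_restore_state_from_tb(): the single place that now
 * applies GETPC_ADJ, after all NULL/zero checks have been done. */
static void restore_state(uintptr_t searched_pc)
{
    searched_pc -= GETPC_ADJ;
    printf("resolving guest state for host pc %#lx\n",
           (unsigned long)searched_pc);
}

/* Stand-in for a softmmu helper: it forwards its return address unchanged,
 * and a zero retaddr still means "no return address". */
static void helper(uintptr_t retaddr)
{
    if (retaddr) {
        restore_state(retaddr);
    }
}

int main(void)
{
    helper(GETPC());  /* as generated code would call a TCG helper */
    helper(0);        /* the legacy "no return address" case stays zero */
    return 0;
}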
cputlb.c
@@ -543,10 +543,8 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
 
 #undef MMUSUFFIX
 #define MMUSUFFIX _cmmu
-#undef GETPC_ADJ
-#define GETPC_ADJ 0
-#undef GETRA
-#define GETRA() ((uintptr_t)0)
+#undef GETPC
+#define GETPC() ((uintptr_t)0)
 #define SOFTMMU_CODE_ACCESS
 
 #define SHIFT 0
@@ -349,13 +349,12 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
     tb_next->jmp_list_first = (uintptr_t)tb | n;
 }
 
-/* GETRA is the true target of the return instruction that we'll execute,
-   defined here for simplicity of defining the follow-up macros. */
+/* GETPC is the true target of the return instruction that we'll execute. */
 #if defined(CONFIG_TCG_INTERPRETER)
 extern uintptr_t tci_tb_ptr;
-# define GETRA() tci_tb_ptr
+# define GETPC() tci_tb_ptr
 #else
-# define GETRA() \
+# define GETPC() \
     ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
 #endif
 
@@ -368,8 +367,6 @@ extern uintptr_t tci_tb_ptr;
    smaller than 4 bytes, so we don't worry about special-casing this. */
 #define GETPC_ADJ 2
 
-#define GETPC() (GETRA() - GETPC_ADJ)
-
 #if !defined(CONFIG_USER_ONLY)
 
 struct MemoryRegion *iotlb_to_region(CPUState *cpu,
@@ -150,9 +150,6 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
     uintptr_t haddr;
     DATA_TYPE res;
 
-    /* Adjust the given return address. */
-    retaddr -= GETPC_ADJ;
-
     if (addr & ((1 << a_bits) - 1)) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                              mmu_idx, retaddr);
@@ -193,10 +190,8 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
     do_unaligned_access:
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
-        /* Note the adjustment at the beginning of the function.
-           Undo that for the recursion. */
-        res1 = helper_le_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
-        res2 = helper_le_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
+        res1 = helper_le_ld_name(env, addr1, oi, retaddr);
+        res2 = helper_le_ld_name(env, addr2, oi, retaddr);
         shift = (addr & (DATA_SIZE - 1)) * 8;
 
         /* Little-endian combine. */
@@ -224,9 +219,6 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
     uintptr_t haddr;
     DATA_TYPE res;
 
-    /* Adjust the given return address. */
-    retaddr -= GETPC_ADJ;
-
     if (addr & ((1 << a_bits) - 1)) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                              mmu_idx, retaddr);
@@ -267,10 +259,8 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
     do_unaligned_access:
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
-        /* Note the adjustment at the beginning of the function.
-           Undo that for the recursion. */
-        res1 = helper_be_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
-        res2 = helper_be_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
+        res1 = helper_be_ld_name(env, addr1, oi, retaddr);
+        res2 = helper_be_ld_name(env, addr2, oi, retaddr);
         shift = (addr & (DATA_SIZE - 1)) * 8;
 
         /* Big-endian combine. */
@@ -334,9 +324,6 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     unsigned a_bits = get_alignment_bits(get_memop(oi));
     uintptr_t haddr;
 
-    /* Adjust the given return address. */
-    retaddr -= GETPC_ADJ;
-
     if (addr & ((1 << a_bits) - 1)) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
@@ -391,10 +378,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         for (i = 0; i < DATA_SIZE; ++i) {
             /* Little-endian extract. */
             uint8_t val8 = val >> (i * 8);
-            /* Note the adjustment at the beginning of the function.
-               Undo that for the recursion. */
             glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
-                                            oi, retaddr + GETPC_ADJ);
+                                            oi, retaddr);
         }
         return;
     }
@@ -417,9 +402,6 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     unsigned a_bits = get_alignment_bits(get_memop(oi));
     uintptr_t haddr;
 
-    /* Adjust the given return address. */
-    retaddr -= GETPC_ADJ;
-
     if (addr & ((1 << a_bits) - 1)) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
@@ -474,10 +456,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         for (i = 0; i < DATA_SIZE; ++i) {
             /* Big-endian extract. */
             uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
-            /* Note the adjustment at the beginning of the function.
-               Undo that for the recursion. */
             glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
-                                            oi, retaddr + GETPC_ADJ);
+                                            oi, retaddr);
         }
         return;
     }
@@ -8310,12 +8310,12 @@ void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
                  * this purpose use the actual register value passed to us
                  * so that we get the fault address right.
                  */
-                helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETRA());
+                helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
                 /* Now we can populate the other TLB entries, if any */
                 for (i = 0; i < maxidx; i++) {
                     uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
                     if (va != (vaddr_in & TARGET_PAGE_MASK)) {
-                        helper_ret_stb_mmu(env, va, 0, oi, GETRA());
+                        helper_ret_stb_mmu(env, va, 0, oi, GETPC());
                     }
                 }
             }
@@ -8332,7 +8332,7 @@ void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
             * bounce buffer was in use
             */
            for (i = 0; i < blocklen; i++) {
-               helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETRA());
+               helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
            }
        }
 #else
@@ -4122,10 +4122,10 @@ void helper_msa_ld_ ## TYPE(CPUMIPSState *env, uint32_t wd, \
 }
 
 #if !defined(CONFIG_USER_ONLY)
-MSA_LD_DF(DF_BYTE, b, helper_ret_ldub_mmu, oi, GETRA())
-MSA_LD_DF(DF_HALF, h, helper_ret_lduw_mmu, oi, GETRA())
-MSA_LD_DF(DF_WORD, w, helper_ret_ldul_mmu, oi, GETRA())
-MSA_LD_DF(DF_DOUBLE, d, helper_ret_ldq_mmu, oi, GETRA())
+MSA_LD_DF(DF_BYTE, b, helper_ret_ldub_mmu, oi, GETPC())
+MSA_LD_DF(DF_HALF, h, helper_ret_lduw_mmu, oi, GETPC())
+MSA_LD_DF(DF_WORD, w, helper_ret_ldul_mmu, oi, GETPC())
+MSA_LD_DF(DF_DOUBLE, d, helper_ret_ldq_mmu, oi, GETPC())
 #else
 MSA_LD_DF(DF_BYTE, b, cpu_ldub_data)
 MSA_LD_DF(DF_HALF, h, cpu_lduw_data)
@@ -4161,17 +4161,17 @@ void helper_msa_st_ ## TYPE(CPUMIPSState *env, uint32_t wd, \
     int mmu_idx = cpu_mmu_index(env, false); \
     int i; \
     MEMOP_IDX(DF) \
-    ensure_writable_pages(env, addr, mmu_idx, GETRA()); \
+    ensure_writable_pages(env, addr, mmu_idx, GETPC()); \
     for (i = 0; i < DF_ELEMENTS(DF); i++) { \
         ST_INSN(env, addr + (i << DF), pwd->TYPE[i], ##__VA_ARGS__); \
     } \
 }
 
 #if !defined(CONFIG_USER_ONLY)
-MSA_ST_DF(DF_BYTE, b, helper_ret_stb_mmu, oi, GETRA())
-MSA_ST_DF(DF_HALF, h, helper_ret_stw_mmu, oi, GETRA())
-MSA_ST_DF(DF_WORD, w, helper_ret_stl_mmu, oi, GETRA())
-MSA_ST_DF(DF_DOUBLE, d, helper_ret_stq_mmu, oi, GETRA())
+MSA_ST_DF(DF_BYTE, b, helper_ret_stb_mmu, oi, GETPC())
+MSA_ST_DF(DF_HALF, h, helper_ret_stw_mmu, oi, GETPC())
+MSA_ST_DF(DF_WORD, w, helper_ret_stl_mmu, oi, GETPC())
+MSA_ST_DF(DF_DOUBLE, d, helper_ret_stq_mmu, oi, GETPC())
 #else
 MSA_ST_DF(DF_BYTE, b, cpu_stb_data)
 MSA_ST_DF(DF_HALF, h, cpu_stw_data)
@@ -260,6 +260,8 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
     int64_t ti = profile_getclock();
 #endif
 
+    searched_pc -= GETPC_ADJ;
+
     if (searched_pc < host_pc) {
         return -1;
     }
@@ -105,8 +105,11 @@ static inline int handle_cpu_signal(uintptr_t pc, unsigned long address,
     if (ret == 0) {
         return 1; /* the MMU fault was handled without causing real CPU fault */
     }
-    /* now we have a real cpu fault */
-    cpu_restore_state(cpu, pc);
+
+    /* Now we have a real cpu fault. Since this is the exact location of
+     * the exception, we must undo the adjustment done by cpu_restore_state
+     * for handling call return addresses. */
+    cpu_restore_state(cpu, pc + GETPC_ADJ);
 
     sigprocmask(SIG_SETMASK, old_set, NULL);
     cpu_loop_exit(cpu);