exec: drop cpu_can_do_io, just read cpu->can_do_io

After commit 626cf8f (icount: set can_do_io outside TB execution,
2014-12-08), can_do_io is set to 1 if not executing code.  It is
no longer necessary to make this assumption in cpu_can_do_io.

It is also possible to remove the use_icount test, simply by
never setting cpu->can_do_io to 0 unless use_icount is true.

With these changes cpu_can_do_io boils down to a read of
cpu->can_do_io.
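For illustration only (this sketch is not part of the patch), a self-contained
toy model of the resulting protocol: the flag starts at 1, is lowered around TB
execution only when icount is in use, and can therefore be read directly
wherever I/O safety has to be checked.

    /* Toy model of the can_do_io protocol after this patch; ToyCPU and
     * toy_tb_exec are made-up stand-ins for CPUState and cpu_tb_exec. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool use_icount;              /* stand-in for QEMU's global */

    typedef struct {
        int can_do_io;                   /* nonzero: memory-mapped I/O is safe */
    } ToyCPU;

    static void toy_tb_exec(ToyCPU *cpu)
    {
        cpu->can_do_io = !use_icount;    /* cleared only under icount */
        /* ... translated code would run here ... */
        cpu->can_do_io = 1;              /* I/O is safe again after the TB */
    }

    int main(void)
    {
        ToyCPU cpu = { .can_do_io = 1 }; /* matches the new reset value */

        use_icount = false;
        toy_tb_exec(&cpu);
        printf("without icount: can_do_io stays %d\n", cpu.can_do_io);

        use_icount = true;
        toy_tb_exec(&cpu);               /* flag is 0 while the TB runs */
        printf("with icount: can_do_io is %d again after the TB\n",
               cpu.can_do_io);
        return 0;
    }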

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Author: Paolo Bonzini
Date:   2015-06-24 14:16:26 +02:00
Parent: b4a4b8d0e0
Commit: 414b15c909

7 changed files with 10 additions and 28 deletions

cpu-exec.c

@@ -196,7 +196,7 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
     }
 #endif /* DEBUG_DISAS */
 
-    cpu->can_do_io = 0;
+    cpu->can_do_io = !use_icount;
     next_tb = tcg_qemu_tb_exec(env, tb_ptr);
     cpu->can_do_io = 1;
     trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),

cpus.c

@@ -145,7 +145,7 @@ int64_t cpu_get_icount_raw(void)
     icount = timers_state.qemu_icount;
     if (cpu) {
-        if (!cpu_can_do_io(cpu)) {
+        if (!cpu->can_do_io) {
             fprintf(stderr, "Bad icount read\n");
             exit(1);
         }

include/exec/exec-all.h

@@ -346,27 +346,6 @@ extern int singlestep;
 /* cpu-exec.c */
 extern volatile sig_atomic_t exit_request;
 
-/**
- * cpu_can_do_io:
- * @cpu: The CPU for which to check IO.
- *
- * Deterministic execution requires that IO only be performed on the last
- * instruction of a TB so that interrupts take effect immediately.
- *
- * Returns: %true if memory-mapped IO is safe, %false otherwise.
- */
-static inline bool cpu_can_do_io(CPUState *cpu)
-{
-    if (!use_icount) {
-        return true;
-    }
-    /* If not executing code then assume we are ok. */
-    if (cpu->current_tb == NULL) {
-        return true;
-    }
-    return cpu->can_do_io != 0;
-}
-
 #if !defined(CONFIG_USER_ONLY)
 void migration_bitmap_extend(ram_addr_t old, ram_addr_t new);
 #endif

include/qom/cpu.h

@@ -231,7 +231,9 @@ struct kvm_run;
  * @icount_decr: Number of cycles left, with interrupt flag in high bit.
  * This allows a single read-compare-cbranch-write sequence to test
  * for both decrementer underflow and exceptions.
- * @can_do_io: Nonzero if memory-mapped IO is safe.
+ * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
+ * requires that IO only be performed on the last instruction of a TB
+ * so that interrupts take effect immediately.
  * @env_ptr: Pointer to subclass-specific CPUArchState field.
  * @current_tb: Currently executing TB.
  * @gdb_regs: Additional GDB registers.
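To make the doc comment above concrete, here is a small self-contained sketch
(hypothetical names, not QEMU code) of the rule the softmmu hunks below enforce:
an access outside the ROM/not-dirty special cases is only performed while
can_do_io is set; otherwise the block has to be retranslated so that the access
becomes the TB's last instruction.

    /* Toy decision function mirroring the !cpu->can_do_io checks below;
     * toy_cpu and toy_io_access are made-up names. */
    #include <stdbool.h>

    struct toy_cpu {
        int can_do_io;                 /* mirrors CPUState::can_do_io */
    };

    enum io_outcome { IO_PERFORM, IO_RECOMPILE };

    static enum io_outcome toy_io_access(const struct toy_cpu *cpu,
                                         bool rom_or_notdirty)
    {
        /* ROM and not-dirty accesses are exempt, as in the real checks. */
        if (!rom_or_notdirty && !cpu->can_do_io) {
            return IO_RECOMPILE;       /* stand-in for cpu_io_recompile() */
        }
        return IO_PERFORM;
    }

    int main(void)
    {
        struct toy_cpu cpu = { .can_do_io = 0 };   /* mid-TB under icount */
        return toy_io_access(&cpu, false) == IO_RECOMPILE ? 0 : 1;
    }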

qom/cpu.c

@@ -247,7 +247,7 @@ static void cpu_common_reset(CPUState *cpu)
     cpu->mem_io_vaddr = 0;
     cpu->icount_extra = 0;
     cpu->icount_decr.u32 = 0;
-    cpu->can_do_io = 0;
+    cpu->can_do_io = 1;
     cpu->exception_index = -1;
     memset(cpu->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
 }

softmmu_template.h

@@ -154,7 +154,7 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
     physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
     cpu->mem_io_pc = retaddr;
-    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu_can_do_io(cpu)) {
+    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
     }
@@ -374,7 +374,7 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
     MemoryRegion *mr = iotlb_to_region(cpu, physaddr);
     physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
-    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu_can_do_io(cpu)) {
+    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
     }

translate-all.c

@@ -222,6 +222,7 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
     gen_intermediate_code_pc(env, tb);
 
     if (tb->cflags & CF_USE_ICOUNT) {
+        assert(use_icount);
         /* Reset the cycle counter to the start of the block. */
         cpu->icount_decr.u16.low += tb->icount;
         /* Clear the IO flag. */
@@ -1470,7 +1471,7 @@ static void tcg_handle_interrupt(CPUState *cpu, int mask)
     if (use_icount) {
         cpu->icount_decr.u16.high = 0xffff;
-        if (!cpu_can_do_io(cpu)
+        if (!cpu->can_do_io
             && (mask & ~old_mask) != 0) {
             cpu_abort(cpu, "Raised interrupt while not in I/O function");
         }