Spelling fixes, spotted by Stuart Brady.
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4809 c046a42c-6fe2-441c-8c8c-71466251a162
parent b3c7724cbc
commit bf20dc076b
@@ -620,7 +620,7 @@ int cpu_exec(CPUState *env1)
 next_tb = tcg_qemu_tb_exec(tc_ptr);
 env->current_tb = NULL;
 if ((next_tb & 3) == 2) {
-/* Instruction counter exired. */
+/* Instruction counter expired. */
 int insns_left;
 tb = (TranslationBlock *)(long)(next_tb & ~3);
 /* Restore PC. */
@@ -372,7 +372,7 @@ static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)
 return addr + env1->tlb_table[mmu_idx][page_index].addend - (unsigned long)phys_ram_base;
 }
 
-/* Deterministic execution requires that IO only be performaed on the last
+/* Deterministic execution requires that IO only be performed on the last
 instruction of a TB so that interrupts take effect immediately. */
 static inline int can_do_io(CPUState *env)
 {
exec.c | 12
@@ -109,7 +109,7 @@ CPUState *first_cpu;
 cpu_exec() */
 CPUState *cpu_single_env;
 /* 0 = Do not count executed instructions.
-1 = Precice instruction counting.
+1 = Precise instruction counting.
 2 = Adaptive rate instruction counting. */
 int use_icount = 0;
 /* Current instruction counter. While executing translated code this may
@@ -1080,7 +1080,7 @@ TranslationBlock *tb_alloc(target_ulong pc)
 
 void tb_free(TranslationBlock *tb)
 {
-/* In practice this is mostly used for single use temorary TB
+/* In practice this is mostly used for single use temporary TB
 Ignore the hard cases and just back up if this TB happens to
 be the last one generated. */
 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
@@ -1394,7 +1394,7 @@ void cpu_interrupt(CPUState *env, int mask)
 
 old_mask = env->interrupt_request;
 /* FIXME: This is probably not threadsafe. A different thread could
-be in the mittle of a read-modify-write operation. */
+be in the middle of a read-modify-write operation. */
 env->interrupt_request |= mask;
 #if defined(USE_NPTL)
 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
@@ -3019,13 +3019,13 @@ void cpu_io_recompile(CPUState *env, void *retaddr)
 n = env->icount_decr.u16.low + tb->icount;
 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
 /* Calculate how many instructions had been executed before the fault
-occured. */
+occurred. */
 n = n - env->icount_decr.u16.low;
 /* Generate a new TB ending on the I/O insn. */
 n++;
 /* On MIPS and SH, delay slot instructions can only be restarted if
 they were already the first instruction in the TB. If this is not
-the first instruction in a TB then re-execute the preceeding
+the first instruction in a TB then re-execute the preceding
 branch. */
 #if defined(TARGET_MIPS)
 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
@@ -3053,7 +3053,7 @@ void cpu_io_recompile(CPUState *env, void *retaddr)
 /* FIXME: In theory this could raise an exception. In practice
 we have already translated the block once so it's probably ok. */
 tb_gen_code(env, pc, cs_base, flags, cflags);
-/* TODO: If env->pc != tb->pc (i.e. the failuting instruction was not
+/* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
 the first in the TB) then we end up generating a whole new TB and
 repeating the fault, which is horribly inefficient.
 Better would be to execute just this insn uncached, or generate a
@@ -1,4 +1,4 @@
-/* Helpewrs for instruction counting code genration. */
+/* Helpers for instruction counting code generation. */
 
 static TCGArg *icount_arg;
 static int icount_label;
@@ -8684,7 +8684,7 @@ static inline int gen_intermediate_code_internal(CPUState *env,
 /* Translation stops when a conditional branch is enoutered.
 * Otherwise the subsequent code could get translated several times.
 * Also stop translation when a page boundary is reached. This
-* ensures prefech aborts occur at the right place. */
+* ensures prefetch aborts occur at the right place. */
 num_insns ++;
 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
 !env->singlestep_enabled &&
@@ -3141,7 +3141,7 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
 
 num_insns++;
 /* Check for delayed branches here. If we do it before
-actually genereating any host code, the simulator will just
+actually generating any host code, the simulator will just
 loop doing nothing for on this program location. */
 if (dc->delayed_branch) {
 dc->delayed_branch--;
@@ -2980,7 +2980,7 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
 num_insns++;
 
 /* Terminate the TB on memory ops if watchpoints are present. */
-/* FIXME: This should be replacd by the deterministic execution
+/* FIXME: This should be replaced by the deterministic execution
 * IRQ raising bits. */
 if (dc->is_mem && env->nb_watchpoints)
 break;
@@ -3998,7 +3998,7 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, TCGv t0, int reg, int se
 rn, reg, sel);
 }
 #endif
-/* For simplicitly assume that all writes can cause interrupts. */
+/* For simplicity assume that all writes can cause interrupts. */
 if (use_icount) {
 gen_io_end();
 ctx->bstate = BS_STOP;
@@ -5170,7 +5170,7 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, TCGv t0, int reg, int s
 }
 #endif
 tcg_temp_free(t0);
-/* For simplicitly assume that all writes can cause interrupts. */
+/* For simplicity assume that all writes can cause interrupts. */
 if (use_icount) {
 gen_io_end();
 ctx->bstate = BS_STOP;
vl.c | 8
@@ -239,9 +239,9 @@ struct drive_opt {
 static CPUState *cur_cpu;
 static CPUState *next_cpu;
 static int event_pending = 1;
-/* Conversion factor from emulated instrctions to virtual clock ticks. */
+/* Conversion factor from emulated instructions to virtual clock ticks. */
 static int icount_time_shift;
-/* Arbitrarily pick 1MIPS as the minimum alowable speed. */
+/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
 #define MAX_ICOUNT_SHIFT 10
 /* Compensate for varying guest execution speed. */
 static int64_t qemu_icount_bias;
@@ -903,7 +903,7 @@ static void rtc_stop_timer(struct qemu_alarm_timer *t);
 #endif /* _WIN32 */
 
 /* Correlation between real and virtual time is always going to be
-farly approximate, so ignore small variation.
+fairly approximate, so ignore small variation.
 When the guest is idle real and virtual time will be aligned in
 the IO wait loop. */
 #define ICOUNT_WOBBLE (QEMU_TIMER_BASE / 10)
@@ -7262,7 +7262,7 @@ static int main_loop(void)
 if (use_icount == 1) {
 /* When not using an adaptive execution frequency
 we tend to get badly out of sync with real time,
-so just delay for a resonable amount of time. */
+so just delay for a reasonable amount of time. */
 delta = 0;
 } else {
 delta = cpu_get_icount() - cpu_get_clock();