target/ppc: Fix broadcast tlbie synchronisation
With mttcg, broadcast tlbie instructions do not wait until other vCPUs
have been kicked out of TCG execution before they complete (including
necessary subsequent tlbsync, etc., instructions). This is contrary to
the ISA, and it permits other vCPUs to use translations after the TLB
flush. For example:
CPU0
// *memP is initially 0, memV maps to memP with *pte
*pte = 0;
ptesync ; tlbie ; eieio ; tlbsync ; ptesync
*memP = 1;
CPU1
assert(*memV == 0);
It is possible for the assertion to fail because CPU1 translates memV
using the TLB after CPU0 has stored 1 to the underlying memory. This
race was observed with a careful test case where CPU1 checks run in a
very large expensive TB so it can run for the entire CPU0 period between
clearing the pte and storing the memory, but host vCPU thread preemption
could cause the race to hit anywhere.
As explained in commit 4ddc104689 ("target/ppc: Fix tlbie"), it is not
enough to just use tlb_flush_all_cpus_synced(), because that does not
execute until the calling CPU has finished its TB. It is also required
that the TB is ended at the point where the TLB flush must subsequently
take effect.
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Parent commit: 95912ce1eb; this commit: 82676f1fc4
@@ -334,7 +334,7 @@ void check_tlb_flush(CPUPPCState *env, bool global)
|
|||||||
if (global && (env->tlb_need_flush & TLB_NEED_GLOBAL_FLUSH)) {
|
if (global && (env->tlb_need_flush & TLB_NEED_GLOBAL_FLUSH)) {
|
||||||
env->tlb_need_flush &= ~TLB_NEED_GLOBAL_FLUSH;
|
env->tlb_need_flush &= ~TLB_NEED_GLOBAL_FLUSH;
|
||||||
env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
|
env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
|
||||||
tlb_flush_all_cpus(cs);
|
tlb_flush_all_cpus_synced(cs);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -534,7 +534,7 @@ void helper_tlbie_isa300(CPUPPCState *env, target_ulong rb, target_ulong rs,
|
|||||||
if (local) {
|
if (local) {
|
||||||
tlb_flush_page(env_cpu(env), addr);
|
tlb_flush_page(env_cpu(env), addr);
|
||||||
} else {
|
} else {
|
||||||
tlb_flush_page_all_cpus(env_cpu(env), addr);
|
tlb_flush_page_all_cpus_synced(env_cpu(env), addr);
|
||||||
}
|
}
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
@@ -3494,6 +3494,13 @@ static inline void gen_check_tlb_flush(DisasContext *ctx, bool global)
|
|||||||
gen_helper_check_tlb_flush_local(tcg_env);
|
gen_helper_check_tlb_flush_local(tcg_env);
|
||||||
}
|
}
|
||||||
gen_set_label(l);
|
gen_set_label(l);
|
||||||
|
if (global) {
|
||||||
|
/*
|
||||||
|
* Global TLB flush uses async-work which must run before the
|
||||||
|
* next instruction, so this must be the last in the TB.
|
||||||
|
*/
|
||||||
|
ctx->base.is_jmp = DISAS_EXIT_UPDATE;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { }
|
static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { }
|
||||||
|
@@ -224,6 +224,13 @@ static bool do_tlbie(DisasContext *ctx, arg_X_tlbie *a, bool local)
|
|||||||
a->prs << TLBIE_F_PRS_SHIFT |
|
a->prs << TLBIE_F_PRS_SHIFT |
|
||||||
a->r << TLBIE_F_R_SHIFT |
|
a->r << TLBIE_F_R_SHIFT |
|
||||||
local << TLBIE_F_LOCAL_SHIFT));
|
local << TLBIE_F_LOCAL_SHIFT));
|
||||||
|
if (!local) {
|
||||||
|
/*
|
||||||
|
* Global TLB flush uses async-work which must run before the
|
||||||
|
* next instruction, so this must be the last in the TB.
|
||||||
|
*/
|
||||||
|
ctx->base.is_jmp = DISAS_EXIT_UPDATE;
|
||||||
|
}
|
||||||
return true;
|
return true;
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user