target-ppc: tlbie/tlbivax should have global effect

tlbie (BookS) and tlbivax (BookE), plus the H_CALLs (pseries), should
have a global effect.

Introduce a TLB_NEED_GLOBAL_FLUSH flag. During a lazy TLB flush, after
taking care of any pending local flushes, check whether a broadcast
flush (requested at a context-synchronizing event: ptesync/tlbsync,
etc.) is needed. Depending on the state of the tlb_need_flush bitmask,
the TLBs of the other CPUs are flushed as required and the flags are
cleared.
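
For illustration, here is a minimal standalone model of the deferral
protocol this patch implements (plain C, not QEMU code; the vCPU array,
NR_CPUS and the printf stand-in for tlb_flush() are invented for the
sketch):

/* Minimal standalone model of the deferred-flush protocol (illustrative
 * only, not QEMU code). Each vCPU carries a tlb_need_flush bitmask;
 * broadcast invalidations (tlbie/tlbivax) merely set
 * TLB_NEED_GLOBAL_FLUSH, and the flush itself happens at the next
 * context-synchronizing event (ptesync/tlbsync). */
#include <stdio.h>

#define TLB_NEED_LOCAL_FLUSH  0x1
#define TLB_NEED_GLOBAL_FLUSH 0x2
#define NR_CPUS 4                      /* invented for the sketch */

static unsigned tlb_need_flush[NR_CPUS];

static void flush_tlb(int cpu)         /* stand-in for tlb_flush(cs, 1) */
{
    printf("cpu%d: TLB flushed\n", cpu);
}

/* tlbie/tlbivax-style broadcast invalidation: mark, don't flush yet */
static void broadcast_invalidate(int cpu)
{
    tlb_need_flush[cpu] |= TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
}

/* Lazy flush at a synchronizing event; mirrors check_tlb_flush() below */
static void check_tlb_flush(int cpu, int global)
{
    if (tlb_need_flush[cpu] & TLB_NEED_LOCAL_FLUSH) {
        flush_tlb(cpu);
        tlb_need_flush[cpu] &= ~TLB_NEED_LOCAL_FLUSH;
    }
    if (global && (tlb_need_flush[cpu] & TLB_NEED_GLOBAL_FLUSH)) {
        for (int other = 0; other < NR_CPUS; other++) {
            if (other != cpu) {
                /* the remote flush also covers any pending local flush */
                tlb_need_flush[other] &= ~TLB_NEED_LOCAL_FLUSH;
                flush_tlb(other);
            }
        }
        tlb_need_flush[cpu] &= ~TLB_NEED_GLOBAL_FLUSH;
    }
}

int main(void)
{
    broadcast_invalidate(0);   /* guest runs tlbie on cpu0 */
    check_tlb_flush(0, 1);     /* guest runs ptesync on cpu0 */
    return 0;
}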

Suggested-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Nikunj A Dadhania <nikunj@linux.vnet.ibm.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
[dwg: Use 'true' instead of '1' for call to check_tlb_flush()]
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Commit: d76ab5e1c7 (parent e3cffe6fad)
Author: Nikunj A Dadhania, 2016-09-20 22:05:01 +05:30
Committed by: David Gibson
6 changed files with 34 additions and 4 deletions

--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c

@@ -319,6 +319,8 @@ static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr,
     ppc_hash64_store_hpte(cpu, pte_index,
                           (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
     ppc_hash64_tlb_flush_hpte(cpu, pte_index, v, r);
+    /* Flush the tlb */
+    check_tlb_flush(env, true);
     /* Don't need a memory barrier, due to qemu's global lock */
     ppc_hash64_store_hpte(cpu, pte_index, v | HPTE64_V_HPTE_DIRTY, r);
     return H_SUCCESS;

--- a/target-ppc/cpu.h
+++ b/target-ppc/cpu.h

@@ -1010,6 +1010,7 @@ struct CPUPPCState {
     bool kvm_sw_tlb; /* non-zero if KVM SW TLB API is active */
     uint32_t tlb_need_flush; /* Delayed flush needed */
 #define TLB_NEED_LOCAL_FLUSH   0x1
+#define TLB_NEED_GLOBAL_FLUSH  0x2
 #endif

     /* Other registers */

--- a/target-ppc/helper_regs.h
+++ b/target-ppc/helper_regs.h

@@ -161,6 +161,23 @@ static inline void check_tlb_flush(CPUPPCState *env, bool global)
         tlb_flush(cs, 1);
         env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
     }
+
+    /* Propagate TLB invalidations to other CPUs when the guest uses broadcast
+     * TLB invalidation instructions.
+     */
+    if (global && (env->tlb_need_flush & TLB_NEED_GLOBAL_FLUSH)) {
+        CPUState *other_cs;
+        CPU_FOREACH(other_cs) {
+            if (other_cs != cs) {
+                PowerPCCPU *cpu = POWERPC_CPU(other_cs);
+                CPUPPCState *other_env = &cpu->env;
+
+                other_env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
+                tlb_flush(other_cs, 1);
+            }
+        }
+        env->tlb_need_flush &= ~TLB_NEED_GLOBAL_FLUSH;
+    }
 }
 #else
 static inline void check_tlb_flush(CPUPPCState *env, bool global) { }
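
Not visible in this hunk: the global argument lets callers pick a drain
point. A hedged sketch of the two consumption paths, assuming helper
wrappers along these lines (names illustrative, not taken from this
diff):

/* Sketch only: one entry point for purely local synchronization points
 * (e.g. exception entry) and one for broadcast-synchronizing
 * instructions (e.g. ptesync/tlbsync). */
void helper_check_tlb_flush_local(CPUPPCState *env)
{
    check_tlb_flush(env, false);
}

void helper_check_tlb_flush_global(CPUPPCState *env)
{
    check_tlb_flush(env, true);
}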

--- a/target-ppc/mmu-hash64.c
+++ b/target-ppc/mmu-hash64.c

@@ -912,7 +912,7 @@ void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
      * invalidate, and we still don't have a tlb_flush_mask(env, n,
      * mask) in QEMU, we just invalidate all TLBs
      */
-    tlb_flush(CPU(cpu), 1);
+    cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
 }

 void ppc_hash64_update_rmls(CPUPPCState *env)
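
Design note: the hash-64 flush path no longer calls tlb_flush()
directly; it only records that both a local and a global flush are
pending, leaving the actual flush to the next check_tlb_flush() at a
context-synchronizing event.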

--- a/target-ppc/mmu_helper.c
+++ b/target-ppc/mmu_helper.c

@@ -2757,7 +2757,7 @@ static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,

 void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
 {
-    PowerPCCPU *cpu = ppc_env_get_cpu(env);
+    CPUState *cs;

     if (address & 0x4) {
         /* flush all entries */
@@ -2774,11 +2774,15 @@ void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
     if (address & 0x8) {
         /* flush TLB1 entries */
         booke206_invalidate_ea_tlb(env, 1, address);
-        tlb_flush(CPU(cpu), 1);
+        CPU_FOREACH(cs) {
+            tlb_flush(cs, 1);
+        }
     } else {
         /* flush TLB0 entries */
         booke206_invalidate_ea_tlb(env, 0, address);
-        tlb_flush_page(CPU(cpu), address & MAS2_EPN_MASK);
+        CPU_FOREACH(cs) {
+            tlb_flush_page(cs, address & MAS2_EPN_MASK);
+        }
     }
 }

--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c

@@ -4441,6 +4441,7 @@ static void gen_tlbie(DisasContext *ctx)
 #if defined(CONFIG_USER_ONLY)
     GEN_PRIV;
 #else
+    TCGv_i32 t1;
     CHK_HV;

     if (NARROW_MODE(ctx)) {
@@ -4451,6 +4452,11 @@ static void gen_tlbie(DisasContext *ctx)
     } else {
         gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]);
     }
+    t1 = tcg_temp_new_i32();
+    tcg_gen_ld_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
+    tcg_gen_ori_i32(t1, t1, TLB_NEED_GLOBAL_FLUSH);
+    tcg_gen_st_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
+    tcg_temp_free_i32(t1);
 #endif /* defined(CONFIG_USER_ONLY) */
 }
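
The flag set by gen_tlbie() is only consumed at the next
context-synchronizing instruction. A hedged sketch of that drain point
at translate time, assuming a gen_check_tlb_flush(ctx, global) wrapper
that emits a call to check_tlb_flush(env, true) (simplified; the real
sync handler does more):

/* Illustrative sketch, not part of this commit: ptesync is encoded as
 * sync with L=2 and is the natural point to propagate a pending
 * broadcast invalidation to the other vCPUs. */
static void gen_sync(DisasContext *ctx)
{
    uint32_t l = (ctx->opcode >> 21) & 3;

    if (l == 2) { /* ptesync */
        gen_check_tlb_flush(ctx, true);
    }
}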