Assorted tlb and tb caching fixes

-----BEGIN PGP SIGNATURE-----
 
 iQEcBAABAgAGBQJbOky7AAoJEGTfOOivfiFfvYEH/iDRoHaTo+HOClIqrHY+yTr9
 39JrMbvRpJ0+TwhzWHvA8Ukuof2DpUFYNpx9F8zIy4HEVG8Pl9VX4ntK121WIOvb
 Cf7/gR4M6PW9TnV1NDe4cWeVVUlg2WuY81vJBFKaIRbh6/m3OnAxL+ZnKYHO7OLs
 mmxXI76kX9wAicOTsObx19Tb1XOlAqyzxdVb8HrrEK488iigVuJ3W1l+pQEEZMdF
 CICXVglTBCACnBZ1nG7vCY0UVkf4c8rOM+c8f+4ktkYl2GcNgkWLMjbVYf3rsozH
 5iUfCBqNbRQ5xZBVTSD/efTLbxQ7wCMCwfDmwvy/71Pi/vwxaIHEtdWxCofv0p8=
 =XQ94
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20180702' into staging

Assorted tlb and tb caching fixes

# gpg: Signature made Mon 02 Jul 2018 17:03:07 BST
# gpg:                using RSA key 64DF38E8AF7E215F
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>"
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20180702:
  cpu: Assert asidx_from_attrs return value in range
  accel/tcg: Avoid caching overwritten tlb entries
  accel/tcg: Don't treat invalid TLB entries as needing recheck
  accel/tcg: Correct "is this a TLB miss" check in get_page_addr_code()
  tcg: Define and use new tlb_hit() and tlb_hit_page() functions
  translate-all: fix locking of TBs whose two pages share the same physical page

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit ab08440a4e
Peter Maydell <peter.maydell@linaro.org>  2018-07-02 17:57:46 +01:00
6 changed files with 100 additions and 58 deletions

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c

@@ -235,20 +235,30 @@ void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
 }
 
-static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
+static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
+                                        target_ulong page)
+{
+    return tlb_hit_page(tlb_entry->addr_read, page) ||
+           tlb_hit_page(tlb_entry->addr_write, page) ||
+           tlb_hit_page(tlb_entry->addr_code, page);
+}
+
+static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong page)
 {
-    if (addr == (tlb_entry->addr_read &
-                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
-        addr == (tlb_entry->addr_write &
-                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
-        addr == (tlb_entry->addr_code &
-                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (tlb_hit_page_anyprot(tlb_entry, page)) {
         memset(tlb_entry, -1, sizeof(*tlb_entry));
     }
 }
 
+static inline void tlb_flush_vtlb_page(CPUArchState *env, int mmu_idx,
+                                       target_ulong page)
+{
+    int k;
+
+    for (k = 0; k < CPU_VTLB_SIZE; k++) {
+        tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], page);
+    }
+}
+
 static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
 {
     CPUArchState *env = cpu->env_ptr;
@@ -274,14 +284,7 @@ static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
         tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
-    }
-
-    /* check whether there are entries that need to be flushed in the vtlb */
-    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
-        int k;
-        for (k = 0; k < CPU_VTLB_SIZE; k++) {
-            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
-        }
+        tlb_flush_vtlb_page(env, mmu_idx, addr);
     }
 
     tb_flush_jmp_cache(cpu, addr);
@@ -313,7 +316,6 @@ static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
     unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
     int page = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     int mmu_idx;
-    int i;
 
     assert_cpu_is_self(cpu);
@@ -323,11 +325,7 @@ static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
         if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
             tlb_flush_entry(&env->tlb_table[mmu_idx][page], addr);
-
-            /* check whether there are vltb entries that need to be flushed */
-            for (i = 0; i < CPU_VTLB_SIZE; i++) {
-                tlb_flush_entry(&env->tlb_v_table[mmu_idx][i], addr);
-            }
+            tlb_flush_vtlb_page(env, mmu_idx, addr);
         }
     }
@@ -612,10 +610,9 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     target_ulong address;
     target_ulong code_address;
     uintptr_t addend;
-    CPUTLBEntry *te, *tv, tn;
+    CPUTLBEntry *te, tn;
     hwaddr iotlb, xlat, sz, paddr_page;
     target_ulong vaddr_page;
-    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
     int asidx = cpu_asidx_from_attrs(cpu, attrs);
 
     assert_cpu_is_self(cpu);
@@ -657,19 +654,28 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
         addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
     }
 
+    /* Make sure there's no cached translation for the new page. */
+    tlb_flush_vtlb_page(env, mmu_idx, vaddr_page);
+
     code_address = address;
     iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
                                             paddr_page, xlat, prot, &address);
 
     index = (vaddr_page >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     te = &env->tlb_table[mmu_idx][index];
-    /* do not discard the translation in te, evict it into a victim tlb */
-    tv = &env->tlb_v_table[mmu_idx][vidx];
 
-    /* addr_write can race with tlb_reset_dirty_range */
-    copy_tlb_helper(tv, te, true);
+    /*
+     * Only evict the old entry to the victim tlb if it's for a
+     * different page; otherwise just overwrite the stale data.
+     */
+    if (!tlb_hit_page_anyprot(te, vaddr_page)) {
+        unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
+        CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];
 
-    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
+        /* Evict the old entry into the victim tlb. */
+        copy_tlb_helper(tv, te, true);
+        env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
+    }
 
     /* refill the tlb */
     /*
@@ -960,14 +966,14 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     mmu_idx = cpu_mmu_index(env, true);
 
-    if (unlikely(env->tlb_table[mmu_idx][index].addr_code !=
-                 (addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK)))) {
+    if (unlikely(!tlb_hit(env->tlb_table[mmu_idx][index].addr_code, addr))) {
         if (!VICTIM_TLB_HIT(addr_read, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
         }
     }
 
-    if (unlikely(env->tlb_table[mmu_idx][index].addr_code & TLB_RECHECK)) {
+    if (unlikely((env->tlb_table[mmu_idx][index].addr_code &
+                  (TLB_RECHECK | TLB_INVALID_MASK)) == TLB_RECHECK)) {
         /*
          * This is a TLB_RECHECK access, where the MMU protection
          * covers a smaller range than a target page, and we must
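
The hunk above is the "Don't treat invalid TLB entries as needing recheck" fix:
the old test took the recheck slow path whenever TLB_RECHECK was set, even on
an entry that had since been invalidated (both bits set). A minimal sketch of
the difference, using illustrative bit positions rather than QEMU's real ones:

    #include <assert.h>
    #include <stdint.h>

    #define TLB_INVALID_MASK (1 << 0)   /* illustrative bit positions */
    #define TLB_RECHECK      (1 << 1)

    int main(void)
    {
        /* An entry that was marked for recheck, then invalidated. */
        uint64_t addr_code = TLB_RECHECK | TLB_INVALID_MASK;

        /* Old test: sees TLB_RECHECK and wrongly takes the recheck path. */
        assert(addr_code & TLB_RECHECK);

        /* New test: an invalid entry no longer counts as a recheck. */
        assert((addr_code & (TLB_RECHECK | TLB_INVALID_MASK)) != TLB_RECHECK);
        return 0;
    }
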
@@ -1046,8 +1052,7 @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
 
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         /* TLB entry is for a different page */
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
@@ -1091,8 +1096,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     }
 
     /* Check TLB entry and enforce page permissions. */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
                      mmu_idx, retaddr);
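
The tlb_set_page_with_attrs() change above ("Avoid caching overwritten tlb
entries") only evicts to the victim TLB when the incoming translation is for a
different page: refilling the same page must not push the stale, about-to-be-
overwritten entry where it could later be hit. A stripped-down model of the
install path (not QEMU code; the types, sizes and helpers are invented for
illustration):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGE_MASK    (~(uint64_t)0xfff)   /* assume 4 KiB pages */
    #define INVALID_MASK 0x1                  /* stand-in for TLB_INVALID_MASK */
    #define TLB_SIZE     256
    #define VTLB_SIZE    8

    typedef struct { uint64_t addr_read, addr_write, addr_code; } Entry;

    static Entry tlb[TLB_SIZE], vtlb[VTLB_SIZE];
    static unsigned vidx;

    static bool hit_page(uint64_t tlb_addr, uint64_t page)
    {
        return page == (tlb_addr & (PAGE_MASK | INVALID_MASK));
    }

    static bool hit_page_anyprot(const Entry *e, uint64_t page)
    {
        return hit_page(e->addr_read, page) ||
               hit_page(e->addr_write, page) ||
               hit_page(e->addr_code, page);
    }

    static void set_page(uint64_t page, Entry incoming)
    {
        Entry *te = &tlb[(page >> 12) % TLB_SIZE];

        /* Evict only if the slot holds a *different* page; stale data
         * for the same page is simply overwritten. */
        if (!hit_page_anyprot(te, page)) {
            vtlb[vidx++ % VTLB_SIZE] = *te;
        }
        *te = incoming;
    }

    int main(void)
    {
        Entry e = { 0x1000, 0x1000, 0x1000 };
        unsigned before;

        memset(tlb, -1, sizeof(tlb));
        memset(vtlb, -1, sizeof(vtlb));

        set_page(0x1000, e);              /* fresh slot: evicts once */
        before = vidx;

        e.addr_write |= INVALID_MASK;     /* entry went stale */
        set_page(0x1000, e);              /* same page: just overwrite */

        printf("evictions during same-page refill: %u\n", vidx - before);
        return 0;                         /* prints 0 */
    }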

diff --git a/accel/tcg/softmmu_template.h b/accel/tcg/softmmu_template.h

@@ -123,8 +123,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
     }
 
     /* If the TLB entry is for a different page, reload and try again. */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
@@ -191,8 +190,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
     }
 
     /* If the TLB entry is for a different page, reload and try again. */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
@@ -286,8 +284,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     }
 
     /* If the TLB entry is for a different page, reload and try again. */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
@@ -322,7 +319,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
         index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
         tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
-        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
+        if (!tlb_hit_page(tlb_addr2, page2)
             && !VICTIM_TLB_HIT(addr_write, page2)) {
             tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
@@ -364,8 +361,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     }
 
     /* If the TLB entry is for a different page, reload and try again. */
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
@@ -400,7 +396,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
         page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
         index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
         tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
-        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
+        if (!tlb_hit_page(tlb_addr2, page2)
             && !VICTIM_TLB_HIT(addr_write, page2)) {
             tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
                      mmu_idx, retaddr);
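
In these store helpers, page2 is computed page-aligned, which is why the
aligned tlb_hit_page() is used directly instead of tlb_hit(). A small
standalone check of the straddle computation (4 KiB pages assumed):

    #include <stdint.h>
    #include <stdio.h>

    #define TARGET_PAGE_BITS 12                  /* assume 4 KiB pages */
    #define TARGET_PAGE_MASK (~(uint64_t)((1 << TARGET_PAGE_BITS) - 1))

    int main(void)
    {
        uint64_t addr = 0x1ffe;                  /* 4-byte store */
        unsigned size = 4;

        uint64_t page1 = addr & TARGET_PAGE_MASK;
        uint64_t page2 = (addr + size) & TARGET_PAGE_MASK; /* page aligned */

        printf("page1=0x%llx page2=0x%llx straddles=%d\n",
               (unsigned long long)page1, (unsigned long long)page2,
               page1 != page2);                  /* 0x1000 0x2000 1 */
        return 0;
    }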

diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c

@@ -669,9 +669,15 @@ static inline void page_lock_tb(const TranslationBlock *tb)
 
 static inline void page_unlock_tb(const TranslationBlock *tb)
 {
-    page_unlock(page_find(tb->page_addr[0] >> TARGET_PAGE_BITS));
+    PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
+
+    page_unlock(p1);
     if (unlikely(tb->page_addr[1] != -1)) {
-        page_unlock(page_find(tb->page_addr[1] >> TARGET_PAGE_BITS));
+        PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
+
+        if (p2 != p1) {
+            page_unlock(p2);
+        }
     }
 }
@@ -850,22 +856,34 @@ static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                            PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
 {
     PageDesc *p1, *p2;
+    tb_page_addr_t page1;
+    tb_page_addr_t page2;
 
     assert_memory_lock();
-    g_assert(phys1 != -1 && phys1 != phys2);
-    p1 = page_find_alloc(phys1 >> TARGET_PAGE_BITS, alloc);
+    g_assert(phys1 != -1);
+
+    page1 = phys1 >> TARGET_PAGE_BITS;
+    page2 = phys2 >> TARGET_PAGE_BITS;
+
+    p1 = page_find_alloc(page1, alloc);
     if (ret_p1) {
         *ret_p1 = p1;
     }
     if (likely(phys2 == -1)) {
         page_lock(p1);
         return;
+    } else if (page1 == page2) {
+        page_lock(p1);
+        if (ret_p2) {
+            *ret_p2 = p1;
+        }
+        return;
     }
-    p2 = page_find_alloc(phys2 >> TARGET_PAGE_BITS, alloc);
+
+    p2 = page_find_alloc(page2, alloc);
     if (ret_p2) {
         *ret_p2 = p2;
     }
-    if (phys1 < phys2) {
+
+    if (page1 < page2) {
         page_lock(p1);
         page_lock(p2);
     } else {
@@ -1623,7 +1641,7 @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
         tb = existing_tb;
     }
 
-    if (p2) {
+    if (p2 && p2 != p) {
         page_unlock(p2);
     }
     page_unlock(p);
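
These three hunks handle the case the commit title calls out: a TB whose two
subpages map to the same physical page, and hence the same PageDesc. Locking
or unlocking that PageDesc twice would deadlock or underflow the lock, so the
pair operations dedupe identical pages and take distinct locks in a global
order. A minimal sketch of the pattern, using pthread mutexes as stand-ins
for QEMU's page locks:

    #include <pthread.h>
    #include <stddef.h>

    typedef struct { pthread_mutex_t lock; } PageDesc;

    /* Lock one or two pages: skip a duplicate second page, and take
     * distinct locks in address order so concurrent callers cannot
     * deadlock against each other. */
    static void lock_pair(PageDesc *p1, PageDesc *p2)
    {
        if (p2 == NULL || p2 == p1) {
            pthread_mutex_lock(&p1->lock);   /* single lock, never twice */
            return;
        }
        if (p1 < p2) {
            pthread_mutex_lock(&p1->lock);
            pthread_mutex_lock(&p2->lock);
        } else {
            pthread_mutex_lock(&p2->lock);
            pthread_mutex_lock(&p1->lock);
        }
    }

    static void unlock_pair(PageDesc *p1, PageDesc *p2)
    {
        if (p2 != NULL && p2 != p1) {        /* mirrors "p2 && p2 != p" */
            pthread_mutex_unlock(&p2->lock);
        }
        pthread_mutex_unlock(&p1->lock);
    }

    int main(void)
    {
        PageDesc a = { PTHREAD_MUTEX_INITIALIZER };
        PageDesc b = { PTHREAD_MUTEX_INITIALIZER };

        lock_pair(&a, &b);   /* distinct pages: both locked, in order */
        unlock_pair(&a, &b);

        lock_pair(&a, &a);   /* shared page: locked exactly once */
        unlock_pair(&a, &a);
        return 0;
    }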

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h

@@ -339,6 +339,29 @@ CPUArchState *cpu_copy(CPUArchState *env);
 #define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
                         | TLB_RECHECK)
 
+/**
+ * tlb_hit_page: return true if page aligned @addr is a hit against the
+ * TLB entry @tlb_addr
+ *
+ * @addr: virtual address to test (must be page aligned)
+ * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
+ */
+static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
+{
+    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
+}
+
+/**
+ * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr
+ *
+ * @addr: virtual address to test (need not be page aligned)
+ * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
+ */
+static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
+{
+    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
+}
+
 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
 void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf);
 
 #endif /* !CONFIG_USER_ONLY */
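
A property worth noting in tlb_hit_page(): because TLB_INVALID_MASK is kept in
the comparison mask rather than cleared, an invalid entry keeps a low bit set
after the AND and so can never compare equal to a page-aligned address. A
quick standalone check (page size and bit position assumed for illustration):

    #include <assert.h>
    #include <stdint.h>

    #define TARGET_PAGE_MASK (~(uint64_t)0xfff)   /* assume 4 KiB pages */
    #define TLB_INVALID_MASK (1 << 0)             /* illustrative bit */

    static int tlb_hit_page(uint64_t tlb_addr, uint64_t addr)
    {
        return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
    }

    int main(void)
    {
        uint64_t page = 0x1000;

        assert(tlb_hit_page(0x1000, page));                     /* valid hit */
        assert(!tlb_hit_page(0x2000, page));                    /* other page */
        assert(!tlb_hit_page(0x1000 | TLB_INVALID_MASK, page)); /* invalid */
        return 0;
    }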

diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h

@@ -422,8 +422,7 @@ static inline void *tlb_vaddr_to_host(CPUArchState *env, target_ulong addr,
         g_assert_not_reached();
     }
 
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+    if (!tlb_hit(tlb_addr, addr)) {
         /* TLB entry is for a different page */
         return NULL;
     }

diff --git a/include/qom/cpu.h b/include/qom/cpu.h

@@ -620,11 +620,13 @@ static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
 static inline int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);
+    int ret = 0;
 
     if (cc->asidx_from_attrs) {
-        return cc->asidx_from_attrs(cpu, attrs);
+        ret = cc->asidx_from_attrs(cpu, attrs);
+        assert(ret < cpu->num_ases && ret >= 0);
     }
-    return 0;
+    return ret;
 }
 
 #endif
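
The index returned here is subsequently used to select one of the CPU's
address spaces, so an out-of-range value from a buggy asidx_from_attrs hook
would corrupt state silently; asserting at the call site catches it
immediately. The same defensive pattern in a self-contained form (the types
and names below are invented for illustration):

    #include <assert.h>
    #include <stddef.h>

    typedef struct Cpu Cpu;
    struct Cpu {
        int num_ases;
        void *ases[4];
        int (*asidx_hook)(Cpu *);   /* optional, may be NULL */
    };

    static void *cpu_get_address_space(Cpu *cpu)
    {
        int ret = 0;

        if (cpu->asidx_hook) {
            ret = cpu->asidx_hook(cpu);
            /* Catch a buggy hook before ret is used as an index. */
            assert(ret >= 0 && ret < cpu->num_ases);
        }
        return cpu->ases[ret];
    }

    static int secure_idx(Cpu *cpu)
    {
        (void)cpu;
        return 1;
    }

    int main(void)
    {
        int as0, as1;
        Cpu cpu = { 2, { &as0, &as1 }, secure_idx };

        return cpu_get_address_space(&cpu) == &as1 ? 0 : 1;
    }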