two self-modifying code fixes
Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20160708' into staging

two self-modifying code fixes

# gpg: Signature made Fri 08 Jul 2016 21:28:50 BST
# gpg:                using RSA key 0xAD1270CC4DD0279B
# gpg: Good signature from "Richard Henderson <rth7680@gmail.com>"
# gpg:                 aka "Richard Henderson <rth@redhat.com>"
# gpg:                 aka "Richard Henderson <rth@twiddle.net>"
# Primary key fingerprint: 9CB1 8DDA F8E8 49AD 2AFC 16A4 AD12 70CC 4DD0 279B

* remotes/rth/tags/pull-tcg-20160708:
  translate-all: Fix user-mode self-modifying code in 2 page long TB
  cputlb: Fix for self-modifying writes across page boundaries
  cputlb: Add address parameter to VICTIM_TLB_HIT
  cputlb: Move VICTIM_TLB_HIT out of line

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
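For orientation, the slow-path probe order in a softmmu load helper after this series is: main TLB first, then the victim TLB via the new out-of-line victim_tlb_hit() helper, and only then a full refill via tlb_fill(). A condensed sketch, mirroring the helper_le_ld_name hunk below; it is not a complete function, the surrounding declarations are elided:

    /* Sketch of the probe order after this series; not verbatim QEMU code. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* Main TLB miss: probe the victim TLB.  The address argument is
         * new in this series, so callers can probe pages other than addr. */
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            /* Victim TLB miss too: walk the page table and refill. */
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }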
commit 7de2cc8f78
cputlb.c
@@ -498,6 +498,35 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
     return qemu_ram_addr_from_host_nofail(p);
 }
 
+/* Return true if ADDR is present in the victim tlb, and has been copied
+   back to the main tlb. */
+static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
+                           size_t elt_ofs, target_ulong page)
+{
+    size_t vidx;
+    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
+        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
+        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
+
+        if (cmp == page) {
+            /* Found entry in victim tlb, swap tlb and iotlb. */
+            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
+            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
+            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
+
+            tmptlb = *tlb; *tlb = *vtlb; *vtlb = tmptlb;
+            tmpio = *io; *io = *vio; *vio = tmpio;
+            return true;
+        }
+    }
+    return false;
+}
+
+/* Macro to call the above, with local variables from the use context. */
+#define VICTIM_TLB_HIT(TY, ADDR) \
+    victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
+                   (ADDR) & TARGET_PAGE_MASK)
+
 #define MMUSUFFIX _mmu
 
 #define SHIFT 0
softmmu_template.h
@@ -116,31 +116,6 @@
 # define helper_te_st_name helper_le_st_name
 #endif
 
-/* macro to check the victim tlb */
-#define VICTIM_TLB_HIT(ty) \
-({ \
-    /* we are about to do a page table walk. our last hope is the \
-     * victim tlb. try to refill from the victim tlb before walking the \
-     * page table. */ \
-    int vidx; \
-    CPUIOTLBEntry tmpiotlb; \
-    CPUTLBEntry tmptlb; \
-    for (vidx = CPU_VTLB_SIZE-1; vidx >= 0; --vidx) { \
-        if (env->tlb_v_table[mmu_idx][vidx].ty == (addr & TARGET_PAGE_MASK)) {\
-            /* found entry in victim tlb, swap tlb and iotlb */ \
-            tmptlb = env->tlb_table[mmu_idx][index]; \
-            env->tlb_table[mmu_idx][index] = env->tlb_v_table[mmu_idx][vidx]; \
-            env->tlb_v_table[mmu_idx][vidx] = tmptlb; \
-            tmpiotlb = env->iotlb[mmu_idx][index]; \
-            env->iotlb[mmu_idx][index] = env->iotlb_v[mmu_idx][vidx]; \
-            env->iotlb_v[mmu_idx][vidx] = tmpiotlb; \
-            break; \
-        } \
-    } \
-    /* return true when there is a vtlb hit, i.e. vidx >=0 */ \
-    vidx >= 0; \
-})
-
 #ifndef SOFTMMU_CODE_ACCESS
 static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                               CPUIOTLBEntry *iotlbentry,
@@ -186,7 +161,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
     /* If the TLB entry is for a different page, reload and try again. */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        if (!VICTIM_TLB_HIT(ADDR_READ)) {
+        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
         }
@@ -260,7 +235,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
     /* If the TLB entry is for a different page, reload and try again. */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        if (!VICTIM_TLB_HIT(ADDR_READ)) {
+        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
         }
@@ -370,7 +345,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     /* If the TLB entry is for a different page, reload and try again. */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        if (!VICTIM_TLB_HIT(addr_write)) {
+        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
         tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
@@ -395,12 +370,25 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     if (DATA_SIZE > 1
         && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                     >= TARGET_PAGE_SIZE)) {
-        int i;
+        int i, index2;
+        target_ulong page2, tlb_addr2;
     do_unaligned_access:
-        /* XXX: not efficient, but simple */
-        /* Note: relies on the fact that tlb_fill() does not remove the
-         * previous page from the TLB cache. */
-        for (i = DATA_SIZE - 1; i >= 0; i--) {
+        /* Ensure the second page is in the TLB.  Note that the first page
+           is already guaranteed to be filled, and that the second page
+           cannot evict the first. */
+        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
+        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
+        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
+            && !VICTIM_TLB_HIT(addr_write, page2)) {
+            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
+                     mmu_idx, retaddr);
+        }
+
+        /* XXX: not efficient, but simple. */
+        /* This loop must go in the forward direction to avoid issues
+           with self-modifying code in Windows 64-bit. */
+        for (i = 0; i < DATA_SIZE; ++i) {
             /* Little-endian extract. */
             uint8_t val8 = val >> (i * 8);
             /* Note the adjustment at the beginning of the function.
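The essence of the store fix is visible above: the second page is probed (and filled if needed) before any byte is written, and the byte loop then runs in ascending order. A worked example of the page2 arithmetic, assuming 4 KiB pages (TARGET_PAGE_BITS = 12) and an 8-byte store; the addresses are illustrative:

    /* Hypothetical values, 4 KiB pages, DATA_SIZE = 8:
     *   addr  = 0x1ffc                 (last byte lands at 0x2003)
     *   page2 = (0x1ffc + 8) & ~0xfff  = 0x2000
     * Both page 0x1000 and page 0x2000 must be in the TLB before the
     * first byte is stored; a refill in the middle of the byte loop
     * could otherwise interact badly with dirty tracking and
     * self-modifying-code detection. */
    target_ulong page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
    int index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);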
@@ -440,7 +428,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     /* If the TLB entry is for a different page, reload and try again. */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        if (!VICTIM_TLB_HIT(addr_write)) {
+        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
         tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
@@ -465,12 +453,25 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     if (DATA_SIZE > 1
         && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                     >= TARGET_PAGE_SIZE)) {
-        int i;
+        int i, index2;
+        target_ulong page2, tlb_addr2;
     do_unaligned_access:
+        /* Ensure the second page is in the TLB.  Note that the first page
+           is already guaranteed to be filled, and that the second page
+           cannot evict the first. */
+        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
+        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
+        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
+            && !VICTIM_TLB_HIT(addr_write, page2)) {
+            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
+                     mmu_idx, retaddr);
+        }
+
         /* XXX: not efficient, but simple */
-        /* Note: relies on the fact that tlb_fill() does not remove the
-         * previous page from the TLB cache. */
-        for (i = DATA_SIZE - 1; i >= 0; i--) {
+        /* This loop must go in the forward direction to avoid issues
+           with self-modifying code. */
+        for (i = 0; i < DATA_SIZE; ++i) {
             /* Big-endian extract. */
             uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
             /* Note the adjustment at the beginning of the function.
@@ -502,7 +503,7 @@ void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
         /* TLB entry is for a different page */
-        if (!VICTIM_TLB_HIT(addr_write)) {
+        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
     }
translate-all.c
@@ -2000,6 +2000,7 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
 int page_unprotect(target_ulong address, uintptr_t pc)
 {
     unsigned int prot;
+    bool current_tb_invalidated;
     PageDesc *p;
     target_ulong host_start, host_end, addr;
 
@@ -2021,6 +2022,7 @@ int page_unprotect(target_ulong address, uintptr_t pc)
         host_end = host_start + qemu_host_page_size;
 
         prot = 0;
+        current_tb_invalidated = false;
         for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
             p = page_find(addr >> TARGET_PAGE_BITS);
             p->flags |= PAGE_WRITE;
@@ -2028,10 +2030,7 @@ int page_unprotect(target_ulong address, uintptr_t pc)
 
             /* and since the content will be modified, we must invalidate
                the corresponding translated code. */
-            if (tb_invalidate_phys_page(addr, pc)) {
-                mmap_unlock();
-                return 2;
-            }
+            current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
 #ifdef DEBUG_TB_CHECK
             tb_invalidate_check(addr);
 #endif
@@ -2040,7 +2039,8 @@ int page_unprotect(target_ulong address, uintptr_t pc)
                  prot & PAGE_BITS);
 
         mmap_unlock();
-        return 1;
+        /* If current TB was invalidated return to main loop */
+        return current_tb_invalidated ? 2 : 1;
     }
     mmap_unlock();
     return 0;
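The translate-all.c change keeps page_unprotect()'s three-way return contract but makes it reliable for a TB spanning two pages: the result of tb_invalidate_phys_page() is now accumulated over every page of the host-page range instead of returning early on the first hit, so protection is always restored via mprotect() before returning. A hedged sketch of a caller acting on the result; the handler and helper names are hypothetical, only the 0/1/2 meanings come from the code above:

    /* Hypothetical write-fault handler fragment; not QEMU's actual code. */
    switch (page_unprotect(fault_address, pc)) {
    case 0: /* page was not write-protected for TB bookkeeping: real fault */
        deliver_guest_fault();   /* hypothetical */
        break;
    case 1: /* protection removed: retry the faulting store directly */
        break;
    case 2: /* the currently executing TB was invalidated: the retry must
             * go back through the main loop to pick up fresh code */
        return_to_main_loop();   /* hypothetical */
        break;
    }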