target/i386: leave the A20 bit set in the final NPT walk
The A20 mask is only applied to the final memory access. Nested
page tables are always walked with the raw guest-physical address.
Unlike the previous patch, in this one the masking must be kept, but
it was done too early.
Cc: qemu-stable@nongnu.org
Fixes: 4a1e9d4d11 ("target/i386: Use atomic operations for pte updates", 2022-10-18)
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent commit: a28fe7dc19
This commit:   b5a9de3259
@@ -134,7 +134,6 @@ static inline bool ptw_setl(const PTETranslate *in, uint32_t old, uint32_t set)
 static bool mmu_translate(CPUX86State *env, const TranslateParams *in,
                           TranslateResult *out, TranslateFault *err)
 {
-    const int32_t a20_mask = x86_get_a20_mask(env);
     const target_ulong addr = in->addr;
     const int pg_mode = in->pg_mode;
     const bool is_user = is_mmu_index_user(in->mmu_idx);
@@ -417,10 +416,13 @@ do_check_protect_pse36:
         }
     }
 
-    /* align to page_size */
-    paddr = (pte & a20_mask & PG_ADDRESS_MASK & ~(page_size - 1))
-          | (addr & (page_size - 1));
+    /* merge offset within page */
+    paddr = (pte & PG_ADDRESS_MASK & ~(page_size - 1)) | (addr & (page_size - 1));
 
+    /*
+     * Note that NPT is walked (for both paging structures and final guest
+     * addresses) using the address with the A20 bit set.
+     */
     if (in->ptw_idx == MMU_NESTED_IDX) {
         CPUTLBEntryFull *full;
         int flags, nested_page_size;
@@ -459,7 +461,7 @@ do_check_protect_pse36:
         }
     }
 
-    out->paddr = paddr;
+    out->paddr = paddr & x86_get_a20_mask(env);
     out->prot = prot;
     out->page_size = page_size;
     return true;
|
Loading…
Reference in New Issue
Block a user