target/i386: leave the A20 bit set in the final NPT walk
The A20 mask is only applied to the final memory access. Nested page tables are always walked with the raw guest-physical address. Unlike the previous patch, in this one the masking must be kept, but it was done too early.

Cc: qemu-stable@nongnu.org
Fixes: 4a1e9d4d11 ("target/i386: Use atomic operations for pte updates", 2022-10-18)
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit b5a9de3259)
Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>
This commit is contained in:
parent
91ad0d26e1
commit
1165d9601d
@@ -133,7 +133,6 @@ static inline bool ptw_setl(const PTETranslate *in, uint32_t old, uint32_t set)
|
||||
static bool mmu_translate(CPUX86State *env, const TranslateParams *in,
|
||||
TranslateResult *out, TranslateFault *err)
|
||||
{
|
||||
const int32_t a20_mask = x86_get_a20_mask(env);
|
||||
const target_ulong addr = in->addr;
|
||||
const int pg_mode = in->pg_mode;
|
||||
const bool is_user = (in->mmu_idx == MMU_USER_IDX);
|
||||
@@ -415,10 +414,13 @@ do_check_protect_pse36:
|
||||
}
|
||||
}
|
||||
|
||||
/* align to page_size */
|
||||
paddr = (pte & a20_mask & PG_ADDRESS_MASK & ~(page_size - 1))
|
||||
| (addr & (page_size - 1));
|
||||
/* merge offset within page */
|
||||
paddr = (pte & PG_ADDRESS_MASK & ~(page_size - 1)) | (addr & (page_size - 1));
|
||||
|
||||
/*
|
||||
* Note that NPT is walked (for both paging structures and final guest
|
||||
* addresses) using the address with the A20 bit set.
|
||||
*/
|
||||
if (in->ptw_idx == MMU_NESTED_IDX) {
|
||||
CPUTLBEntryFull *full;
|
||||
int flags, nested_page_size;
|
||||
@@ -457,7 +459,7 @@ do_check_protect_pse36:
|
||||
}
|
||||
}
|
||||
|
||||
out->paddr = paddr;
|
||||
out->paddr = paddr & x86_get_a20_mask(env);
|
||||
out->prot = prot;
|
||||
out->page_size = page_size;
|
||||
return true;
|
||||
|
Loading…
Reference in New Issue
Block a user