target-i386: raise page fault for reserved physical address bits

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
Paolo Bonzini 2014-05-27 12:58:36 +02:00
parent b728464ae8
commit e8f6d00c30
2 changed files with 32 additions and 12 deletions

View File

@@ -260,6 +260,8 @@
 #define PG_DIRTY_MASK (1 << PG_DIRTY_BIT)
 #define PG_PSE_MASK (1 << PG_PSE_BIT)
 #define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT)
+#define PG_ADDRESS_MASK 0x000ffffffffff000LL
+#define PG_HI_RSVD_MASK (PG_ADDRESS_MASK & ~PHYS_ADDR_MASK)
 #define PG_HI_USER_MASK 0x7ff0000000000000LL
 #define PG_NX_MASK (1LL << PG_NX_BIT)
@@ -1137,6 +1139,14 @@ uint64_t cpu_get_tsc(CPUX86State *env);
 #define TARGET_VIRT_ADDR_SPACE_BITS 32
 #endif
+/* XXX: This value should match the one returned by CPUID
+ * and in exec.c */
+# if defined(TARGET_X86_64)
+# define PHYS_ADDR_MASK 0xffffffffffLL
+# else
+# define PHYS_ADDR_MASK 0xfffffffffLL
+# endif
 static inline CPUX86State *cpu_init(const char *cpu_model)
 {
     X86CPU *cpu = cpu_x86_init(cpu_model);

View File

@@ -510,14 +510,6 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
 #else
-/* XXX: This value should match the one returned by CPUID
- * and in exec.c */
-# if defined(TARGET_X86_64)
-# define PHYS_ADDR_MASK 0xfffffff000LL
-# else
-# define PHYS_ADDR_MASK 0xffffff000LL
-# endif
 /* return value:
  * -1 = cannot handle fault
  * 0 = nothing more to do
@@ -533,6 +525,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
     int error_code = 0;
     int is_dirty, prot, page_size, is_write, is_user;
     hwaddr paddr;
+    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
     uint32_t page_offset;
     target_ulong vaddr, virt_addr;
@ -580,7 +573,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
if (!(pml4e & PG_PRESENT_MASK)) { if (!(pml4e & PG_PRESENT_MASK)) {
goto do_fault; goto do_fault;
} }
if (pml4e & PG_PSE_MASK) { if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
goto do_fault_rsvd; goto do_fault_rsvd;
} }
if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) { if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
@@ -591,12 +584,15 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
             stl_phys_notdirty(cs->as, pml4e_addr, pml4e);
         }
         ptep = pml4e ^ PG_NX_MASK;
-        pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
+        pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
             env->a20_mask;
         pdpe = ldq_phys(cs->as, pdpe_addr);
         if (!(pdpe & PG_PRESENT_MASK)) {
             goto do_fault;
         }
+        if (pdpe & rsvd_mask) {
+            goto do_fault_rsvd;
+        }
         if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
             goto do_fault_rsvd;
         }
@@ -622,15 +618,22 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
             if (!(pdpe & PG_PRESENT_MASK)) {
                 goto do_fault;
             }
+            rsvd_mask |= PG_HI_USER_MASK | PG_NX_MASK;
+            if (pdpe & rsvd_mask) {
+                goto do_fault_rsvd;
+            }
             ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
         }
-        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
+        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
             env->a20_mask;
         pde = ldq_phys(cs->as, pde_addr);
         if (!(pde & PG_PRESENT_MASK)) {
             goto do_fault;
         }
+        if (pde & rsvd_mask) {
+            goto do_fault_rsvd;
+        }
         if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
             goto do_fault_rsvd;
         }
@@ -647,12 +650,15 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
             pde |= PG_ACCESSED_MASK;
             stl_phys_notdirty(cs->as, pde_addr, pde);
         }
-        pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
+        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            env->a20_mask;
         pte = ldq_phys(cs->as, pte_addr);
         if (!(pte & PG_PRESENT_MASK)) {
             goto do_fault;
         }
+        if (pte & rsvd_mask) {
+            goto do_fault_rsvd;
+        }
         if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
             goto do_fault_rsvd;
         }
@@ -694,9 +700,13 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
         /* combine pde and pte user and rw protections */
         ptep &= pte | PG_NX_MASK;
         page_size = 4096;
+        rsvd_mask = 0;
     }
 do_check_protect:
+    if (pte & rsvd_mask) {
+        goto do_fault_rsvd;
+    }
     ptep ^= PG_NX_MASK;
     if ((ptep & PG_NX_MASK) && is_write1 == 2) {
         goto do_fault_protect;