diff --git a/bochs/cpu/svm.cc b/bochs/cpu/svm.cc
index 215bd273f..6af6ef654 100644
--- a/bochs/cpu/svm.cc
+++ b/bochs/cpu/svm.cc
@@ -219,6 +219,8 @@ void BX_CPU_C::SvmEnterSaveHostState(SVM_HOST_STATE *host)
   host->rip = RIP;
   host->rsp = RSP;
   host->rax = RAX;
+
+  host->pat_msr = BX_CPU_THIS_PTR msr.pat;
 }
 
 void BX_CPU_C::SvmExitLoadHostState(SVM_HOST_STATE *host)
@@ -246,6 +248,8 @@ void BX_CPU_C::SvmExitLoadHostState(SVM_HOST_STATE *host)
     }
   }
 
+  BX_CPU_THIS_PTR msr.pat = host->pat_msr;
+
   BX_CPU_THIS_PTR dr7.set32(0x00000400);
 
   setEFlags(host->eflags & ~EFlagsVMMask); // ignore saved copy of EFLAGS.VM
@@ -373,7 +377,7 @@ bool BX_CPU_C::SvmEnterLoadCheckControls(SVM_CONTROLS *ctrls)
     return 0;
   }
 
-  Bit64u guest_pat = vmcb_read32(SVM_GUEST_PAT);
+  Bit64u guest_pat = vmcb_read64(SVM_GUEST_PAT);
   if (! isValidMSR_PAT(guest_pat)) {
     BX_ERROR(("VMRUN: invalid memory type in guest PAT_MSR !"));
     return 0;
@@ -461,6 +465,8 @@ bool BX_CPU_C::SvmEnterLoadCheckGuestState(void)
     return 0;
   }
 
+  guest.pat_msr = vmcb_read64(SVM_GUEST_PAT);
+
   for (n=0;n < 4; n++) {
     svm_segment_read(&guest.sregs[n], SVM_GUEST_ES_SELECTOR + n * 0x10);
   }
@@ -539,6 +545,10 @@ bool BX_CPU_C::SvmEnterLoadCheckGuestState(void)
       }
     }
   }
+  else {
+    // load guest PAT when nested paging is enabled
+    BX_CPU_THIS_PTR msr.pat = guest.pat_msr;
+  }
 
   BX_CPU_THIS_PTR dr6.set32(guest.dr6);
   BX_CPU_THIS_PTR dr7.set32(guest.dr7 | 0x400);
diff --git a/bochs/cpu/svm.h b/bochs/cpu/svm.h
index 02c40ec61..e05ed6b1f 100644
--- a/bochs/cpu/svm.h
+++ b/bochs/cpu/svm.h
@@ -255,6 +255,8 @@ typedef struct bx_SVM_HOST_STATE
   Bit64u rsp;
   Bit64u rax;
 
+  BxPackedRegister pat_msr;
+
 } SVM_HOST_STATE;
 
 typedef struct bx_SVM_GUEST_STATE
@@ -271,6 +273,7 @@ typedef struct bx_SVM_GUEST_STATE
   Bit32u dr6;
   Bit32u dr7;
   bx_phy_address cr3;
+  BxPackedRegister pat_msr;
   Bit32u eflags;
   Bit64u rip;
   Bit64u rsp;