implement MSR PAT handling in AMD SVM

This commit is contained in:
Stanislav Shwartsman 2021-03-21 15:33:18 +00:00
parent ca654e2033
commit 8e58d7336f
2 changed files with 14 additions and 1 deletions

View File

@ -219,6 +219,8 @@ void BX_CPU_C::SvmEnterSaveHostState(SVM_HOST_STATE *host)
host->rip = RIP;
host->rsp = RSP;
host->rax = RAX;
host->pat_msr = BX_CPU_THIS_PTR msr.pat;
}
void BX_CPU_C::SvmExitLoadHostState(SVM_HOST_STATE *host)
@ -246,6 +248,8 @@ void BX_CPU_C::SvmExitLoadHostState(SVM_HOST_STATE *host)
}
}
BX_CPU_THIS_PTR msr.pat = host->pat_msr;
BX_CPU_THIS_PTR dr7.set32(0x00000400);
setEFlags(host->eflags & ~EFlagsVMMask); // ignore saved copy of EFLAGS.VM
@ -373,7 +377,7 @@ bool BX_CPU_C::SvmEnterLoadCheckControls(SVM_CONTROLS *ctrls)
return 0;
}
-  Bit64u guest_pat = vmcb_read32(SVM_GUEST_PAT);
+  Bit64u guest_pat = vmcb_read64(SVM_GUEST_PAT);
if (! isValidMSR_PAT(guest_pat)) {
BX_ERROR(("VMRUN: invalid memory type in guest PAT_MSR !"));
return 0;
@ -461,6 +465,8 @@ bool BX_CPU_C::SvmEnterLoadCheckGuestState(void)
return 0;
}
guest.pat_msr = vmcb_read64(SVM_GUEST_PAT);
for (n=0;n < 4; n++) {
svm_segment_read(&guest.sregs[n], SVM_GUEST_ES_SELECTOR + n * 0x10);
}
@ -539,6 +545,10 @@ bool BX_CPU_C::SvmEnterLoadCheckGuestState(void)
}
}
}
else {
// load guest PAT when nested paging is enabled
BX_CPU_THIS_PTR msr.pat = guest.pat_msr;
}
BX_CPU_THIS_PTR dr6.set32(guest.dr6);
BX_CPU_THIS_PTR dr7.set32(guest.dr7 | 0x400);

View File

@ -255,6 +255,8 @@ typedef struct bx_SVM_HOST_STATE
Bit64u rsp;
Bit64u rax;
BxPackedRegister pat_msr;
} SVM_HOST_STATE;
typedef struct bx_SVM_GUEST_STATE
@ -271,6 +273,7 @@ typedef struct bx_SVM_GUEST_STATE
Bit32u dr6;
Bit32u dr7;
bx_phy_address cr3;
BxPackedRegister pat_msr;
Bit32u eflags;
Bit64u rip;
Bit64u rsp;