more robust handling of SVM VMCB host ptr

Stanislav Shwartsman 2021-07-23 09:30:17 +00:00
parent daaab792b6
commit 0cba8b66c9
3 changed files with 27 additions and 14 deletions

@@ -5084,6 +5084,7 @@ public: // for now...
#endif
#if BX_SUPPORT_SVM
  BX_SMF void set_VMCBPTR(Bit64u vmcbptr);
  BX_SMF void SvmEnterSaveHostState(SVM_HOST_STATE *host);
  BX_SMF bool SvmEnterLoadCheckControls(SVM_CONTROLS *ctrls);
  BX_SMF bool SvmEnterLoadCheckGuestState(void);

@@ -657,6 +657,10 @@ void BX_CPU_C::after_restore_state(void)
  set_VMCSPTR(BX_CPU_THIS_PTR vmcsptr);
#endif
#if BX_SUPPORT_SVM
  set_VMCBPTR(BX_CPU_THIS_PTR vmcbptr);
#endif
#if BX_SUPPORT_PKEYS
  set_PKeys(BX_CPU_THIS_PTR pkru, BX_CPU_THIS_PTR pkrs);
#endif
@@ -1056,13 +1060,9 @@ void BX_CPU_C::reset(unsigned source)
#endif
#if BX_SUPPORT_SVM
  set_VMCBPTR(0);
  BX_CPU_THIS_PTR in_svm_guest = 0;
  BX_CPU_THIS_PTR svm_gif = 1;
  BX_CPU_THIS_PTR vmcbptr = 0;
  BX_CPU_THIS_PTR vmcbhostptr = 0;
#if BX_SUPPORT_MEMTYPE
  BX_CPU_THIS_PTR vmcb_memtype = BX_MEMTYPE_UC;
#endif
#endif
#if BX_SUPPORT_VMX || BX_SUPPORT_SVM

@@ -32,6 +32,24 @@
extern const char *segname[];
void BX_CPU_C::set_VMCBPTR(Bit64u vmcbptr)
{
  BX_CPU_THIS_PTR vmcbptr = vmcbptr;
  if (vmcbptr != 0) {
    BX_CPU_THIS_PTR vmcbhostptr = BX_CPU_THIS_PTR getHostMemAddr(vmcbptr, BX_WRITE);
#if BX_SUPPORT_MEMTYPE
    BX_CPU_THIS_PTR vmcb_memtype = resolve_memtype(BX_CPU_THIS_PTR vmcbptr);
#endif
  }
  else {
    BX_CPU_THIS_PTR vmcbhostptr = 0;
#if BX_SUPPORT_MEMTYPE
    BX_CPU_THIS_PTR vmcb_memtype = BX_MEMTYPE_UC;
#endif
  }
}
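
For context: the cached vmcbhostptr / vmcb_memtype pair that set_VMCBPTR() establishes above is what the VMCB field accessors rely on. A simplified sketch of such an accessor follows; the name vmcb_read32 and its exact shape are assumptions for illustration, and a real accessor would also handle endianness and memory-access tracing:

// Illustrative sketch only: read a 32-bit VMCB field through the cached
// host mapping set up by set_VMCBPTR(), falling back to a physical memory
// read when no host mapping is available.
Bit32u BX_CPU_C::vmcb_read32(unsigned offset)
{
  Bit32u val_32;
  bx_phy_address pAddr = BX_CPU_THIS_PTR vmcbptr + offset;
  if (BX_CPU_THIS_PTR vmcbhostptr) {
    // vmcbptr is page aligned and offset < 4096, so OR behaves like addition
    Bit32u *hostAddr = (Bit32u*) (BX_CPU_THIS_PTR vmcbhostptr | offset);
    val_32 = *hostAddr;
  }
  else {
    access_read_physical(pAddr, 4, (Bit8u*)(&val_32));
  }
  return val_32;
}

Because every place that changes the VMCB pointer now goes through set_VMCBPTR(), the cached mapping cannot go stale, including after a save/restore cycle where host addresses have to be re-resolved.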
// When loading segment bases from the VMCB or the host save area
// (on VMRUN or #VMEXIT), segment bases are canonicalized (i.e.
// sign-extended from the highest implemented address bit to bit 63)
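
To make the canonicalization rule above concrete, sign extension from the highest implemented address bit can be written as a pair of shifts. The free-standing helper and the addr_width parameter below are illustrative assumptions, not code from this change:

// Sketch: sign-extend 'base' from bit (addr_width - 1) into bits addr_width..63.
// Example: canonicalize_base(0x0000800000000000, 48) yields 0xFFFF800000000000.
static Bit64u canonicalize_base(Bit64u base, unsigned addr_width)
{
  unsigned shift = 64 - addr_width;
  // shift the top implemented bit up to bit 63, then arithmetic-shift back,
  // replicating it through the upper bits
  return (Bit64u)((Bit64s)(base << shift) >> shift);
}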
@@ -986,11 +1004,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMRUN(bxInstruction_c *i)
    BX_ERROR(("VMRUN: invalid or not page aligned VMCB physical address !"));
    exception(BX_GP_EXCEPTION, 0);
  }
  BX_CPU_THIS_PTR vmcbptr = pAddr;
  BX_CPU_THIS_PTR vmcbhostptr = BX_CPU_THIS_PTR getHostMemAddr(pAddr, BX_WRITE);
#if BX_SUPPORT_MEMTYPE
  BX_CPU_THIS_PTR vmcb_memtype = resolve_memtype(BX_CPU_THIS_PTR vmcbptr);
#endif
  set_VMCBPTR(pAddr);
  BX_DEBUG(("VMRUN VMCB ptr: 0x" FMT_ADDRX64, BX_CPU_THIS_PTR vmcbptr));
@@ -1060,8 +1074,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMLOAD(bxInstruction_c *i)
    BX_ERROR(("VMLOAD: invalid or not page aligned VMCB physical address !"));
    exception(BX_GP_EXCEPTION, 0);
  }
  BX_CPU_THIS_PTR vmcbptr = pAddr;
  BX_CPU_THIS_PTR vmcbhostptr = BX_CPU_THIS_PTR getHostMemAddr(pAddr, BX_WRITE);
  set_VMCBPTR(pAddr);
  BX_DEBUG(("VMLOAD VMCB ptr: 0x" FMT_ADDRX64, BX_CPU_THIS_PTR vmcbptr));
@@ -1111,8 +1124,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMSAVE(bxInstruction_c *i)
    BX_ERROR(("VMSAVE: invalid or not page aligned VMCB physical address !"));
    exception(BX_GP_EXCEPTION, 0);
  }
  BX_CPU_THIS_PTR vmcbptr = pAddr;
  BX_CPU_THIS_PTR vmcbhostptr = BX_CPU_THIS_PTR getHostMemAddr(pAddr, BX_WRITE);
  set_VMCBPTR(pAddr);
  BX_DEBUG(("VMSAVE VMCB ptr: 0x" FMT_ADDRX64, BX_CPU_THIS_PTR vmcbptr));