SVM fixes
commit 3c0d712146 (parent fe6741d84d)
@@ -428,6 +428,11 @@ enum {
 #define BX_MSR_KERNELGSBASE 0xc0000102
 #define BX_MSR_TSC_AUX      0xc0000103
 
+#define BX_SVM_VM_CR_MSR    0xc0010114
+#define BX_SVM_IGNNE_MSR    0xc0010115
+#define BX_SVM_SMM_CTL_MSR  0xc0010116
+#define BX_SVM_HSAVE_PA_MSR 0xc0010117
+
 #define BX_MODE_IA32_REAL      0x0   // CR0.PE=0                |
 #define BX_MODE_IA32_V8086     0x1   // CR0.PE=1, EFLAGS.VM=1   | EFER.LMA=0
 #define BX_MODE_IA32_PROTECTED 0x2   // CR0.PE=1, EFLAGS.VM=0   |
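Context, not part of the diff: 0xc0010114 through 0xc0010117 are the architectural AMD SVM MSRs (VM_CR, IGNNE, SMM_CTL and VM_HSAVE_PA). A hypervisor normally sets EFER.SVME and programs VM_HSAVE_PA with a page-aligned host save area before its first VMRUN, which is why the emulator needs these indices and the svm_hsave_pa state added below. A minimal bare-metal, ring-0-only sketch of that enable sequence, assuming GCC/Clang inline asm; the function names and the hsave page allocation are illustrative, not Bochs or OS APIs:

// Ring-0 only: rdmsr/wrmsr fault in user mode.
#include <cstdint>

constexpr uint32_t MSR_EFER        = 0xc0000080;
constexpr uint32_t MSR_VM_HSAVE_PA = 0xc0010117;   // same index as BX_SVM_HSAVE_PA_MSR above
constexpr uint64_t EFER_SVME       = 1ull << 12;   // SVM enable bit in EFER

static inline uint64_t rdmsr(uint32_t idx)
{
  uint32_t lo, hi;
  __asm__ __volatile__("rdmsr" : "=a"(lo), "=d"(hi) : "c"(idx));
  return (uint64_t(hi) << 32) | lo;
}

static inline void wrmsr(uint32_t idx, uint64_t val)
{
  __asm__ __volatile__("wrmsr" : : "c"(idx), "a"(uint32_t(val)), "d"(uint32_t(val >> 32)));
}

// hsave_phys: physical address of a reserved, 4 KiB aligned host save page
static void svm_host_enable(uint64_t hsave_phys)
{
  wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);   // turn SVM on
  wrmsr(MSR_VM_HSAVE_PA, hsave_phys);             // tell VMRUN where to save host state
}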
@@ -662,6 +667,10 @@ typedef struct
   Bit32u ia32_feature_ctrl;
 #endif
 
+#if BX_SUPPORT_SVM
+  Bit64u svm_hsave_pa;
+#endif
+
   /* TODO finish of the others */
 } bx_regs_msr_t;
 #endif
@@ -57,7 +57,9 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_DdRd(bxInstruction_c *i)
   }
 
 #if BX_SUPPORT_SVM
+  if (BX_CPU_THIS_PTR in_svm_guest) {
   if (SVM_DR_WRITE_INTERCEPTED(i->nnn())) Svm_Vmexit(SVM_VMEXIT_DR0_WRITE + i->nnn());
+  }
 #endif
 
   invalidate_prefetch_q();
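A note on the intercept dispatch that this hunk and the three MOV DRx hunks below rely on (the pattern, not Bochs internals): the VMCB carries one intercept bit per debug register for reads and one for writes, and the architectural exit code is a base code plus the register number, which is why the guards above can compute SVM_VMEXIT_DR0_WRITE + i->nnn(). A self-contained sketch under those assumptions; the constants follow the AMD exit-code layout, while the struct and function are illustrative only:

#include <cstdint>
#include <cstdio>

constexpr uint32_t SVM_VMEXIT_DR0_READ  = 0x20;   // DR0..DR15 read exit codes
constexpr uint32_t SVM_VMEXIT_DR0_WRITE = 0x30;   // DR0..DR15 write exit codes

struct VmcbIntercepts {
  uint16_t dr_read_bitmap;    // bit n set => intercept reads of DRn
  uint16_t dr_write_bitmap;   // bit n set => intercept writes of DRn
};

// Returns the exit code for an intercepted DRn access, or -1 if not intercepted.
int dr_access_exit_code(const VmcbIntercepts& ic, unsigned dr, bool is_write)
{
  uint16_t bitmap = is_write ? ic.dr_write_bitmap : ic.dr_read_bitmap;
  if (bitmap & (1u << dr))
    return int((is_write ? SVM_VMEXIT_DR0_WRITE : SVM_VMEXIT_DR0_READ) + dr);
  return -1;
}

int main()
{
  VmcbIntercepts ic{0x0001, 0x00C0};                        // DR0 reads, DR6/DR7 writes
  std::printf("%d\n", dr_access_exit_code(ic, 7, true));    // 0x37 = 55
  std::printf("%d\n", dr_access_exit_code(ic, 3, false));   // -1 (not intercepted)
  return 0;
}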
@@ -166,7 +168,9 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_RdDd(bxInstruction_c *i)
   }
 
 #if BX_SUPPORT_SVM
+  if (BX_CPU_THIS_PTR in_svm_guest) {
   if (SVM_DR_READ_INTERCEPTED(i->nnn())) Svm_Vmexit(SVM_VMEXIT_DR0_READ + i->nnn());
+  }
 #endif
 
   /* This instruction is always treated as a register-to-register,
@@ -242,7 +246,9 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_DqRq(bxInstruction_c *i)
   }
 
 #if BX_SUPPORT_SVM
+  if (BX_CPU_THIS_PTR in_svm_guest) {
   if (SVM_DR_WRITE_INTERCEPTED(i->nnn())) Svm_Vmexit(SVM_VMEXIT_DR0_WRITE + i->nnn());
+  }
 #endif
 
   invalidate_prefetch_q();
@@ -352,7 +358,9 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_RqDq(bxInstruction_c *i)
   }
 
 #if BX_SUPPORT_SVM
+  if (BX_CPU_THIS_PTR in_svm_guest) {
   if (SVM_DR_READ_INTERCEPTED(i->nnn())) Svm_Vmexit(SVM_VMEXIT_DR0_READ + i->nnn());
+  }
 #endif
 
   /* This instruction is always treated as a register-to-register,
@@ -227,6 +227,16 @@ bx_bool BX_CPP_AttrRegparmN(2) BX_CPU_C::rdmsr(Bit32u index, Bit64u *msr)
       val64 = BX_CPU_THIS_PTR efer.get32();
       break;
 
+#if BX_SUPPORT_SVM
+    case BX_SVM_HSAVE_PA_MSR:
+      if (! bx_cpuid_support_svm()) {
+        BX_ERROR(("RDMSR SVM_HSAVE_PA_MSR: SVM support not enabled !"));
+        return handle_unknown_rdmsr(index, msr);
+      }
+      val64 = BX_CPU_THIS_PTR msr.svm_hsave_pa;
+      break;
+#endif
+
     case BX_MSR_STAR:
       if ((BX_CPU_THIS_PTR efer_suppmask & BX_EFER_SCE_MASK) == 0) {
         BX_ERROR(("RDMSR MSR_STAR: SYSCALL/SYSRET support not enabled !"));
@@ -643,6 +653,19 @@ bx_bool BX_CPP_AttrRegparmN(2) BX_CPU_C::wrmsr(Bit32u index, Bit64u val_64)
       return 0;
 #endif
 
+#if BX_SUPPORT_SVM
+    case BX_SVM_HSAVE_PA_MSR:
+      if (! bx_cpuid_support_svm()) {
+        BX_ERROR(("WRMSR SVM_HSAVE_PA_MSR: SVM support not enabled !"));
+        return handle_unknown_wrmsr(index, val_64);
+      }
+      if ((val_64 & 0xfff) != 0 || ! IsValidPhyAddr(val_64)) {
+        BX_ERROR(("WRMSR SVM_HSAVE_PA_MSR: invalid or not page aligned physical address !"));
+      }
+      BX_CPU_THIS_PTR msr.svm_hsave_pa = val_64;
+      break;
+#endif
+
     case BX_MSR_EFER:
       if (! SetEFER(val_64)) return 0;
       break;
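Side observation on the WRMSR hunk above, not part of the commit: VM_HSAVE_PA is expected to hold a valid, 4 KiB aligned physical address, and the hunk logs an error for a bad value but still stores it, so a stricter handler would also reject the write instead of committing it. A small sketch of that validation; the 40-bit physical address width and the helper name are assumptions made for the example:

#include <cstdint>

constexpr unsigned PHYS_ADDR_BITS = 40;                        // assumed width for the sketch
constexpr uint64_t PHYS_ADDR_MASK = (1ull << PHYS_ADDR_BITS) - 1;

// true if 'pa' is 4 KiB aligned and within the implemented physical address space
bool valid_hsave_pa(uint64_t pa)
{
  if (pa & 0xfff)           return false;   // not page aligned
  if (pa & ~PHYS_ADDR_MASK) return false;   // beyond the physical address width
  return true;
}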
@@ -433,7 +433,7 @@ bx_bool BX_CPU_C::SvmEnterLoadCheckGuestState(void)
   }
 
   if (! guest.sregs[BX_SEG_REG_SS].cache.valid || ! guest.sregs[BX_SEG_REG_SS].selector.value) {
-    if (! guest.efer.get_LME()) {
+    if (! guest.efer.get_LMA()) {
       BX_ERROR(("VMRUN: VMCB null stack segment in 32-bit mode"));
       return 0;
     }
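The one-character change above is the substantive fix: EFER.LME only requests long mode, while EFER.LMA is set by the processor once long mode is actually active (LME=1 with CR0.PG=1 and CR4.PAE=1), so a "guest is in 32-bit mode" test has to key on LMA. A tiny illustration of the distinction; the struct and function are made up for the example:

#include <cstdio>

struct Efer { bool lme; bool lma; };   // long mode enable / long mode active

// The null-SS consistency check only applies when the guest is not in long mode.
bool guest_in_legacy_mode(const Efer& efer)
{
  return !efer.lma;   // keying on LME would wrongly skip the check when LME=1 but paging is off
}

int main()
{
  Efer e{ /*lme=*/true, /*lma=*/false };            // long mode requested, not yet activated
  std::printf("%d\n", guest_in_legacy_mode(e));     // 1: guest is still in legacy/32-bit mode
  return 0;
}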
@@ -539,13 +539,13 @@ bx_bool BX_CPU_C::SvmEnterLoadCheckGuestState(void)
 
 void BX_CPU_C::Svm_Vmexit(int reason)
 {
+  BX_ERROR(("SVM VMEXIT reason=%d", reason));
+
   if (!BX_CPU_THIS_PTR in_svm_guest) {
     if (reason != SVM_VMEXIT_INVALID)
-      BX_PANIC(("PANIC: VMEXIT not in SVM guest mode !"));
+      BX_PANIC(("PANIC: VMEXIT %d not in SVM guest mode !", reason));
   }
 
-  BX_ERROR(("SVM VMEXIT reason=%d", reason));
-
   // VMEXITs are FAULT-like: restore RIP/RSP to value before VMEXIT occurred
   RIP = BX_CPU_THIS_PTR prev_rip;
   if (BX_CPU_THIS_PTR speculative_rsp)